diff -Nru ceilometer-5.0.0~b2/AUTHORS ceilometer-5.0.0~b3/AUTHORS --- ceilometer-5.0.0~b2/AUTHORS 2015-07-30 12:17:53.000000000 +0000 +++ ceilometer-5.0.0~b3/AUTHORS 2015-09-03 13:09:31.000000000 +0000 @@ -6,6 +6,7 @@ Akhil Hingane Ala Rezmerita Alessandro Pilotti +Alex Holden Alexei Kornienko Ana Malagon Andreas Jaeger @@ -15,6 +16,7 @@ Angus Salkeld Ann Kamyshnikova Artur Svechnikov +Ashwin Agate Balazs Gibizer Bartosz Górski Ben Nemec @@ -27,6 +29,7 @@ Can ZHANG Cedric Soulas Chad Lung +Chandan Kumar ChangBo Guo(gcb) ChenZheng Chinmaya Bharadwaj @@ -51,6 +54,7 @@ Dirk Mueller Divya Doug Hellmann +Drew Thorstensen Edwin Zhai Endre Karlson Eoghan Glynn @@ -74,6 +78,7 @@ Haomeng, Wang Harri Hämäläinen Hisashi Osanai +Hongbin Lu Igor Degtiarov Ihar Hrachyshka Ildiko Vancsa @@ -95,6 +100,7 @@ John H. Tran John Herndon JordanP +JuPing Julien Danjou Justin SB KIYOHIRO ADACHI @@ -131,6 +137,7 @@ Monsyne Dragon Monty Taylor Nadya Privalova +Nadya Shakhat Nejc Saje Nick Barcet Nicolas Barcet (nijaba) @@ -214,13 +221,16 @@ ZhiQiang Fan Zhongyue Luo annegentle +ansaba ccrouch eNovance emilienm florent fujioka yuuichi gengjh +gord chung guillaume pernot +jiaxi joyce kiwik-chenrui leizhang @@ -243,6 +253,7 @@ tanlin terriyu vagrant +venkatamahesh vivek.nandavanam vivek.nandavanam xingzhou diff -Nru ceilometer-5.0.0~b2/ceilometer/agent/base.py ceilometer-5.0.0~b3/ceilometer/agent/base.py --- ceilometer-5.0.0~b2/ceilometer/agent/base.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/agent/base.py 2015-09-03 13:05:55.000000000 +0000 @@ -43,6 +43,11 @@ LOG = log.getLogger(__name__) OPTS = [ + cfg.BoolOpt('batch_polled_samples', + default=True, + help='To reduce polling agent load, samples are sent to the ' + 'notification agent in a batch. To gain higher ' + 'throughput at the cost of load set this to False.'), cfg.IntOpt('shuffle_time_before_polling_task', default=0, help='To reduce large requests at same time to Nova or other ' @@ -112,6 +117,9 @@ resource_factory = lambda: Resources(agent_manager) self.resources = collections.defaultdict(resource_factory) + self._batch = cfg.CONF.batch_polled_samples + self._telemetry_secret = cfg.CONF.publisher.telemetry_secret + def add(self, pollster, source): self.pollster_matches[source.name].add(pollster) key = Resources.key(source.name, pollster) @@ -121,11 +129,9 @@ """Polling sample and notify.""" cache = {} discovery_cache = {} + poll_history = {} for source_name in self.pollster_matches: for pollster in self.pollster_matches[source_name]: - LOG.info(_("Polling pollster %(poll)s in the context of " - "%(src)s"), - dict(poll=pollster.name, src=source_name)) key = Resources.key(source_name, pollster) candidate_res = list( self.resources[key].get(discovery_cache)) @@ -136,64 +142,67 @@ # Remove duplicated resources and black resources. Using # set() requires well defined __hash__ for each resource. # Since __eq__ is defined, 'not in' is safe here. - seen = [] - duplicated = [] polling_resources = [] black_res = self.resources[key].blacklist + history = poll_history.get(pollster.name, []) for x in candidate_res: - if x not in seen: - seen.append(x) + if x not in history: + history.append(x) if x not in black_res: polling_resources.append(x) - else: - duplicated.append(x) - - # Warn duplicated resources for the 1st time - if self.resources[key].last_dup != duplicated: - self.resources[key].last_dup = duplicated - LOG.warning(_( - 'Found following duplicated resoures for ' - '%(name)s in context of %(source)s:%(list)s. 
' - 'Check pipeline configuration.') - % ({'name': pollster.name, - 'source': source_name, - 'list': duplicated - })) + poll_history[pollster.name] = history # If no resources, skip for this pollster if not polling_resources: - LOG.info(_("Skip polling pollster %s, no resources" - " found"), pollster.name) + p_context = 'new ' if history else '' + LOG.info(_("Skip pollster %(name)s, no %(p_context)s" + "resources found this cycle"), + {'name': pollster.name, 'p_context': p_context}) continue + LOG.info(_("Polling pollster %(poll)s in the context of " + "%(src)s"), + dict(poll=pollster.name, src=source_name)) try: samples = pollster.obj.get_samples( manager=self.manager, cache=cache, resources=polling_resources ) + sample_batch = [] + for sample in samples: sample_dict = ( publisher_utils.meter_message_from_counter( - sample, cfg.CONF.publisher.telemetry_secret + sample, self._telemetry_secret )) - self.manager.notifier.info( - self.manager.context.to_dict(), - 'telemetry.api', - [sample_dict] - ) + if self._batch: + sample_batch.append(sample_dict) + else: + self._send_notification([sample_dict]) + + if sample_batch: + self._send_notification(sample_batch) + except plugin_base.PollsterPermanentError as err: LOG.error(_( 'Prevent pollster %(name)s for ' 'polling source %(source)s anymore!') % ({'name': pollster.name, 'source': source_name})) - self.resources[key].blacklist.append(err.fail_res) + self.resources[key].blacklist.extend(err.fail_res_list) except Exception as err: LOG.warning(_( 'Continue after error from %(name)s: %(error)s') % ({'name': pollster.name, 'error': err}), exc_info=True) + def _send_notification(self, samples): + self.manager.notifier.info( + self.manager.context.to_dict(), + 'telemetry.polling', + {'samples': samples} + ) + class AgentManager(service_base.BaseService): @@ -287,15 +296,11 @@ polling_task = None for pollster in self.extensions: if source.support_meter(pollster.name): + polling_task = polling_tasks.get(source.get_interval()) if not polling_task: polling_task = self.create_polling_task() + polling_tasks[source.get_interval()] = polling_task polling_task.add(pollster, source) - if polling_task: - polling_tasks[source.name] = { - 'task': polling_task, - 'interval': source.get_interval() - } - return polling_tasks def construct_group_id(self, discovery_group_id): @@ -313,15 +318,13 @@ pollster_timers = [] data = self.setup_polling_tasks() - for name, polling_task in data.items(): - interval = polling_task['interval'] - task = polling_task['task'] + for interval, polling_task in data.items(): delay_time = (interval + delay_polling_time if delay_start else delay_polling_time) pollster_timers.append(self.tg.add_timer(interval, self.interval_task, initial_delay=delay_time, - task=task)) + task=polling_task)) self.tg.add_timer(cfg.CONF.coordination.heartbeat, self.partition_coordinator.heartbeat) diff -Nru ceilometer-5.0.0~b2/ceilometer/agent/plugin_base.py ceilometer-5.0.0~b3/ceilometer/agent/plugin_base.py --- ceilometer-5.0.0~b2/ceilometer/agent/plugin_base.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/agent/plugin_base.py 2015-09-03 13:05:55.000000000 +0000 @@ -93,7 +93,7 @@ super(NotificationBase, self).__init__() # NOTE(gordc): this is filter rule used by oslo.messaging to dispatch # messages to an endpoint. - if self.event_types is not None: + if self.event_types: self.filter_rule = oslo_messaging.NotificationFilter( event_type='|'.join(self.event_types)) self.manager = manager @@ -178,8 +178,8 @@ error. 
""" - def __init__(self, resource): - self.fail_res = resource + def __init__(self, resources): + self.fail_res_list = resources @six.add_metaclass(abc.ABCMeta) diff -Nru ceilometer-5.0.0~b2/ceilometer/alarm/evaluator/combination.py ceilometer-5.0.0~b3/ceilometer/alarm/evaluator/combination.py --- ceilometer-5.0.0~b2/ceilometer/alarm/evaluator/combination.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/alarm/evaluator/combination.py 2015-09-03 13:05:55.000000000 +0000 @@ -101,8 +101,8 @@ def evaluate(self, alarm): if not self.within_time_constraint(alarm): - LOG.debug(_('Attempted to evaluate alarm %s, but it is not ' - 'within its time constraint.') % alarm.alarm_id) + LOG.debug('Attempted to evaluate alarm %s, but it is not ' + 'within its time constraint.', alarm.alarm_id) return states = zip(alarm.rule['alarm_ids'], diff -Nru ceilometer-5.0.0~b2/ceilometer/alarm/evaluator/gnocchi.py ceilometer-5.0.0~b3/ceilometer/alarm/evaluator/gnocchi.py --- ceilometer-5.0.0~b2/ceilometer/alarm/evaluator/gnocchi.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/alarm/evaluator/gnocchi.py 2015-09-03 13:05:55.000000000 +0000 @@ -103,13 +103,13 @@ alarm.rule['resource_type'], alarm.rule['resource_id'], alarm.rule['metric']) - LOG.debug(_('stats query %s') % req['url']) + LOG.debug('stats query %s', req['url']) try: r = getattr(requests, method)(**req) except Exception: LOG.exception(_('alarm stats retrieval failed')) return [] - if int(r.status_code / 100) != 2: + if r.status_code // 100 != 2: LOG.exception(_('alarm stats retrieval failed: %s') % r.text) return [] else: @@ -125,8 +125,8 @@ window = (alarm.rule['granularity'] * (alarm.rule['evaluation_periods'] + cls.look_back)) start = now - datetime.timedelta(seconds=window) - LOG.debug(_('query stats from %(start)s to ' - '%(now)s') % {'start': start, 'now': now}) + LOG.debug('query stats from %(start)s to ' + '%(now)s', {'start': start, 'now': now}) return start.isoformat(), now.isoformat() def _sufficient(self, alarm, statistics): @@ -211,8 +211,8 @@ def evaluate(self, alarm): if not self.within_time_constraint(alarm): - LOG.debug(_('Attempted to evaluate alarm %s, but it is not ' - 'within its time constraint.') % alarm.alarm_id) + LOG.debug('Attempted to evaluate alarm %s, but it is not ' + 'within its time constraint.', alarm.alarm_id) return start, end = self._bound_duration(alarm) @@ -223,9 +223,8 @@ def _compare(value): op = COMPARATORS[alarm.rule['comparison_operator']] limit = alarm.rule['threshold'] - LOG.debug(_('comparing value %(value)s against threshold' - ' %(limit)s') % - {'value': value, 'limit': limit}) + LOG.debug('comparing value %(value)s against threshold' + ' %(limit)s', {'value': value, 'limit': limit}) return op(value, limit) self._transition(alarm, diff -Nru ceilometer-5.0.0~b2/ceilometer/alarm/evaluator/threshold.py ceilometer-5.0.0~b3/ceilometer/alarm/evaluator/threshold.py --- ceilometer-5.0.0~b2/ceilometer/alarm/evaluator/threshold.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/alarm/evaluator/threshold.py 2015-09-03 13:05:55.000000000 +0000 @@ -53,8 +53,8 @@ window = (alarm.rule['period'] * (alarm.rule['evaluation_periods'] + look_back)) start = now - datetime.timedelta(seconds=window) - LOG.debug(_('query stats from %(start)s to ' - '%(now)s') % {'start': start, 'now': now}) + LOG.debug('query stats from %(start)s to ' + '%(now)s', {'start': start, 'now': now}) after = dict(field='timestamp', op='ge', value=start.isoformat()) before = 
dict(field='timestamp', op='le', value=now.isoformat()) constraints.extend([before, after]) @@ -63,7 +63,7 @@ @staticmethod def _sanitize(alarm, statistics): """Sanitize statistics.""" - LOG.debug(_('sanitize stats %s') % statistics) + LOG.debug('sanitize stats %s', statistics) if alarm.rule.get('exclude_outliers'): key = operator.attrgetter('count') mean = utils.mean(statistics, key) @@ -72,7 +72,7 @@ upper = mean + 2 * stddev inliers, outliers = utils.anomalies(statistics, key, lower, upper) if outliers: - LOG.debug(_('excluded weak datapoints with sample counts %s'), + LOG.debug('excluded weak datapoints with sample counts %s', [s.count for s in outliers]) statistics = inliers else: @@ -81,12 +81,12 @@ # in practice statistics are always sorted by period start, not # strictly required by the API though statistics = statistics[-alarm.rule['evaluation_periods']:] - LOG.debug(_('pruned statistics to %d') % len(statistics)) + LOG.debug('pruned statistics to %d', len(statistics)) return statistics def _statistics(self, alarm, query): """Retrieve statistics over the current window.""" - LOG.debug(_('stats query %s') % query) + LOG.debug('stats query %s', query) try: return self._client.statistics.list( meter_name=alarm.rule['meter_name'], q=query, @@ -175,8 +175,8 @@ def evaluate(self, alarm): if not self.within_time_constraint(alarm): - LOG.debug(_('Attempted to evaluate alarm %s, but it is not ' - 'within its time constraint.') % alarm.alarm_id) + LOG.debug('Attempted to evaluate alarm %s, but it is not ' + 'within its time constraint.', alarm.alarm_id) return query = self._bound_duration( @@ -194,9 +194,8 @@ op = COMPARATORS[alarm.rule['comparison_operator']] value = getattr(stat, alarm.rule['statistic']) limit = alarm.rule['threshold'] - LOG.debug(_('comparing value %(value)s against threshold' - ' %(limit)s') % - {'value': value, 'limit': limit}) + LOG.debug('comparing value %(value)s against threshold' + ' %(limit)s', {'value': value, 'limit': limit}) return op(value, limit) self._transition(alarm, diff -Nru ceilometer-5.0.0~b2/ceilometer/alarm/rpc.py ceilometer-5.0.0~b3/ceilometer/alarm/rpc.py --- ceilometer-5.0.0~b2/ceilometer/alarm/rpc.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/alarm/rpc.py 2015-09-03 13:05:55.000000000 +0000 @@ -21,7 +21,6 @@ import six from ceilometer.alarm.storage import models -from ceilometer.i18n import _ from ceilometer import messaging OPTS = [ @@ -46,9 +45,9 @@ def notify(self, alarm, previous, reason, reason_data): actions = getattr(alarm, models.Alarm.ALARM_ACTIONS_MAP[alarm.state]) if not actions: - LOG.debug(_('alarm %(alarm_id)s has no action configured ' - 'for state transition from %(previous)s to ' - 'state %(state)s, skipping the notification.') % + LOG.debug('alarm %(alarm_id)s has no action configured ' + 'for state transition from %(previous)s to ' + 'state %(state)s, skipping the notification.', {'alarm_id': alarm.alarm_id, 'previous': previous, 'state': alarm.state}) diff -Nru ceilometer-5.0.0~b2/ceilometer/alarm/service.py ceilometer-5.0.0~b3/ceilometer/alarm/service.py --- ceilometer-5.0.0~b2/ceilometer/alarm/service.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/alarm/service.py 2015-09-03 13:05:55.000000000 +0000 @@ -100,11 +100,10 @@ def _evaluate_alarm(self, alarm): """Evaluate the alarms assigned to this evaluator.""" if alarm.type not in self.supported_evaluators: - LOG.debug(_('skipping alarm %s: type unsupported') % - alarm.alarm_id) + LOG.debug('skipping alarm %s: type 
unsupported', alarm.alarm_id) return - LOG.debug(_('evaluating alarm %s') % alarm.alarm_id) + LOG.debug('evaluating alarm %s', alarm.alarm_id) try: self.evaluators[alarm.type].obj.evaluate(alarm) except Exception: @@ -191,8 +190,8 @@ return try: - LOG.debug(_("Notifying alarm %(id)s with action %(act)s") % ( - {'id': alarm_id, 'act': action})) + LOG.debug("Notifying alarm %(id)s with action %(act)s", + {'id': alarm_id, 'act': action}) notifier.notify(action, alarm_id, alarm_name, severity, previous, current, reason, reason_data) except Exception: diff -Nru ceilometer-5.0.0~b2/ceilometer/alarm/storage/impl_hbase.py ceilometer-5.0.0~b3/ceilometer/alarm/storage/impl_hbase.py --- ceilometer-5.0.0~b2/ceilometer/alarm/storage/impl_hbase.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/alarm/storage/impl_hbase.py 2015-09-03 13:05:55.000000000 +0000 @@ -19,7 +19,6 @@ import ceilometer from ceilometer.alarm.storage import base from ceilometer.alarm.storage import models -from ceilometer.i18n import _ from ceilometer.storage.hbase import base as hbase_base from ceilometer.storage.hbase import migration as hbase_migration from ceilometer.storage.hbase import utils as hbase_utils @@ -84,18 +83,18 @@ hbase_migration.migrate_tables(conn, tables) def clear(self): - LOG.debug(_('Dropping HBase schema...')) + LOG.debug('Dropping HBase schema...') with self.conn_pool.connection() as conn: for table in [self.ALARM_TABLE, self.ALARM_HISTORY_TABLE]: try: conn.disable_table(table) except Exception: - LOG.debug(_('Cannot disable table but ignoring error')) + LOG.debug('Cannot disable table but ignoring error') try: conn.delete_table(table) except Exception: - LOG.debug(_('Cannot delete table but ignoring error')) + LOG.debug('Cannot delete table but ignoring error') def update_alarm(self, alarm): """Create an alarm. diff -Nru ceilometer-5.0.0~b2/ceilometer/api/app.py ceilometer-5.0.0~b3/ceilometer/api/app.py --- ceilometer-5.0.0~b2/ceilometer/api/app.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/api/app.py 2015-09-03 13:06:00.000000000 +0000 @@ -27,7 +27,6 @@ from ceilometer.api import middleware from ceilometer.i18n import _ from ceilometer.i18n import _LW -from ceilometer import service LOG = log.getLogger(__name__) @@ -38,8 +37,6 @@ default="api_paste.ini", help="Configuration file for WSGI definition of API." ), - cfg.IntOpt('api_workers', default=1, - help='Number of workers for Ceilometer API server.'), ] API_OPTS = [ @@ -47,7 +44,8 @@ default=False, help='Toggle Pecan Debug Middleware.'), cfg.IntOpt('default_api_return_limit', - default=1000, + min=1, + default=100, help='Default maximum number of items returned by API request.' 
), ] @@ -78,7 +76,7 @@ # NOTE(sileht): pecan debug won't work in multi-process environment pecan_debug = CONF.api.pecan_debug - if service.get_workers('api') != 1 and pecan_debug: + if CONF.api.workers and CONF.api.workers != 1 and pecan_debug: pecan_debug = False LOG.warning(_LW('pecan_debug cannot be enabled, if workers is > 1, ' 'the value is overrided with False')) @@ -144,9 +142,8 @@ LOG.info(_("serving on http://%(host)s:%(port)s") % ( {'host': host, 'port': port})) - workers = service.get_workers('api') serving.run_simple(cfg.CONF.api.host, cfg.CONF.api.port, - app, processes=workers) + app, processes=CONF.api.workers) def app_factory(global_config, **local_conf): diff -Nru ceilometer-5.0.0~b2/ceilometer/api/controllers/v2/alarms.py ceilometer-5.0.0~b3/ceilometer/api/controllers/v2/alarms.py --- ceilometer-5.0.0~b2/ceilometer/api/controllers/v2/alarms.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/api/controllers/v2/alarms.py 2015-09-03 13:05:55.000000000 +0000 @@ -411,22 +411,28 @@ url[1] = netloc actions[index] = urlparse.urlunsplit(url) if old_alarm: - for key in ('ok_actions', 'alarm_actions', - 'insufficient_data_actions'): - for action in getattr(old_alarm, key): - url = netutils.urlsplit(action) - if (self._is_trust_url(url) and url.password and - action not in getattr(self, key)): - keystone_client.delete_trust_id( - url.username, auth_plugin) + new_actions = list(itertools.chain( + self.ok_actions or [], + self.alarm_actions or [], + self.insufficient_data_actions or [])) + for action in itertools.chain( + old_alarm.ok_actions or [], + old_alarm.alarm_actions or [], + old_alarm.insufficient_data_actions or []): + if action not in new_actions: + self.delete_trust(action) def delete_actions(self): + for action in itertools.chain(self.ok_actions or [], + self.alarm_actions or [], + self.insufficient_data_actions or []): + self.delete_trust(action) + + def delete_trust(self, action): auth_plugin = pecan.request.environ.get('keystone.token_auth') - for action in itertools.chain(self.ok_actions, self.alarm_actions, - self.insufficient_data_actions): - url = netutils.urlsplit(action) - if self._is_trust_url(url) and url.password: - keystone_client.delete_trust_id(url.username, auth_plugin) + url = netutils.urlsplit(action) + if self._is_trust_url(url) and url.password: + keystone_client.delete_trust_id(url.username, auth_plugin) Alarm.add_attributes(**{"%s_rule" % ext.name: ext.plugin diff -Nru ceilometer-5.0.0~b2/ceilometer/api/controllers/v2/events.py ceilometer-5.0.0~b3/ceilometer/api/controllers/v2/events.py --- ceilometer-5.0.0~b2/ceilometer/api/controllers/v2/events.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/api/controllers/v2/events.py 2015-09-03 13:05:55.000000000 +0000 @@ -30,6 +30,7 @@ from ceilometer.api.controllers.v2 import base from ceilometer.api.controllers.v2 import utils as v2_utils +from ceilometer.api import rbac from ceilometer.event.storage import models as event_models from ceilometer.i18n import _ from ceilometer import storage @@ -156,6 +157,18 @@ ) +def _add_user_proj_filter(): + traits_filter = [] + # Returns user_id, proj_id for non-admins + user_id, proj_id = rbac.get_limited_to(pecan.request.headers) + # If non-admin, filter events by user and project + if (user_id and proj_id): + traits_filter.append({"key": "project_id", "string": proj_id, + "op": "eq"}) + traits_filter.append({"key": "user_id", "string": user_id, "op": "eq"}) + return traits_filter + + def _event_query_to_event_filter(q): 
evt_model_filter = { 'event_type': None, @@ -163,7 +176,7 @@ 'start_timestamp': None, 'end_timestamp': None } - traits_filter = [] + traits_filter = _add_user_proj_filter() for i in q: if not i.op: @@ -194,7 +207,7 @@ :param event_type: Event type to filter traits by :param trait_name: Trait to return values for """ - LOG.debug(_("Getting traits for %s") % event_type) + LOG.debug("Getting traits for %s", event_type) return [Trait._convert_storage_trait(t) for t in pecan.request.event_storage_conn .get_traits(event_type, trait_name)] @@ -237,7 +250,7 @@ class EventsController(rest.RestController): """Works on Events.""" - @v2_utils.requires_admin + @v2_utils.requires_context @wsme_pecan.wsexpose([Event], [EventQuery], int) def get_all(self, q=None, limit=None): """Return all events matching the query filters. @@ -245,6 +258,7 @@ :param q: Filter arguments for which Events to return :param limit: Maximum number of samples to be returned. """ + rbac.enforce("events:index", pecan.request) q = q or [] limit = v2_utils.enforce_limit(limit) event_filter = _event_query_to_event_filter(q) @@ -257,14 +271,17 @@ pecan.request.event_storage_conn.get_events(event_filter, limit)] - @v2_utils.requires_admin + @v2_utils.requires_context @wsme_pecan.wsexpose(Event, wtypes.text) def get_one(self, message_id): """Return a single event with the given message id. :param message_id: Message ID of the Event to be returned """ - event_filter = storage.EventFilter(message_id=message_id) + rbac.enforce("events:show", pecan.request) + t_filter = _add_user_proj_filter() + event_filter = storage.EventFilter(traits_filter=t_filter, + message_id=message_id) events = [event for event in pecan.request.event_storage_conn.get_events(event_filter)] if not events: diff -Nru ceilometer-5.0.0~b2/ceilometer/api/controllers/v2/meters.py ceilometer-5.0.0~b3/ceilometer/api/controllers/v2/meters.py --- ceilometer-5.0.0~b2/ceilometer/api/controllers/v2/meters.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/api/controllers/v2/meters.py 2015-09-03 13:05:55.000000000 +0000 @@ -181,12 +181,12 @@ self.duration_start and self.duration_start < start_timestamp): self.duration_start = start_timestamp - LOG.debug(_('clamping min timestamp to range')) + LOG.debug('clamping min timestamp to range') if (end_timestamp and self.duration_end and self.duration_end > end_timestamp): self.duration_end = end_timestamp - LOG.debug(_('clamping max timestamp to range')) + LOG.debug('clamping max timestamp to range') # If we got valid timestamps back, compute a duration in seconds. 
# @@ -367,7 +367,8 @@ tenant=def_project_id, is_admin=True) notifier = pecan.request.notifier - notifier.info(ctxt.to_dict(), 'telemetry.api', published_samples) + notifier.info(ctxt.to_dict(), 'telemetry.api', + {'samples': published_samples}) return samples diff -Nru ceilometer-5.0.0~b2/ceilometer/api/controllers/v2/query.py ceilometer-5.0.0~b3/ceilometer/api/controllers/v2/query.py --- ceilometer-5.0.0~b2/ceilometer/api/controllers/v2/query.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/api/controllers/v2/query.py 2015-09-03 13:06:00.000000000 +0000 @@ -32,6 +32,7 @@ from ceilometer.api.controllers.v2 import alarms from ceilometer.api.controllers.v2 import base from ceilometer.api.controllers.v2 import samples +from ceilometer.api.controllers.v2 import utils as v2_utils from ceilometer.api import rbac from ceilometer.i18n import _ from ceilometer import storage @@ -201,7 +202,7 @@ self._validate_filter(self.filter_expr) except (ValueError, jsonschema.exceptions.ValidationError) as e: raise base.ClientSideError( - _("Filter expression not valid: %s") % e.message) + _("Filter expression not valid: %s") % e) self._replace_isotime_with_datetime(self.filter_expr) self._convert_operator_to_lower_case(self.filter_expr) self._normalize_field_names_for_db_model(self.filter_expr) @@ -220,14 +221,10 @@ self._convert_orderby_to_lower_case(self.orderby) self._normalize_field_names_in_orderby(self.orderby) - if self.original_query.limit is wtypes.Unset: - self.limit = None - else: - self.limit = self.original_query.limit + self.limit = (None if self.original_query.limit is wtypes.Unset + else self.original_query.limit) - if self.limit is not None and self.limit <= 0: - msg = _('Limit should be positive') - raise base.ClientSideError(msg) + self.limit = v2_utils.enforce_limit(self.limit) @staticmethod def _convert_orderby_to_lower_case(orderby): diff -Nru ceilometer-5.0.0~b2/ceilometer/api/controllers/v2/root.py ceilometer-5.0.0~b3/ceilometer/api/controllers/v2/root.py --- ceilometer-5.0.0~b2/ceilometer/api/controllers/v2/root.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/api/controllers/v2/root.py 2015-09-03 13:05:55.000000000 +0000 @@ -18,7 +18,7 @@ # License for the specific language governing permissions and limitations # under the License. -from keystoneclient.openstack.common.apiclient import exceptions +from keystoneclient import exceptions from oslo_config import cfg from oslo_log import log import pecan @@ -59,7 +59,7 @@ def gnocchi_abort(): pecan.abort(410, ("This telemetry installation is configured to use " "Gnocchi. 
Please use the Gnocchi API available on " - "the metric endpoint to retreive data.")) + "the metric endpoint to retrieve data.")) def aodh_redirect(url): diff -Nru ceilometer-5.0.0~b2/ceilometer/api/controllers/v2/utils.py ceilometer-5.0.0~b3/ceilometer/api/controllers/v2/utils.py --- ceilometer-5.0.0~b2/ceilometer/api/controllers/v2/utils.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/api/controllers/v2/utils.py 2015-09-03 13:06:00.000000000 +0000 @@ -46,7 +46,7 @@ limit = cfg.CONF.api.default_api_return_limit LOG.info(_LI('No limit value provided, result set will be' 'limited to %(limit)d.'), {'limit': limit}) - if limit and limit < 0: + if not limit or limit <= 0: raise base.ClientSideError(_("Limit must be positive")) return limit @@ -353,3 +353,17 @@ return func(*args, **kwargs) return wrapped + + +def requires_context(func): + + @functools.wraps(func) + def wrapped(*args, **kwargs): + req_usr = pecan.request.headers.get('X-User-Id') + proj_usr = pecan.request.headers.get('X-Project-Id') + if ((not req_usr) or (not proj_usr)): + pecan.core.abort(status_code=403, + detail='RBAC Authorization Failed') + return func(*args, **kwargs) + + return wrapped diff -Nru ceilometer-5.0.0~b2/ceilometer/api/hooks.py ceilometer-5.0.0~b3/ceilometer/api/hooks.py --- ceilometer-5.0.0~b2/ceilometer/api/hooks.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/api/hooks.py 2015-09-03 13:05:55.000000000 +0000 @@ -13,8 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. -import threading - from oslo_config import cfg from oslo_log import log import oslo_messaging @@ -90,17 +88,11 @@ class TranslationHook(hooks.PecanHook): - def __init__(self): - # Use thread local storage to make this thread safe in situations - # where one pecan instance is being used to serve multiple request - # threads. - self.local_error = threading.local() - self.local_error.translatable_error = None - - def before(self, state): - self.local_error.translatable_error = None - def after(self, state): + # After a request has been done, we need to see if + # ClientSideError has added an error onto the response. + # If it has we need to get it info the thread-safe WSGI + # environ to be used by the ParsableErrorMiddleware. 
if hasattr(state.response, 'translatable_error'): - self.local_error.translatable_error = ( + state.request.environ['translatable_error'] = ( state.response.translatable_error) diff -Nru ceilometer-5.0.0~b2/ceilometer/api/__init__.py ceilometer-5.0.0~b3/ceilometer/api/__init__.py --- ceilometer-5.0.0~b2/ceilometer/api/__init__.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/api/__init__.py 2015-09-03 13:05:55.000000000 +0000 @@ -19,6 +19,7 @@ OPTS = [ cfg.IntOpt('port', default=8777, + min=1, max=65535, deprecated_name='metering_api_port', deprecated_group='DEFAULT', help='The port for the ceilometer API server.', diff -Nru ceilometer-5.0.0~b2/ceilometer/api/middleware.py ceilometer-5.0.0~b3/ceilometer/api/middleware.py --- ceilometer-5.0.0~b2/ceilometer/api/middleware.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/api/middleware.py 2015-09-03 13:05:55.000000000 +0000 @@ -26,7 +26,6 @@ import six import webob -from ceilometer.api import hooks from ceilometer import i18n from ceilometer.i18n import _ @@ -67,7 +66,7 @@ 'status %s' % status )) else: - if (state['status_code'] / 100) not in (2, 3): + if (state['status_code'] // 100) not in (2, 3): # Remove some headers so we can replace them later # when we have the full error message and can # compute the length. @@ -82,13 +81,7 @@ app_iter = self.app(environ, replacement_start_response) if (state['status_code'] // 100) not in (2, 3): req = webob.Request(environ) - # Find the first TranslationHook in the array of hooks and use the - # translatable_error object from it - error = None - for hook in self.app.hooks: - if isinstance(hook, hooks.TranslationHook): - error = hook.local_error.translatable_error - break + error = environ.get('translatable_error') user_locale = self.best_match_language(req.accept_language) if (req.accept.best_match(['application/json', 'application/xml']) == 'application/xml'): diff -Nru ceilometer-5.0.0~b2/ceilometer/cmd/eventlet/agent_notification.py ceilometer-5.0.0~b3/ceilometer/cmd/eventlet/agent_notification.py --- ceilometer-5.0.0~b2/ceilometer/cmd/eventlet/agent_notification.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/cmd/eventlet/agent_notification.py 2015-09-03 13:05:55.000000000 +0000 @@ -26,4 +26,4 @@ def main(): service.prepare_service() os_service.launch(CONF, notification.NotificationService(), - workers=service.get_workers('notification')).wait() + workers=CONF.notification.workers).wait() diff -Nru ceilometer-5.0.0~b2/ceilometer/cmd/eventlet/collector.py ceilometer-5.0.0~b3/ceilometer/cmd/eventlet/collector.py --- ceilometer-5.0.0~b2/ceilometer/cmd/eventlet/collector.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/cmd/eventlet/collector.py 2015-09-03 13:05:55.000000000 +0000 @@ -26,4 +26,4 @@ def main(): service.prepare_service() os_service.launch(CONF, collector.CollectorService(), - workers=service.get_workers('collector')).wait() + workers=CONF.collector.workers).wait() diff -Nru ceilometer-5.0.0~b2/ceilometer/cmd/eventlet/storage.py ceilometer-5.0.0~b3/ceilometer/cmd/eventlet/storage.py --- ceilometer-5.0.0~b2/ceilometer/cmd/eventlet/storage.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/cmd/eventlet/storage.py 2015-09-03 13:05:55.000000000 +0000 @@ -18,7 +18,7 @@ from oslo_config import cfg -from ceilometer.i18n import _, _LI +from ceilometer.i18n import _LI from ceilometer import service from ceilometer import storage @@ -37,7 +37,7 @@ service.prepare_service() if 
cfg.CONF.database.metering_time_to_live > 0: - LOG.debug(_("Clearing expired metering data")) + LOG.debug("Clearing expired metering data") storage_conn = storage.get_connection_from_config(cfg.CONF, 'metering') storage_conn.clear_expired_metering_data( cfg.CONF.database.metering_time_to_live) @@ -46,7 +46,7 @@ "is disabled")) if cfg.CONF.database.event_time_to_live > 0: - LOG.debug(_("Clearing expired event data")) + LOG.debug("Clearing expired event data") event_conn = storage.get_connection_from_config(cfg.CONF, 'event') event_conn.clear_expired_event_data( cfg.CONF.database.event_time_to_live) diff -Nru ceilometer-5.0.0~b2/ceilometer/collector.py ceilometer-5.0.0~b3/ceilometer/collector.py --- ceilometer-5.0.0~b2/ceilometer/collector.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/collector.py 2015-09-03 13:05:55.000000000 +0000 @@ -35,6 +35,7 @@ 'an empty string to disable.'), cfg.IntOpt('udp_port', default=4952, + min=1, max=65535, help='Port to which the UDP socket is bound.'), cfg.BoolOpt('requeue_sample_on_dispatcher_error', default=False, @@ -45,6 +46,11 @@ default=False, help='Requeue the event on the collector event queue ' 'when the collector fails to dispatch it.'), + cfg.BoolOpt('enable_rpc', + default=False, + help='Enable the RPC functionality of collector. This ' + 'functionality is now deprecated in favour of notifier ' + 'publisher and queues.') ] cfg.CONF.register_opts(OPTS, group="collector") @@ -77,8 +83,11 @@ transport = messaging.get_transport(optional=True) if transport: - self.rpc_server = messaging.get_rpc_server( - transport, cfg.CONF.publisher_rpc.metering_topic, self) + if cfg.CONF.collector.enable_rpc: + LOG.warning('RPC collector is deprecated in favour of queues. ' + 'Please switch to notifier publisher.') + self.rpc_server = messaging.get_rpc_server( + transport, cfg.CONF.publisher_rpc.metering_topic, self) sample_target = oslo_messaging.Target( topic=cfg.CONF.publisher_notifier.metering_topic) @@ -98,7 +107,8 @@ requeue_event_on_dispatcher_error)) self.event_listener.start() - self.rpc_server.start() + if cfg.CONF.collector.enable_rpc: + self.rpc_server.start() self.sample_listener.start() if not cfg.CONF.collector.udp_address: @@ -125,7 +135,7 @@ LOG.warn(_("UDP: Cannot decode data sent by %s"), source) else: try: - LOG.debug(_("UDP: Storing %s"), sample) + LOG.debug("UDP: Storing %s", sample) self.dispatcher_manager.map_method('record_metering_data', sample) except Exception: @@ -133,7 +143,7 @@ def stop(self): self.udp_run = False - if self.rpc_server: + if cfg.CONF.collector.enable_rpc and self.rpc_server: self.rpc_server.stop() if self.sample_listener: utils.kill_listeners([self.sample_listener]) diff -Nru ceilometer-5.0.0~b2/ceilometer/compute/notifications/cpu.py ceilometer-5.0.0~b3/ceilometer/compute/notifications/cpu.py --- ceilometer-5.0.0~b2/ceilometer/compute/notifications/cpu.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/compute/notifications/cpu.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,136 +0,0 @@ -# -# Copyright 2013 Intel -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -"""Converters for producing compute CPU sample messages from notification -events. -""" - -from oslo_log import log -from oslo_utils import timeutils - -from ceilometer.compute import notifications -from ceilometer.i18n import _ -from ceilometer import sample - -LOG = log.getLogger(__name__) - - -class ComputeMetricsNotificationBase(notifications.ComputeNotificationBase): - """Convert compute.metrics.update notifications into Samples.""" - event_types = ['compute.metrics.update'] - metric = None - sample_type = None - unit = None - - @staticmethod - def _get_sample(message, name): - try: - for metric in message['payload']['metrics']: - if name == metric['name']: - info = {'payload': metric, - 'event_type': message['event_type'], - 'publisher_id': message['publisher_id'], - 'resource_id': '%s_%s' % ( - message['payload']['host'], - message['payload']['nodename']), - 'timestamp': str(timeutils.parse_strtime( - metric['timestamp']))} - return info - except Exception as err: - LOG.warning(_('An error occurred while building %(m)s ' - 'sample: %(e)s') % {'m': name, 'e': err}) - - def process_notification(self, message): - info = self._get_sample(message, self.metric) - if info: - yield sample.Sample.from_notification( - name='compute.node.%s' % self.metric, - type=self.sample_type, - unit=self.unit, - volume=(info['payload']['value'] * 100 if self.unit == '%' - else info['payload']['value']), - user_id=None, - project_id=None, - resource_id=info['resource_id'], - message=info) - - -class CpuFrequency(ComputeMetricsNotificationBase): - """Handle CPU current frequency message.""" - metric = 'cpu.frequency' - sample_type = sample.TYPE_GAUGE - unit = 'MHz' - - -class CpuUserTime(ComputeMetricsNotificationBase): - """Handle CPU user mode time message.""" - metric = 'cpu.user.time' - sample_type = sample.TYPE_CUMULATIVE - unit = 'ns' - - -class CpuKernelTime(ComputeMetricsNotificationBase): - """Handle CPU kernel time message.""" - metric = 'cpu.kernel.time' - unit = 'ns' - sample_type = sample.TYPE_CUMULATIVE - - -class CpuIdleTime(ComputeMetricsNotificationBase): - """Handle CPU idle time message.""" - metric = 'cpu.idle.time' - unit = 'ns' - sample_type = sample.TYPE_CUMULATIVE - - -class CpuIowaitTime(ComputeMetricsNotificationBase): - """Handle CPU I/O wait time message.""" - metric = 'cpu.iowait.time' - unit = 'ns' - sample_type = sample.TYPE_CUMULATIVE - - -class CpuKernelPercent(ComputeMetricsNotificationBase): - """Handle CPU kernel percentage message.""" - metric = 'cpu.kernel.percent' - unit = '%' - sample_type = sample.TYPE_GAUGE - - -class CpuIdlePercent(ComputeMetricsNotificationBase): - """Handle CPU idle percentage message.""" - metric = 'cpu.idle.percent' - unit = '%' - sample_type = sample.TYPE_GAUGE - - -class CpuUserPercent(ComputeMetricsNotificationBase): - """Handle CPU user mode percentage message.""" - metric = 'cpu.user.percent' - unit = '%' - sample_type = sample.TYPE_GAUGE - - -class CpuIowaitPercent(ComputeMetricsNotificationBase): - """Handle CPU I/O wait percentage message.""" - metric = 'cpu.iowait.percent' - unit = '%' - sample_type = sample.TYPE_GAUGE - - -class CpuPercent(ComputeMetricsNotificationBase): - """Handle generic CPU utilization message.""" - metric = 'cpu.percent' - unit = '%' - sample_type = sample.TYPE_GAUGE diff -Nru ceilometer-5.0.0~b2/ceilometer/compute/notifications/instance.py ceilometer-5.0.0~b3/ceilometer/compute/notifications/instance.py --- 
ceilometer-5.0.0~b2/ceilometer/compute/notifications/instance.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/compute/notifications/instance.py 2015-09-03 13:05:55.000000000 +0000 @@ -89,74 +89,6 @@ message=message) -class Memory(ComputeInstanceNotificationBase): - def get_sample(self, message): - yield sample.Sample.from_notification( - name='memory', - type=sample.TYPE_GAUGE, - unit='MB', - volume=message['payload']['memory_mb'], - user_id=message['payload']['user_id'], - project_id=message['payload']['tenant_id'], - resource_id=message['payload']['instance_id'], - message=message) - - -class VCpus(ComputeInstanceNotificationBase): - def get_sample(self, message): - yield sample.Sample.from_notification( - name='vcpus', - type=sample.TYPE_GAUGE, - unit='vcpu', - volume=message['payload']['vcpus'], - user_id=message['payload']['user_id'], - project_id=message['payload']['tenant_id'], - resource_id=message['payload']['instance_id'], - message=message) - - -class RootDiskSize(ComputeInstanceNotificationBase): - def get_sample(self, message): - yield sample.Sample.from_notification( - name='disk.root.size', - type=sample.TYPE_GAUGE, - unit='GB', - volume=message['payload']['root_gb'], - user_id=message['payload']['user_id'], - project_id=message['payload']['tenant_id'], - resource_id=message['payload']['instance_id'], - message=message) - - -class EphemeralDiskSize(ComputeInstanceNotificationBase): - def get_sample(self, message): - yield sample.Sample.from_notification( - name='disk.ephemeral.size', - type=sample.TYPE_GAUGE, - unit='GB', - volume=message['payload']['ephemeral_gb'], - user_id=message['payload']['user_id'], - project_id=message['payload']['tenant_id'], - resource_id=message['payload']['instance_id'], - message=message) - - -class InstanceFlavor(ComputeInstanceNotificationBase, - plugin_base.NonMetricNotificationBase): - def get_sample(self, message): - instance_type = message.get('payload', {}).get('instance_type') - if instance_type: - yield sample.Sample.from_notification( - name='instance:%s' % instance_type, - type=sample.TYPE_GAUGE, - unit='instance', - volume=1, - user_id=message['payload']['user_id'], - project_id=message['payload']['tenant_id'], - resource_id=message['payload']['instance_id'], - message=message) - - class InstanceDelete(ComputeInstanceNotificationBase): """Handle the messages sent by the nova notifier plugin. diff -Nru ceilometer-5.0.0~b2/ceilometer/compute/pollsters/cpu.py ceilometer-5.0.0~b3/ceilometer/compute/pollsters/cpu.py --- ceilometer-5.0.0~b2/ceilometer/compute/pollsters/cpu.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/compute/pollsters/cpu.py 2015-09-03 13:05:55.000000000 +0000 @@ -30,11 +30,11 @@ def get_samples(self, manager, cache, resources): for instance in resources: - LOG.debug(_('checking instance %s'), instance.id) + LOG.debug('checking instance %s', instance.id) try: cpu_info = self.inspector.inspect_cpus(instance) - LOG.debug(_("CPUTIME USAGE: %(instance)s %(time)d"), - {'instance': instance.__dict__, + LOG.debug("CPUTIME USAGE: %(instance)s %(time)d", + {'instance': instance, 'time': cpu_info.time}) cpu_num = {'cpu_number': cpu_info.number} yield util.make_sample_from_instance( @@ -47,11 +47,11 @@ ) except virt_inspector.InstanceNotFoundException as err: # Instance was deleted while getting samples. Ignore it. 
- LOG.debug(_('Exception while getting samples %s'), err) + LOG.debug('Exception while getting samples %s', err) except ceilometer.NotImplementedError: # Selected inspector does not implement this pollster. - LOG.debug(_('Obtaining CPU time is not implemented for %s' - ), self.inspector.__class__.__name__) + LOG.debug('Obtaining CPU time is not implemented for %s', + self.inspector.__class__.__name__) except Exception as err: LOG.exception(_('could not get CPU time for %(id)s: %(e)s'), {'id': instance.id, 'e': err}) @@ -62,13 +62,13 @@ def get_samples(self, manager, cache, resources): self._inspection_duration = self._record_poll_time() for instance in resources: - LOG.debug(_('Checking CPU util for instance %s'), instance.id) + LOG.debug('Checking CPU util for instance %s', instance.id) try: cpu_info = self.inspector.inspect_cpu_util( instance, self._inspection_duration) - LOG.debug(_("CPU UTIL: %(instance)s %(util)d"), - ({'instance': instance.__dict__, - 'util': cpu_info.util})) + LOG.debug("CPU UTIL: %(instance)s %(util)d", + {'instance': instance, + 'util': cpu_info.util}) yield util.make_sample_from_instance( instance, name='cpu_util', @@ -78,10 +78,10 @@ ) except virt_inspector.InstanceNotFoundException as err: # Instance was deleted while getting samples. Ignore it. - LOG.debug(_('Exception while getting samples %s'), err) + LOG.debug('Exception while getting samples %s', err) except ceilometer.NotImplementedError: # Selected inspector does not implement this pollster. - LOG.debug(_('Obtaining CPU Util is not implemented for %s'), + LOG.debug('Obtaining CPU Util is not implemented for %s', self.inspector.__class__.__name__) except Exception as err: LOG.exception(_('Could not get CPU Util for %(id)s: %(e)s'), diff -Nru ceilometer-5.0.0~b2/ceilometer/compute/pollsters/disk.py ceilometer-5.0.0~b3/ceilometer/compute/pollsters/disk.py --- ceilometer-5.0.0~b2/ceilometer/compute/pollsters/disk.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/compute/pollsters/disk.py 2015-09-03 13:05:55.000000000 +0000 @@ -128,7 +128,7 @@ yield s except virt_inspector.InstanceNotFoundException as err: # Instance was deleted while getting samples. Ignore it. - LOG.debug(_('Exception while getting samples %s'), err) + LOG.debug('Exception while getting samples %s', err) except virt_inspector.InstanceShutOffException as e: LOG.warn(_LW('Instance %(instance_id)s was shut off while ' 'getting samples of %(pollster)s: %(exc)s'), @@ -136,8 +136,8 @@ 'pollster': self.__class__.__name__, 'exc': e}) except ceilometer.NotImplementedError: # Selected inspector does not implement this pollster. - LOG.debug(_('%(inspector)s does not provide data for ' - ' %(pollster)s'), + LOG.debug('%(inspector)s does not provide data for ' + ' %(pollster)s', {'inspector': self.inspector.__class__.__name__, 'pollster': self.__class__.__name__}) except Exception as err: @@ -338,11 +338,11 @@ yield disk_rate except virt_inspector.InstanceNotFoundException as err: # Instance was deleted while getting samples. Ignore it. - LOG.debug(_('Exception while getting samples %s'), err) + LOG.debug('Exception while getting samples %s', err) except ceilometer.NotImplementedError: # Selected inspector does not implement this pollster. 
- LOG.debug(_('%(inspector)s does not provide data for ' - ' %(pollster)s'), + LOG.debug('%(inspector)s does not provide data for ' + ' %(pollster)s', {'inspector': self.inspector.__class__.__name__, 'pollster': self.__class__.__name__}) except Exception as err: @@ -520,11 +520,11 @@ yield disk_latency except virt_inspector.InstanceNotFoundException as err: # Instance was deleted while getting samples. Ignore it. - LOG.debug(_('Exception while getting samples %s'), err) + LOG.debug('Exception while getting samples %s', err) except ceilometer.NotImplementedError: # Selected inspector does not implement this pollster. - LOG.debug(_('%(inspector)s does not provide data for ' - ' %(pollster)s'), + LOG.debug('%(inspector)s does not provide data for ' + ' %(pollster)s', {'inspector': self.inspector.__class__.__name__, 'pollster': self.__class__.__name__}) except Exception as err: @@ -601,11 +601,11 @@ yield disk_iops except virt_inspector.InstanceNotFoundException as err: # Instance was deleted while getting samples. Ignore it. - LOG.debug(_('Exception while getting samples %s'), err) + LOG.debug('Exception while getting samples %s', err) except ceilometer.NotImplementedError: # Selected inspector does not implement this pollster. - LOG.debug(_('%(inspector)s does not provide data for ' - '%(pollster)s'), + LOG.debug('%(inspector)s does not provide data for ' + '%(pollster)s', {'inspector': self.inspector.__class__.__name__, 'pollster': self.__class__.__name__}) except Exception as err: @@ -696,7 +696,7 @@ yield disk_info except virt_inspector.InstanceNotFoundException as err: # Instance was deleted while getting samples. Ignore it. - LOG.debug(_('Exception while getting samples %s'), err) + LOG.debug('Exception while getting samples %s', err) except virt_inspector.InstanceShutOffException as e: LOG.warn(_LW('Instance %(instance_id)s was shut off while ' 'getting samples of %(pollster)s: %(exc)s'), @@ -704,10 +704,10 @@ 'pollster': self.__class__.__name__, 'exc': e}) except ceilometer.NotImplementedError: # Selected inspector does not implement this pollster. 
- LOG.debug(_('%(inspector)s does not provide data for ' - ' %(pollster)s'), ( + LOG.debug('%(inspector)s does not provide data for ' + ' %(pollster)s', {'inspector': self.inspector.__class__.__name__, - 'pollster': self.__class__.__name__})) + 'pollster': self.__class__.__name__}) except Exception as err: instance_name = util.instance_name(instance) LOG.exception(_('Ignoring instance %(name)s ' diff -Nru ceilometer-5.0.0~b2/ceilometer/compute/pollsters/__init__.py ceilometer-5.0.0~b3/ceilometer/compute/pollsters/__init__.py --- ceilometer-5.0.0~b2/ceilometer/compute/pollsters/__init__.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/compute/pollsters/__init__.py 2015-09-03 13:05:55.000000000 +0000 @@ -24,6 +24,11 @@ @six.add_metaclass(abc.ABCMeta) class BaseComputePollster(plugin_base.PollsterBase): + def setup_environment(self): + super(BaseComputePollster, self).setup_environment() + # propagate exception from check_sanity + self.inspector.check_sanity() + @property def inspector(self): try: diff -Nru ceilometer-5.0.0~b2/ceilometer/compute/pollsters/instance.py ceilometer-5.0.0~b3/ceilometer/compute/pollsters/instance.py --- ceilometer-5.0.0~b2/ceilometer/compute/pollsters/instance.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/compute/pollsters/instance.py 2015-09-03 13:05:55.000000000 +0000 @@ -31,19 +31,3 @@ unit='instance', volume=1, ) - - -class InstanceFlavorPollster(pollsters.BaseComputePollster): - - @staticmethod - def get_samples(manager, cache, resources): - for instance in resources: - yield util.make_sample_from_instance( - instance, - # Use the "meter name + variable" syntax - name='instance:%s' % - instance.flavor['name'], - type=sample.TYPE_GAUGE, - unit='instance', - volume=1, - ) diff -Nru ceilometer-5.0.0~b2/ceilometer/compute/pollsters/memory.py ceilometer-5.0.0~b3/ceilometer/compute/pollsters/memory.py --- ceilometer-5.0.0~b2/ceilometer/compute/pollsters/memory.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/compute/pollsters/memory.py 2015-09-03 13:05:55.000000000 +0000 @@ -30,13 +30,13 @@ def get_samples(self, manager, cache, resources): self._inspection_duration = self._record_poll_time() for instance in resources: - LOG.debug(_('Checking memory usage for instance %s'), instance.id) + LOG.debug('Checking memory usage for instance %s', instance.id) try: memory_info = self.inspector.inspect_memory_usage( instance, self._inspection_duration) - LOG.debug(_("MEMORY USAGE: %(instance)s %(usage)f"), - ({'instance': instance.__dict__, - 'usage': memory_info.usage})) + LOG.debug("MEMORY USAGE: %(instance)s %(usage)f", + {'instance': instance, + 'usage': memory_info.usage}) yield util.make_sample_from_instance( instance, name='memory.usage', @@ -46,7 +46,7 @@ ) except virt_inspector.InstanceNotFoundException as err: # Instance was deleted while getting samples. Ignore it. - LOG.debug(_('Exception while getting samples %s'), err) + LOG.debug('Exception while getting samples %s', err) except virt_inspector.InstanceShutOffException as e: LOG.warn(_LW('Instance %(instance_id)s was shut off while ' 'getting samples of %(pollster)s: %(exc)s'), @@ -59,8 +59,8 @@ 'instance_id': instance.id, 'exc': e}) except ceilometer.NotImplementedError: # Selected inspector does not implement this pollster. 
- LOG.debug(_('Obtaining Memory Usage is not implemented for %s' - ), self.inspector.__class__.__name__) + LOG.debug('Obtaining Memory Usage is not implemented for %s', + self.inspector.__class__.__name__) except Exception as err: LOG.exception(_('Could not get Memory Usage for ' '%(id)s: %(e)s'), {'id': instance.id, @@ -72,14 +72,14 @@ def get_samples(self, manager, cache, resources): self._inspection_duration = self._record_poll_time() for instance in resources: - LOG.debug(_('Checking resident memory for instance %s'), + LOG.debug('Checking resident memory for instance %s', instance.id) try: memory_info = self.inspector.inspect_memory_resident( instance, self._inspection_duration) - LOG.debug(_("RESIDENT MEMORY: %(instance)s %(resident)f"), - ({'instance': instance.__dict__, - 'resident': memory_info.resident})) + LOG.debug("RESIDENT MEMORY: %(instance)s %(resident)f", + {'instance': instance, + 'resident': memory_info.resident}) yield util.make_sample_from_instance( instance, name='memory.resident', @@ -89,7 +89,7 @@ ) except virt_inspector.InstanceNotFoundException as err: # Instance was deleted while getting samples. Ignore it. - LOG.debug(_('Exception while getting samples %s'), err) + LOG.debug('Exception while getting samples %s', err) except virt_inspector.InstanceShutOffException as e: LOG.warn(_LW('Instance %(instance_id)s was shut off while ' 'getting samples of %(pollster)s: %(exc)s'), @@ -102,8 +102,8 @@ 'instance_id': instance.id, 'exc': e}) except ceilometer.NotImplementedError: # Selected inspector does not implement this pollster. - LOG.debug(_('Obtaining Resident Memory is not implemented' - ' for %s'), self.inspector.__class__.__name__) + LOG.debug('Obtaining Resident Memory is not implemented' + ' for %s', self.inspector.__class__.__name__) except Exception as err: LOG.exception(_LE('Could not get Resident Memory Usage for ' '%(id)s: %(e)s'), {'id': instance.id, diff -Nru ceilometer-5.0.0~b2/ceilometer/compute/pollsters/net.py ceilometer-5.0.0~b3/ceilometer/compute/pollsters/net.py --- ceilometer-5.0.0~b2/ceilometer/compute/pollsters/net.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/compute/pollsters/net.py 2015-09-03 13:05:55.000000000 +0000 @@ -89,7 +89,7 @@ self._inspection_duration = self._record_poll_time() for instance in resources: instance_name = util.instance_name(instance) - LOG.debug(_('checking net info for instance %s'), instance.id) + LOG.debug('checking net info for instance %s', instance.id) try: vnics = self._get_vnics_for_instance( cache, @@ -103,7 +103,7 @@ yield self._get_sample(instance, vnic, info) except virt_inspector.InstanceNotFoundException as err: # Instance was deleted while getting samples. Ignore it. - LOG.debug(_('Exception while getting samples %s'), err) + LOG.debug('Exception while getting samples %s', err) except virt_inspector.InstanceShutOffException as e: LOG.warn(_LW('Instance %(instance_id)s was shut off while ' 'getting samples of %(pollster)s: %(exc)s'), @@ -111,8 +111,8 @@ 'pollster': self.__class__.__name__, 'exc': e}) except ceilometer.NotImplementedError: # Selected inspector does not implement this pollster. 
- LOG.debug(_('%(inspector)s does not provide data for ' - ' %(pollster)s'), + LOG.debug('%(inspector)s does not provide data for ' + ' %(pollster)s', {'inspector': self.inspector.__class__.__name__, 'pollster': self.__class__.__name__}) except Exception as err: diff -Nru ceilometer-5.0.0~b2/ceilometer/compute/pollsters/util.py ceilometer-5.0.0~b3/ceilometer/compute/pollsters/util.py --- ceilometer-5.0.0~b2/ceilometer/compute/pollsters/util.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/compute/pollsters/util.py 2015-09-03 13:05:55.000000000 +0000 @@ -33,10 +33,11 @@ def _get_metadata_from_object(instance): """Return a metadata dictionary for the instance.""" + instance_type = instance.flavor['name'] if instance.flavor else None metadata = { 'display_name': instance.name, 'name': getattr(instance, 'OS-EXT-SRV-ATTR:instance_name', u''), - 'instance_type': (instance.flavor['id'] if instance.flavor else None), + 'instance_type': instance_type, 'host': instance.hostId, 'flavor': instance.flavor, 'status': instance.status.lower(), diff -Nru ceilometer-5.0.0~b2/ceilometer/compute/virt/inspector.py ceilometer-5.0.0~b3/ceilometer/compute/virt/inspector.py --- ceilometer-5.0.0~b2/ceilometer/compute/virt/inspector.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/compute/virt/inspector.py 2015-09-03 13:05:55.000000000 +0000 @@ -27,8 +27,9 @@ OPTS = [ cfg.StrOpt('hypervisor_inspector', default='libvirt', - choices=('hyperv', 'libvirt', 'vmware', 'xenapi'), - help='Inspector to use for inspecting the hypervisor layer.'), + help='Inspector to use for inspecting the hypervisor layer. ' + 'Known inspectors are libvirt, hyperv, vmware, xenapi ' + 'and powervm.'), ] cfg.CONF.register_opts(OPTS) @@ -183,10 +184,22 @@ pass +class NoSanityException(InspectorException): + pass + + # Main virt inspector abstraction layering over the hypervisor API. # class Inspector(object): + def check_sanity(self): + """Check the sanity of hypervisor inspector. + + Each subclass could overwrite it to throw any exception + when detecting mis-configured inspector + """ + pass + def inspect_cpus(self, instance): """Inspect the CPU statistics for an instance. @@ -244,6 +257,16 @@ """ raise ceilometer.NotImplementedError + def inspect_memory_resident(self, instance, duration=None): + """Inspect the resident memory statistics for an instance. + + :param instance: the target instance + :param duration: the last 'n' seconds, over which the value should be + inspected + :return: the amount of resident memory + """ + raise ceilometer.NotImplementedError + def inspect_disk_rates(self, instance, duration=None): """Inspect the disk statistics as rates for an instance. 
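[The check_sanity() hook added to the Inspector base class above is called from BaseComputePollster.setup_environment() (see the compute/pollsters/__init__.py hunk earlier in this diff), so a misconfigured inspector can disable its pollsters once at startup rather than failing on every poll. A minimal sketch of the intended subclass pattern follows; ExampleInspector and its _get_connection() helper are hypothetical stand-ins, while Inspector and NoSanityException are the names this patch introduces.

    from ceilometer.compute.virt import inspector as virt_inspector


    class ExampleInspector(virt_inspector.Inspector):

        def _get_connection(self):
            # Hypothetical helper: return a handle to the hypervisor
            # API, or None when the endpoint is unreachable or
            # misconfigured.
            return None

        def check_sanity(self):
            # Raising NoSanityException lets
            # BaseComputePollster.setup_environment() propagate the
            # failure at startup instead of erroring each cycle.
            if not self._get_connection():
                raise virt_inspector.NoSanityException()

The libvirt inspector changes in the next file section apply exactly this pattern, raising NoSanityException when openReadOnly() yields no connection.]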
diff -Nru ceilometer-5.0.0~b2/ceilometer/compute/virt/libvirt/inspector.py ceilometer-5.0.0~b3/ceilometer/compute/virt/libvirt/inspector.py --- ceilometer-5.0.0~b2/ceilometer/compute/virt/libvirt/inspector.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/compute/virt/libvirt/inspector.py 2015-09-03 13:05:55.000000000 +0000 @@ -51,7 +51,7 @@ if (e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and e.get_error_domain() in (libvirt.VIR_FROM_REMOTE, libvirt.VIR_FROM_RPC)): - LOG.debug(_('Connection to libvirt broken')) + LOG.debug('Connection to libvirt broken') self.connection = None return function(self, *args, **kwargs) else: @@ -76,11 +76,15 @@ global libvirt if libvirt is None: libvirt = __import__('libvirt') - LOG.debug(_('Connecting to libvirt: %s'), self.uri) + LOG.debug('Connecting to libvirt: %s', self.uri) self.connection = libvirt.openReadOnly(self.uri) return self.connection + def check_sanity(self): + if not self._get_connection(): + raise virt_inspector.NoSanityException() + @retry_on_disconnect def _lookup_by_uuid(self, instance): instance_name = util.instance_name(instance) diff -Nru ceilometer-5.0.0~b2/ceilometer/compute/virt/vmware/inspector.py ceilometer-5.0.0~b3/ceilometer/compute/virt/vmware/inspector.py --- ceilometer-5.0.0~b2/ceilometer/compute/virt/vmware/inspector.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/compute/virt/vmware/inspector.py 2015-09-03 13:05:55.000000000 +0000 @@ -34,6 +34,7 @@ help='IP address of the VMware vSphere host.'), cfg.IntOpt('host_port', default=443, + min=1, max=65535, help='Port of the VMware vSphere host.'), cfg.StrOpt('host_username', default='', diff -Nru ceilometer-5.0.0~b2/ceilometer/compute/virt/xenapi/inspector.py ceilometer-5.0.0~b3/ceilometer/compute/virt/xenapi/inspector.py --- ceilometer-5.0.0~b2/ceilometer/compute/virt/xenapi/inspector.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/compute/virt/xenapi/inspector.py 2015-09-03 13:05:55.000000000 +0000 @@ -66,7 +66,8 @@ exception = api.Failure(_("Unable to log in to XenAPI " "(is the Dom0 disk full?)")) try: - session = api.Session(url) + session = (api.xapi_local() if url == 'unix://local' + else api.Session(url)) with timeout.Timeout(CONF.xenapi.login_timeout, exception): session.login_with_password(username, password) except api.Failure as e: diff -Nru ceilometer-5.0.0~b2/ceilometer/coordination.py ceilometer-5.0.0~b3/ceilometer/coordination.py --- ceilometer-5.0.0~b2/ceilometer/coordination.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/coordination.py 2015-09-03 13:05:55.000000000 +0000 @@ -63,7 +63,6 @@ self._coordinator = None self._groups = set() self._my_id = my_id or str(uuid.uuid4()) - self._started = False def start(self): backend_url = cfg.CONF.coordination.backend_url @@ -72,10 +71,8 @@ self._coordinator = tooz.coordination.get_coordinator( backend_url, self._my_id) self._coordinator.start() - self._started = True LOG.info(_LI('Coordination backend started successfully.')) except tooz.coordination.ToozError: - self._started = False LOG.exception(_LE('Error connecting to coordination backend.')) def stop(self): @@ -91,14 +88,13 @@ LOG.exception(_LE('Error connecting to coordination backend.')) finally: self._coordinator = None - self._started = False def is_active(self): return self._coordinator is not None def heartbeat(self): if self._coordinator: - if not self._started: + if not self._coordinator.is_started: # re-connect self.start() try: @@ -117,7 +113,8 
@@ self._coordinator.run_watchers() def join_group(self, group_id): - if not self._coordinator or not self._started or not group_id: + if (not self._coordinator or not self._coordinator.is_started + or not group_id): return while True: try: @@ -171,7 +168,7 @@ hr = utils.HashRing(members) filtered = [v for v in iterable if hr.get_node(str(v)) == self._my_id] - LOG.debug('My subset: %s', filtered) + LOG.debug('My subset: %s', [str(f) for f in filtered]) return filtered except tooz.coordination.ToozError: LOG.exception(_LE('Error getting group membership info from ' diff -Nru ceilometer-5.0.0~b2/ceilometer/database/notifications.py ceilometer-5.0.0~b3/ceilometer/database/notifications.py --- ceilometer-5.0.0~b2/ceilometer/database/notifications.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/database/notifications.py 2015-09-03 13:05:55.000000000 +0000 @@ -22,7 +22,7 @@ SERVICE = 'trove' cfg.CONF.import_opt('trove_control_exchange', - 'ceilometer.profiler.notifications') + 'ceilometer.notification') class TroveMetricsNotificationBase(plugin_base.NotificationBase): diff -Nru ceilometer-5.0.0~b2/ceilometer/data_processing/notifications.py ceilometer-5.0.0~b3/ceilometer/data_processing/notifications.py --- ceilometer-5.0.0~b2/ceilometer/data_processing/notifications.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/data_processing/notifications.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,71 +0,0 @@ -# Copyright (c) 2014 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg -import oslo_messaging - -from ceilometer.agent import plugin_base -from ceilometer import sample - - -OPTS = [ - cfg.StrOpt('sahara_control_exchange', - default='sahara', - help="Exchange name for Data Processing notifications."), -] - -cfg.CONF.register_opts(OPTS) -SERVICE = 'sahara' - - -class DataProcessing(plugin_base.NotificationBase, - plugin_base.NonMetricNotificationBase): - - resource_name = '%s.cluster' % SERVICE - - @property - def event_types(self): - return [ - '%s.create' % self.resource_name, - '%s.update' % self.resource_name, - '%s.delete' % self.resource_name, - ] - - @staticmethod - def get_targets(conf): - """Return a sequence of oslo_messaging.Target - - It is defining the exchange and topics to be connected for this plugin. 
- """ - return [oslo_messaging.Target(topic=topic, - exchange=conf.sahara_control_exchange) - for topic in conf.notification_topics] - - def process_notification(self, message): - name = message['event_type'].replace(self.resource_name, 'cluster') - - project_id = message['payload']['project_id'] - - user_id = message['_context_user_id'] - - yield sample.Sample.from_notification( - name=name, - type=sample.TYPE_DELTA, - unit='cluster', - volume=1, - resource_id=message['payload']['cluster_id'], - user_id=user_id, - project_id=project_id, - message=message) diff -Nru ceilometer-5.0.0~b2/ceilometer/dispatcher/database.py ceilometer-5.0.0~b3/ceilometer/dispatcher/database.py --- ceilometer-5.0.0~b2/ceilometer/dispatcher/database.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/dispatcher/database.py 2015-09-03 13:05:55.000000000 +0000 @@ -18,7 +18,7 @@ from ceilometer import dispatcher from ceilometer.event.storage import models -from ceilometer.i18n import _, _LE, _LW +from ceilometer.i18n import _LE, _LW from ceilometer.publisher import utils as publisher_utils from ceilometer import storage @@ -73,13 +73,13 @@ data = [data] for meter in data: - LOG.debug(_( + LOG.debug( 'metering data %(counter_name)s ' - 'for %(resource_id)s @ %(timestamp)s: %(counter_volume)s') - % ({'counter_name': meter['counter_name'], - 'resource_id': meter['resource_id'], - 'timestamp': meter.get('timestamp', 'NO TIMESTAMP'), - 'counter_volume': meter['counter_volume']})) + 'for %(resource_id)s @ %(timestamp)s: %(counter_volume)s', + {'counter_name': meter['counter_name'], + 'resource_id': meter['resource_id'], + 'timestamp': meter.get('timestamp', 'NO TIMESTAMP'), + 'counter_volume': meter['counter_volume']}) if publisher_utils.verify_signature( meter, self.conf.publisher.telemetry_secret): try: diff -Nru ceilometer-5.0.0~b2/ceilometer/dispatcher/gnocchi_client.py ceilometer-5.0.0~b3/ceilometer/dispatcher/gnocchi_client.py --- ceilometer-5.0.0~b2/ceilometer/dispatcher/gnocchi_client.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/dispatcher/gnocchi_client.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,200 @@ +# +# Copyright 2015 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import functools +import json + +from oslo_log import log +import requests +import retrying +from six.moves.urllib import parse as urlparse + +from ceilometer.i18n import _ +from ceilometer import keystone_client + +LOG = log.getLogger(__name__) + + +class UnexpectedError(Exception): + pass + + +class AuthenticationError(Exception): + pass + + +class NoSuchMetric(Exception): + pass + + +class MetricAlreadyExists(Exception): + pass + + +class NoSuchResource(Exception): + pass + + +class ResourceAlreadyExists(Exception): + pass + + +def retry_if_authentication_error(exception): + return isinstance(exception, AuthenticationError) + + +def maybe_retry_if_authentication_error(): + return retrying.retry(retry_on_exception=retry_if_authentication_error, + wait_fixed=2000, + stop_max_delay=60000) + + +class GnocchiSession(object): + def __init__(self): + self._session = requests.session() + # NOTE(sileht): wait when the pool is empty + # instead of raising errors. + adapter = requests.adapters.HTTPAdapter( + pool_block=True) + self._session.mount("http://", adapter) + self._session.mount("https://", adapter) + + self.post = functools.partial(self._do_method, method='post') + self.patch = functools.partial(self._do_method, method='patch') + + def _do_method(self, *args, **kwargs): + method = kwargs.pop('method') + try: + response = getattr(self._session, method)(*args, **kwargs) + except requests.ConnectionError as e: + raise UnexpectedError("Connection error: %s " % e) + + if response.status_code == 401: + LOG.info("Authentication failure, retrying...") + raise AuthenticationError() + + return response + + +class Client(object): + def __init__(self, url): + self._gnocchi_url = url + self._ks_client = keystone_client.get_client() + self._session = GnocchiSession() + + def _get_headers(self, content_type="application/json"): + return { + 'Content-Type': content_type, + 'X-Auth-Token': self._ks_client.auth_token, + } + + @maybe_retry_if_authentication_error() + def post_measure(self, resource_type, resource_id, metric_name, + measure_attributes): + r = self._session.post("%s/v1/resource/%s/%s/metric/%s/measures" + % (self._gnocchi_url, resource_type, + urlparse.quote(resource_id, safe=""), + metric_name), + headers=self._get_headers(), + data=json.dumps(measure_attributes)) + + if r.status_code == 404: + LOG.debug("The metric %(metric_name)s of " + "resource %(resource_id)s doesn't exists: " + "%(status_code)d", + {'metric_name': metric_name, + 'resource_id': resource_id, + 'status_code': r.status_code}) + raise NoSuchMetric + elif r.status_code // 100 != 2: + raise UnexpectedError( + _("Fail to post measure on metric %(metric_name)s of " + "resource %(resource_id)s with status: " + "%(status_code)d: %(msg)s") % + {'metric_name': metric_name, + 'resource_id': resource_id, + 'status_code': r.status_code, + 'msg': r.text}) + else: + LOG.debug("Measure posted on metric %s of resource %s", + metric_name, resource_id) + + @maybe_retry_if_authentication_error() + def create_resource(self, resource_type, resource): + r = self._session.post("%s/v1/resource/%s" + % (self._gnocchi_url, resource_type), + headers=self._get_headers(), + data=json.dumps(resource)) + + if r.status_code == 409: + LOG.debug("Resource %s already exists", resource['id']) + raise ResourceAlreadyExists + + elif r.status_code // 100 != 2: + raise UnexpectedError( + _("Resource %(resource_id)s creation failed with " + "status: %(status_code)d: %(msg)s") % + {'resource_id': resource['id'], + 'status_code': r.status_code, + 'msg': 
r.text}) + else: + LOG.debug("Resource %s created", resource['id']) + + @maybe_retry_if_authentication_error() + def update_resource(self, resource_type, resource_id, + resource_extra): + r = self._session.patch( + "%s/v1/resource/%s/%s" + % (self._gnocchi_url, resource_type, + urlparse.quote(resource_id, safe="")), + headers=self._get_headers(), + data=json.dumps(resource_extra)) + + if r.status_code // 100 != 2: + raise UnexpectedError( + _("Resource %(resource_id)s update failed with " + "status: %(status_code)d: %(msg)s") % + {'resource_id': resource_id, + 'status_code': r.status_code, + 'msg': r.text}) + else: + LOG.debug("Resource %s updated", resource_id) + + @maybe_retry_if_authentication_error() + def create_metric(self, resource_type, resource_id, metric_name, + archive_policy): + params = {metric_name: archive_policy} + r = self._session.post("%s/v1/resource/%s/%s/metric" + % (self._gnocchi_url, resource_type, + urlparse.quote(resource_id, safe="")), + headers=self._get_headers(), + data=json.dumps(params)) + if r.status_code == 409: + LOG.debug("Metric %s of resource %s already exists", + metric_name, resource_id) + raise MetricAlreadyExists + + elif r.status_code // 100 != 2: + raise UnexpectedError( + _("Fail to create metric %(metric_name)s of " + "resource %(resource_id)s with status: " + "%(status_code)d: %(msg)s") % + {'metric_name': metric_name, + 'resource_id': resource_id, + 'status_code': r.status_code, + 'msg': r.text}) + else: + LOG.debug("Metric %s of resource %s created", + metric_name, resource_id) diff -Nru ceilometer-5.0.0~b2/ceilometer/dispatcher/gnocchi.py ceilometer-5.0.0~b3/ceilometer/dispatcher/gnocchi.py --- ceilometer-5.0.0~b2/ceilometer/dispatcher/gnocchi.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/dispatcher/gnocchi.py 2015-09-03 13:05:55.000000000 +0000 @@ -16,20 +16,20 @@ # License for the specific language governing permissions and limitations # under the License. import fnmatch +import functools import itertools -import json import operator import os import threading -import jsonpath_rw +from jsonpath_rw_ext import parser from oslo_config import cfg from oslo_log import log -import requests import six import yaml from ceilometer import dispatcher +from ceilometer.dispatcher import gnocchi_client from ceilometer.i18n import _, _LE from ceilometer import keystone_client @@ -65,35 +65,11 @@ cfg.CONF.register_opts(dispatcher_opts, group="dispatcher_gnocchi") -class UnexpectedWorkflowError(Exception): - pass - - -class NoSuchMetric(Exception): - pass - - -class MetricAlreadyExists(Exception): - pass - - -class NoSuchResource(Exception): - pass - - -class ResourceAlreadyExists(Exception): - pass - - def log_and_ignore_unexpected_workflow_error(func): def log_and_ignore(self, *args, **kwargs): try: func(self, *args, **kwargs) - except requests.ConnectionError as e: - with self._gnocchi_api_lock: - self._gnocchi_api = None - LOG.warn("Connection error, reconnecting...") - except UnexpectedWorkflowError as e: + except gnocchi_client.UnexpectedError as e: LOG.error(six.text_type(e)) return log_and_ignore @@ -102,8 +78,7 @@ def __init__(self, definition_cfg): self.cfg = definition_cfg if self.cfg is None: - LOG.debug(_("No archive policy file found!" - " Using default config.")) + LOG.debug("No archive policy file found! 
Using default config.") def get(self, metric_name): if self.cfg is not None: @@ -128,12 +103,44 @@ MANDATORY_FIELDS = {'resource_type': six.string_types, 'metrics': list} + JSONPATH_RW_PARSER = parser.ExtentedJsonPathParser() + def __init__(self, definition_cfg, default_archive_policy, legacy_archive_policy_defintion): self._default_archive_policy = default_archive_policy self._legacy_archive_policy_defintion = legacy_archive_policy_defintion self.cfg = definition_cfg - self._validate() + + for field, field_type in self.MANDATORY_FIELDS.items(): + if field not in self.cfg: + raise ResourcesDefinitionException( + _LE("Required field %s not specified") % field, self.cfg) + if not isinstance(self.cfg[field], field_type): + raise ResourcesDefinitionException( + _LE("Required field %(field)s should be a %(type)s") % + {'field': field, 'type': field_type}, self.cfg) + + self._field_getter = {} + for name, fval in self.cfg.get('attributes', {}).items(): + if isinstance(fval, six.integer_types): + self._field_getter[name] = fval + else: + try: + parts = self.JSONPATH_RW_PARSER.parse(fval) + except Exception as e: + raise ResourcesDefinitionException( + _LE("Parse error in JSONPath specification " + "'%(jsonpath)s': %(err)s") + % dict(jsonpath=fval, err=e), self.cfg) + self._field_getter[name] = functools.partial( + self._parse_jsonpath_field, parts) + + @staticmethod + def _parse_jsonpath_field(parts, sample): + values = [match.value for match in parts.find(sample) + if match.value is not None] + if values: + return values[0] def match(self, metric_name): for t in self.cfg['metrics']: @@ -143,11 +150,13 @@ def attributes(self, sample): attrs = {} - for attribute_info in self.cfg.get('attributes', []): - for attr, field in attribute_info.items(): - value = self._parse_field(field, sample) - if value is not None: - attrs[attr] = value + for attr, getter in self._field_getter.items(): + if callable(getter): + value = getter(sample) + else: + value = getter + if value is not None: + attrs[attr] = value return attrs def metrics(self): @@ -160,35 +169,6 @@ self._default_archive_policy) return metrics - def _parse_field(self, field, sample): - # TODO(sileht): share this with - # https://review.openstack.org/#/c/197633/ - if not field: - return - if isinstance(field, six.integer_types): - return field - try: - parts = jsonpath_rw.parse(field) - except Exception as e: - raise ResourcesDefinitionException( - _LE("Parse error in JSONPath specification " - "'%(jsonpath)s': %(err)s") - % dict(jsonpath=field, err=e), self.cfg) - values = [match.value for match in parts.find(sample) - if match.value is not None] - if values: - return values[0] - - def _validate(self): - for field, field_type in self.MANDATORY_FIELDS.items(): - if field not in self.cfg: - raise ResourcesDefinitionException( - _LE("Required field %s not specified") % field, self.cfg) - if not isinstance(self.cfg[field], field_type): - raise ResourcesDefinitionException( - _LE("Required field %(field)s should be a %(type)s") % - {'field': field, 'type': field_type}, self.cfg) - class GnocchiDispatcher(dispatcher.Base): def __init__(self, conf): @@ -197,20 +177,13 @@ self.filter_service_activity = ( conf.dispatcher_gnocchi.filter_service_activity) self._ks_client = keystone_client.get_client() - self.gnocchi_url = conf.dispatcher_gnocchi.url self.gnocchi_archive_policy_data = self._load_archive_policy(conf) self.resources_definition = self._load_resources_definitions(conf) self._gnocchi_project_id = None self._gnocchi_project_id_lock = 
threading.Lock() - self._gnocchi_api = None - self._gnocchi_api_lock = threading.Lock() - def _get_headers(self, content_type="application/json"): - return { - 'Content-Type': content_type, - 'X-Auth-Token': self._ks_client.auth_token, - } + self._gnocchi = gnocchi_client.Client(conf.dispatcher_gnocchi.url) # TODO(sileht): Share yaml loading with # event converter and declarative notification @@ -264,27 +237,9 @@ LOG.exception('fail to retreive user of Gnocchi service') raise self._gnocchi_project_id = project.id - LOG.debug("gnocchi project found: %s" % - self.gnocchi_project_id) + LOG.debug("gnocchi project found: %s", self.gnocchi_project_id) return self._gnocchi_project_id - @property - def gnocchi_api(self): - """return a working requests session object""" - if self._gnocchi_api is not None: - return self._gnocchi_api - - with self._gnocchi_api_lock: - if self._gnocchi_api is None: - self._gnocchi_api = requests.session() - # NOTE(sileht): wait when the pool is empty - # instead of raising errors. - adapter = requests.adapters.HTTPAdapter(pool_block=True) - self._gnocchi_api.mount("http://", adapter) - self._gnocchi_api.mount("https://", adapter) - - return self._gnocchi_api - def _is_swift_account_sample(self, sample): return bool([rd for rd in self.resources_definition if rd.cfg['resource_type'] == 'swift_account' @@ -320,181 +275,75 @@ data, key=operator.itemgetter('resource_id')) for resource_id, samples_of_resource in resource_grouped_samples: - resource_need_to_be_updated = True - metric_grouped_samples = itertools.groupby( list(samples_of_resource), key=operator.itemgetter('counter_name')) - for metric_name, samples in metric_grouped_samples: - samples = list(samples) - rd = self._get_resource_definition(metric_name) - if rd: - self._process_samples(rd, resource_id, metric_name, - samples, - resource_need_to_be_updated) - else: - LOG.warn("metric %s is not handled by gnocchi" % - metric_name) - - # FIXME(sileht): Does it reasonable to skip the resource - # update here ? Does differents kind of counter_name - # can have different metadata set ? - # (ie: one have only flavor_id, and an other one have only - # image_ref ?) 
- # - # resource_need_to_be_updated = False + + self._process_resource(resource_id, metric_grouped_samples) @log_and_ignore_unexpected_workflow_error - def _process_samples(self, resource_def, resource_id, metric_name, samples, - resource_need_to_be_updated): - resource_type = resource_def.cfg['resource_type'] - measure_attributes = [{'timestamp': sample['timestamp'], - 'value': sample['counter_volume']} - for sample in samples] + def _process_resource(self, resource_id, metric_grouped_samples): + resource_extra = {} + for metric_name, samples in metric_grouped_samples: + samples = list(samples) + rd = self._get_resource_definition(metric_name) + if rd is None: + LOG.warn("metric %s is not handled by gnocchi" % + metric_name) + continue + + resource_type = rd.cfg['resource_type'] + resource = { + "id": resource_id, + "user_id": samples[0]['user_id'], + "project_id": samples[0]['project_id'], + "metrics": rd.metrics(), + } + measures = [] + + for sample in samples: + resource_extra.update(rd.attributes(sample)) + measures.append({'timestamp': sample['timestamp'], + 'value': sample['counter_volume']}) + + resource.update(resource_extra) - try: - self._post_measure(resource_type, resource_id, metric_name, - measure_attributes) - except NoSuchMetric: - # NOTE(sileht): we try first to create the resource, because - # they more chance that the resource doesn't exists than the metric - # is missing, the should be reduce the number of resource API call - resource_attributes = self._get_resource_attributes( - resource_def, resource_id, metric_name, samples) try: - self._create_resource(resource_type, resource_id, - resource_attributes) - except ResourceAlreadyExists: + self._gnocchi.post_measure(resource_type, resource_id, + metric_name, measures) + except gnocchi_client.NoSuchMetric: + # TODO(sileht): Make gnocchi smarter to be able to detect 404 + # for 'resource doesn't exist' and for 'metric doesn't exist' + # https://bugs.launchpad.net/gnocchi/+bug/1476186 + self._ensure_resource_and_metric(resource_type, resource, + metric_name) + try: - archive_policy = (resource_def.metrics()[metric_name]) - self._create_metric(resource_type, resource_id, - metric_name, archive_policy) - except MetricAlreadyExists: - # NOTE(sileht): Just ignore the metric have been created in - # the meantime. - pass - else: - # No need to update it we just created it - # with everything we need - resource_need_to_be_updated = False - - # NOTE(sileht): we retry to post the measure but if it fail we - # don't catch the exception to just log it and continue to process - # other samples - self._post_measure(resource_type, resource_id, metric_name, - measure_attributes) - - if resource_need_to_be_updated: - resource_attributes = self._get_resource_attributes( - resource_def, resource_id, metric_name, samples, - for_update=True) - if resource_attributes: - self._update_resource(resource_type, resource_id, - resource_attributes) - - def _get_resource_attributes(self, resource_def, resource_id, metric_name, - samples, for_update=False): - # FIXME(sileht): Should I merge attibutes of all samples ? - # Or keep only the last one is sufficient ? 
- attributes = resource_def.attributes(samples[-1]) - if not for_update: - attributes["id"] = resource_id - attributes["user_id"] = samples[-1]['user_id'] - attributes["project_id"] = samples[-1]['project_id'] - attributes["metrics"] = resource_def.metrics() - return attributes - - def _post_measure(self, resource_type, resource_id, metric_name, - measure_attributes): - r = self.gnocchi_api.post("%s/v1/resource/%s/%s/metric/%s/measures" - % (self.gnocchi_url, resource_type, - resource_id, metric_name), - headers=self._get_headers(), - data=json.dumps(measure_attributes)) - if r.status_code == 404: - LOG.debug(_("The metric %(metric_name)s of " - "resource %(resource_id)s doesn't exists: " - "%(status_code)d"), - {'metric_name': metric_name, - 'resource_id': resource_id, - 'status_code': r.status_code}) - raise NoSuchMetric - elif int(r.status_code / 100) != 2: - raise UnexpectedWorkflowError( - _("Fail to post measure on metric %(metric_name)s of " - "resource %(resource_id)s with status: " - "%(status_code)d: %(msg)s") % - {'metric_name': metric_name, - 'resource_id': resource_id, - 'status_code': r.status_code, - 'msg': r.text}) - else: - LOG.debug("Measure posted on metric %s of resource %s", - metric_name, resource_id) - - def _create_resource(self, resource_type, resource_id, - resource_attributes): - r = self.gnocchi_api.post("%s/v1/resource/%s" - % (self.gnocchi_url, resource_type), - headers=self._get_headers(), - data=json.dumps(resource_attributes)) - if r.status_code == 409: - LOG.debug("Resource %s already exists", resource_id) - raise ResourceAlreadyExists - - elif int(r.status_code / 100) != 2: - raise UnexpectedWorkflowError( - _("Resource %(resource_id)s creation failed with " - "status: %(status_code)d: %(msg)s") % - {'resource_id': resource_id, - 'status_code': r.status_code, - 'msg': r.text}) - else: - LOG.debug("Resource %s created", resource_id) - - def _update_resource(self, resource_type, resource_id, - resource_attributes): - r = self.gnocchi_api.patch( - "%s/v1/resource/%s/%s" - % (self.gnocchi_url, resource_type, resource_id), - headers=self._get_headers(), - data=json.dumps(resource_attributes)) - - if int(r.status_code / 100) != 2: - raise UnexpectedWorkflowError( - _("Resource %(resource_id)s update failed with " - "status: %(status_code)d: %(msg)s") % - {'resource_id': resource_id, - 'status_code': r.status_code, - 'msg': r.text}) - else: - LOG.debug("Resource %s updated", resource_id) - - def _create_metric(self, resource_type, resource_id, metric_name, - archive_policy): - params = {metric_name: archive_policy} - r = self.gnocchi_api.post("%s/v1/resource/%s/%s/metric" - % (self.gnocchi_url, resource_type, - resource_id), - headers=self._get_headers(), - data=json.dumps(params)) - if r.status_code == 409: - LOG.debug("Metric %s of resource %s already exists", - metric_name, resource_id) - raise MetricAlreadyExists - - elif int(r.status_code / 100) != 2: - raise UnexpectedWorkflowError( - _("Fail to create metric %(metric_name)s of " - "resource %(resource_id)s with status: " - "%(status_code)d: %(msg)s") % - {'metric_name': metric_name, - 'resource_id': resource_id, - 'status_code': r.status_code, - 'msg': r.text}) - else: - LOG.debug("Metric %s of resource %s created", - metric_name, resource_id) + self._gnocchi.post_measure(resource_type, resource_id, + metric_name, measures) + except gnocchi_client.NoSuchMetric: + LOG.error(_LE("Fail to post measures for " + "%(resource_id)s/%(metric_name)s") % + dict(resource_id=resource_id, + metric_name=metric_name)) + 
+        if resource_extra:
+            self._gnocchi.update_resource(resource_type, resource_id,
+                                          resource_extra)
+
+    def _ensure_resource_and_metric(self, resource_type, resource,
+                                    metric_name):
+        try:
+            self._gnocchi.create_resource(resource_type, resource)
+        except gnocchi_client.ResourceAlreadyExists:
+            try:
+                archive_policy = resource['metrics'][metric_name]
+                self._gnocchi.create_metric(resource_type, resource['id'],
+                                            metric_name, archive_policy)
+            except gnocchi_client.MetricAlreadyExists:
+                # NOTE(sileht): Just ignore it; the metric was created
+                # in the meantime.
+                pass
 
     @staticmethod
     def record_events(events):
diff -Nru ceilometer-5.0.0~b2/ceilometer/dispatcher/http.py ceilometer-5.0.0~b3/ceilometer/dispatcher/http.py
--- ceilometer-5.0.0~b2/ceilometer/dispatcher/http.py 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/dispatcher/http.py 2015-09-03 13:05:55.000000000 +0000
@@ -86,13 +86,13 @@
             data = [data]
 
         for meter in data:
-            LOG.debug(_(
+            LOG.debug(
                 'metering data %(counter_name)s '
-                'for %(resource_id)s @ %(timestamp)s: %(counter_volume)s')
-                % ({'counter_name': meter['counter_name'],
-                    'resource_id': meter['resource_id'],
-                    'timestamp': meter.get('timestamp', 'NO TIMESTAMP'),
-                    'counter_volume': meter['counter_volume']}))
+                'for %(resource_id)s @ %(timestamp)s: %(counter_volume)s',
+                {'counter_name': meter['counter_name'],
+                 'resource_id': meter['resource_id'],
+                 'timestamp': meter.get('timestamp', 'NO TIMESTAMP'),
+                 'counter_volume': meter['counter_volume']})
             if publisher_utils.verify_signature(
                     meter, self.conf.publisher.telemetry_secret):
                 try:
@@ -111,8 +111,8 @@
                                     data=json.dumps(data),
                                     headers=self.headers,
                                     timeout=self.timeout)
-                LOG.debug(_('Message posting finished with status code '
-                            '%d.') % res.status_code)
+                LOG.debug('Message posting finished with status code '
+                          '%d.', res.status_code)
             except Exception as err:
                 LOG.exception(_('Failed to record metering data: %s'), err)
diff -Nru ceilometer-5.0.0~b2/ceilometer/dispatcher/__init__.py ceilometer-5.0.0~b3/ceilometer/dispatcher/__init__.py
--- ceilometer-5.0.0~b2/ceilometer/dispatcher/__init__.py 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/dispatcher/__init__.py 2015-09-03 13:05:55.000000000 +0000
@@ -38,8 +38,7 @@
 
 
 def load_dispatcher_manager():
-    LOG.debug(_('loading dispatchers from %s'),
-              DISPATCHER_NAMESPACE)
+    LOG.debug('loading dispatchers from %s', DISPATCHER_NAMESPACE)
     # set propagate_map_exceptions to True to enable stevedore
     # to propagate exceptions.
dispatcher_manager = named.NamedExtensionManager( diff -Nru ceilometer-5.0.0~b2/ceilometer/energy/kwapi.py ceilometer-5.0.0~b3/ceilometer/energy/kwapi.py --- ceilometer-5.0.0~b2/ceilometer/energy/kwapi.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/energy/kwapi.py 2015-09-03 13:05:55.000000000 +0000 @@ -21,7 +21,6 @@ import six from ceilometer.agent import plugin_base -from ceilometer.i18n import _ from ceilometer import sample @@ -85,7 +84,7 @@ try: client = self.get_kwapi_client(ksclient, endpoint) except exceptions.EndpointNotFound: - LOG.debug(_("Kwapi endpoint not found")) + LOG.debug("Kwapi endpoint not found") return [] return list(client.iter_probes()) diff -Nru ceilometer-5.0.0~b2/ceilometer/event/converter.py ceilometer-5.0.0~b3/ceilometer/event/converter.py --- ceilometer-5.0.0~b2/ceilometer/event/converter.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/event/converter.py 2015-09-03 13:05:55.000000000 +0000 @@ -16,7 +16,7 @@ import fnmatch import os -import jsonpath_rw +from jsonpath_rw_ext import parser from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils @@ -59,6 +59,8 @@ class TraitDefinition(object): + JSONPATH_RW_PARSER = parser.ExtentedJsonPathParser() + def __init__(self, name, trait_cfg, plugin_manager): self.cfg = trait_cfg self.name = name @@ -106,7 +108,7 @@ else: fields = '|'.join('(%s)' % path for path in fields) try: - self.fields = jsonpath_rw.parse(fields) + self.fields = self.JSONPATH_RW_PARSER.parse(fields) except Exception as e: raise EventDefinitionException( _("Parse error in JSONPath specification " @@ -369,7 +371,7 @@ """Setup the event definitions from yaml config file.""" config_file = get_config_file() if config_file is not None: - LOG.debug(_("Event Definitions configuration file: %s"), config_file) + LOG.debug("Event Definitions configuration file: %s", config_file) with open(config_file) as cf: config = cf.read() @@ -392,8 +394,8 @@ raise else: - LOG.debug(_("No Event Definitions configuration file found!" - " Using default config.")) + LOG.debug("No Event Definitions configuration file found!" + " Using default config.") events_config = [] LOG.info(_("Event Definitions: %s"), events_config) diff -Nru ceilometer-5.0.0~b2/ceilometer/event/endpoint.py ceilometer-5.0.0~b3/ceilometer/event/endpoint.py --- ceilometer-5.0.0~b2/ceilometer/event/endpoint.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/event/endpoint.py 2015-09-03 13:05:55.000000000 +0000 @@ -1,4 +1,3 @@ -# # Copyright 2012-2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -21,7 +20,6 @@ from stevedore import extension from ceilometer.event import converter as event_converter -from ceilometer.i18n import _ from ceilometer import messaging LOG = logging.getLogger(__name__) @@ -30,7 +28,7 @@ class EventsNotificationEndpoint(object): def __init__(self, manager): super(EventsNotificationEndpoint, self).__init__() - LOG.debug(_('Loading event definitions')) + LOG.debug('Loading event definitions') self.ctxt = context.get_admin_context() self.event_converter = event_converter.setup_events( extension.ExtensionManager( @@ -52,7 +50,7 @@ # source of the notification. This will have to get added back later. 
notification = messaging.convert_to_old_notification_format( 'info', ctxt, publisher_id, event_type, payload, metadata) - self.process_notification(notification) + return self.process_notification(notification) def error(self, ctxt, publisher_id, event_type, payload, metadata): """Convert error message to Ceilometer Event. @@ -69,7 +67,7 @@ # source of the notification. This will have to get added back later. notification = messaging.convert_to_old_notification_format( 'error', ctxt, publisher_id, event_type, payload, metadata) - self.process_notification(notification) + return self.process_notification(notification) def process_notification(self, notification): try: diff -Nru ceilometer-5.0.0~b2/ceilometer/event/storage/impl_hbase.py ceilometer-5.0.0~b3/ceilometer/event/storage/impl_hbase.py --- ceilometer-5.0.0~b2/ceilometer/event/storage/impl_hbase.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/event/storage/impl_hbase.py 2015-09-03 13:05:55.000000000 +0000 @@ -17,7 +17,7 @@ from ceilometer.event.storage import base from ceilometer.event.storage import models -from ceilometer.i18n import _, _LE +from ceilometer.i18n import _LE from ceilometer.storage.hbase import base as hbase_base from ceilometer.storage.hbase import utils as hbase_utils from ceilometer import utils @@ -77,17 +77,17 @@ hbase_utils.create_tables(conn, tables, column_families) def clear(self): - LOG.debug(_('Dropping HBase schema...')) + LOG.debug('Dropping HBase schema...') with self.conn_pool.connection() as conn: for table in [self.EVENT_TABLE]: try: conn.disable_table(table) except Exception: - LOG.debug(_('Cannot disable table but ignoring error')) + LOG.debug('Cannot disable table but ignoring error') try: conn.delete_table(table) except Exception: - LOG.debug(_('Cannot delete table but ignoring error')) + LOG.debug('Cannot delete table but ignoring error') def record_events(self, event_models): """Write the events to Hbase. 
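The impl_sqlalchemy hunk that follows pins an explicit transaction isolation level per session: SERIALIZABLE on SQLite, which has no REPEATABLE READ mode, and REPEATABLE READ elsewhere, so that the event listing and its union_all trait queries read a consistent snapshot. A standalone sketch of the same pattern, assuming plain SQLAlchemy 1.4+ session semantics with an in-memory SQLite engine (the patch itself goes through the oslo.db EngineFacade, which should behave the same way):

    import sqlalchemy
    from sqlalchemy.orm import sessionmaker

    engine = sqlalchemy.create_engine('sqlite://')
    # SQLite has no REPEATABLE READ, hence the SERIALIZABLE fallback
    # chosen in the patch below.
    isolation_level = ('SERIALIZABLE' if engine.name == 'sqlite'
                       else 'REPEATABLE READ')

    session = sessionmaker(engine)()
    with session.begin():
        # Must be the first call in the transaction; it affects only this
        # session's current connection, not the whole engine.
        session.connection(
            execution_options={'isolation_level': isolation_level})
        # ... run the event/trait queries here ...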
diff -Nru ceilometer-5.0.0~b2/ceilometer/event/storage/impl_sqlalchemy.py ceilometer-5.0.0~b3/ceilometer/event/storage/impl_sqlalchemy.py --- ceilometer-5.0.0~b2/ceilometer/event/storage/impl_sqlalchemy.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/event/storage/impl_sqlalchemy.py 2015-09-03 13:05:55.000000000 +0000 @@ -131,6 +131,10 @@ options = dict(cfg.CONF.database.items()) options['max_retries'] = 0 self._engine_facade = db_session.EngineFacade(url, **options) + if self._engine_facade.get_engine().name == 'sqlite': + self.isolation_level = 'SERIALIZABLE' + else: + self.isolation_level = 'REPEATABLE READ' def upgrade(self): # NOTE(gordc): to minimise memory, only import migration when needed @@ -212,6 +216,9 @@ return session = self._engine_facade.get_session() with session.begin(): + session.connection( + execution_options={'isolation_level': self.isolation_level}) + # Build up the join conditions event_join_conditions = [models.EventType.id == models.Event.event_type_id] @@ -282,7 +289,7 @@ sa.cast(sa.null(), sa.String(255))) .filter(sa.exists().where( models.TraitDatetime.event_id == query.subquery().c.id)) - ).union( + ).union_all( session.query( models.TraitInt.event_id, models.TraitInt.key, sa.null(), @@ -317,8 +324,13 @@ dtype = api_models.Trait.TEXT_TYPE val = t_text - trait_model = api_models.Trait(key, dtype, val) - event_list[id_].append_trait(trait_model) + try: + trait_model = api_models.Trait(key, dtype, val) + event_list[id_].append_trait(trait_model) + except KeyError: + LOG.warning('Trait key: %(key)s, val: %(val)s, for event: ' + '%(event)s not valid.' % + {'key': key, 'val': val, 'event': id_}) return event_list.values() diff -Nru ceilometer-5.0.0~b2/ceilometer/exchange_control.py ceilometer-5.0.0~b3/ceilometer/exchange_control.py --- ceilometer-5.0.0~b2/ceilometer/exchange_control.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/exchange_control.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,47 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo_config import cfg + +EXCHANGE_OPTS = [ + cfg.StrOpt('heat_control_exchange', + default='heat', + help="Exchange name for Heat notifications"), + cfg.StrOpt('glance_control_exchange', + default='glance', + help="Exchange name for Glance notifications."), + cfg.StrOpt('magnetodb_control_exchange', + default='magnetodb', + help="Exchange name for Magnetodb notifications."), + cfg.StrOpt('keystone_control_exchange', + default='keystone', + help="Exchange name for Keystone notifications."), + cfg.StrOpt('cinder_control_exchange', + default='cinder', + help="Exchange name for Cinder notifications."), + cfg.StrOpt('sahara_control_exchange', + default='sahara', + help="Exchange name for Data Processing notifications."), + cfg.StrOpt('swift_control_exchange', + default='swift', + help="Exchange name for Swift notifications."), + cfg.StrOpt('magnum_control_exchange', + default='magnum', + help="Exchange name for Magnum notifications."), + cfg.StrOpt('trove_control_exchange', + default='trove', + help="Exchange name for DBaaS notifications."), + cfg.StrOpt('zaqar_control_exchange', + default='zaqar', + help="Exchange name for Messaging service notifications."), +] diff -Nru ceilometer-5.0.0~b2/ceilometer/hardware/pollsters/disk.py ceilometer-5.0.0~b3/ceilometer/hardware/pollsters/disk.py --- ceilometer-5.0.0~b2/ceilometer/hardware/pollsters/disk.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/hardware/pollsters/disk.py 2015-09-03 13:05:55.000000000 +0000 @@ -35,7 +35,7 @@ return util.make_sample_from_host(host, name=self.IDENTIFIER, sample_type=sample.TYPE_GAUGE, - unit='B', + unit='KB', volume=value, res_metadata=metadata, extra=extra, diff -Nru ceilometer-5.0.0~b2/ceilometer/hardware/pollsters/memory.py ceilometer-5.0.0~b3/ceilometer/hardware/pollsters/memory.py --- ceilometer-5.0.0~b2/ceilometer/hardware/pollsters/memory.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/hardware/pollsters/memory.py 2015-09-03 13:05:55.000000000 +0000 @@ -32,7 +32,7 @@ return util.make_sample_from_host(host, name=self.IDENTIFIER, sample_type=sample.TYPE_GAUGE, - unit='B', + unit='KB', volume=value, res_metadata=metadata, extra=extra) diff -Nru ceilometer-5.0.0~b2/ceilometer/identity/notifications.py ceilometer-5.0.0~b3/ceilometer/identity/notifications.py --- ceilometer-5.0.0~b2/ceilometer/identity/notifications.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/identity/notifications.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,172 +0,0 @@ -# Copyright 2014 Mirantis Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
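The new exchange_control.py above consolidates the various *_control_exchange options that notification plugins used to register locally; several of those plugins are deleted in this patch, starting with the identity plugin whose removal continues below. Assuming ceilometer.notification is the module that now registers EXCHANGE_OPTS, which is consistent with the trove change earlier in this diff, a remaining consumer would import the shared option roughly like this:

    import oslo_messaging
    from oslo_config import cfg

    # Reuse the consolidated option instead of registering a local copy.
    cfg.CONF.import_opt('sahara_control_exchange', 'ceilometer.notification')

    def get_targets(conf):
        # Same Target construction the removed plugins used, now driven by
        # the shared exchange option.
        return [oslo_messaging.Target(topic=topic,
                                      exchange=conf.sahara_control_exchange)
                for topic in conf.notification_topics]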
- -from oslo_config import cfg -import oslo_messaging - -from ceilometer.agent import plugin_base -from ceilometer import sample - -OPTS = [ - cfg.StrOpt('keystone_control_exchange', - default='keystone', - help="Exchange name for Keystone notifications."), -] - - -cfg.CONF.register_opts(OPTS) - -SERVICE = 'identity' - - -class _Base(plugin_base.NotificationBase, - plugin_base.NonMetricNotificationBase): - """Convert identity notification into Samples.""" - - resource_type = None - resource_name = None - - @staticmethod - def get_targets(conf): - """Return a sequence of oslo_messaging.Target - - Sequence defining the exchange and topics to be connected for this - plugin. - """ - return [oslo_messaging.Target(topic=topic, - exchange=conf.keystone_control_exchange) - for topic in conf.notification_topics] - - -class IdentityCRUD(_Base): - def process_notification(self, message): - user_id = message['payload'].get("initiator", {}).get("id") - yield sample.Sample.from_notification( - name=message['event_type'], - type=sample.TYPE_DELTA, - unit=self.resource_type, - volume=1, - resource_id=message['payload']['resource_info'], - user_id=user_id, - project_id=None, - message=message) - - -class User(IdentityCRUD): - - resource_type = 'user' - resource_name = '%s.%s' % (SERVICE, resource_type) - - @property - def event_types(self): - return ['%s.*' % self.resource_name] - - -class Group(IdentityCRUD): - - resource_type = 'group' - resource_name = '%s.%s' % (SERVICE, resource_type) - - @property - def event_types(self): - return ['%s.*' % self.resource_name] - - -class Project(IdentityCRUD): - - resource_type = 'project' - resource_name = '%s.%s' % (SERVICE, resource_type) - - @property - def event_types(self): - return ['%s.*' % self.resource_name] - - -class Role(IdentityCRUD): - - resource_type = 'role' - resource_name = '%s.%s' % (SERVICE, resource_type) - - @property - def event_types(self): - return ['%s\..*' % self.resource_name] - - -class Trust(IdentityCRUD): - - resource_type = 'OS-TRUST:trust' - resource_name = '%s.%s' % (SERVICE, resource_type) - - @property - def event_types(self): - return [ - '%s.created' % self.resource_name, - '%s.deleted' % self.resource_name, - ] - - -class Authenticate(_Base): - """Convert identity authentication notifications into Samples.""" - - resource_type = 'authenticate' - event_name = '%s.%s' % (SERVICE, resource_type) - - def process_notification(self, message): - outcome = message['payload']['outcome'] - meter_name = '%s.%s.%s' % (SERVICE, self.resource_type, outcome) - - yield sample.Sample.from_notification( - name=meter_name, - type=sample.TYPE_DELTA, - unit='user', - volume=1, - resource_id=message['payload']['initiator']['id'], - user_id=message['payload']['initiator']['id'], - project_id=None, - message=message) - - @property - def event_types(self): - return [self.event_name] - - -class RoleAssignment(_Base): - """Convert role assignment notifications into Samples.""" - - resource_type = 'role_assignment' - resource_name = '%s.%s' % (SERVICE, resource_type) - - def process_notification(self, message): - # NOTE(stevemar): action is created.role_assignment - action = message['payload']['action'] - event, resource_type = action.split(".") - - # NOTE(stevemar): meter_name is identity.role_assignment.created - meter_name = '%s.%s.%s' % (SERVICE, resource_type, event) - - yield sample.Sample.from_notification( - name=meter_name, - type=sample.TYPE_DELTA, - unit=self.resource_type, - volume=1, - resource_id=message['payload']['role'], - 
user_id=message['payload']['initiator']['id'], - project_id=None, - message=message) - - @property - def event_types(self): - return [ - '%s.created' % self.resource_name, - '%s.deleted' % self.resource_name, - ] diff -Nru ceilometer-5.0.0~b2/ceilometer/image/notifications.py ceilometer-5.0.0~b3/ceilometer/image/notifications.py --- ceilometer-5.0.0~b2/ceilometer/image/notifications.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/image/notifications.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,126 +0,0 @@ -# -# Copyright 2012 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Handler for producing image metering messages from glance notification - events. -""" - -from oslo_config import cfg -import oslo_messaging - -from ceilometer.agent import plugin_base -from ceilometer import sample - -OPTS = [ - cfg.StrOpt('glance_control_exchange', - default='glance', - help="Exchange name for Glance notifications."), -] - - -cfg.CONF.register_opts(OPTS) - - -class ImageBase(plugin_base.NotificationBase): - """Base class for image counting.""" - - @staticmethod - def get_targets(conf): - """Return a sequence of oslo_messaging.Target - - This sequence is defining the exchange and topics to be connected for - this plugin. - """ - return [oslo_messaging.Target(topic=topic, - exchange=conf.glance_control_exchange) - for topic in conf.notification_topics] - - -class ImageCRUDBase(ImageBase): - event_types = [ - 'image.update', - 'image.upload', - 'image.delete', - ] - - -class ImageCRUD(ImageCRUDBase, plugin_base.NonMetricNotificationBase): - def process_notification(self, message): - yield sample.Sample.from_notification( - name=message['event_type'], - type=sample.TYPE_DELTA, - unit='image', - volume=1, - resource_id=message['payload']['id'], - user_id=None, - project_id=message['payload']['owner'], - message=message) - - -class Image(ImageCRUDBase, plugin_base.NonMetricNotificationBase): - def process_notification(self, message): - yield sample.Sample.from_notification( - name='image', - type=sample.TYPE_GAUGE, - unit='image', - volume=1, - resource_id=message['payload']['id'], - user_id=None, - project_id=message['payload']['owner'], - message=message) - - -class ImageSize(ImageCRUDBase): - def process_notification(self, message): - yield sample.Sample.from_notification( - name='image.size', - type=sample.TYPE_GAUGE, - unit='B', - volume=message['payload']['size'], - resource_id=message['payload']['id'], - user_id=None, - project_id=message['payload']['owner'], - message=message) - - -class ImageDownload(ImageBase): - """Emit image_download sample when an image is downloaded.""" - event_types = ['image.send'] - - def process_notification(self, message): - yield sample.Sample.from_notification( - name='image.download', - type=sample.TYPE_DELTA, - unit='B', - volume=message['payload']['bytes_sent'], - resource_id=message['payload']['image_id'], - user_id=message['payload']['receiver_user_id'], - project_id=message['payload']['receiver_tenant_id'], - message=message) - 
- -class ImageServe(ImageBase): - """Emit image_serve sample when an image is served out.""" - event_types = ['image.send'] - - def process_notification(self, message): - yield sample.Sample.from_notification( - name='image.serve', - type=sample.TYPE_DELTA, - unit='B', - volume=message['payload']['bytes_sent'], - resource_id=message['payload']['image_id'], - user_id=None, - project_id=message['payload']['owner_id'], - message=message) diff -Nru ceilometer-5.0.0~b2/ceilometer/ipmi/pollsters/node.py ceilometer-5.0.0~b3/ceilometer/ipmi/pollsters/node.py --- ceilometer-5.0.0~b2/ceilometer/ipmi/pollsters/node.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/ipmi/pollsters/node.py 2015-09-03 13:05:55.000000000 +0000 @@ -69,7 +69,7 @@ if (CONF.ipmi.polling_retry >= 0 and self.polling_failures > CONF.ipmi.polling_retry): LOG.warning(_('Pollster for %s is disabled!') % self.NAME) - raise plugin_base.PollsterPermanentError(resources[0]) + raise plugin_base.PollsterPermanentError(resources) else: return diff -Nru ceilometer-5.0.0~b2/ceilometer/ipmi/pollsters/sensor.py ceilometer-5.0.0~b3/ceilometer/ipmi/pollsters/sensor.py --- ceilometer-5.0.0~b2/ceilometer/ipmi/pollsters/sensor.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/ipmi/pollsters/sensor.py 2015-09-03 13:05:55.000000000 +0000 @@ -72,7 +72,7 @@ if (CONF.ipmi.polling_retry >= 0 and self.polling_failures > CONF.ipmi.polling_retry): LOG.warning(_('Pollster for %s is disabled!') % self.METRIC) - raise plugin_base.PollsterPermanentError(resources[0]) + raise plugin_base.PollsterPermanentError(resources) else: return diff -Nru ceilometer-5.0.0~b2/ceilometer/key_value_storage/notifications.py ceilometer-5.0.0~b3/ceilometer/key_value_storage/notifications.py --- ceilometer-5.0.0~b2/ceilometer/key_value_storage/notifications.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/key_value_storage/notifications.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,79 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -import oslo_messaging - -from ceilometer.agent import plugin_base -from ceilometer import sample - -OPTS = [ - cfg.StrOpt('magnetodb_control_exchange', - default='magnetodb', - help="Exchange name for Magnetodb notifications."), -] - - -cfg.CONF.register_opts(OPTS) - - -class _Base(plugin_base.NotificationBase): - """Convert magnetodb notification into Samples.""" - - @staticmethod - def get_targets(conf): - """Return a sequence of oslo_messaging.Target - - Sequence defining the exchange and topics to be connected for this - plugin. 
- """ - return [oslo_messaging.Target(topic=topic, - exchange=conf.magnetodb_control_exchange) - for topic in conf.notification_topics] - - -class Table(_Base, plugin_base.NonMetricNotificationBase): - - event_types = [ - 'magnetodb.table.create.end', - 'magnetodb.table.delete.end' - ] - - def process_notification(self, message): - meter_name = '.'.join(message['event_type'].split('.')[:-1]) - yield sample.Sample.from_notification( - name=meter_name, - type=sample.TYPE_GAUGE, - unit='table', - volume=1, - resource_id=message['payload']['table_uuid'], - user_id=message['_context_user'], - project_id=message['_context_tenant'], - message=message) - - -class Index(_Base): - - event_types = [ - 'magnetodb.table.create.end' - ] - - def process_notification(self, message): - yield sample.Sample.from_notification( - name='magnetodb.table.index.count', - type=sample.TYPE_GAUGE, - unit='index', - volume=message['payload']['index_count'], - resource_id=message['payload']['table_uuid'], - user_id=message['_context_user'], - project_id=message['_context_tenant'], - message=message) diff -Nru ceilometer-5.0.0~b2/ceilometer/locale/ceilometer-log-critical.pot ceilometer-5.0.0~b3/ceilometer/locale/ceilometer-log-critical.pot --- ceilometer-5.0.0~b2/ceilometer/locale/ceilometer-log-critical.pot 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/locale/ceilometer-log-critical.pot 2015-09-03 13:05:55.000000000 +0000 @@ -1,19 +1,19 @@ # Translations template for ceilometer. -# Copyright (C) 2014 ORGANIZATION +# Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. -# FIRST AUTHOR , 2014. +# FIRST AUTHOR , 2015. # #, fuzzy msgid "" msgstr "" -"Project-Id-Version: ceilometer 2014.2.dev50.ga2139d8\n" +"Project-Id-Version: ceilometer 5.0.0.0b2.dev138\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-06-19 06:01+0000\n" +"POT-Creation-Date: 2015-07-29 06:35+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 1.3\n" +"Generated-By: Babel 2.0\n" diff -Nru ceilometer-5.0.0~b2/ceilometer/locale/ceilometer-log-error.pot ceilometer-5.0.0~b3/ceilometer/locale/ceilometer-log-error.pot --- ceilometer-5.0.0~b2/ceilometer/locale/ceilometer-log-error.pot 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/locale/ceilometer-log-error.pot 2015-09-03 13:05:55.000000000 +0000 @@ -6,16 +6,16 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: ceilometer 5.0.0.0b2.dev94\n" +"Project-Id-Version: ceilometer 5.0.0.0b3.dev38\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-07-20 06:09+0000\n" +"POT-Creation-Date: 2015-08-06 06:33+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 1.3\n" +"Generated-By: Babel 2.0\n" #: ceilometer/collector.py:168 #, python-format @@ -34,7 +34,7 @@ msgid "Error getting group membership info from coordination backend." 
msgstr "" -#: ceilometer/service_base.py:65 +#: ceilometer/service_base.py:70 #, python-format msgid "Unable to load changed pipeline: %s" msgstr "" @@ -64,6 +64,26 @@ msgid "Error processing event and it will be dropped: %s" msgstr "" +#: ceilometer/dispatcher/gnocchi.py:149 ceilometer/meter/notifications.py:80 +#, python-format +msgid "Parse error in JSONPath specification '%(jsonpath)s': %(err)s" +msgstr "" + +#: ceilometer/dispatcher/gnocchi.py:161 ceilometer/meter/notifications.py:62 +#, python-format +msgid "Required field %s not specified" +msgstr "" + +#: ceilometer/dispatcher/gnocchi.py:164 +#, python-format +msgid "Required field %(field)s should be a %(type)s" +msgstr "" + +#: ceilometer/dispatcher/gnocchi.py:321 +#, python-format +msgid "Fail to post measures for %(resource_id)s/%(metric_name)s" +msgstr "" + #: ceilometer/dispatcher/http.py:136 msgid "Status Code: %{code}s. Failed to dispatch event: %{event}s" msgstr "" @@ -77,43 +97,33 @@ msgid "Failed to record event: %s" msgstr "" -#: ceilometer/meter/notifications.py:61 -#, python-format -msgid "Required field %s not specified" -msgstr "" - -#: ceilometer/meter/notifications.py:79 -#, python-format -msgid "Parse error in JSONPath specification '%(jsonpath)s': %(err)s" -msgstr "" - -#: ceilometer/meter/notifications.py:90 +#: ceilometer/meter/notifications.py:91 #, python-format msgid "Invalid type %s specified" msgstr "" -#: ceilometer/meter/notifications.py:104 +#: ceilometer/meter/notifications.py:105 #, python-format msgid "Meter Definitions configuration file: %s" msgstr "" -#: ceilometer/meter/notifications.py:114 +#: ceilometer/meter/notifications.py:115 #, python-format msgid "" "Invalid YAML syntax in Meter Definitions file %(file)s at line: %(line)s," " column: %(column)s." msgstr "" -#: ceilometer/meter/notifications.py:120 +#: ceilometer/meter/notifications.py:121 #, python-format msgid "YAML error reading Meter Definitions file %(file)s" msgstr "" -#: ceilometer/meter/notifications.py:127 +#: ceilometer/meter/notifications.py:128 msgid "No Meter Definitions configuration file found! Using default config." msgstr "" -#: ceilometer/meter/notifications.py:131 +#: ceilometer/meter/notifications.py:132 #, python-format msgid "Meter Definitions: %s" msgstr "" diff -Nru ceilometer-5.0.0~b2/ceilometer/locale/ceilometer-log-info.pot ceilometer-5.0.0~b3/ceilometer/locale/ceilometer-log-info.pot --- ceilometer-5.0.0~b2/ceilometer/locale/ceilometer-log-info.pot 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/locale/ceilometer-log-info.pot 2015-09-03 13:05:55.000000000 +0000 @@ -6,16 +6,16 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: ceilometer 5.0.0.0b2.dev94\n" +"Project-Id-Version: ceilometer 5.0.0.0b2.dev138\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-07-20 06:09+0000\n" +"POT-Creation-Date: 2015-07-29 06:34+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 1.3\n" +"Generated-By: Babel 2.0\n" #: ceilometer/coordination.py:76 msgid "Coordination backend started successfully." @@ -43,7 +43,7 @@ msgid "Detected change in pipeline configuration." msgstr "" -#: ceilometer/agent/base.py:378 +#: ceilometer/agent/base.py:394 msgid "Reconfiguring polling tasks." 
msgstr "" @@ -86,7 +86,7 @@ msgid "Dropping event data with TTL %d" msgstr "" -#: ceilometer/event/storage/impl_sqlalchemy.py:411 +#: ceilometer/event/storage/impl_sqlalchemy.py:415 #, python-format msgid "%d events are removed from database" msgstr "" diff -Nru ceilometer-5.0.0~b2/ceilometer/locale/ceilometer-log-warning.pot ceilometer-5.0.0~b3/ceilometer/locale/ceilometer-log-warning.pot --- ceilometer-5.0.0~b2/ceilometer/locale/ceilometer-log-warning.pot 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/locale/ceilometer-log-warning.pot 2015-09-03 13:05:55.000000000 +0000 @@ -6,18 +6,18 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: ceilometer 5.0.0.0b2.dev56\n" +"Project-Id-Version: ceilometer 5.0.0.0b2.dev138\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-07-10 06:11+0000\n" +"POT-Creation-Date: 2015-07-29 06:35+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 1.3\n" +"Generated-By: Babel 2.0\n" -#: ceilometer/notification.py:157 +#: ceilometer/notification.py:173 msgid "" "Non-metric meters may be collected. It is highly advisable to disable " "these meters using ceilometer.conf or the pipeline.yaml" @@ -39,6 +39,26 @@ "with False" msgstr "" +#: ceilometer/api/controllers/v2/root.py:122 +msgid "Can't connect to keystone, assuming gnocchi is disabled and retry later" +msgstr "" + +#: ceilometer/api/controllers/v2/root.py:126 +msgid "" +"ceilometer-api started with gnocchi enabled. The resources/meters/samples" +" URLs are disabled." +msgstr "" + +#: ceilometer/api/controllers/v2/root.py:147 +msgid "Can't connect to keystone, assuming aodh is disabled and retry later." +msgstr "" + +#: ceilometer/api/controllers/v2/root.py:150 +msgid "" +"ceilometer-api started with aodh enabled. Alarms URLs will be redirected " +"to aodh endpoint." 
+msgstr "" + #: ceilometer/cmd/eventlet/polling.py:51 #, python-format msgid "Duplicated values: %s found in CLI options, auto de-duplidated" diff -Nru ceilometer-5.0.0~b2/ceilometer/locale/ceilometer.pot ceilometer-5.0.0~b3/ceilometer/locale/ceilometer.pot --- ceilometer-5.0.0~b2/ceilometer/locale/ceilometer.pot 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/locale/ceilometer.pot 2015-09-03 13:05:55.000000000 +0000 @@ -6,16 +6,16 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: ceilometer 5.0.0.0b2.dev116\n" +"Project-Id-Version: ceilometer 5.0.0.0b3.dev38\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-07-24 06:28+0000\n" +"POT-Creation-Date: 2015-08-06 06:33+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 1.3\n" +"Generated-By: Babel 2.0\n" #: ceilometer/collector.py:125 #, python-format @@ -31,75 +31,75 @@ msgid "UDP: Unable to store meter" msgstr "" -#: ceilometer/notification.py:185 +#: ceilometer/notification.py:188 #, python-format msgid "Failed to load any notification handlers for %s" msgstr "" -#: ceilometer/notification.py:201 +#: ceilometer/notification.py:204 #, python-format msgid "Event types from %(name)s: %(type)s (ack_on_error=%(error)s)" msgstr "" -#: ceilometer/notification.py:235 +#: ceilometer/notification.py:240 #, python-format msgid "Pipeline endpoint: %s" msgstr "" -#: ceilometer/pipeline.py:381 +#: ceilometer/pipeline.py:397 #, python-format msgid "Unable to load publisher %s" msgstr "" -#: ceilometer/pipeline.py:399 +#: ceilometer/pipeline.py:415 #, python-format msgid "" "Pipeline %(pipeline)s: Setup transformer instance %(name)s with parameter" " %(param)s" msgstr "" -#: ceilometer/pipeline.py:418 ceilometer/pipeline.py:482 +#: ceilometer/pipeline.py:434 ceilometer/pipeline.py:498 #, python-format msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" msgstr "" -#: ceilometer/pipeline.py:437 +#: ceilometer/pipeline.py:453 #, python-format msgid "Pipeline %(pipeline)s: Sample dropped by transformer %(trans)s" msgstr "" -#: ceilometer/pipeline.py:445 +#: ceilometer/pipeline.py:461 #, python-format msgid "" "Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " "%(smp)s" msgstr "" -#: ceilometer/pipeline.py:468 +#: ceilometer/pipeline.py:484 #, python-format msgid "Pipeline %(pipeline)s: Transform sample %(smp)s from %(trans)s transformer" msgstr "" -#: ceilometer/pipeline.py:498 +#: ceilometer/pipeline.py:514 #, python-format msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" msgstr "" -#: ceilometer/pipeline.py:663 ceilometer/pipeline.py:725 +#: ceilometer/pipeline.py:679 ceilometer/pipeline.py:741 msgid "detected decoupled pipeline config format" msgstr "" -#: ceilometer/pipeline.py:743 +#: ceilometer/pipeline.py:759 #, python-format msgid "Pipeline config file: %s" msgstr "" -#: ceilometer/pipeline.py:749 ceilometer/pipeline.py:768 +#: ceilometer/pipeline.py:765 ceilometer/pipeline.py:784 #, python-format msgid "Pipeline config: %s" msgstr "" -#: ceilometer/pipeline.py:762 +#: ceilometer/pipeline.py:778 #, python-format msgid "Polling config file: %s" msgstr "" @@ -119,54 +119,54 @@ msgid "Active pipeline config's hash is %s" msgstr "" -#: ceilometer/agent/base.py:126 +#: ceilometer/agent/base.py:137 #, python-format msgid "Polling pollster %(poll)s in the context of 
%(src)s" msgstr "" -#: ceilometer/agent/base.py:154 +#: ceilometer/agent/base.py:165 #, python-format msgid "" "Found following duplicated resoures for %(name)s in context of " "%(source)s:%(list)s. Check pipeline configuration." msgstr "" -#: ceilometer/agent/base.py:165 +#: ceilometer/agent/base.py:176 #, python-format msgid "Skip polling pollster %s, no resources found" msgstr "" -#: ceilometer/agent/base.py:188 +#: ceilometer/agent/base.py:201 #, python-format msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" msgstr "" -#: ceilometer/agent/base.py:194 +#: ceilometer/agent/base.py:207 #, python-format msgid "Continue after error from %(name)s: %(error)s" msgstr "" -#: ceilometer/agent/base.py:253 +#: ceilometer/agent/base.py:277 #, python-format msgid "Skip loading extension for %s" msgstr "" -#: ceilometer/agent/base.py:257 +#: ceilometer/agent/base.py:281 #, python-format msgid "Failed to import extension for %(name)s: %(error)s" msgstr "" -#: ceilometer/agent/base.py:381 +#: ceilometer/agent/base.py:405 #, python-format msgid "Unable to discover resources: %s" msgstr "" -#: ceilometer/agent/base.py:383 +#: ceilometer/agent/base.py:407 #, python-format msgid "Unknown discovery extension: %s" msgstr "" -#: ceilometer/agent/base.py:392 +#: ceilometer/agent/base.py:416 msgid "Error stopping pollster." msgstr "" @@ -751,7 +751,7 @@ msgid "checking net info for instance %s" msgstr "" -#: ceilometer/compute/virt/inspector.py:291 +#: ceilometer/compute/virt/inspector.py:302 #, python-format msgid "Unable to load the hypervisor inspector: %s" msgstr "" @@ -864,39 +864,45 @@ msgid "The Yaml file that defines per metric archive policies." msgstr "" -#: ceilometer/dispatcher/gnocchi.py:141 +#: ceilometer/dispatcher/gnocchi.py:60 +msgid "" +"The Yaml file that defines mapping between samples and gnocchi " +"resources/metrics" +msgstr "" + +#: ceilometer/dispatcher/gnocchi.py:80 msgid "No archive policy file found! Using default config." 
msgstr "" -#: ceilometer/dispatcher/gnocchi.py:311 +#: ceilometer/dispatcher/gnocchi_client.py:112 #, python-format msgid "" "The metric %(metric_name)s of resource %(resource_id)s doesn't exists: " "%(status_code)d" msgstr "" -#: ceilometer/dispatcher/gnocchi.py:320 +#: ceilometer/dispatcher/gnocchi_client.py:121 #, python-format msgid "" "Fail to post measure on metric %(metric_name)s of resource " "%(resource_id)s with status: %(status_code)d: %(msg)s" msgstr "" -#: ceilometer/dispatcher/gnocchi.py:343 +#: ceilometer/dispatcher/gnocchi_client.py:145 #, python-format msgid "" "Resource %(resource_id)s creation failed with status: %(status_code)d: " "%(msg)s" msgstr "" -#: ceilometer/dispatcher/gnocchi.py:361 +#: ceilometer/dispatcher/gnocchi_client.py:164 #, python-format msgid "" "Resource %(resource_id)s update failed with status: %(status_code)d: " "%(msg)s" msgstr "" -#: ceilometer/dispatcher/gnocchi.py:383 +#: ceilometer/dispatcher/gnocchi_client.py:188 #, python-format msgid "" "Fail to create metric %(metric_name)s of resource %(resource_id)s with " @@ -995,7 +1001,7 @@ msgid "Loading event definitions" msgstr "" -#: ceilometer/hardware/discovery.py:74 +#: ceilometer/hardware/discovery.py:89 #, python-format msgid "Couldn't obtain IP address of instance %s" msgstr "" @@ -1178,7 +1184,7 @@ msgid "Dropping metering data with TTL %d" msgstr "" -#: ceilometer/storage/impl_mongodb.py:557 +#: ceilometer/storage/impl_mongodb.py:315 msgid "" "Clearing expired metering data is based on native MongoDB time to live " "feature and going in background." @@ -1216,30 +1222,35 @@ msgid "Cannot create table %(table_name)s it already exists. Ignoring error" msgstr "" -#: ceilometer/storage/mongo/utils.py:248 +#: ceilometer/storage/mongo/utils.py:261 #, python-format msgid "Connecting to %(db)s on %(nodelist)s" msgstr "" -#: ceilometer/storage/mongo/utils.py:263 +#: ceilometer/storage/mongo/utils.py:276 #, python-format msgid "Unable to connect to the database server: %(errmsg)s." msgstr "" -#: ceilometer/storage/mongo/utils.py:402 +#: ceilometer/storage/mongo/utils.py:415 #, python-format msgid "" "Unable to reconnect to the primary mongodb after %(retries)d retries. " "Giving up." msgstr "" -#: ceilometer/storage/mongo/utils.py:406 +#: ceilometer/storage/mongo/utils.py:419 #, python-format msgid "" "Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " "%(retry_interval)d seconds." msgstr "" +#: ceilometer/storage/mongo/utils.py:466 +#, python-format +msgid "Index %s will be recreate." 
+msgstr "" + #: ceilometer/transformer/arithmetic.py:55 #, python-format msgid "Arithmetic transformer must use at least one meter in expression '%s'" diff -Nru ceilometer-5.0.0~b2/ceilometer/locale/en_GB/LC_MESSAGES/ceilometer-log-error.po ceilometer-5.0.0~b3/ceilometer/locale/en_GB/LC_MESSAGES/ceilometer-log-error.po --- ceilometer-5.0.0~b2/ceilometer/locale/en_GB/LC_MESSAGES/ceilometer-log-error.po 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/locale/en_GB/LC_MESSAGES/ceilometer-log-error.po 2015-09-03 13:05:55.000000000 +0000 @@ -8,16 +8,16 @@ msgstr "" "Project-Id-Version: Ceilometer\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-07-24 06:28+0000\n" -"PO-Revision-Date: 2015-07-16 19:11+0000\n" +"POT-Creation-Date: 2015-08-06 06:33+0000\n" +"PO-Revision-Date: 2015-07-31 22:06+0000\n" "Last-Translator: openstackjenkins \n" -"Language-Team: English (United Kingdom) (http://www.transifex.com/projects/p/" +"Language-Team: English (United Kingdom) (http://www.transifex.com/openstack/" "ceilometer/language/en_GB/)\n" "Language: en_GB\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 1.3\n" +"Generated-By: Babel 2.0\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" msgid "Error connecting to coordination backend." diff -Nru ceilometer-5.0.0~b2/ceilometer/locale/en_GB/LC_MESSAGES/ceilometer-log-warning.po ceilometer-5.0.0~b3/ceilometer/locale/en_GB/LC_MESSAGES/ceilometer-log-warning.po --- ceilometer-5.0.0~b2/ceilometer/locale/en_GB/LC_MESSAGES/ceilometer-log-warning.po 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/locale/en_GB/LC_MESSAGES/ceilometer-log-warning.po 2015-09-03 13:05:55.000000000 +0000 @@ -8,16 +8,16 @@ msgstr "" "Project-Id-Version: Ceilometer\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-07-24 06:28+0000\n" -"PO-Revision-Date: 2015-06-23 16:41+0000\n" +"POT-Creation-Date: 2015-08-06 06:33+0000\n" +"PO-Revision-Date: 2015-07-24 13:32+0000\n" "Last-Translator: openstackjenkins \n" -"Language-Team: English (United Kingdom) (http://www.transifex.com/projects/p/" +"Language-Team: English (United Kingdom) (http://www.transifex.com/openstack/" "ceilometer/language/en_GB/)\n" "Language: en_GB\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 1.3\n" +"Generated-By: Babel 2.0\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" #, python-format diff -Nru ceilometer-5.0.0~b2/ceilometer/locale/en_GB/LC_MESSAGES/ceilometer.po ceilometer-5.0.0~b3/ceilometer/locale/en_GB/LC_MESSAGES/ceilometer.po --- ceilometer-5.0.0~b2/ceilometer/locale/en_GB/LC_MESSAGES/ceilometer.po 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/locale/en_GB/LC_MESSAGES/ceilometer.po 2015-09-03 13:05:55.000000000 +0000 @@ -8,16 +8,16 @@ msgstr "" "Project-Id-Version: Ceilometer\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-07-24 06:28+0000\n" -"PO-Revision-Date: 2015-07-22 19:43+0000\n" +"POT-Creation-Date: 2015-08-06 06:33+0000\n" +"PO-Revision-Date: 2015-08-04 13:56+0000\n" "Last-Translator: openstackjenkins \n" -"Language-Team: English (United Kingdom) (http://www.transifex.com/projects/p/" +"Language-Team: English (United Kingdom) (http://www.transifex.com/openstack/" "ceilometer/language/en_GB/)\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" 
"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 1.3\n" +"Generated-By: Babel 2.0\n" #, python-format msgid "%(entity)s %(id)s Not Found" diff -Nru ceilometer-5.0.0~b2/ceilometer/locale/fr/LC_MESSAGES/ceilometer-log-info.po ceilometer-5.0.0~b3/ceilometer/locale/fr/LC_MESSAGES/ceilometer-log-info.po --- ceilometer-5.0.0~b2/ceilometer/locale/fr/LC_MESSAGES/ceilometer-log-info.po 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/locale/fr/LC_MESSAGES/ceilometer-log-info.po 2015-09-03 13:05:55.000000000 +0000 @@ -10,16 +10,16 @@ msgstr "" "Project-Id-Version: Ceilometer\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-07-24 06:28+0000\n" +"POT-Creation-Date: 2015-08-06 06:33+0000\n" "PO-Revision-Date: 2015-07-13 14:33+0000\n" "Last-Translator: openstackjenkins \n" -"Language-Team: French (http://www.transifex.com/projects/p/ceilometer/" +"Language-Team: French (http://www.transifex.com/openstack/ceilometer/" "language/fr/)\n" "Language: fr\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 1.3\n" +"Generated-By: Babel 2.0\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" #, python-format diff -Nru ceilometer-5.0.0~b2/ceilometer/locale/fr/LC_MESSAGES/ceilometer-log-warning.po ceilometer-5.0.0~b3/ceilometer/locale/fr/LC_MESSAGES/ceilometer-log-warning.po --- ceilometer-5.0.0~b2/ceilometer/locale/fr/LC_MESSAGES/ceilometer-log-warning.po 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/locale/fr/LC_MESSAGES/ceilometer-log-warning.po 2015-09-03 13:05:55.000000000 +0000 @@ -9,16 +9,16 @@ msgstr "" "Project-Id-Version: Ceilometer\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-07-24 06:28+0000\n" -"PO-Revision-Date: 2015-06-23 16:41+0000\n" +"POT-Creation-Date: 2015-08-06 06:33+0000\n" +"PO-Revision-Date: 2015-07-24 13:32+0000\n" "Last-Translator: openstackjenkins \n" -"Language-Team: French (http://www.transifex.com/projects/p/ceilometer/" +"Language-Team: French (http://www.transifex.com/openstack/ceilometer/" "language/fr/)\n" "Language: fr\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 1.3\n" +"Generated-By: Babel 2.0\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" #, python-format diff -Nru ceilometer-5.0.0~b2/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer-log-error.po ceilometer-5.0.0~b3/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer-log-error.po --- ceilometer-5.0.0~b2/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer-log-error.po 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer-log-error.po 2015-09-03 13:05:55.000000000 +0000 @@ -9,16 +9,16 @@ msgstr "" "Project-Id-Version: Ceilometer\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-07-24 06:28+0000\n" -"PO-Revision-Date: 2015-07-16 19:11+0000\n" +"POT-Creation-Date: 2015-08-06 06:33+0000\n" +"PO-Revision-Date: 2015-07-31 22:06+0000\n" "Last-Translator: openstackjenkins \n" -"Language-Team: Chinese (China) (http://www.transifex.com/projects/p/" +"Language-Team: Chinese (China) (http://www.transifex.com/openstack/" "ceilometer/language/zh_CN/)\n" "Language: zh_CN\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 1.3\n" +"Generated-By: Babel 2.0\n" "Plural-Forms: nplurals=1; plural=0;\n" #, python-format diff -Nru 
ceilometer-5.0.0~b2/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer-log-info.po ceilometer-5.0.0~b3/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer-log-info.po --- ceilometer-5.0.0~b2/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer-log-info.po 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer-log-info.po 2015-09-03 13:05:55.000000000 +0000 @@ -9,16 +9,16 @@ msgstr "" "Project-Id-Version: Ceilometer\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-07-24 06:28+0000\n" +"POT-Creation-Date: 2015-08-06 06:33+0000\n" "PO-Revision-Date: 2015-07-13 14:33+0000\n" "Last-Translator: openstackjenkins \n" -"Language-Team: Chinese (China) (http://www.transifex.com/projects/p/" +"Language-Team: Chinese (China) (http://www.transifex.com/openstack/" "ceilometer/language/zh_CN/)\n" "Language: zh_CN\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 1.3\n" +"Generated-By: Babel 2.0\n" "Plural-Forms: nplurals=1; plural=0;\n" #, python-format diff -Nru ceilometer-5.0.0~b2/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer-log-warning.po ceilometer-5.0.0~b3/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer-log-warning.po --- ceilometer-5.0.0~b2/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer-log-warning.po 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer-log-warning.po 2015-09-03 13:05:55.000000000 +0000 @@ -8,16 +8,16 @@ msgstr "" "Project-Id-Version: Ceilometer\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-07-24 06:28+0000\n" -"PO-Revision-Date: 2015-06-23 16:41+0000\n" +"POT-Creation-Date: 2015-08-06 06:33+0000\n" +"PO-Revision-Date: 2015-07-24 13:32+0000\n" "Last-Translator: openstackjenkins \n" -"Language-Team: Chinese (China) (http://www.transifex.com/projects/p/" +"Language-Team: Chinese (China) (http://www.transifex.com/openstack/" "ceilometer/language/zh_CN/)\n" "Language: zh_CN\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 1.3\n" +"Generated-By: Babel 2.0\n" "Plural-Forms: nplurals=1; plural=0;\n" #, python-format diff -Nru ceilometer-5.0.0~b2/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer.po ceilometer-5.0.0~b3/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer.po --- ceilometer-5.0.0~b2/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer.po 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer.po 2015-09-03 13:05:55.000000000 +0000 @@ -12,16 +12,16 @@ msgstr "" "Project-Id-Version: Ceilometer\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-07-24 06:28+0000\n" -"PO-Revision-Date: 2015-07-22 19:43+0000\n" +"POT-Creation-Date: 2015-08-06 06:33+0000\n" +"PO-Revision-Date: 2015-08-04 13:56+0000\n" "Last-Translator: openstackjenkins \n" -"Language-Team: Chinese (China) (http://www.transifex.com/projects/p/" +"Language-Team: Chinese (China) (http://www.transifex.com/openstack/" "ceilometer/language/zh_CN/)\n" "Plural-Forms: nplurals=1; plural=0\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 1.3\n" +"Generated-By: Babel 2.0\n" #, python-format msgid "%(entity)s %(id)s Not Found" diff -Nru ceilometer-5.0.0~b2/ceilometer/meter/data/meters.yaml ceilometer-5.0.0~b3/ceilometer/meter/data/meters.yaml --- ceilometer-5.0.0~b2/ceilometer/meter/data/meters.yaml 1970-01-01 
00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/meter/data/meters.yaml 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,721 @@ +--- + +metric: + # Image + - name: "image.size" + event_type: + - "image.upload" + - "image.delete" + - "image.update" + type: "gauge" + unit: B + volume: $.payload.size + resource_id: $.payload.id + project_id: $.payload.owner + + - name: "image.download" + event_type: "image.send" + type: "delta" + unit: "B" + volume: $.payload.bytes_sent + resource_id: $.payload.image_id + user_id: $.payload.receiver_user_id + project_id: $.payload.receiver_tenant_id + + - name: "image.serve" + event_type: "image.send" + type: "delta" + unit: "B" + volume: $.payload.bytes_sent + resource_id: $.payload.image_id + project_id: $.payload.owner_id + + # MagnetoDB + - name: 'magnetodb.table.index.count' + type: 'gauge' + unit: 'index' + event_type: 'magnetodb.table.create.end' + volume: $.payload.index_count + resource_id: $.payload.table_uuid + user_id: $._context_user + + - name: 'volume.size' + event_type: + - 'volume.exists' + - 'volume.create.*' + - 'volume.delete.*' + - 'volume.resize.*' + - 'volume.attach.*' + - 'volume.detach.*' + - 'volume.update.*' + type: 'gauge' + unit: 'GB' + volume: $.payload.size + user_id: $.payload.user_id + project_id: $.payload.tenant_id + resource_id: $.payload.volume_id + + - name: 'snapshot.size' + event_type: + - 'snapshot.exists' + - 'snapshot.create.*' + - 'snapshot.delete.*' + type: 'gauge' + unit: 'GB' + volume: $.payload.volume_size + user_id: $.payload.user_id + project_id: $.payload.tenant_id + resource_id: $.payload.snapshot_id + + # Magnum + - name: $.payload.metrics.[*].name + event_type: 'magnum.bay.metrics.*' + type: 'gauge' + unit: $.payload.metrics.[*].unit + volume: $.payload.metrics.[*].value + user_id: $.payload.user_id + project_id: $.payload.project_id + resource_id: $.payload.resource_id + lookup: ['name', 'unit', 'volume'] + + # Swift + - name: $.payload.measurements.[*].metric.[*].name + event_type: 'objectstore.http.request' + type: 'delta' + unit: $.payload.measurements.[*].metric.[*].unit + volume: $.payload.measurements.[*].result + resource_id: $.payload.target.id + user_id: $.payload.initiator.id + project_id: $.payload.initiator.project_id + lookup: ['name', 'unit', 'volume'] + + - name: 'memory' + event_type: 'compute.instance.*' + type: 'gauge' + unit: 'MB' + volume: $.payload.memory_mb + user_id: $.payload.user_id + project_id: $.payload.tenant_id + resource_id: $.payload.instance_id + + - name: 'vcpus' + event_type: 'compute.instance.*' + type: 'gauge' + unit: 'vcpu' + volume: $.payload.vcpus + user_id: $.payload.user_id + project_id: $.payload.tenant_id + resource_id: $.payload.instance_id + + - name: 'disk.root.size' + event_type: 'compute.instance.*' + type: 'gauge' + unit: 'GB' + volume: $.payload.root_gb + user_id: $.payload.user_id + project_id: $.payload.tenant_id + resource_id: $.payload.instance_id + + - name: 'disk.ephemeral.size' + event_type: 'compute.instance.*' + type: 'gauge' + unit: 'GB' + volume: $.payload.ephemeral_gb + user_id: $.payload.user_id + project_id: $.payload.tenant_id + resource_id: $.payload.instance_id + + - name: 'bandwidth' + event_type: 'l3.meter' + type: 'delta' + unit: 'B' + volume: $.payload.bytes + project_id: $.payload.tenant_id + resource_id: $.payload.label_id + + - name: 'compute.node.cpu.frequency' + event_type: 'compute.metrics.update' + type: 'gauge' + unit: 'MHz' + volume: $.payload.metrics[?(@.name='cpu.frequency')].value + resource_id: $.payload.host + 
"_" + $.payload.nodename + + - name: 'compute.node.cpu.user.time' + event_type: 'compute.metrics.update' + type: 'cumulative' + unit: 'ns' + volume: $.payload.metrics[?(@.name='cpu.user.time')].value + resource_id: $.payload.host + "_" + $.payload.nodename + + - name: 'compute.node.cpu.kernel.time' + event_type: 'compute.metrics.update' + type: 'cumulative' + unit: 'ns' + volume: $.payload.metrics[?(@.name='cpu.kernel.time')].value + resource_id: $.payload.host + "_" + $.payload.nodename + + - name: 'compute.node.cpu.idle.time' + event_type: 'compute.metrics.update' + type: 'cumulative' + unit: 'ns' + volume: $.payload.metrics[?(@.name='cpu.idle.time')].value + resource_id: $.payload.host + "_" + $.payload.nodename + + - name: 'compute.node.cpu.iowait.time' + event_type: 'compute.metrics.update' + type: 'cumulative' + unit: 'ns' + volume: $.payload.metrics[?(@.name='cpu.iowait.time')].value + resource_id: $.payload.host + "_" + $.payload.nodename + + - name: 'compute.node.cpu.kernel.percent' + event_type: 'compute.metrics.update' + type: 'gauge' + unit: 'percent' + volume: $.payload.metrics[?(@.name='cpu.kernel.percent')].value * 100 + resource_id: $.payload.host + "_" + $.payload.nodename + + - name: 'compute.node.cpu.idle.percent' + event_type: 'compute.metrics.update' + type: 'gauge' + unit: 'percent' + volume: $.payload.metrics[?(@.name='cpu.idle.percent')].value * 100 + resource_id: $.payload.host + "_" + $.payload.nodename + + - name: 'compute.node.cpu.user.percent' + event_type: 'compute.metrics.update' + type: 'gauge' + unit: 'percent' + volume: $.payload.metrics[?(@.name='cpu.user.percent')].value * 100 + resource_id: $.payload.host + "_" + $.payload.nodename + + - name: 'compute.node.cpu.iowait.percent' + event_type: 'compute.metrics.update' + type: 'gauge' + unit: 'percent' + volume: $.payload.metrics[?(@.name='cpu.iowait.percent')].value * 100 + resource_id: $.payload.host + "_" + $.payload.nodename + + - name: 'compute.node.cpu.percent' + event_type: 'compute.metrics.update' + type: 'gauge' + unit: 'percent' + volume: $.payload.metrics[?(@.name='cpu.percent')].value * 100 + resource_id: $.payload.host + "_" + $.payload.nodename + + +# NOTE: non-metric meters are generally events/existence meters +# These are expected to be DEPRECATED in future releases +# + # Image + - name: "image" + event_type: + - "image.upload" + - "image.delete" + - "image.update" + type: "gauge" + unit: 'image' + volume: 1 + resource_id: $.payload.id + project_id: $.payload.owner + + # Orchestration + - name: 'stack.create' + event_type: + - 'orchestration.stack.create.end' + type: 'delta' + unit: 'stack' + volume: 1 + user_id: _context_trustor_user_id + project_id: $.payload.tenant_id + resource_id: $.payload.stack_identity + + - name: 'stack.update' + event_type: + - 'orchestration.stack.update.end' + type: 'delta' + unit: 'stack' + volume: 1 + user_id: _context_trustor_user_id + project_id: $.payload.tenant_id + resource_id: $.payload.stack_identity + + - name: 'stack.delete' + event_type: + - 'orchestration.stack.delete.end' + type: 'delta' + unit: 'stack' + volume: 1 + user_id: _context_trustor_user_id + project_id: $.payload.tenant_id + resource_id: $.payload.stack_identity + + - name: 'stack.resume' + event_type: + - 'orchestration.stack.resume.end' + type: 'delta' + unit: 'stack' + volume: 1 + user_id: _context_trustor_user_id + project_id: $.payload.tenant_id + resource_id: $.payload.stack_identity + + - name: 'stack.suspend' + event_type: + - 'orchestration.stack.suspend.end' + type: 'delta' + 
unit: 'stack' + volume: 1 + user_id: _context_trustor_user_id + project_id: $.payload.tenant_id + resource_id: $.payload.stack_identity + + # MagnetoDB + - name: 'magnetodb.table.create' + type: 'gauge' + unit: 'table' + volume: 1 + event_type: 'magnetodb.table.create.end' + resource_id: $.payload.table_uuid + user_id: _context_user + project_id: _context_tenant + + - name: 'magnetodb.table.delete' + type: 'gauge' + unit: 'table' + volume: 1 + event_type: 'magnetodb.table.delete.end' + resource_id: $.payload.table_uuid + user_id: _context_user + project_id: _context_tenant + + # Volume + - name: 'volume' + type: 'gauge' + unit: 'volume' + volume: 1 + event_type: + - 'volume.exists' + - 'volume.create.*' + - 'volume.delete.*' + - 'volume.resize.*' + - 'volume.attach.*' + - 'volume.detach.*' + - 'volume.update.*' + resource_id: $.payload.volume_id + user_id: $.payload.user_id + project_id: $.payload.tenant_id + + - name: 'volume.exists' + type: 'delta' + unit: 'volume' + volume: 1 + event_type: + - 'volume.exists' + resource_id: $.payload.volume_id + user_id: $.payload.user_id + project_id: $.payload.tenant_id + + - name: 'volume.create.start' + type: 'delta' + unit: 'volume' + volume: 1 + event_type: + - 'volume.create.start' + resource_id: $.payload.volume_id + user_id: $.payload.user_id + project_id: $.payload.tenant_id + + - name: 'volume.create.end' + type: 'delta' + unit: 'volume' + volume: 1 + event_type: + - 'volume.create.end' + resource_id: $.payload.volume_id + user_id: $.payload.user_id + project_id: $.payload.tenant_id + + - name: 'volume.delete.start' + type: 'delta' + unit: 'volume' + volume: 1 + event_type: + - 'volume.delete.start' + resource_id: $.payload.volume_id + user_id: $.payload.user_id + project_id: $.payload.tenant_id + + - name: 'volume.delete.end' + type: 'delta' + unit: 'volume' + volume: 1 + event_type: + - 'volume.delete.end' + resource_id: $.payload.volume_id + user_id: $.payload.user_id + project_id: $.payload.tenant_id + + - name: 'volume.update.end' + type: 'delta' + unit: 'volume' + volume: 1 + event_type: + - 'volume.update.end' + resource_id: $.payload.volume_id + user_id: $.payload.user_id + project_id: $.payload.tenant_id + + - name: 'volume.update.start' + type: 'delta' + unit: 'volume' + volume: 1 + event_type: + - 'volume.update.start' + resource_id: $.payload.volume_id + user_id: $.payload.user_id + project_id: $.payload.tenant_id + + - name: 'volume.resize.end' + type: 'delta' + unit: 'volume' + volume: 1 + event_type: + - 'volume.resize.end' + resource_id: $.payload.volume_id + user_id: $.payload.user_id + project_id: $.payload.tenant_id + + - name: 'volume.resize.start' + type: 'delta' + unit: 'volume' + volume: 1 + event_type: + - 'volume.resize.start' + resource_id: $.payload.volume_id + user_id: $.payload.user_id + project_id: $.payload.tenant_id + + + - name: 'volume.attach.end' + type: 'delta' + unit: 'volume' + volume: 1 + event_type: + - 'volume.attach.end' + resource_id: $.payload.volume_id + user_id: $.payload.user_id + project_id: $.payload.tenant_id + + - name: 'volume.attach.start' + type: 'delta' + unit: 'volume' + volume: 1 + event_type: + - 'volume.attach.start' + resource_id: $.payload.volume_id + user_id: $.payload.user_id + project_id: $.payload.tenant_id + + - name: 'volume.detach.end' + type: 'delta' + unit: 'volume' + volume: 1 + event_type: + - 'volume.detach.end' + resource_id: $.payload.volume_id + user_id: $.payload.user_id + project_id: $.payload.tenant_id + + - name: 'volume.detach.start' + type: 'delta' + unit: 
'volume' + volume: 1 + event_type: + - 'volume.detach.start' + resource_id: $.payload.volume_id + user_id: $.payload.user_id + project_id: $.payload.tenant_id + + # Volume Snapshot + - name: 'snapshot' + type: 'gauge' + unit: 'snapshot' + volume: 1 + event_type: + - 'snapshot.exists' + - 'snapshot.create.*' + - 'snapshot.delete.*' + + resource_id: $.payload.snapshot_id + user_id: $.payload.user_id + project_id: $.payload.tenant_id + + - name: 'snapshot.exists' + type: 'delta' + unit: 'snapshot' + volume: 1 + event_type: + - 'snapshot.exists' + resource_id: $.payload.snapshot_id + user_id: $.payload.user_id + project_id: $.payload.tenant_id + + - name: 'snapshot.create.start' + type: 'delta' + unit: 'snapshot' + volume: 1 + event_type: + - 'snapshot.create.start' + resource_id: $.payload.snapshot_id + user_id: $.payload.user_id + project_id: $.payload.tenant_id + + - name: 'snapshot.create.end' + type: 'delta' + unit: 'snapshot' + volume: 1 + event_type: + - 'snapshot.create.end' + resource_id: $.payload.snapshot_id + user_id: $.payload.user_id + project_id: $.payload.tenant_id + + - name: 'snapshot.delete.start' + type: 'delta' + unit: 'snapshot' + volume: 1 + event_type: + - 'snapshot.delete.start' + resource_id: $.payload.snapshot_id + user_id: $.payload.user_id + project_id: $.payload.tenant_id + + - name: 'snapshot.delete.end' + type: 'delta' + unit: 'snapshot' + volume: 1 + event_type: + - 'snapshot.delete.end' + resource_id: $.payload.snapshot_id + user_id: $.payload.user_id + project_id: $.payload.tenant_id + + # Sahara + - name: 'cluster.create' + type: 'delta' + unit: 'cluster' + volume: 1 + event_type: + - 'sahara.cluster.create' + resource_id: $.payload.cluster_id + project_id: $.payload.project_id + + - name: 'cluster.update' + type: 'delta' + unit: 'cluster' + volume: 1 + event_type: + - 'sahara.cluster.update' + resource_id: $.payload.cluster_id + project_id: $.payload.project_id + + - name: 'cluster.delete' + type: 'delta' + unit: 'cluster' + volume: 1 + event_type: + - 'sahara.cluster.delete' + resource_id: $.payload.cluster_id + project_id: $.payload.project_id + + # Identity + - name: 'identity.user.created' + type: 'delta' + unit: 'user' + volume: 1 + event_type: + - 'identity.user.created' + resource_id: $.payload.resource_info + user_id: $.payload.initiator.id + + - name: 'identity.user.updated' + type: 'delta' + unit: 'user' + volume: 1 + event_type: + - 'identity.user.updated' + resource_id: $.payload.resource_info + user_id: $.payload.initiator.id + + - name: 'identity.user.deleted' + type: 'delta' + unit: 'user' + volume: 1 + event_type: + - 'identity.user.deleted' + resource_id: $.payload.resource_info + user_id: $.payload.initiator.id + + - name: 'identity.group.created' + type: 'delta' + unit: 'group' + volume: 1 + event_type: + - 'identity.group.created' + resource_id: $.payload.resource_info + user_id: $.payload.initiator.id + + - name: 'identity.group.updated' + type: 'delta' + unit: 'group' + volume: 1 + event_type: + - 'identity.group.updated' + resource_id: $.payload.resource_info + user_id: $.payload.initiator.id + + - name: 'identity.group.deleted' + type: 'delta' + unit: 'group' + volume: 1 + event_type: + - 'identity.group.deleted' + resource_id: $.payload.resource_info + user_id: $.payload.initiator.id + + - name: 'identity.project.created' + type: 'delta' + unit: 'project' + volume: 1 + event_type: + - 'identity.project.created' + resource_id: $.payload.resource_info + user_id: $.payload.initiator.id + + - name: 'identity.project.updated' + type: 
'delta' + unit: 'project' + volume: 1 + event_type: + - 'identity.project.updated' + resource_id: $.payload.resource_info + user_id: $.payload.initiator.id + + - name: 'identity.project.deleted' + type: 'delta' + unit: 'project' + volume: 1 + event_type: + - 'identity.project.deleted' + resource_id: $.payload.resource_info + user_id: $.payload.initiator.id + + - name: 'identity.role.created' + type: 'delta' + unit: 'role' + volume: 1 + event_type: + - 'identity.role.created' + resource_id: $.payload.resource_info + user_id: $.payload.initiator.id + + - name: 'identity.role.updated' + type: 'delta' + unit: 'role' + volume: 1 + event_type: + - 'identity.role.updated' + resource_id: $.payload.resource_info + user_id: $.payload.initiator.id + + - name: 'identity.role.deleted' + type: 'delta' + unit: 'role' + volume: 1 + event_type: + - 'identity.role.deleted' + resource_id: $.payload.resource_info + user_id: $.payload.initiator.id + + - name: 'identity.role_assignment.created' + type: 'delta' + unit: 'role_assignment' + volume: 1 + event_type: + - 'identity.role_assignment.created' + resource_id: $.payload.role + user_id: $.payload.initiator.id + + - name: 'identity.role_assignment.deleted' + type: 'delta' + unit: 'role_assignment' + volume: 1 + event_type: + - 'identity.role_assignment.deleted' + resource_id: $.payload.role + user_id: $.payload.initiator.id + + - name: 'identity.authenticate.success' + type: 'delta' + unit: 'user' + volume: 1 + event_type: + - 'identity.authenticate' + resource_id: $.payload.initiator.id + user_id: $.payload.initiator.id + + - name: 'identity.authenticate.pending' + type: 'delta' + unit: 'user' + volume: 1 + event_type: + - 'identity.authenticate' + resource_id: $.payload.initiator.id + user_id: $.payload.initiator.id + + - name: 'identity.authenticate.failure' + type: 'delta' + unit: 'user' + volume: 1 + event_type: + - 'identity.authenticate' + resource_id: $.payload.initiator.id + user_id: $.payload.initiator.id + + - name: 'identity.trust.created' + type: 'delta' + unit: 'trust' + volume: 1 + event_type: + - 'identity.OS-TRUST:trust.created' + resource_id: $.payload.resource_info + user_id: $.payload.initiator.id + + - name: 'identity.trust.deleted' + type: 'delta' + unit: 'trust' + volume: 1 + event_type: + - 'identity.OS-TRUST:trust.deleted' + resource_id: $.payload.resource_info + user_id: $.payload.initiator.id + + - name: 'storage.api.request' + type: 'delta' + unit: 'request' + volume: 1 + event_type: + - 'objectstore.http.request' + resource_id: $.payload.target.id + user_id: $.payload.initiator.id + project_id: $.payload.initiator.project_id + + - name: '$.payload.name' + event_type: 'profiler.*' + type: 'gauge' + unit: 'trace' + volume: 1 + user_id: $.payload.user_id + project_id: $.payload.project_id + resource_id: '"profiler-" + $.payload.base_id' diff -Nru ceilometer-5.0.0~b2/ceilometer/meter/notifications.py ceilometer-5.0.0~b3/ceilometer/meter/notifications.py --- ceilometer-5.0.0~b2/ceilometer/meter/notifications.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/meter/notifications.py 2015-09-03 13:05:55.000000000 +0000 @@ -12,11 +12,14 @@ # under the License. 
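
The meters.yaml shipped above is the declarative replacement for the per-service notification plugins this patch deletes (image, volume, orchestration, identity, objectstore, profiler): each definition couples an event_type fnmatch filter with JSONPath expressions that the rewritten ceilometer/meter/notifications.py, whose diff follows, evaluates against the notification body. As an illustration only (not part of the patch), here is how one definition resolves, assuming the jsonpath_rw_ext package the new code imports; the notification payload values are invented:

    import jsonpath_rw_ext

    # Condensed from the 'image.size' entry in meters.yaml above.
    definition = {
        'name': 'image.size', 'type': 'gauge', 'unit': 'B',
        'event_type': ['image.upload', 'image.delete', 'image.update'],
        'volume': '$.payload.size', 'resource_id': '$.payload.id',
    }
    # Hypothetical notification body, shaped like a glance image.upload event.
    body = {'event_type': 'image.upload',
            'payload': {'size': 13147648, 'id': 'fake-image-id'}}
    matches = jsonpath_rw_ext.parse(definition['volume']).find(body)
    volume = [m.value for m in matches if m.value is not None][0]
    print(volume)  # 13147648; this value becomes Sample.volume

The extended parser is also what makes the filtered arithmetic forms above work, e.g. $.payload.metrics[?(@.name='cpu.percent')].value * 100, which plain jsonpath_rw cannot evaluate.
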
import fnmatch +import functools +import itertools import os +import pkg_resources import six import yaml -import jsonpath_rw +from jsonpath_rw_ext import parser from oslo_config import cfg from oslo_log import log import oslo_messaging @@ -33,6 +36,8 @@ ] cfg.CONF.register_opts(OPTS, group='meter') +cfg.CONF.import_opt('disable_non_metric_meters', 'ceilometer.notification', + group='notification') LOG = log.getLogger(__name__) @@ -40,6 +45,7 @@ class MeterDefinitionException(Exception): def __init__(self, message, definition_cfg): super(MeterDefinitionException, self).__init__(message) + self.message = message self.definition_cfg = definition_cfg def __str__(self): @@ -49,51 +55,75 @@ class MeterDefinition(object): + JSONPATH_RW_PARSER = parser.ExtentedJsonPathParser() + def __init__(self, definition_cfg): self.cfg = definition_cfg - self._validate_type() - def match_type(self, meter_name): - try: - event_type = self.cfg['event_type'] - except KeyError as err: + self._event_type = self.cfg.get('event_type') + if not self._event_type: raise MeterDefinitionException( - _LE("Required field %s not specified") % err.args[0], self.cfg) + _LE("Required field event_type not specified"), self.cfg) + if isinstance(self._event_type, six.string_types): + self._event_type = [self._event_type] + + if ('type' not in self.cfg.get('lookup', []) and + self.cfg['type'] not in sample.TYPES): + raise MeterDefinitionException( + _LE("Invalid type %s specified") % self.cfg['type'], self.cfg) - if isinstance(event_type, six.string_types): - event_type = [event_type] - for t in event_type: + self._field_getter = {} + for name, field in self.cfg.items(): + if name in ["event_type", "lookup"] or not field: + continue + elif isinstance(field, six.integer_types): + self._field_getter[name] = field + else: + parts = self.parse_jsonpath(field) + self._field_getter[name] = functools.partial( + self._parse_jsonpath_field, parts) + + def parse_jsonpath(self, field): + try: + parts = self.JSONPATH_RW_PARSER.parse(field) + except Exception as e: + raise MeterDefinitionException(_LE( + "Parse error in JSONPath specification " + "'%(jsonpath)s': %(err)s") + % dict(jsonpath=field, err=e), self.cfg) + return parts + + def match_type(self, meter_name): + for t in self._event_type: if fnmatch.fnmatch(meter_name, t): return True - def parse_fields(self, field, message): - fval = self.cfg.get(field) - if not fval: + def parse_fields(self, field, message, all_values=False): + getter = self._field_getter.get(field) + if not getter: return - if isinstance(fval, six.integer_types): - return fval - try: - parts = jsonpath_rw.parse(fval) - except Exception as e: - raise MeterDefinitionException( - _LE("Parse error in JSONPath specification " - "'%(jsonpath)s': %(err)s") - % dict(jsonpath=parts, err=e), self.cfg) + elif callable(getter): + return getter(message, all_values) + else: + return getter + + @staticmethod + def _parse_jsonpath_field(parts, message, all_values): values = [match.value for match in parts.find(message) if match.value is not None] if values: - return values[0] - - def _validate_type(self): - if self.cfg['type'] not in sample.TYPES: - raise MeterDefinitionException( - _LE("Invalid type %s specified") % self.cfg['type'], self.cfg) + if not all_values: + return values[0] + return values def get_config_file(): config_file = cfg.CONF.meter.meter_definitions_cfg_file if not os.path.exists(config_file): config_file = cfg.CONF.find_file(config_file) + if not config_file: + config_file = pkg_resources.resource_filename( + 
__name__, "data/meters.yaml") return config_file @@ -126,7 +156,7 @@ else: LOG.debug(_LE("No Meter Definitions configuration file found!" " Using default config.")) - meters_config = [] + meters_config = {} LOG.info(_LE("Meter Definitions: %s"), meters_config) @@ -134,13 +164,21 @@ def load_definitions(config_def): + if not config_def: + return [] return [MeterDefinition(event_def) - for event_def in reversed(config_def['metric'])] + for event_def in reversed(config_def['metric']) + if (event_def['volume'] != 1 or + not cfg.CONF.notification.disable_non_metric_meters)] + + +class InvalidPayload(Exception): + pass class ProcessMeterNotifications(plugin_base.NotificationBase): - event_types = None + event_types = [] def __init__(self, manager): super(ProcessMeterNotifications, self).__init__(manager) @@ -167,6 +205,7 @@ conf.swift_control_exchange, conf.magnetodb_control_exchange, conf.ceilometer_control_exchange, + conf.magnum_control_exchange, ] for exchange in exchanges: @@ -175,21 +214,71 @@ for topic in conf.notification_topics) return targets + @staticmethod + def _normalise_as_list(value, d, body, length): + values = d.parse_fields(value, body, True) + if not values: + if value in d.cfg.get('lookup'): + LOG.warning('Could not find %s values', value) + raise InvalidPayload + values = [d.cfg[value]] + elif value in d.cfg.get('lookup') and length != len(values): + LOG.warning('Not all fetched meters contain "%s" field', value) + raise InvalidPayload + return values if isinstance(values, list) else [values] + def process_notification(self, notification_body): for d in self.definitions: if d.match_type(notification_body['event_type']): userid = self.get_user_id(d, notification_body) projectid = self.get_project_id(d, notification_body) resourceid = d.parse_fields('resource_id', notification_body) - yield sample.Sample.from_notification( - name=d.cfg['name'], - type=d.cfg['type'], - unit=d.cfg['unit'], - volume=d.parse_fields('volume', notification_body), - resource_id=resourceid, - user_id=userid, - project_id=projectid, - message=notification_body) + ts = d.parse_fields('timestamp', notification_body) + if d.cfg.get('lookup'): + meters = d.parse_fields('name', notification_body, True) + if not meters: # skip if no meters in payload + break + try: + resources = self._normalise_as_list( + 'resource_id', d, notification_body, len(meters)) + volumes = self._normalise_as_list( + 'volume', d, notification_body, len(meters)) + units = self._normalise_as_list( + 'unit', d, notification_body, len(meters)) + types = self._normalise_as_list( + 'type', d, notification_body, len(meters)) + users = (self._normalise_as_list( + 'user_id', d, notification_body, len(meters)) + if 'user_id' in d.cfg['lookup'] else [userid]) + projs = (self._normalise_as_list( + 'project_id', d, notification_body, len(meters)) + if 'project_id' in d.cfg['lookup'] + else [projectid]) + times = (self._normalise_as_list( + 'timestamp', d, notification_body, len(meters)) + if 'timestamp' in d.cfg['lookup'] else [ts]) + except InvalidPayload: + break + for m, v, unit, t, r, p, user, ts in zip( + meters, volumes, itertools.cycle(units), + itertools.cycle(types), itertools.cycle(resources), + itertools.cycle(projs), itertools.cycle(users), + itertools.cycle(times)): + yield sample.Sample.from_notification( + name=m, type=t, unit=unit, volume=v, + resource_id=r, user_id=user, project_id=p, + message=notification_body, timestamp=ts) + else: + yield sample.Sample.from_notification( + name=d.cfg['name'], + type=d.cfg['type'], + 
unit=d.cfg['unit'], + volume=d.parse_fields('volume', notification_body), + resource_id=resourceid, + user_id=userid, + project_id=projectid, + message=notification_body, + timestamp=ts) @staticmethod def get_user_id(d, notification_body): diff -Nru ceilometer-5.0.0~b2/ceilometer/middleware.py ceilometer-5.0.0~b3/ceilometer/middleware.py --- ceilometer-5.0.0~b2/ceilometer/middleware.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/middleware.py 2015-09-03 13:05:55.000000000 +0000 @@ -22,11 +22,11 @@ cfg.CONF.import_opt('nova_control_exchange', 'ceilometer.compute.notifications') cfg.CONF.import_opt('glance_control_exchange', - 'ceilometer.image.notifications') + 'ceilometer.notification') cfg.CONF.import_opt('neutron_control_exchange', 'ceilometer.network.notifications') cfg.CONF.import_opt('cinder_control_exchange', - 'ceilometer.volume.notifications') + 'ceilometer.notification') OPTS = [ cfg.MultiStrOpt('http_control_exchanges', diff -Nru ceilometer-5.0.0~b2/ceilometer/network/notifications.py ceilometer-5.0.0~b3/ceilometer/network/notifications.py --- ceilometer-5.0.0~b2/ceilometer/network/notifications.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/network/notifications.py 2015-09-03 13:05:55.000000000 +0000 @@ -153,25 +153,6 @@ unit = 'ip' -class Bandwidth(NetworkNotificationBase): - """Listen for Neutron notifications. - - Listen in order to mediate with the metering framework. - """ - event_types = ['l3.meter'] - - def process_notification(self, message): - yield sample.Sample.from_notification( - name='bandwidth', - type=sample.TYPE_DELTA, - unit='B', - volume=message['payload']['bytes'], - user_id=None, - project_id=message['payload']['tenant_id'], - resource_id=message['payload']['label_id'], - message=message) - - class Pool(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron notifications. diff -Nru ceilometer-5.0.0~b2/ceilometer/network/statistics/opendaylight/client.py ceilometer-5.0.0~b3/ceilometer/network/statistics/opendaylight/client.py --- ceilometer-5.0.0~b2/ceilometer/network/statistics/opendaylight/client.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/network/statistics/opendaylight/client.py 2015-09-03 13:05:55.000000000 +0000 @@ -227,7 +227,7 @@ resp = requests.get(url, **self._req_params) if CONF.debug: self._log_res(resp) - if resp.status_code / 100 != 2: + if resp.status_code // 100 != 2: raise OpenDaylightRESTAPIFailed( _('OpenDaylitght API returned %(status)s %(reason)s') % {'status': resp.status_code, 'reason': resp.reason}) diff -Nru ceilometer-5.0.0~b2/ceilometer/notification.py ceilometer-5.0.0~b3/ceilometer/notification.py --- ceilometer-5.0.0~b2/ceilometer/notification.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/notification.py 2015-09-03 13:05:55.000000000 +0000 @@ -23,6 +23,7 @@ from ceilometer import coordination from ceilometer.event import endpoint as event_endpoint from ceilometer.i18n import _, _LI, _LW +from ceilometer import exchange_control from ceilometer import messaging from ceilometer import pipeline from ceilometer import service_base @@ -53,12 +54,14 @@ 'notification agents to be run simultaneously.'), cfg.MultiStrOpt('messaging_urls', default=[], + secret=True, help="Messaging URLs to listen for notifications. 
" "Example: transport://user:pass@host1:port" "[,hostN:portN]/virtual_host " "(DEFAULT/transport_url is used if empty)"), ] +cfg.CONF.register_opts(exchange_control.EXCHANGE_OPTS) cfg.CONF.register_opts(OPTS, group="notification") cfg.CONF.import_opt('telemetry_driver', 'ceilometer.publisher.messaging', group='publisher_notifier') @@ -91,21 +94,25 @@ invoke_args=(pm, ) ) - def _get_notifier(self, transport, pipe): - return oslo_messaging.Notifier( - transport, - driver=cfg.CONF.publisher_notifier.telemetry_driver, - publisher_id='ceilometer.notification', - topic='%s-%s' % (self.NOTIFICATION_IPC, pipe.name)) + def _get_notifiers(self, transport, pipe): + notifiers = [] + for agent in self.partition_coordinator._get_members(self.group_id): + notifiers.append(oslo_messaging.Notifier( + transport, + driver=cfg.CONF.publisher_notifier.telemetry_driver, + publisher_id='ceilometer.notification', + topic='%s-%s-%s' % (self.NOTIFICATION_IPC, pipe.name, agent))) + return notifiers def _get_pipe_manager(self, transport, pipeline_manager): if cfg.CONF.notification.workload_partitioning: pipe_manager = pipeline.SamplePipelineTransportManager() for pipe in pipeline_manager.pipelines: + key = pipeline.get_pipeline_grouping_key(pipe) pipe_manager.add_transporter( - (pipe.source.support_meter, - self._get_notifier(transport, pipe))) + (pipe.source.support_meter, key or ['resource_id'], + self._get_notifiers(transport, pipe))) else: pipe_manager = pipeline_manager @@ -120,8 +127,8 @@ event_pipe_manager = pipeline.EventPipelineTransportManager() for pipe in self.event_pipeline_manager.pipelines: event_pipe_manager.add_transporter( - (pipe.source.support_event, - self._get_notifier(transport, pipe))) + (pipe.source.support_event, ['event_type'], + self._get_notifiers(transport, pipe))) else: event_pipe_manager = self.event_pipeline_manager @@ -133,33 +140,31 @@ self.pipeline_manager = pipeline.setup_pipeline() self.transport = messaging.get_transport() - self.pipe_manager = self._get_pipe_manager(self.transport, - self.pipeline_manager) - self.event_pipe_manager = self._get_event_pipeline_manager( - self.transport) - - self.partition_coordinator = coordination.PartitionCoordinator() - self.partition_coordinator.start() - if cfg.CONF.notification.workload_partitioning: self.ctxt = context.get_admin_context() self.group_id = self.NOTIFICATION_NAMESPACE + self.partition_coordinator = coordination.PartitionCoordinator() + self.partition_coordinator.start() + self.partition_coordinator.join_group(self.group_id) else: - # FIXME(sileht): endpoint use notification_topics option - # and it should not because this is oslo_messaging option - # not a ceilometer, until we have a something to get - # the notification_topics in an other way - # we must create a transport to ensure the option have - # beeen registered by oslo_messaging + # FIXME(sileht): endpoint uses the notification_topics option + # and it should not because this is an oslo_messaging option + # not a ceilometer. Until we have something to get the + # notification_topics in another way, we must create a transport + # to ensure the option has been registered by oslo_messaging. 
messaging.get_notifier(self.transport, '') self.group_id = None + self.pipe_manager = self._get_pipe_manager(self.transport, + self.pipeline_manager) + self.event_pipe_manager = self._get_event_pipeline_manager( + self.transport) + self.listeners, self.pipeline_listeners = [], [] self._configure_main_queue_listeners(self.pipe_manager, self.event_pipe_manager) if cfg.CONF.notification.workload_partitioning: - self.partition_coordinator.join_group(self.group_id) self._configure_pipeline_listeners() self.partition_coordinator.watch_group(self.group_id, self._refresh_agent) @@ -198,8 +203,8 @@ if (cfg.CONF.notification.disable_non_metric_meters and isinstance(handler, base.NonMetricNotificationBase)): continue - LOG.debug(_('Event types from %(name)s: %(type)s' - ' (ack_on_error=%(error)s)') % + LOG.debug('Event types from %(name)s: %(type)s' + ' (ack_on_error=%(error)s)', {'name': ext.name, 'type': ', '.join(handler.event_types), 'error': ack_on_error}) @@ -220,6 +225,9 @@ self.listeners.append(listener) def _refresh_agent(self, event): + self.reload_pipeline() + + def _refresh_listeners(self): utils.kill_listeners(self.pipeline_listeners) self._configure_pipeline_listeners() @@ -228,18 +236,19 @@ ev_pipes = [] if cfg.CONF.notification.store_events: ev_pipes = self.event_pipeline_manager.pipelines - partitioned = self.partition_coordinator.extract_my_subset( - self.group_id, self.pipeline_manager.pipelines + ev_pipes) + pipelines = self.pipeline_manager.pipelines + ev_pipes transport = messaging.get_transport() - for pipe in partitioned: - LOG.debug(_('Pipeline endpoint: %s'), pipe.name) + for pipe in pipelines: + LOG.debug('Pipeline endpoint: %s', pipe.name) pipe_endpoint = (pipeline.EventPipelineEndpoint if isinstance(pipe, pipeline.EventPipeline) else pipeline.SamplePipelineEndpoint) listener = messaging.get_notification_listener( transport, [oslo_messaging.Target( - topic='%s-%s' % (self.NOTIFICATION_IPC, pipe.name))], + topic='%s-%s-%s' % (self.NOTIFICATION_IPC, + pipe.name, + self.partition_coordinator._my_id))], [pipe_endpoint(self.ctxt, pipe)]) listener.start() self.pipeline_listeners.append(listener) @@ -256,6 +265,9 @@ self.pipe_manager = self._get_pipe_manager( self.transport, self.pipeline_manager) + self.event_pipe_manager = self._get_event_pipeline_manager( + self.transport) + # re-start the main queue listeners. utils.kill_listeners(self.listeners) self._configure_main_queue_listeners( @@ -264,4 +276,4 @@ # re-start the pipeline listeners if workload partitioning # is enabled. if cfg.CONF.notification.workload_partitioning: - self._refresh_agent(None) + self._refresh_listeners() diff -Nru ceilometer-5.0.0~b2/ceilometer/objectstore/notifications.py ceilometer-5.0.0~b3/ceilometer/objectstore/notifications.py --- ceilometer-5.0.0~b2/ceilometer/objectstore/notifications.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/objectstore/notifications.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,81 +0,0 @@ -# -# Copyright 2015 Red Hat. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -import oslo_messaging - -from ceilometer.agent import plugin_base -from ceilometer import sample - -OPTS = [ - cfg.StrOpt('swift_control_exchange', - default='swift', - help="Exchange name for Swift notifications."), -] - - -cfg.CONF.register_opts(OPTS) - - -class _Base(plugin_base.NotificationBase): - """Convert objectstore notification into Samples.""" - - @staticmethod - def get_targets(conf): - """Return a sequence of oslo_messaging.Target - - Sequence defining the exchange and topics to be connected for this - plugin. - """ - return [oslo_messaging.Target(topic=topic, - exchange=conf.swift_control_exchange) - for topic in conf.notification_topics] - - -class SwiftWsgiMiddleware(_Base, plugin_base.NonMetricNotificationBase): - - @property - def event_types(self): - return ['objectstore.http.request'] - - def process_notification(self, message): - yield sample.Sample.from_notification( - name='storage.api.request', - type=sample.TYPE_DELTA, - unit='request', - volume=1, - resource_id=message['payload']['target']['id'], - user_id=message['payload']['initiator']['id'], - project_id=message['payload']['initiator']['project_id'], - message=message) - - -class SwiftWsgiMiddlewareMeters(_Base): - - @property - def event_types(self): - return ['objectstore.http.request'] - - def process_notification(self, message): - for meter in message['payload'].get('measurements', []): - yield sample.Sample.from_notification( - name=meter['metric']['name'], - type=sample.TYPE_DELTA, - unit=meter['metric']['unit'], - volume=meter['result'], - resource_id=message['payload']['target']['id'], - user_id=message['payload']['initiator']['id'], - project_id=message['payload']['initiator']['project_id'], - message=message) diff -Nru ceilometer-5.0.0~b2/ceilometer/objectstore/rgw.py ceilometer-5.0.0~b3/ceilometer/objectstore/rgw.py --- ceilometer-5.0.0~b2/ceilometer/objectstore/rgw.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/objectstore/rgw.py 2015-09-03 13:05:55.000000000 +0000 @@ -22,7 +22,6 @@ import six.moves.urllib.parse as urlparse from ceilometer.agent import plugin_base -from ceilometer.i18n import _ from ceilometer import sample LOG = log.getLogger(__name__) @@ -76,7 +75,7 @@ endpoint_type=conf.os_endpoint_type) _Base._ENDPOINT = urlparse.urljoin(rgw_url, '/admin') except exceptions.EndpointNotFound: - LOG.debug(_("Radosgw endpoint not found")) + LOG.debug("Radosgw endpoint not found") return _Base._ENDPOINT def _iter_accounts(self, ksclient, cache, tenants): @@ -93,8 +92,8 @@ try: from ceilometer.objectstore.rgw_client import RGWAdminClient rgw_client = RGWAdminClient(endpoint, self.access_key, self.secret) - except ImportError as e: - raise plugin_base.PollsterPermanentError(e) + except ImportError: + raise plugin_base.PollsterPermanentError(tenants) for t in tenants: api_method = 'get_%s' % self.METHOD diff -Nru ceilometer-5.0.0~b2/ceilometer/objectstore/swift.py ceilometer-5.0.0~b3/ceilometer/objectstore/swift.py --- ceilometer-5.0.0~b2/ceilometer/objectstore/swift.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/objectstore/swift.py 2015-09-03 13:05:55.000000000 +0000 @@ -25,7 +25,6 @@ from swiftclient import client as swift from ceilometer.agent import plugin_base -from ceilometer.i18n import _ from ceilometer import sample @@ -73,7 +72,7 @@ service_type=cfg.CONF.service_types.swift, 
endpoint_type=conf.os_endpoint_type) except exceptions.EndpointNotFound: - LOG.debug(_("Swift endpoint not found")) + LOG.debug("Swift endpoint not found") return _Base._ENDPOINT def _iter_accounts(self, ksclient, cache, tenants): diff -Nru ceilometer-5.0.0~b2/ceilometer/opts.py ceilometer-5.0.0~b3/ceilometer/opts.py --- ceilometer-5.0.0~b2/ceilometer/opts.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/opts.py 2015-09-03 13:05:55.000000000 +0000 @@ -30,15 +30,12 @@ import ceilometer.compute.virt.vmware.inspector import ceilometer.compute.virt.xenapi.inspector import ceilometer.coordination -import ceilometer.data_processing.notifications import ceilometer.dispatcher import ceilometer.dispatcher.file import ceilometer.energy.kwapi import ceilometer.event.converter import ceilometer.hardware.discovery -import ceilometer.identity.notifications import ceilometer.image.glance -import ceilometer.image.notifications import ceilometer.ipmi.notifications.ironic import ceilometer.ipmi.platform.intel_node_manager import ceilometer.ipmi.pollsters @@ -49,16 +46,13 @@ import ceilometer.nova_client import ceilometer.objectstore.rgw import ceilometer.objectstore.swift -import ceilometer.orchestration.notifications import ceilometer.pipeline -import ceilometer.profiler.notifications import ceilometer.publisher.messaging import ceilometer.publisher.utils import ceilometer.sample import ceilometer.service import ceilometer.storage import ceilometer.utils -import ceilometer.volume.notifications def list_opts(): @@ -71,24 +65,18 @@ ceilometer.compute.util.OPTS, ceilometer.compute.virt.inspector.OPTS, ceilometer.compute.virt.libvirt.inspector.OPTS, - ceilometer.data_processing.notifications.OPTS, ceilometer.dispatcher.OPTS, - ceilometer.identity.notifications.OPTS, ceilometer.image.glance.OPTS, - ceilometer.image.notifications.OPTS, ceilometer.ipmi.notifications.ironic.OPTS, ceilometer.middleware.OPTS, ceilometer.network.notifications.OPTS, ceilometer.nova_client.OPTS, ceilometer.objectstore.swift.OPTS, - ceilometer.orchestration.notifications.OPTS, ceilometer.pipeline.OPTS, - ceilometer.profiler.notifications.OPTS, ceilometer.sample.OPTS, ceilometer.service.OPTS, ceilometer.storage.OLD_OPTS, - ceilometer.utils.OPTS, - ceilometer.volume.notifications.OPTS,)), + ceilometer.utils.OPTS,)), ('alarm', itertools.chain(ceilometer.alarm.notifier.rest.OPTS, ceilometer.alarm.service.OPTS, @@ -97,10 +85,13 @@ ceilometer.api.controllers.v2.alarms.ALARM_API_OPTS)), ('api', itertools.chain(ceilometer.api.OPTS, - ceilometer.api.app.API_OPTS,)), + ceilometer.api.app.API_OPTS, + [ceilometer.service.API_OPT])), # deprecated path, new one is 'polling' ('central', ceilometer.agent.manager.OPTS), - ('collector', ceilometer.collector.OPTS), + ('collector', + itertools.chain(ceilometer.collector.OPTS, + [ceilometer.service.COLL_OPT])), ('compute', ceilometer.compute.discovery.OPTS), ('coordination', ceilometer.coordination.OPTS), ('database', ceilometer.storage.OPTS), @@ -110,7 +101,9 @@ ('ipmi', itertools.chain(ceilometer.ipmi.platform.intel_node_manager.OPTS, ceilometer.ipmi.pollsters.OPTS)), - ('notification', ceilometer.notification.OPTS), + ('notification', + itertools.chain(ceilometer.notification.OPTS, + [ceilometer.service.NOTI_OPT])), ('polling', ceilometer.agent.manager.OPTS), ('publisher', ceilometer.publisher.utils.OPTS), ('publisher_notifier', ceilometer.publisher.messaging.NOTIFIER_OPTS), diff -Nru ceilometer-5.0.0~b2/ceilometer/orchestration/notifications.py 
ceilometer-5.0.0~b3/ceilometer/orchestration/notifications.py --- ceilometer-5.0.0~b2/ceilometer/orchestration/notifications.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/orchestration/notifications.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,76 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Handler for producing orchestration metering from Heat notification - events. -""" - -from oslo_config import cfg -import oslo_messaging - -from ceilometer.agent import plugin_base -from ceilometer import sample - - -OPTS = [ - cfg.StrOpt('heat_control_exchange', - default='heat', - help="Exchange name for Heat notifications"), -] - -cfg.CONF.register_opts(OPTS) -SERVICE = 'orchestration' - - -class StackCRUD(plugin_base.NotificationBase, - plugin_base.NonMetricNotificationBase): - - resource_name = '%s.stack' % SERVICE - - @property - def event_types(self): - return [ - '%s.create.end' % self.resource_name, - '%s.update.end' % self.resource_name, - '%s.delete.end' % self.resource_name, - '%s.resume.end' % self.resource_name, - '%s.suspend.end' % self.resource_name, - ] - - @staticmethod - def get_targets(conf): - """Return a sequence of oslo_messaging.Target - - It is defining the exchange and topics to be connected for this plugin. 
- """ - return [oslo_messaging.Target(topic=topic, - exchange=conf.heat_control_exchange) - for topic in conf.notification_topics] - - def process_notification(self, message): - name = (message['event_type'].replace(self.resource_name, 'stack') - .replace('.end', '')) - - project_id = message['payload']['tenant_id'] - - # Trying to use the trustor_id if trusts is used by Heat, - user_id = (message.get('_context_trustor_user_id') or - message['_context_user_id']) - - yield sample.Sample.from_notification( - name=name, - type=sample.TYPE_DELTA, - unit='stack', - volume=1, - resource_id=message['payload']['stack_identity'], - user_id=user_id, - project_id=project_id, - message=message) diff -Nru ceilometer-5.0.0~b2/ceilometer/pipeline.py ceilometer-5.0.0~b3/ceilometer/pipeline.py --- ceilometer-5.0.0~b2/ceilometer/pipeline.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/pipeline.py 2015-09-03 13:05:55.000000000 +0000 @@ -31,7 +31,7 @@ from ceilometer.event.storage import models -from ceilometer.i18n import _ +from ceilometer.i18n import _, _LW from ceilometer import publisher from ceilometer.publisher import utils as publisher_utils from ceilometer import sample as sample_util @@ -125,11 +125,19 @@ def __init__(self): self.transporters = [] + @staticmethod + def hash_grouping(datapoint, grouping_keys): + value = '' + for key in grouping_keys or []: + value += datapoint.get(key) if datapoint.get(key) else '' + return hash(value) + def add_transporter(self, transporter): self.transporters.append(transporter) def publisher(self, context): serializer = self.serializer + hash_grouping = self.hash_grouping transporters = self.transporters filter_attr = self.filter_attr event_type = self.event_type @@ -137,13 +145,21 @@ class PipelinePublishContext(object): def __enter__(self): def p(data): - serialized_data = serializer(data) - for d_filter, notifier in transporters: - if any(d_filter(d[filter_attr]) - for d in serialized_data): - notifier.sample(context.to_dict(), - event_type=event_type, - payload=serialized_data) + # TODO(gordc): cleanup so payload is always single + # datapoint. we can't correctly bucketise + # datapoints if batched. + data = [data] if not isinstance(data, list) else data + for datapoint in data: + serialized_data = serializer(datapoint) + for d_filter, grouping_keys, notifiers in transporters: + if d_filter(serialized_data[filter_attr]): + key = (hash_grouping(serialized_data, + grouping_keys) + % len(notifiers)) + notifier = notifiers[key] + notifier.sample(context.to_dict(), + event_type=event_type, + payload=[serialized_data]) return p def __exit__(self, exc_type, exc_value, traceback): @@ -158,8 +174,8 @@ @staticmethod def serializer(data): - return [publisher_utils.meter_message_from_counter( - sample, cfg.CONF.publisher.telemetry_secret) for sample in data] + return publisher_utils.meter_message_from_counter( + data, cfg.CONF.publisher.telemetry_secret) class EventPipelineTransportManager(_PipelineTransportManager): @@ -168,8 +184,8 @@ @staticmethod def serializer(data): - return [publisher_utils.message_from_event( - data, cfg.CONF.publisher.telemetry_secret)] + return publisher_utils.message_from_event( + data, cfg.CONF.publisher.telemetry_secret) class PublishContext(object): @@ -314,19 +330,7 @@ def get_interval(self): return self.interval - # (yjiang5) To support meters like instance:m1.tiny, - # which include variable part at the end starting with ':'. - # Hope we will not add such meters in future. 
- @staticmethod - def _variable_meter_name(name): - m = name.partition(':') - if m[1] == ':': - return m[1].join((m[0], '*')) - else: - return name - def support_meter(self, meter_name): - meter_name = self._variable_meter_name(meter_name) return self.is_supported(self.meters, meter_name) @@ -380,6 +384,7 @@ except Exception: LOG.exception(_("Unable to load publisher %s"), p) + self.multi_publish = True if len(self.publishers) > 1 else False self.transformers = self._setup_transformers(cfg, transformer_manager) def __str__(self): @@ -415,10 +420,13 @@ try: p.publish_events(ctxt, events) except Exception: - LOG.exception(_( - "Pipeline %(pipeline)s: Continue after error " - "from publisher %(pub)s") % ({'pipeline': self, - 'pub': p})) + LOG.exception(_("Pipeline %(pipeline)s: %(status)s" + " after error from publisher %(pub)s") % + ({'pipeline': self, 'status': 'Continue' if + self.multi_publish else 'Exit', 'pub': p} + )) + if not self.multi_publish: + raise def flush(self, ctxt): """Flush data after all events have been injected to pipeline.""" @@ -434,10 +442,10 @@ for transformer in self.transformers[start:]: sample = transformer.handle_sample(ctxt, sample) if not sample: - LOG.debug(_( + LOG.debug( "Pipeline %(pipeline)s: Sample dropped by " - "transformer %(trans)s") % ({'pipeline': self, - 'trans': transformer})) + "transformer %(trans)s", {'pipeline': self, + 'trans': transformer}) return return sample except Exception as err: @@ -465,11 +473,11 @@ transformed_samples = samples else: for sample in samples: - LOG.debug(_( + LOG.debug( "Pipeline %(pipeline)s: Transform sample " - "%(smp)s from %(trans)s transformer") % ({'pipeline': self, - 'smp': sample, - 'trans': start})) + "%(smp)s from %(trans)s transformer", {'pipeline': self, + 'smp': sample, + 'trans': start}) sample = self._transform_sample(start, ctxt, sample) if sample: transformed_samples.append(sample) @@ -563,10 +571,40 @@ def support_meter(self, meter_name): return self.source.support_meter(meter_name) + def _validate_volume(self, s): + volume = s.volume + if volume is None: + LOG.warning(_LW( + 'metering data %(counter_name)s for %(resource_id)s ' + '@ %(timestamp)s has no volume (volume: None), the sample will' + ' be dropped') + % {'counter_name': s.name, + 'resource_id': s.resource_id, + 'timestamp': s.timestamp if s.timestamp else 'NO TIMESTAMP'} + ) + return False + if not isinstance(volume, (int, float)): + try: + volume = float(volume) + except ValueError: + LOG.warning(_LW( + 'metering data %(counter_name)s for %(resource_id)s ' + '@ %(timestamp)s has volume which is not a number ' + '(volume: %(counter_volume)s), the sample will be dropped') + % {'counter_name': s.name, + 'resource_id': s.resource_id, + 'timestamp': ( + s.timestamp if s.timestamp else 'NO TIMESTAMP'), + 'counter_volume': volume} + ) + return False + return True + def publish_data(self, ctxt, samples): if not isinstance(samples, list): samples = [samples] - supported = [s for s in samples if self.source.support_meter(s.name)] + supported = [s for s in samples if self.source.support_meter(s.name) + and self._validate_volume(s)] self.sink.publish_samples(ctxt, supported) @@ -740,7 +778,7 @@ if not os.path.exists(cfg_file): cfg_file = cfg.CONF.find_file(cfg_file) - LOG.debug(_("Pipeline config file: %s"), cfg_file) + LOG.debug("Pipeline config file: %s", cfg_file) with open(cfg_file) as fap: data = fap.read() @@ -759,7 +797,7 @@ if not os.path.exists(cfg_file): cfg_file = cfg.CONF.find_file(cfg_file) - LOG.debug(_("Polling config file: %s"), 
cfg_file) + LOG.debug("Polling config file: %s", cfg_file) with open(cfg_file) as fap: data = fap.read() @@ -815,3 +853,10 @@ """Setup polling manager according to yaml config file.""" cfg_file = cfg.CONF.pipeline_cfg_file return _setup_polling_manager(cfg_file) + + +def get_pipeline_grouping_key(pipe): + keys = [] + for transformer in pipe.sink.transformers: + keys += transformer.grouping_keys + return list(set(keys)) diff -Nru ceilometer-5.0.0~b2/ceilometer/profiler/notifications.py ceilometer-5.0.0~b3/ceilometer/profiler/notifications.py --- ceilometer-5.0.0~b2/ceilometer/profiler/notifications.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/profiler/notifications.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,77 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -import oslo_messaging - -from ceilometer.agent import plugin_base -from ceilometer import sample - - -OPTS = [ - cfg.StrOpt('trove_control_exchange', - default='trove', - help="Exchange name for DBaaS notifications."), - cfg.StrOpt('zaqar_control_exchange', - default='zaqar', - help="Exchange name for Messaging service notifications."), -] - -cfg.CONF.register_opts(OPTS) -# TODO(boris-42): remove after adding keystone audit plugins. -cfg.CONF.import_opt('keystone_control_exchange', - 'ceilometer.identity.notifications') - - -class ProfilerNotifications(plugin_base.NotificationBase, - plugin_base.NonMetricNotificationBase): - - event_types = ["profiler.*"] - - def get_targets(self, conf): - """Return a sequence of oslo_messaging.Target - - It is defining the exchange and topics to be connected for this plugin. - :param conf: Configuration. - """ - targets = [] - exchanges = [ - conf.nova_control_exchange, - conf.cinder_control_exchange, - conf.glance_control_exchange, - conf.neutron_control_exchange, - conf.heat_control_exchange, - conf.keystone_control_exchange, - conf.sahara_control_exchange, - conf.trove_control_exchange, - conf.zaqar_control_exchange, - ] - - for exchange in exchanges: - targets.extend(oslo_messaging.Target(topic=topic, - exchange=exchange) - for topic in conf.notification_topics) - return targets - - def process_notification(self, message): - yield sample.Sample.from_notification( - name=message["payload"]["name"], - type=sample.TYPE_GAUGE, - volume=1, - unit="trace", - user_id=message["payload"].get("user_id"), - project_id=message["payload"].get("project_id"), - resource_id="profiler-%s" % message["payload"]["base_id"], - message=message) diff -Nru ceilometer-5.0.0~b2/ceilometer/publisher/kafka_broker.py ceilometer-5.0.0~b3/ceilometer/publisher/kafka_broker.py --- ceilometer-5.0.0~b2/ceilometer/publisher/kafka_broker.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/publisher/kafka_broker.py 2015-09-03 13:05:55.000000000 +0000 @@ -13,24 +13,19 @@ # License for the specific language governing permissions and limitations # under the License. 
-import json - import kafka -from oslo_config import cfg from oslo_log import log +from oslo_serialization import jsonutils from oslo_utils import netutils from six.moves.urllib import parse as urlparse from ceilometer.i18n import _LE -from ceilometer.i18n import _LI -from ceilometer.i18n import _LW -from ceilometer import publisher -from ceilometer.publisher import utils +from ceilometer.publisher import messaging LOG = log.getLogger(__name__) -class KafkaBrokerPublisher(publisher.PublisherBase): +class KafkaBrokerPublisher(messaging.MessagingPublisher): """Publish metering data to kafka broker. The ip address and port number of kafka broker should be configured in @@ -68,132 +63,34 @@ """ def __init__(self, parsed_url): - self.kafka_client = None + super(KafkaBrokerPublisher, self).__init__(parsed_url) + options = urlparse.parse_qs(parsed_url.query) - self.host, self.port = netutils.parse_host_port( + self._producer = None + self._host, self._port = netutils.parse_host_port( parsed_url.netloc, default_port=9092) + self._topic = options.get('topic', ['ceilometer'])[-1] + self.max_retry = int(options.get('max_retry', [100])[-1]) - self.local_queue = [] - - params = urlparse.parse_qs(parsed_url.query) - self.topic = params.get('topic', ['ceilometer'])[-1] - self.policy = params.get('policy', ['default'])[-1] - self.max_queue_length = int(params.get( - 'max_queue_length', [1024])[-1]) - self.max_retry = int(params.get('max_retry', [100])[-1]) - - if self.policy in ['default', 'drop', 'queue']: - LOG.info(_LI('Publishing policy set to %s') % self.policy) - else: - LOG.warn(_LW('Publishing policy is unknown (%s) force to default') - % self.policy) - self.policy = 'default' + def _ensure_connection(self): + if self._producer: + return try: - self._get_client() + client = kafka.KafkaClient("%s:%s" % (self._host, self._port)) + self._producer = kafka.SimpleProducer(client) except Exception as e: LOG.exception(_LE("Failed to connect to Kafka service: %s"), e) + raise messaging.DeliveryFailure('Kafka Client is not available, ' + 'please restart Kafka client') - def publish_samples(self, context, samples): - """Send a metering message for kafka broker. - - :param context: Execution context from the service or RPC call - :param samples: Samples from pipeline after transformation - """ - samples_list = [ - utils.meter_message_from_counter( - sample, cfg.CONF.publisher.telemetry_secret) - for sample in samples - ] - - self.local_queue.append(samples_list) - - try: - self._check_kafka_connection() - except Exception as e: - raise e - - self.flush() - - def flush(self): - queue = self.local_queue - self.local_queue = self._process_queue(queue) - if self.policy == 'queue': - self._check_queue_length() - - def publish_events(self, context, events): - """Send an event message for kafka broker. - - :param context: Execution context from the service or RPC call - :param events: events from pipeline after transformation - """ - events_list = [utils.message_from_event( - event, cfg.CONF.publisher.telemetry_secret) for event in events] - - self.local_queue.append(events_list) - + def _send(self, context, event_type, data): + self._ensure_connection() + # TODO(sileht): don't split the payload into multiple network + # message ... but how to do that without breaking consuming + # application... 
try: - self._check_kafka_connection() + for d in data: + self._producer.send_messages(self._topic, jsonutils.dumps(d)) except Exception as e: - raise e - - self.flush() - - def _process_queue(self, queue): - current_retry = 0 - while queue: - data = queue[0] - try: - self._send(data) - except Exception: - LOG.warn(_LW("Failed to publish %d datum"), - sum([len(d) for d in queue])) - if self.policy == 'queue': - return queue - elif self.policy == 'drop': - return [] - current_retry += 1 - if current_retry >= self.max_retry: - self.local_queue = [] - LOG.exception(_LE("Failed to retry to send sample data " - "with max_retry times")) - raise - else: - queue.pop(0) - return [] - - def _check_queue_length(self): - queue_length = len(self.local_queue) - if queue_length > self.max_queue_length > 0: - diff = queue_length - self.max_queue_length - self.local_queue = self.local_queue[diff:] - LOG.warn(_LW("Kafka Publisher max local queue length is exceeded, " - "dropping %d oldest data") % diff) - - def _check_kafka_connection(self): - try: - self._get_client() - except Exception as e: - LOG.exception(_LE("Failed to connect to Kafka service: %s"), e) - - if self.policy == 'queue': - self._check_queue_length() - else: - self.local_queue = [] - raise Exception('Kafka Client is not available, ' - 'please restart Kafka client') - - def _get_client(self): - if not self.kafka_client: - self.kafka_client = kafka.KafkaClient( - "%s:%s" % (self.host, self.port)) - self.kafka_producer = kafka.SimpleProducer(self.kafka_client) - - def _send(self, data): - for d in data: - try: - self.kafka_producer.send_messages( - self.topic, json.dumps(d)) - except Exception as e: - LOG.exception(_LE("Failed to send sample data: %s"), e) - raise + messaging.raise_delivery_failure(e) diff -Nru ceilometer-5.0.0~b2/ceilometer/publisher/messaging.py ceilometer-5.0.0~b3/ceilometer/publisher/messaging.py --- ceilometer-5.0.0~b2/ceilometer/publisher/messaging.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/publisher/messaging.py 2015-09-03 13:05:55.000000000 +0000 @@ -22,10 +22,12 @@ from oslo_config import cfg from oslo_log import log import oslo_messaging +from oslo_utils import encodeutils +from oslo_utils import excutils import six import six.moves.urllib.parse as urlparse -from ceilometer.i18n import _ +from ceilometer.i18n import _, _LE from ceilometer import messaging from ceilometer import publisher from ceilometer.publisher import utils @@ -36,6 +38,7 @@ RPC_OPTS = [ cfg.StrOpt('metering_topic', default='metering', + deprecated_for_removal=True, help='The topic that ceilometer uses for metering messages.', deprecated_group="DEFAULT", ), @@ -67,6 +70,18 @@ cfg.CONF.import_opt('host', 'ceilometer.service') +class DeliveryFailure(Exception): + def __init__(self, message=None, cause=None): + super(DeliveryFailure, self).__init__(message) + self.cause = cause + + +def raise_delivery_failure(exc): + excutils.raise_with_cause(DeliveryFailure, + encodeutils.exception_to_unicode(exc), + cause=exc) + + @six.add_metaclass(abc.ABCMeta) class MessagingPublisher(publisher.PublisherBase): @@ -81,6 +96,7 @@ self.policy = options.get('policy', ['default'])[-1] self.max_queue_length = int(options.get( 'max_queue_length', [1024])[-1]) + self.max_retry = 0 self.local_queue = [] @@ -144,11 +160,12 @@ "dropping %d oldest samples") % count) def _process_queue(self, queue, policy): + current_retry = 0 while queue: context, topic, data = queue[0] try: self._send(context, topic, data) - except 
oslo_messaging.MessageDeliveryFailure:
+ except DeliveryFailure:
 data = sum([len(m) for __, __, m in queue])
 if policy == 'queue':
 LOG.warn(_("Failed to publish %d datapoints, queue them"),
@@ -158,8 +175,11 @@
 LOG.warn(_("Failed to publish %d datapoints, "
 "dropping them"), data)
 return []
- # default, occur only if rabbit_max_retries > 0
- raise
+ current_retry += 1
+ if current_retry >= self.max_retry:
+ LOG.exception(_LE("Failed to send sample data "
+ "after max_retry retries"))
+ raise
 else:
 queue.pop(0)
 return []
@@ -195,8 +215,11 @@
 )
 def _send(self, context, topic, meters):
- self.rpc_client.prepare(topic=topic).cast(context, self.target,
- data=meters)
+ try:
+ self.rpc_client.prepare(topic=topic).cast(context, self.target,
+ data=meters)
+ except oslo_messaging.MessageDeliveryFailure as e:
+ raise_delivery_failure(e)
 class NotifierPublisher(MessagingPublisher):
@@ -213,8 +236,11 @@
 )
 def _send(self, context, event_type, data):
- self.notifier.sample(context.to_dict(), event_type=event_type,
- payload=data)
+ try:
+ self.notifier.sample(context.to_dict(), event_type=event_type,
+ payload=data)
+ except oslo_messaging.MessageDeliveryFailure as e:
+ raise_delivery_failure(e)
 class SampleNotifierPublisher(NotifierPublisher):
diff -Nru ceilometer-5.0.0~b2/ceilometer/publisher/udp.py ceilometer-5.0.0~b3/ceilometer/publisher/udp.py
--- ceilometer-5.0.0~b2/ceilometer/publisher/udp.py 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/publisher/udp.py 2015-09-03 13:05:55.000000000 +0000
@@ -57,9 +57,9 @@
 sample, cfg.CONF.publisher.telemetry_secret)
 host = self.host
 port = self.port
- LOG.debug(_("Publishing sample %(msg)s over UDP to "
- "%(host)s:%(port)d") % {'msg': msg, 'host': host,
- 'port': port})
+ LOG.debug("Publishing sample %(msg)s over UDP to "
+ "%(host)s:%(port)d", {'msg': msg, 'host': host,
+ 'port': port})
 try:
 self.socket.sendto(msgpack.dumps(msg),
 (self.host, self.port))
diff -Nru ceilometer-5.0.0~b2/ceilometer/sample.py ceilometer-5.0.0~b3/ceilometer/sample.py
--- ceilometer-5.0.0~b2/ceilometer/sample.py 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/sample.py 2015-09-03 13:05:55.000000000 +0000
@@ -82,10 +82,12 @@
 @classmethod
 def from_notification(cls, name, type, volume, unit,
 user_id, project_id, resource_id,
- message, source=None):
- metadata = copy.copy(message['payload'])
+ message, timestamp=None, source=None):
+ metadata = (copy.copy(message['payload'])
+ if isinstance(message['payload'], dict) else {})
 metadata['event_type'] = message['event_type']
 metadata['host'] = message['publisher_id']
+ ts = timestamp if timestamp else message['timestamp']
 return cls(name=name,
 type=type,
 volume=volume,
@@ -93,7 +95,7 @@
 user_id=user_id,
 project_id=project_id,
 resource_id=resource_id,
- timestamp=message['timestamp'],
+ timestamp=ts,
 resource_metadata=metadata,
 source=source)
diff -Nru ceilometer-5.0.0~b2/ceilometer/service_base.py ceilometer-5.0.0~b3/ceilometer/service_base.py
--- ceilometer-5.0.0~b2/ceilometer/service_base.py 2015-07-30 12:14:02.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/service_base.py 2015-09-03 13:05:55.000000000 +0000
@@ -20,7 +20,7 @@
 from oslo_service import service as os_service
 import six
-from ceilometer.i18n import _, _LE, _LI
+from ceilometer.i18n import _LE, _LI
 from ceilometer import pipeline
 LOG = log.getLogger(__name__)
@@ -60,12 +60,12 @@
 # Polling in the polling agent.
elif hasattr(self, 'polling_manager'): self.polling_manager = pipeline.setup_polling() - LOG.debug(_("Pipeline has been refreshed. " - "old hash: %(old)s, new hash: %(new)s") % - ({'old': self.pipeline_hash, - 'new': _hash})) + LOG.debug("Pipeline has been refreshed. " + "old hash: %(old)s, new hash: %(new)s", + {'old': self.pipeline_hash, + 'new': _hash}) except Exception as err: - LOG.debug(_("Active pipeline config's hash is %s") % + LOG.debug("Active pipeline config's hash is %s", self.pipeline_hash) LOG.exception(_LE('Unable to load changed pipeline: %s') % err) diff -Nru ceilometer-5.0.0~b2/ceilometer/service.py ceilometer-5.0.0~b3/ceilometer/service.py --- ceilometer-5.0.0~b2/ceilometer/service.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/service.py 2015-09-03 13:05:55.000000000 +0000 @@ -21,10 +21,10 @@ from oslo_config import cfg import oslo_i18n from oslo_log import log +from oslo_reports import guru_meditation_report as gmr -from ceilometer.i18n import _ from ceilometer import messaging -from ceilometer import utils +from ceilometer import version OPTS = [ @@ -33,14 +33,6 @@ help='Name of this node, which must be valid in an AMQP ' 'key. Can be an opaque identifier. For ZeroMQ only, must ' 'be a valid host name, FQDN, or IP address.'), - cfg.IntOpt('collector_workers', - default=1, - help='Number of workers for collector service. A single ' - 'collector is enabled by default.'), - cfg.IntOpt('notification_workers', - default=1, - help='Number of workers for notification service. A single ' - 'notification agent is enabled by default.'), cfg.IntOpt('http_timeout', default=600, help='Timeout seconds for HTTP requests. Set it to None to ' @@ -87,27 +79,40 @@ help='Disables X.509 certificate validation when an ' 'SSL connection to Identity Service is established.'), ] -cfg.CONF.register_cli_opts(CLI_OPTS, group="service_credentials") - -LOG = log.getLogger(__name__) +cfg.CONF.register_cli_opts(CLI_OPTS, group="service_credentials") -class WorkerException(Exception): - """Exception for errors relating to service workers.""" +API_OPT = cfg.IntOpt('workers', + default=1, + min=1, + deprecated_group='DEFAULT', + deprecated_name='api_workers', + help='Number of workers for api, default value is 1.') +cfg.CONF.register_opt(API_OPT, 'api') + +NOTI_OPT = cfg.IntOpt('workers', + default=1, + min=1, + deprecated_group='DEFAULT', + deprecated_name='notification_workers', + help='Number of workers for notification service, ' + 'default value is 1.') +cfg.CONF.register_opt(NOTI_OPT, 'notification') + +COLL_OPT = cfg.IntOpt('workers', + default=1, + min=1, + deprecated_group='DEFAULT', + deprecated_name='collector_workers', + help='Number of workers for collector service. 
'
+ 'Default value is 1.')
+cfg.CONF.register_opt(COLL_OPT, 'collector')
-def get_workers(name):
- workers = (cfg.CONF.get('%s_workers' % name) or
- utils.cpu_count())
- if workers and workers < 1:
- msg = (_("%(worker_name)s value of %(workers)s is invalid, "
- "must be greater than 0") %
- {'worker_name': '%s_workers' % name, 'workers': str(workers)})
- raise WorkerException(msg)
- return workers
+LOG = log.getLogger(__name__)
-def prepare_service(argv=None):
+def prepare_service(argv=None, config_files=None):
 oslo_i18n.enable_lazy()
 log.register_options(cfg.CONF)
 log_levels = (cfg.CONF.default_log_levels +
@@ -115,6 +120,13 @@
 log.set_defaults(default_log_levels=log_levels)
 if argv is None:
 argv = sys.argv
- cfg.CONF(argv[1:], project='ceilometer', validate_default_values=True)
+ cfg.CONF(argv[1:], project='ceilometer', validate_default_values=True,
+ version=version.version_info.version_string(),
+ default_config_files=config_files)
 log.setup(cfg.CONF, 'ceilometer')
+ # NOTE(liusheng): guru cannot run when the service runs under the apache
+ # daemon, so when ceilometer-api runs with mod_wsgi the argv is [], and
+ # we don't start guru.
+ if argv:
+ gmr.TextGuruMeditation.setup_autorun(version)
 messaging.setup()
diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/hbase/base.py ceilometer-5.0.0~b3/ceilometer/storage/hbase/base.py
--- ceilometer-5.0.0~b2/ceilometer/storage/hbase/base.py 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/storage/hbase/base.py 2015-09-03 13:05:55.000000000 +0000
@@ -18,7 +18,6 @@
 from oslo_utils import netutils
 from six.moves.urllib import parse as urlparse
-from ceilometer.i18n import _
 from ceilometer.storage.hbase import inmemory as hbase_inmemory
 LOG = log.getLogger(__name__)
@@ -42,8 +41,8 @@
 else:
 # This is a in-memory usage for unit tests
 if Connection._memory_instance is None:
- LOG.debug(_('Creating a new in-memory HBase '
- 'Connection object'))
+ LOG.debug('Creating a new in-memory HBase '
+ 'Connection object')
 Connection._memory_instance = (hbase_inmemory.
 MConnectionPool())
 self.conn_pool = Connection._memory_instance
@@ -59,8 +58,8 @@
 The tests use a subclass to override this and
 return an in-memory connection pool.
""" - LOG.debug(_('connecting to HBase on %(host)s:%(port)s') % ( - {'host': conf['host'], 'port': conf['port']})) + LOG.debug('connecting to HBase on %(host)s:%(port)s', + {'host': conf['host'], 'port': conf['port']}) return happybase.ConnectionPool(size=100, host=conf['host'], port=conf['port'], table_prefix=conf['table_prefix']) diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/hbase/inmemory.py ceilometer-5.0.0~b3/ceilometer/storage/hbase/inmemory.py --- ceilometer-5.0.0~b2/ceilometer/storage/hbase/inmemory.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/hbase/inmemory.py 2015-09-03 13:05:55.000000000 +0000 @@ -21,7 +21,6 @@ import six import ceilometer -from ceilometer.i18n import _ LOG = log.getLogger(__name__) @@ -265,7 +264,7 @@ @staticmethod def open(): - LOG.debug(_("Opening in-memory HBase connection")) + LOG.debug("Opening in-memory HBase connection") def create_table(self, n, families=None): families = families or {} diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/impl_hbase.py ceilometer-5.0.0~b3/ceilometer/storage/impl_hbase.py --- ceilometer-5.0.0~b2/ceilometer/storage/impl_hbase.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/impl_hbase.py 2015-09-03 13:05:55.000000000 +0000 @@ -19,7 +19,6 @@ from oslo_utils import timeutils import ceilometer -from ceilometer.i18n import _ from ceilometer.storage import base from ceilometer.storage.hbase import base as hbase_base from ceilometer.storage.hbase import migration as hbase_migration @@ -128,18 +127,18 @@ hbase_migration.migrate_tables(conn, tables) def clear(self): - LOG.debug(_('Dropping HBase schema...')) + LOG.debug('Dropping HBase schema...') with self.conn_pool.connection() as conn: for table in [self.RESOURCE_TABLE, self.METER_TABLE]: try: conn.disable_table(table) except Exception: - LOG.debug(_('Cannot disable table but ignoring error')) + LOG.debug('Cannot disable table but ignoring error') try: conn.delete_table(table) except Exception: - LOG.debug(_('Cannot delete table but ignoring error')) + LOG.debug('Cannot delete table but ignoring error') def record_metering_data(self, data): """Write the data to the backend storage system. @@ -212,7 +211,7 @@ source, q) with self.conn_pool.connection() as conn: resource_table = conn.table(self.RESOURCE_TABLE) - LOG.debug(_("Query Resource table: %s") % q) + LOG.debug("Query Resource table: %s", q) for resource_id, data in resource_table.scan(filter=q, limit=limit): f_res, sources, meters, md = hbase_utils.deserialize_entry( @@ -263,7 +262,7 @@ project_id=project, resource_id=resource, source=source) - LOG.debug(_("Query Resource table: %s") % q) + LOG.debug("Query Resource table: %s", q) gen = resource_table.scan(filter=q) # We need result set to be sure that user doesn't receive several @@ -305,11 +304,13 @@ q, start, stop, columns = (hbase_utils. 
make_sample_query_from_filter (sample_filter, require_meter=False)) - LOG.debug(_("Query Meter Table: %s") % q) + LOG.debug("Query Meter Table: %s", q) gen = meter_table.scan(filter=q, row_start=start, row_stop=stop, limit=limit, columns=columns) for ignored, meter in gen: d_meter = hbase_utils.deserialize_entry(meter)[0] + d_meter['message']['counter_volume'] = ( + float(d_meter['message']['counter_volume'])) d_meter['message']['recorded_at'] = d_meter['recorded_at'] yield models.Sample(**d_meter['message']) diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/impl_mongodb.py ceilometer-5.0.0~b3/ceilometer/storage/impl_mongodb.py --- ceilometer-5.0.0~b2/ceilometer/storage/impl_mongodb.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/impl_mongodb.py 2015-09-03 13:05:55.000000000 +0000 @@ -33,7 +33,6 @@ import six import ceilometer -from ceilometer.i18n import _ from ceilometer import storage from ceilometer.storage import base from ceilometer.storage import models @@ -207,28 +206,30 @@ name_qualifier = dict(user_id='', project_id='project_') background = dict(user_id=False, project_id=True) for primary in ['user_id', 'project_id']: - name = 'resource_%sidx' % name_qualifier[primary] - self.db.resource.create_index([ - (primary, pymongo.ASCENDING), - ('source', pymongo.ASCENDING), - ], name=name, background=background[primary]) - name = 'meter_%sidx' % name_qualifier[primary] self.db.meter.create_index([ ('resource_id', pymongo.ASCENDING), (primary, pymongo.ASCENDING), ('counter_name', pymongo.ASCENDING), ('timestamp', pymongo.ASCENDING), - ('source', pymongo.ASCENDING), ], name=name, background=background[primary]) - self.db.resource.create_index([('last_sample_timestamp', - pymongo.DESCENDING)], - name='last_sample_timestamp_idx', - sparse=True) self.db.meter.create_index([('timestamp', pymongo.DESCENDING)], name='timestamp_idx') + # NOTE(ityaptin) This index covers get_resource requests sorting + # and MongoDB uses part of this compound index for different + # queries based on any of user_id, project_id, last_sample_timestamp + # fields + self.db.resource.create_index([('user_id', pymongo.DESCENDING), + ('project_id', pymongo.DESCENDING), + ('last_sample_timestamp', + pymongo.DESCENDING)], + name='resource_user_project_timestamp',) + self.db.resource.create_index([('last_sample_timestamp', + pymongo.DESCENDING)], + name='last_sample_timestamp_idx') + # update or create time_to_live index ttl = cfg.CONF.database.metering_time_to_live self.update_ttl(ttl, 'meter_ttl', 'timestamp', self.db.meter) @@ -310,8 +311,8 @@ Clearing occurs with native MongoDB time-to-live feature. 
""" - LOG.debug(_("Clearing expired metering data is based on native " - "MongoDB time to live feature and going in background.")) + LOG.debug("Clearing expired metering data is based on native " + "MongoDB time to live feature and going in background.") @staticmethod def _get_marker(db_collection, marker_pairs): diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/mongo/utils.py ceilometer-5.0.0~b3/ceilometer/storage/mongo/utils.py --- ceilometer-5.0.0~b2/ceilometer/storage/mongo/utils.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/mongo/utils.py 2015-09-03 13:05:55.000000000 +0000 @@ -26,11 +26,14 @@ from oslo_log import log from oslo_utils import netutils import pymongo +import pymongo.errors import six from six.moves.urllib import parse from ceilometer.i18n import _ +ERROR_INDEX_WITH_DIFFERENT_SPEC_ALREADY_EXISTS = 86 + LOG = log.getLogger(__name__) # FIXME(dhellmann): Configuration options are not part of the Oslo @@ -45,7 +48,7 @@ MINIMUM_COMPATIBLE_MONGODB_VERSION = [2, 4] COMPLETE_AGGREGATE_COMPATIBLE_VERSION = [2, 6] -TRIVIAL_LAMBDA = lambda result, param=None: result +FINALIZE_AGGREGATION_LAMBDA = lambda result, param=None: float(result) CARDINALITY_VALIDATION = (lambda name, param: param in ['resource_id', 'user_id', 'project_id', @@ -455,6 +458,19 @@ # exception. return CursorProxy(self.conn.find(*args, **kwargs)) + def create_index(self, keys, name=None, *args, **kwargs): + try: + self.conn.create_index(keys, name=name, *args, **kwargs) + except pymongo.errors.OperationFailure as e: + if e.code is ERROR_INDEX_WITH_DIFFERENT_SPEC_ALREADY_EXISTS: + LOG.info(_("Index %s will be recreate.") % name) + self._recreate_index(keys, name, *args, **kwargs) + + @safe_mongo_call + def _recreate_index(self, keys, name, *args, **kwargs): + self.conn.drop_index(name) + self.conn.create_index(keys, name=name, *args, **kwargs) + def __getattr__(self, item): """Wrap MongoDB connection. 
@@ -503,7 +519,7 @@ finalize=None, parametrized=False, validate=None): - self._finalize = finalize or TRIVIAL_LAMBDA + self._finalize = finalize or FINALIZE_AGGREGATION_LAMBDA self.group = lambda *args: group(*args) if parametrized else group self.project = (lambda *args: project(*args) if parametrized else project) diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/pymongo_base.py ceilometer-5.0.0~b3/ceilometer/storage/pymongo_base.py --- ceilometer-5.0.0~b2/ceilometer/storage/pymongo_base.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/pymongo_base.py 2015-09-03 13:05:55.000000000 +0000 @@ -139,6 +139,8 @@ del s['_id'] # Backward compatibility for samples without units s['counter_unit'] = s.get('counter_unit', '') + # Compatibility with MongoDB 3.+ + s['counter_volume'] = float(s.get('counter_volume')) # Tolerate absence of recorded_at in older datapoints s['recorded_at'] = s.get('recorded_at') # Check samples for metadata and "unquote" key if initially it diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/001_add_meter_table.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/001_add_meter_table.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/001_add_meter_table.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/001_add_meter_table.py 2015-09-03 13:05:55.000000000 +0000 @@ -93,11 +93,3 @@ tables = [meter, project, resource, user, source, sourceassoc] for i in sorted(tables, key=lambda table: table.fullname): i.create() - - -def downgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - for name in ['source', 'sourceassoc', 'project', - 'user', 'resource', 'meter']: - t = Table(name, meta, autoload=True) - t.drop() diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/002_remove_duration.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/002_remove_duration.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/002_remove_duration.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/002_remove_duration.py 2015-09-03 13:05:55.000000000 +0000 @@ -21,10 +21,3 @@ meter = Table('meter', meta, autoload=True) duration = Column('counter_duration', Integer) meter.drop_column(duration) - - -def downgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - meter = Table('meter', meta, autoload=True) - duration = Column('counter_duration', Integer) - meter.create_column(duration) diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/003_set_utf8_charset.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/003_set_utf8_charset.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/003_set_utf8_charset.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/003_set_utf8_charset.py 2015-09-03 13:05:55.000000000 +0000 @@ -27,19 +27,3 @@ migrate_engine.execute( "ALTER DATABASE %s DEFAULT CHARACTER SET utf8" % migrate_engine.url.database) - - -def downgrade(migrate_engine): - # Operations to reverse the above upgrade go here. 
- if migrate_engine.name == "mysql": - tables = ['meter', 'user', 'resource', 'project', 'source', - 'sourceassoc'] - migrate_engine.execute("SET foreign_key_checks = 0") - - for table in tables: - migrate_engine.execute( - "ALTER TABLE %s CONVERT TO CHARACTER SET latin1" % table) - migrate_engine.execute("SET foreign_key_checks = 1") - migrate_engine.execute( - "ALTER DATABASE %s DEFAULT CHARACTER SET latin1" % - migrate_engine.url.database) diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/004_add_counter_unit.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/004_add_counter_unit.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/004_add_counter_unit.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/004_add_counter_unit.py 2015-09-03 13:05:55.000000000 +0000 @@ -21,10 +21,3 @@ meter = Table('meter', meta, autoload=True) unit = Column('counter_unit', String(255)) meter.create_column(unit) - - -def downgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - meter = Table('meter', meta, autoload=True) - unit = Column('counter_unit', String(255)) - meter.drop_column(unit) diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/005_remove_resource_timestamp.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/005_remove_resource_timestamp.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/005_remove_resource_timestamp.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/005_remove_resource_timestamp.py 2015-09-03 13:05:55.000000000 +0000 @@ -12,7 +12,6 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -from oslo_utils import timeutils from sqlalchemy import MetaData, Table, Column, DateTime @@ -23,13 +22,3 @@ resource.drop_column(timestamp) received_timestamp = Column('received_timestamp', DateTime) resource.drop_column(received_timestamp) - - -def downgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - resource = Table('resource', meta, autoload=True) - timestamp = Column('timestamp', DateTime) - resource.create_column(timestamp) - received_timestamp = Column('received_timestamp', DateTime, - default=timeutils.utcnow) - resource.create_column(received_timestamp) diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/006_counter_volume_is_float.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/006_counter_volume_is_float.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/006_counter_volume_is_float.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/006_counter_volume_is_float.py 2015-09-03 13:05:55.000000000 +0000 @@ -15,7 +15,6 @@ # under the License. 
from sqlalchemy import Float -from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import Table @@ -24,9 +23,3 @@ meta = MetaData(bind=migrate_engine) meter = Table('meter', meta, autoload=True) meter.c.counter_volume.alter(type=Float(53)) - - -def downgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - meter = Table('meter', meta, autoload=True) - meter.c.counter_volume.alter(type=Integer) diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/007_add_alarm_table.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/007_add_alarm_table.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/007_add_alarm_table.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/007_add_alarm_table.py 2015-09-03 13:05:55.000000000 +0000 @@ -44,9 +44,3 @@ mysql_engine='InnoDB', mysql_charset='utf8') alarm.create() - - -def downgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - alarm = Table('alarm', meta, autoload=True) - alarm.drop() diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/008_add_events.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/008_add_events.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/008_add_events.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/008_add_events.py 2015-09-03 13:05:55.000000000 +0000 @@ -58,10 +58,3 @@ mysql_charset='utf8', ) trait.create() - - -def downgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - for name in ['trait', 'event', 'unique_name']: - t = Table(name, meta, autoload=True) - t.drop() diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/009_event_strings.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/009_event_strings.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/009_event_strings.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/009_event_strings.py 2015-09-03 13:05:55.000000000 +0000 @@ -22,11 +22,3 @@ name.c.key.alter(type=VARCHAR(length=255)) trait = Table('trait', meta, autoload=True) trait.c.t_string.alter(type=VARCHAR(length=255)) - - -def downgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - name = Table('unique_name', meta, autoload=True) - name.c.key.alter(type=VARCHAR(length=32)) - trait = Table('trait', meta, autoload=True) - trait.c.t_string.alter(type=VARCHAR(length=32)) diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/010_add_index_to_meter.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/010_add_index_to_meter.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/010_add_index_to_meter.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/010_add_index_to_meter.py 2015-09-03 13:05:55.000000000 +0000 @@ -21,11 +21,3 @@ index = sa.Index('idx_meter_rid_cname', meter.c.resource_id, meter.c.counter_name) index.create(bind=migrate_engine) - - -def downgrade(migrate_engine): - meta = sa.MetaData(bind=migrate_engine) - meter = sa.Table('meter', meta, autoload=True) - index = sa.Index('idx_meter_rid_cname', meter.c.resource_id, - meter.c.counter_name) - index.drop(bind=migrate_engine) diff 
-Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/011_indexes_cleanup.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/011_indexes_cleanup.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/011_indexes_cleanup.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/011_indexes_cleanup.py 2015-09-03 13:05:55.000000000 +0000 @@ -35,14 +35,3 @@ for index_name, column in indexes: index = Index(index_name, table.c[column]) index.drop() - - -def downgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - load_tables = dict((table_name, Table(table_name, meta, autoload=True)) - for table_name in INDEXES.keys()) - for table_name, indexes in INDEXES.items(): - table = load_tables[table_name] - for index_name, column in indexes: - index = Index(index_name, table.c[column]) - index.create() diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/012_add_missing_foreign_keys.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/012_add_missing_foreign_keys.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/012_add_missing_foreign_keys.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/012_add_missing_foreign_keys.py 2015-09-03 13:05:55.000000000 +0000 @@ -56,22 +56,3 @@ params['name'] = "_".join(('fk', table_name, column)) fkey = ForeignKeyConstraint(**params) fkey.create() - - -def downgrade(migrate_engine): - if migrate_engine.name == 'sqlite': - return - meta = MetaData(bind=migrate_engine) - load_tables = dict((table_name, Table(table_name, meta, autoload=True)) - for table_name in TABLES) - for table_name, indexes in INDEXES.items(): - table = load_tables[table_name] - for column, ref_table_name, ref_column_name in indexes: - ref_table = load_tables[ref_table_name] - params = {'columns': [table.c[column]], - 'refcolumns': [ref_table.c[ref_column_name]]} - if migrate_engine.name == 'mysql': - params['name'] = "_".join(('fk', table_name, column)) - with migrate_engine.begin(): - fkey = ForeignKeyConstraint(**params) - fkey.drop() diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/013_rename_counter_to_meter_alarm.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/013_rename_counter_to_meter_alarm.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/013_rename_counter_to_meter_alarm.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/013_rename_counter_to_meter_alarm.py 2015-09-03 13:05:55.000000000 +0000 @@ -21,10 +21,3 @@ meta.bind = migrate_engine alarm = Table('alarm', meta, autoload=True) alarm.c.counter_name.alter(name='meter_name') - - -def downgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - alarm = Table('alarm', meta, autoload=True) - alarm.c.meter_name.alter(name='counter_name') diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/014_add_event_message_id.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/014_add_event_message_id.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/014_add_event_message_id.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/014_add_event_message_id.py 2015-09-03 
13:05:55.000000000 +0000 @@ -42,14 +42,3 @@ execute()) # Leave the Trait, makes the rollback easier and won't really hurt anyone. - - -def downgrade(migrate_engine): - meta = sqlalchemy.MetaData(bind=migrate_engine) - event = sqlalchemy.Table('event', meta, autoload=True) - message_id = sqlalchemy.Column('message_id', sqlalchemy.String(50)) - cons = UniqueConstraint('message_id', table=event) - cons.drop() - index = sqlalchemy.Index('idx_event_message_id', event.c.message_id) - index.drop(bind=migrate_engine) - event.drop_column(message_id) diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/015_add_alarm_history_table.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/015_add_alarm_history_table.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/015_add_alarm_history_table.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/015_add_alarm_history_table.py 2015-09-03 13:05:55.000000000 +0000 @@ -61,10 +61,3 @@ refcolumns=[user.c.id])] for fkey in fkeys: fkey.create(engine=migrate_engine) - - -def downgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - alarm_history = Table('alarm_history', meta, autoload=True) - alarm_history.drop() diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/016_simpler_alarm.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/016_simpler_alarm.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/016_simpler_alarm.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/016_simpler_alarm.py 2015-09-03 13:05:55.000000000 +0000 @@ -16,7 +16,7 @@ import json from sqlalchemy import MetaData, Table, Column, Index -from sqlalchemy import String, Float, Integer, Text +from sqlalchemy import String, Text def upgrade(migrate_engine): @@ -58,49 +58,3 @@ table.c.evaluation_periods.drop() table.c.period.drop() table.c.matching_metadata.drop() - - -def downgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - table = Table('alarm', meta, autoload=True) - - columns = [ - Column('meter_name', String(255)), - Column('comparison_operator', String(2)), - Column('threshold', Float), - Column('statistic', String(255)), - Column('evaluation_periods', Integer), - Column('period', Integer), - Column('matching_metadata', Text()) - ] - for c in columns: - c.create(table) - - for row in table.select().execute().fetchall(): - if row.type != 'threshold': - # note: type insupported in previous version - table.delete().where(table.c.id == row.id).execute() - else: - rule = json.loads(row.rule) - values = {'comparison_operator': rule['comparison_operator'], - 'threshold': float(rule['threshold']), - 'statistic': rule['statistic'], - 'evaluation_periods': int(rule['evaluation_periods']), - 'period': int(rule['period']), - 'meter_name': int(rule['mater_name']), - 'matching_metadata': {}} - - # note: op are ignored because previous format don't support it - for q in rule['query']: - values['matching_metadata'][q['field']] = q['value'] - values['matching_metadata'] = json.dumps( - values['matching_metadata']) - table.update().where(table.c.id == row.id - ).values(**values).execute() - - index = Index('ix_alarm_counter_name', table.c.meter_name) - index.create(bind=migrate_engine) - - table.c.type.drop() - table.c.rule.drop() diff -Nru 
ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/017_convert_timestamp_as_datetime_to_decimal.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/017_convert_timestamp_as_datetime_to_decimal.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/017_convert_timestamp_as_datetime_to_decimal.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/017_convert_timestamp_as_datetime_to_decimal.py 2015-09-03 13:05:55.000000000 +0000 @@ -52,11 +52,3 @@ _convert_data_type(meter, _col, sa.DateTime(), models.PreciseTimestamp(), pk_attr='id', index=True) - - -def downgrade(migrate_engine): - if migrate_engine.name == 'mysql': - meta = sa.MetaData(bind=migrate_engine) - meter = sa.Table('meter', meta, autoload=True) - _convert_data_type(meter, _col, models.PreciseTimestamp(), - sa.DateTime(), pk_attr='id', index=True) diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/018_resource_resource_metadata_is_text.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/018_resource_resource_metadata_is_text.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/018_resource_resource_metadata_is_text.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/018_resource_resource_metadata_is_text.py 2015-09-03 13:05:55.000000000 +0000 @@ -16,7 +16,6 @@ # under the License. from sqlalchemy import MetaData -from sqlalchemy import String from sqlalchemy import Table from sqlalchemy import Text @@ -25,9 +24,3 @@ meta = MetaData(bind=migrate_engine) resource = Table('resource', meta, autoload=True) resource.c.resource_metadata.alter(type=Text) - - -def downgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - resource = Table('resource', meta, autoload=True) - resource.c.resource_metadata.alter(type=String(5000)) diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/019_alarm_history_detail_is_text.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/019_alarm_history_detail_is_text.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/019_alarm_history_detail_is_text.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/019_alarm_history_detail_is_text.py 2015-09-03 13:05:55.000000000 +0000 @@ -16,7 +16,6 @@ # under the License. 
from sqlalchemy import MetaData -from sqlalchemy import String from sqlalchemy import Table from sqlalchemy import Text @@ -25,9 +24,3 @@ meta = MetaData(bind=migrate_engine) alm_hist = Table('alarm_history', meta, autoload=True) alm_hist.c.detail.alter(type=Text) - - -def downgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - alm_hist = Table('alarm_history', meta, autoload=True) - alm_hist.c.detail.alter(type=String(255)) diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/020_add_metadata_tables.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/020_add_metadata_tables.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/020_add_metadata_tables.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/020_add_metadata_tables.py 2015-09-03 13:05:55.000000000 +0000 @@ -66,10 +66,3 @@ ins = meta_tables['metadata_float'].insert() if ins is not None: ins.values(id=meter_id, meta_key=key, value=v).execute() - - -def downgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - for t in tables: - table = Table(t[0], meta, autoload=True) - table.drop() diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/021_add_event_types.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/021_add_event_types.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/021_add_event_types.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/021_add_event_types.py 2015-09-03 13:05:55.000000000 +0000 @@ -75,45 +75,3 @@ fkey.create() event.c.unique_name_id.drop() - - -def downgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - event_type = Table('event_type', meta, autoload=True) - event = Table('event', meta, autoload=True) - unique_name = Table('unique_name', meta, autoload=True) - # Re-insert the event type table records into the old - # unique_name table. - conn = migrate_engine.connect() - sql = ("INSERT INTO unique_name " - "SELECT event_type.id, event_type.desc FROM event_type") - conn.execute(sql) - conn.close() - # Drop the foreign key constraint to event_type, drop the - # event_type table, rename the event.event_type column to - # event.unique_name, and re-add the old foreign - # key constraint - params = {'columns': [event.c.event_type_id], - 'refcolumns': [event_type.c.id]} - if migrate_engine.name == 'mysql': - params['name'] = "_".join(('fk', 'event_type', 'id')) - fkey = ForeignKeyConstraint(**params) - fkey.drop() - - event_type.drop() - - Column('unique_name_id', Integer).create(event) - - # Move data from event_type_id column to unique_name_id column - query = select([event.c.id, event.c.event_type_id]) - for key, value in migration.paged(query): - (event.update().where(event.c.id == key). 
- values({"unique_name_id": value}).execute()) - - event.c.event_type_id.drop() - params = {'columns': [event.c.unique_name_id], - 'refcolumns': [unique_name.c.id]} - if migrate_engine.name == 'mysql': - params['name'] = 'event_ibfk_1' - fkey = ForeignKeyConstraint(**params) - fkey.create() diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/021_sqlite_downgrade.sql ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/021_sqlite_downgrade.sql --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/021_sqlite_downgrade.sql 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/021_sqlite_downgrade.sql 1970-01-01 00:00:00.000000000 +0000 @@ -1,20 +0,0 @@ -ALTER TABLE event RENAME TO event_orig; - -INSERT INTO unique_name -SELECT et.id, et.desc -FROM event_type et; - -CREATE TABLE event ( - id INTEGER PRIMARY KEY ASC, - generated FLOAT NOT NULL, - message_id VARCHAR(50) UNIQUE, - unique_name_id INTEGER NOT NULL, - FOREIGN KEY (unique_name_id) REFERENCES unique_name (id) -); - -INSERT INTO event -SELECT id, generated, message_id, event_type_id -FROM event_orig; - -DROP TABLE event_orig; -DROP TABLE event_type; diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/022_metadata_int_is_bigint.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/022_metadata_int_is_bigint.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/022_metadata_int_is_bigint.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/022_metadata_int_is_bigint.py 2015-09-03 13:05:55.000000000 +0000 @@ -16,7 +16,6 @@ # under the License. from sqlalchemy import BigInteger -from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import Table @@ -25,9 +24,3 @@ meta = MetaData(bind=migrate_engine) resource = Table('metadata_int', meta, autoload=True) resource.c.value.alter(type=BigInteger) - - -def downgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - resource = Table('metadata_int', meta, autoload=True) - resource.c.value.alter(type=Integer) diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/023_add_trait_types.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/023_add_trait_types.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/023_add_trait_types.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/023_add_trait_types.py 2015-09-03 13:05:55.000000000 +0000 @@ -84,68 +84,3 @@ # Finally, drop the unique_name table - we don't need it # anymore. 
unique_name.drop() - - -def downgrade(migrate_engine): - meta = MetaData(migrate_engine) - unique_name = Table( - 'unique_name', meta, - Column('id', Integer, primary_key=True), - Column('key', String(255), unique=True), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - trait_type = Table('trait_type', meta, autoload=True) - trait = Table('trait', meta, autoload=True) - - # Create the UniqueName table, drop the foreign key constraint - # to trait_type, drop the trait_type table, rename the - # trait.trait_type column to traitname, re-add the dtype to - # the trait table, and re-add the old foreign key constraint - - unique_name.create(migrate_engine) - - conn = migrate_engine.connect() - sql = ("INSERT INTO unique_name " - "SELECT trait_type.id, trait_type.desc " - "FROM trait_type") - - conn.execute(sql) - conn.close() - params = {'columns': [trait.c.trait_type_id], - 'refcolumns': [trait_type.c.id]} - - if migrate_engine.name == 'mysql': - params['name'] = "_".join(('fk', 'trait_type', 'id')) - fkey = ForeignKeyConstraint(**params) - fkey.drop() - - # Re-create the old columns in trait - Column("name_id", Integer).create(trait) - Column("t_type", Integer).create(trait) - - # copy data from trait_type.data_type into trait.t_type - query = select([trait_type.c.id, trait_type.c.data_type]) - for key, value in migration.paged(query): - (trait.update().where(trait.c.trait_type_id == key). - values({"t_type": value}).execute()) - - # Move data from name_id column into trait_type_id column - query = select([trait.c.id, trait.c.trait_type_id]) - for key, value in migration.paged(query): - (trait.update().where(trait.c.id == key). - values({"name_id": value}).execute()) - - # Add a foreign key to the unique_name table - params = {'columns': [trait.c.name_id], - 'refcolumns': [unique_name.c.id]} - if migrate_engine.name == 'mysql': - params['name'] = 'trait_ibfk_1' - fkey = ForeignKeyConstraint(**params) - fkey.create() - - trait.c.trait_type_id.drop() - - # Drop the trait_type table. 
It isn't needed anymore - trait_type.drop() diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/023_sqlite_downgrade.sql ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/023_sqlite_downgrade.sql --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/023_sqlite_downgrade.sql 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/023_sqlite_downgrade.sql 1970-01-01 00:00:00.000000000 +0000 @@ -1,29 +0,0 @@ -ALTER TABLE trait RENAME TO trait_orig; - -INSERT INTO unique_name -SELECT id, 'desc' -FROM trait_type; - -CREATE TABLE trait ( - id INTEGER PRIMARY KEY ASC, - t_string VARCHAR(255), - t_int INTEGER, - t_float FLOAT, - t_datetime FLOAT, - t_type INTEGER NOT NULL, - name_id INTEGER NOT NULL, - event_id INTEGER NOT NULL, - FOREIGN KEY (name_id) REFERENCES unique_name (id) - FOREIGN KEY (event_id) REFERENCES event (id) -); - - -INSERT INTO trait -SELECT t.id, t.t_string, t.t_int, t.t_float, t.t_datetime - tt.data_type, t.trait_type_id, t.event_id -FROM trait_orig t -INNER JOIN trait_type tt -ON tt.id = t.trait_type_id - -DROP TABLE trait_orig; -DROP TABLE trait_type; diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/024_event_use_floatingprecision.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/024_event_use_floatingprecision.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/024_event_use_floatingprecision.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/024_event_use_floatingprecision.py 2015-09-03 13:05:55.000000000 +0000 @@ -54,14 +54,3 @@ _convert_data_type(trait, 't_datetime', sa.Float(), models.PreciseTimestamp(), pk_attr='id', index=True) - - -def downgrade(migrate_engine): - if migrate_engine.name == 'mysql': - meta = sa.MetaData(bind=migrate_engine) - event = sa.Table('event', meta, autoload=True) - _convert_data_type(event, 'generated', models.PreciseTimestamp(), - sa.Float(), pk_attr='id', index=True) - trait = sa.Table('trait', meta, autoload=True) - _convert_data_type(trait, 't_datetime', models.PreciseTimestamp(), - sa.Float(), pk_attr='id', index=True) diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/025_alarm_use_floatingprecision.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/025_alarm_use_floatingprecision.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/025_alarm_use_floatingprecision.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/025_alarm_use_floatingprecision.py 2015-09-03 13:05:55.000000000 +0000 @@ -56,13 +56,3 @@ _convert_data_type(table, col_name, sa.DateTime(), models.PreciseTimestamp(), pk_attr=pk_attr) - - -def downgrade(migrate_engine): - if migrate_engine.name == 'mysql': - meta = sa.MetaData(bind=migrate_engine) - for table_name, col_name, pk_attr in to_convert: - table = sa.Table(table_name, meta, autoload=True) - _convert_data_type(table, col_name, models.PreciseTimestamp(), - sa.DateTime(), - pk_attr=pk_attr) diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/026_float_size.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/026_float_size.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/026_float_size.py 2015-07-30 12:14:00.000000000 
+0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/026_float_size.py 2015-09-03 13:05:55.000000000 +0000 @@ -22,11 +22,3 @@ metadata_float.c.value.alter(type=Float(53)) trait = Table('trait', meta, autoload=True) trait.c.t_float.alter(type=Float(53)) - - -def downgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - metadata_float = Table('metadata_float', meta, autoload=True) - metadata_float.c.value.alter(type=Float()) - trait = Table('trait', meta, autoload=True) - trait.c.t_string.alter(type=Float()) diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/027_remove_alarm_fk_constraints.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/027_remove_alarm_fk_constraints.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/027_remove_alarm_fk_constraints.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/027_remove_alarm_fk_constraints.py 2015-09-03 13:05:55.000000000 +0000 @@ -15,7 +15,6 @@ from migrate import ForeignKeyConstraint from sqlalchemy import MetaData, Table -from sqlalchemy.sql.expression import select TABLES = ['user', 'project', 'alarm'] @@ -41,26 +40,3 @@ params['name'] = "_".join(('fk', table_name, column)) fkey = ForeignKeyConstraint(**params) fkey.drop() - - -def downgrade(migrate_engine): - if migrate_engine.name == 'sqlite': - return - meta = MetaData(bind=migrate_engine) - load_tables = dict((table_name, Table(table_name, meta, autoload=True)) - for table_name in TABLES) - for table_name, indexes in INDEXES.items(): - table = load_tables[table_name] - for column, ref_table_name, ref_column_name in indexes: - ref_table = load_tables[ref_table_name] - subq = select([getattr(ref_table.c, ref_column_name)]) - sql_del = table.delete().where( - ~ getattr(table.c, column).in_(subq)) - migrate_engine.execute(sql_del) - - params = {'columns': [table.c[column]], - 'refcolumns': [ref_table.c[ref_column_name]]} - if migrate_engine.name == 'mysql': - params['name'] = "_".join(('fk', table_name, column)) - fkey = ForeignKeyConstraint(**params) - fkey.create() diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/028_alembic_migrations.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/028_alembic_migrations.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/028_alembic_migrations.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/028_alembic_migrations.py 2015-09-03 13:05:55.000000000 +0000 @@ -73,7 +73,7 @@ sa.Index(uniq_name, *cols).drop() -def change_uniq(meta, downgrade=False): +def change_uniq(meta): uniq_name = 'uniq_sourceassoc0meter_id0user_id' columns = ('meter_id', 'user_id') @@ -94,12 +94,9 @@ 'refcolumns': [user.c.id], 'name': 'fk_sourceassoc_user_id'} migrate.ForeignKeyConstraint(**params).drop() - if downgrade: - migrate.UniqueConstraint(*columns, table=sourceassoc, - name=uniq_name).drop() - else: - migrate.UniqueConstraint(*columns, table=sourceassoc, - name=uniq_name).create() + + migrate.UniqueConstraint(*columns, table=sourceassoc, + name=uniq_name).create() if meta.bind.engine.name == 'mysql': params = {'columns': [sourceassoc.c.meter_id], 'refcolumns': [meter.c.id], @@ -139,22 +136,3 @@ change_uniq(meta) delete_alembic(meta) - - -def downgrade(migrate_engine): - meta = sa.MetaData(bind=migrate_engine) - - change_uniq(meta, downgrade=True) - 
- for (engine_names, table_name, uniq_name, - columns, create, uniq, limited) in INDEXES: - if migrate_engine.name in engine_names: - index_cleanup(meta, table_name, uniq_name, - columns, not create, uniq, limited) - - meter = sa.Table('meter', meta, autoload=True) - meter.c.resource_metadata.alter(type=sa.String(5000)) - - alarm = sa.Table('alarm', meta, autoload=True) - repeat_act = sa.Column('repeat_actions', sa.Boolean) - alarm.drop_column(repeat_act) diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/029_sample_recorded_at.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/029_sample_recorded_at.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/029_sample_recorded_at.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/029_sample_recorded_at.py 2015-09-03 13:05:55.000000000 +0000 @@ -22,9 +22,3 @@ c = sqlalchemy.Column('recorded_at', models.PreciseTimestamp(), default=timeutils.utcnow) meter.create_column(c) - - -def downgrade(migrate_engine): - meta = sqlalchemy.MetaData(bind=migrate_engine) - meter = sqlalchemy.Table('meter', meta, autoload=True) - meter.drop_column('recorded_at') diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/030_rename_meter_table.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/030_rename_meter_table.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/030_rename_meter_table.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/030_rename_meter_table.py 2015-09-03 13:05:55.000000000 +0000 @@ -15,7 +15,7 @@ import sqlalchemy as sa -def _handle_meter_indices(meta, downgrade=False): +def _handle_meter_indices(meta): if meta.bind.engine.name == 'sqlite': return @@ -51,17 +51,17 @@ for fk in fk_params: params = fk[0] if meta.bind.engine.name == 'mysql': - params['name'] = fk[2] if downgrade else fk[1] + params['name'] = fk[1] migrate.ForeignKeyConstraint(**params).drop() for meter_ix, sample_ix in indices: - meter_ix.create() if downgrade else meter_ix.drop() - sample_ix.drop() if downgrade else sample_ix.create() + meter_ix.drop() + sample_ix.create() for fk in fk_params: params = fk[0] if meta.bind.engine.name == 'mysql': - params['name'] = fk[1] if downgrade else fk[2] + params['name'] = fk[2] migrate.ForeignKeyConstraint(**params).create() @@ -108,17 +108,3 @@ # re-bind metadata to pick up alter name change meta = sa.MetaData(bind=migrate_engine) _alter_sourceassoc(meta, 'sample', 'idx_ss', True) - - -def downgrade(migrate_engine): - meta = sa.MetaData(bind=migrate_engine) - - sample = sa.Table('sample', meta, autoload=True) - sample.rename('meter') - _handle_meter_indices(meta, True) - - _alter_sourceassoc(meta, 'sample', 'idx_ss') - sourceassoc = sa.Table('sourceassoc', meta, autoload=True) - sourceassoc.c.sample_id.alter(name='meter_id') - meta = sa.MetaData(bind=migrate_engine) - _alter_sourceassoc(meta, 'meter', 'idx_sm', True) diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/031_add_new_meter_table.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/031_add_new_meter_table.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/031_add_new_meter_table.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/031_add_new_meter_table.py 
2015-09-03 13:05:55.000000000 +0000 @@ -17,7 +17,7 @@ import sqlalchemy as sa -def handle_rid_index(meta, downgrade=False): +def handle_rid_index(meta): if meta.bind.engine.name == 'sqlite': return @@ -33,7 +33,7 @@ index = sa.Index('idx_sample_rid_cname', sample.c.resource_id, sample.c.counter_name) - index.create() if downgrade else index.drop() + index.drop() if meta.bind.engine.name == 'mysql': migrate.ForeignKeyConstraint(**params).create() @@ -85,31 +85,3 @@ sample.c.counter_type.drop() sample.c.counter_unit.drop() sample.c.counter_volume.alter(name='volume') - - -def downgrade(migrate_engine): - meta = sa.MetaData(bind=migrate_engine) - sample = sa.Table('sample', meta, autoload=True) - sample.c.volume.alter(name='counter_volume') - sa.Column('counter_name', sa.String(255)).create(sample) - sa.Column('counter_type', sa.String(255)).create(sample) - sa.Column('counter_unit', sa.String(255)).create(sample) - meter = sa.Table('meter', meta, autoload=True) - for row in sa.select([meter]).execute(): - (sample.update(). - where(sample.c.meter_id == row['id']). - values({sample.c.counter_name: row['name'], - sample.c.counter_type: row['type'], - sample.c.counter_unit: row['unit']}).execute()) - - params = {'columns': [sample.c.meter_id], - 'refcolumns': [meter.c.id]} - if migrate_engine.name == 'mysql': - params['name'] = 'fk_sample_meter_id' - if migrate_engine.name != 'sqlite': - migrate.ForeignKeyConstraint(**params).drop() - - handle_rid_index(meta, True) - - sample.c.meter_id.drop() - meter.drop() diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/032_add_alarm_time_constraints.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/032_add_alarm_time_constraints.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/032_add_alarm_time_constraints.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/032_add_alarm_time_constraints.py 2015-09-03 13:05:55.000000000 +0000 @@ -21,10 +21,3 @@ alarm = Table('alarm', meta, autoload=True) time_constraints = Column('time_constraints', Text()) alarm.create_column(time_constraints) - - -def downgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - alarm = Table('alarm', meta, autoload=True) - time_constraints = Column('time_constraints', Text()) - alarm.drop_column(time_constraints) diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/033_alarm_id_rename.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/033_alarm_id_rename.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/033_alarm_id_rename.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/033_alarm_id_rename.py 2015-09-03 13:05:55.000000000 +0000 @@ -19,9 +19,3 @@ meta = MetaData(bind=migrate_engine) users = Table('alarm', meta, autoload=True) users.c.id.alter(name='alarm_id') - - -def downgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - users = Table('alarm', meta, autoload=True) - users.c.alarm_id.alter(name='id') diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/034_drop_dump_tables.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/034_drop_dump_tables.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/034_drop_dump_tables.py 2015-07-30 12:14:00.000000000 +0000 +++ 
ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/034_drop_dump_tables.py 2015-09-03 13:05:55.000000000 +0000 @@ -31,7 +31,3 @@ drop(checkfirst=True)) except sa.exc.NoSuchTableError: pass - - -def downgrade(migrate_engine): - pass diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/035_drop_user_project_tables.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/035_drop_user_project_tables.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/035_drop_user_project_tables.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/035_drop_user_project_tables.py 2015-09-03 13:05:55.000000000 +0000 @@ -13,7 +13,6 @@ from migrate import ForeignKeyConstraint, UniqueConstraint import sqlalchemy as sa -from sqlalchemy.sql.expression import select, Alias, not_, and_, exists TABLES_DROP = ['user', 'project'] TABLES = ['user', 'project', 'sourceassoc', 'sample', @@ -83,93 +82,3 @@ for table_name in TABLES_DROP: sa.Table(table_name, meta, autoload=True).drop() - - -def downgrade(migrate_engine): - meta = sa.MetaData(bind=migrate_engine) - user = sa.Table( - 'user', meta, - sa.Column('id', sa.String(255), primary_key=True), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - project = sa.Table( - 'project', meta, - sa.Column('id', sa.String(255), primary_key=True), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - tables = [project, user] - for i in sorted(tables): - i.create() - - load_tables = dict((table_name, sa.Table(table_name, meta, autoload=True)) - for table_name in TABLES) - - # Restore the sourceassoc columns and constraints - sourceassoc = load_tables['sourceassoc'] - user_id = sa.Column('user_id', sa.String(255)) - project_id = sa.Column('project_id', sa.String(255)) - sourceassoc.create_column(user_id) - sourceassoc.create_column(project_id) - - if migrate_engine.name != 'sqlite': - params = {} - if migrate_engine.name == "mysql": - params = {'name': 'uniq_sourceassoc0sample_id0user_id'} - uc = UniqueConstraint('sample_id', 'user_id', - table=sourceassoc, **params) - uc.create() - - params = {} - if migrate_engine.name == "mysql": - params = {'name': 'uniq_sourceassoc0sample_id'} - uc = UniqueConstraint('sample_id', table=sourceassoc, **params) - uc.drop() - - idx = sa.Index('idx_su', sourceassoc.c.source_id, - sourceassoc.c.user_id) - idx.create(bind=migrate_engine) - idx = sa.Index('idx_sp', sourceassoc.c.source_id, - sourceassoc.c.project_id) - idx.create(bind=migrate_engine) - - # Restore the user/project columns and constraints in all tables - for table_name, indexes in INDEXES.items(): - table = load_tables[table_name] - for column, ref_table_name, ref_column_name in indexes: - ref_table = load_tables[ref_table_name] - c = getattr(Alias(table).c, column) - except_q = exists([getattr(ref_table.c, ref_column_name)]) - q = select([c]).where(and_(c != sa.null(), not_(except_q))) - q = q.distinct() - - # NOTE(sileht): workaround for - # https://bitbucket.org/zzzeek/sqlalchemy/ - # issue/3044/insert-from-select-union_all - q.select = lambda: q - - sql_ins = ref_table.insert().from_select( - [getattr(ref_table.c, ref_column_name)], q) - try: - migrate_engine.execute(sql_ins) - except TypeError: - # from select is empty - pass - - if migrate_engine.name != 'sqlite': - params = {'columns': [table.c[column]], - 'refcolumns': [ref_table.c[ref_column_name]]} - - if (migrate_engine.name == "mysql" and - table_name != 
'alarm_history'): - params['name'] = "_".join(('fk', table_name, column)) - elif (migrate_engine.name == "postgresql" and - table_name == "sample"): - # The fk contains the old table name - params['name'] = "_".join(('meter', column, 'fkey')) - - fkey = ForeignKeyConstraint(**params) - fkey.create() diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/036_drop_sourceassoc_resource_tables.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/036_drop_sourceassoc_resource_tables.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/036_drop_sourceassoc_resource_tables.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/036_drop_sourceassoc_resource_tables.py 2015-09-03 13:05:55.000000000 +0000 @@ -10,8 +10,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. - -from migrate import ForeignKeyConstraint, UniqueConstraint +from migrate import ForeignKeyConstraint import sqlalchemy as sa from ceilometer.storage.sqlalchemy import migration @@ -67,99 +66,3 @@ # drop tables for table_name in DROP_TABLES: sa.Table(table_name, meta, autoload=True).drop() - - -def downgrade(migrate_engine): - meta = sa.MetaData(bind=migrate_engine) - sample = sa.Table('sample', meta, autoload=True) - resource = sa.Table( - 'resource', meta, - sa.Column('id', sa.String(255), primary_key=True), - sa.Column('resource_metadata', sa.Text), - sa.Column('user_id', sa.String(255)), - sa.Column('project_id', sa.String(255)), - sa.Index('ix_resource_project_id', 'project_id'), - sa.Index('ix_resource_user_id', 'user_id'), - sa.Index('resource_user_id_project_id_key', 'user_id', 'project_id'), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - resource.create() - - source = sa.Table( - 'source', meta, - sa.Column('id', sa.String(255), primary_key=True), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - source.create() - - sourceassoc = sa.Table( - 'sourceassoc', meta, - sa.Column('sample_id', sa.Integer), - sa.Column('resource_id', sa.String(255)), - sa.Column('source_id', sa.String(255)), - sa.Index('idx_sr', 'source_id', 'resource_id'), - sa.Index('idx_ss', 'source_id', 'sample_id'), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - sourceassoc.create() - - params = {} - if migrate_engine.name == "mysql": - params = {'name': 'uniq_sourceassoc0sample_id'} - uc = UniqueConstraint('sample_id', table=sourceassoc, **params) - uc.create() - - # reload source/resource tables. 
- # NOTE(gordc): fine to skip non-id attributes in table since - # they're constantly updated and not used by api - for table, col in [(source, 'source_id'), (resource, 'resource_id')]: - q = sa.select([sample.c[col]]).distinct() - # NOTE(sileht): workaround for - # https://bitbucket.org/zzzeek/sqlalchemy/ - # issue/3044/insert-from-select-union_all - q.select = lambda: q - sql_ins = table.insert().from_select([table.c.id], q) - try: - migrate_engine.execute(sql_ins) - except TypeError: - # from select is empty - pass - - # reload sourceassoc tables - for ref_col, col in [('id', 'sample_id'), ('resource_id', 'resource_id')]: - q = sa.select([sample.c.source_id, sample.c[ref_col]]).distinct() - q.select = lambda: q - sql_ins = sourceassoc.insert().from_select([sourceassoc.c.source_id, - sourceassoc.c[col]], q) - try: - migrate_engine.execute(sql_ins) - except TypeError: - # from select is empty - pass - - sample.c.source_id.drop() - - load_tables = dict((table_name, sa.Table(table_name, meta, - autoload=True)) - for table_name in TABLES) - - # add foreign keys - if migrate_engine.name != 'sqlite': - for table_name, indexes in INDEXES.items(): - table = load_tables[table_name] - for column, ref_table_name, ref_column_name in indexes: - ref_table = load_tables[ref_table_name] - params = {'columns': [table.c[column]], - 'refcolumns': [ref_table.c[ref_column_name]]} - fk_table_name = table_name - if migrate_engine.name == "mysql": - params['name'] = "_".join(('fk', fk_table_name, column)) - elif (migrate_engine.name == "postgresql" and - table_name == 'sample'): - # fk was not renamed in script 030 - params['name'] = "_".join(('meter', column, 'fkey')) - fkey = ForeignKeyConstraint(**params) - fkey.create() diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/037_sample_index_cleanup.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/037_sample_index_cleanup.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/037_sample_index_cleanup.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/037_sample_index_cleanup.py 2015-09-03 13:05:55.000000000 +0000 @@ -42,13 +42,3 @@ if index.name in ['fk_sample_meter_id', 'fk_sample_resource_id']: index.drop() sa.Index('ix_sample_meter_id', sample.c.meter_id).create() - - -def downgrade(migrate_engine): - if migrate_engine.name == 'sqlite': - return - meta = sa.MetaData(bind=migrate_engine) - sample = sa.Table('sample', meta, autoload=True) - - with ForeignKeyHandle(meta): - sa.Index('ix_sample_meter_id', sample.c.meter_id).drop() diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/038_normalise_tables.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/038_normalise_tables.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/038_normalise_tables.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/038_normalise_tables.py 2015-09-03 13:05:55.000000000 +0000 @@ -129,43 +129,3 @@ _migrate_meta_tables(meta, sample.c.id, sample.c.resource_id, 'resource.internal_id') - - -def downgrade(migrate_engine): - meta = sa.MetaData(bind=migrate_engine) - sample = sa.Table('sample', meta, autoload=True) - _migrate_meta_tables(meta, sample.c.resource_id, sample.c.id, - 'sample.id') - - sa.Column('user_id', sa.String(255)).create(sample) - sa.Column('project_id', sa.String(255)).create(sample) - 
sa.Column('source_id', sa.String(255)).create(sample) - sa.Column('resource_id_new', sa.String(255)).create(sample) - sa.Column('resource_metadata', sa.Text).create(sample) - resource = sa.Table('resource', meta, autoload=True) - - for row in sa.select([resource]).execute(): - (sample.update(). - where(sample.c.resource_id == row['internal_id']). - values({sample.c.resource_id_new: row['resource_id'], - sample.c.user_id: row['user_id'], - sample.c.project_id: row['project_id'], - sample.c.source_id: row['source_id'], - sample.c.resource_metadata: row['resource_metadata']}) - .execute()) - - if migrate_engine.name != 'sqlite': - params = {'columns': [sample.c.resource_id], - 'refcolumns': [resource.c.internal_id]} - if migrate_engine.name == 'mysql': - params['name'] = 'fk_sample_resource_internal_id' - migrate.ForeignKeyConstraint(**params).drop() - sa.Index('ix_sample_meter_id_resource_id', - sample.c.meter_id, sample.c.resource_id).drop() - sa.Index('ix_sample_resource_id', sample.c.resource_id).drop() - sa.Index('ix_sample_user_id', sample.c.user_id).create() - sa.Index('ix_sample_project_id', sample.c.project_id).create() - - resource.drop() - sample.c.resource_id.drop() - sample.c.resource_id_new.alter(name='resource_id') diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/039_event_floatingprecision_pgsql.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/039_event_floatingprecision_pgsql.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/039_event_floatingprecision_pgsql.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/039_event_floatingprecision_pgsql.py 2015-09-03 13:05:55.000000000 +0000 @@ -54,14 +54,3 @@ _convert_data_type(trait, 't_datetime', sa.Float(), models.PreciseTimestamp(), pk_attr='id', index=True) - - -def downgrade(migrate_engine): - if migrate_engine.name == 'postgresql': - meta = sa.MetaData(bind=migrate_engine) - event = sa.Table('event', meta, autoload=True) - _convert_data_type(event, 'generated', models.PreciseTimestamp(), - sa.Float(), pk_attr='id', index=True) - trait = sa.Table('trait', meta, autoload=True) - _convert_data_type(trait, 't_datetime', models.PreciseTimestamp(), - sa.Float(), pk_attr='id', index=True) diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/040_add_alarm_severity.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/040_add_alarm_severity.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/040_add_alarm_severity.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/040_add_alarm_severity.py 2015-09-03 13:05:55.000000000 +0000 @@ -22,10 +22,3 @@ alarm = Table('alarm', meta, autoload=True) severity = Column('severity', String(50)) alarm.create_column(severity) - - -def downgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - alarm = Table('alarm', meta, autoload=True) - severity = Column('severity', String(50)) - alarm.drop_column(severity) diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/041_expand_event_traits.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/041_expand_event_traits.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/041_expand_event_traits.py 2015-07-30 12:14:00.000000000 +0000 +++ 
ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/041_expand_event_traits.py 2015-09-03 13:05:55.000000000 +0000 @@ -52,56 +52,3 @@ ['event_id', 'key', 'value'], query).execute() trait.drop() trait_type.drop() - - -def downgrade(migrate_engine): - meta = sa.MetaData(bind=migrate_engine) - event = sa.Table('event', meta, autoload=True) - trait_type = sa.Table( - 'trait_type', meta, - sa.Column('id', sa.Integer, primary_key=True), - sa.Column('desc', sa.String(255)), - sa.Column('data_type', sa.Integer), - sa.UniqueConstraint('desc', 'data_type', name='tt_unique'), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - trait_type.create() - trait = sa.Table( - 'trait', meta, - sa.Column('id', sa.Integer, primary_key=True), - sa.Column('trait_type_id', sa.Integer, sa.ForeignKey(trait_type.c.id)), - sa.Column('event_id', sa.Integer, sa.ForeignKey(event.c.id)), - sa.Column('t_string', sa.String(255), nullable=True, default=None), - sa.Column('t_float', sa.Float(53), nullable=True, default=None), - sa.Column('t_int', sa.Integer, nullable=True, default=None), - sa.Column('t_datetime', models.PreciseTimestamp(), nullable=True, - default=None), - sa.Index('ix_trait_t_int', 't_int'), - sa.Index('ix_trait_t_string', 't_string'), - sa.Index('ix_trait_t_datetime', 't_datetime'), - sa.Index('ix_trait_t_float', 't_float'), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - trait.create() - - for t_name, __, __, col_name, type_id in tables: - table = sa.Table(t_name, meta, autoload=True) - trait_type.insert().from_select([trait_type.c.desc, - trait_type.c.data_type], - sa.select([table.c.key, - type_id]) - .distinct()).execute() - trait.insert().from_select([trait.c['event_id'], - trait.c['trait_type_id'], - trait.c[col_name]], - sa.select([table.c.event_id, - trait_type.c.id, - table.c.value]) - .select_from( - table.join( - trait_type, - table.c.key == trait_type.c.desc)) - ).execute() - table.drop() diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/042_add_raw_column.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/042_add_raw_column.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/042_add_raw_column.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/042_add_raw_column.py 2015-09-03 13:05:55.000000000 +0000 @@ -19,10 +19,3 @@ event = sa.Table('event', meta, autoload=True) raw = sa.Column('raw', sa.Text) event.create_column(raw) - - -def downgrade(migrate_engine): - meta = sa.MetaData(bind=migrate_engine) - event = sa.Table('event', meta, autoload=True) - raw = sa.Column('raw', sa.Text) - event.drop_column(raw) diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/045_add_resource_metadatahash_index.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/045_add_resource_metadatahash_index.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/migrate_repo/versions/045_add_resource_metadatahash_index.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/migrate_repo/versions/045_add_resource_metadatahash_index.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,21 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sqlalchemy as sa + + +# Add index on metadata_hash column of resource +def upgrade(migrate_engine): + meta = sa.MetaData(bind=migrate_engine) + resource = sa.Table('resource', meta, autoload=True) + index = sa.Index('ix_resource_metadata_hash', resource.c.metadata_hash) + index.create(bind=migrate_engine) diff -Nru ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/models.py ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/models.py --- ceilometer-5.0.0~b2/ceilometer/storage/sqlalchemy/models.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/storage/sqlalchemy/models.py 2015-09-03 13:05:55.000000000 +0000 @@ -174,6 +174,7 @@ # 'source_id', 'metadata_hash', # name='res_def_unique'), Index('ix_resource_resource_id', 'resource_id'), + Index('ix_resource_metadata_hash', 'metadata_hash'), ) internal_id = Column(Integer, primary_key=True) diff -Nru ceilometer-5.0.0~b2/ceilometer/telemetry/notifications.py ceilometer-5.0.0~b3/ceilometer/telemetry/notifications.py --- ceilometer-5.0.0~b2/ceilometer/telemetry/notifications.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/telemetry/notifications.py 2015-09-03 13:05:55.000000000 +0000 @@ -45,10 +45,10 @@ class TelemetryApiPost(TelemetryBase): """Handle sample from notification bus, which is posted via API.""" - event_types = ['telemetry.api'] + event_types = ['telemetry.api', 'telemetry.polling'] def process_notification(self, message): - samples = message['payload'] + samples = message['payload']['samples'] for sample_dict in samples: yield sample.Sample( name=sample_dict['counter_name'], diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/agent/agentbase.py ceilometer-5.0.0~b3/ceilometer/tests/agent/agentbase.py --- ceilometer-5.0.0~b2/ceilometer/tests/agent/agentbase.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/agent/agentbase.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,634 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright 2013 Intel corp. -# Copyright 2013 eNovance -# Copyright 2014 Red Hat, Inc -# -# Authors: Yunhong Jiang -# Julien Danjou -# Eoghan Glynn -# Nejc Saje -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import abc -import copy -import datetime - -import mock -from oslo_config import fixture as fixture_config -from oslotest import mockpatch -import six -from stevedore import extension - -from ceilometer.agent import plugin_base -from ceilometer import pipeline -from ceilometer import publisher -from ceilometer.publisher import test as test_publisher -from ceilometer import sample -from ceilometer.tests import base -from ceilometer import utils - - -class TestSample(sample.Sample): - def __init__(self, name, type, unit, volume, user_id, project_id, - resource_id, timestamp, resource_metadata, source=None): - super(TestSample, self).__init__(name, type, unit, volume, user_id, - project_id, resource_id, timestamp, - resource_metadata, source) - - def __eq__(self, other): - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return False - - def __ne__(self, other): - return not self.__eq__(other) - - -default_test_data = TestSample( - name='test', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'Pollster'}, -) - - -class TestPollster(plugin_base.PollsterBase): - test_data = default_test_data - discovery = None - - @property - def default_discovery(self): - return self.discovery - - def get_samples(self, manager, cache, resources): - resources = resources or [] - self.samples.append((manager, resources)) - self.resources.extend(resources) - c = copy.deepcopy(self.test_data) - c.resource_metadata['resources'] = resources - return [c] - - -class TestPollsterException(TestPollster): - def get_samples(self, manager, cache, resources): - resources = resources or [] - self.samples.append((manager, resources)) - self.resources.extend(resources) - raise Exception() - - -class TestDiscovery(plugin_base.DiscoveryBase): - def discover(self, manager, param=None): - self.params.append(param) - return self.resources - - -class TestDiscoveryException(plugin_base.DiscoveryBase): - def discover(self, manager, param=None): - self.params.append(param) - raise Exception() - - -@six.add_metaclass(abc.ABCMeta) -class BaseAgentManagerTestCase(base.BaseTestCase): - - class Pollster(TestPollster): - samples = [] - resources = [] - test_data = default_test_data - - class PollsterAnother(TestPollster): - samples = [] - resources = [] - test_data = TestSample( - name='testanother', - type=default_test_data.type, - unit=default_test_data.unit, - volume=default_test_data.volume, - user_id=default_test_data.user_id, - project_id=default_test_data.project_id, - resource_id=default_test_data.resource_id, - timestamp=default_test_data.timestamp, - resource_metadata=default_test_data.resource_metadata) - - class PollsterException(TestPollsterException): - samples = [] - resources = [] - test_data = TestSample( - name='testexception', - type=default_test_data.type, - unit=default_test_data.unit, - volume=default_test_data.volume, - user_id=default_test_data.user_id, - project_id=default_test_data.project_id, - resource_id=default_test_data.resource_id, - timestamp=default_test_data.timestamp, - resource_metadata=default_test_data.resource_metadata) - - class PollsterExceptionAnother(TestPollsterException): - samples = [] - resources = [] - test_data = TestSample( - name='testexceptionanother', - type=default_test_data.type, - unit=default_test_data.unit, - volume=default_test_data.volume, - user_id=default_test_data.user_id, - 
project_id=default_test_data.project_id, - resource_id=default_test_data.resource_id, - timestamp=default_test_data.timestamp, - resource_metadata=default_test_data.resource_metadata) - - class Discovery(TestDiscovery): - params = [] - resources = [] - - class DiscoveryAnother(TestDiscovery): - params = [] - resources = [] - - @property - def group_id(self): - return 'another_group' - - class DiscoveryException(TestDiscoveryException): - params = [] - - def setup_polling(self): - self.mgr.polling_manager = pipeline.PollingManager(self.pipeline_cfg) - - def create_extension_list(self): - return [extension.Extension('test', - None, - None, - self.Pollster(), ), - extension.Extension('testanother', - None, - None, - self.PollsterAnother(), ), - extension.Extension('testexception', - None, - None, - self.PollsterException(), ), - extension.Extension('testexceptionanother', - None, - None, - self.PollsterExceptionAnother(), )] - - def create_discovery_manager(self): - return extension.ExtensionManager.make_test_instance( - [ - extension.Extension( - 'testdiscovery', - None, - None, - self.Discovery(), ), - extension.Extension( - 'testdiscoveryanother', - None, - None, - self.DiscoveryAnother(), ), - extension.Extension( - 'testdiscoveryexception', - None, - None, - self.DiscoveryException(), ), - ], - ) - - @abc.abstractmethod - def create_manager(self): - """Return subclass specific manager.""" - - @mock.patch('ceilometer.pipeline.setup_polling', mock.MagicMock()) - def setUp(self): - super(BaseAgentManagerTestCase, self).setUp() - self.mgr = self.create_manager() - self.mgr.extensions = self.create_extension_list() - self.mgr.partition_coordinator = mock.MagicMock() - fake_subset = lambda _, x: x - p_coord = self.mgr.partition_coordinator - p_coord.extract_my_subset.side_effect = fake_subset - self.mgr.tg = mock.MagicMock() - self.pipeline_cfg = { - 'sources': [{ - 'name': 'test_pipeline', - 'interval': 60, - 'meters': ['test'], - 'resources': ['test://'] if self.source_resources else [], - 'sinks': ['test_sink']}], - 'sinks': [{ - 'name': 'test_sink', - 'transformers': [], - 'publishers': ["test"]}] - } - self.setup_polling() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.CONF.set_override( - 'pipeline_cfg_file', - self.path_get('etc/ceilometer/pipeline.yaml') - ) - self.useFixture(mockpatch.PatchObject( - publisher, 'get_publisher', side_effect=self.get_publisher)) - - @staticmethod - def get_publisher(url, namespace=''): - fake_drivers = {'test://': test_publisher.TestPublisher, - 'new://': test_publisher.TestPublisher, - 'rpc://': test_publisher.TestPublisher} - return fake_drivers[url](url) - - def tearDown(self): - self.Pollster.samples = [] - self.Pollster.discovery = [] - self.PollsterAnother.samples = [] - self.PollsterAnother.discovery = [] - self.PollsterException.samples = [] - self.PollsterException.discovery = [] - self.PollsterExceptionAnother.samples = [] - self.PollsterExceptionAnother.discovery = [] - self.Pollster.resources = [] - self.PollsterAnother.resources = [] - self.PollsterException.resources = [] - self.PollsterExceptionAnother.resources = [] - self.Discovery.params = [] - self.DiscoveryAnother.params = [] - self.DiscoveryException.params = [] - self.Discovery.resources = [] - self.DiscoveryAnother.resources = [] - super(BaseAgentManagerTestCase, self).tearDown() - - @mock.patch('ceilometer.pipeline.setup_polling') - def test_start(self, setup_polling): - self.mgr.join_partitioning_groups = mock.MagicMock() - self.mgr.setup_polling_tasks = 
mock.MagicMock() - self.CONF.set_override('heartbeat', 1.0, group='coordination') - self.mgr.start() - setup_polling.assert_called_once_with() - self.mgr.partition_coordinator.start.assert_called_once_with() - self.mgr.join_partitioning_groups.assert_called_once_with() - self.mgr.setup_polling_tasks.assert_called_once_with() - timer_call = mock.call(1.0, self.mgr.partition_coordinator.heartbeat) - self.assertEqual([timer_call], self.mgr.tg.add_timer.call_args_list) - self.mgr.stop() - self.mgr.partition_coordinator.stop.assert_called_once_with() - - @mock.patch('ceilometer.pipeline.setup_polling') - def test_start_with_pipeline_poller(self, setup_polling): - self.mgr.join_partitioning_groups = mock.MagicMock() - self.mgr.setup_polling_tasks = mock.MagicMock() - - self.CONF.set_override('heartbeat', 1.0, group='coordination') - self.CONF.set_override('refresh_pipeline_cfg', True) - self.CONF.set_override('pipeline_polling_interval', 5) - self.mgr.start() - setup_polling.assert_called_once_with() - self.mgr.partition_coordinator.start.assert_called_once_with() - self.mgr.join_partitioning_groups.assert_called_once_with() - self.mgr.setup_polling_tasks.assert_called_once_with() - timer_call = mock.call(1.0, self.mgr.partition_coordinator.heartbeat) - pipeline_poller_call = mock.call(5, self.mgr.refresh_pipeline) - self.assertEqual([timer_call, pipeline_poller_call], - self.mgr.tg.add_timer.call_args_list) - - def test_join_partitioning_groups(self): - self.mgr.discovery_manager = self.create_discovery_manager() - self.mgr.join_partitioning_groups() - p_coord = self.mgr.partition_coordinator - static_group_ids = [utils.hash_of_set(p['resources']) - for p in self.pipeline_cfg['sources'] - if p['resources']] - expected = [mock.call(self.mgr.construct_group_id(g)) - for g in ['another_group', 'global'] + static_group_ids] - self.assertEqual(len(expected), len(p_coord.join_group.call_args_list)) - for c in expected: - self.assertIn(c, p_coord.join_group.call_args_list) - - def test_setup_polling_tasks(self): - polling_tasks = self.mgr.setup_polling_tasks() - self.assertEqual(1, len(polling_tasks)) - self.assertTrue('test_pipeline' in polling_tasks.keys()) - per_task_resources = polling_tasks['test_pipeline']['task'].resources - self.assertEqual(1, len(per_task_resources)) - self.assertEqual(set(self.pipeline_cfg['sources'][0]['resources']), - set(per_task_resources['test_pipeline-test'].get({}))) - - def test_setup_polling_tasks_multiple_interval(self): - self.pipeline_cfg['sources'].append({ - 'name': 'test_pipeline_1', - 'interval': 10, - 'meters': ['test'], - 'resources': ['test://'] if self.source_resources else [], - 'sinks': ['test_sink'] - }) - self.setup_polling() - polling_tasks = self.mgr.setup_polling_tasks() - self.assertEqual(2, len(polling_tasks)) - self.assertTrue('test_pipeline' in polling_tasks.keys()) - self.assertTrue('test_pipeline_1' in polling_tasks.keys()) - - def test_setup_polling_tasks_mismatch_counter(self): - self.pipeline_cfg['sources'].append({ - 'name': 'test_pipeline_1', - 'interval': 10, - 'meters': ['test_invalid'], - 'resources': ['invalid://'], - 'sinks': ['test_sink'] - }) - polling_tasks = self.mgr.setup_polling_tasks() - self.assertEqual(1, len(polling_tasks)) - self.assertTrue('test_pipeline' in polling_tasks.keys()) - self.assertFalse('test_pipeline_1' in polling_tasks.keys()) - - def test_agent_manager_start(self): - mgr = self.create_manager() - mgr.extensions = self.mgr.extensions - mgr.create_polling_task = mock.MagicMock() - mgr.tg = mock.MagicMock() - 
mgr.start() - self.assertTrue(mgr.tg.add_timer.called) - - def test_manager_exception_persistency(self): - self.pipeline_cfg['sources'].append({ - 'name': 'test_pipeline_1', - 'interval': 60, - 'meters': ['testanother'], - 'sinks': ['test_sink'] - }) - self.setup_polling() - - def _verify_discovery_params(self, expected): - self.assertEqual(expected, self.Discovery.params) - self.assertEqual(expected, self.DiscoveryAnother.params) - self.assertEqual(expected, self.DiscoveryException.params) - - def _do_test_per_pollster_discovery(self, discovered_resources, - static_resources): - self.Pollster.discovery = 'testdiscovery' - self.mgr.discovery_manager = self.create_discovery_manager() - self.Discovery.resources = discovered_resources - self.DiscoveryAnother.resources = [d[::-1] - for d in discovered_resources] - if static_resources: - # just so we can test that static + pre_pipeline amalgamated - # override per_pollster - self.pipeline_cfg['sources'][0]['discovery'] = [ - 'testdiscoveryanother', - 'testdiscoverynonexistent', - 'testdiscoveryexception'] - self.pipeline_cfg['sources'][0]['resources'] = static_resources - self.setup_polling() - polling_tasks = self.mgr.setup_polling_tasks() - self.mgr.interval_task(polling_tasks['test_pipeline']['task']) - if static_resources: - self.assertEqual(set(static_resources + - self.DiscoveryAnother.resources), - set(self.Pollster.resources)) - else: - self.assertEqual(set(self.Discovery.resources), - set(self.Pollster.resources)) - - # Make sure no duplicated resource from discovery - for x in self.Pollster.resources: - self.assertEqual(1, self.Pollster.resources.count(x)) - - def test_per_pollster_discovery(self): - self._do_test_per_pollster_discovery(['discovered_1', 'discovered_2'], - []) - - def test_per_pollster_discovery_overridden_by_per_pipeline_discovery(self): - # ensure static+per_source_discovery overrides per_pollster_discovery - self._do_test_per_pollster_discovery(['discovered_1', 'discovered_2'], - ['static_1', 'static_2']) - - def test_per_pollster_discovery_duplicated(self): - self._do_test_per_pollster_discovery(['dup', 'discovered_1', 'dup'], - []) - - def test_per_pollster_discovery_overridden_by_duplicated_static(self): - self._do_test_per_pollster_discovery(['discovered_1', 'discovered_2'], - ['static_1', 'dup', 'dup']) - - def test_per_pollster_discovery_caching(self): - # ensure single discovery associated with multiple pollsters - # only called once per polling cycle - discovered_resources = ['discovered_1', 'discovered_2'] - self.Pollster.discovery = 'testdiscovery' - self.PollsterAnother.discovery = 'testdiscovery' - self.mgr.discovery_manager = self.create_discovery_manager() - self.Discovery.resources = discovered_resources - self.pipeline_cfg['sources'][0]['meters'].append('testanother') - self.pipeline_cfg['sources'][0]['resources'] = [] - self.setup_polling() - polling_tasks = self.mgr.setup_polling_tasks() - self.mgr.interval_task(polling_tasks['test_pipeline']['task']) - self.assertEqual(1, len(self.Discovery.params)) - self.assertEqual(discovered_resources, self.Pollster.resources) - self.assertEqual(discovered_resources, self.PollsterAnother.resources) - - def _do_test_per_pipeline_discovery(self, - discovered_resources, - static_resources): - self.mgr.discovery_manager = self.create_discovery_manager() - self.Discovery.resources = discovered_resources - self.DiscoveryAnother.resources = [d[::-1] - for d in discovered_resources] - self.pipeline_cfg['sources'][0]['discovery'] = [ - 'testdiscovery', 
'testdiscoveryanother', - 'testdiscoverynonexistent', 'testdiscoveryexception'] - self.pipeline_cfg['sources'][0]['resources'] = static_resources - self.setup_polling() - polling_tasks = self.mgr.setup_polling_tasks() - self.mgr.interval_task(polling_tasks['test_pipeline']['task']) - discovery = self.Discovery.resources + self.DiscoveryAnother.resources - # compare resource lists modulo ordering - self.assertEqual(set(static_resources + discovery), - set(self.Pollster.resources)) - - # Make sure no duplicated resource from discovery - for x in self.Pollster.resources: - self.assertEqual(1, self.Pollster.resources.count(x)) - - def test_per_pipeline_discovery_discovered_only(self): - self._do_test_per_pipeline_discovery(['discovered_1', 'discovered_2'], - []) - - def test_per_pipeline_discovery_static_only(self): - self._do_test_per_pipeline_discovery([], - ['static_1', 'static_2']) - - def test_per_pipeline_discovery_discovered_augmented_by_static(self): - self._do_test_per_pipeline_discovery(['discovered_1', 'discovered_2'], - ['static_1', 'static_2']) - - def test_per_pipeline_discovery_discovered_duplicated_static(self): - self._do_test_per_pipeline_discovery(['discovered_1', 'pud'], - ['dup', 'static_1', 'dup']) - - def test_multiple_pipelines_different_static_resources(self): - # assert that the individual lists of static and discovered resources - # for each pipeline with a common interval are passed to individual - # pollsters matching each pipeline - self.pipeline_cfg['sources'][0]['resources'] = ['test://'] - self.pipeline_cfg['sources'][0]['discovery'] = ['testdiscovery'] - self.pipeline_cfg['sources'].append({ - 'name': 'another_pipeline', - 'interval': 60, - 'meters': ['test'], - 'resources': ['another://'], - 'discovery': ['testdiscoveryanother'], - 'sinks': ['test_sink_new'] - }) - self.mgr.discovery_manager = self.create_discovery_manager() - self.Discovery.resources = ['discovered_1', 'discovered_2'] - self.DiscoveryAnother.resources = ['discovered_3', 'discovered_4'] - self.setup_polling() - polling_tasks = self.mgr.setup_polling_tasks() - self.assertEqual(2, len(polling_tasks)) - self.assertTrue('another_pipeline' in polling_tasks.keys()) - self.assertTrue('test_pipeline' in polling_tasks.keys()) - self.mgr.interval_task(polling_tasks['another_pipeline']['task']) - self.mgr.interval_task(polling_tasks['test_pipeline']['task']) - self.assertEqual([None], self.Discovery.params) - self.assertEqual([None], self.DiscoveryAnother.params) - self.assertEqual(2, len(self.Pollster.samples)) - samples = self.Pollster.samples - test_resources = ['test://', 'discovered_1', 'discovered_2'] - another_resources = ['another://', 'discovered_3', 'discovered_4'] - if samples[0][1] == test_resources: - self.assertEqual(another_resources, samples[1][1]) - elif samples[0][1] == another_resources: - self.assertEqual(test_resources, samples[1][1]) - else: - self.fail('unexpected sample resources %s' % samples) - - def test_multiple_sources_different_discoverers(self): - self.Discovery.resources = ['discovered_1', 'discovered_2'] - self.DiscoveryAnother.resources = ['discovered_3', 'discovered_4'] - sources = [{'name': 'test_source_1', - 'interval': 60, - 'meters': ['test'], - 'discovery': ['testdiscovery'], - 'sinks': ['test_sink_1']}, - {'name': 'test_source_2', - 'interval': 60, - 'meters': ['testanother'], - 'discovery': ['testdiscoveryanother'], - 'sinks': ['test_sink_2']}] - sinks = [{'name': 'test_sink_1', - 'transformers': [], - 'publishers': ['test://']}, - {'name': 'test_sink_2', - 
'transformers': [], - 'publishers': ['test://']}] - self.pipeline_cfg = {'sources': sources, 'sinks': sinks} - self.mgr.discovery_manager = self.create_discovery_manager() - self.setup_polling() - polling_tasks = self.mgr.setup_polling_tasks() - self.assertEqual(2, len(polling_tasks)) - self.assertTrue('test_source_1' in polling_tasks.keys()) - self.assertTrue('test_source_2' in polling_tasks.keys()) - self.mgr.interval_task(polling_tasks['test_source_1']['task']) - self.mgr.interval_task(polling_tasks['test_source_2']['task']) - self.assertEqual(1, len(self.Pollster.samples)) - self.assertEqual(['discovered_1', 'discovered_2'], - self.Pollster.resources) - self.assertEqual(1, len(self.PollsterAnother.samples)) - self.assertEqual(['discovered_3', 'discovered_4'], - self.PollsterAnother.resources) - - def test_multiple_sinks_same_discoverer(self): - self.Discovery.resources = ['discovered_1', 'discovered_2'] - sources = [{'name': 'test_source_1', - 'interval': 60, - 'meters': ['test'], - 'discovery': ['testdiscovery'], - 'sinks': ['test_sink_1', 'test_sink_2']}] - sinks = [{'name': 'test_sink_1', - 'transformers': [], - 'publishers': ['test://']}, - {'name': 'test_sink_2', - 'transformers': [], - 'publishers': ['test://']}] - self.pipeline_cfg = {'sources': sources, 'sinks': sinks} - self.mgr.discovery_manager = self.create_discovery_manager() - self.setup_polling() - polling_tasks = self.mgr.setup_polling_tasks() - self.assertEqual(1, len(polling_tasks)) - self.assertTrue('test_source_1' in polling_tasks.keys()) - self.mgr.interval_task(polling_tasks['test_source_1']['task']) - self.assertEqual(1, len(self.Pollster.samples)) - self.assertEqual(['discovered_1', 'discovered_2'], - self.Pollster.resources) - - def test_discovery_partitioning(self): - self.mgr.discovery_manager = self.create_discovery_manager() - p_coord = self.mgr.partition_coordinator - self.pipeline_cfg['sources'][0]['discovery'] = [ - 'testdiscovery', 'testdiscoveryanother', - 'testdiscoverynonexistent', 'testdiscoveryexception'] - self.pipeline_cfg['sources'][0]['resources'] = [] - self.setup_polling() - polling_tasks = self.mgr.setup_polling_tasks() - self.mgr.interval_task(polling_tasks['test_pipeline']['task']) - expected = [mock.call(self.mgr.construct_group_id(d.obj.group_id), - d.obj.resources) - for d in self.mgr.discovery_manager - if hasattr(d.obj, 'resources')] - self.assertEqual(len(expected), - len(p_coord.extract_my_subset.call_args_list)) - for c in expected: - self.assertIn(c, p_coord.extract_my_subset.call_args_list) - - def test_static_resources_partitioning(self): - p_coord = self.mgr.partition_coordinator - static_resources = ['static_1', 'static_2'] - static_resources2 = ['static_3', 'static_4'] - self.pipeline_cfg['sources'][0]['resources'] = static_resources - self.pipeline_cfg['sources'].append({ - 'name': 'test_pipeline2', - 'interval': 60, - 'meters': ['test', 'test2'], - 'resources': static_resources2, - 'sinks': ['test_sink'] - }) - # have one pipeline without static resources defined - self.pipeline_cfg['sources'].append({ - 'name': 'test_pipeline3', - 'interval': 60, - 'meters': ['test', 'test2'], - 'resources': [], - 'sinks': ['test_sink'] - }) - self.setup_polling() - polling_tasks = self.mgr.setup_polling_tasks() - for meter_name in polling_tasks: - self.mgr.interval_task(polling_tasks[meter_name]['task']) - # Only two groups need to be created, one for each pipeline, - # even though counter test is used twice - expected = [mock.call(self.mgr.construct_group_id( - 
utils.hash_of_set(resources)), - resources) - for resources in [static_resources, - static_resources2]] - self.assertEqual(len(expected), - len(p_coord.extract_my_subset.call_args_list)) - for c in expected: - self.assertIn(c, p_coord.extract_my_subset.call_args_list) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/agent/test_discovery.py ceilometer-5.0.0~b3/ceilometer/tests/agent/test_discovery.py --- ceilometer-5.0.0~b2/ceilometer/tests/agent/test_discovery.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/agent/test_discovery.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,108 +0,0 @@ -# -# Copyright 2014 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for ceilometer/central/manager.py -""" - -import mock -from oslo_config import fixture as fixture_config -from oslotest import base - -from ceilometer.agent.discovery import endpoint -from ceilometer.agent.discovery import localnode -from ceilometer.hardware import discovery as hardware - - -class TestEndpointDiscovery(base.BaseTestCase): - - def setUp(self): - super(TestEndpointDiscovery, self).setUp() - self.discovery = endpoint.EndpointDiscovery() - self.manager = mock.MagicMock() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.CONF.set_override('os_endpoint_type', 'test-endpoint-type', - group='service_credentials') - self.CONF.set_override('os_region_name', 'test-region-name', - group='service_credentials') - - def test_keystone_called(self): - self.discovery.discover(self.manager, param='test-service-type') - expected = [mock.call(service_type='test-service-type', - endpoint_type='test-endpoint-type', - region_name='test-region-name')] - self.assertEqual(expected, - self.manager.keystone.service_catalog.get_urls - .call_args_list) - - def test_keystone_called_no_service_type(self): - self.discovery.discover(self.manager) - expected = [mock.call(service_type=None, - endpoint_type='test-endpoint-type', - region_name='test-region-name')] - self.assertEqual(expected, - self.manager.keystone.service_catalog.get_urls - .call_args_list) - - def test_keystone_called_no_endpoints(self): - self.manager.keystone.service_catalog.get_urls.return_value = [] - self.assertEqual([], self.discovery.discover(self.manager)) - - -class TestLocalnodeDiscovery(base.BaseTestCase): - def setUp(self): - super(TestLocalnodeDiscovery, self).setUp() - self.discovery = localnode.LocalNodeDiscovery() - self.manager = mock.MagicMock() - - def test_lockalnode_discovery(self): - self.assertEqual(['local_host'], self.discovery.discover(self.manager)) - - -class TestHardwareDiscovery(base.BaseTestCase): - class MockInstance(object): - addresses = {'ctlplane': [ - {'addr': '0.0.0.0', - 'OS-EXT-IPS-MAC:mac_addr': '01-23-45-67-89-ab'} - ]} - id = 'resource_id' - image = {'id': 'image_id'} - flavor = {'id': 'flavor_id'} - - expected = { - 'resource_id': 'resource_id', - 'resource_url': 'snmp://ro_snmp_user:password@0.0.0.0', - 'mac_addr': '01-23-45-67-89-ab', - 'image_id': 'image_id', - 'flavor_id': 
'flavor_id', - } - - def setUp(self): - super(TestHardwareDiscovery, self).setUp() - self.discovery = hardware.NodesDiscoveryTripleO() - self.discovery.nova_cli = mock.MagicMock() - self.manager = mock.MagicMock() - - def test_hardware_discovery(self): - self.discovery.nova_cli.instance_get_all.return_value = [ - self.MockInstance()] - resources = self.discovery.discover(self.manager) - self.assertEqual(1, len(resources)) - self.assertEqual(self.expected, resources[0]) - - def test_hardware_discovery_without_flavor(self): - instance = self.MockInstance() - instance.flavor = {} - self.discovery.nova_cli.instance_get_all.return_value = [instance] - resources = self.discovery.discover(self.manager) - self.assertEqual(0, len(resources)) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/agent/test_manager.py ceilometer-5.0.0~b3/ceilometer/tests/agent/test_manager.py --- ceilometer-5.0.0~b2/ceilometer/tests/agent/test_manager.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/agent/test_manager.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,365 +0,0 @@ -# -# Copyright 2013 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for ceilometer/central/manager.py -""" - -import shutil - -import eventlet -import mock -from oslo_service import service as os_service -from oslo_utils import fileutils -from oslo_utils import timeutils -from oslotest import base -from oslotest import mockpatch -import six -from stevedore import extension -import yaml - -from ceilometer.agent import base as agent_base -from ceilometer.agent import manager -from ceilometer.agent import plugin_base -from ceilometer import pipeline -from ceilometer.tests.agent import agentbase - - -class PollingException(Exception): - pass - - -class TestManager(base.BaseTestCase): - - @mock.patch('ceilometer.pipeline.setup_polling', mock.MagicMock()) - def test_load_plugins(self): - mgr = manager.AgentManager() - self.assertIsNotNone(list(mgr.extensions)) - - def test_load_plugins_pollster_list(self): - mgr = manager.AgentManager(pollster_list=['disk.*']) - # currently we do have 26 disk-related pollsters - self.assertEqual(26, len(list(mgr.extensions))) - - def test_load_plugins_no_intersection(self): - # Let's test nothing will be polled if namespace and pollsters - # list have no intersection. - mgr = manager.AgentManager(namespaces=['compute'], - pollster_list=['storage.*']) - self.assertEqual(0, len(list(mgr.extensions))) - - # Test plugin load behavior based on Node Manager pollsters. - # pollster_list is just a filter, so sensor pollsters under 'ipmi' - # namespace would be also instanced. Still need mock __init__ for it. 
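# Illustrative sketch (not part of the upstream diff): the TestManager cases
# above pin down that pollster_list acts as a pure glob filter over extension
# names. A minimal, self-contained illustration of that filtering, assuming
# pollsters are identified by plain name strings rather than stevedore
# extensions:
import fnmatch

def match_pollsters(extension_names, pollster_globs):
    # Keep only the extensions whose name matches at least one glob.
    return [name for name in extension_names
            if any(fnmatch.fnmatch(name, glob) for glob in pollster_globs)]

# A 'compute'-style namespace filtered by 'storage.*' has no intersection:
assert match_pollsters(['cpu', 'cpu_util', 'disk.read.bytes'],
                       ['storage.*']) == []
# while 'disk.*' keeps only the disk-related pollsters:
assert match_pollsters(['cpu', 'disk.read.bytes', 'disk.write.bytes'],
                       ['disk.*']) == ['disk.read.bytes', 'disk.write.bytes']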
- @mock.patch('ceilometer.ipmi.pollsters.node._Base.__init__', - mock.Mock(return_value=None)) - @mock.patch('ceilometer.ipmi.pollsters.sensor.SensorPollster.__init__', - mock.Mock(return_value=None)) - def test_load_normal_plugins(self): - mgr = manager.AgentManager(namespaces=['ipmi'], - pollster_list=['hardware.ipmi.node.*']) - # 8 pollsters for Node Manager - self.assertEqual(8, len(mgr.extensions)) - - # Skip loading pollster upon ExtensionLoadError - @mock.patch('ceilometer.ipmi.pollsters.node._Base.__init__', - mock.Mock(side_effect=plugin_base.ExtensionLoadError)) - @mock.patch('ceilometer.ipmi.pollsters.sensor.SensorPollster.__init__', - mock.Mock(return_value=None)) - @mock.patch('ceilometer.agent.base.LOG') - def test_load_failed_plugins(self, LOG): - # Here we additionally check that namespaces will be converted to the - # list if param was not set as a list. - mgr = manager.AgentManager(namespaces='ipmi', - pollster_list=['hardware.ipmi.node.*']) - # 0 pollsters - self.assertEqual(0, len(mgr.extensions)) - - err_msg = 'Skip loading extension for hardware.ipmi.node.%s' - pollster_names = [ - 'power', 'temperature', 'outlet_temperature', - 'airflow', 'cups', 'cpu_util', 'mem_util', 'io_util'] - calls = [mock.call(err_msg % n) for n in pollster_names] - LOG.error.assert_has_calls(calls=calls, - any_order=True) - - # Skip loading pollster upon ImportError - @mock.patch('ceilometer.ipmi.pollsters.node._Base.__init__', - mock.Mock(side_effect=ImportError)) - @mock.patch('ceilometer.ipmi.pollsters.sensor.SensorPollster.__init__', - mock.Mock(return_value=None)) - def test_import_error_in_plugin(self): - mgr = manager.AgentManager(namespaces=['ipmi'], - pollster_list=['hardware.ipmi.node.*']) - # 0 pollsters - self.assertEqual(0, len(mgr.extensions)) - - # Exceptions other than ExtensionLoadError are propagated - @mock.patch('ceilometer.ipmi.pollsters.node._Base.__init__', - mock.Mock(side_effect=PollingException)) - @mock.patch('ceilometer.ipmi.pollsters.sensor.SensorPollster.__init__', - mock.Mock(return_value=None)) - def test_load_exceptional_plugins(self): - self.assertRaises(PollingException, - manager.AgentManager, - ['ipmi'], - ['hardware.ipmi.node.*']) - - def test_load_plugins_pollster_list_forbidden(self): - manager.cfg.CONF.set_override('backend_url', 'http://', - group='coordination') - self.assertRaises(agent_base.PollsterListForbidden, - manager.AgentManager, - pollster_list=['disk.*']) - manager.cfg.CONF.reset() - - -class TestPollsterKeystone(agentbase.TestPollster): - @plugin_base.check_keystone - def get_samples(self, manager, cache, resources): - func = super(TestPollsterKeystone, self).get_samples - return func(manager=manager, - cache=cache, - resources=resources) - - -class TestPollsterPollingException(agentbase.TestPollster): - polling_failures = 0 - - def get_samples(self, manager, cache, resources): - func = super(TestPollsterPollingException, self).get_samples - sample = func(manager=manager, - cache=cache, - resources=resources) - - # Raise polling exception after 2 times - self.polling_failures += 1 - if self.polling_failures > 2: - raise plugin_base.PollsterPermanentError(resources[0]) - - return sample - - -class TestRunTasks(agentbase.BaseAgentManagerTestCase): - - class PollsterKeystone(TestPollsterKeystone): - samples = [] - resources = [] - test_data = agentbase.TestSample( - name='testkeystone', - type=agentbase.default_test_data.type, - unit=agentbase.default_test_data.unit, - volume=agentbase.default_test_data.volume, - 
user_id=agentbase.default_test_data.user_id, - project_id=agentbase.default_test_data.project_id, - resource_id=agentbase.default_test_data.resource_id, - timestamp=agentbase.default_test_data.timestamp, - resource_metadata=agentbase.default_test_data.resource_metadata) - - class PollsterPollingException(TestPollsterPollingException): - samples = [] - resources = [] - test_data = agentbase.TestSample( - name='testpollingexception', - type=agentbase.default_test_data.type, - unit=agentbase.default_test_data.unit, - volume=agentbase.default_test_data.volume, - user_id=agentbase.default_test_data.user_id, - project_id=agentbase.default_test_data.project_id, - resource_id=agentbase.default_test_data.resource_id, - timestamp=agentbase.default_test_data.timestamp, - resource_metadata=agentbase.default_test_data.resource_metadata) - - @staticmethod - def create_manager(): - return manager.AgentManager() - - def fake_notifier_sample(self, ctxt, event_type, payload): - for m in payload: - del m['message_signature'] - self.notified_samples.append(m) - - def setUp(self): - self.notified_samples = [] - notifier = mock.Mock() - notifier.info.side_effect = self.fake_notifier_sample - self.useFixture(mockpatch.Patch('oslo_messaging.Notifier', - return_value=notifier)) - self.source_resources = True - super(TestRunTasks, self).setUp() - self.useFixture(mockpatch.Patch( - 'keystoneclient.v2_0.client.Client', - return_value=mock.Mock())) - - def tearDown(self): - self.PollsterKeystone.samples = [] - self.PollsterKeystone.resources = [] - self.PollsterPollingException.samples = [] - self.PollsterPollingException.resources = [] - super(TestRunTasks, self).tearDown() - - def create_extension_list(self): - exts = super(TestRunTasks, self).create_extension_list() - exts.extend([extension.Extension('testkeystone', - None, - None, - self.PollsterKeystone(), ), - extension.Extension('testpollingexception', - None, - None, - self.PollsterPollingException(), )]) - return exts - - def test_get_sample_resources(self): - polling_tasks = self.mgr.setup_polling_tasks() - self.mgr.interval_task(polling_tasks['test_pipeline']['task']) - self.assertTrue(self.Pollster.resources) - - def test_when_keystone_fail(self): - """Test for bug 1316532.""" - self.useFixture(mockpatch.Patch( - 'keystoneclient.v2_0.client.Client', - side_effect=Exception)) - self.pipeline_cfg = { - 'sources': [{ - 'name': "test_keystone", - 'interval': 10, - 'meters': ['testkeystone'], - 'resources': ['test://'] if self.source_resources else [], - 'sinks': ['test_sink']}], - 'sinks': [{ - 'name': 'test_sink', - 'transformers': [], - 'publishers': ["test"]}] - } - self.mgr.polling_manager = pipeline.PollingManager(self.pipeline_cfg) - polling_tasks = self.mgr.setup_polling_tasks() - task = polling_tasks['test_keystone']['task'] - self.mgr.interval_task(task) - self.assertFalse(self.PollsterKeystone.samples) - self.assertFalse(self.notified_samples) - - @mock.patch('ceilometer.agent.base.LOG') - def test_polling_exception(self, LOG): - source_name = 'test_pollingexception' - self.pipeline_cfg = { - 'sources': [{ - 'name': source_name, - 'interval': 10, - 'meters': ['testpollingexception'], - 'resources': ['test://'] if self.source_resources else [], - 'sinks': ['test_sink']}], - 'sinks': [{ - 'name': 'test_sink', - 'transformers': [], - 'publishers': ["test"]}] - } - self.mgr.polling_manager = pipeline.PollingManager(self.pipeline_cfg) - polling_task = self.mgr.setup_polling_tasks()[source_name]['task'] - pollster = 
list(polling_task.pollster_matches[source_name])[0] - - # 2 samples after 4 pollings, as pollster got disabled upon exception - for x in range(0, 4): - self.mgr.interval_task(polling_task) - samples = self.notified_samples - self.assertEqual(2, len(samples)) - LOG.error.assert_called_once_with(( - 'Prevent pollster %(name)s for ' - 'polling source %(source)s anymore!') - % ({'name': pollster.name, 'source': source_name})) - - def test_start_with_reloadable_pipeline(self): - - def setup_pipeline_file(pipeline): - if six.PY3: - pipeline = pipeline.encode('utf-8') - - pipeline_cfg_file = fileutils.write_to_tempfile(content=pipeline, - prefix="pipeline", - suffix="yaml") - return pipeline_cfg_file - - self.CONF.set_override('heartbeat', 1.0, group='coordination') - self.CONF.set_override('refresh_pipeline_cfg', True) - self.CONF.set_override('pipeline_polling_interval', 2) - - pipeline = yaml.dump({ - 'sources': [{ - 'name': 'test_pipeline', - 'interval': 1, - 'meters': ['test'], - 'resources': ['test://'] if self.source_resources else [], - 'sinks': ['test_sink']}], - 'sinks': [{ - 'name': 'test_sink', - 'transformers': [], - 'publishers': ["test"]}] - }) - - pipeline_cfg_file = setup_pipeline_file(pipeline) - - self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) - self.mgr.tg = os_service.threadgroup.ThreadGroup(1000) - self.mgr.start() - expected_samples = 1 - start = timeutils.utcnow() - while timeutils.delta_seconds(start, timeutils.utcnow()) < 600: - if len(self.notified_samples) >= expected_samples: - break - eventlet.sleep(0) - - # we only got the old name of meters - for sample in self.notified_samples: - self.assertEqual('test', sample['counter_name']) - self.assertEqual(1, sample['counter_volume']) - self.assertEqual('test_run_tasks', sample['resource_id']) - - # Modify the collection targets - pipeline = yaml.dump({ - 'sources': [{ - 'name': 'test_pipeline', - 'interval': 1, - 'meters': ['testanother'], - 'resources': ['test://'] if self.source_resources else [], - 'sinks': ['test_sink']}], - 'sinks': [{ - 'name': 'test_sink', - 'transformers': [], - 'publishers': ["test"]}] - }) - - updated_pipeline_cfg_file = setup_pipeline_file(pipeline) - - # Move/re-name the updated pipeline file to the original pipeline - # file path as recorded in oslo config - shutil.move(updated_pipeline_cfg_file, pipeline_cfg_file) - - # Random sleep to let the pipeline poller complete the reloading - eventlet.sleep(3) - - # Flush notified samples to test only new, nothing latent on - # fake message bus. - self.notified_samples = [] - - expected_samples = 1 - start = timeutils.utcnow() - while timeutils.delta_seconds(start, timeutils.utcnow()) < 600: - if len(self.notified_samples) >= expected_samples: - break - eventlet.sleep(0) - - # we only got the new name of meters - for sample in self.notified_samples: - self.assertEqual('testanother', sample['counter_name']) - self.assertEqual(1, sample['counter_volume']) - self.assertEqual('test_run_tasks', sample['resource_id']) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/agent/test_plugin.py ceilometer-5.0.0~b3/ceilometer/tests/agent/test_plugin.py --- ceilometer-5.0.0~b2/ceilometer/tests/agent/test_plugin.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/agent/test_plugin.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,58 +0,0 @@ -# -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_config import fixture as fixture_config -from oslotest import base - -from ceilometer.agent import plugin_base - - -class NotificationBaseTestCase(base.BaseTestCase): - def setUp(self): - super(NotificationBaseTestCase, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - - class FakePlugin(plugin_base.NotificationBase): - event_types = ['compute.*'] - - def process_notification(self, message): - pass - - def get_targets(self, conf): - pass - - def test_plugin_info(self): - plugin = self.FakePlugin(mock.Mock()) - plugin.to_samples_and_publish = mock.Mock() - ctxt = {'user_id': 'fake_user_id', 'project_id': 'fake_project_id'} - publisher_id = 'fake.publisher_id' - event_type = 'fake.event' - payload = {'foo': 'bar'} - metadata = {'message_id': '3577a84f-29ec-4904-9566-12c52289c2e8', - 'timestamp': '2015-06-1909:19:35.786893'} - plugin.info(ctxt, publisher_id, event_type, payload, metadata) - notification = { - 'priority': 'info', - 'event_type': 'fake.event', - 'timestamp': '2015-06-1909:19:35.786893', - '_context_user_id': 'fake_user_id', - '_context_project_id': 'fake_project_id', - 'publisher_id': 'fake.publisher_id', - 'payload': {'foo': 'bar'}, - 'message_id': '3577a84f-29ec-4904-9566-12c52289c2e8' - } - plugin.to_samples_and_publish.assert_called_with(mock.ANY, - notification) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/alarm/evaluator/base.py ceilometer-5.0.0~b3/ceilometer/tests/alarm/evaluator/base.py --- ceilometer-5.0.0~b2/ceilometer/tests/alarm/evaluator/base.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/alarm/evaluator/base.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,43 +0,0 @@ -# -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
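# An illustrative reconstruction (not upstream code) of the envelope shape
# asserted by the deleted test_plugin_info above: context keys are folded in
# with a '_context_' prefix, and the metadata dict (message_id, timestamp) is
# merged at the top level alongside priority, publisher_id, event_type and
# payload.
def build_notification(ctxt, publisher_id, event_type, payload, metadata,
                       priority='info'):
    notification = {
        'priority': priority,
        'publisher_id': publisher_id,
        'event_type': event_type,
        'payload': payload,
    }
    notification.update(metadata)  # message_id and timestamp
    notification.update({'_context_%s' % k: v for k, v in ctxt.items()})
    return notification

assert build_notification(
    {'user_id': 'fake_user_id', 'project_id': 'fake_project_id'},
    'fake.publisher_id', 'fake.event', {'foo': 'bar'},
    {'message_id': '3577a84f-29ec-4904-9566-12c52289c2e8',
     'timestamp': '2015-06-1909:19:35.786893'})['_context_user_id'] == \
    'fake_user_id'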
-"""Base class for tests in ceilometer/alarm/evaluator/ -""" -import mock -from oslotest import base - - -class TestEvaluatorBase(base.BaseTestCase): - def setUp(self): - super(TestEvaluatorBase, self).setUp() - self.api_client = mock.Mock() - self.notifier = mock.MagicMock() - self.evaluator = self.EVALUATOR(self.notifier) - self.prepare_alarms() - - @staticmethod - def prepare_alarms(self): - self.alarms = [] - - def _evaluate_all_alarms(self): - for alarm in self.alarms: - self.evaluator.evaluate(alarm) - - def _set_all_alarms(self, state): - for alarm in self.alarms: - alarm.state = state - - def _assert_all_alarms(self, state): - for alarm in self.alarms: - self.assertEqual(state, alarm.state) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/alarm/evaluator/test_base.py ceilometer-5.0.0~b3/ceilometer/tests/alarm/evaluator/test_base.py --- ceilometer-5.0.0~b2/ceilometer/tests/alarm/evaluator/test_base.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/alarm/evaluator/test_base.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,156 +0,0 @@ -# -# Copyright 2013 IBM Corp -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""class for tests in ceilometer/alarm/evaluator/__init__.py -""" -import datetime - -import mock -from oslo_utils import timeutils -from oslotest import base - -from ceilometer.alarm import evaluator - - -class TestEvaluatorBaseClass(base.BaseTestCase): - def setUp(self): - super(TestEvaluatorBaseClass, self).setUp() - self.called = False - - def _notify(self, alarm, previous, reason, details): - self.called = True - raise Exception('Boom!') - - def test_base_refresh(self): - notifier = mock.MagicMock() - notifier.notify = self._notify - - class EvaluatorSub(evaluator.Evaluator): - def evaluate(self, alarm): - pass - - ev = EvaluatorSub(notifier) - ev.api_client = mock.MagicMock() - ev._refresh(mock.MagicMock(), mock.MagicMock(), - mock.MagicMock(), mock.MagicMock()) - self.assertTrue(self.called) - - @mock.patch.object(timeutils, 'utcnow') - def test_base_time_constraints(self, mock_utcnow): - alarm = mock.MagicMock() - alarm.time_constraints = [ - {'name': 'test', - 'description': 'test', - 'start': '0 11 * * *', # daily at 11:00 - 'duration': 10800, # 3 hours - 'timezone': ''}, - {'name': 'test2', - 'description': 'test', - 'start': '0 23 * * *', # daily at 23:00 - 'duration': 10800, # 3 hours - 'timezone': ''}, - ] - cls = evaluator.Evaluator - mock_utcnow.return_value = datetime.datetime(2014, 1, 1, 12, 0, 0) - self.assertTrue(cls.within_time_constraint(alarm)) - - mock_utcnow.return_value = datetime.datetime(2014, 1, 2, 1, 0, 0) - self.assertTrue(cls.within_time_constraint(alarm)) - - mock_utcnow.return_value = datetime.datetime(2014, 1, 2, 5, 0, 0) - self.assertFalse(cls.within_time_constraint(alarm)) - - @mock.patch.object(timeutils, 'utcnow') - def test_base_time_constraints_by_month(self, mock_utcnow): - alarm = mock.MagicMock() - alarm.time_constraints = [ - {'name': 'test', - 'description': 'test', - 'start': '0 11 31 1,3,5,7,8,10,12 *', # 
every 31st at 11:00 - 'duration': 10800, # 3 hours - 'timezone': ''}, - ] - cls = evaluator.Evaluator - mock_utcnow.return_value = datetime.datetime(2015, 3, 31, 11, 30, 0) - self.assertTrue(cls.within_time_constraint(alarm)) - - @mock.patch.object(timeutils, 'utcnow') - def test_base_time_constraints_complex(self, mock_utcnow): - alarm = mock.MagicMock() - alarm.time_constraints = [ - {'name': 'test', - 'description': 'test', - # Every consecutive 2 minutes (from the 3rd to the 57th) past - # every consecutive 2 hours (between 3:00 and 12:59) on every day. - 'start': '3-57/2 3-12/2 * * *', - 'duration': 30, - 'timezone': ''} - ] - cls = evaluator.Evaluator - - # test minutes inside - mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 3, 0) - self.assertTrue(cls.within_time_constraint(alarm)) - mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 31, 0) - self.assertTrue(cls.within_time_constraint(alarm)) - mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 57, 0) - self.assertTrue(cls.within_time_constraint(alarm)) - - # test minutes outside - mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 2, 0) - self.assertFalse(cls.within_time_constraint(alarm)) - mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 4, 0) - self.assertFalse(cls.within_time_constraint(alarm)) - mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 58, 0) - self.assertFalse(cls.within_time_constraint(alarm)) - - # test hours inside - mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 31, 0) - self.assertTrue(cls.within_time_constraint(alarm)) - mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 5, 31, 0) - self.assertTrue(cls.within_time_constraint(alarm)) - mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 11, 31, 0) - self.assertTrue(cls.within_time_constraint(alarm)) - - # test hours outside - mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 1, 31, 0) - self.assertFalse(cls.within_time_constraint(alarm)) - mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 4, 31, 0) - self.assertFalse(cls.within_time_constraint(alarm)) - mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 12, 31, 0) - self.assertFalse(cls.within_time_constraint(alarm)) - - @mock.patch.object(timeutils, 'utcnow') - def test_base_time_constraints_timezone(self, mock_utcnow): - alarm = mock.MagicMock() - cls = evaluator.Evaluator - mock_utcnow.return_value = datetime.datetime(2014, 1, 1, 11, 0, 0) - - alarm.time_constraints = [ - {'name': 'test', - 'description': 'test', - 'start': '0 11 * * *', # daily at 11:00 - 'duration': 10800, # 3 hours - 'timezone': 'Europe/Ljubljana'} - ] - self.assertTrue(cls.within_time_constraint(alarm)) - - alarm.time_constraints = [ - {'name': 'test2', - 'description': 'test2', - 'start': '0 11 * * *', # daily at 11:00 - 'duration': 10800, # 3 hours - 'timezone': 'US/Eastern'} - ] - self.assertFalse(cls.within_time_constraint(alarm)) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/alarm/evaluator/test_combination.py ceilometer-5.0.0~b3/ceilometer/tests/alarm/evaluator/test_combination.py --- ceilometer-5.0.0~b2/ceilometer/tests/alarm/evaluator/test_combination.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/alarm/evaluator/test_combination.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,408 +0,0 @@ -# -# Copyright 2013 eNovance -# -# Authors: Mehdi Abaakouk -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Tests for ceilometer/alarm/evaluator/combination.py -""" - -import datetime -import uuid - -from ceilometerclient import exc -from ceilometerclient.v2 import alarms -import mock -from oslo_utils import timeutils -import pytz - -from ceilometer.alarm.evaluator import combination -from ceilometer.alarm.storage import models -from ceilometer.tests.alarm.evaluator import base -from ceilometer.tests import constants - - -class TestEvaluate(base.TestEvaluatorBase): - EVALUATOR = combination.CombinationEvaluator - - def prepare_alarms(self): - self.alarms = [ - models.Alarm(name='or-alarm', - description='the or alarm', - type='combination', - enabled=True, - user_id='foobar', - project_id='snafu', - alarm_id=str(uuid.uuid4()), - state='insufficient data', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - insufficient_data_actions=[], - ok_actions=[], - alarm_actions=[], - repeat_actions=False, - time_constraints=[], - rule=dict( - alarm_ids=[ - '9cfc3e51-2ff1-4b1d-ac01-c1bd4c6d0d1e', - '1d441595-d069-4e05-95ab-8693ba6a8302'], - operator='or', - ), - severity='critical'), - models.Alarm(name='and-alarm', - description='the and alarm', - type='combination', - enabled=True, - user_id='foobar', - project_id='snafu', - alarm_id=str(uuid.uuid4()), - state='insufficient data', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - insufficient_data_actions=[], - ok_actions=[], - alarm_actions=[], - repeat_actions=False, - time_constraints=[], - rule=dict( - alarm_ids=[ - 'b82734f4-9d06-48f3-8a86-fa59a0c99dc8', - '15a700e5-2fe8-4b3d-8c55-9e92831f6a2b'], - operator='and', - ), - severity='critical') - ] - - @staticmethod - def _get_alarm(state): - return alarms.Alarm(None, {'state': state}) - - @staticmethod - def _reason_data(alarm_ids): - return {'type': 'combination', 'alarm_ids': alarm_ids} - - def _combination_transition_reason(self, state, alarm_ids1, alarm_ids2): - return ([('Transition to %(state)s due to alarms %(alarm_ids)s' - ' in state %(state)s') - % {'state': state, 'alarm_ids': ",".join(alarm_ids1)}, - ('Transition to %(state)s due to alarms %(alarm_ids)s' - ' in state %(state)s') - % {'state': state, 'alarm_ids': ",".join(alarm_ids2)}], - [self._reason_data(alarm_ids1), self._reason_data(alarm_ids2)]) - - def _combination_remaining_reason(self, state, alarm_ids1, alarm_ids2): - return ([('Remaining as %(state)s due to alarms %(alarm_ids)s' - ' in state %(state)s') - % {'state': state, 'alarm_ids': ",".join(alarm_ids1)}, - ('Remaining as %(state)s due to alarms %(alarm_ids)s' - ' in state %(state)s') - % {'state': state, 'alarm_ids': ",".join(alarm_ids2)}], - [self._reason_data(alarm_ids1), self._reason_data(alarm_ids2)]) - - def test_retry_transient_api_failure(self): - with mock.patch('ceilometerclient.client.get_client', - return_value=self.api_client): - broken = exc.CommunicationError(message='broken') - self.api_client.alarms.get.side_effect = [ - broken, - broken, - broken, - broken, - self._get_alarm('ok'), - self._get_alarm('ok'), - self._get_alarm('ok'), - self._get_alarm('ok'), - ] - self._evaluate_all_alarms() 
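# Illustrative sketch (not upstream code) of the behaviour exercised at this
# point in the deleted retry test: a transient CommunicationError while
# fetching an underlying alarm's state is treated as "unknown", which drives
# the combination alarm to 'insufficient data' until a later cycle succeeds.
def fetch_state(client, alarm_id):
    try:
        return client.alarms.get(alarm_id).state
    except Exception:  # e.g. a transient communication error
        return None    # unknown state

def combine(states, op='or'):
    if None in states:
        return 'insufficient data'
    test = any if op == 'or' else all
    return 'alarm' if test(s == 'alarm' for s in states) else 'ok'

assert combine([None, 'ok']) == 'insufficient data'
assert combine(['ok', 'alarm'], op='or') == 'alarm'
assert combine(['ok', 'alarm'], op='and') == 'ok'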
- self._assert_all_alarms('insufficient data') - self._evaluate_all_alarms() - self._assert_all_alarms('ok') - - def test_simple_insufficient(self): - self._set_all_alarms('ok') - with mock.patch('ceilometerclient.client.get_client', - return_value=self.api_client): - broken = exc.CommunicationError(message='broken') - self.api_client.alarms.get.side_effect = broken - self._evaluate_all_alarms() - self._assert_all_alarms('insufficient data') - expected = [mock.call(alarm.alarm_id, state='insufficient data') - for alarm in self.alarms] - update_calls = self.api_client.alarms.set_state.call_args_list - self.assertEqual(expected, update_calls) - expected = [mock.call( - alarm, - 'ok', - ('Alarms %s are in unknown state' % - (",".join(alarm.rule['alarm_ids']))), - self._reason_data(alarm.rule['alarm_ids'])) - for alarm in self.alarms] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - def test_to_ok_with_all_ok(self): - self._set_all_alarms('insufficient data') - with mock.patch('ceilometerclient.client.get_client', - return_value=self.api_client): - self.api_client.alarms.get.side_effect = [ - self._get_alarm('ok'), - self._get_alarm('ok'), - self._get_alarm('ok'), - self._get_alarm('ok'), - ] - self._evaluate_all_alarms() - expected = [mock.call(alarm.alarm_id, state='ok') - for alarm in self.alarms] - update_calls = self.api_client.alarms.set_state.call_args_list - self.assertEqual(expected, update_calls) - reasons, reason_datas = self._combination_transition_reason( - 'ok', - self.alarms[0].rule['alarm_ids'], - self.alarms[1].rule['alarm_ids']) - expected = [mock.call(alarm, 'insufficient data', - reason, reason_data) - for alarm, reason, reason_data - in zip(self.alarms, reasons, reason_datas)] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - def test_to_ok_with_one_alarm(self): - self._set_all_alarms('alarm') - with mock.patch('ceilometerclient.client.get_client', - return_value=self.api_client): - self.api_client.alarms.get.side_effect = [ - self._get_alarm('ok'), - self._get_alarm('ok'), - self._get_alarm('alarm'), - self._get_alarm('ok'), - ] - self._evaluate_all_alarms() - expected = [mock.call(alarm.alarm_id, state='ok') - for alarm in self.alarms] - update_calls = self.api_client.alarms.set_state.call_args_list - self.assertEqual(expected, update_calls) - reasons, reason_datas = self._combination_transition_reason( - 'ok', - self.alarms[0].rule['alarm_ids'], - [self.alarms[1].rule['alarm_ids'][1]]) - expected = [mock.call(alarm, 'alarm', reason, reason_data) - for alarm, reason, reason_data - in zip(self.alarms, reasons, reason_datas)] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - def test_to_alarm_with_all_alarm(self): - self._set_all_alarms('ok') - with mock.patch('ceilometerclient.client.get_client', - return_value=self.api_client): - self.api_client.alarms.get.side_effect = [ - self._get_alarm('alarm'), - self._get_alarm('alarm'), - self._get_alarm('alarm'), - self._get_alarm('alarm'), - ] - self._evaluate_all_alarms() - expected = [mock.call(alarm.alarm_id, state='alarm') - for alarm in self.alarms] - update_calls = self.api_client.alarms.set_state.call_args_list - self.assertEqual(expected, update_calls) - reasons, reason_datas = self._combination_transition_reason( - 'alarm', - self.alarms[0].rule['alarm_ids'], - self.alarms[1].rule['alarm_ids']) - expected = [mock.call(alarm, 'ok', reason, reason_data) - for alarm, reason, reason_data - in zip(self.alarms, reasons, reason_datas)] - self.assertEqual(expected, 
self.notifier.notify.call_args_list) - - def test_to_alarm_with_one_insufficient_data(self): - self._set_all_alarms('ok') - with mock.patch('ceilometerclient.client.get_client', - return_value=self.api_client): - self.api_client.alarms.get.side_effect = [ - self._get_alarm('insufficient data'), - self._get_alarm('alarm'), - self._get_alarm('alarm'), - self._get_alarm('alarm'), - ] - self._evaluate_all_alarms() - expected = [mock.call(alarm.alarm_id, state='alarm') - for alarm in self.alarms] - update_calls = self.api_client.alarms.set_state.call_args_list - self.assertEqual(expected, update_calls) - reasons, reason_datas = self._combination_transition_reason( - 'alarm', - [self.alarms[0].rule['alarm_ids'][1]], - self.alarms[1].rule['alarm_ids']) - expected = [mock.call(alarm, 'ok', reason, reason_data) - for alarm, reason, reason_data - in zip(self.alarms, reasons, reason_datas)] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - def test_to_alarm_with_one_ok(self): - self._set_all_alarms('ok') - with mock.patch('ceilometerclient.client.get_client', - return_value=self.api_client): - self.api_client.alarms.get.side_effect = [ - self._get_alarm('ok'), - self._get_alarm('alarm'), - self._get_alarm('alarm'), - self._get_alarm('alarm'), - ] - self._evaluate_all_alarms() - expected = [mock.call(alarm.alarm_id, state='alarm') - for alarm in self.alarms] - update_calls = self.api_client.alarms.set_state.call_args_list - self.assertEqual(expected, update_calls) - reasons, reason_datas = self._combination_transition_reason( - 'alarm', - [self.alarms[0].rule['alarm_ids'][1]], - self.alarms[1].rule['alarm_ids']) - expected = [mock.call(alarm, 'ok', reason, reason_data) - for alarm, reason, reason_data - in zip(self.alarms, reasons, reason_datas)] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - def test_to_unknown(self): - self._set_all_alarms('ok') - with mock.patch('ceilometerclient.client.get_client', - return_value=self.api_client): - broken = exc.CommunicationError(message='broken') - self.api_client.alarms.get.side_effect = [ - broken, - self._get_alarm('ok'), - self._get_alarm('insufficient data'), - self._get_alarm('ok'), - ] - self._evaluate_all_alarms() - expected = [mock.call(alarm.alarm_id, state='insufficient data') - for alarm in self.alarms] - update_calls = self.api_client.alarms.set_state.call_args_list - self.assertEqual(expected, update_calls) - reasons = ['Alarms %s are in unknown state' - % self.alarms[0].rule['alarm_ids'][0], - 'Alarms %s are in unknown state' - % self.alarms[1].rule['alarm_ids'][0]] - reason_datas = [ - self._reason_data([self.alarms[0].rule['alarm_ids'][0]]), - self._reason_data([self.alarms[1].rule['alarm_ids'][0]])] - expected = [mock.call(alarm, 'ok', reason, reason_data) - for alarm, reason, reason_data - in zip(self.alarms, reasons, reason_datas)] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - def test_no_state_change(self): - self._set_all_alarms('ok') - with mock.patch('ceilometerclient.client.get_client', - return_value=self.api_client): - self.api_client.alarms.get.side_effect = [ - self._get_alarm('ok'), - self._get_alarm('ok'), - self._get_alarm('ok'), - self._get_alarm('ok'), - ] - self._evaluate_all_alarms() - update_calls = self.api_client.alarms.set_state.call_args_list - self.assertEqual([], update_calls) - self.assertEqual([], self.notifier.notify.call_args_list) - - def test_no_state_change_and_repeat_actions(self): - self.alarms[0].repeat_actions = True - 
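# Illustrative sketch (not upstream code) of the repeat_actions contract the
# surrounding no-state-change tests pin down: with no state change the alarm
# state is never re-written through the API, but alarms flagged with
# repeat_actions still re-notify with a "Remaining as ..." reason.
def maybe_notify(alarm, new_state, set_state, notify):
    if new_state != alarm.state:
        set_state(alarm.alarm_id, state=new_state)
        notify(alarm, 'Transition to %s' % new_state)
    elif alarm.repeat_actions:
        notify(alarm, 'Remaining as %s' % new_state)
    # otherwise: neither a state update nor a notification is emitted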
self.alarms[1].repeat_actions = True - self._set_all_alarms('ok') - with mock.patch('ceilometerclient.client.get_client', - return_value=self.api_client): - self.api_client.alarms.get.side_effect = [ - self._get_alarm('ok'), - self._get_alarm('ok'), - self._get_alarm('ok'), - self._get_alarm('ok'), - ] - self._evaluate_all_alarms() - update_calls = self.api_client.alarms.set_state.call_args_list - self.assertEqual([], update_calls) - reasons, reason_datas = self._combination_remaining_reason( - 'ok', - self.alarms[0].rule['alarm_ids'], - self.alarms[1].rule['alarm_ids']) - expected = [mock.call(alarm, 'ok', reason, reason_data) - for alarm, reason, reason_data - in zip(self.alarms, reasons, reason_datas)] - - self.assertEqual(expected, self.notifier.notify.call_args_list) - - @mock.patch.object(timeutils, 'utcnow') - def test_state_change_inside_time_constraint(self, mock_utcnow): - self._set_all_alarms('insufficient data') - self.alarms[0].time_constraints = [ - {'name': 'test', - 'description': 'test', - 'start': '0 11 * * *', # daily at 11:00 - 'duration': 10800, # 3 hours - 'timezone': 'Europe/Ljubljana'} - ] - self.alarms[1].time_constraints = self.alarms[0].time_constraints - dt = datetime.datetime(2014, 1, 1, 12, 0, 0, - tzinfo=pytz.timezone('Europe/Ljubljana')) - mock_utcnow.return_value = dt.astimezone(pytz.UTC) - with mock.patch('ceilometerclient.client.get_client', - return_value=self.api_client): - self.api_client.alarms.get.side_effect = [ - self._get_alarm('ok'), - self._get_alarm('ok'), - self._get_alarm('ok'), - self._get_alarm('ok'), - ] - self._evaluate_all_alarms() - expected = [mock.call(alarm.alarm_id, state='ok') - for alarm in self.alarms] - update_calls = self.api_client.alarms.set_state.call_args_list - self.assertEqual(expected, update_calls, - "Alarm should change state if the current " - "time is inside its time constraint.") - reasons, reason_datas = self._combination_transition_reason( - 'ok', - self.alarms[0].rule['alarm_ids'], - self.alarms[1].rule['alarm_ids']) - expected = [mock.call(alarm, 'insufficient data', - reason, reason_data) - for alarm, reason, reason_data - in zip(self.alarms, reasons, reason_datas)] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - @mock.patch.object(timeutils, 'utcnow') - def test_no_state_change_outside_time_constraint(self, mock_utcnow): - self._set_all_alarms('insufficient data') - self.alarms[0].time_constraints = [ - {'name': 'test', - 'description': 'test', - 'start': '0 11 * * *', # daily at 11:00 - 'duration': 10800, # 3 hours - 'timezone': 'Europe/Ljubljana'} - ] - self.alarms[1].time_constraints = self.alarms[0].time_constraints - dt = datetime.datetime(2014, 1, 1, 15, 0, 0, - tzinfo=pytz.timezone('Europe/Ljubljana')) - mock_utcnow.return_value = dt.astimezone(pytz.UTC) - with mock.patch('ceilometerclient.client.get_client', - return_value=self.api_client): - self.api_client.alarms.get.side_effect = [ - self._get_alarm('ok'), - self._get_alarm('ok'), - self._get_alarm('ok'), - self._get_alarm('ok'), - ] - self._evaluate_all_alarms() - update_calls = self.api_client.alarms.set_state.call_args_list - self.assertEqual([], update_calls, - "Alarm should not change state if the current " - " time is outside its time constraint.") - self.assertEqual([], self.notifier.notify.call_args_list) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/alarm/evaluator/test_gnocchi.py ceilometer-5.0.0~b3/ceilometer/tests/alarm/evaluator/test_gnocchi.py --- 
ceilometer-5.0.0~b2/ceilometer/tests/alarm/evaluator/test_gnocchi.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/alarm/evaluator/test_gnocchi.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,438 +0,0 @@ -# -# Copyright 2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import unittest -import uuid - -import mock -from oslo_serialization import jsonutils -from oslo_utils import timeutils -from oslotest import mockpatch -import pytz -import six -from six import moves - -from ceilometer.alarm.evaluator import gnocchi -from ceilometer.alarm.storage import models -from ceilometer.tests.alarm.evaluator import base -from ceilometer.tests import constants - - -class FakeResponse(object): - def __init__(self, code, data): - if code == 200: - self.values = [d[2] for d in data] - else: - self.values = [] - self.text = jsonutils.dumps(data) - self.status_code = code - - -class TestGnocchiThresholdEvaluate(base.TestEvaluatorBase): - EVALUATOR = gnocchi.GnocchiThresholdEvaluator - - def setUp(self): - ks_client = mock.Mock(auth_token='fake_token') - ks_client.users.find.return_value = 'gnocchi' - self.useFixture(mockpatch.Patch( - 'keystoneclient.v2_0.client.Client', - return_value=ks_client)) - - super(TestGnocchiThresholdEvaluate, self).setUp() - - self.useFixture(mockpatch.Patch('ceilometerclient.client.get_client', - return_value=self.api_client)) - self.requests = self.useFixture(mockpatch.Patch( - 'ceilometer.alarm.evaluator.gnocchi.requests')).mock - - def prepare_alarms(self): - self.alarms = [ - models.Alarm(name='instance_running_hot', - description='instance_running_hot', - type='gnocchi_resources_threshold', - enabled=True, - user_id='foobar', - project_id='snafu', - alarm_id=str(uuid.uuid4()), - state='insufficient data', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - insufficient_data_actions=[], - ok_actions=[], - alarm_actions=[], - repeat_actions=False, - time_constraints=[], - rule=dict( - comparison_operator='gt', - threshold=80.0, - evaluation_periods=5, - aggregation_method='mean', - granularity=60, - metric='cpu_util', - resource_type='instance', - resource_id='my_instance') - ), - models.Alarm(name='group_running_idle', - description='group_running_idle', - type='gnocchi_aggregation_by_metrics_threshold', - enabled=True, - user_id='foobar', - project_id='snafu', - state='insufficient data', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - insufficient_data_actions=[], - ok_actions=[], - alarm_actions=[], - repeat_actions=False, - alarm_id=str(uuid.uuid4()), - time_constraints=[], - rule=dict( - comparison_operator='le', - threshold=10.0, - evaluation_periods=4, - aggregation_method='max', - granularity=300, - metrics=['0bb1604d-1193-4c0a-b4b8-74b170e35e83', - '9ddc209f-42f8-41e1-b8f1-8804f59c4053']), - ), - models.Alarm(name='instance_not_running', - description='instance_running_hot', - type='gnocchi_aggregation_by_resources_threshold', - enabled=True, - 
user_id='foobar', - project_id='snafu', - alarm_id=str(uuid.uuid4()), - state='insufficient data', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - insufficient_data_actions=[], - ok_actions=[], - alarm_actions=[], - repeat_actions=False, - time_constraints=[], - rule=dict( - comparison_operator='gt', - threshold=80.0, - evaluation_periods=6, - aggregation_method='mean', - granularity=50, - metric='cpu_util', - resource_type='instance', - query='{"=": {"server_group": ' - '"my_autoscaling_group"}}') - ), - - ] - - @staticmethod - def _get_stats(granularity, values): - now = timeutils.utcnow_ts() - return FakeResponse( - 200, [[six.text_type(now - len(values) * granularity), - granularity, value] for value in values]) - - @staticmethod - def _reason_data(disposition, count, most_recent): - return {'type': 'threshold', 'disposition': disposition, - 'count': count, 'most_recent': most_recent} - - def _set_all_rules(self, field, value): - for alarm in self.alarms: - alarm.rule[field] = value - - def test_retry_transient_api_failure(self): - means = self._get_stats(60, [self.alarms[0].rule['threshold'] - v - for v in moves.xrange(5)]) - maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] + v - for v in moves.xrange(1, 4)]) - avgs2 = self._get_stats(50, [self.alarms[2].rule['threshold'] - v - for v in moves.xrange(6)]) - self.requests.get.side_effect = [Exception('boom'), - FakeResponse(500, "error"), - means, - maxs] - self.requests.post.side_effect = [FakeResponse(500, "error"), avgs2] - self._evaluate_all_alarms() - self._assert_all_alarms('insufficient data') - self._evaluate_all_alarms() - self._assert_all_alarms('ok') - - def test_simple_insufficient(self): - self._set_all_alarms('ok') - self.requests.get.return_value = FakeResponse(200, []) - self.requests.post.return_value = FakeResponse(200, []) - self._evaluate_all_alarms() - self._assert_all_alarms('insufficient data') - expected = [mock.call(alarm.alarm_id, state='insufficient data') - for alarm in self.alarms] - update_calls = self.api_client.alarms.set_state.call_args_list - self.assertEqual(expected, update_calls) - expected = [mock.call( - alarm, - 'ok', - ('%d datapoints are unknown' - % alarm.rule['evaluation_periods']), - self._reason_data('unknown', - alarm.rule['evaluation_periods'], - None)) - for alarm in self.alarms] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - @mock.patch.object(timeutils, 'utcnow') - def test_simple_alarm_trip(self, utcnow): - utcnow.return_value = datetime.datetime(2015, 1, 26, 12, 57, 0, 0) - self._set_all_alarms('ok') - avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v - for v in moves.xrange(1, 6)]) - maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] - v - for v in moves.xrange(4)]) - avgs2 = self._get_stats(50, [self.alarms[2].rule['threshold'] + v - for v in moves.xrange(1, 7)]) - - self.requests.get.side_effect = [avgs, maxs] - self.requests.post.side_effect = [avgs2] - self._evaluate_all_alarms() - - expected_headers = {'X-Auth-Token': 'fake_token', - 'Content-Type': 'application/json'} - - start_alarm1 = "2015-01-26T12:51:00" - start_alarm2 = "2015-01-26T12:32:00" - start_alarm3 = "2015-01-26T12:51:10" - end = "2015-01-26T12:57:00" - - self.assertEqual([ - mock.call(url='http://localhost:8041/v1/resource/instance/' - 'my_instance/metric/cpu_util/measures', - params={'aggregation': 'mean', - 'start': start_alarm1, 'end': end}, - headers=expected_headers), - 
mock.call(url='http://localhost:8041/v1/aggregation/metric', - params={'aggregation': 'max', - 'start': start_alarm2, 'end': end, - 'metric[]': [ - '0bb1604d-1193-4c0a-b4b8-74b170e35e83', - '9ddc209f-42f8-41e1-b8f1-8804f59c4053']}, - headers=expected_headers)], - - self.requests.get.mock_calls) - self.assertEqual([ - mock.call(url='http://localhost:8041/v1/aggregation/resource/' - 'instance/metric/cpu_util', - params={'aggregation': 'mean', - 'start': start_alarm3, 'end': end}, - data='{"=": {"server_group": "my_autoscaling_group"}}', - headers=expected_headers), - ], - self.requests.post.mock_calls) - - self._assert_all_alarms('alarm') - expected = [mock.call(alarm.alarm_id, state='alarm') - for alarm in self.alarms] - update_calls = self.api_client.alarms.set_state.call_args_list - self.assertEqual(expected, update_calls) - reasons = ['Transition to alarm due to 5 samples outside' - ' threshold, most recent: %s' % avgs.values[-1], - 'Transition to alarm due to 4 samples outside' - ' threshold, most recent: %s' % maxs.values[-1], - 'Transition to alarm due to 6 samples outside' - ' threshold, most recent: %s' % avgs2.values[-1], - ] - reason_datas = [self._reason_data('outside', 5, avgs.values[-1]), - self._reason_data('outside', 4, maxs.values[-1]), - self._reason_data('outside', 6, avgs2.values[-1])] - expected = [mock.call(alarm, 'ok', reason, reason_data) - for alarm, reason, reason_data - in zip(self.alarms, reasons, reason_datas)] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - def test_simple_alarm_clear(self): - self._set_all_alarms('alarm') - avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] - v - for v in moves.xrange(5)]) - maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] + v - for v in moves.xrange(1, 5)]) - avgs2 = self._get_stats(50, [self.alarms[2].rule['threshold'] - v - for v in moves.xrange(6)]) - self.requests.post.side_effect = [avgs2] - self.requests.get.side_effect = [avgs, maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('ok') - expected = [mock.call(alarm.alarm_id, state='ok') - for alarm in self.alarms] - update_calls = self.api_client.alarms.set_state.call_args_list - self.assertEqual(expected, update_calls) - reasons = ['Transition to ok due to 5 samples inside' - ' threshold, most recent: %s' % avgs.values[-1], - 'Transition to ok due to 4 samples inside' - ' threshold, most recent: %s' % maxs.values[-1], - 'Transition to ok due to 6 samples inside' - ' threshold, most recent: %s' % avgs2.values[-1]] - reason_datas = [self._reason_data('inside', 5, avgs.values[-1]), - self._reason_data('inside', 4, maxs.values[-1]), - self._reason_data('inside', 6, avgs2.values[-1])] - expected = [mock.call(alarm, 'alarm', reason, reason_data) - for alarm, reason, reason_data - in zip(self.alarms, reasons, reason_datas)] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - def test_equivocal_from_known_state(self): - self._set_all_alarms('ok') - avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v - for v in moves.xrange(5)]) - maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] - v - for v in moves.xrange(-1, 3)]) - avgs2 = self._get_stats(50, [self.alarms[2].rule['threshold'] + v - for v in moves.xrange(6)]) - self.requests.post.side_effect = [avgs2] - self.requests.get.side_effect = [avgs, maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('ok') - self.assertEqual( - [], - self.api_client.alarms.set_state.call_args_list) - self.assertEqual([], 
self.notifier.notify.call_args_list) - - def test_equivocal_from_known_state_and_repeat_actions(self): - self._set_all_alarms('ok') - self.alarms[1].repeat_actions = True - avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v - for v in moves.xrange(5)]) - maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] - v - for v in moves.xrange(-1, 3)]) - avgs2 = self._get_stats(50, [self.alarms[2].rule['threshold'] + v - for v in moves.xrange(6)]) - self.requests.post.side_effect = [avgs2] - self.requests.get.side_effect = [avgs, maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('ok') - self.assertEqual([], self.api_client.alarms.set_state.call_args_list) - reason = ('Remaining as ok due to 4 samples inside' - ' threshold, most recent: 8.0') - reason_datas = self._reason_data('inside', 4, 8.0) - expected = [mock.call(self.alarms[1], 'ok', reason, reason_datas)] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - def test_unequivocal_from_known_state_and_repeat_actions(self): - self._set_all_alarms('alarm') - self.alarms[1].repeat_actions = True - avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v - for v in moves.xrange(1, 6)]) - maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] - v - for v in moves.xrange(4)]) - avgs2 = self._get_stats(50, [self.alarms[2].rule['threshold'] + v - for v in moves.xrange(6)]) - self.requests.post.side_effect = [avgs2] - self.requests.get.side_effect = [avgs, maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('alarm') - self.assertEqual([], self.api_client.alarms.set_state.call_args_list) - reason = ('Remaining as alarm due to 4 samples outside' - ' threshold, most recent: 7.0') - reason_datas = self._reason_data('outside', 4, 7.0) - expected = [mock.call(self.alarms[1], 'alarm', - reason, reason_datas)] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - def test_state_change_and_repeat_actions(self): - self._set_all_alarms('ok') - self.alarms[0].repeat_actions = True - self.alarms[1].repeat_actions = True - avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v - for v in moves.xrange(1, 6)]) - maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] - v - for v in moves.xrange(4)]) - avgs2 = self._get_stats(50, [self.alarms[2].rule['threshold'] + v - for v in moves.xrange(1, 7)]) - self.requests.post.side_effect = [avgs2] - self.requests.get.side_effect = [avgs, maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('alarm') - expected = [mock.call(alarm.alarm_id, state='alarm') - for alarm in self.alarms] - update_calls = self.api_client.alarms.set_state.call_args_list - self.assertEqual(expected, update_calls) - reasons = ['Transition to alarm due to 5 samples outside' - ' threshold, most recent: %s' % avgs.values[-1], - 'Transition to alarm due to 4 samples outside' - ' threshold, most recent: %s' % maxs.values[-1], - 'Transition to alarm due to 6 samples outside' - ' threshold, most recent: %s' % avgs2.values[-1]] - reason_datas = [self._reason_data('outside', 5, avgs.values[-1]), - self._reason_data('outside', 4, maxs.values[-1]), - self._reason_data('outside', 6, avgs2.values[-1])] - expected = [mock.call(alarm, 'ok', reason, reason_data) - for alarm, reason, reason_data - in zip(self.alarms, reasons, reason_datas)] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - def test_equivocal_from_unknown(self): - self._set_all_alarms('insufficient data') - avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v - 
for v in moves.xrange(1, 6)]) - maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] - v - for v in moves.xrange(4)]) - avgs2 = self._get_stats(50, [self.alarms[2].rule['threshold'] + v - for v in moves.xrange(1, 7)]) - self.requests.post.side_effect = [avgs2] - self.requests.get.side_effect = [avgs, maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('alarm') - expected = [mock.call(alarm.alarm_id, state='alarm') - for alarm in self.alarms] - update_calls = self.api_client.alarms.set_state.call_args_list - self.assertEqual(expected, update_calls) - reasons = ['Transition to alarm due to 5 samples outside' - ' threshold, most recent: %s' % avgs.values[-1], - 'Transition to alarm due to 4 samples outside' - ' threshold, most recent: %s' % maxs.values[-1], - 'Transition to alarm due to 6 samples outside' - ' threshold, most recent: %s' % avgs2.values[-1]] - reason_datas = [self._reason_data('outside', 5, avgs.values[-1]), - self._reason_data('outside', 4, maxs.values[-1]), - self._reason_data('outside', 6, avgs2.values[-1])] - expected = [mock.call(alarm, 'insufficient data', - reason, reason_data) - for alarm, reason, reason_data - in zip(self.alarms, reasons, reason_datas)] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - @unittest.skipIf(six.PY3, - "the ceilometer base class is not python 3 ready") - @mock.patch.object(timeutils, 'utcnow') - def test_no_state_change_outside_time_constraint(self, mock_utcnow): - self._set_all_alarms('ok') - self.alarms[0].time_constraints = [ - {'name': 'test', - 'description': 'test', - 'start': '0 11 * * *', # daily at 11:00 - 'duration': 10800, # 3 hours - 'timezone': 'Europe/Ljubljana'} - ] - self.alarms[1].time_constraints = self.alarms[0].time_constraints - self.alarms[2].time_constraints = self.alarms[0].time_constraints - dt = datetime.datetime(2014, 1, 1, 15, 0, 0, - tzinfo=pytz.timezone('Europe/Ljubljana')) - mock_utcnow.return_value = dt.astimezone(pytz.UTC) - self.requests.get.return_value = [] - self._evaluate_all_alarms() - self._assert_all_alarms('ok') - update_calls = self.api_client.alarms.set_state.call_args_list - self.assertEqual([], update_calls, - "Alarm should not change state if the current " - " time is outside its time constraint.") - self.assertEqual([], self.notifier.notify.call_args_list) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/alarm/evaluator/test_threshold.py ceilometer-5.0.0~b3/ceilometer/tests/alarm/evaluator/test_threshold.py --- ceilometer-5.0.0~b2/ceilometer/tests/alarm/evaluator/test_threshold.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/alarm/evaluator/test_threshold.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,540 +0,0 @@ -# -# Copyright 2013 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
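# Illustrative note (not upstream code): the start/end query parameters
# expected by the deleted gnocchi trip test earlier in this hunk
# (start_alarm1/2/3 against end) imply a look-back window of
# (evaluation_periods + 1) * granularity ending at "now":
import datetime

def window(now, evaluation_periods, granularity):
    start = now - datetime.timedelta(
        seconds=(evaluation_periods + 1) * granularity)
    return start.isoformat(), now.isoformat()

now = datetime.datetime(2015, 1, 26, 12, 57, 0)
assert window(now, 5, 60)[0] == '2015-01-26T12:51:00'   # mean/60s rule
assert window(now, 4, 300)[0] == '2015-01-26T12:32:00'  # max/300s rule
assert window(now, 6, 50)[0] == '2015-01-26T12:51:10'   # mean/50s rule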
-"""Tests for ceilometer/alarm/evaluator/threshold.py -""" -import datetime -import uuid - -from ceilometerclient import exc -from ceilometerclient.v2 import statistics -import mock -from oslo_config import cfg -from oslo_utils import timeutils -import pytz -from six import moves - -from ceilometer.alarm.evaluator import threshold -from ceilometer.alarm.storage import models -from ceilometer.tests.alarm.evaluator import base -from ceilometer.tests import constants - - -class TestEvaluate(base.TestEvaluatorBase): - EVALUATOR = threshold.ThresholdEvaluator - - def prepare_alarms(self): - self.alarms = [ - models.Alarm(name='instance_running_hot', - description='instance_running_hot', - type='threshold', - enabled=True, - user_id='foobar', - project_id='snafu', - alarm_id=str(uuid.uuid4()), - state='insufficient data', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - insufficient_data_actions=[], - ok_actions=[], - alarm_actions=[], - repeat_actions=False, - time_constraints=[], - rule=dict( - comparison_operator='gt', - threshold=80.0, - evaluation_periods=5, - statistic='avg', - period=60, - meter_name='cpu_util', - query=[{'field': 'meter', - 'op': 'eq', - 'value': 'cpu_util'}, - {'field': 'resource_id', - 'op': 'eq', - 'value': 'my_instance'}]), - severity='critical' - ), - models.Alarm(name='group_running_idle', - description='group_running_idle', - type='threshold', - enabled=True, - user_id='foobar', - project_id='snafu', - state='insufficient data', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - insufficient_data_actions=[], - ok_actions=[], - alarm_actions=[], - repeat_actions=False, - alarm_id=str(uuid.uuid4()), - time_constraints=[], - rule=dict( - comparison_operator='le', - threshold=10.0, - evaluation_periods=4, - statistic='max', - period=300, - meter_name='cpu_util', - query=[{'field': 'meter', - 'op': 'eq', - 'value': 'cpu_util'}, - {'field': 'metadata.user_metadata.AS', - 'op': 'eq', - 'value': 'my_group'}]), - severity='critical' - ), - ] - - @staticmethod - def _get_stat(attr, value, count=1): - return statistics.Statistics(None, {attr: value, 'count': count}) - - @staticmethod - def _reason_data(disposition, count, most_recent): - return {'type': 'threshold', 'disposition': disposition, - 'count': count, 'most_recent': most_recent} - - def _set_all_rules(self, field, value): - for alarm in self.alarms: - alarm.rule[field] = value - - def test_retry_transient_api_failure(self): - with mock.patch('ceilometerclient.client.get_client', - return_value=self.api_client): - broken = exc.CommunicationError(message='broken') - avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] - v) - for v in moves.xrange(5)] - maxs = [self._get_stat('max', self.alarms[1].rule['threshold'] + v) - for v in moves.xrange(1, 5)] - self.api_client.statistics.list.side_effect = [broken, - broken, - avgs, - maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('insufficient data') - self._evaluate_all_alarms() - self._assert_all_alarms('ok') - - def test_simple_insufficient(self): - self._set_all_alarms('ok') - with mock.patch('ceilometerclient.client.get_client', - return_value=self.api_client): - self.api_client.statistics.list.return_value = [] - self._evaluate_all_alarms() - self._assert_all_alarms('insufficient data') - expected = [mock.call(alarm.alarm_id, state='insufficient data') - for alarm in self.alarms] - update_calls = self.api_client.alarms.set_state.call_args_list - self.assertEqual(expected, update_calls) 
- expected = [mock.call( - alarm, - 'ok', - ('%d datapoints are unknown' - % alarm.rule['evaluation_periods']), - self._reason_data('unknown', - alarm.rule['evaluation_periods'], - None)) - for alarm in self.alarms] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - def test_less_insufficient_data(self): - self._set_all_alarms('ok') - with mock.patch('ceilometerclient.client.get_client', - return_value=self.api_client): - avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] - v) - for v in moves.xrange(4)] - maxs = [self._get_stat('max', self.alarms[1].rule['threshold'] - v) - for v in moves.xrange(1, 4)] - self.api_client.statistics.list.side_effect = [avgs, maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('insufficient data') - expected = [mock.call(alarm.alarm_id, state='insufficient data') - for alarm in self.alarms] - update_calls = self.api_client.alarms.set_state.call_args_list - self.assertEqual(update_calls, expected) - expected = [mock.call( - alarm, - 'ok', - ('%d datapoints are unknown' - % alarm.rule['evaluation_periods']), - self._reason_data('unknown', - alarm.rule['evaluation_periods'], - alarm.rule['threshold'] - 3)) - for alarm in self.alarms] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - def test_simple_alarm_trip(self): - self._set_all_alarms('ok') - with mock.patch('ceilometerclient.client.get_client', - return_value=self.api_client): - avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] + v) - for v in moves.xrange(1, 6)] - maxs = [self._get_stat('max', self.alarms[1].rule['threshold'] - v) - for v in moves.xrange(4)] - self.api_client.statistics.list.side_effect = [avgs, maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('alarm') - expected = [mock.call(alarm.alarm_id, state='alarm') - for alarm in self.alarms] - update_calls = self.api_client.alarms.set_state.call_args_list - self.assertEqual(expected, update_calls) - reasons = ['Transition to alarm due to 5 samples outside' - ' threshold, most recent: %s' % avgs[-1].avg, - 'Transition to alarm due to 4 samples outside' - ' threshold, most recent: %s' % maxs[-1].max] - reason_datas = [self._reason_data('outside', 5, avgs[-1].avg), - self._reason_data('outside', 4, maxs[-1].max)] - expected = [mock.call(alarm, 'ok', reason, reason_data) - for alarm, reason, reason_data - in zip(self.alarms, reasons, reason_datas)] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - def test_simple_alarm_clear(self): - self._set_all_alarms('alarm') - with mock.patch('ceilometerclient.client.get_client', - return_value=self.api_client): - avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] - v) - for v in moves.xrange(5)] - maxs = [self._get_stat('max', self.alarms[1].rule['threshold'] + v) - for v in moves.xrange(1, 5)] - self.api_client.statistics.list.side_effect = [avgs, maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('ok') - expected = [mock.call(alarm.alarm_id, state='ok') - for alarm in self.alarms] - update_calls = self.api_client.alarms.set_state.call_args_list - self.assertEqual(expected, update_calls) - reasons = ['Transition to ok due to 5 samples inside' - ' threshold, most recent: %s' % avgs[-1].avg, - 'Transition to ok due to 4 samples inside' - ' threshold, most recent: %s' % maxs[-1].max] - reason_datas = [self._reason_data('inside', 5, avgs[-1].avg), - self._reason_data('inside', 4, maxs[-1].max)] - expected = [mock.call(alarm, 'alarm', reason, reason_data) - for alarm, reason, reason_data 
- in zip(self.alarms, reasons, reason_datas)] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - def test_equivocal_from_known_state(self): - self._set_all_alarms('ok') - with mock.patch('ceilometerclient.client.get_client', - return_value=self.api_client): - avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] + v) - for v in moves.xrange(5)] - maxs = [self._get_stat('max', self.alarms[1].rule['threshold'] - v) - for v in moves.xrange(-1, 3)] - self.api_client.statistics.list.side_effect = [avgs, maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('ok') - self.assertEqual( - [], - self.api_client.alarms.set_state.call_args_list) - self.assertEqual([], self.notifier.notify.call_args_list) - - def test_equivocal_from_known_state_and_repeat_actions(self): - self._set_all_alarms('ok') - self.alarms[1].repeat_actions = True - with mock.patch('ceilometerclient.client.get_client', - return_value=self.api_client): - avgs = [self._get_stat('avg', - self.alarms[0].rule['threshold'] + v) - for v in moves.xrange(5)] - maxs = [self._get_stat('max', - self.alarms[1].rule['threshold'] - v) - for v in moves.xrange(-1, 3)] - self.api_client.statistics.list.side_effect = [avgs, maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('ok') - self.assertEqual([], - self.api_client.alarms.set_state.call_args_list) - reason = ('Remaining as ok due to 4 samples inside' - ' threshold, most recent: 8.0') - reason_datas = self._reason_data('inside', 4, 8.0) - expected = [mock.call(self.alarms[1], 'ok', reason, reason_datas)] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - def test_unequivocal_from_known_state_and_repeat_actions(self): - self._set_all_alarms('alarm') - self.alarms[1].repeat_actions = True - with mock.patch('ceilometerclient.client.get_client', - return_value=self.api_client): - avgs = [self._get_stat('avg', - self.alarms[0].rule['threshold'] + v) - for v in moves.xrange(1, 6)] - maxs = [self._get_stat('max', - self.alarms[1].rule['threshold'] - v) - for v in moves.xrange(4)] - self.api_client.statistics.list.side_effect = [avgs, maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('alarm') - self.assertEqual([], - self.api_client.alarms.set_state.call_args_list) - reason = ('Remaining as alarm due to 4 samples outside' - ' threshold, most recent: 7.0') - reason_datas = self._reason_data('outside', 4, 7.0) - expected = [mock.call(self.alarms[1], 'alarm', - reason, reason_datas)] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - def test_state_change_and_repeat_actions(self): - self._set_all_alarms('ok') - self.alarms[0].repeat_actions = True - self.alarms[1].repeat_actions = True - with mock.patch('ceilometerclient.client.get_client', - return_value=self.api_client): - avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] + v) - for v in moves.xrange(1, 6)] - maxs = [self._get_stat('max', self.alarms[1].rule['threshold'] - v) - for v in moves.xrange(4)] - self.api_client.statistics.list.side_effect = [avgs, maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('alarm') - expected = [mock.call(alarm.alarm_id, state='alarm') - for alarm in self.alarms] - update_calls = self.api_client.alarms.set_state.call_args_list - self.assertEqual(expected, update_calls) - reasons = ['Transition to alarm due to 5 samples outside' - ' threshold, most recent: %s' % avgs[-1].avg, - 'Transition to alarm due to 4 samples outside' - ' threshold, most recent: %s' % maxs[-1].max] - reason_datas = 
[self._reason_data('outside', 5, avgs[-1].avg), - self._reason_data('outside', 4, maxs[-1].max)] - expected = [mock.call(alarm, 'ok', reason, reason_data) - for alarm, reason, reason_data - in zip(self.alarms, reasons, reason_datas)] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - def test_equivocal_from_unknown(self): - self._set_all_alarms('insufficient data') - with mock.patch('ceilometerclient.client.get_client', - return_value=self.api_client): - avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] + v) - for v in moves.xrange(1, 6)] - maxs = [self._get_stat('max', self.alarms[1].rule['threshold'] - v) - for v in moves.xrange(4)] - self.api_client.statistics.list.side_effect = [avgs, maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('alarm') - expected = [mock.call(alarm.alarm_id, state='alarm') - for alarm in self.alarms] - update_calls = self.api_client.alarms.set_state.call_args_list - self.assertEqual(expected, update_calls) - reasons = ['Transition to alarm due to 5 samples outside' - ' threshold, most recent: %s' % avgs[-1].avg, - 'Transition to alarm due to 4 samples outside' - ' threshold, most recent: %s' % maxs[-1].max] - reason_datas = [self._reason_data('outside', 5, avgs[-1].avg), - self._reason_data('outside', 4, maxs[-1].max)] - expected = [mock.call(alarm, 'insufficient data', - reason, reason_data) - for alarm, reason, reason_data - in zip(self.alarms, reasons, reason_datas)] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - def _do_test_bound_duration(self, start, exclude_outliers=None): - alarm = self.alarms[0] - if exclude_outliers is not None: - alarm.rule['exclude_outliers'] = exclude_outliers - with mock.patch.object(timeutils, 'utcnow') as mock_utcnow: - mock_utcnow.return_value = datetime.datetime(2012, 7, 2, 10, 45) - constraint = self.evaluator._bound_duration(alarm, []) - self.assertEqual([ - {'field': 'timestamp', - 'op': 'le', - 'value': timeutils.utcnow().isoformat()}, - {'field': 'timestamp', - 'op': 'ge', - 'value': start}, - ], constraint) - - def test_bound_duration_outlier_exclusion_defaulted(self): - self._do_test_bound_duration('2012-07-02T10:39:00') - - def test_bound_duration_outlier_exclusion_clear(self): - self._do_test_bound_duration('2012-07-02T10:39:00', False) - - def test_bound_duration_outlier_exclusion_set(self): - self._do_test_bound_duration('2012-07-02T10:35:00', True) - - def test_threshold_endpoint_types(self): - endpoint_types = ["internalURL", "publicURL"] - for endpoint_type in endpoint_types: - cfg.CONF.set_override('os_endpoint_type', - endpoint_type, - group='service_credentials') - with mock.patch('ceilometerclient.client.get_client') as client: - self.evaluator.api_client = None - self._evaluate_all_alarms() - conf = cfg.CONF.service_credentials - expected = [mock.call(2, - os_auth_url=conf.os_auth_url, - os_region_name=conf.os_region_name, - os_tenant_name=conf.os_tenant_name, - os_password=conf.os_password, - os_username=conf.os_username, - os_cacert=conf.os_cacert, - os_endpoint_type=conf.os_endpoint_type, - timeout=cfg.CONF.http_timeout, - insecure=conf.insecure)] - actual = client.call_args_list - self.assertEqual(expected, actual) - - def _do_test_simple_alarm_trip_outlier_exclusion(self, exclude_outliers): - self._set_all_rules('exclude_outliers', exclude_outliers) - self._set_all_alarms('ok') - with mock.patch('ceilometerclient.client.get_client', - return_value=self.api_client): - # most recent datapoints inside threshold but with - # anomalously low 
sample count
-            threshold = self.alarms[0].rule['threshold']
-            avgs = [self._get_stat('avg',
-                                   threshold + (v if v < 10 else -v),
-                                   count=20 if v < 10 else 1)
-                    for v in moves.xrange(1, 11)]
-            threshold = self.alarms[1].rule['threshold']
-            maxs = [self._get_stat('max',
-                                   threshold - (v if v < 7 else -v),
-                                   count=20 if v < 7 else 1)
-                    for v in moves.xrange(8)]
-            self.api_client.statistics.list.side_effect = [avgs, maxs]
-            self._evaluate_all_alarms()
-            self._assert_all_alarms('alarm' if exclude_outliers else 'ok')
-            if exclude_outliers:
-                expected = [mock.call(alarm.alarm_id, state='alarm')
-                            for alarm in self.alarms]
-                update_calls = self.api_client.alarms.set_state.call_args_list
-                self.assertEqual(expected, update_calls)
-                reasons = ['Transition to alarm due to 5 samples outside'
-                           ' threshold, most recent: %s' % avgs[-2].avg,
-                           'Transition to alarm due to 4 samples outside'
-                           ' threshold, most recent: %s' % maxs[-2].max]
-                reason_datas = [self._reason_data('outside', 5, avgs[-2].avg),
-                                self._reason_data('outside', 4, maxs[-2].max)]
-                expected = [mock.call(alarm, 'ok', reason, reason_data)
-                            for alarm, reason, reason_data
-                            in zip(self.alarms, reasons, reason_datas)]
-                self.assertEqual(expected, self.notifier.notify.call_args_list)
-
-    def test_simple_alarm_trip_with_outlier_exclusion(self):
-        self._do_test_simple_alarm_trip_outlier_exclusion(True)
-
-    def test_simple_alarm_no_trip_without_outlier_exclusion(self):
-        self._do_test_simple_alarm_trip_outlier_exclusion(False)
-
-    def _do_test_simple_alarm_clear_outlier_exclusion(self, exclude_outliers):
-        self._set_all_rules('exclude_outliers', exclude_outliers)
-        self._set_all_alarms('alarm')
-        with mock.patch('ceilometerclient.client.get_client',
-                        return_value=self.api_client):
-            # most recent datapoints outside threshold but with
-            # anomalously low sample count
-            threshold = self.alarms[0].rule['threshold']
-            avgs = [self._get_stat('avg',
-                                   threshold - (v if v < 9 else -v),
-                                   count=20 if v < 9 else 1)
-                    for v in moves.xrange(10)]
-            threshold = self.alarms[1].rule['threshold']
-            maxs = [self._get_stat('max',
-                                   threshold + (v if v < 8 else -v),
-                                   count=20 if v < 8 else 1)
-                    for v in moves.xrange(1, 9)]
-            self.api_client.statistics.list.side_effect = [avgs, maxs]
-            self._evaluate_all_alarms()
-            self._assert_all_alarms('ok' if exclude_outliers else 'alarm')
-            if exclude_outliers:
-                expected = [mock.call(alarm.alarm_id, state='ok')
-                            for alarm in self.alarms]
-                update_calls = self.api_client.alarms.set_state.call_args_list
-                self.assertEqual(expected, update_calls)
-                reasons = ['Transition to ok due to 5 samples inside'
-                           ' threshold, most recent: %s' % avgs[-2].avg,
-                           'Transition to ok due to 4 samples inside'
-                           ' threshold, most recent: %s' % maxs[-2].max]
-                reason_datas = [self._reason_data('inside', 5, avgs[-2].avg),
-                                self._reason_data('inside', 4, maxs[-2].max)]
-                expected = [mock.call(alarm, 'alarm', reason, reason_data)
-                            for alarm, reason, reason_data
-                            in zip(self.alarms, reasons, reason_datas)]
-                self.assertEqual(expected, self.notifier.notify.call_args_list)
-
-    def test_simple_alarm_clear_with_outlier_exclusion(self):
-        self._do_test_simple_alarm_clear_outlier_exclusion(True)
-
-    def test_simple_alarm_no_clear_without_outlier_exclusion(self):
-        self._do_test_simple_alarm_clear_outlier_exclusion(False)
-
-    @mock.patch.object(timeutils, 'utcnow')
-    def test_state_change_inside_time_constraint(self, mock_utcnow):
-        self._set_all_alarms('ok')
-        self.alarms[0].time_constraints = [
-            {'name': 'test',
-             'description': 'test',
-             'start': '0 11 * * *',  # daily at 11:00
-             'duration': 10800,  # 3 hours
-             'timezone': 'Europe/Ljubljana'}
-        ]
-        self.alarms[1].time_constraints = self.alarms[0].time_constraints
-        dt = datetime.datetime(2014, 1, 1, 12, 0, 0,
-                               tzinfo=pytz.timezone('Europe/Ljubljana'))
-        mock_utcnow.return_value = dt.astimezone(pytz.UTC)
-        with mock.patch('ceilometerclient.client.get_client',
-                        return_value=self.api_client):
-            # the following part based on test_simple_insufficient
-            self.api_client.statistics.list.return_value = []
-            self._evaluate_all_alarms()
-            self._assert_all_alarms('insufficient data')
-            expected = [mock.call(alarm.alarm_id,
-                                  state='insufficient data')
-                        for alarm in self.alarms]
-            update_calls = self.api_client.alarms.set_state.call_args_list
-            self.assertEqual(expected, update_calls,
-                             "Alarm should change state if the current "
-                             "time is inside its time constraint.")
-            expected = [mock.call(
-                alarm,
-                'ok',
-                ('%d datapoints are unknown'
-                 % alarm.rule['evaluation_periods']),
-                self._reason_data('unknown',
-                                  alarm.rule['evaluation_periods'],
-                                  None))
-                for alarm in self.alarms]
-            self.assertEqual(expected, self.notifier.notify.call_args_list)
-
-    @mock.patch.object(timeutils, 'utcnow')
-    def test_no_state_change_outside_time_constraint(self, mock_utcnow):
-        self._set_all_alarms('ok')
-        self.alarms[0].time_constraints = [
-            {'name': 'test',
-             'description': 'test',
-             'start': '0 11 * * *',  # daily at 11:00
-             'duration': 10800,  # 3 hours
-             'timezone': 'Europe/Ljubljana'}
-        ]
-        self.alarms[1].time_constraints = self.alarms[0].time_constraints
-        dt = datetime.datetime(2014, 1, 1, 15, 0, 0,
-                               tzinfo=pytz.timezone('Europe/Ljubljana'))
-        mock_utcnow.return_value = dt.astimezone(pytz.UTC)
-        with mock.patch('ceilometerclient.client.get_client',
-                        return_value=self.api_client):
-            self.api_client.statistics.list.return_value = []
-            self._evaluate_all_alarms()
-            self._assert_all_alarms('ok')
-            update_calls = self.api_client.alarms.set_state.call_args_list
-            self.assertEqual([], update_calls,
-                             "Alarm should not change state if the current "
-                             "time is outside its time constraint.")
-            self.assertEqual([], self.notifier.notify.call_args_list)
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/alarm/test_alarm_svc.py ceilometer-5.0.0~b3/ceilometer/tests/alarm/test_alarm_svc.py
--- ceilometer-5.0.0~b2/ceilometer/tests/alarm/test_alarm_svc.py 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/alarm/test_alarm_svc.py 1970-01-01 00:00:00.000000000 +0000
@@ -1,158 +0,0 @@
-#
-# Copyright 2013 Red Hat, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Tests for ceilometer.alarm.service.SingletonAlarmService.
-""" -import mock -from oslo_config import fixture as fixture_config -from stevedore import extension - -from ceilometer.alarm import service -from ceilometer.tests import base as tests_base - - -class TestAlarmEvaluationService(tests_base.BaseTestCase): - def setUp(self): - super(TestAlarmEvaluationService, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.setup_messaging(self.CONF) - - self.threshold_eval = mock.Mock() - self.evaluators = extension.ExtensionManager.make_test_instance( - [ - extension.Extension( - 'threshold', - None, - None, - self.threshold_eval), - ] - ) - self.api_client = mock.MagicMock() - self.svc = service.AlarmEvaluationService() - self.svc.tg = mock.Mock() - self.svc.partition_coordinator = mock.MagicMock() - p_coord = self.svc.partition_coordinator - p_coord.extract_my_subset.side_effect = lambda _, x: x - self.svc.evaluators = self.evaluators - self.svc.supported_evaluators = ['threshold'] - - def _do_test_start(self, test_interval=120, - coordination_heartbeat=1.0, - coordination_active=False): - self.CONF.set_override('evaluation_interval', - test_interval, - group='alarm') - self.CONF.set_override('heartbeat', - coordination_heartbeat, - group='coordination') - with mock.patch('ceilometerclient.client.get_client', - return_value=self.api_client): - p_coord_mock = self.svc.partition_coordinator - p_coord_mock.is_active.return_value = coordination_active - - self.svc.start() - self.svc.partition_coordinator.start.assert_called_once_with() - self.svc.partition_coordinator.join_group.assert_called_once_with( - self.svc.PARTITIONING_GROUP_NAME) - - initial_delay = test_interval if coordination_active else None - expected = [ - mock.call(test_interval, - self.svc._evaluate_assigned_alarms, - initial_delay=initial_delay), - mock.call(604800, mock.ANY), - ] - if coordination_active: - hb_interval = min(coordination_heartbeat, test_interval / 4) - hb_call = mock.call(hb_interval, - self.svc.partition_coordinator.heartbeat) - expected.insert(1, hb_call) - actual = self.svc.tg.add_timer.call_args_list - self.assertEqual(expected, actual) - - def test_start_singleton(self): - self._do_test_start(coordination_active=False) - - def test_start_coordinated(self): - self._do_test_start(coordination_active=True) - - def test_start_coordinated_high_hb_interval(self): - self._do_test_start(coordination_active=True, test_interval=10, - coordination_heartbeat=5) - - def test_evaluation_cycle(self): - alarm = mock.Mock(type='threshold') - self.api_client.alarms.list.return_value = [alarm] - with mock.patch('ceilometerclient.client.get_client', - return_value=self.api_client): - p_coord_mock = self.svc.partition_coordinator - p_coord_mock.extract_my_subset.return_value = [alarm] - - self.svc._evaluate_assigned_alarms() - - p_coord_mock.extract_my_subset.assert_called_once_with( - self.svc.PARTITIONING_GROUP_NAME, [alarm]) - self.threshold_eval.evaluate.assert_called_once_with(alarm) - - def test_evaluation_cycle_with_bad_alarm(self): - alarms = [ - mock.Mock(type='threshold', name='bad'), - mock.Mock(type='threshold', name='good'), - ] - self.api_client.alarms.list.return_value = alarms - self.threshold_eval.evaluate.side_effect = [Exception('Boom!'), None] - with mock.patch('ceilometerclient.client.get_client', - return_value=self.api_client): - p_coord_mock = self.svc.partition_coordinator - p_coord_mock.extract_my_subset.return_value = alarms - - self.svc._evaluate_assigned_alarms() - self.assertEqual([mock.call(alarms[0]), mock.call(alarms[1])], 
- self.threshold_eval.evaluate.call_args_list) - - def test_unknown_extension_skipped(self): - alarms = [ - mock.Mock(type='not_existing_type'), - mock.Mock(type='threshold') - ] - - self.api_client.alarms.list.return_value = alarms - with mock.patch('ceilometerclient.client.get_client', - return_value=self.api_client): - self.svc.start() - self.svc._evaluate_assigned_alarms() - self.threshold_eval.evaluate.assert_called_once_with(alarms[1]) - - def test_singleton_endpoint_types(self): - endpoint_types = ["internalURL", "publicURL"] - for endpoint_type in endpoint_types: - self.CONF.set_override('os_endpoint_type', - endpoint_type, - group='service_credentials') - with mock.patch('ceilometerclient.client.get_client') as client: - self.svc.api_client = None - self.svc._evaluate_assigned_alarms() - conf = self.CONF.service_credentials - expected = [mock.call(2, - os_auth_url=conf.os_auth_url, - os_region_name=conf.os_region_name, - os_tenant_name=conf.os_tenant_name, - os_password=conf.os_password, - os_username=conf.os_username, - os_cacert=conf.os_cacert, - os_endpoint_type=conf.os_endpoint_type, - timeout=self.CONF.http_timeout, - insecure=conf.insecure)] - actual = client.call_args_list - self.assertEqual(expected, actual) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/alarm/test_notifier.py ceilometer-5.0.0~b3/ceilometer/tests/alarm/test_notifier.py --- ceilometer-5.0.0~b2/ceilometer/tests/alarm/test_notifier.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/alarm/test_notifier.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,266 +0,0 @@ -# -# Copyright 2013-2014 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_config import fixture as fixture_config -from oslo_context import context -from oslo_serialization import jsonutils -from oslotest import mockpatch -import requests -import six.moves.urllib.parse as urlparse - -from ceilometer import alarm as ceilometer_alarm -from ceilometer.alarm import service -from ceilometer.tests import base as tests_base - - -DATA_JSON = jsonutils.loads( - '{"current": "ALARM", "alarm_id": "foobar", "alarm_name": "testalarm",' - ' "severity": "critical", "reason": "what ?",' - ' "reason_data": {"test": "test"}, "previous": "OK"}' -) -NOTIFICATION = dict(alarm_id='foobar', - alarm_name='testalarm', - severity='critical', - condition=dict(threshold=42), - reason='what ?', - reason_data={'test': 'test'}, - previous='OK', - current='ALARM') - - -class TestAlarmNotifier(tests_base.BaseTestCase): - - def setUp(self): - super(TestAlarmNotifier, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.setup_messaging(self.CONF) - self.service = service.AlarmNotifierService() - self.useFixture(mockpatch.Patch( - 'oslo_context.context.generate_request_id', - self._fake_generate_request_id)) - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_init_host(self): - # If we try to create a real RPC connection, init_host() never - # returns. 
Mock it out so we can establish the service - # configuration. - with mock.patch.object(self.service.rpc_server, 'start'): - self.service.start() - - def test_notify_alarm(self): - data = { - 'actions': ['test://'], - 'alarm_id': 'foobar', - 'alarm_name': 'testalarm', - 'severity': 'critical', - 'previous': 'OK', - 'current': 'ALARM', - 'reason': 'Everything is on fire', - 'reason_data': {'fire': 'everywhere'} - } - self.service.notify_alarm(context.get_admin_context(), data) - notifications = ceilometer_alarm.NOTIFIERS['test'].obj.notifications - self.assertEqual(1, len(notifications)) - self.assertEqual((urlparse.urlsplit(data['actions'][0]), - data['alarm_id'], - data['alarm_name'], - data['severity'], - data['previous'], - data['current'], - data['reason'], - data['reason_data']), - notifications[0]) - - def test_notify_alarm_no_action(self): - self.service.notify_alarm(context.get_admin_context(), {}) - - def test_notify_alarm_log_action(self): - self.service.notify_alarm(context.get_admin_context(), - { - 'actions': ['log://'], - 'alarm_id': 'foobar', - 'condition': {'threshold': 42}}) - - @staticmethod - def _fake_spawn_n(func, *args, **kwargs): - func(*args, **kwargs) - - @staticmethod - def _notification(action): - notification = {} - notification.update(NOTIFICATION) - notification['actions'] = [action] - return notification - - HTTP_HEADERS = {'x-openstack-request-id': 'fake_request_id', - 'content-type': 'application/json'} - - def _fake_generate_request_id(self): - return self.HTTP_HEADERS['x-openstack-request-id'] - - def test_notify_alarm_rest_action_ok(self): - action = 'http://host/action' - - with mock.patch('eventlet.spawn_n', self._fake_spawn_n): - with mock.patch.object(requests.Session, 'post') as poster: - self.service.notify_alarm(context.get_admin_context(), - self._notification(action)) - poster.assert_called_with(action, data=mock.ANY, - headers=mock.ANY) - args, kwargs = poster.call_args - self.assertEqual(self.HTTP_HEADERS, kwargs['headers']) - self.assertEqual(DATA_JSON, jsonutils.loads(kwargs['data'])) - - def test_notify_alarm_rest_action_with_ssl_client_cert(self): - action = 'https://host/action' - certificate = "/etc/ssl/cert/whatever.pem" - - self.CONF.set_override("rest_notifier_certificate_file", certificate, - group='alarm') - - with mock.patch('eventlet.spawn_n', self._fake_spawn_n): - with mock.patch.object(requests.Session, 'post') as poster: - self.service.notify_alarm(context.get_admin_context(), - self._notification(action)) - poster.assert_called_with(action, data=mock.ANY, - headers=mock.ANY, - cert=certificate, verify=True) - args, kwargs = poster.call_args - self.assertEqual(self.HTTP_HEADERS, kwargs['headers']) - self.assertEqual(DATA_JSON, jsonutils.loads(kwargs['data'])) - - def test_notify_alarm_rest_action_with_ssl_client_cert_and_key(self): - action = 'https://host/action' - certificate = "/etc/ssl/cert/whatever.pem" - key = "/etc/ssl/cert/whatever.key" - - self.CONF.set_override("rest_notifier_certificate_file", certificate, - group='alarm') - self.CONF.set_override("rest_notifier_certificate_key", key, - group='alarm') - - with mock.patch('eventlet.spawn_n', self._fake_spawn_n): - with mock.patch.object(requests.Session, 'post') as poster: - self.service.notify_alarm(context.get_admin_context(), - self._notification(action)) - poster.assert_called_with(action, data=mock.ANY, - headers=mock.ANY, - cert=(certificate, key), verify=True) - args, kwargs = poster.call_args - self.assertEqual(self.HTTP_HEADERS, kwargs['headers']) - 
self.assertEqual(DATA_JSON, jsonutils.loads(kwargs['data'])) - - def test_notify_alarm_rest_action_with_ssl_verify_disable_by_cfg(self): - action = 'https://host/action' - - self.CONF.set_override("rest_notifier_ssl_verify", False, - group='alarm') - - with mock.patch('eventlet.spawn_n', self._fake_spawn_n): - with mock.patch.object(requests.Session, 'post') as poster: - self.service.notify_alarm(context.get_admin_context(), - self._notification(action)) - poster.assert_called_with(action, data=mock.ANY, - headers=mock.ANY, - verify=False) - args, kwargs = poster.call_args - self.assertEqual(self.HTTP_HEADERS, kwargs['headers']) - self.assertEqual(DATA_JSON, jsonutils.loads(kwargs['data'])) - - def test_notify_alarm_rest_action_with_ssl_verify_disable(self): - action = 'https://host/action?ceilometer-alarm-ssl-verify=0' - - with mock.patch('eventlet.spawn_n', self._fake_spawn_n): - with mock.patch.object(requests.Session, 'post') as poster: - self.service.notify_alarm(context.get_admin_context(), - self._notification(action)) - poster.assert_called_with(action, data=mock.ANY, - headers=mock.ANY, - verify=False) - args, kwargs = poster.call_args - self.assertEqual(self.HTTP_HEADERS, kwargs['headers']) - self.assertEqual(DATA_JSON, jsonutils.loads(kwargs['data'])) - - def test_notify_alarm_rest_action_with_ssl_verify_enable_by_user(self): - action = 'https://host/action?ceilometer-alarm-ssl-verify=1' - - self.CONF.set_override("rest_notifier_ssl_verify", False, - group='alarm') - - with mock.patch('eventlet.spawn_n', self._fake_spawn_n): - with mock.patch.object(requests.Session, 'post') as poster: - self.service.notify_alarm(context.get_admin_context(), - self._notification(action)) - poster.assert_called_with(action, data=mock.ANY, - headers=mock.ANY, - verify=True) - args, kwargs = poster.call_args - self.assertEqual(self.HTTP_HEADERS, kwargs['headers']) - self.assertEqual(DATA_JSON, jsonutils.loads(kwargs['data'])) - - @staticmethod - def _fake_urlsplit(*args, **kwargs): - raise Exception("Evil urlsplit!") - - def test_notify_alarm_invalid_url(self): - with mock.patch('oslo_utils.netutils.urlsplit', - self._fake_urlsplit): - LOG = mock.MagicMock() - with mock.patch('ceilometer.alarm.service.LOG', LOG): - self.service.notify_alarm( - context.get_admin_context(), - { - 'actions': ['no-such-action-i-am-sure'], - 'alarm_id': 'foobar', - 'condition': {'threshold': 42}, - }) - self.assertTrue(LOG.error.called) - - def test_notify_alarm_invalid_action(self): - LOG = mock.MagicMock() - with mock.patch('ceilometer.alarm.service.LOG', LOG): - self.service.notify_alarm( - context.get_admin_context(), - { - 'actions': ['no-such-action-i-am-sure://'], - 'alarm_id': 'foobar', - 'condition': {'threshold': 42}, - }) - self.assertTrue(LOG.error.called) - - def test_notify_alarm_trust_action(self): - action = 'trust+http://trust-1234@host/action' - url = 'http://host/action' - - client = mock.MagicMock() - client.auth_token = 'token_1234' - headers = {'X-Auth-Token': 'token_1234'} - headers.update(self.HTTP_HEADERS) - - self.useFixture(mockpatch.Patch('keystoneclient.v3.client.Client', - lambda **kwargs: client)) - - with mock.patch('eventlet.spawn_n', self._fake_spawn_n): - with mock.patch.object(requests.Session, 'post') as poster: - self.service.notify_alarm(context.get_admin_context(), - self._notification(action)) - headers = {'X-Auth-Token': 'token_1234'} - headers.update(self.HTTP_HEADERS) - poster.assert_called_with( - url, data=mock.ANY, headers=mock.ANY) - args, kwargs = poster.call_args - 
self.assertEqual(headers, kwargs['headers']) - self.assertEqual(DATA_JSON, jsonutils.loads(kwargs['data'])) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/alarm/test_rpc.py ceilometer-5.0.0~b3/ceilometer/tests/alarm/test_rpc.py --- ceilometer-5.0.0~b2/ceilometer/tests/alarm/test_rpc.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/alarm/test_rpc.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,170 +0,0 @@ -# -# Copyright 2013-2014 eNovance -# -# Authors: Mehdi Abaakouk -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import uuid - -from ceilometerclient.v2 import alarms -from oslo_config import fixture as fixture_config -import six - -from ceilometer.alarm import rpc as rpc_alarm -from ceilometer.alarm.storage import models -from ceilometer import messaging -from ceilometer.tests import base as tests_base - - -class FakeNotifier(object): - def __init__(self, transport): - self.rpc = messaging.get_rpc_server( - transport, "alarm_notifier", self) - self.notified = [] - - def start(self, expected_length): - self.expected_length = expected_length - self.rpc.start() - - def notify_alarm(self, context, data): - self.notified.append(data) - if len(self.notified) == self.expected_length: - self.rpc.stop() - - -class TestRPCAlarmNotifier(tests_base.BaseTestCase): - def setUp(self): - super(TestRPCAlarmNotifier, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.setup_messaging(self.CONF) - - self.notifier_server = FakeNotifier(self.transport) - self.notifier = rpc_alarm.RPCAlarmNotifier() - self.alarms = [ - alarms.Alarm(None, info={ - 'name': 'instance_running_hot', - 'meter_name': 'cpu_util', - 'comparison_operator': 'gt', - 'threshold': 80.0, - 'evaluation_periods': 5, - 'statistic': 'avg', - 'state': 'ok', - 'ok_actions': ['http://host:8080/path'], - 'user_id': 'foobar', - 'project_id': 'snafu', - 'period': 60, - 'alarm_id': str(uuid.uuid4()), - 'severity': 'critical', - 'matching_metadata':{'resource_id': - 'my_instance'} - }), - alarms.Alarm(None, info={ - 'name': 'group_running_idle', - 'meter_name': 'cpu_util', - 'comparison_operator': 'le', - 'threshold': 10.0, - 'statistic': 'max', - 'evaluation_periods': 4, - 'state': 'insufficient data', - 'insufficient_data_actions': ['http://other_host/path'], - 'user_id': 'foobar', - 'project_id': 'snafu', - 'period': 300, - 'alarm_id': str(uuid.uuid4()), - 'severity': 'critical', - 'matching_metadata':{'metadata.user_metadata.AS': - 'my_group'} - }), - ] - - def test_rpc_target(self): - topic = self.notifier.client.target.topic - self.assertEqual('alarm_notifier', topic) - - def test_notify_alarm(self): - self.notifier_server.start(2) - - previous = ['alarm', 'ok'] - for i, a in enumerate(self.alarms): - self.notifier.notify(a, previous[i], "what? 
%d" % i, - {'fire': '%d' % i}) - - self.notifier_server.rpc.wait() - - self.assertEqual(2, len(self.notifier_server.notified)) - for i, a in enumerate(self.alarms): - actions = getattr(a, models.Alarm.ALARM_ACTIONS_MAP[a.state]) - self.assertEqual(self.alarms[i].alarm_id, - self.notifier_server.notified[i]["alarm_id"]) - self.assertEqual(self.alarms[i].name, - self.notifier_server.notified[i]["alarm_name"]) - self.assertEqual(self.alarms[i].severity, - self.notifier_server.notified[i]["severity"]) - self.assertEqual(actions, - self.notifier_server.notified[i]["actions"]) - self.assertEqual(previous[i], - self.notifier_server.notified[i]["previous"]) - self.assertEqual(self.alarms[i].state, - self.notifier_server.notified[i]["current"]) - self.assertEqual("what? %d" % i, - self.notifier_server.notified[i]["reason"]) - self.assertEqual({'fire': '%d' % i}, - self.notifier_server.notified[i]["reason_data"]) - - def test_notify_non_string_reason(self): - self.notifier_server.start(1) - self.notifier.notify(self.alarms[0], 'ok', 42, {}) - self.notifier_server.rpc.wait() - reason = self.notifier_server.notified[0]['reason'] - self.assertIsInstance(reason, six.string_types) - - def test_notify_no_actions(self): - alarm = alarms.Alarm(None, info={ - 'name': 'instance_running_hot', - 'meter_name': 'cpu_util', - 'comparison_operator': 'gt', - 'threshold': 80.0, - 'evaluation_periods': 5, - 'statistic': 'avg', - 'state': 'ok', - 'user_id': 'foobar', - 'project_id': 'snafu', - 'period': 60, - 'ok_actions': [], - 'alarm_id': str(uuid.uuid4()), - 'matching_metadata': {'resource_id': - 'my_instance'} - }) - self.notifier.notify(alarm, 'alarm', "what?", {}) - self.assertEqual(0, len(self.notifier_server.notified)) - - -class FakeCoordinator(object): - def __init__(self, transport): - self.rpc = messaging.get_rpc_server( - transport, "alarm_partition_coordination", self) - self.notified = [] - - def presence(self, context, data): - self._record('presence', data) - - def allocate(self, context, data): - self._record('allocate', data) - - def assign(self, context, data): - self._record('assign', data) - - def _record(self, method, data): - self.notified.append((method, data)) - self.rpc.stop() diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/api/__init__.py ceilometer-5.0.0~b3/ceilometer/tests/api/__init__.py --- ceilometer-5.0.0~b2/ceilometer/tests/api/__init__.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/api/__init__.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,185 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Base classes for API tests. 
-""" - -from oslo_config import cfg -from oslo_config import fixture as fixture_config -from oslo_policy import opts -import pecan -import pecan.testing - -from ceilometer.tests import db as db_test_base - -OPT_GROUP_NAME = 'keystone_authtoken' -cfg.CONF.import_group(OPT_GROUP_NAME, "keystonemiddleware.auth_token") -cfg.CONF.import_group('api', 'ceilometer.api.controllers.v2.root') - - -class FunctionalTest(db_test_base.TestBase): - """Used for functional tests of Pecan controllers. - - Used in case when you need to test your literal application and its - integration with the framework. - """ - - PATH_PREFIX = '' - - def setUp(self): - super(FunctionalTest, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.setup_messaging(self.CONF) - opts.set_defaults(self.CONF) - - self.CONF.set_override("auth_version", "v2.0", - group=OPT_GROUP_NAME) - self.CONF.set_override("policy_file", - self.path_get('etc/ceilometer/policy.json'), - group='oslo_policy') - - self.CONF.set_override('gnocchi_is_enabled', False, group='api') - self.CONF.set_override('aodh_is_enabled', False, group='api') - - self.app = self._make_app() - - def _make_app(self, enable_acl=False): - self.config = { - 'app': { - 'root': 'ceilometer.api.controllers.root.RootController', - 'modules': ['ceilometer.api'], - 'enable_acl': enable_acl, - }, - 'wsme': { - 'debug': True, - }, - } - - return pecan.testing.load_test_app(self.config) - - def tearDown(self): - super(FunctionalTest, self).tearDown() - pecan.set_config({}, overwrite=True) - - def put_json(self, path, params, expect_errors=False, headers=None, - extra_environ=None, status=None): - """Sends simulated HTTP PUT request to Pecan test app. - - :param path: url path of target service - :param params: content for wsgi.input of request - :param expect_errors: boolean value whether an error is expected based - on request - :param headers: A dictionary of headers to send along with the request - :param extra_environ: A dictionary of environ variables to send along - with the request - :param status: Expected status code of response - """ - return self.post_json(path=path, params=params, - expect_errors=expect_errors, - headers=headers, extra_environ=extra_environ, - status=status, method="put") - - def post_json(self, path, params, expect_errors=False, headers=None, - method="post", extra_environ=None, status=None): - """Sends simulated HTTP POST request to Pecan test app. - - :param path: url path of target service - :param params: content for wsgi.input of request - :param expect_errors: boolean value whether an error is expected based - on request - :param headers: A dictionary of headers to send along with the request - :param method: Request method type. Appropriate method function call - should be used rather than passing attribute in. - :param extra_environ: A dictionary of environ variables to send along - with the request - :param status: Expected status code of response - """ - full_path = self.PATH_PREFIX + path - response = getattr(self.app, "%s_json" % method)( - str(full_path), - params=params, - headers=headers, - status=status, - extra_environ=extra_environ, - expect_errors=expect_errors - ) - return response - - def delete(self, path, expect_errors=False, headers=None, - extra_environ=None, status=None): - """Sends simulated HTTP DELETE request to Pecan test app. 
- - :param path: url path of target service - :param expect_errors: boolean value whether an error is expected based - on request - :param headers: A dictionary of headers to send along with the request - :param extra_environ: A dictionary of environ variables to send along - with the request - :param status: Expected status code of response - """ - full_path = self.PATH_PREFIX + path - response = self.app.delete(str(full_path), - headers=headers, - status=status, - extra_environ=extra_environ, - expect_errors=expect_errors) - return response - - def get_json(self, path, expect_errors=False, headers=None, - extra_environ=None, q=None, groupby=None, status=None, - override_params=None, **params): - """Sends simulated HTTP GET request to Pecan test app. - - :param path: url path of target service - :param expect_errors: boolean value whether an error is expected based - on request - :param headers: A dictionary of headers to send along with the request - :param extra_environ: A dictionary of environ variables to send along - with the request - :param q: list of queries consisting of: field, value, op, and type - keys - :param groupby: list of fields to group by - :param status: Expected status code of response - :param override_params: literally encoded query param string - :param params: content for wsgi.input of request - """ - q = q or [] - groupby = groupby or [] - full_path = self.PATH_PREFIX + path - if override_params: - all_params = override_params - else: - query_params = {'q.field': [], - 'q.value': [], - 'q.op': [], - 'q.type': [], - } - for query in q: - for name in ['field', 'op', 'value', 'type']: - query_params['q.%s' % name].append(query.get(name, '')) - all_params = {} - all_params.update(params) - if q: - all_params.update(query_params) - if groupby: - all_params.update({'groupby': groupby}) - response = self.app.get(full_path, - params=all_params, - headers=headers, - extra_environ=extra_environ, - expect_errors=expect_errors, - status=status) - if not expect_errors: - response = response.json - return response diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/api/test_app.py ceilometer-5.0.0~b3/ceilometer/tests/api/test_app.py --- ceilometer-5.0.0~b2/ceilometer/tests/api/test_app.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/api/test_app.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,60 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2014 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
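The TestApp.test_pecan_debug removed below pins down when Pecan's debug middleware is allowed to be on. The rule sketched here is inferred from the test's six assertions, not from the application code, so treat it as a reading aid; effective_pecan_debug is a hypothetical name.

    # pecan_debug is honoured only when explicitly enabled and the API runs
    # as a single worker; the global debug flag alone never switches it on.
    def effective_pecan_debug(pecan_debug, api_workers=1):
        return bool(pecan_debug) and api_workers == 1

    assert effective_pecan_debug(True) is True             # p_debug=True case
    assert effective_pecan_debug(None) is False            # defaults stay off
    assert effective_pecan_debug(True, api_workers=5) is False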
-
-import mock
-from oslo_config import cfg
-from oslo_config import fixture as fixture_config
-from oslo_log import log
-
-from ceilometer.api import app
-from ceilometer.tests import base
-
-
-class TestApp(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestApp, self).setUp()
-        self.CONF = self.useFixture(fixture_config.Config()).conf
-        log.register_options(cfg.CONF)
-
-    def test_api_paste_file_not_exist(self):
-        self.CONF.set_override('api_paste_config', 'non-existent-file')
-        with mock.patch.object(self.CONF, 'find_file') as ff:
-            ff.return_value = None
-            self.assertRaises(cfg.ConfigFilesNotFoundError, app.load_app)
-
-    @mock.patch('ceilometer.storage.get_connection_from_config',
-                mock.MagicMock())
-    @mock.patch('pecan.make_app')
-    def test_pecan_debug(self, mocked):
-        def _check_pecan_debug(g_debug, p_debug, expected, workers=1):
-            self.CONF.set_override('debug', g_debug)
-            if p_debug is not None:
-                self.CONF.set_override('pecan_debug', p_debug, group='api')
-            self.CONF.set_override('api_workers', workers)
-            app.setup_app()
-            args, kwargs = mocked.call_args
-            self.assertEqual(expected, kwargs.get('debug'))
-
-        _check_pecan_debug(g_debug=False, p_debug=None, expected=False)
-        _check_pecan_debug(g_debug=True, p_debug=None, expected=False)
-        _check_pecan_debug(g_debug=True, p_debug=False, expected=False)
-        _check_pecan_debug(g_debug=False, p_debug=True, expected=True)
-        _check_pecan_debug(g_debug=True, p_debug=None, expected=False,
-                           workers=5)
-        _check_pecan_debug(g_debug=False, p_debug=True, expected=False,
-                           workers=5)
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/api/test_hooks.py ceilometer-5.0.0~b3/ceilometer/tests/api/test_hooks.py
--- ceilometer-5.0.0~b2/ceilometer/tests/api/test_hooks.py 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/api/test_hooks.py 1970-01-01 00:00:00.000000000 +0000
@@ -1,35 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
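The hook test removed below checks one thing: the API's NotifierHook exposes an oslo.messaging Notifier built from the [publisher_notifier]/telemetry_driver option. A roughly equivalent notifier can be constructed directly; in this sketch the in-memory 'fake://' transport URL and the publisher_id are illustrative choices, not values taken from the hook.

    import oslo_messaging
    from oslo_config import cfg

    # A fake in-memory transport is enough to instantiate the notifier.
    transport = oslo_messaging.get_transport(cfg.CONF, url='fake://')
    notifier = oslo_messaging.Notifier(transport,
                                       driver='messagingv2',
                                       publisher_id='telemetry.api')
    # The deleted test inspects the same private attribute:
    assert notifier._driver_names == ['messagingv2']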
-
-from oslo_config import fixture as fixture_config
-import oslo_messaging
-
-from ceilometer.api import hooks
-from ceilometer.tests import base
-
-
-class TestTestNotifierHook(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestTestNotifierHook, self).setUp()
-        self.CONF = self.useFixture(fixture_config.Config()).conf
-
-    def test_init_notifier_with_drivers(self):
-        self.CONF.set_override('telemetry_driver', 'messagingv2',
-                               group='publisher_notifier')
-        hook = hooks.NotifierHook()
-        notifier = hook.notifier
-        self.assertIsInstance(notifier, oslo_messaging.Notifier)
-        self.assertEqual(['messagingv2'], notifier._driver_names)
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/api/test_versions.py ceilometer-5.0.0~b3/ceilometer/tests/api/test_versions.py
--- ceilometer-5.0.0~b2/ceilometer/tests/api/test_versions.py 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/api/test_versions.py 1970-01-01 00:00:00.000000000 +0000
@@ -1,65 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from ceilometer.tests import api
-
-V2_MEDIA_TYPES = [
-    {
-        'base': 'application/json',
-        'type': 'application/vnd.openstack.telemetry-v2+json'
-    }, {
-        'base': 'application/xml',
-        'type': 'application/vnd.openstack.telemetry-v2+xml'
-    }
-]
-
-V2_HTML_DESCRIPTION = {
-    'href': 'http://docs.openstack.org/',
-    'rel': 'describedby',
-    'type': 'text/html',
-}
-
-V2_EXPECTED_RESPONSE = {
-    'id': 'v2',
-    'links': [
-        {
-            'rel': 'self',
-            'href': 'http://localhost/v2',
-        },
-        V2_HTML_DESCRIPTION
-    ],
-    'media-types': V2_MEDIA_TYPES,
-    'status': 'stable',
-    'updated': '2013-02-13T00:00:00Z',
-}
-
-V2_VERSION_RESPONSE = {
-    "version": V2_EXPECTED_RESPONSE
-}
-
-VERSIONS_RESPONSE = {
-    "versions": {
-        "values": [
-            V2_EXPECTED_RESPONSE
-        ]
-    }
-}
-
-
-class TestVersions(api.FunctionalTest):
-
-    def test_versions(self):
-        data = self.get_json('/')
-        self.assertEqual(VERSIONS_RESPONSE, data)
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/api/v2/__init__.py ceilometer-5.0.0~b3/ceilometer/tests/api/v2/__init__.py
--- ceilometer-5.0.0~b2/ceilometer/tests/api/v2/__init__.py 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/api/v2/__init__.py 1970-01-01 00:00:00.000000000 +0000
@@ -1,20 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
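The v2 package __init__ removed below exists only to point the functional-test helpers at the /v2 API root: FunctionalTest builds full_path = self.PATH_PREFIX + path in get_json and friends, so subclassing with a prefix is the entire mechanism. Schematically (this mirrors the real class shown a few lines below):

    from ceilometer.tests import api

    class FunctionalTest(api.FunctionalTest):
        PATH_PREFIX = '/v2'  # self.get_json('/meters') now requests /v2/meters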
- -from ceilometer.tests import api - - -class FunctionalTest(api.FunctionalTest): - PATH_PREFIX = '/v2' diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/api/v2/test_acl_scenarios.py ceilometer-5.0.0~b3/ceilometer/tests/api/v2/test_acl_scenarios.py --- ceilometer-5.0.0~b2/ceilometer/tests/api/v2/test_acl_scenarios.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/api/v2/test_acl_scenarios.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,220 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Test ACL.""" - -import datetime -import hashlib -import json - -from oslo_utils import timeutils -import webtest - -from ceilometer.api import app -from ceilometer.publisher import utils -from ceilometer import sample -from ceilometer.tests import api as acl -from ceilometer.tests.api import v2 -from ceilometer.tests import db as tests_db - -VALID_TOKEN = '4562138218392831' -VALID_TOKEN2 = '4562138218392832' - - -class FakeMemcache(object): - - TOKEN_HASH = hashlib.sha256(VALID_TOKEN.encode('utf-8')).hexdigest() - TOKEN2_HASH = hashlib.sha256(VALID_TOKEN2.encode('utf-8')).hexdigest() - - def get(self, key): - if (key == "tokens/%s" % VALID_TOKEN or - key == "tokens/%s" % self.TOKEN_HASH): - dt = timeutils.utcnow() + datetime.timedelta(minutes=5) - dt_isoformat = dt.isoformat() - return json.dumps(({'access': { - 'token': {'id': VALID_TOKEN, - 'expires': dt_isoformat}, - 'user': { - 'id': 'user_id1', - 'name': 'user_name1', - 'tenantId': '123i2910', - 'tenantName': 'mytenant', - 'roles': [ - {'name': 'admin'}, - ]}, - }}, dt_isoformat)) - if (key == "tokens/%s" % VALID_TOKEN2 or - key == "tokens/%s" % self.TOKEN2_HASH): - dt = timeutils.utcnow() + datetime.timedelta(minutes=5) - dt_isoformat = dt.isoformat() - return json.dumps(({'access': { - 'token': {'id': VALID_TOKEN2, - 'expires': dt_isoformat}, - 'user': { - 'id': 'user_id2', - 'name': 'user-good', - 'tenantId': 'project-good', - 'tenantName': 'goodies', - 'roles': [ - {'name': 'Member'}, - ]}, - }}, dt_isoformat)) - - @staticmethod - def set(key, value, **kwargs): - pass - - -class TestAPIACL(v2.FunctionalTest, - tests_db.MixinTestsWithBackendScenarios): - - def setUp(self): - super(TestAPIACL, self).setUp() - self.environ = {'fake.cache': FakeMemcache()} - - for cnt in [ - sample.Sample( - 'meter.test', - 'cumulative', - '', - 1, - 'user-good', - 'project-good', - 'resource-good', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample'}, - source='test_source'), - sample.Sample( - 'meter.mine', - 'gauge', - '', - 1, - 'user-fred', - 'project-good', - 'resource-56', - timestamp=datetime.datetime(2012, 7, 2, 10, 43), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample4'}, - source='test_source')]: - msg = utils.meter_message_from_counter( - cnt, self.CONF.publisher.telemetry_secret) - self.conn.record_metering_data(msg) - - def get_json(self, path, expect_errors=False, 
headers=None, - q=None, **params): - return super(TestAPIACL, self).get_json(path, - expect_errors=expect_errors, - headers=headers, - q=q or [], - extra_environ=self.environ, - **params) - - def _make_app(self): - self.CONF.set_override("cache", "fake.cache", group=acl.OPT_GROUP_NAME) - file_name = self.path_get('etc/ceilometer/api_paste.ini') - self.CONF.set_override("api_paste_config", file_name) - return webtest.TestApp(app.load_app()) - - def test_non_authenticated(self): - response = self.get_json('/meters', expect_errors=True) - self.assertEqual(401, response.status_int) - - def test_authenticated_wrong_role(self): - response = self.get_json('/meters', - expect_errors=True, - headers={ - "X-Roles": "Member", - "X-Tenant-Name": "admin", - "X-Project-Id": - "bc23a9d531064583ace8f67dad60f6bb", - }) - self.assertEqual(401, response.status_int) - - # FIXME(dhellmann): This test is not properly looking at the tenant - # info. We do not correctly detect the improper tenant. That's - # really something the keystone middleware would have to do using - # the incoming token, which we aren't providing. - # - # def test_authenticated_wrong_tenant(self): - # response = self.get_json('/meters', - # expect_errors=True, - # headers={ - # "X-Roles": "admin", - # "X-Tenant-Name": "achoo", - # "X-Project-Id": "bc23a9d531064583ace8f67dad60f6bb", - # }) - # self.assertEqual(401, response.status_int) - - def test_authenticated(self): - data = self.get_json('/meters', - headers={"X-Auth-Token": VALID_TOKEN, - "X-Roles": "admin", - "X-Tenant-Name": "admin", - "X-Project-Id": - "bc23a9d531064583ace8f67dad60f6bb", - }) - ids = set(r['resource_id'] for r in data) - self.assertEqual(set(['resource-good', 'resource-56']), ids) - - def test_with_non_admin_missing_project_query(self): - data = self.get_json('/meters', - headers={"X-Roles": "Member", - "X-Auth-Token": VALID_TOKEN2, - "X-Project-Id": "project-good"}) - ids = set(r['resource_id'] for r in data) - self.assertEqual(set(['resource-good', 'resource-56']), ids) - - def test_with_non_admin(self): - data = self.get_json('/meters', - headers={"X-Roles": "Member", - "X-Auth-Token": VALID_TOKEN2, - "X-Project-Id": "project-good"}, - q=[{'field': 'project_id', - 'value': 'project-good', - }]) - ids = set(r['resource_id'] for r in data) - self.assertEqual(set(['resource-good', 'resource-56']), ids) - - def test_non_admin_wrong_project(self): - data = self.get_json('/meters', - expect_errors=True, - headers={"X-Roles": "Member", - "X-Auth-Token": VALID_TOKEN2, - "X-Project-Id": "project-good"}, - q=[{'field': 'project_id', - 'value': 'project-wrong', - }]) - self.assertEqual(401, data.status_int) - - def test_non_admin_two_projects(self): - data = self.get_json('/meters', - expect_errors=True, - headers={"X-Roles": "Member", - "X-Auth-Token": VALID_TOKEN2, - "X-Project-Id": "project-good"}, - q=[{'field': 'project_id', - 'value': 'project-good', - }, - {'field': 'project_id', - 'value': 'project-naughty', - }]) - self.assertEqual(401, data.status_int) - - def test_non_admin_get_events(self): - data = self.get_json('/event_types', expect_errors=True, - headers={"X-Roles": "Member", - "X-Auth-Token": VALID_TOKEN2, - "X-Project-Id": "project-good"}) - self.assertEqual(401, data.status_int) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/api/v2/test_alarm_scenarios.py ceilometer-5.0.0~b3/ceilometer/tests/api/v2/test_alarm_scenarios.py --- ceilometer-5.0.0~b2/ceilometer/tests/api/v2/test_alarm_scenarios.py 2015-07-30 12:14:00.000000000 +0000 +++ 
ceilometer-5.0.0~b3/ceilometer/tests/api/v2/test_alarm_scenarios.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,2929 +0,0 @@ -# -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests alarm operation.""" - -import datetime -import uuid - -import mock -import oslo_messaging.conffixture -from oslo_serialization import jsonutils -import requests -import six -from six import moves -import six.moves.urllib.parse as urlparse - -from ceilometer.alarm.storage import models -from ceilometer import messaging -from ceilometer.tests.api import v2 -from ceilometer.tests import constants -from ceilometer.tests import db as tests_db - - -class TestListEmptyAlarms(v2.FunctionalTest, - tests_db.MixinTestsWithBackendScenarios): - - def test_empty(self): - data = self.get_json('/alarms') - self.assertEqual([], data) - - -class TestAlarms(v2.FunctionalTest, - tests_db.MixinTestsWithBackendScenarios): - - def setUp(self): - super(TestAlarms, self).setUp() - self.auth_headers = {'X-User-Id': str(uuid.uuid4()), - 'X-Project-Id': str(uuid.uuid4())} - for alarm in [ - models.Alarm(name='name1', - type='threshold', - enabled=True, - alarm_id='a', - description='a', - state='insufficient data', - severity='critical', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - ok_actions=[], - insufficient_data_actions=[], - alarm_actions=[], - repeat_actions=True, - user_id=self.auth_headers['X-User-Id'], - project_id=self.auth_headers['X-Project-Id'], - time_constraints=[dict(name='testcons', - start='0 11 * * *', - duration=300)], - rule=dict(comparison_operator='gt', - threshold=2.0, - statistic='avg', - evaluation_periods=60, - period=1, - meter_name='meter.test', - query=[{'field': 'project_id', - 'op': 'eq', 'value': - self.auth_headers['X-Project-Id']} - ]), - ), - models.Alarm(name='name2', - type='threshold', - enabled=True, - alarm_id='b', - description='b', - state='insufficient data', - severity='critical', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - ok_actions=[], - insufficient_data_actions=[], - alarm_actions=[], - repeat_actions=False, - user_id=self.auth_headers['X-User-Id'], - project_id=self.auth_headers['X-Project-Id'], - time_constraints=[], - rule=dict(comparison_operator='gt', - threshold=4.0, - statistic='avg', - evaluation_periods=60, - period=1, - meter_name='meter.test', - query=[{'field': 'project_id', - 'op': 'eq', 'value': - self.auth_headers['X-Project-Id']} - ]), - ), - models.Alarm(name='name3', - type='threshold', - enabled=True, - alarm_id='c', - description='c', - state='insufficient data', - severity='moderate', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - ok_actions=[], - insufficient_data_actions=[], - alarm_actions=[], - repeat_actions=False, - user_id=self.auth_headers['X-User-Id'], - project_id=self.auth_headers['X-Project-Id'], - time_constraints=[], - rule=dict(comparison_operator='gt', - threshold=3.0, - statistic='avg', - evaluation_periods=60, - period=1, - 
meter_name='meter.mine', - query=[{'field': 'project_id', - 'op': 'eq', 'value': - self.auth_headers['X-Project-Id']} - ]), - ), - models.Alarm(name='name4', - type='combination', - enabled=True, - alarm_id='d', - description='d', - state='insufficient data', - severity='low', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - ok_actions=[], - insufficient_data_actions=[], - alarm_actions=[], - repeat_actions=False, - user_id=self.auth_headers['X-User-Id'], - project_id=self.auth_headers['X-Project-Id'], - time_constraints=[], - rule=dict(alarm_ids=['a', 'b'], - operator='or'), - ), - models.Alarm(name='name5', - type='gnocchi_resources_threshold', - enabled=True, - alarm_id='e', - description='e', - state='insufficient data', - severity='critical', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - ok_actions=[], - insufficient_data_actions=[], - alarm_actions=[], - repeat_actions=True, - user_id=self.auth_headers['X-User-Id'], - project_id=self.auth_headers['X-Project-Id'], - time_constraints=[], - rule=dict(comparison_operator='gt', - threshold=2.0, - aggregation_method='mean', - granularity=60, - evaluation_periods=1, - metric='meter.test', - resource_type='instance', - resource_id=( - '6841c175-d7c4-4bc2-bc7a-1c7832271b8f'), - ) - ), - models.Alarm(name='name6', - type='gnocchi_aggregation_by_metrics_threshold', - enabled=True, - alarm_id='f', - description='f', - state='insufficient data', - severity='critical', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - ok_actions=[], - insufficient_data_actions=[], - alarm_actions=[], - repeat_actions=True, - user_id=self.auth_headers['X-User-Id'], - project_id=self.auth_headers['X-Project-Id'], - time_constraints=[], - rule=dict(comparison_operator='gt', - threshold=2.0, - aggregation_method='mean', - evaluation_periods=1, - granularity=60, - metrics=[ - '41869681-5776-46d6-91ed-cccc43b6e4e3', - 'a1fb80f4-c242-4f57-87c6-68f47521059e'] - ), - ), - models.Alarm(name='name7', - type='gnocchi_aggregation_by_resources_threshold', - enabled=True, - alarm_id='g', - description='f', - state='insufficient data', - severity='critical', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - ok_actions=[], - insufficient_data_actions=[], - alarm_actions=[], - repeat_actions=True, - user_id=self.auth_headers['X-User-Id'], - project_id=self.auth_headers['X-Project-Id'], - time_constraints=[], - rule=dict(comparison_operator='gt', - threshold=2.0, - aggregation_method='mean', - granularity=60, - evaluation_periods=1, - metric='meter.test', - resource_type='instance', - query='{"=": {"server_group": ' - '"my_autoscaling_group"}}') - ), - - ]: - - self.alarm_conn.update_alarm(alarm) - - @staticmethod - def _add_default_threshold_rule(alarm): - if (alarm['type'] == 'threshold' and - 'exclude_outliers' not in alarm['threshold_rule']): - alarm['threshold_rule']['exclude_outliers'] = False - - def _verify_alarm(self, json, alarm, expected_name=None): - if expected_name and alarm.name != expected_name: - self.fail("Alarm not found") - self._add_default_threshold_rule(json) - for key in json: - if key.endswith('_rule'): - storage_key = 'rule' - else: - storage_key = key - self.assertEqual(json[key], getattr(alarm, storage_key)) - - def test_list_alarms(self): - data = self.get_json('/alarms') - self.assertEqual(7, len(data)) - self.assertEqual(set(['name1', 'name2', 'name3', 'name4', 'name5', - 'name6', 'name7']), - set(r['name'] for r in data)) - 
self.assertEqual(set(['meter.test', 'meter.mine']), - set(r['threshold_rule']['meter_name'] - for r in data if 'threshold_rule' in r)) - self.assertEqual(set(['or']), - set(r['combination_rule']['operator'] - for r in data if 'combination_rule' in r)) - self.assertEqual(set(['meter.test']), - set(r['gnocchi_resources_threshold_rule']['metric'] - for r in data - if 'gnocchi_resources_threshold_rule' in r)) - - def test_alarms_query_with_timestamp(self): - date_time = datetime.datetime(2012, 7, 2, 10, 41) - isotime = date_time.isoformat() - resp = self.get_json('/alarms', - q=[{'field': 'timestamp', - 'op': 'gt', - 'value': isotime}], - expect_errors=True) - self.assertEqual(resp.status_code, 400) - self.assertEqual(jsonutils.loads(resp.body)['error_message'] - ['faultstring'], - 'Unknown argument: "timestamp": ' - 'not valid for this resource') - - def test_alarms_query_with_meter(self): - resp = self.get_json('/alarms', - q=[{'field': 'meter', - 'op': 'eq', - 'value': 'meter.mine'}], - ) - self.assertEqual(1, len(resp)) - self.assertEqual('c', - resp[0]['alarm_id']) - self.assertEqual('meter.mine', - resp[0] - ['threshold_rule'] - ['meter_name']) - - def test_alarms_query_with_state(self): - alarm = models.Alarm(name='disabled', - type='combination', - enabled=False, - alarm_id='d', - description='d', - state='ok', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - ok_actions=[], - insufficient_data_actions=[], - alarm_actions=[], - repeat_actions=False, - user_id=self.auth_headers['X-User-Id'], - project_id=self.auth_headers['X-Project-Id'], - time_constraints=[], - rule=dict(alarm_ids=['a', 'b'], operator='or'), - severity='critical') - self.alarm_conn.update_alarm(alarm) - resp = self.get_json('/alarms', - q=[{'field': 'state', - 'op': 'eq', - 'value': 'ok'}], - ) - self.assertEqual(1, len(resp)) - self.assertEqual('ok', resp[0]['state']) - - def test_list_alarms_by_type(self): - alarms = self.get_json('/alarms', - q=[{'field': 'type', - 'op': 'eq', - 'value': 'threshold'}]) - self.assertEqual(3, len(alarms)) - self.assertEqual(set(['threshold']), - set(alarm['type'] for alarm in alarms)) - - def test_get_not_existing_alarm(self): - resp = self.get_json('/alarms/alarm-id-3', expect_errors=True) - self.assertEqual(404, resp.status_code) - self.assertEqual('Alarm alarm-id-3 not found', - jsonutils.loads(resp.body)['error_message'] - ['faultstring']) - - def test_get_alarm(self): - alarms = self.get_json('/alarms', - q=[{'field': 'name', - 'value': 'name1', - }]) - self.assertEqual('name1', alarms[0]['name']) - self.assertEqual('meter.test', - alarms[0]['threshold_rule']['meter_name']) - - one = self.get_json('/alarms/%s' % alarms[0]['alarm_id']) - self.assertEqual('name1', one['name']) - self.assertEqual('meter.test', one['threshold_rule']['meter_name']) - self.assertEqual(alarms[0]['alarm_id'], one['alarm_id']) - self.assertEqual(alarms[0]['repeat_actions'], one['repeat_actions']) - self.assertEqual(alarms[0]['time_constraints'], - one['time_constraints']) - - def test_get_alarm_disabled(self): - alarm = models.Alarm(name='disabled', - type='combination', - enabled=False, - alarm_id='d', - description='d', - state='insufficient data', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - ok_actions=[], - insufficient_data_actions=[], - alarm_actions=[], - repeat_actions=False, - user_id=self.auth_headers['X-User-Id'], - project_id=self.auth_headers['X-Project-Id'], - time_constraints=[], - rule=dict(alarm_ids=['a', 'b'], 
operator='or'), - severity='critical') - self.alarm_conn.update_alarm(alarm) - - alarms = self.get_json('/alarms', - q=[{'field': 'enabled', - 'value': 'False'}]) - self.assertEqual(1, len(alarms)) - self.assertEqual('disabled', alarms[0]['name']) - - one = self.get_json('/alarms/%s' % alarms[0]['alarm_id']) - self.assertEqual('disabled', one['name']) - - def test_get_alarm_combination(self): - alarms = self.get_json('/alarms', - q=[{'field': 'name', - 'value': 'name4', - }]) - self.assertEqual('name4', alarms[0]['name']) - self.assertEqual(['a', 'b'], - alarms[0]['combination_rule']['alarm_ids']) - self.assertEqual('or', alarms[0]['combination_rule']['operator']) - - one = self.get_json('/alarms/%s' % alarms[0]['alarm_id']) - self.assertEqual('name4', one['name']) - self.assertEqual(['a', 'b'], - alarms[0]['combination_rule']['alarm_ids']) - self.assertEqual('or', alarms[0]['combination_rule']['operator']) - self.assertEqual(alarms[0]['alarm_id'], one['alarm_id']) - self.assertEqual(alarms[0]['repeat_actions'], one['repeat_actions']) - - def test_get_alarm_project_filter_wrong_op_normal_user(self): - project = self.auth_headers['X-Project-Id'] - - def _test(field, op): - response = self.get_json('/alarms', - q=[{'field': field, - 'op': op, - 'value': project}], - expect_errors=True, - status=400, - headers=self.auth_headers) - faultstring = ('Invalid input for field/attribute op. ' - 'Value: \'%(op)s\'. unimplemented operator ' - 'for %(field)s' % {'field': field, 'op': op}) - self.assertEqual(faultstring, - response.json['error_message']['faultstring']) - - _test('project', 'ne') - _test('project_id', 'ne') - - def test_get_alarm_project_filter_normal_user(self): - project = self.auth_headers['X-Project-Id'] - - def _test(field): - alarms = self.get_json('/alarms', - q=[{'field': field, - 'op': 'eq', - 'value': project}]) - self.assertEqual(7, len(alarms)) - - _test('project') - _test('project_id') - - def test_get_alarm_other_project_normal_user(self): - def _test(field): - response = self.get_json('/alarms', - q=[{'field': field, - 'op': 'eq', - 'value': 'other-project'}], - expect_errors=True, - status=401, - headers=self.auth_headers) - faultstring = 'Not Authorized to access project other-project' - self.assertEqual(faultstring, - response.json['error_message']['faultstring']) - - _test('project') - _test('project_id') - - def test_post_alarm_wsme_workaround(self): - jsons = { - 'type': { - 'name': 'missing type', - 'threshold_rule': { - 'meter_name': 'ameter', - 'threshold': 2.0, - } - }, - 'name': { - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'threshold': 2.0, - } - }, - 'threshold_rule/meter_name': { - 'name': 'missing meter_name', - 'type': 'threshold', - 'threshold_rule': { - 'threshold': 2.0, - } - }, - 'threshold_rule/threshold': { - 'name': 'missing threshold', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - } - }, - 'combination_rule/alarm_ids': { - 'name': 'missing alarm_ids', - 'type': 'combination', - 'combination_rule': {} - } - } - for field, json in six.iteritems(jsons): - resp = self.post_json('/alarms', params=json, expect_errors=True, - status=400, headers=self.auth_headers) - self.assertEqual("Invalid input for field/attribute %s." - " Value: \'None\'. Mandatory field missing." 
- % field.split('/', 1)[-1], - resp.json['error_message']['faultstring']) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(7, len(alarms)) - - def test_post_invalid_alarm_time_constraint_start(self): - json = { - 'name': 'added_alarm_invalid_constraint_duration', - 'type': 'threshold', - 'time_constraints': [ - { - 'name': 'testcons', - 'start': '11:00am', - 'duration': 10 - } - ], - 'threshold_rule': { - 'meter_name': 'ameter', - 'threshold': 300.0 - } - } - self.post_json('/alarms', params=json, expect_errors=True, status=400, - headers=self.auth_headers) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(7, len(alarms)) - - def test_post_duplicate_time_constraint_name(self): - json = { - 'name': 'added_alarm_duplicate_constraint_name', - 'type': 'threshold', - 'time_constraints': [ - { - 'name': 'testcons', - 'start': '* 11 * * *', - 'duration': 10 - }, - { - 'name': 'testcons', - 'start': '* * * * *', - 'duration': 20 - } - ], - 'threshold_rule': { - 'meter_name': 'ameter', - 'threshold': 300.0 - } - } - resp = self.post_json('/alarms', params=json, expect_errors=True, - status=400, headers=self.auth_headers) - self.assertEqual( - "Time constraint names must be unique for a given alarm.", - resp.json['error_message']['faultstring']) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(7, len(alarms)) - - def test_post_alarm_null_time_constraint(self): - json = { - 'name': 'added_alarm_invalid_constraint_duration', - 'type': 'threshold', - 'time_constraints': None, - 'threshold_rule': { - 'meter_name': 'ameter', - 'threshold': 300.0 - } - } - self.post_json('/alarms', params=json, status=201, - headers=self.auth_headers) - - def test_post_invalid_alarm_time_constraint_duration(self): - json = { - 'name': 'added_alarm_invalid_constraint_duration', - 'type': 'threshold', - 'time_constraints': [ - { - 'name': 'testcons', - 'start': '* 11 * * *', - 'duration': -1, - } - ], - 'threshold_rule': { - 'meter_name': 'ameter', - 'threshold': 300.0 - } - } - self.post_json('/alarms', params=json, expect_errors=True, status=400, - headers=self.auth_headers) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(7, len(alarms)) - - def test_post_invalid_alarm_time_constraint_timezone(self): - json = { - 'name': 'added_alarm_invalid_constraint_timezone', - 'type': 'threshold', - 'time_constraints': [ - { - 'name': 'testcons', - 'start': '* 11 * * *', - 'duration': 10, - 'timezone': 'aaaa' - } - ], - 'threshold_rule': { - 'meter_name': 'ameter', - 'threshold': 300.0 - } - } - self.post_json('/alarms', params=json, expect_errors=True, status=400, - headers=self.auth_headers) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(7, len(alarms)) - - def test_post_invalid_alarm_period(self): - json = { - 'name': 'added_alarm_invalid_period', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'comparison_operator': 'gt', - 'threshold': 2.0, - 'statistic': 'avg', - 'period': -1, - } - - } - self.post_json('/alarms', params=json, expect_errors=True, status=400, - headers=self.auth_headers) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(7, len(alarms)) - - def test_post_null_threshold_rule(self): - json = { - 'name': 'added_alarm_invalid_threshold_rule', - 'type': 'threshold', - 'threshold_rule': None, - 'combination_rule': None, - } - resp = self.post_json('/alarms', params=json, expect_errors=True, - status=400, headers=self.auth_headers) - self.assertEqual( - "threshold_rule must be set for threshold type 
alarm", - resp.json['error_message']['faultstring']) - - def test_post_invalid_alarm_statistic(self): - json = { - 'name': 'added_alarm', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'comparison_operator': 'gt', - 'threshold': 2.0, - 'statistic': 'magic', - } - } - resp = self.post_json('/alarms', params=json, expect_errors=True, - status=400, headers=self.auth_headers) - expected_err_msg = ("Invalid input for field/attribute" - " statistic. Value: 'magic'.") - self.assertIn(expected_err_msg, - resp.json['error_message']['faultstring']) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(7, len(alarms)) - - def test_post_invalid_alarm_input_state(self): - json = { - 'name': 'alarm1', - 'state': 'bad_state', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'comparison_operator': 'gt', - 'threshold': 50.0 - } - } - resp = self.post_json('/alarms', params=json, expect_errors=True, - status=400, headers=self.auth_headers) - expected_err_msg = ("Invalid input for field/attribute state." - " Value: 'bad_state'.") - self.assertIn(expected_err_msg, - resp.json['error_message']['faultstring']) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(7, len(alarms)) - - def test_post_invalid_alarm_input_severity(self): - json = { - 'name': 'alarm1', - 'state': 'ok', - 'severity': 'bad_value', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'comparison_operator': 'gt', - 'threshold': 50.0 - } - } - resp = self.post_json('/alarms', params=json, expect_errors=True, - status=400, headers=self.auth_headers) - expected_err_msg = ("Invalid input for field/attribute severity." - " Value: 'bad_value'.") - self.assertIn(expected_err_msg, - resp.json['error_message']['faultstring']) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(7, len(alarms)) - - def test_post_invalid_alarm_input_comparison_operator(self): - json = { - 'name': 'alarm2', - 'state': 'ok', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'comparison_operator': 'bad_co', - 'threshold': 50.0 - } - } - resp = self.post_json('/alarms', params=json, expect_errors=True, - status=400, headers=self.auth_headers) - expected_err_msg = ("Invalid input for field/attribute" - " comparison_operator." - " Value: 'bad_co'.") - self.assertIn(expected_err_msg, - resp.json['error_message']['faultstring']) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(7, len(alarms)) - - def test_post_invalid_alarm_input_type(self): - json = { - 'name': 'alarm3', - 'state': 'ok', - 'type': 'bad_type', - 'threshold_rule': { - 'meter_name': 'ameter', - 'comparison_operator': 'gt', - 'threshold': 50.0 - } - } - resp = self.post_json('/alarms', params=json, expect_errors=True, - status=400, headers=self.auth_headers) - expected_err_msg = ("Invalid input for field/attribute" - " type." 
- " Value: 'bad_type'.") - self.assertIn(expected_err_msg, - resp.json['error_message']['faultstring']) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(7, len(alarms)) - - def test_post_invalid_alarm_input_enabled_str(self): - json = { - 'name': 'alarm5', - 'enabled': 'bad_enabled', - 'state': 'ok', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'comparison_operator': 'gt', - 'threshold': 50.0 - } - } - resp = self.post_json('/alarms', params=json, expect_errors=True, - status=400, headers=self.auth_headers) - expected_err_msg = "Value not an unambiguous boolean: bad_enabled" - self.assertIn(expected_err_msg, - resp.json['error_message']['faultstring']) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(7, len(alarms)) - - def test_post_invalid_alarm_input_enabled_int(self): - json = { - 'name': 'alarm6', - 'enabled': 0, - 'state': 'ok', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'comparison_operator': 'gt', - 'threshold': 50.0 - } - } - resp = self.post_json('/alarms', params=json, - headers=self.auth_headers) - self.assertFalse(resp.json['enabled']) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(8, len(alarms)) - - def test_post_invalid_combination_alarm_input_operator(self): - json = { - 'enabled': False, - 'name': 'alarm6', - 'state': 'ok', - 'type': 'combination', - 'ok_actions': ['http://something/ok'], - 'alarm_actions': ['http://something/alarm'], - 'insufficient_data_actions': ['http://something/no'], - 'repeat_actions': True, - 'combination_rule': { - 'alarm_ids': ['a', - 'b'], - 'operator': 'bad_operator', - } - } - resp = self.post_json('/alarms', params=json, expect_errors=True, - status=400, headers=self.auth_headers) - expected_err_msg = ("Invalid input for field/attribute" - " operator." - " Value: 'bad_operator'.") - self.assertIn(expected_err_msg, - resp.json['error_message']['faultstring']) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(7, len(alarms)) - - def test_post_invalid_alarm_query(self): - json = { - 'name': 'added_alarm', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.invalid', - 'field': 'gt', - 'value': 'value'}], - 'comparison_operator': 'gt', - 'threshold': 2.0, - 'statistic': 'avg', - } - } - self.post_json('/alarms', params=json, expect_errors=True, status=400, - headers=self.auth_headers) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(7, len(alarms)) - - def test_post_invalid_alarm_query_field_type(self): - json = { - 'name': 'added_alarm', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.valid', - 'op': 'eq', - 'value': 'value', - 'type': 'blob'}], - 'comparison_operator': 'gt', - 'threshold': 2.0, - 'statistic': 'avg', - } - } - resp = self.post_json('/alarms', params=json, expect_errors=True, - status=400, headers=self.auth_headers) - expected_error_message = 'The data type blob is not supported.' 
- resp_string = jsonutils.loads(resp.body) - fault_string = resp_string['error_message']['faultstring'] - self.assertTrue(fault_string.startswith(expected_error_message)) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(7, len(alarms)) - - def test_post_invalid_alarm_query_non_field(self): - json = { - 'name': 'added_alarm', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'q.field': 'metadata.valid', - 'value': 'value'}], - 'threshold': 2.0, - } - } - resp = self.post_json('/alarms', params=json, expect_errors=True, - status=400, headers=self.auth_headers) - expected_error_message = ("Invalid input for field/attribute field. " - "Value: 'None'. Mandatory field missing.") - fault_string = resp.json['error_message']['faultstring'] - self.assertEqual(expected_error_message, fault_string) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(7, len(alarms)) - - def test_post_invalid_alarm_query_non_value(self): - json = { - 'name': 'added_alarm', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.valid', - 'q.value': 'value'}], - 'threshold': 2.0, - } - } - resp = self.post_json('/alarms', params=json, expect_errors=True, - status=400, headers=self.auth_headers) - expected_error_message = ("Invalid input for field/attribute value. " - "Value: 'None'. Mandatory field missing.") - fault_string = resp.json['error_message']['faultstring'] - self.assertEqual(expected_error_message, fault_string) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(7, len(alarms)) - - def test_post_invalid_alarm_have_multiple_rules(self): - json = { - 'name': 'added_alarm', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'meter', - 'value': 'ameter'}], - 'comparison_operator': 'gt', - 'threshold': 2.0, - }, - 'combination_rule': { - 'alarm_ids': ['a', 'b'], - - } - } - resp = self.post_json('/alarms', params=json, expect_errors=True, - status=400, headers=self.auth_headers) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(7, len(alarms)) - - # threshold_rule and combination_rule order is not - # predictable so it is not possible to do an exact match - # here - error_faultstring = resp.json['error_message']['faultstring'] - for expected_string in ['threshold_rule', 'combination_rule', - 'cannot be set at the same time']: - self.assertIn(expected_string, error_faultstring) - - def test_post_invalid_alarm_timestamp_in_threshold_rule(self): - date_time = datetime.datetime(2012, 7, 2, 10, 41) - isotime = date_time.isoformat() - - json = { - 'name': 'invalid_alarm', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'timestamp', - 'op': 'gt', - 'value': isotime}], - 'comparison_operator': 'gt', - 'threshold': 2.0, - } - } - resp = self.post_json('/alarms', params=json, expect_errors=True, - status=400, headers=self.auth_headers) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(7, len(alarms)) - self.assertEqual( - 'Unknown argument: "timestamp": ' - 'not valid for this resource', - resp.json['error_message']['faultstring']) - - def _do_post_alarm_invalid_action(self, ok_actions=None, - alarm_actions=None, - insufficient_data_actions=None, - error_message=None): - - ok_actions = ok_actions or [] - alarm_actions = alarm_actions or [] - insufficient_data_actions = insufficient_data_actions or [] - json = { - 'enabled': False, - 'name': 'added_alarm', - 'state': 'ok', - 'type': 
'threshold', - 'ok_actions': ok_actions, - 'alarm_actions': alarm_actions, - 'insufficient_data_actions': insufficient_data_actions, - 'repeat_actions': True, - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': '3', - 'period': '180', - } - } - resp = self.post_json('/alarms', params=json, status=400, - headers=self.auth_headers) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(7, len(alarms)) - self.assertEqual(error_message, - resp.json['error_message']['faultstring']) - - def test_post_invalid_alarm_ok_actions(self): - self._do_post_alarm_invalid_action( - ok_actions=['spam://something/ok'], - error_message='Unsupported action spam://something/ok') - - def test_post_invalid_alarm_alarm_actions(self): - self._do_post_alarm_invalid_action( - alarm_actions=['spam://something/alarm'], - error_message='Unsupported action spam://something/alarm') - - def test_post_invalid_alarm_insufficient_data_actions(self): - self._do_post_alarm_invalid_action( - insufficient_data_actions=['spam://something/insufficient'], - error_message='Unsupported action spam://something/insufficient') - - @staticmethod - def _fake_urlsplit(*args, **kwargs): - raise Exception("Evil urlsplit!") - - def test_post_invalid_alarm_actions_format(self): - with mock.patch('oslo_utils.netutils.urlsplit', - self._fake_urlsplit): - self._do_post_alarm_invalid_action( - alarm_actions=['http://[::1'], - error_message='Unable to parse action http://[::1') - - def test_post_alarm_defaults(self): - to_check = { - 'enabled': True, - 'name': 'added_alarm_defaults', - 'state': 'insufficient data', - 'description': ('Alarm when ameter is eq a avg of ' - '300.0 over 60 seconds'), - 'type': 'threshold', - 'ok_actions': [], - 'alarm_actions': [], - 'insufficient_data_actions': [], - 'repeat_actions': False, - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'project_id', - 'op': 'eq', - 'value': self.auth_headers['X-Project-Id']}], - 'threshold': 300.0, - 'comparison_operator': 'eq', - 'statistic': 'avg', - 'evaluation_periods': 1, - 'period': 60, - } - - } - self._add_default_threshold_rule(to_check) - - json = { - 'name': 'added_alarm_defaults', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'threshold': 300.0 - } - } - self.post_json('/alarms', params=json, status=201, - headers=self.auth_headers) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(8, len(alarms)) - for alarm in alarms: - if alarm.name == 'added_alarm_defaults': - for key in to_check: - if key.endswith('_rule'): - storage_key = 'rule' - else: - storage_key = key - self.assertEqual(to_check[key], - getattr(alarm, storage_key)) - break - else: - self.fail("Alarm not found") - - def test_post_conflict(self): - json = { - 'enabled': False, - 'name': 'added_alarm', - 'state': 'ok', - 'type': 'threshold', - 'ok_actions': ['http://something/ok'], - 'alarm_actions': ['http://something/alarm'], - 'insufficient_data_actions': ['http://something/no'], - 'repeat_actions': True, - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': '3', - 'period': '180', - } - } - - self.post_json('/alarms', params=json, status=201, - headers=self.auth_headers) - 
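test_post_conflict relies on alarm names being unique for their owner: the first POST above succeeds with 201, and the identical POST that follows must come back 409. A toy model of that uniqueness guard, assuming a per-project scope (ConflictError and AlarmStore are illustrative, not the real storage driver):

    class ConflictError(Exception):
        """Maps to HTTP 409 in the API layer."""

    class AlarmStore(object):
        # Toy in-memory stand-in for the alarm storage backend.
        def __init__(self):
            self._by_key = {}

        def create(self, project_id, alarm):
            key = (project_id, alarm['name'])
            if key in self._by_key:
                # Same faultstring style the deleted
                # test_put_alarm_with_existing_name asserts.
                raise ConflictError('Alarm with name=%s exists' % alarm['name'])
            self._by_key[key] = alarm
            return alarm

    store = AlarmStore()
    store.create('project-x', {'name': 'added_alarm'})
    try:
        store.create('project-x', {'name': 'added_alarm'})
    except ConflictError:
        pass  # the second, identical create conflicts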
self.post_json('/alarms', params=json, status=409, - headers=self.auth_headers) - - def _do_test_post_alarm(self, exclude_outliers=None): - json = { - 'enabled': False, - 'name': 'added_alarm', - 'state': 'ok', - 'type': 'threshold', - 'severity': 'low', - 'ok_actions': ['http://something/ok'], - 'alarm_actions': ['http://something/alarm'], - 'insufficient_data_actions': ['http://something/no'], - 'repeat_actions': True, - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': '3', - 'period': '180', - } - } - if exclude_outliers is not None: - json['threshold_rule']['exclude_outliers'] = exclude_outliers - - self.post_json('/alarms', params=json, status=201, - headers=self.auth_headers) - alarms = list(self.alarm_conn.get_alarms(enabled=False)) - self.assertEqual(1, len(alarms)) - json['threshold_rule']['query'].append({ - 'field': 'project_id', 'op': 'eq', - 'value': self.auth_headers['X-Project-Id']}) - # to check to IntegerType type conversion - json['threshold_rule']['evaluation_periods'] = 3 - json['threshold_rule']['period'] = 180 - self._verify_alarm(json, alarms[0], 'added_alarm') - - def test_post_alarm_outlier_exclusion_set(self): - self._do_test_post_alarm(True) - - def test_post_alarm_outlier_exclusion_clear(self): - self._do_test_post_alarm(False) - - def test_post_alarm_outlier_exclusion_defaulted(self): - self._do_test_post_alarm() - - def test_post_alarm_noauth(self): - json = { - 'enabled': False, - 'name': 'added_alarm', - 'state': 'ok', - 'type': 'threshold', - 'severity': 'low', - 'ok_actions': ['http://something/ok'], - 'alarm_actions': ['http://something/alarm'], - 'insufficient_data_actions': ['http://something/no'], - 'repeat_actions': True, - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': '3', - 'exclude_outliers': False, - 'period': '180', - } - } - self.post_json('/alarms', params=json, status=201) - alarms = list(self.alarm_conn.get_alarms(enabled=False)) - self.assertEqual(1, len(alarms)) - # to check to BoundedInt type conversion - json['threshold_rule']['evaluation_periods'] = 3 - json['threshold_rule']['period'] = 180 - if alarms[0].name == 'added_alarm': - for key in json: - if key.endswith('_rule'): - storage_key = 'rule' - else: - storage_key = key - self.assertEqual(getattr(alarms[0], storage_key), - json[key]) - else: - self.fail("Alarm not found") - - def _do_test_post_alarm_as_admin(self, explicit_project_constraint): - """Test the creation of an alarm as admin for another project.""" - json = { - 'enabled': False, - 'name': 'added_alarm', - 'state': 'ok', - 'type': 'threshold', - 'user_id': 'auseridthatisnotmine', - 'project_id': 'aprojectidthatisnotmine', - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': 3, - 'period': 180, - } - } - if explicit_project_constraint: - project_constraint = {'field': 'project_id', 'op': 'eq', - 'value': 'aprojectidthatisnotmine'} - json['threshold_rule']['query'].append(project_constraint) - headers = {} - headers.update(self.auth_headers) - headers['X-Roles'] = 'admin' 
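The admin-on-behalf-of tests that follow assert that, when the caller supplied no explicit project_id query term, the stored threshold rule gains an implicit {'field': 'project_id', 'op': 'eq', ...} constraint for the alarm owner's project. A sketch of that scoping step (scope_query_to_project is an illustrative name, not the actual ceilometer function):

    # Sketch: appending the owner's project constraint when absent.
    def scope_query_to_project(query, project_id):
        if any(q.get('field') in ('project', 'project_id') for q in query):
            return query  # caller already constrained the project
        return query + [{'field': 'project_id', 'op': 'eq',
                         'value': project_id}]

    q = [{'field': 'metadata.field', 'op': 'eq', 'value': '5',
          'type': 'string'}]
    scoped = scope_query_to_project(q, 'aprojectidthatisnotmine')
    assert scoped[-1] == {'field': 'project_id', 'op': 'eq',
                          'value': 'aprojectidthatisnotmine'}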
- self.post_json('/alarms', params=json, status=201, - headers=headers) - alarms = list(self.alarm_conn.get_alarms(enabled=False)) - self.assertEqual(1, len(alarms)) - self.assertEqual('auseridthatisnotmine', alarms[0].user_id) - self.assertEqual('aprojectidthatisnotmine', alarms[0].project_id) - self._add_default_threshold_rule(json) - if alarms[0].name == 'added_alarm': - for key in json: - if key.endswith('_rule'): - storage_key = 'rule' - if explicit_project_constraint: - self.assertEqual(json[key], - getattr(alarms[0], storage_key)) - else: - query = getattr(alarms[0], storage_key).get('query') - self.assertEqual(2, len(query)) - implicit_constraint = { - u'field': u'project_id', - u'value': u'aprojectidthatisnotmine', - u'op': u'eq' - } - self.assertEqual(implicit_constraint, query[1]) - else: - self.assertEqual(json[key], getattr(alarms[0], key)) - else: - self.fail("Alarm not found") - - def test_post_alarm_as_admin_explicit_project_constraint(self): - """Test the creation of an alarm as admin for another project. - - With an explicit query constraint on the owner's project ID. - """ - self._do_test_post_alarm_as_admin(True) - - def test_post_alarm_as_admin_implicit_project_constraint(self): - """Test the creation of an alarm as admin for another project. - - Test without an explicit query constraint on the owner's project ID. - """ - self._do_test_post_alarm_as_admin(False) - - def test_post_alarm_as_admin_no_user(self): - """Test the creation of an alarm. - - Test the creation of an alarm as admin for another project but - forgetting to set the values. - """ - json = { - 'enabled': False, - 'name': 'added_alarm', - 'state': 'ok', - 'type': 'threshold', - 'project_id': 'aprojectidthatisnotmine', - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}, - {'field': 'project_id', 'op': 'eq', - 'value': 'aprojectidthatisnotmine'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': 3, - 'period': 180, - } - } - headers = {} - headers.update(self.auth_headers) - headers['X-Roles'] = 'admin' - self.post_json('/alarms', params=json, status=201, - headers=headers) - alarms = list(self.alarm_conn.get_alarms(enabled=False)) - self.assertEqual(1, len(alarms)) - self.assertEqual(self.auth_headers['X-User-Id'], alarms[0].user_id) - self.assertEqual('aprojectidthatisnotmine', alarms[0].project_id) - self._verify_alarm(json, alarms[0], 'added_alarm') - - def test_post_alarm_as_admin_no_project(self): - """Test the creation of an alarm. - - Test the creation of an alarm as admin for another project but - forgetting to set the values. 
- """ - json = { - 'enabled': False, - 'name': 'added_alarm', - 'state': 'ok', - 'type': 'threshold', - 'user_id': 'auseridthatisnotmine', - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}, - {'field': 'project_id', 'op': 'eq', - 'value': 'aprojectidthatisnotmine'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': 3, - 'period': 180, - } - } - headers = {} - headers.update(self.auth_headers) - headers['X-Roles'] = 'admin' - self.post_json('/alarms', params=json, status=201, - headers=headers) - alarms = list(self.alarm_conn.get_alarms(enabled=False)) - self.assertEqual(1, len(alarms)) - self.assertEqual('auseridthatisnotmine', alarms[0].user_id) - self.assertEqual(self.auth_headers['X-Project-Id'], - alarms[0].project_id) - self._verify_alarm(json, alarms[0], 'added_alarm') - - @staticmethod - def _alarm_representation_owned_by(identifiers): - json = { - 'name': 'added_alarm', - 'enabled': False, - 'type': 'threshold', - 'ok_actions': ['http://something/ok'], - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': 3, - 'period': 180, - } - } - for aspect, id in six.iteritems(identifiers): - json['%s_id' % aspect] = id - return json - - def _do_test_post_alarm_as_nonadmin_on_behalf_of_another(self, - identifiers): - """Test posting an alarm. - - Test that posting an alarm as non-admin on behalf of another - user/project fails with an explicit 401 instead of reverting - to the requestor's identity. - """ - json = self._alarm_representation_owned_by(identifiers) - headers = {} - headers.update(self.auth_headers) - headers['X-Roles'] = 'demo' - resp = self.post_json('/alarms', params=json, status=401, - headers=headers) - aspect = 'user' if 'user' in identifiers else 'project' - params = dict(aspect=aspect, id=identifiers[aspect]) - self.assertEqual("Not Authorized to access %(aspect)s %(id)s" % params, - jsonutils.loads(resp.body)['error_message'] - ['faultstring']) - - def test_post_alarm_as_nonadmin_on_behalf_of_another_user(self): - identifiers = dict(user='auseridthatisnotmine') - self._do_test_post_alarm_as_nonadmin_on_behalf_of_another(identifiers) - - def test_post_alarm_as_nonadmin_on_behalf_of_another_project(self): - identifiers = dict(project='aprojectidthatisnotmine') - self._do_test_post_alarm_as_nonadmin_on_behalf_of_another(identifiers) - - def test_post_alarm_as_nonadmin_on_behalf_of_another_creds(self): - identifiers = dict(user='auseridthatisnotmine', - project='aprojectidthatisnotmine') - self._do_test_post_alarm_as_nonadmin_on_behalf_of_another(identifiers) - - def _do_test_post_alarm_as_nonadmin_on_behalf_of_self(self, identifiers): - """Test posting an alarm. - - Test posting an alarm as non-admin on behalf of own user/project - creates alarm associated with the requestor's identity. 
- """ - json = self._alarm_representation_owned_by(identifiers) - headers = {} - headers.update(self.auth_headers) - headers['X-Roles'] = 'demo' - self.post_json('/alarms', params=json, status=201, headers=headers) - alarms = list(self.alarm_conn.get_alarms(enabled=False)) - self.assertEqual(1, len(alarms)) - self.assertEqual(alarms[0].user_id, - self.auth_headers['X-User-Id']) - self.assertEqual(alarms[0].project_id, - self.auth_headers['X-Project-Id']) - - def test_post_alarm_as_nonadmin_on_behalf_of_own_user(self): - identifiers = dict(user=self.auth_headers['X-User-Id']) - self._do_test_post_alarm_as_nonadmin_on_behalf_of_self(identifiers) - - def test_post_alarm_as_nonadmin_on_behalf_of_own_project(self): - identifiers = dict(project=self.auth_headers['X-Project-Id']) - self._do_test_post_alarm_as_nonadmin_on_behalf_of_self(identifiers) - - def test_post_alarm_as_nonadmin_on_behalf_of_own_creds(self): - identifiers = dict(user=self.auth_headers['X-User-Id'], - project=self.auth_headers['X-Project-Id']) - self._do_test_post_alarm_as_nonadmin_on_behalf_of_self(identifiers) - - def test_post_alarm_combination(self): - json = { - 'enabled': False, - 'name': 'added_alarm', - 'state': 'ok', - 'type': 'combination', - 'ok_actions': ['http://something/ok'], - 'alarm_actions': ['http://something/alarm'], - 'insufficient_data_actions': ['http://something/no'], - 'repeat_actions': True, - 'combination_rule': { - 'alarm_ids': ['a', - 'b'], - 'operator': 'and', - } - } - self.post_json('/alarms', params=json, status=201, - headers=self.auth_headers) - alarms = list(self.alarm_conn.get_alarms(enabled=False)) - self.assertEqual(1, len(alarms)) - if alarms[0].name == 'added_alarm': - for key in json: - if key.endswith('_rule'): - storage_key = 'rule' - else: - storage_key = key - self.assertEqual(json[key], getattr(alarms[0], storage_key)) - else: - self.fail("Alarm not found") - - def test_post_combination_alarm_as_user_with_unauthorized_alarm(self): - """Test posting a combination alarm. - - Test that post a combination alarm as normal user/project - with an alarm_id unauthorized for this project/user - """ - json = { - 'enabled': False, - 'name': 'added_alarm', - 'state': 'ok', - 'type': 'combination', - 'ok_actions': ['http://something/ok'], - 'alarm_actions': ['http://something/alarm'], - 'insufficient_data_actions': ['http://something/no'], - 'repeat_actions': True, - 'combination_rule': { - 'alarm_ids': ['a', - 'b'], - 'operator': 'and', - } - } - an_other_user_auth = {'X-User-Id': str(uuid.uuid4()), - 'X-Project-Id': str(uuid.uuid4())} - resp = self.post_json('/alarms', params=json, status=404, - headers=an_other_user_auth) - self.assertEqual("Alarm a not found in project " - "%s" % - an_other_user_auth['X-Project-Id'], - jsonutils.loads(resp.body)['error_message'] - ['faultstring']) - - def test_post_combination_alarm_as_admin_on_behalf_of_an_other_user(self): - """Test posting a combination alarm. 
- - Test that post a combination alarm as admin on behalf of an other - user/project with an alarm_id unauthorized for this project/user - """ - json = { - 'enabled': False, - 'name': 'added_alarm', - 'state': 'ok', - 'user_id': 'auseridthatisnotmine', - 'project_id': 'aprojectidthatisnotmine', - 'type': 'combination', - 'ok_actions': ['http://something/ok'], - 'alarm_actions': ['http://something/alarm'], - 'insufficient_data_actions': ['http://something/no'], - 'repeat_actions': True, - 'combination_rule': { - 'alarm_ids': ['a', - 'b'], - 'operator': 'and', - } - } - - headers = {} - headers.update(self.auth_headers) - headers['X-Roles'] = 'admin' - resp = self.post_json('/alarms', params=json, status=404, - headers=headers) - self.assertEqual("Alarm a not found in project " - "aprojectidthatisnotmine", - jsonutils.loads(resp.body)['error_message'] - ['faultstring']) - - def test_post_combination_alarm_with_reasonable_description(self): - """Test posting a combination alarm. - - Test that post a combination alarm with two blanks around the - operator in alarm description. - """ - json = { - 'enabled': False, - 'name': 'added_alarm', - 'state': 'ok', - 'type': 'combination', - 'ok_actions': ['http://something/ok'], - 'alarm_actions': ['http://something/alarm'], - 'insufficient_data_actions': ['http://something/no'], - 'repeat_actions': True, - 'combination_rule': { - 'alarm_ids': ['a', - 'b'], - 'operator': 'and', - } - } - self.post_json('/alarms', params=json, status=201, - headers=self.auth_headers) - alarms = list(self.alarm_conn.get_alarms(enabled=False)) - self.assertEqual(1, len(alarms)) - self.assertEqual(u'Combined state of alarms a and b', - alarms[0].description) - - def test_post_combination_alarm_as_admin_success_owner_unset(self): - self._do_post_combination_alarm_as_admin_success(False) - - def test_post_combination_alarm_as_admin_success_owner_set(self): - self._do_post_combination_alarm_as_admin_success(True) - - def test_post_combination_alarm_with_threshold_rule(self): - """Test the creation of an combination alarm with threshold rule.""" - json = { - 'enabled': False, - 'name': 'added_alarm', - 'state': 'ok', - 'type': 'combination', - 'ok_actions': ['http://something/ok'], - 'alarm_actions': ['http://something/alarm'], - 'insufficient_data_actions': ['http://something/no'], - 'repeat_actions': True, - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': '3', - 'period': '180', - } - } - resp = self.post_json('/alarms', params=json, - expect_errors=True, status=400, - headers=self.auth_headers) - self.assertEqual( - "combination_rule must be set for combination type alarm", - resp.json['error_message']['faultstring']) - - def test_post_threshold_alarm_with_combination_rule(self): - """Test the creation of an threshold alarm with combination rule.""" - json = { - 'enabled': False, - 'name': 'added_alarm', - 'state': 'ok', - 'type': 'threshold', - 'ok_actions': ['http://something/ok'], - 'alarm_actions': ['http://something/alarm'], - 'insufficient_data_actions': ['http://something/no'], - 'repeat_actions': True, - 'combination_rule': { - 'alarm_ids': ['a', - 'b'], - 'operator': 'and', - } - } - resp = self.post_json('/alarms', params=json, - expect_errors=True, status=400, - headers=self.auth_headers) - self.assertEqual( - "threshold_rule must be set for threshold type alarm", - 
resp.json['error_message']['faultstring']) - - def _do_post_combination_alarm_as_admin_success(self, owner_is_set): - """Test posting a combination alarm. - - Test that post a combination alarm as admin on behalf of nobody - with an alarm_id of someone else, with owner set or not - """ - json = { - 'enabled': False, - 'name': 'added_alarm', - 'state': 'ok', - 'type': 'combination', - 'ok_actions': ['http://something/ok'], - 'alarm_actions': ['http://something/alarm'], - 'insufficient_data_actions': ['http://something/no'], - 'repeat_actions': True, - 'combination_rule': { - 'alarm_ids': ['a', - 'b'], - 'operator': 'and', - } - } - an_other_admin_auth = {'X-User-Id': str(uuid.uuid4()), - 'X-Project-Id': str(uuid.uuid4()), - 'X-Roles': 'admin'} - if owner_is_set: - json['project_id'] = an_other_admin_auth['X-Project-Id'] - json['user_id'] = an_other_admin_auth['X-User-Id'] - - self.post_json('/alarms', params=json, status=201, - headers=an_other_admin_auth) - alarms = list(self.alarm_conn.get_alarms(enabled=False)) - if alarms[0].name == 'added_alarm': - for key in json: - if key.endswith('_rule'): - storage_key = 'rule' - else: - storage_key = key - self.assertEqual(json[key], getattr(alarms[0], storage_key)) - else: - self.fail("Alarm not found") - - def test_post_invalid_alarm_combination(self): - """Test that post a combination alarm with a not existing alarm id.""" - json = { - 'enabled': False, - 'name': 'added_alarm', - 'state': 'ok', - 'type': 'combination', - 'ok_actions': ['http://something/ok'], - 'alarm_actions': ['http://something/alarm'], - 'insufficient_data_actions': ['http://something/no'], - 'repeat_actions': True, - 'combination_rule': { - 'alarm_ids': ['not_exists', - 'b'], - 'operator': 'and', - } - } - self.post_json('/alarms', params=json, status=404, - headers=self.auth_headers) - alarms = list(self.alarm_conn.get_alarms(enabled=False)) - self.assertEqual(0, len(alarms)) - - def test_post_alarm_combination_duplicate_alarm_ids(self): - """Test combination alarm doesn't allow duplicate alarm ids.""" - json_body = { - 'name': 'dup_alarm_id', - 'type': 'combination', - 'combination_rule': { - 'alarm_ids': ['a', 'a', 'd', 'a', 'c', 'c', 'b'], - } - } - self.post_json('/alarms', params=json_body, status=201, - headers=self.auth_headers) - alarms = list(self.alarm_conn.get_alarms(name='dup_alarm_id')) - self.assertEqual(1, len(alarms)) - self.assertEqual(['a', 'd', 'c', 'b'], - alarms[0].rule.get('alarm_ids')) - - def _test_post_alarm_combination_rule_less_than_two_alarms(self, - alarm_ids=None): - json_body = { - 'name': 'one_alarm_in_combination_rule', - 'type': 'combination', - 'combination_rule': { - 'alarm_ids': alarm_ids or [] - } - } - - resp = self.post_json('/alarms', params=json_body, - expect_errors=True, status=400, - headers=self.auth_headers) - self.assertEqual( - 'Alarm combination rule should contain at' - ' least two different alarm ids.', - resp.json['error_message']['faultstring']) - - def test_post_alarm_combination_rule_with_no_alarm(self): - self._test_post_alarm_combination_rule_less_than_two_alarms() - - def test_post_alarm_combination_rule_with_one_alarm(self): - self._test_post_alarm_combination_rule_less_than_two_alarms(['a']) - - def test_post_alarm_combination_rule_with_two_same_alarms(self): - self._test_post_alarm_combination_rule_less_than_two_alarms(['a', - 'a']) - - def test_post_alarm_with_duplicate_actions(self): - body = { - 'name': 'dup-alarm-actions', - 'type': 'combination', - 'combination_rule': { - 'alarm_ids': ['a', 'b'], - }, - 
'alarm_actions': ['http://no.where', 'http://no.where'] - } - resp = self.post_json('/alarms', params=body, - headers=self.auth_headers) - self.assertEqual(201, resp.status_code) - alarms = list(self.alarm_conn.get_alarms(name='dup-alarm-actions')) - self.assertEqual(1, len(alarms)) - self.assertEqual(['http://no.where'], alarms[0].alarm_actions) - - def test_post_alarm_with_too_many_actions(self): - self.CONF.set_override('alarm_max_actions', 1, group='alarm') - body = { - 'name': 'alarm-with-many-actions', - 'type': 'combination', - 'combination_rule': { - 'alarm_ids': ['a', 'b'], - }, - 'alarm_actions': ['http://no.where', 'http://no.where2'] - } - resp = self.post_json('/alarms', params=body, expect_errors=True, - headers=self.auth_headers) - self.assertEqual(400, resp.status_code) - self.assertEqual("alarm_actions count exceeds maximum value 1", - resp.json['error_message']['faultstring']) - - def test_post_alarm_normal_user_set_log_actions(self): - body = { - 'name': 'log_alarm_actions', - 'type': 'combination', - 'combination_rule': { - 'alarm_ids': ['a', 'b'], - }, - 'alarm_actions': ['log://'] - } - resp = self.post_json('/alarms', params=body, expect_errors=True, - headers=self.auth_headers) - self.assertEqual(401, resp.status_code) - expected_msg = ("You are not authorized to create action: log://") - self.assertEqual(expected_msg, - resp.json['error_message']['faultstring']) - - def test_post_alarm_normal_user_set_test_actions(self): - body = { - 'name': 'test_alarm_actions', - 'type': 'combination', - 'combination_rule': { - 'alarm_ids': ['a', 'b'], - }, - 'alarm_actions': ['test://'] - } - resp = self.post_json('/alarms', params=body, expect_errors=True, - headers=self.auth_headers) - self.assertEqual(401, resp.status_code) - expected_msg = ("You are not authorized to create action: test://") - self.assertEqual(expected_msg, - resp.json['error_message']['faultstring']) - - def test_post_alarm_admin_user_set_log_test_actions(self): - body = { - 'name': 'admin_alarm_actions', - 'type': 'combination', - 'combination_rule': { - 'alarm_ids': ['a', 'b'], - }, - 'alarm_actions': ['test://', 'log://'] - } - headers = self.auth_headers - headers['X-Roles'] = 'admin' - self.post_json('/alarms', params=body, status=201, - headers=headers) - alarms = list(self.alarm_conn.get_alarms(name='admin_alarm_actions')) - self.assertEqual(1, len(alarms)) - self.assertEqual(['test://', 'log://'], - alarms[0].alarm_actions) - - def test_post_alarm_without_actions(self): - body = { - 'name': 'alarm_actions_none', - 'type': 'combination', - 'combination_rule': { - 'alarm_ids': ['a', 'b'], - }, - 'alarm_actions': None - } - headers = self.auth_headers - headers['X-Roles'] = 'admin' - self.post_json('/alarms', params=body, status=201, - headers=headers) - alarms = list(self.alarm_conn.get_alarms(name='alarm_actions_none')) - self.assertEqual(1, len(alarms)) - - # FIXME(sileht): This should really returns [] not None - # but the mongodb and sql just store the json dict as is... - # migration script for sql will be a mess because we have - # to parse all JSON :( - # I guess we assume that wsme convert the None input to [] - # because of the array type, but it won't... 
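Both duplicate-handling tests above expect order-preserving de-duplication: combination alarm_ids ['a', 'a', 'd', 'a', 'c', 'c', 'b'] are stored as ['a', 'd', 'c', 'b'], and repeated alarm_actions collapse to a single entry. A minimal sketch of that normalization:

    # Sketch: order-preserving de-duplication matching the stored values
    # asserted in the two tests above.
    def dedupe_keep_order(items):
        seen = set()
        out = []
        for item in items:
            if item not in seen:
                seen.add(item)
                out.append(item)
        return out

    assert dedupe_keep_order(['a', 'a', 'd', 'a', 'c', 'c', 'b']) == \
        ['a', 'd', 'c', 'b']
    assert dedupe_keep_order(
        ['http://no.where', 'http://no.where']) == ['http://no.where']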
- self.assertIsNone(alarms[0].alarm_actions) - - def test_post_alarm_trust(self): - json = { - 'name': 'added_alarm_defaults', - 'type': 'threshold', - 'ok_actions': ['trust+http://my.server:1234/foo'], - 'threshold_rule': { - 'meter_name': 'ameter', - 'threshold': 300.0 - } - } - auth = mock.Mock() - trust_client = mock.Mock() - with mock.patch('ceilometer.keystone_client.get_v3_client') as client: - client.return_value = mock.Mock( - auth_ref=mock.Mock(user_id='my_user')) - with mock.patch('keystoneclient.v3.client.Client') as sub_client: - sub_client.return_value = trust_client - trust_client.trusts.create.return_value = mock.Mock(id='5678') - self.post_json('/alarms', params=json, status=201, - headers=self.auth_headers, - extra_environ={'keystone.token_auth': auth}) - trust_client.trusts.create.assert_called_once_with( - trustor_user=self.auth_headers['X-User-Id'], - trustee_user='my_user', - project=self.auth_headers['X-Project-Id'], - impersonation=True, - role_names=[]) - alarms = list(self.alarm_conn.get_alarms()) - for alarm in alarms: - if alarm.name == 'added_alarm_defaults': - self.assertEqual( - ['trust+http://5678:delete@my.server:1234/foo'], - alarm.ok_actions) - break - else: - self.fail("Alarm not found") - - with mock.patch('ceilometer.keystone_client.get_v3_client') as client: - client.return_value = mock.Mock( - auth_ref=mock.Mock(user_id='my_user')) - with mock.patch('keystoneclient.v3.client.Client') as sub_client: - sub_client.return_value = trust_client - self.delete('/alarms/%s' % alarm.alarm_id, - headers=self.auth_headers, - status=204, - extra_environ={'keystone.token_auth': auth}) - trust_client.trusts.delete.assert_called_once_with('5678') - - def test_put_alarm(self): - json = { - 'enabled': False, - 'name': 'name_put', - 'state': 'ok', - 'type': 'threshold', - 'severity': 'critical', - 'ok_actions': ['http://something/ok'], - 'alarm_actions': ['http://something/alarm'], - 'insufficient_data_actions': ['http://something/no'], - 'repeat_actions': True, - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': 3, - 'period': 180, - } - } - data = self.get_json('/alarms', - q=[{'field': 'name', - 'value': 'name1', - }]) - self.assertEqual(1, len(data)) - alarm_id = data[0]['alarm_id'] - - self.put_json('/alarms/%s' % alarm_id, - params=json, - headers=self.auth_headers) - alarm = list(self.alarm_conn.get_alarms(alarm_id=alarm_id, - enabled=False))[0] - json['threshold_rule']['query'].append({ - 'field': 'project_id', 'op': 'eq', - 'value': self.auth_headers['X-Project-Id']}) - self._verify_alarm(json, alarm) - - def test_put_alarm_as_admin(self): - json = { - 'user_id': 'myuserid', - 'project_id': 'myprojectid', - 'enabled': False, - 'name': 'name_put', - 'state': 'ok', - 'type': 'threshold', - 'severity': 'critical', - 'ok_actions': ['http://something/ok'], - 'alarm_actions': ['http://something/alarm'], - 'insufficient_data_actions': ['http://something/no'], - 'repeat_actions': True, - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}, - {'field': 'project_id', 'op': 'eq', - 'value': 'myprojectid'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': 3, - 'period': 180, - } - } - headers = {} - headers.update(self.auth_headers) - headers['X-Roles'] = 
'admin' - - data = self.get_json('/alarms', - headers=headers, - q=[{'field': 'name', - 'value': 'name1', - }]) - self.assertEqual(1, len(data)) - alarm_id = data[0]['alarm_id'] - - self.put_json('/alarms/%s' % alarm_id, - params=json, - headers=headers) - alarm = list(self.alarm_conn.get_alarms(alarm_id=alarm_id, - enabled=False))[0] - self.assertEqual('myuserid', alarm.user_id) - self.assertEqual('myprojectid', alarm.project_id) - self._verify_alarm(json, alarm) - - def test_put_alarm_wrong_field(self): - # Note: wsme will ignore unknown fields, so they will just not appear - # in the Alarm. - json = { - 'this_can_not_be_correct': 'ha', - 'enabled': False, - 'name': 'name1', - 'state': 'ok', - 'type': 'threshold', - 'severity': 'critical', - 'ok_actions': ['http://something/ok'], - 'alarm_actions': ['http://something/alarm'], - 'insufficient_data_actions': ['http://something/no'], - 'repeat_actions': True, - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': 3, - 'period': 180, - } - } - data = self.get_json('/alarms', - q=[{'field': 'name', - 'value': 'name1', - }]) - self.assertEqual(1, len(data)) - alarm_id = data[0]['alarm_id'] - - resp = self.put_json('/alarms/%s' % alarm_id, - params=json, - headers=self.auth_headers) - self.assertEqual(200, resp.status_code) - - def test_put_alarm_with_existing_name(self): - """Test updating a threshold alarm with an existing name.""" - json = { - 'enabled': False, - 'name': 'name1', - 'state': 'ok', - 'type': 'threshold', - 'severity': 'critical', - 'ok_actions': ['http://something/ok'], - 'alarm_actions': ['http://something/alarm'], - 'insufficient_data_actions': ['http://something/no'], - 'repeat_actions': True, - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': 3, - 'period': 180, - } - } - data = self.get_json('/alarms', - q=[{'field': 'name', - 'value': 'name2', - }]) - self.assertEqual(1, len(data)) - alarm_id = data[0]['alarm_id'] - - resp = self.put_json('/alarms/%s' % alarm_id, - expect_errors=True, status=409, - params=json, - headers=self.auth_headers) - self.assertEqual( - 'Alarm with name=name1 exists', - resp.json['error_message']['faultstring']) - - def test_put_invalid_alarm_actions(self): - json = { - 'enabled': False, - 'name': 'name1', - 'state': 'ok', - 'type': 'threshold', - 'severity': 'critical', - 'ok_actions': ['spam://something/ok'], - 'alarm_actions': ['http://something/alarm'], - 'insufficient_data_actions': ['http://something/no'], - 'repeat_actions': True, - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': 3, - 'period': 180, - } - } - data = self.get_json('/alarms', - q=[{'field': 'name', - 'value': 'name2', - }]) - self.assertEqual(1, len(data)) - alarm_id = data[0]['alarm_id'] - - resp = self.put_json('/alarms/%s' % alarm_id, - expect_errors=True, status=400, - params=json, - headers=self.auth_headers) - self.assertEqual( - 'Unsupported action spam://something/ok', - resp.json['error_message']['faultstring']) - - def 
test_put_alarm_combination_cannot_specify_itself(self): - json = { - 'name': 'name4', - 'type': 'combination', - 'combination_rule': { - 'alarm_ids': ['d', 'a'], - } - } - - data = self.get_json('/alarms', - q=[{'field': 'name', - 'value': 'name4', - }]) - self.assertEqual(1, len(data)) - alarm_id = data[0]['alarm_id'] - - resp = self.put_json('/alarms/%s' % alarm_id, - expect_errors=True, status=400, - params=json, - headers=self.auth_headers) - - msg = 'Cannot specify alarm %s itself in combination rule' % alarm_id - self.assertEqual(msg, resp.json['error_message']['faultstring']) - - def _test_put_alarm_combination_rule_less_than_two_alarms(self, - alarm_ids=None): - json_body = { - 'name': 'name4', - 'type': 'combination', - 'combination_rule': { - 'alarm_ids': alarm_ids or [] - } - } - - data = self.get_json('/alarms', - q=[{'field': 'name', - 'value': 'name4', - }]) - self.assertEqual(1, len(data)) - alarm_id = data[0]['alarm_id'] - - resp = self.put_json('/alarms/%s' % alarm_id, params=json_body, - expect_errors=True, status=400, - headers=self.auth_headers) - self.assertEqual( - 'Alarm combination rule should contain at' - ' least two different alarm ids.', - resp.json['error_message']['faultstring']) - - def test_put_alarm_combination_rule_with_no_alarm(self): - self._test_put_alarm_combination_rule_less_than_two_alarms() - - def test_put_alarm_combination_rule_with_one_alarm(self): - self._test_put_alarm_combination_rule_less_than_two_alarms(['a']) - - def test_put_alarm_combination_rule_with_two_same_alarm_itself(self): - self._test_put_alarm_combination_rule_less_than_two_alarms(['d', - 'd']) - - def test_put_combination_alarm_with_duplicate_ids(self): - """Test combination alarm doesn't allow duplicate alarm ids.""" - alarms = self.get_json('/alarms', - q=[{'field': 'name', - 'value': 'name4', - }]) - self.assertEqual(1, len(alarms)) - alarm_id = alarms[0]['alarm_id'] - - json_body = { - 'name': 'name4', - 'type': 'combination', - 'combination_rule': { - 'alarm_ids': ['c', 'a', 'b', 'a', 'c', 'b'], - } - } - self.put_json('/alarms/%s' % alarm_id, - params=json_body, status=200, - headers=self.auth_headers) - - alarms = list(self.alarm_conn.get_alarms(alarm_id=alarm_id)) - self.assertEqual(1, len(alarms)) - self.assertEqual(['c', 'a', 'b'], alarms[0].rule.get('alarm_ids')) - - def test_put_alarm_trust(self): - data = self._get_alarm('a') - data.update({'ok_actions': ['trust+http://something/ok']}) - trust_client = mock.Mock() - with mock.patch('ceilometer.keystone_client.get_v3_client') as client: - client.return_value = mock.Mock( - auth_ref=mock.Mock(user_id='my_user')) - with mock.patch('keystoneclient.v3.client.Client') as sub_client: - sub_client.return_value = trust_client - trust_client.trusts.create.return_value = mock.Mock(id='5678') - self.put_json('/alarms/%s' % data['alarm_id'], - params=data, - headers=self.auth_headers) - data = self._get_alarm('a') - self.assertEqual( - ['trust+http://5678:delete@something/ok'], data['ok_actions']) - - data.update({'ok_actions': ['http://no-trust-something/ok']}) - - with mock.patch('ceilometer.keystone_client.get_v3_client') as client: - client.return_value = mock.Mock( - auth_ref=mock.Mock(user_id='my_user')) - with mock.patch('keystoneclient.v3.client.Client') as sub_client: - sub_client.return_value = trust_client - self.put_json('/alarms/%s' % data['alarm_id'], - params=data, - headers=self.auth_headers) - trust_client.trusts.delete.assert_called_once_with('5678') - - data = self._get_alarm('a') - self.assertEqual( - 
['http://no-trust-something/ok'], data['ok_actions']) - - def test_delete_alarm(self): - data = self.get_json('/alarms') - self.assertEqual(7, len(data)) - - resp = self.delete('/alarms/%s' % data[0]['alarm_id'], - headers=self.auth_headers, - status=204) - self.assertEqual(b'', resp.body) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(6, len(alarms)) - - def test_get_state_alarm(self): - data = self.get_json('/alarms') - self.assertEqual(7, len(data)) - - resp = self.get_json('/alarms/%s/state' % data[0]['alarm_id'], - headers=self.auth_headers) - self.assertEqual(resp, data[0]['state']) - - def test_set_state_alarm(self): - data = self.get_json('/alarms') - self.assertEqual(7, len(data)) - - resp = self.put_json('/alarms/%s/state' % data[0]['alarm_id'], - headers=self.auth_headers, - params='alarm') - alarms = list(self.alarm_conn.get_alarms(alarm_id=data[0]['alarm_id'])) - self.assertEqual(1, len(alarms)) - self.assertEqual('alarm', alarms[0].state) - self.assertEqual('alarm', resp.json) - - def test_set_invalid_state_alarm(self): - data = self.get_json('/alarms') - self.assertEqual(7, len(data)) - - self.put_json('/alarms/%s/state' % data[0]['alarm_id'], - headers=self.auth_headers, - params='not valid', - status=400) - - def _get_alarm(self, id): - data = self.get_json('/alarms') - match = [a for a in data if a['alarm_id'] == id] - self.assertEqual(1, len(match), 'alarm %s not found' % id) - return match[0] - - def _get_alarm_history(self, alarm, auth_headers=None, query=None, - expect_errors=False, status=200): - url = '/alarms/%s/history' % alarm['alarm_id'] - if query: - url += '?q.op=%(op)s&q.value=%(value)s&q.field=%(field)s' % query - resp = self.get_json(url, - headers=auth_headers or self.auth_headers, - expect_errors=expect_errors) - if expect_errors: - self.assertEqual(status, resp.status_code) - return resp - - def _update_alarm(self, alarm, updated_data, auth_headers=None): - data = self._get_alarm(alarm['alarm_id']) - data.update(updated_data) - self.put_json('/alarms/%s' % alarm['alarm_id'], - params=data, - headers=auth_headers or self.auth_headers) - - def _delete_alarm(self, alarm, auth_headers=None): - self.delete('/alarms/%s' % alarm['alarm_id'], - headers=auth_headers or self.auth_headers, - status=204) - - def _assert_is_subset(self, expected, actual): - for k, v in six.iteritems(expected): - self.assertEqual(v, actual.get(k), 'mismatched field: %s' % k) - self.assertIsNotNone(actual['event_id']) - - def _assert_in_json(self, expected, actual): - actual = jsonutils.dumps(jsonutils.loads(actual), sort_keys=True) - for k, v in six.iteritems(expected): - fragment = jsonutils.dumps({k: v}, sort_keys=True)[1:-1] - self.assertIn(fragment, actual, - '%s not in %s' % (fragment, actual)) - - def test_record_alarm_history_config(self): - self.CONF.set_override('record_history', False, group='alarm') - alarm = self._get_alarm('a') - history = self._get_alarm_history(alarm) - self.assertEqual([], history) - self._update_alarm(alarm, dict(name='renamed')) - history = self._get_alarm_history(alarm) - self.assertEqual([], history) - self.CONF.set_override('record_history', True, group='alarm') - self._update_alarm(alarm, dict(name='foobar')) - history = self._get_alarm_history(alarm) - self.assertEqual(1, len(history)) - - def test_record_alarm_history_severity(self): - alarm = self._get_alarm('a') - history = self._get_alarm_history(alarm) - self.assertEqual([], history) - self.assertEqual('critical', alarm['severity']) - - self._update_alarm(alarm, 
dict(severity='low')) - new_alarm = self._get_alarm('a') - history = self._get_alarm_history(alarm) - self.assertEqual(1, len(history)) - self.assertEqual(jsonutils.dumps({'severity': 'low'}), - history[0]['detail']) - self.assertEqual('low', new_alarm['severity']) - - def test_redundant_update_alarm_property_no_history_change(self): - alarm = self._get_alarm('a') - history = self._get_alarm_history(alarm) - self.assertEqual([], history) - self.assertEqual('critical', alarm['severity']) - - self._update_alarm(alarm, dict(severity='low')) - new_alarm = self._get_alarm('a') - history = self._get_alarm_history(alarm) - self.assertEqual(1, len(history)) - self.assertEqual(jsonutils.dumps({'severity': 'low'}), - history[0]['detail']) - self.assertEqual('low', new_alarm['severity']) - - self._update_alarm(alarm, dict(severity='low')) - updated_alarm = self._get_alarm('a') - updated_history = self._get_alarm_history(updated_alarm) - self.assertEqual(1, len(updated_history)) - self.assertEqual(jsonutils.dumps({'severity': 'low'}), - updated_history[0]['detail']) - self.assertEqual(history, updated_history) - - def test_get_recorded_alarm_history_on_create(self): - new_alarm = { - 'name': 'new_alarm', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [], - 'comparison_operator': 'le', - 'statistic': 'max', - 'threshold': 42.0, - 'period': 60, - 'evaluation_periods': 1, - } - } - self.post_json('/alarms', params=new_alarm, status=201, - headers=self.auth_headers) - - alarms = self.get_json('/alarms', - q=[{'field': 'name', - 'value': 'new_alarm', - }]) - self.assertEqual(1, len(alarms)) - alarm = alarms[0] - - history = self._get_alarm_history(alarm) - self.assertEqual(1, len(history)) - self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], - on_behalf_of=alarm['project_id'], - project_id=alarm['project_id'], - type='creation', - user_id=alarm['user_id']), - history[0]) - self._add_default_threshold_rule(new_alarm) - new_alarm['rule'] = new_alarm['threshold_rule'] - del new_alarm['threshold_rule'] - new_alarm['rule']['query'].append({ - 'field': 'project_id', 'op': 'eq', - 'value': self.auth_headers['X-Project-Id']}) - self._assert_in_json(new_alarm, history[0]['detail']) - - def _do_test_get_recorded_alarm_history_on_update(self, - data, - type, - detail, - auth=None): - alarm = self._get_alarm('a') - history = self._get_alarm_history(alarm) - self.assertEqual([], history) - self._update_alarm(alarm, data, auth) - history = self._get_alarm_history(alarm) - self.assertEqual(1, len(history)) - project_id = auth['X-Project-Id'] if auth else alarm['project_id'] - user_id = auth['X-User-Id'] if auth else alarm['user_id'] - self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], - detail=detail, - on_behalf_of=alarm['project_id'], - project_id=project_id, - type=type, - user_id=user_id), - history[0]) - - def test_get_recorded_alarm_history_rule_change(self): - data = dict(name='renamed') - detail = '{"name": "renamed"}' - self._do_test_get_recorded_alarm_history_on_update(data, - 'rule change', - detail) - - def test_get_recorded_alarm_history_state_transition_on_behalf_of(self): - # credentials for new non-admin user, on whose behalf the alarm - # is created - member_user = str(uuid.uuid4()) - member_project = str(uuid.uuid4()) - member_auth = {'X-Roles': 'member', - 'X-User-Id': member_user, - 'X-Project-Id': member_project} - new_alarm = { - 'name': 'new_alarm', - 'type': 'threshold', - 'state': 'ok', - 'threshold_rule': { - 'meter_name': 'other_meter', - 'query': 
[{'field': 'project_id', - 'op': 'eq', - 'value': member_project}], - 'comparison_operator': 'le', - 'statistic': 'max', - 'threshold': 42.0, - 'evaluation_periods': 1, - 'period': 60 - } - } - self.post_json('/alarms', params=new_alarm, status=201, - headers=member_auth) - alarm = self.get_json('/alarms', headers=member_auth)[0] - - # effect a state transition as a new administrative user - admin_user = str(uuid.uuid4()) - admin_project = str(uuid.uuid4()) - admin_auth = {'X-Roles': 'admin', - 'X-User-Id': admin_user, - 'X-Project-Id': admin_project} - data = dict(state='alarm') - self._update_alarm(alarm, data, auth_headers=admin_auth) - - self._add_default_threshold_rule(new_alarm) - new_alarm['rule'] = new_alarm['threshold_rule'] - del new_alarm['threshold_rule'] - - # ensure that both the creation event and state transition - # are visible to the non-admin alarm owner and admin user alike - for auth in [member_auth, admin_auth]: - history = self._get_alarm_history(alarm, auth_headers=auth) - self.assertEqual(2, len(history), 'hist: %s' % history) - self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], - detail='{"state": "alarm"}', - on_behalf_of=alarm['project_id'], - project_id=admin_project, - type='rule change', - user_id=admin_user), - history[0]) - self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], - on_behalf_of=alarm['project_id'], - project_id=member_project, - type='creation', - user_id=member_user), - history[1]) - self._assert_in_json(new_alarm, history[1]['detail']) - - # ensure on_behalf_of cannot be constrained in an API call - query = dict(field='on_behalf_of', - op='eq', - value=alarm['project_id']) - self._get_alarm_history(alarm, auth_headers=auth, query=query, - expect_errors=True, status=400) - - def test_get_recorded_alarm_history_segregation(self): - data = dict(name='renamed') - detail = '{"name": "renamed"}' - self._do_test_get_recorded_alarm_history_on_update(data, - 'rule change', - detail) - auth = {'X-Roles': 'member', - 'X-User-Id': str(uuid.uuid4()), - 'X-Project-Id': str(uuid.uuid4())} - history = self._get_alarm_history(self._get_alarm('a'), auth) - self.assertEqual([], history) - - def test_get_recorded_alarm_history_preserved_after_deletion(self): - alarm = self._get_alarm('a') - history = self._get_alarm_history(alarm) - self.assertEqual([], history) - self._update_alarm(alarm, dict(name='renamed')) - history = self._get_alarm_history(alarm) - self.assertEqual(1, len(history)) - alarm = self._get_alarm('a') - self.delete('/alarms/%s' % alarm['alarm_id'], - headers=self.auth_headers, - status=204) - history = self._get_alarm_history(alarm) - self.assertEqual(2, len(history)) - self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], - on_behalf_of=alarm['project_id'], - project_id=alarm['project_id'], - type='deletion', - user_id=alarm['user_id']), - history[0]) - alarm['rule'] = alarm['threshold_rule'] - del alarm['threshold_rule'] - self._assert_in_json(alarm, history[0]['detail']) - detail = '{"name": "renamed"}' - self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], - detail=detail, - on_behalf_of=alarm['project_id'], - project_id=alarm['project_id'], - type='rule change', - user_id=alarm['user_id']), - history[1]) - - def test_get_alarm_history_ordered_by_recentness(self): - alarm = self._get_alarm('a') - for i in moves.xrange(10): - self._update_alarm(alarm, dict(name='%s' % i)) - alarm = self._get_alarm('a') - self._delete_alarm(alarm) - history = self._get_alarm_history(alarm) - self.assertEqual(11, len(history), 'hist: %s' % 
history) - self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], - type='deletion'), - history[0]) - alarm['rule'] = alarm['threshold_rule'] - del alarm['threshold_rule'] - self._assert_in_json(alarm, history[0]['detail']) - for i in moves.xrange(1, 10): - detail = '{"name": "%s"}' % (10 - i) - self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], - detail=detail, - type='rule change'), - history[i]) - - def test_get_alarm_history_constrained_by_timestamp(self): - alarm = self._get_alarm('a') - self._update_alarm(alarm, dict(name='renamed')) - after = datetime.datetime.utcnow().isoformat() - query = dict(field='timestamp', op='gt', value=after) - history = self._get_alarm_history(alarm, query=query) - self.assertEqual(0, len(history)) - query['op'] = 'le' - history = self._get_alarm_history(alarm, query=query) - self.assertEqual(1, len(history)) - detail = '{"name": "renamed"}' - self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], - detail=detail, - on_behalf_of=alarm['project_id'], - project_id=alarm['project_id'], - type='rule change', - user_id=alarm['user_id']), - history[0]) - - def test_get_alarm_history_constrained_by_type(self): - alarm = self._get_alarm('a') - self._delete_alarm(alarm) - query = dict(field='type', op='eq', value='deletion') - history = self._get_alarm_history(alarm, query=query) - self.assertEqual(1, len(history)) - self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], - on_behalf_of=alarm['project_id'], - project_id=alarm['project_id'], - type='deletion', - user_id=alarm['user_id']), - history[0]) - alarm['rule'] = alarm['threshold_rule'] - del alarm['threshold_rule'] - self._assert_in_json(alarm, history[0]['detail']) - - def test_get_alarm_history_constrained_by_alarm_id_failed(self): - alarm = self._get_alarm('b') - query = dict(field='alarm_id', op='eq', value='b') - resp = self._get_alarm_history(alarm, query=query, - expect_errors=True, status=400) - msg = ('Unknown argument: "alarm_id": unrecognized' - " field in query: [], valid keys: ['project', " - "'search_offset', 'severity', 'timestamp'," - " 'type', 'user']") - msg = msg.format(key=u'alarm_id', value=u'b') - self.assertEqual(msg, - resp.json['error_message']['faultstring']) - - def test_get_alarm_history_constrained_by_not_supported_rule(self): - alarm = self._get_alarm('b') - query = dict(field='abcd', op='eq', value='abcd') - resp = self._get_alarm_history(alarm, query=query, - expect_errors=True, status=400) - msg = ('Unknown argument: "abcd": unrecognized' - " field in query: [], valid keys: ['project', " - "'search_offset', 'severity', 'timestamp'," - " 'type', 'user']") - msg = msg.format(key=u'abcd', value=u'abcd') - self.assertEqual(msg, - resp.json['error_message']['faultstring']) - - def test_get_nonexistent_alarm_history(self): - # the existence of alarm history is independent of the - # continued existence of the alarm itself - history = self._get_alarm_history(dict(alarm_id='foobar')) - self.assertEqual([], history) - - def test_alarms_sends_notification(self): - # Hit the AlarmsController ... 
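Every history test above goes through _get_alarm_history, which encodes one constraint as q.field/q.op/q.value query parameters; per the faultstrings asserted above, the queryable fields are 'project', 'search_offset', 'severity', 'timestamp', 'type' and 'user'. A short illustration of the URL that helper builds, with made-up values:

    # Mirrors the string template used by _get_alarm_history.
    query = {'field': 'type', 'op': 'eq', 'value': 'deletion'}
    url = ('/alarms/%s/history' % 'a'
           + '?q.op=%(op)s&q.value=%(value)s&q.field=%(field)s' % query)
    # url == '/alarms/a/history?q.op=eq&q.value=deletion&q.field=type'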
- json = { - 'name': 'sent_notification', - 'type': 'threshold', - 'severity': 'low', - 'threshold_rule': { - 'meter_name': 'ameter', - 'comparison_operator': 'gt', - 'threshold': 2.0, - 'statistic': 'avg', - } - - } - endpoint = mock.MagicMock() - target = oslo_messaging.Target(topic="notifications") - listener = messaging.get_notification_listener( - self.transport, [target], [endpoint]) - listener.start() - endpoint.info.side_effect = lambda *args: listener.stop() - self.post_json('/alarms', params=json, headers=self.auth_headers) - listener.wait() - - class PayloadMatcher(object): - def __eq__(self, payload): - return (payload['detail']['name'] == 'sent_notification' and - payload['type'] == 'creation' and - payload['detail']['rule']['meter_name'] == 'ameter' and - set(['alarm_id', 'detail', 'event_id', 'on_behalf_of', - 'project_id', 'timestamp', - 'user_id']).issubset(payload.keys())) - - endpoint.info.assert_called_once_with( - {'resource_uuid': None, - 'domain': None, - 'project_domain': None, - 'auth_token': None, - 'is_admin': False, - 'user': None, - 'tenant': None, - 'read_only': False, - 'show_deleted': False, - 'user_identity': '- - - - -', - 'request_id': mock.ANY, - 'user_domain': None}, - 'ceilometer.api', 'alarm.creation', - PayloadMatcher(), mock.ANY) - - def test_alarm_sends_notification(self): - # Hit the AlarmController (with alarm_id supplied) ... - data = self.get_json('/alarms') - del_alarm_name = "name1" - for d in data: - if d['name'] == del_alarm_name: - del_alarm_id = d['alarm_id'] - - with mock.patch.object(messaging, 'get_notifier') as get_notifier: - notifier = get_notifier.return_value - - self.delete('/alarms/%s' % del_alarm_id, - headers=self.auth_headers, status=204) - get_notifier.assert_called_once_with(mock.ANY, - publisher_id='ceilometer.api') - calls = notifier.info.call_args_list - self.assertEqual(1, len(calls)) - args, _ = calls[0] - context, event_type, payload = args - self.assertEqual('alarm.deletion', event_type) - self.assertEqual(del_alarm_name, payload['detail']['name']) - self.assertTrue(set(['alarm_id', 'detail', 'event_id', 'on_behalf_of', - 'project_id', 'timestamp', 'type', - 'user_id']).issubset(payload.keys())) - - @mock.patch('ceilometer.keystone_client.get_client') - def test_post_gnocchi_resources_alarm(self, __): - json = { - 'enabled': False, - 'name': 'name_post', - 'state': 'ok', - 'type': 'gnocchi_resources_threshold', - 'severity': 'critical', - 'ok_actions': ['http://something/ok'], - 'alarm_actions': ['http://something/alarm'], - 'insufficient_data_actions': ['http://something/no'], - 'repeat_actions': True, - 'gnocchi_resources_threshold_rule': { - 'metric': 'ameter', - 'comparison_operator': 'le', - 'aggregation_method': 'count', - 'threshold': 50, - 'evaluation_periods': 3, - 'granularity': 180, - 'resource_type': 'instance', - 'resource_id': '209ef69c-c10c-4efb-90ff-46f4b2d90d2e', - } - } - - with mock.patch('requests.get', - side_effect=requests.ConnectionError()): - resp = self.post_json('/alarms', params=json, - headers=self.auth_headers, - expect_errors=True) - self.assertEqual(503, resp.status_code, resp.body) - - with mock.patch('requests.get', - return_value=mock.Mock(status_code=500, - body="my_custom_error", - text="my_custom_error")): - resp = self.post_json('/alarms', params=json, - headers=self.auth_headers, - expect_errors=True) - self.assertEqual(503, resp.status_code, resp.body) - self.assertIn('my_custom_error', - resp.json['error_message']['faultstring']) - - cap_result = mock.Mock(status_code=201, - 
text=jsonutils.dumps( - {'aggregation_methods': ['count']})) - resource_result = mock.Mock(status_code=200, text="blob") - with mock.patch('requests.get', side_effect=[cap_result, - resource_result] - ) as gnocchi_get: - self.post_json('/alarms', params=json, headers=self.auth_headers) - - gnocchi_url = self.CONF.alarms.gnocchi_url - capabilities_url = urlparse.urljoin(gnocchi_url, - '/v1/capabilities') - resource_url = urlparse.urljoin( - gnocchi_url, - '/v1/resource/instance/209ef69c-c10c-4efb-90ff-46f4b2d90d2e' - ) - - expected = [mock.call(capabilities_url, - headers=mock.ANY), - mock.call(resource_url, - headers=mock.ANY)] - self.assertEqual(expected, gnocchi_get.mock_calls) - - alarms = list(self.alarm_conn.get_alarms(enabled=False)) - self.assertEqual(1, len(alarms)) - self._verify_alarm(json, alarms[0]) - - @mock.patch('ceilometer.keystone_client.get_client') - def test_post_gnocchi_metrics_alarm(self, __): - json = { - 'enabled': False, - 'name': 'name_post', - 'state': 'ok', - 'type': 'gnocchi_aggregation_by_metrics_threshold', - 'severity': 'critical', - 'ok_actions': ['http://something/ok'], - 'alarm_actions': ['http://something/alarm'], - 'insufficient_data_actions': ['http://something/no'], - 'repeat_actions': True, - 'gnocchi_aggregation_by_metrics_threshold_rule': { - 'metrics': ['b3d9d8ab-05e8-439f-89ad-5e978dd2a5eb', - '009d4faf-c275-46f0-8f2d-670b15bac2b0'], - 'comparison_operator': 'le', - 'aggregation_method': 'count', - 'threshold': 50, - 'evaluation_periods': 3, - 'granularity': 180, - } - } - - cap_result = mock.Mock(status_code=200, - text=jsonutils.dumps( - {'aggregation_methods': ['count']})) - with mock.patch('requests.get', return_value=cap_result): - self.post_json('/alarms', params=json, headers=self.auth_headers) - - alarms = list(self.alarm_conn.get_alarms(enabled=False)) - self.assertEqual(1, len(alarms)) - self._verify_alarm(json, alarms[0]) - - @mock.patch('ceilometer.keystone_client.get_client') - def test_post_gnocchi_aggregation_alarm_project_constraint(self, __): - self.CONF.set_override('gnocchi_url', 'http://localhost:8041', - group='alarms') - json = { - 'enabled': False, - 'name': 'project_constraint', - 'state': 'ok', - 'type': 'gnocchi_aggregation_by_resources_threshold', - 'severity': 'critical', - 'ok_actions': ['http://something/ok'], - 'alarm_actions': ['http://something/alarm'], - 'insufficient_data_actions': ['http://something/no'], - 'repeat_actions': True, - 'gnocchi_aggregation_by_resources_threshold_rule': { - 'metric': 'ameter', - 'comparison_operator': 'le', - 'aggregation_method': 'count', - 'threshold': 50, - 'evaluation_periods': 3, - 'granularity': 180, - 'resource_type': 'instance', - 'query': '{"=": {"server_group": "my_autoscaling_group"}}', - } - } - - cap_result = mock.Mock(status_code=201, - text=jsonutils.dumps( - {'aggregation_methods': ['count']})) - resource_result = mock.Mock(status_code=200, text="blob") - query_check_result = mock.Mock(status_code=200, text="blob") - - expected_query = ('{"and": [{"=": {"created_by_project_id": "%s"}}, ' - '{"=": {"server_group": "my_autoscaling_group"}}]}' % - self.auth_headers['X-Project-Id']) - - with mock.patch('requests.get', - side_effect=[cap_result, resource_result]): - with mock.patch('requests.post', - side_effect=[query_check_result]) as fake_post: - - self.post_json('/alarms', params=json, - headers=self.auth_headers) - - self.assertEqual([mock.call( - url=('http://localhost:8041/v1/aggregation/' - 'resource/instance/metric/ameter'), - headers={'Content-Type': 
'application/json', - 'X-Auth-Token': mock.ANY}, - params={'aggregation': 'count'}, - data=expected_query)], - fake_post.mock_calls), - - alarms = list(self.alarm_conn.get_alarms(enabled=False)) - self.assertEqual(1, len(alarms)) - - json['gnocchi_aggregation_by_resources_threshold_rule']['query'] = ( - expected_query) - self._verify_alarm(json, alarms[0]) - - -class TestAlarmsQuotas(v2.FunctionalTest, - tests_db.MixinTestsWithBackendScenarios): - - def setUp(self): - super(TestAlarmsQuotas, self).setUp() - - self.auth_headers = {'X-User-Id': str(uuid.uuid4()), - 'X-Project-Id': str(uuid.uuid4())} - - def _test_alarm_quota(self): - alarm = { - 'name': 'alarm', - 'type': 'threshold', - 'user_id': self.auth_headers['X-User-Id'], - 'project_id': self.auth_headers['X-Project-Id'], - 'threshold_rule': { - 'meter_name': 'testmeter', - 'query': [], - 'comparison_operator': 'le', - 'statistic': 'max', - 'threshold': 42.0, - 'period': 60, - 'evaluation_periods': 1, - } - } - - resp = self.post_json('/alarms', params=alarm, - headers=self.auth_headers) - self.assertEqual(201, resp.status_code) - alarms = self.get_json('/alarms') - self.assertEqual(1, len(alarms)) - - alarm['name'] = 'another_user_alarm' - resp = self.post_json('/alarms', params=alarm, - expect_errors=True, - headers=self.auth_headers) - self.assertEqual(403, resp.status_code) - faultstring = 'Alarm quota exceeded for user' - self.assertIn(faultstring, - resp.json['error_message']['faultstring']) - - alarms = self.get_json('/alarms') - self.assertEqual(1, len(alarms)) - - def test_alarms_quotas(self): - self.CONF.set_override('user_alarm_quota', 1, group='alarm') - self.CONF.set_override('project_alarm_quota', 1, group='alarm') - self._test_alarm_quota() - - def test_project_alarms_quotas(self): - self.CONF.set_override('project_alarm_quota', 1, group='alarm') - self._test_alarm_quota() - - def test_user_alarms_quotas(self): - self.CONF.set_override('user_alarm_quota', 1, group='alarm') - self._test_alarm_quota() - - def test_larger_limit_project_alarms_quotas(self): - self.CONF.set_override('user_alarm_quota', 1, group='alarm') - self.CONF.set_override('project_alarm_quota', 2, group='alarm') - self._test_alarm_quota() - - def test_larger_limit_user_alarms_quotas(self): - self.CONF.set_override('user_alarm_quota', 2, group='alarm') - self.CONF.set_override('project_alarm_quota', 1, group='alarm') - self._test_alarm_quota() - - def test_larger_limit_user_alarm_quotas_multitenant_user(self): - self.CONF.set_override('user_alarm_quota', 2, group='alarm') - self.CONF.set_override('project_alarm_quota', 1, group='alarm') - - def _test(field, value): - query = [{ - 'field': field, - 'op': 'eq', - 'value': value - }] - alarms = self.get_json('/alarms', q=query) - self.assertEqual(1, len(alarms)) - - alarm = { - 'name': 'alarm', - 'type': 'threshold', - 'user_id': self.auth_headers['X-User-Id'], - 'project_id': self.auth_headers['X-Project-Id'], - 'threshold_rule': { - 'meter_name': 'testmeter', - 'query': [], - 'comparison_operator': 'le', - 'statistic': 'max', - 'threshold': 42.0, - 'period': 60, - 'evaluation_periods': 1, - } - } - - resp = self.post_json('/alarms', params=alarm, - headers=self.auth_headers) - - self.assertEqual(201, resp.status_code) - _test('project_id', self.auth_headers['X-Project-Id']) - - self.auth_headers['X-Project-Id'] = str(uuid.uuid4()) - alarm['name'] = 'another_user_alarm' - alarm['project_id'] = self.auth_headers['X-Project-Id'] - resp = self.post_json('/alarms', params=alarm, - headers=self.auth_headers) - 
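The 201 asserted just below is the interesting case: project_alarm_quota=1 is counted per project, while user_alarm_quota=2 is counted across projects, so a user who already owns one alarm in the first project can still create one under a second project id. A sketch of that accounting, assuming the quotas are enforced as plain counts (the helper is illustrative, not the ceilometer implementation):

    def quota_allows(user_count, project_count, user_quota, project_quota):
        # Reject when either the per-user or the per-project cap is reached.
        return user_count < user_quota and project_count < project_quota

    # Second alarm, new project: user 1/2 used, project 0/1 used -> allowed.
    assert quota_allows(user_count=1, project_count=0,
                        user_quota=2, project_quota=1)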
- self.assertEqual(201, resp.status_code) - _test('project_id', self.auth_headers['X-Project-Id']) - - alarms = self.get_json('/alarms') - self.assertEqual(2, len(alarms)) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/api/v2/test_api_upgrade.py ceilometer-5.0.0~b3/ceilometer/tests/api/v2/test_api_upgrade.py --- ceilometer-5.0.0~b2/ceilometer/tests/api/v2/test_api_upgrade.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/api/v2/test_api_upgrade.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,114 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from keystoneclient.openstack.common.apiclient import exceptions -import mock -from oslotest import mockpatch - -from ceilometer.tests.api import v2 - - -class TestAPIUpgradePath(v2.FunctionalTest): - def _setup_osloconfig_options(self): - self.CONF.set_override('gnocchi_is_enabled', True, group='api') - self.CONF.set_override('aodh_is_enabled', True, group='api') - self.CONF.set_override('aodh_url', 'http://alarm-endpoint:8008/', - group='api') - - def _setup_keystone_mock(self): - self.CONF.set_override('gnocchi_is_enabled', None, group='api') - self.CONF.set_override('aodh_is_enabled', None, group='api') - self.CONF.set_override('aodh_url', None, group='api') - self.ks = mock.Mock() - self.ks.service_catalog.url_for.side_effect = self._url_for - self.useFixture(mockpatch.Patch( - 'ceilometer.keystone_client.get_client', return_value=self.ks)) - - @staticmethod - def _url_for(service_type=None): - if service_type == 'metric': - return 'http://gnocchi/' - elif service_type == 'alarming': - return 'http://alarm-endpoint:8008/' - raise exceptions.EndpointNotFound() - - def _do_test_gnocchi_enabled_without_database_backend(self): - self.CONF.set_override('dispatcher', 'gnocchi') - for endpoint in ['meters', 'samples', 'resources']: - response = self.app.get(self.PATH_PREFIX + '/' + endpoint, - status=410) - self.assertIn('Gnocchi API', response.body) - - for endpoint in ['events', 'event_types']: - self.app.get(self.PATH_PREFIX + '/' + endpoint, - status=200) - - response = self.post_json('/query/samples', - params={ - "filter": '{"=": {"type": "creation"}}', - "orderby": '[{"timestamp": "DESC"}]', - "limit": 3 - }, status=410) - self.assertIn('Gnocchi API', response.body) - - def _do_test_alarm_redirect(self): - response = self.app.get(self.PATH_PREFIX + '/alarms', - expect_errors=True) - - self.assertEqual(307, response.status_code) - self.assertEqual("http://alarm-endpoint:8008/v2/alarms", - response.headers['Location']) - - response = self.app.get(self.PATH_PREFIX + '/alarms/uuid', - expect_errors=True) - - self.assertEqual(307, response.status_code) - self.assertEqual("http://alarm-endpoint:8008/v2/alarms/uuid", - response.headers['Location']) - - response = self.app.delete(self.PATH_PREFIX + '/alarms/uuid', - expect_errors=True) - - self.assertEqual(307, response.status_code) - self.assertEqual("http://alarm-endpoint:8008/v2/alarms/uuid", - response.headers['Location']) - - response = 
self.post_json('/query/alarms', - params={ - "filter": '{"=": {"type": "creation"}}', - "orderby": '[{"timestamp": "DESC"}]', - "limit": 3 - }, status=307) - self.assertEqual("http://alarm-endpoint:8008/v2/query/alarms", - response.headers['Location']) - - def test_gnocchi_enabled_without_database_backend_keystone(self): - self._setup_keystone_mock() - self._do_test_gnocchi_enabled_without_database_backend() - self.assertEqual([mock.call(service_type="alarming"), - mock.call(service_type="metric")], - sorted(self.ks.service_catalog.url_for.mock_calls)) - - def test_gnocchi_enabled_without_database_backend_configoptions(self): - self._setup_osloconfig_options() - self._do_test_gnocchi_enabled_without_database_backend() - - def test_alarm_redirect_keystone(self): - self._setup_keystone_mock() - self._do_test_alarm_redirect() - self.assertEqual([mock.call(service_type="alarming")], - self.ks.service_catalog.url_for.mock_calls) - - def test_alarm_redirect_configoptions(self): - self._setup_osloconfig_options() - self._do_test_alarm_redirect() diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/api/v2/test_app.py ceilometer-5.0.0~b3/ceilometer/tests/api/v2/test_app.py --- ceilometer-5.0.0~b2/ceilometer/tests/api/v2/test_app.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/api/v2/test_app.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,182 +0,0 @@ -# -# Copyright 2013 IBM Corp. -# Copyright 2013 Julien Danjou -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
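The upgrade-path tests deleted above decide whether gnocchi and aodh are deployed either from the api.* config options or from the keystone service catalog. A sketch of the catalog fallback they mock, using the same exceptions import the removed file used (the function name is illustrative):

    from keystoneclient.openstack.common.apiclient import exceptions

    def discover_aodh_url(ks_client):
        # Mirrors the mocked url_for(service_type='alarming') lookup; when no
        # alarming endpoint exists, alarms are served locally instead of
        # answering 307 redirects to aodh.
        try:
            return ks_client.service_catalog.url_for(service_type='alarming')
        except exceptions.EndpointNotFound:
            return None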
-"""Test basic ceilometer-api app -""" -import json - -import mock -import six -import wsme - -from ceilometer import i18n -from ceilometer.tests.api import v2 - - -class TestPecanApp(v2.FunctionalTest): - - def test_pecan_extension_guessing_unset(self): - # check Pecan does not assume .jpg is an extension - response = self.app.get(self.PATH_PREFIX + '/meters/meter.jpg') - self.assertEqual('application/json', response.content_type) - - -class TestApiMiddleware(v2.FunctionalTest): - - no_lang_translated_error = 'No lang translated error' - en_US_translated_error = 'en-US translated error' - - def _fake_translate(self, message, user_locale): - if user_locale is None: - return self.no_lang_translated_error - else: - return self.en_US_translated_error - - def test_json_parsable_error_middleware_404(self): - response = self.get_json('/invalid_path', - expect_errors=True, - headers={"Accept": - "application/json"} - ) - self.assertEqual(404, response.status_int) - self.assertEqual("application/json", response.content_type) - self.assertTrue(response.json['error_message']) - response = self.get_json('/invalid_path', - expect_errors=True, - headers={"Accept": - "application/json,application/xml"} - ) - self.assertEqual(404, response.status_int) - self.assertEqual("application/json", response.content_type) - self.assertTrue(response.json['error_message']) - response = self.get_json('/invalid_path', - expect_errors=True, - headers={"Accept": - "application/xml;q=0.8, \ - application/json"} - ) - self.assertEqual(404, response.status_int) - self.assertEqual("application/json", response.content_type) - self.assertTrue(response.json['error_message']) - response = self.get_json('/invalid_path', - expect_errors=True - ) - self.assertEqual(404, response.status_int) - self.assertEqual("application/json", response.content_type) - self.assertTrue(response.json['error_message']) - response = self.get_json('/invalid_path', - expect_errors=True, - headers={"Accept": - "text/html,*/*"} - ) - self.assertEqual(404, response.status_int) - self.assertEqual("application/json", response.content_type) - self.assertTrue(response.json['error_message']) - - def test_json_parsable_error_middleware_translation_400(self): - # Ensure translated messages get placed properly into json faults - with mock.patch.object(i18n, 'translate', - side_effect=self._fake_translate): - response = self.post_json('/alarms', params={'name': 'foobar', - 'type': 'threshold'}, - expect_errors=True, - headers={"Accept": - "application/json"} - ) - self.assertEqual(400, response.status_int) - self.assertEqual("application/json", response.content_type) - self.assertTrue(response.json['error_message']) - self.assertEqual(self.no_lang_translated_error, - response.json['error_message']['faultstring']) - - def test_xml_parsable_error_middleware_404(self): - response = self.get_json('/invalid_path', - expect_errors=True, - headers={"Accept": - "application/xml,*/*"} - ) - self.assertEqual(404, response.status_int) - self.assertEqual("application/xml", response.content_type) - self.assertEqual('error_message', response.xml.tag) - response = self.get_json('/invalid_path', - expect_errors=True, - headers={"Accept": - "application/json;q=0.8 \ - ,application/xml"} - ) - self.assertEqual(404, response.status_int) - self.assertEqual("application/xml", response.content_type) - self.assertEqual('error_message', response.xml.tag) - - def test_xml_parsable_error_middleware_translation_400(self): - # Ensure translated messages get placed properly into xml faults - 
with mock.patch.object(i18n, 'translate', - side_effect=self._fake_translate): - response = self.post_json('/alarms', params={'name': 'foobar', - 'type': 'threshold'}, - expect_errors=True, - headers={"Accept": - "application/xml,*/*"} - ) - self.assertEqual(400, response.status_int) - self.assertEqual("application/xml", response.content_type) - self.assertEqual('error_message', response.xml.tag) - fault = response.xml.findall('./error/faultstring') - for fault_string in fault: - self.assertEqual(self.no_lang_translated_error, fault_string.text) - - def test_best_match_language(self): - # Ensure that we are actually invoking language negotiation - with mock.patch.object(i18n, 'translate', - side_effect=self._fake_translate): - response = self.post_json('/alarms', params={'name': 'foobar', - 'type': 'threshold'}, - expect_errors=True, - headers={"Accept": - "application/xml,*/*", - "Accept-Language": - "en-US"} - ) - - self.assertEqual(400, response.status_int) - self.assertEqual("application/xml", response.content_type) - self.assertEqual('error_message', response.xml.tag) - fault = response.xml.findall('./error/faultstring') - for fault_string in fault: - self.assertEqual(self.en_US_translated_error, fault_string.text) - - def test_translated_then_untranslated_error(self): - resp = self.get_json('/alarms/alarm-id-3', expect_errors=True) - self.assertEqual(404, resp.status_code) - body = resp.body - if six.PY3: - body = body.decode('utf-8') - self.assertEqual("Alarm alarm-id-3 not found", - json.loads(body)['error_message'] - ['faultstring']) - - with mock.patch('ceilometer.api.controllers.' - 'v2.base.AlarmNotFound') as CustomErrorClass: - CustomErrorClass.return_value = wsme.exc.ClientSideError( - "untranslated_error", status_code=404) - resp = self.get_json('/alarms/alarm-id-5', expect_errors=True) - - self.assertEqual(404, resp.status_code) - body = resp.body - if six.PY3: - body = body.decode('utf-8') - self.assertEqual("untranslated_error", - json.loads(body)['error_message'] - ['faultstring']) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/api/v2/test_capabilities.py ceilometer-5.0.0~b3/ceilometer/tests/api/v2/test_capabilities.py --- ceilometer-5.0.0~b2/ceilometer/tests/api/v2/test_capabilities.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/api/v2/test_capabilities.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,39 +0,0 @@ -# -# Copyright Ericsson AB 2014. All rights reserved -# -# Authors: Ildiko Vancsa -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
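The middleware tests deleted above drive fault-string translation by patching ceilometer.i18n.translate, so the negotiated Accept-Language locale decides which text comes back. A compact sketch of that pattern, reusing the side_effect convention from the removed tests (a None locale means no Accept-Language header was matched):

    import mock
    from ceilometer import i18n

    def fake_translate(message, user_locale):
        # Same shape as _fake_translate above: default vs. en-US text.
        return ('No lang translated error' if user_locale is None
                else 'en-US translated error')

    with mock.patch.object(i18n, 'translate', side_effect=fake_translate):
        pass  # POST an invalid alarm, then read error_message['faultstring']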
- -import testscenarios - -from ceilometer.tests.api import v2 as tests_api -from ceilometer.tests import db as tests_db - -load_tests = testscenarios.load_tests_apply_scenarios - - -class TestCapabilitiesController(tests_api.FunctionalTest, - tests_db.MixinTestsWithBackendScenarios): - - def setUp(self): - super(TestCapabilitiesController, self).setUp() - self.url = '/capabilities' - - def test_capabilities(self): - data = self.get_json(self.url) - # check that capabilities data contains both 'api' and 'storage' fields - self.assertIsNotNone(data) - self.assertNotEqual({}, data) - self.assertIn('api', data) - self.assertIn('storage', data) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/api/v2/test_complex_query.py ceilometer-5.0.0~b3/ceilometer/tests/api/v2/test_complex_query.py --- ceilometer-5.0.0~b2/ceilometer/tests/api/v2/test_complex_query.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/api/v2/test_complex_query.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,402 +0,0 @@ -# -# Copyright Ericsson AB 2013. All rights reserved -# -# Authors: Ildiko Vancsa -# Balazs Gibizer -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Test the methods related to complex query.""" -import datetime - -import fixtures -import jsonschema -import mock -from oslotest import base -import wsme - -from ceilometer.alarm.storage import models as alarm_models -from ceilometer.api.controllers.v2 import query -from ceilometer.storage import models - - -class FakeComplexQuery(query.ValidatedComplexQuery): - def __init__(self, db_model, additional_name_mapping=None, metadata=False): - super(FakeComplexQuery, self).__init__(query=None, - db_model=db_model, - additional_name_mapping=( - additional_name_mapping or - {}), - metadata_allowed=metadata) - - -sample_name_mapping = {"resource": "resource_id", - "meter": "counter_name", - "type": "counter_type", - "unit": "counter_unit", - "volume": "counter_volume"} - - -class TestComplexQuery(base.BaseTestCase): - def setUp(self): - super(TestComplexQuery, self).setUp() - self.useFixture(fixtures.MonkeyPatch( - 'pecan.response', mock.MagicMock())) - self.query = FakeComplexQuery(models.Sample, - sample_name_mapping, - True) - self.query_alarm = FakeComplexQuery(alarm_models.Alarm) - self.query_alarmchange = FakeComplexQuery( - alarm_models.AlarmChange) - - def test_replace_isotime_utc(self): - filter_expr = {"=": {"timestamp": "2013-12-05T19:38:29Z"}} - self.query._replace_isotime_with_datetime(filter_expr) - self.assertEqual(datetime.datetime(2013, 12, 5, 19, 38, 29), - filter_expr["="]["timestamp"]) - - def test_replace_isotime_timezone_removed(self): - filter_expr = {"=": {"timestamp": "2013-12-05T20:38:29+01:00"}} - self.query._replace_isotime_with_datetime(filter_expr) - self.assertEqual(datetime.datetime(2013, 12, 5, 20, 38, 29), - filter_expr["="]["timestamp"]) - - def test_replace_isotime_wrong_syntax(self): - filter_expr = {"=": {"timestamp": "not a valid isotime string"}} - self.assertRaises(wsme.exc.ClientSideError, - 
self.query._replace_isotime_with_datetime, - filter_expr) - - def test_replace_isotime_in_complex_filter(self): - filter_expr = {"and": [{"=": {"timestamp": "2013-12-05T19:38:29Z"}}, - {"=": {"timestamp": "2013-12-06T19:38:29Z"}}]} - self.query._replace_isotime_with_datetime(filter_expr) - self.assertEqual(datetime.datetime(2013, 12, 5, 19, 38, 29), - filter_expr["and"][0]["="]["timestamp"]) - self.assertEqual(datetime.datetime(2013, 12, 6, 19, 38, 29), - filter_expr["and"][1]["="]["timestamp"]) - - def test_replace_isotime_in_complex_filter_with_unbalanced_tree(self): - subfilter = {"and": [{"=": {"project_id": 42}}, - {"=": {"timestamp": "2013-12-06T19:38:29Z"}}]} - - filter_expr = {"or": [{"=": {"timestamp": "2013-12-05T19:38:29Z"}}, - subfilter]} - - self.query._replace_isotime_with_datetime(filter_expr) - self.assertEqual(datetime.datetime(2013, 12, 5, 19, 38, 29), - filter_expr["or"][0]["="]["timestamp"]) - self.assertEqual(datetime.datetime(2013, 12, 6, 19, 38, 29), - filter_expr["or"][1]["and"][1]["="]["timestamp"]) - - def test_convert_operator_to_lower_case(self): - filter_expr = {"AND": [{"=": {"project_id": 42}}, - {"=": {"project_id": 44}}]} - self.query._convert_operator_to_lower_case(filter_expr) - self.assertEqual("and", list(filter_expr.keys())[0]) - - filter_expr = {"Or": [{"=": {"project_id": 43}}, - {"anD": [{"=": {"project_id": 44}}, - {"=": {"project_id": 42}}]}]} - self.query._convert_operator_to_lower_case(filter_expr) - self.assertEqual("or", list(filter_expr.keys())[0]) - self.assertEqual("and", list(filter_expr["or"][1].keys())[0]) - - def test_invalid_filter_misstyped_field_name_samples(self): - filter = {"=": {"project_id11": 42}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_invalid_filter_misstyped_field_name_alarms(self): - filter = {"=": {"enabbled": True}} - self.assertRaises(jsonschema.ValidationError, - self.query_alarm._validate_filter, - filter) - - def test_invalid_filter_misstyped_field_name_alarmchange(self): - filter = {"=": {"tpe": "rule change"}} - self.assertRaises(jsonschema.ValidationError, - self.query_alarmchange._validate_filter, - filter) - - def test_invalid_complex_filter_wrong_field_names(self): - filter = {"and": - [{"=": {"non_existing_field": 42}}, - {"=": {"project_id": 42}}]} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - filter = {"and": - [{"=": {"project_id": 42}}, - {"=": {"non_existing_field": 42}}]} - self.assertRaises(jsonschema.ValidationError, - self.query_alarm._validate_filter, - filter) - - filter = {"and": - [{"=": {"project_id11": 42}}, - {"=": {"project_id": 42}}]} - self.assertRaises(jsonschema.ValidationError, - self.query_alarmchange._validate_filter, - filter) - - filter = {"or": - [{"=": {"non_existing_field": 42}}, - {"and": - [{"=": {"project_id": 44}}, - {"=": {"project_id": 42}}]}]} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - filter = {"or": - [{"=": {"project_id": 43}}, - {"and": - [{"=": {"project_id": 44}}, - {"=": {"non_existing_field": 42}}]}]} - self.assertRaises(jsonschema.ValidationError, - self.query_alarm._validate_filter, - filter) - - def test_convert_orderby(self): - orderby = [] - self.query._convert_orderby_to_lower_case(orderby) - self.assertEqual([], orderby) - - orderby = [{"project_id": "DESC"}] - self.query._convert_orderby_to_lower_case(orderby) - self.assertEqual([{"project_id": "desc"}], orderby) - - orderby = [{"project_id": "ASC"}, 
{"resource_id": "DESC"}] - self.query._convert_orderby_to_lower_case(orderby) - self.assertEqual([{"project_id": "asc"}, {"resource_id": "desc"}], - orderby) - - def test_validate_orderby_empty_direction(self): - orderby = [{"project_id": ""}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - orderby = [{"project_id": "asc"}, {"resource_id": ""}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - - def test_validate_orderby_wrong_order_string(self): - orderby = [{"project_id": "not a valid order"}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - - def test_validate_orderby_wrong_multiple_item_order_string(self): - orderby = [{"project_id": "not a valid order"}, {"resource_id": "ASC"}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - - def test_validate_orderby_empty_field_name(self): - orderby = [{"": "ASC"}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - orderby = [{"project_id": "asc"}, {"": "desc"}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - - def test_validate_orderby_wrong_field_name(self): - orderby = [{"project_id11": "ASC"}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - - def test_validate_orderby_wrong_field_name_multiple_item_orderby(self): - orderby = [{"project_id": "asc"}, {"resource_id11": "ASC"}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - - def test_validate_orderby_metadata_is_not_allowed(self): - orderby = [{"metadata.display_name": "asc"}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - - -class TestFilterSyntaxValidation(base.BaseTestCase): - def setUp(self): - super(TestFilterSyntaxValidation, self).setUp() - self.query = FakeComplexQuery(models.Sample, - sample_name_mapping, - True) - - def test_simple_operator(self): - filter = {"=": {"project_id": "string_value"}} - self.query._validate_filter(filter) - - filter = {"=>": {"project_id": "string_value"}} - self.query._validate_filter(filter) - - def test_valid_value_types(self): - filter = {"=": {"project_id": "string_value"}} - self.query._validate_filter(filter) - - filter = {"=": {"project_id": 42}} - self.query._validate_filter(filter) - - filter = {"=": {"project_id": 3.14}} - self.query._validate_filter(filter) - - filter = {"=": {"project_id": True}} - self.query._validate_filter(filter) - - filter = {"=": {"project_id": False}} - self.query._validate_filter(filter) - - def test_invalid_simple_operator(self): - filter = {"==": {"project_id": "string_value"}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - filter = {"": {"project_id": "string_value"}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_more_than_one_operator_is_invalid(self): - filter = {"=": {"project_id": "string_value"}, - "<": {"": ""}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_empty_expression_is_invalid(self): - filter = {} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_invalid_field_name(self): - filter = {"=": {"": "value"}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - filter = 
{"=": {" ": "value"}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - filter = {"=": {"\t": "value"}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_more_than_one_field_is_invalid(self): - filter = {"=": {"project_id": "value", "resource_id": "value"}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_missing_field_after_simple_op_is_invalid(self): - filter = {"=": {}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_and_or(self): - filter = {"and": [{"=": {"project_id": "string_value"}}, - {"=": {"resource_id": "value"}}]} - self.query._validate_filter(filter) - - filter = {"or": [{"and": [{"=": {"project_id": "string_value"}}, - {"=": {"resource_id": "value"}}]}, - {"=": {"counter_name": "value"}}]} - self.query._validate_filter(filter) - - filter = {"or": [{"and": [{"=": {"project_id": "string_value"}}, - {"=": {"resource_id": "value"}}, - {"<": {"counter_name": 42}}]}, - {"=": {"counter_name": "value"}}]} - self.query._validate_filter(filter) - - def test_complex_operator_with_in(self): - filter = {"and": [{"<": {"counter_volume": 42}}, - {">=": {"counter_volume": 36}}, - {"in": {"project_id": ["project_id1", - "project_id2", - "project_id3"]}}]} - self.query._validate_filter(filter) - - def test_invalid_complex_operator(self): - filter = {"xor": [{"=": {"project_id": "string_value"}}, - {"=": {"resource_id": "value"}}]} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_and_or_with_one_child_is_invalid(self): - filter = {"or": [{"=": {"project_id": "string_value"}}]} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_complex_operator_with_zero_child_is_invalid(self): - filter = {"or": []} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_more_than_one_complex_operator_is_invalid(self): - filter = {"and": [{"=": {"project_id": "string_value"}}, - {"=": {"resource_id": "value"}}], - "or": [{"=": {"project_id": "string_value"}}, - {"=": {"resource_id": "value"}}]} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_not(self): - filter = {"not": {"=": {"project_id": "value"}}} - self.query._validate_filter(filter) - - filter = { - "not": - {"or": - [{"and": - [{"=": {"project_id": "string_value"}}, - {"=": {"resource_id": "value"}}, - {"<": {"counter_name": 42}}]}, - {"=": {"counter_name": "value"}}]}} - self.query._validate_filter(filter) - - def test_not_with_zero_child_is_invalid(self): - filter = {"not": {}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_not_with_more_than_one_child_is_invalid(self): - filter = {"not": {"=": {"project_id": "value"}, - "!=": {"resource_id": "value"}}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_empty_in_query_not_passing(self): - filter = {"in": {"resource_id": []}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/api/v2/test_complex_query_scenarios.py ceilometer-5.0.0~b3/ceilometer/tests/api/v2/test_complex_query_scenarios.py --- ceilometer-5.0.0~b2/ceilometer/tests/api/v2/test_complex_query_scenarios.py 2015-07-30 12:14:00.000000000 +0000 +++ 
ceilometer-5.0.0~b3/ceilometer/tests/api/v2/test_complex_query_scenarios.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,600 +0,0 @@ -# -# Copyright Ericsson AB 2013. All rights reserved -# -# Authors: Ildiko Vancsa -# Balazs Gibizer -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests complex queries for samples -""" - -import datetime - -from oslo_utils import timeutils - -from ceilometer.alarm.storage import models -from ceilometer.publisher import utils -from ceilometer import sample -from ceilometer.tests.api import v2 as tests_api -from ceilometer.tests import db as tests_db - - -admin_header = {"X-Roles": "admin", - "X-Project-Id": - "project-id1"} -non_admin_header = {"X-Roles": "Member", - "X-Project-Id": - "project-id1"} - - -class TestQueryMetersController(tests_api.FunctionalTest, - tests_db.MixinTestsWithBackendScenarios): - - def setUp(self): - super(TestQueryMetersController, self).setUp() - self.url = '/query/samples' - - for cnt in [ - sample.Sample('meter.test', - 'cumulative', - '', - 1, - 'user-id1', - 'project-id1', - 'resource-id1', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server1', - 'tag': 'self.sample', - 'size': 456, - 'util': 0.25, - 'is_public': True}, - source='test_source'), - sample.Sample('meter.test', - 'cumulative', - '', - 2, - 'user-id2', - 'project-id2', - 'resource-id2', - timestamp=datetime.datetime(2012, 7, 2, 10, 41), - resource_metadata={'display_name': 'test-server2', - 'tag': 'self.sample', - 'size': 123, - 'util': 0.75, - 'is_public': True}, - source='test_source'), - sample.Sample('meter.test', - 'cumulative', - '', - 3, - 'user-id3', - 'project-id3', - 'resource-id3', - timestamp=datetime.datetime(2012, 7, 2, 10, 42), - resource_metadata={'display_name': 'test-server3', - 'tag': 'self.sample', - 'size': 789, - 'util': 0.95, - 'is_public': True}, - source='test_source')]: - - msg = utils.meter_message_from_counter( - cnt, self.CONF.publisher.telemetry_secret) - self.conn.record_metering_data(msg) - - def test_query_fields_are_optional(self): - data = self.post_json(self.url, params={}) - self.assertEqual(3, len(data.json)) - - def test_query_with_isotime(self): - date_time = datetime.datetime(2012, 7, 2, 10, 41) - isotime = date_time.isoformat() - - data = self.post_json(self.url, - params={"filter": - '{">=": {"timestamp": "' - + isotime + '"}}'}) - - self.assertEqual(2, len(data.json)) - for sample_item in data.json: - result_time = timeutils.parse_isotime(sample_item['timestamp']) - result_time = result_time.replace(tzinfo=None) - self.assertTrue(result_time >= date_time) - - def test_non_admin_tenant_sees_only_its_own_project(self): - data = self.post_json(self.url, - params={}, - headers=non_admin_header) - for sample_item in data.json: - self.assertEqual("project-id1", sample_item['project_id']) - - def test_non_admin_tenant_cannot_query_others_project(self): - data = self.post_json(self.url, - params={"filter": - '{"=": {"project_id": "project-id2"}}'}, - expect_errors=True, - 
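The isotime case above splices an ISO-formatted bound into the filter string by hand; the same expression can be produced with json.dumps, which is a usage sketch rather than part of the test suite:

    import datetime
    import json

    date_time = datetime.datetime(2012, 7, 2, 10, 41)
    filter_expr = json.dumps({">=": {"timestamp": date_time.isoformat()}})
    assert filter_expr == '{">=": {"timestamp": "2012-07-02T10:41:00"}}'
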
headers=non_admin_header) - - self.assertEqual(401, data.status_int) - self.assertIn(b"Not Authorized to access project project-id2", - data.body) - - def test_non_admin_tenant_can_explicitly_filter_for_own_project(self): - data = self.post_json(self.url, - params={"filter": - '{"=": {"project_id": "project-id1"}}'}, - headers=non_admin_header) - - for sample_item in data.json: - self.assertEqual("project-id1", sample_item['project_id']) - - def test_admin_tenant_sees_every_project(self): - data = self.post_json(self.url, - params={}, - headers=admin_header) - - self.assertEqual(3, len(data.json)) - for sample_item in data.json: - self.assertIn(sample_item['project_id'], - (["project-id1", "project-id2", "project-id3"])) - - def test_admin_tenant_sees_every_project_with_complex_filter(self): - filter = ('{"OR": ' + - '[{"=": {"project_id": "project-id1"}}, ' + - '{"=": {"project_id": "project-id2"}}]}') - data = self.post_json(self.url, - params={"filter": filter}, - headers=admin_header) - - self.assertEqual(2, len(data.json)) - for sample_item in data.json: - self.assertIn(sample_item['project_id'], - (["project-id1", "project-id2"])) - - def test_admin_tenant_sees_every_project_with_in_filter(self): - filter = ('{"In": ' + - '{"project_id": ["project-id1", "project-id2"]}}') - data = self.post_json(self.url, - params={"filter": filter}, - headers=admin_header) - - self.assertEqual(2, len(data.json)) - for sample_item in data.json: - self.assertIn(sample_item['project_id'], - (["project-id1", "project-id2"])) - - def test_admin_tenant_can_query_any_project(self): - data = self.post_json(self.url, - params={"filter": - '{"=": {"project_id": "project-id2"}}'}, - headers=admin_header) - - self.assertEqual(1, len(data.json)) - for sample_item in data.json: - self.assertIn(sample_item['project_id'], set(["project-id2"])) - - def test_query_with_orderby(self): - data = self.post_json(self.url, - params={"orderby": '[{"project_id": "DESC"}]'}) - - self.assertEqual(3, len(data.json)) - self.assertEqual(["project-id3", "project-id2", "project-id1"], - [s["project_id"] for s in data.json]) - - def test_query_with_field_name_project(self): - data = self.post_json(self.url, - params={"filter": - '{"=": {"project": "project-id2"}}'}) - - self.assertEqual(1, len(data.json)) - for sample_item in data.json: - self.assertIn(sample_item['project_id'], set(["project-id2"])) - - def test_query_with_field_name_resource(self): - data = self.post_json(self.url, - params={"filter": - '{"=": {"resource": "resource-id2"}}'}) - - self.assertEqual(1, len(data.json)) - for sample_item in data.json: - self.assertIn(sample_item['resource_id'], set(["resource-id2"])) - - def test_query_with_wrong_field_name(self): - data = self.post_json(self.url, - params={"filter": - '{"=": {"unknown": "resource-id2"}}'}, - expect_errors=True) - - self.assertEqual(400, data.status_int) - self.assertIn(b"is not valid under any of the given schemas", - data.body) - - def test_query_with_wrong_json(self): - data = self.post_json(self.url, - params={"filter": - '{"=": "resource": "resource-id2"}}'}, - expect_errors=True) - - self.assertEqual(400, data.status_int) - self.assertIn(b"Filter expression not valid", data.body) - - def test_query_with_field_name_user(self): - data = self.post_json(self.url, - params={"filter": - '{"=": {"user": "user-id2"}}'}) - - self.assertEqual(1, len(data.json)) - for sample_item in data.json: - self.assertIn(sample_item['user_id'], set(["user-id2"])) - - def test_query_with_field_name_meter(self): - data = 
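All of these scenarios drive the same wire format: /query/samples takes an optional "filter" (a JSON-encoded expression string), an optional "orderby" (a JSON-encoded list), and an integer "limit"; the "OR"/"In" variants above show operator names are accepted case-insensitively. A representative request body with values borrowed from the fixtures:

    import json

    query_body = {
        "filter": json.dumps({"or": [{"=": {"project_id": "project-id1"}},
                                     {"=": {"project_id": "project-id2"}}]}),
        "orderby": json.dumps([{"project_id": "DESC"}]),
        "limit": 3,
    }
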
self.post_json(self.url, - params={"filter": - '{"=": {"meter": "meter.test"}}'}) - - self.assertEqual(3, len(data.json)) - for sample_item in data.json: - self.assertIn(sample_item['meter'], set(["meter.test"])) - - def test_query_with_lower_and_upper_case_orderby(self): - data = self.post_json(self.url, - params={"orderby": '[{"project_id": "DeSc"}]'}) - - self.assertEqual(3, len(data.json)) - self.assertEqual(["project-id3", "project-id2", "project-id1"], - [s["project_id"] for s in data.json]) - - def test_query_with_user_field_name_orderby(self): - data = self.post_json(self.url, - params={"orderby": '[{"user": "aSc"}]'}) - - self.assertEqual(3, len(data.json)) - self.assertEqual(["user-id1", "user-id2", "user-id3"], - [s["user_id"] for s in data.json]) - - def test_query_with_volume_field_name_orderby(self): - data = self.post_json(self.url, - params={"orderby": '[{"volume": "deSc"}]'}) - - self.assertEqual(3, len(data.json)) - self.assertEqual([3, 2, 1], - [s["volume"] for s in data.json]) - - def test_query_with_missing_order_in_orderby(self): - data = self.post_json(self.url, - params={"orderby": '[{"project_id": ""}]'}, - expect_errors=True) - - self.assertEqual(400, data.status_int) - self.assertIn(b"does not match '(?i)^asc$|^desc$'", data.body) - - def test_query_with_wrong_json_in_orderby(self): - data = self.post_json(self.url, - params={"orderby": '{"project_id": "desc"}]'}, - expect_errors=True) - - self.assertEqual(400, data.status_int) - self.assertIn(b"Order-by expression not valid: Extra data", data.body) - - def test_filter_with_metadata(self): - data = self.post_json(self.url, - params={"filter": - '{">=": {"metadata.util": 0.5}}'}) - - self.assertEqual(2, len(data.json)) - for sample_item in data.json: - self.assertTrue(float(sample_item["metadata"]["util"]) >= 0.5) - - def test_filter_with_negation(self): - filter_expr = '{"not": {">=": {"metadata.util": 0.5}}}' - data = self.post_json(self.url, - params={"filter": filter_expr}) - - self.assertEqual(1, len(data.json)) - for sample_item in data.json: - self.assertTrue(float(sample_item["metadata"]["util"]) < 0.5) - - def test_limit_should_be_positive(self): - data = self.post_json(self.url, - params={"limit": 0}, - expect_errors=True) - - self.assertEqual(400, data.status_int) - self.assertIn(b"Limit should be positive", data.body) - - -class TestQueryAlarmsController(tests_api.FunctionalTest, - tests_db.MixinTestsWithBackendScenarios): - - def setUp(self): - super(TestQueryAlarmsController, self).setUp() - self.alarm_url = '/query/alarms' - - for state in ['ok', 'alarm', 'insufficient data']: - for date in [datetime.datetime(2013, 1, 1), - datetime.datetime(2013, 2, 2)]: - for id in [1, 2]: - alarm_id = "-".join([state, date.isoformat(), str(id)]) - project_id = "project-id%d" % id - alarm = models.Alarm(name=alarm_id, - type='threshold', - enabled=True, - alarm_id=alarm_id, - description='a', - state=state, - state_timestamp=date, - timestamp=date, - ok_actions=[], - insufficient_data_actions=[], - alarm_actions=[], - repeat_actions=True, - user_id="user-id%d" % id, - project_id=project_id, - time_constraints=[], - rule=dict(comparison_operator='gt', - threshold=2.0, - statistic='avg', - evaluation_periods=60, - period=1, - meter_name='meter.test', - query=[{'field': - 'project_id', - 'op': 'eq', - 'value': - project_id}]), - severity='critical') - self.alarm_conn.update_alarm(alarm) - - def test_query_all(self): - data = self.post_json(self.alarm_url, - params={}) - - self.assertEqual(12, len(data.json)) - - def 
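The mixed-case orderby cases ("DeSc", "aSc", "deSc") work because directions are folded to lower case before further validation, the behaviour _convert_orderby_to_lower_case exercises at the top of this hunk. A minimal sketch of such an in-place helper (the name mirrors the test, the body is assumed):

    def convert_orderby_to_lower_case(orderby):
        # Fold each {"field": "DiReCtIoN"} entry to lower case in place.
        for field_to_order in orderby:
            for field, direction in field_to_order.items():
                field_to_order[field] = direction.lower()

    orderby = [{"project_id": "DeSc"}, {"user": "aSc"}]
    convert_orderby_to_lower_case(orderby)
    assert orderby == [{"project_id": "desc"}, {"user": "asc"}]
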
test_filter_with_isotime_timestamp(self): - date_time = datetime.datetime(2013, 1, 1) - isotime = date_time.isoformat() - - data = self.post_json(self.alarm_url, - params={"filter": - '{">": {"timestamp": "' - + isotime + '"}}'}) - - self.assertEqual(6, len(data.json)) - for alarm in data.json: - result_time = timeutils.parse_isotime(alarm['timestamp']) - result_time = result_time.replace(tzinfo=None) - self.assertTrue(result_time > date_time) - - def test_filter_with_isotime_state_timestamp(self): - date_time = datetime.datetime(2013, 1, 1) - isotime = date_time.isoformat() - - data = self.post_json(self.alarm_url, - params={"filter": - '{">": {"state_timestamp": "' - + isotime + '"}}'}) - - self.assertEqual(6, len(data.json)) - for alarm in data.json: - result_time = timeutils.parse_isotime(alarm['state_timestamp']) - result_time = result_time.replace(tzinfo=None) - self.assertTrue(result_time > date_time) - - def test_non_admin_tenant_sees_only_its_own_project(self): - data = self.post_json(self.alarm_url, - params={}, - headers=non_admin_header) - for alarm in data.json: - self.assertEqual("project-id1", alarm['project_id']) - - def test_non_admin_tenant_cannot_query_others_project(self): - data = self.post_json(self.alarm_url, - params={"filter": - '{"=": {"project_id": "project-id2"}}'}, - expect_errors=True, - headers=non_admin_header) - - self.assertEqual(401, data.status_int) - self.assertIn(b"Not Authorized to access project project-id2", - data.body) - - def test_non_admin_tenant_can_explicitly_filter_for_own_project(self): - data = self.post_json(self.alarm_url, - params={"filter": - '{"=": {"project_id": "project-id1"}}'}, - headers=non_admin_header) - - for alarm in data.json: - self.assertEqual("project-id1", alarm['project_id']) - - def test_admin_tenant_sees_every_project(self): - data = self.post_json(self.alarm_url, - params={}, - headers=admin_header) - - self.assertEqual(12, len(data.json)) - for alarm in data.json: - self.assertIn(alarm['project_id'], - (["project-id1", "project-id2"])) - - def test_admin_tenant_can_query_any_project(self): - data = self.post_json(self.alarm_url, - params={"filter": - '{"=": {"project_id": "project-id2"}}'}, - headers=admin_header) - - self.assertEqual(6, len(data.json)) - for alarm in data.json: - self.assertIn(alarm['project_id'], set(["project-id2"])) - - def test_query_with_field_project(self): - data = self.post_json(self.alarm_url, - params={"filter": - '{"=": {"project": "project-id2"}}'}) - - self.assertEqual(6, len(data.json)) - for sample_item in data.json: - self.assertIn(sample_item['project_id'], set(["project-id2"])) - - def test_query_with_field_user_in_orderby(self): - data = self.post_json(self.alarm_url, - params={"filter": '{"=": {"state": "alarm"}}', - "orderby": '[{"user": "DESC"}]'}) - - self.assertEqual(4, len(data.json)) - self.assertEqual(["user-id2", "user-id2", "user-id1", "user-id1"], - [s["user_id"] for s in data.json]) - - def test_query_with_filter_orderby_and_limit(self): - orderby = '[{"state_timestamp": "DESC"}]' - data = self.post_json(self.alarm_url, - params={"filter": '{"=": {"state": "alarm"}}', - "orderby": orderby, - "limit": 3}) - - self.assertEqual(3, len(data.json)) - self.assertEqual(["2013-02-02T00:00:00", - "2013-02-02T00:00:00", - "2013-01-01T00:00:00"], - [a["state_timestamp"] for a in data.json]) - for alarm in data.json: - self.assertEqual("alarm", alarm["state"]) - - def test_limit_should_be_positive(self): - data = self.post_json(self.alarm_url, - params={"limit": 0}, - 
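Every count asserted in this alarm-query class falls out of the fixture's cross product, one alarm per (state, date, project) combination. Restated as plain arithmetic:

    from itertools import product

    states = ["ok", "alarm", "insufficient data"]
    dates = ["2013-01-01", "2013-02-02"]
    projects = ["project-id1", "project-id2"]

    grid = list(product(states, dates, projects))
    assert len(grid) == 12                                   # test_query_all
    assert sum(p == "project-id2" for _, _, p in grid) == 6  # per-project view
    assert sum(s == "alarm" for s, _, _ in grid) == 4        # state filter
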
expect_errors=True) - - self.assertEqual(400, data.status_int) - self.assertIn(b"Limit should be positive", data.body) - - -class TestQueryAlarmsHistoryController( - tests_api.FunctionalTest, tests_db.MixinTestsWithBackendScenarios): - - def setUp(self): - super(TestQueryAlarmsHistoryController, self).setUp() - self.url = '/query/alarms/history' - for id in [1, 2]: - for type in ["creation", "state transition"]: - for date in [datetime.datetime(2013, 1, 1), - datetime.datetime(2013, 2, 2)]: - event_id = "-".join([str(id), type, date.isoformat()]) - alarm_change = {"event_id": event_id, - "alarm_id": "alarm-id%d" % id, - "type": type, - "detail": "", - "user_id": "user-id%d" % id, - "project_id": "project-id%d" % id, - "on_behalf_of": "project-id%d" % id, - "timestamp": date} - - self.alarm_conn.record_alarm_change(alarm_change) - - def test_query_all(self): - data = self.post_json(self.url, - params={}) - - self.assertEqual(8, len(data.json)) - - def test_filter_with_isotime(self): - date_time = datetime.datetime(2013, 1, 1) - isotime = date_time.isoformat() - - data = self.post_json(self.url, - params={"filter": - '{">": {"timestamp":"' - + isotime + '"}}'}) - - self.assertEqual(4, len(data.json)) - for history in data.json: - result_time = timeutils.parse_isotime(history['timestamp']) - result_time = result_time.replace(tzinfo=None) - self.assertTrue(result_time > date_time) - - def test_non_admin_tenant_sees_only_its_own_project(self): - data = self.post_json(self.url, - params={}, - headers=non_admin_header) - for history in data.json: - self.assertEqual("project-id1", history['on_behalf_of']) - - def test_non_admin_tenant_cannot_query_others_project(self): - data = self.post_json(self.url, - params={"filter": - '{"=": {"on_behalf_of":' - + ' "project-id2"}}'}, - expect_errors=True, - headers=non_admin_header) - - self.assertEqual(401, data.status_int) - self.assertIn(b"Not Authorized to access project project-id2", - data.body) - - def test_non_admin_tenant_can_explicitly_filter_for_own_project(self): - data = self.post_json(self.url, - params={"filter": - '{"=": {"on_behalf_of":' - + ' "project-id1"}}'}, - headers=non_admin_header) - - for history in data.json: - self.assertEqual("project-id1", history['on_behalf_of']) - - def test_admin_tenant_sees_every_project(self): - data = self.post_json(self.url, - params={}, - headers=admin_header) - - self.assertEqual(8, len(data.json)) - for history in data.json: - self.assertIn(history['on_behalf_of'], - (["project-id1", "project-id2"])) - - def test_query_with_filter_for_project_orderby_with_user(self): - data = self.post_json(self.url, - params={"filter": - '{"=": {"project": "project-id1"}}', - "orderby": '[{"user": "DESC"}]', - "limit": 3}) - - self.assertEqual(3, len(data.json)) - self.assertEqual(["user-id1", - "user-id1", - "user-id1"], - [h["user_id"] for h in data.json]) - for history in data.json: - self.assertEqual("project-id1", history['project_id']) - - def test_query_with_filter_orderby_and_limit(self): - data = self.post_json(self.url, - params={"filter": '{"=": {"type": "creation"}}', - "orderby": '[{"timestamp": "DESC"}]', - "limit": 3}) - - self.assertEqual(3, len(data.json)) - self.assertEqual(["2013-02-02T00:00:00", - "2013-02-02T00:00:00", - "2013-01-01T00:00:00"], - [h["timestamp"] for h in data.json]) - for history in data.json: - self.assertEqual("creation", history['type']) - - def test_limit_should_be_positive(self): - data = self.post_json(self.url, - params={"limit": 0}, - expect_errors=True) - - 
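The alarm-history fixture below follows the same pattern, one change record per (alarm id, change type, date), which is where the 8 in test_query_all and the 4 surviving the '>' timestamp filter come from. Restated:

    from itertools import product

    changes = list(product([1, 2],                        # alarm ids
                           ["creation", "state transition"],
                           ["2013-01-01", "2013-02-02"]))
    assert len(changes) == 8                               # test_query_all
    # ISO date strings order lexicographically == chronologically:
    assert sum(d > "2013-01-01" for _, _, d in changes) == 4
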
self.assertEqual(400, data.status_int) - self.assertIn(b"Limit should be positive", data.body) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/api/v2/test_compute_duration_by_resource_scenarios.py ceilometer-5.0.0~b3/ceilometer/tests/api/v2/test_compute_duration_by_resource_scenarios.py --- ceilometer-5.0.0~b2/ceilometer/tests/api/v2/test_compute_duration_by_resource_scenarios.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/api/v2/test_compute_duration_by_resource_scenarios.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,195 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Test listing raw events. -""" - -import datetime - -import mock -from oslo_utils import timeutils - -from ceilometer.storage import models -from ceilometer.tests.api import v2 -from ceilometer.tests import db as tests_db - - -class TestComputeDurationByResource(v2.FunctionalTest, - tests_db.MixinTestsWithBackendScenarios): - - def setUp(self): - super(TestComputeDurationByResource, self).setUp() - # Create events relative to the range and pretend - # that the intervening events exist. - - self.early1 = datetime.datetime(2012, 8, 27, 7, 0) - self.early2 = datetime.datetime(2012, 8, 27, 17, 0) - - self.start = datetime.datetime(2012, 8, 28, 0, 0) - - self.middle1 = datetime.datetime(2012, 8, 28, 8, 0) - self.middle2 = datetime.datetime(2012, 8, 28, 18, 0) - - self.end = datetime.datetime(2012, 8, 28, 23, 59) - - self.late1 = datetime.datetime(2012, 8, 29, 9, 0) - self.late2 = datetime.datetime(2012, 8, 29, 19, 0) - - def _patch_get_interval(self, start, end): - def get_interval(sample_filter, period, groupby, aggregate): - self.assertIsNotNone(sample_filter.start_timestamp) - self.assertIsNotNone(sample_filter.end_timestamp) - if (sample_filter.start_timestamp > end or - sample_filter.end_timestamp < start): - return [] - duration_start = max(sample_filter.start_timestamp, start) - duration_end = min(sample_filter.end_timestamp, end) - duration = timeutils.delta_seconds(duration_start, duration_end) - return [ - models.Statistics( - unit='', - min=0, - max=0, - avg=0, - sum=0, - count=0, - period=None, - period_start=None, - period_end=None, - duration=duration, - duration_start=duration_start, - duration_end=duration_end, - groupby=None, - ) - ] - return mock.patch.object(type(self.conn), 'get_meter_statistics', - side_effect=get_interval) - - def _invoke_api(self): - return self.get_json('/meters/instance:m1.tiny/statistics', - q=[{'field': 'timestamp', - 'op': 'ge', - 'value': self.start.isoformat()}, - {'field': 'timestamp', - 'op': 'le', - 'value': self.end.isoformat()}, - {'field': 'search_offset', - 'value': 10}]) - - def test_before_range(self): - with self._patch_get_interval(self.early1, self.early2): - data = self._invoke_api() - self.assertEqual([], data) - - def _assert_times_match(self, actual, expected): - if actual: - actual = timeutils.parse_isotime(actual) - actual = actual.replace(tzinfo=None) - 
self.assertEqual(expected, actual) - - def test_overlap_range_start(self): - with self._patch_get_interval(self.early1, self.middle1): - data = self._invoke_api() - self._assert_times_match(data[0]['duration_start'], self.start) - self._assert_times_match(data[0]['duration_end'], self.middle1) - self.assertEqual(8 * 60 * 60, data[0]['duration']) - - def test_within_range(self): - with self._patch_get_interval(self.middle1, self.middle2): - data = self._invoke_api() - self._assert_times_match(data[0]['duration_start'], self.middle1) - self._assert_times_match(data[0]['duration_end'], self.middle2) - self.assertEqual(10 * 60 * 60, data[0]['duration']) - - def test_within_range_zero_duration(self): - with self._patch_get_interval(self.middle1, self.middle1): - data = self._invoke_api() - self._assert_times_match(data[0]['duration_start'], self.middle1) - self._assert_times_match(data[0]['duration_end'], self.middle1) - self.assertEqual(0, data[0]['duration']) - - def test_overlap_range_end(self): - with self._patch_get_interval(self.middle2, self.late1): - data = self._invoke_api() - self._assert_times_match(data[0]['duration_start'], self.middle2) - self._assert_times_match(data[0]['duration_end'], self.end) - self.assertEqual(((6 * 60) - 1) * 60, data[0]['duration']) - - def test_after_range(self): - with self._patch_get_interval(self.late1, self.late2): - data = self._invoke_api() - self.assertEqual([], data) - - def test_without_end_timestamp(self): - statistics = [ - models.Statistics( - unit=None, - count=0, - min=None, - max=None, - avg=None, - duration=None, - duration_start=self.late1, - duration_end=self.late2, - sum=0, - period=None, - period_start=None, - period_end=None, - groupby=None, - ) - ] - with mock.patch.object(type(self.conn), 'get_meter_statistics', - return_value=statistics): - data = self.get_json('/meters/instance:m1.tiny/statistics', - q=[{'field': 'timestamp', - 'op': 'ge', - 'value': self.late1.isoformat()}, - {'field': 'resource_id', - 'value': 'resource-id'}, - {'field': 'search_offset', - 'value': 10}]) - self._assert_times_match(data[0]['duration_start'], self.late1) - self._assert_times_match(data[0]['duration_end'], self.late2) - - def test_without_start_timestamp(self): - statistics = [ - models.Statistics( - unit=None, - count=0, - min=None, - max=None, - avg=None, - duration=None, - duration_start=self.early1, - duration_end=self.early2, - sum=0, - period=None, - period_start=None, - period_end=None, - groupby=None, - ) - ] - - with mock.patch.object(type(self.conn), 'get_meter_statistics', - return_value=statistics): - data = self.get_json('/meters/instance:m1.tiny/statistics', - q=[{'field': 'timestamp', - 'op': 'le', - 'value': self.early2.isoformat()}, - {'field': 'resource_id', - 'value': 'resource-id'}, - {'field': 'search_offset', - 'value': 10}]) - self._assert_times_match(data[0]['duration_start'], self.early1) - self._assert_times_match(data[0]['duration_end'], self.early2) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/api/v2/test_event_scenarios.py ceilometer-5.0.0~b3/ceilometer/tests/api/v2/test_event_scenarios.py --- ceilometer-5.0.0~b2/ceilometer/tests/api/v2/test_event_scenarios.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/api/v2/test_event_scenarios.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,507 +0,0 @@ -# -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
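The patched get_meter_statistics above reduces every duration case to one clamping rule: the reported window is the intersection of the query window with the span when samples exist, and no overlap means an empty result. A standalone restatement under assumed helper names:

    import datetime

    def clamped_duration(query_start, query_end, first_sample, last_sample):
        # No overlap between query window and sample span: the API returns [].
        if first_sample > query_end or last_sample < query_start:
            return None
        duration_start = max(query_start, first_sample)
        duration_end = min(query_end, last_sample)
        seconds = (duration_end - duration_start).total_seconds()
        return duration_start, duration_end, seconds

    start = datetime.datetime(2012, 8, 28, 0, 0)
    end = datetime.datetime(2012, 8, 28, 23, 59)
    early1 = datetime.datetime(2012, 8, 27, 7, 0)
    middle1 = datetime.datetime(2012, 8, 28, 8, 0)

    # test_overlap_range_start: clamp to [start, middle1] -> 8 hours.
    assert clamped_duration(start, end, early1, middle1)[2] == 8 * 60 * 60

    # test_after_range: samples entirely after the window -> no result.
    late1 = datetime.datetime(2012, 8, 29, 9, 0)
    late2 = datetime.datetime(2012, 8, 29, 19, 0)
    assert clamped_duration(start, end, late1, late2) is None
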
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Test event, event_type and trait retrieval.""" - -import datetime -import uuid - -import webtest.app - -from ceilometer.event.storage import models -from ceilometer.tests.api import v2 -from ceilometer.tests import db as tests_db - -headers = {"X-Roles": "admin"} - - -class EventTestBase(v2.FunctionalTest, - tests_db.MixinTestsWithBackendScenarios): - - def setUp(self): - super(EventTestBase, self).setUp() - self._generate_models() - - def _generate_models(self): - event_models = [] - base = 0 - self.s_time = datetime.datetime(2013, 12, 31, 5, 0) - self.trait_time = datetime.datetime(2013, 12, 31, 5, 0) - for event_type in ['Foo', 'Bar', 'Zoo']: - trait_models = [models.Trait(name, type, value) - for name, type, value in [ - ('trait_A', models.Trait.TEXT_TYPE, - "my_%s_text" % event_type), - ('trait_B', models.Trait.INT_TYPE, - base + 1), - ('trait_C', models.Trait.FLOAT_TYPE, - float(base) + 0.123456), - ('trait_D', models.Trait.DATETIME_TYPE, - self.trait_time)]] - - # Message ID for test will be 'base'. So, message ID for the first - # event will be '0', the second '100', and so on. - # trait_time in first event will be equal to self.trait_time - # (datetime.datetime(2013, 12, 31, 5, 0)), next will add 1 day, so - # second will be (datetime.datetime(2014, 01, 01, 5, 0)) and so on. - event_models.append( - models.Event(message_id=str(base), - event_type=event_type, - generated=self.trait_time, - traits=trait_models, - raw={'status': {'nested': 'started'}})) - base += 100 - self.trait_time += datetime.timedelta(days=1) - self.event_conn.record_events(event_models) - - -class TestEventTypeAPI(EventTestBase): - - PATH = '/event_types' - - def test_event_types(self): - data = self.get_json(self.PATH, headers=headers) - for event_type in ['Foo', 'Bar', 'Zoo']: - self.assertIn(event_type, data) - - -class TestTraitAPI(EventTestBase): - - PATH = '/event_types/%s/traits' - - def test_get_traits_for_event(self): - path = self.PATH % "Foo" - data = self.get_json(path, headers=headers) - - self.assertEqual(4, len(data)) - - def test_get_event_invalid_path(self): - data = self.get_json('/event_types/trait_A/', headers=headers, - expect_errors=True) - self.assertEqual(404, data.status_int) - - def test_get_traits_for_non_existent_event(self): - path = self.PATH % "NO_SUCH_EVENT_TYPE" - data = self.get_json(path, headers=headers) - - self.assertEqual([], data) - - def test_get_trait_data_for_event(self): - path = (self.PATH % "Foo") + "/trait_A" - data = self.get_json(path, headers=headers) - self.assertEqual(1, len(data)) - self.assertEqual("trait_A", data[0]['name']) - - path = (self.PATH % "Foo") + "/trait_B" - data = self.get_json(path, headers=headers) - self.assertEqual(1, len(data)) - self.assertEqual("trait_B", data[0]['name']) - self.assertEqual("1", data[0]['value']) - - path = (self.PATH % "Foo") + "/trait_D" - data = self.get_json(path, headers=headers) - self.assertEqual(1, len(data)) - self.assertEqual("trait_D", data[0]['name']) - self.assertEqual((self.trait_time - datetime.timedelta(days=3)). 
- isoformat(), data[0]['value']) - - def test_get_trait_data_for_non_existent_event(self): - path = (self.PATH % "NO_SUCH_EVENT") + "/trait_A" - data = self.get_json(path, headers=headers) - - self.assertEqual([], data) - - def test_get_trait_data_for_non_existent_trait(self): - path = (self.PATH % "Foo") + "/no_such_trait" - data = self.get_json(path, headers=headers) - - self.assertEqual([], data) - - -class TestEventAPI(EventTestBase): - - PATH = '/events' - - def test_get_events(self): - data = self.get_json(self.PATH, headers=headers) - self.assertEqual(3, len(data)) - # We expect to get native UTC generated time back - trait_time = self.s_time - for event in data: - expected_generated = trait_time.isoformat() - self.assertIn(event['event_type'], ['Foo', 'Bar', 'Zoo']) - self.assertEqual(4, len(event['traits'])) - self.assertEqual({'status': {'nested': 'started'}}, event['raw']), - self.assertEqual(expected_generated, event['generated']) - for trait_name in ['trait_A', 'trait_B', - 'trait_C', 'trait_D']: - self.assertIn(trait_name, map(lambda x: x['name'], - event['traits'])) - trait_time += datetime.timedelta(days=1) - - def test_get_event_by_message_id(self): - event = self.get_json(self.PATH + "/100", headers=headers) - expected_traits = [{'name': 'trait_A', - 'type': 'string', - 'value': 'my_Bar_text'}, - {'name': 'trait_B', - 'type': 'integer', - 'value': '101'}, - {'name': 'trait_C', - 'type': 'float', - 'value': '100.123456'}, - {'name': 'trait_D', - 'type': 'datetime', - 'value': '2014-01-01T05:00:00'}] - self.assertEqual('100', event['message_id']) - self.assertEqual('Bar', event['event_type']) - self.assertEqual('2014-01-01T05:00:00', event['generated']) - self.assertEqual(expected_traits, event['traits']) - - def test_get_event_by_message_id_no_such_id(self): - data = self.get_json(self.PATH + "/DNE", headers=headers, - expect_errors=True) - self.assertEqual(404, data.status_int) - - def test_get_events_filter_event_type(self): - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'event_type', - 'value': 'Foo'}]) - self.assertEqual(1, len(data)) - - def test_get_events_filter_trait_no_type(self): - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_A', - 'value': 'my_Foo_text'}]) - self.assertEqual(1, len(data)) - self.assertEqual('Foo', data[0]['event_type']) - - def test_get_events_filter_trait_empty_type(self): - return - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_A', - 'value': 'my_Foo_text', - 'type': ''}]) - self.assertEqual(1, len(data)) - self.assertEqual('Foo', data[0]['event_type']) - - def test_get_events_filter_trait_invalid_type(self): - resp = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_A', - 'value': 'my_Foo_text', - 'type': 'whats-up'}], - expect_errors=True) - self.assertEqual(400, resp.status_code) - self.assertEqual("The data type whats-up is not supported. The " - "supported data type list is: [\'integer\', " - "\'float\', \'string\', \'datetime\']", - resp.json['error_message']['faultstring']) - - def test_get_events_filter_operator_invalid_type(self): - resp = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_A', - 'value': 'my_Foo_text', - 'op': 'whats-up'}], - expect_errors=True) - self.assertEqual(400, resp.status_code) - self.assertEqual("operator whats-up is not supported. 
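The expected payload for '/events/100' follows directly from the stepping rule spelled out in the fixture's comments: message ids advance by 100 and generated times by one day per event type. Restated:

    import datetime

    start = datetime.datetime(2013, 12, 31, 5, 0)
    events = [(str(base), event_type, start + datetime.timedelta(days=i))
              for i, (base, event_type) in enumerate(
                  zip((0, 100, 200), ("Foo", "Bar", "Zoo")))]

    # test_get_event_by_message_id fetches '/events/100':
    assert events[1] == ("100", "Bar", datetime.datetime(2014, 1, 1, 5, 0))
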
the " - "supported operators are: (\'lt\', \'le\', " - "\'eq\', \'ne\', \'ge\', \'gt\')", - resp.json['error_message']['faultstring']) - - def test_get_events_filter_text_trait(self): - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_A', - 'value': 'my_Foo_text', - 'type': 'string'}]) - self.assertEqual(1, len(data)) - self.assertEqual('Foo', data[0]['event_type']) - - def test_get_events_filter_int_trait(self): - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_B', - 'value': '101', - 'type': 'integer'}]) - self.assertEqual(1, len(data)) - self.assertEqual('Bar', data[0]['event_type']) - - traits = [x for x in data[0]['traits'] if x['name'] == 'trait_B'] - self.assertEqual(1, len(traits)) - self.assertEqual('integer', traits[0]['type']) - self.assertEqual('101', traits[0]['value']) - - def test_get_events_filter_float_trait(self): - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_C', - 'value': '200.123456', - 'type': 'float'}]) - self.assertEqual(1, len(data)) - self.assertEqual('Zoo', data[0]['event_type']) - - traits = [x for x in data[0]['traits'] if x['name'] == 'trait_C'] - self.assertEqual(1, len(traits)) - self.assertEqual('float', traits[0]['type']) - self.assertEqual('200.123456', traits[0]['value']) - - def test_get_events_filter_datetime_trait(self): - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_D', - 'value': '2014-01-01T05:00:00', - 'type': 'datetime'}]) - self.assertEqual(1, len(data)) - traits = [x for x in data[0]['traits'] if x['name'] == 'trait_D'] - self.assertEqual(1, len(traits)) - self.assertEqual('datetime', traits[0]['type']) - self.assertEqual('2014-01-01T05:00:00', traits[0]['value']) - - def test_get_events_multiple_filters(self): - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_B', - 'value': '1', - 'type': 'integer'}, - {'field': 'trait_A', - 'value': 'my_Foo_text', - 'type': 'string'}]) - self.assertEqual(1, len(data)) - self.assertEqual('Foo', data[0]['event_type']) - - def test_get_events_multiple_filters_no_matches(self): - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_B', - 'value': '101', - 'type': 'integer'}, - {'field': 'trait_A', - 'value': 'my_Foo_text', - 'type': 'string'}]) - - self.assertEqual(0, len(data)) - - def test_get_events_not_filters(self): - data = self.get_json(self.PATH, headers=headers, - q=[]) - self.assertEqual(3, len(data)) - - def test_get_events_filter_op_string(self): - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_A', - 'value': 'my_Foo_text', - 'type': 'string', - 'op': 'eq'}]) - self.assertEqual(1, len(data)) - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_A', - 'value': 'my_Bar_text', - 'type': 'string', - 'op': 'lt'}]) - self.assertEqual(0, len(data)) - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_A', - 'value': 'my_Zoo_text', - 'type': 'string', - 'op': 'le'}]) - self.assertEqual(3, len(data)) - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_A', - 'value': 'my_Foo_text', - 'type': 'string', - 'op': 'ne'}]) - self.assertEqual(2, len(data)) - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_A', - 'value': 'my_Bar_text', - 'type': 'string', - 'op': 'gt'}]) - self.assertEqual(2, len(data)) - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_A', - 'value': 'my_Zoo_text', - 'type': 'string', - 'op': 'ge'}]) - self.assertEqual(1, len(data)) - - 
def test_get_events_filter_op_integer(self): - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_B', - 'value': '101', - 'type': 'integer', - 'op': 'eq'}]) - self.assertEqual(1, len(data)) - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_B', - 'value': '201', - 'type': 'integer', - 'op': 'lt'}]) - self.assertEqual(2, len(data)) - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_B', - 'value': '1', - 'type': 'integer', - 'op': 'le'}]) - self.assertEqual(1, len(data)) - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_B', - 'value': '101', - 'type': 'integer', - 'op': 'ne'}]) - self.assertEqual(2, len(data)) - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_B', - 'value': '201', - 'type': 'integer', - 'op': 'gt'}]) - self.assertEqual(0, len(data)) - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_B', - 'value': '1', - 'type': 'integer', - 'op': 'ge'}]) - self.assertEqual(3, len(data)) - - def test_get_events_filter_op_float(self): - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_C', - 'value': '100.123456', - 'type': 'float', - 'op': 'eq'}]) - self.assertEqual(1, len(data)) - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_C', - 'value': '200.123456', - 'type': 'float', - 'op': 'lt'}]) - self.assertEqual(2, len(data)) - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_C', - 'value': '0.123456', - 'type': 'float', - 'op': 'le'}]) - self.assertEqual(1, len(data)) - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_C', - 'value': '100.123456', - 'type': 'float', - 'op': 'ne'}]) - self.assertEqual(2, len(data)) - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_C', - 'value': '200.123456', - 'type': 'float', - 'op': 'gt'}]) - self.assertEqual(0, len(data)) - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_C', - 'value': '0.123456', - 'type': 'float', - 'op': 'ge'}]) - self.assertEqual(3, len(data)) - - def test_get_events_filter_op_datatime(self): - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_D', - 'value': '2014-01-01T05:00:00', - 'type': 'datetime', - 'op': 'eq'}]) - self.assertEqual(1, len(data)) - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_D', - 'value': '2014-01-02T05:00:00', - 'type': 'datetime', - 'op': 'lt'}]) - self.assertEqual(2, len(data)) - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_D', - 'value': '2013-12-31T05:00:00', - 'type': 'datetime', - 'op': 'le'}]) - self.assertEqual(1, len(data)) - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_D', - 'value': '2014-01-01T05:00:00', - 'type': 'datetime', - 'op': 'ne'}]) - self.assertEqual(2, len(data)) - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_D', - 'value': '2014-01-02T05:00:00', - 'type': 'datetime', - 'op': 'gt'}]) - self.assertEqual(0, len(data)) - data = self.get_json(self.PATH, headers=headers, - q=[{'field': 'trait_D', - 'value': '2013-12-31T05:00:00', - 'type': 'datetime', - 'op': 'ge'}]) - self.assertEqual(3, len(data)) - - def test_get_events_filter_wrong_op(self): - self.assertRaises(webtest.app.AppError, - self.get_json, self.PATH, headers=headers, - q=[{'field': 'trait_B', - 'value': '1', - 'type': 'integer', - 'op': 'el'}]) - - -class EventRestrictionTestBase(v2.FunctionalTest, - tests_db.MixinTestsWithBackendScenarios): - - 
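Across the string, integer, float, and datetime op tests the pattern is uniform: the query value is coerced to the declared trait type and only then compared, so '101' under type 'integer' orders numerically rather than lexically. A hedged sketch of that semantics (helper names are assumptions; an unknown type or operator is what the API rejects with a 400):

    import datetime
    import operator

    OPS = {"lt": operator.lt, "le": operator.le, "eq": operator.eq,
           "ne": operator.ne, "ge": operator.ge, "gt": operator.gt}
    CASTS = {"integer": int, "float": float, "string": str,
             "datetime": lambda v: datetime.datetime.strptime(
                 v, "%Y-%m-%dT%H:%M:%S")}

    def trait_matches(stored, query_value, query_type, op):
        cast = CASTS[query_type]
        return OPS[op](cast(stored), cast(query_value))

    trait_b = ["1", "101", "201"]          # the three trait_B values stored
    assert sum(trait_matches(v, "201", "integer", "lt") for v in trait_b) == 2
    assert sum(trait_matches(v, "1", "integer", "ge") for v in trait_b) == 3
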
def setUp(self): - super(EventRestrictionTestBase, self).setUp() - self.CONF.set_override('default_api_return_limit', 10, group='api') - self._generate_models() - - def _generate_models(self): - event_models = [] - base = 0 - self.s_time = datetime.datetime(2013, 12, 31, 5, 0) - self.trait_time = datetime.datetime(2013, 12, 31, 5, 0) - for i in range(20): - trait_models = [models.Trait(name, type, value) - for name, type, value in [ - ('trait_A', models.Trait.TEXT_TYPE, - "my_text"), - ('trait_B', models.Trait.INT_TYPE, - base + 1), - ('trait_C', models.Trait.FLOAT_TYPE, - float(base) + 0.123456), - ('trait_D', models.Trait.DATETIME_TYPE, - self.trait_time)]] - - event_models.append( - models.Event(message_id=str(uuid.uuid4()), - event_type='foo.bar', - generated=self.trait_time, - traits=trait_models, - raw={'status': {'nested': 'started'}})) - self.trait_time += datetime.timedelta(seconds=1) - self.event_conn.record_events(event_models) - - -class TestEventRestriction(EventRestrictionTestBase): - - def test_get_limit(self): - data = self.get_json('/events?limit=1', headers=headers) - self.assertEqual(1, len(data)) - - def test_get_limit_negative(self): - self.assertRaises(webtest.app.AppError, - self.get_json, '/events?limit=-2', headers=headers) - - def test_get_limit_bigger(self): - data = self.get_json('/events?limit=100', headers=headers) - self.assertEqual(20, len(data)) - - def test_get_default_limit(self): - data = self.get_json('/events', headers=headers) - self.assertEqual(10, len(data)) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/api/v2/test_list_events_scenarios.py ceilometer-5.0.0~b3/ceilometer/tests/api/v2/test_list_events_scenarios.py --- ceilometer-5.0.0~b2/ceilometer/tests/api/v2/test_list_events_scenarios.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/api/v2/test_list_events_scenarios.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,158 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Test listing raw events. 
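The four restriction cases reduce to one rule: a missing limit falls back to the configured default_api_return_limit, a non-positive limit is a 400, and an oversized limit is capped only by the amount of stored data. Restated under an assumed helper name, against this fixture's 20-event store:

    def effective_limit(requested, default=10, available=20):
        if requested is None:
            requested = default             # test_get_default_limit -> 10
        if requested <= 0:
            raise ValueError("Limit must be positive")   # limit=-2 -> HTTP 400
        return min(requested, available)

    assert effective_limit(1) == 1          # test_get_limit
    assert effective_limit(100) == 20       # test_get_limit_bigger
    assert effective_limit(None) == 10      # test_get_default_limit
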
-""" - -import datetime - -import mock -from oslo_utils import timeutils -import six - -from ceilometer.publisher import utils -from ceilometer import sample -from ceilometer.tests.api import v2 -from ceilometer.tests import db as tests_db - - -class TestListEvents(v2.FunctionalTest, - tests_db.MixinTestsWithBackendScenarios): - - def setUp(self): - super(TestListEvents, self).setUp() - patcher = mock.patch.object(timeutils, 'utcnow') - self.addCleanup(patcher.stop) - self.mock_utcnow = patcher.start() - self.mock_utcnow.return_value = datetime.datetime(2014, 2, 11, 16, 42) - self.sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project1', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - 'dict_properties': {'key': 'value'}, - 'not_ignored_list': ['returned'], - }, - source='test_source', - ) - msg = utils.meter_message_from_counter( - self.sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - self.sample2 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id2', - 'project2', - 'resource-id-alternate', - timestamp=datetime.datetime(2012, 7, 2, 10, 41), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample2', - }, - source='source2', - ) - msg2 = utils.meter_message_from_counter( - self.sample2, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg2) - - def test_all(self): - data = self.get_json('/meters/instance') - self.assertEqual(2, len(data)) - for s in data: - self.assertEqual(timeutils.utcnow().isoformat(), s['recorded_at']) - - def test_all_trailing_slash(self): - data = self.get_json('/meters/instance/') - self.assertEqual(2, len(data)) - - def test_empty_project(self): - data = self.get_json('/meters/instance', - q=[{'field': 'project_id', - 'value': 'no-such-project', - }]) - self.assertEqual([], data) - - def test_by_project(self): - data = self.get_json('/meters/instance', - q=[{'field': 'project_id', - 'value': 'project1', - }]) - self.assertEqual(1, len(data)) - - def test_empty_resource(self): - data = self.get_json('/meters/instance', - q=[{'field': 'resource_id', - 'value': 'no-such-resource', - }]) - self.assertEqual([], data) - - def test_by_resource(self): - data = self.get_json('/meters/instance', - q=[{'field': 'resource_id', - 'value': 'resource-id', - }]) - self.assertEqual(1, len(data)) - - def test_empty_source(self): - data = self.get_json('/meters/instance', - q=[{'field': 'source', - 'value': 'no-such-source', - }]) - self.assertEqual(0, len(data)) - - def test_by_source(self): - data = self.get_json('/meters/instance', - q=[{'field': 'source', - 'value': 'test_source', - }]) - self.assertEqual(1, len(data)) - - def test_empty_user(self): - data = self.get_json('/meters/instance', - q=[{'field': 'user_id', - 'value': 'no-such-user', - }]) - self.assertEqual([], data) - - def test_by_user(self): - data = self.get_json('/meters/instance', - q=[{'field': 'user_id', - 'value': 'user-id', - }]) - self.assertEqual(1, len(data)) - - def test_metadata(self): - data = self.get_json('/meters/instance', - q=[{'field': 'resource_id', - 'value': 'resource-id', - }]) - sample = data[0] - self.assertIn('resource_metadata', sample) - self.assertEqual( - [('dict_properties.key', 'value'), - ('display_name', 'test-server'), - ('not_ignored_list', "['returned']"), - ('tag', 'self.sample'), - ], - list(sorted(six.iteritems(sample['resource_metadata'])))) 
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/api/v2/test_list_meters_scenarios.py ceilometer-5.0.0~b3/ceilometer/tests/api/v2/test_list_meters_scenarios.py --- ceilometer-5.0.0~b2/ceilometer/tests/api/v2/test_list_meters_scenarios.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/api/v2/test_list_meters_scenarios.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,791 +0,0 @@ -# -# Copyright 2012 Red Hat, Inc. -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Test listing meters. -""" - -import base64 -import datetime - -from oslo_serialization import jsonutils -import six -import webtest.app - -from ceilometer.publisher import utils -from ceilometer import sample -from ceilometer.tests.api import v2 -from ceilometer.tests import db as tests_db - - -class TestListEmptyMeters(v2.FunctionalTest, - tests_db.MixinTestsWithBackendScenarios): - - def test_empty(self): - data = self.get_json('/meters') - self.assertEqual([], data) - - -class TestValidateUserInput(v2.FunctionalTest, - tests_db.MixinTestsWithBackendScenarios): - - def test_list_meters_query_float_metadata(self): - self.assertRaises(webtest.app.AppError, self.get_json, - '/meters/meter.test', - q=[{'field': 'metadata.util', - 'op': 'eq', - 'value': '0.7.5', - 'type': 'float'}]) - self.assertRaises(webtest.app.AppError, self.get_json, - '/meters/meter.test', - q=[{'field': 'metadata.util', - 'op': 'eq', - 'value': 'abacaba', - 'type': 'boolean'}]) - self.assertRaises(webtest.app.AppError, self.get_json, - '/meters/meter.test', - q=[{'field': 'metadata.util', - 'op': 'eq', - 'value': '45.765', - 'type': 'integer'}]) - - -class TestListMetersRestriction(v2.FunctionalTest, - tests_db.MixinTestsWithBackendScenarios): - - def setUp(self): - super(TestListMetersRestriction, self).setUp() - self.CONF.set_override('default_api_return_limit', 3, group='api') - for x in range(5): - for i in range(5): - s = sample.Sample( - 'volume.size%s' % x, - 'gauge', - 'GiB', - 5 + i, - 'user-id', - 'project1', - 'resource-id', - timestamp=(datetime.datetime(2012, 9, 25, 10, 30) + - datetime.timedelta(seconds=i)), - resource_metadata={'display_name': 'test-volume', - 'tag': 'self.sample', - }, - source='source1', - ) - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def test_meter_limit(self): - data = self.get_json('/meters?limit=1') - self.assertEqual(1, len(data)) - - def test_meter_limit_negative(self): - self.assertRaises(webtest.app.AppError, - self.get_json, - '/meters?limit=-2') - - def test_meter_limit_bigger(self): - data = self.get_json('/meters?limit=42') - self.assertEqual(5, len(data)) - - def test_meter_default_limit(self): - data = self.get_json('/meters') - self.assertEqual(3, len(data)) - - def test_old_sample_limit(self): - data = self.get_json('/meters/volume.size0?limit=1') - self.assertEqual(1, len(data)) - - def test_old_sample_limit_negative(self): - self.assertRaises(webtest.app.AppError, - 
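TestValidateUserInput's three failures share one cause: the value string cannot be parsed as the declared query type, and the API turns that into a 400 (surfacing in the tests as webtest.app.AppError). A sketch of the coercion step under assumed names:

    def coerce_query_value(value, type_name):
        casts = {"integer": int, "float": float, "string": str,
                 "boolean": lambda v: {"true": True, "false": False}[v.lower()]}
        try:
            return casts[type_name](value)
        except (KeyError, ValueError):
            raise ValueError("unable to convert %r to %s" % (value, type_name))

    coerce_query_value("45.765", "float")            # parses fine
    for bad in [("0.7.5", "float"),                  # two dots
                ("abacaba", "boolean"),              # not true/false
                ("45.765", "integer")]:              # int() rejects it
        try:
            coerce_query_value(*bad)
        except ValueError:
            pass                                     # -> HTTP 400 in the API
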
self.get_json, - '/meters/volume.size0?limit=-2') - - def test_old_sample_limit_bigger(self): - data = self.get_json('/meters/volume.size0?limit=42') - self.assertEqual(5, len(data)) - - def test_old_sample_default_limit(self): - data = self.get_json('/meters/volume.size0') - self.assertEqual(3, len(data)) - - def test_sample_limit(self): - data = self.get_json('/samples?limit=1') - self.assertEqual(1, len(data)) - - def test_sample_limit_negative(self): - self.assertRaises(webtest.app.AppError, - self.get_json, - '/samples?limit=-2') - - def test_sample_limit_bigger(self): - data = self.get_json('/samples?limit=42') - self.assertEqual(25, len(data)) - - def test_sample_default_limit(self): - data = self.get_json('/samples') - self.assertEqual(3, len(data)) - - -class TestListMeters(v2.FunctionalTest, - tests_db.MixinTestsWithBackendScenarios): - - def setUp(self): - super(TestListMeters, self).setUp() - self.messages = [] - for cnt in [ - sample.Sample( - 'meter.test', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - 'size': 123, - 'util': 0.75, - 'is_public': True}, - source='test_source'), - sample.Sample( - 'meter.test', - 'cumulative', - '', - 3, - 'user-id', - 'project-id', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 11, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample1', - 'size': 0, - 'util': 0.47, - 'is_public': False}, - source='test_source'), - sample.Sample( - 'meter.mine', - 'gauge', - '', - 1, - 'user-id', - 'project-id', - 'resource-id2', - timestamp=datetime.datetime(2012, 7, 2, 10, 41), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample2', - 'size': 456, - 'util': 0.64, - 'is_public': False}, - source='test_source'), - sample.Sample( - 'meter.test', - 'cumulative', - '', - 1, - 'user-id2', - 'project-id2', - 'resource-id3', - timestamp=datetime.datetime(2012, 7, 2, 10, 42), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample3', - 'size': 0, - 'util': 0.75, - 'is_public': False}, - source='test_source'), - sample.Sample( - 'meter.test.new', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample3', - 'size': 0, - 'util': 0.75, - 'is_public': False}, - source='test_source'), - - sample.Sample( - 'meter.mine', - 'gauge', - '', - 1, - 'user-id4', - 'project-id2', - 'resource-id4', - timestamp=datetime.datetime(2012, 7, 2, 10, 43), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample4', - 'properties': { - 'prop_1': 'prop_value', - 'prop_2': {'sub_prop_1': - 'sub_prop_value'}, - 'prop.3': {'$sub_prop.2': - 'sub_prop_value2'} - }, - 'size': 0, - 'util': 0.58, - 'is_public': True}, - source='test_source1'), - sample.Sample( - u'meter.accent\xe9\u0437', - 'gauge', - '', - 1, - 'user-id4', - 'project-id2', - 'resource-id4', - timestamp=datetime.datetime(2014, 7, 2, 10, 43), - resource_metadata={}, - source='test_source1')]: - msg = utils.meter_message_from_counter( - cnt, self.CONF.publisher.telemetry_secret) - self.messages.append(msg) - self.conn.record_metering_data(msg) - - def test_list_meters(self): - data = self.get_json('/meters') - self.assertEqual(6, len(data)) - self.assertEqual(set(['resource-id', - 'resource-id2', - 'resource-id3', - 'resource-id4']), - 
set(r['resource_id'] for r in data)) - self.assertEqual(set(['meter.test', 'meter.mine', 'meter.test.new', - u'meter.accent\xe9\u0437']), - set(r['name'] for r in data)) - self.assertEqual(set(['test_source', 'test_source1']), - set(r['source'] for r in data)) - - def test_meters_query_with_timestamp(self): - date_time = datetime.datetime(2012, 7, 2, 10, 41) - isotime = date_time.isoformat() - resp = self.get_json('/meters', - q=[{'field': 'timestamp', - 'op': 'gt', - 'value': isotime}], - expect_errors=True) - self.assertEqual(400, resp.status_code) - self.assertEqual('Unknown argument: "timestamp": ' - 'not valid for this resource', - jsonutils.loads(resp.body)['error_message'] - ['faultstring']) - - def test_list_samples(self): - data = self.get_json('/samples') - self.assertEqual(7, len(data)) - - def test_query_samples_with_invalid_field_name_and_non_eq_operator(self): - resp = self.get_json('/samples', - q=[{'field': 'non_valid_field_name', - 'op': 'gt', - 'value': 3}], - expect_errors=True) - resp_string = jsonutils.loads(resp.body) - fault_string = resp_string['error_message']['faultstring'] - msg = ('Unknown argument: "non_valid_field_name"' - ': unrecognized field in query: ' - '[= res['first_sample_timestamp']) - self.assertIn('last_sample_timestamp', res) - self.assertTrue(last.isoformat() <= res['last_sample_timestamp']) - - def test_instance_no_metadata(self): - timestamp = datetime.datetime(2012, 7, 2, 10, 40) - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=timestamp, - resource_metadata=None, - source='test', - ) - msg = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - data = self.get_json('/resources') - self.assertEqual(1, len(data)) - self._verify_resource_timestamps(data[0], timestamp, timestamp) - - def test_instances(self): - timestamps = { - 'resource-id': datetime.datetime(2012, 7, 2, 10, 40), - 'resource-id-alternate': datetime.datetime(2012, 7, 2, 10, 41), - } - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=timestamps['resource-id'], - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - }, - source='test', - ) - msg = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - sample2 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id-alternate', - timestamp=timestamps['resource-id-alternate'], - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample2', - }, - source='test', - ) - msg2 = utils.meter_message_from_counter( - sample2, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg2) - - data = self.get_json('/resources') - self.assertEqual(2, len(data)) - for res in data: - timestamp = timestamps.get(res['resource_id']) - self._verify_resource_timestamps(res, timestamp, timestamp) - - def test_instance_multiple_samples(self): - timestamps = [ - datetime.datetime(2012, 7, 2, 10, 41), - datetime.datetime(2012, 7, 2, 10, 42), - datetime.datetime(2012, 7, 2, 10, 40), - ] - for timestamp in timestamps: - datapoint = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=timestamp, - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample-%s' % timestamp, - }, - source='test', - 
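The 6 in test_list_meters near the top of this hunk, against 7 recorded samples, comes from /meters listing distinct meters rather than samples: the first two 'meter.test' samples differ only in volume and timestamp and collapse into one meter. Keying meters by (name, resource) below is a simplification of the real uniqueness rule, but it reproduces this fixture's count:

    recorded = [("meter.test", "resource-id"), ("meter.test", "resource-id"),
                ("meter.mine", "resource-id2"), ("meter.test", "resource-id3"),
                ("meter.test.new", "resource-id"), ("meter.mine", "resource-id4"),
                (u"meter.accent\xe9\u0437", "resource-id4")]
    assert len(recorded) == 7             # test_list_samples
    assert len(set(recorded)) == 6        # test_list_meters
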
) - msg = utils.meter_message_from_counter( - datapoint, - self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - data = self.get_json('/resources') - self.assertEqual(1, len(data)) - self._verify_resource_timestamps(data[0], - timestamps[-1], timestamps[1]) - - def test_instances_one(self): - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - }, - source='test', - ) - msg = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - sample2 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id-alternate', - timestamp=datetime.datetime(2012, 7, 2, 10, 41), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample2', - }, - source='test', - ) - msg2 = utils.meter_message_from_counter( - sample2, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg2) - - data = self.get_json('/resources/resource-id') - self.assertEqual('resource-id', data['resource_id']) - - def test_with_source(self): - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - }, - source='test_list_resources', - ) - msg = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - sample2 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id2', - 'project-id', - 'resource-id-alternate', - timestamp=datetime.datetime(2012, 7, 2, 10, 41), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample2', - }, - source='not-test', - ) - msg2 = utils.meter_message_from_counter( - sample2, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg2) - - data = self.get_json('/resources', q=[{'field': 'source', - 'value': 'test_list_resources', - }]) - ids = [r['resource_id'] for r in data] - self.assertEqual(['resource-id'], ids) - sources = [r['source'] for r in data] - self.assertEqual(['test_list_resources'], sources) - - def test_with_invalid_resource_id(self): - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id-1', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - }, - source='test_list_resources', - ) - msg = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - sample2 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id2', - 'project-id', - 'resource-id-2', - timestamp=datetime.datetime(2012, 7, 2, 10, 41), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample2', - }, - source='test_list_resources', - ) - msg2 = utils.meter_message_from_counter( - sample2, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg2) - - resp1 = self.get_json('/resources/resource-id-1') - self.assertEqual("resource-id-1", resp1["resource_id"]) - - resp2 = self.get_json('/resources/resource-id-2') - self.assertEqual("resource-id-2", resp2["resource_id"]) - - resp3 = self.get_json('/resources/resource-id-3', 
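test_instance_multiple_samples above records its three samples out of chronological order on purpose: the resource's first and last sample timestamps must come out as the minimum and maximum over all samples, not as the insertion order. Restated:

    import datetime

    timestamps = [datetime.datetime(2012, 7, 2, 10, 41),
                  datetime.datetime(2012, 7, 2, 10, 42),
                  datetime.datetime(2012, 7, 2, 10, 40)]
    assert min(timestamps) == timestamps[-1]   # first_sample_timestamp
    assert max(timestamps) == timestamps[1]    # last_sample_timestamp
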
expect_errors=True) - self.assertEqual(404, resp3.status_code) - json_data = resp3.body - if six.PY3: - json_data = json_data.decode('utf-8') - self.assertEqual("Resource resource-id-3 Not Found", - json.loads(json_data)['error_message'] - ['faultstring']) - - def test_with_user(self): - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - }, - source='test_list_resources', - ) - msg = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - sample2 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id2', - 'project-id', - 'resource-id-alternate', - timestamp=datetime.datetime(2012, 7, 2, 10, 41), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample2', - }, - source='not-test', - ) - msg2 = utils.meter_message_from_counter( - sample2, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg2) - - data = self.get_json('/resources', q=[{'field': 'user_id', - 'value': 'user-id', - }]) - ids = [r['resource_id'] for r in data] - self.assertEqual(['resource-id'], ids) - - def test_with_project(self): - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - }, - source='test_list_resources', - ) - msg = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - sample2 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id2', - 'project-id2', - 'resource-id-alternate', - timestamp=datetime.datetime(2012, 7, 2, 10, 41), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample2', - }, - source='not-test', - ) - msg2 = utils.meter_message_from_counter( - sample2, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg2) - - data = self.get_json('/resources', q=[{'field': 'project_id', - 'value': 'project-id', - }]) - ids = [r['resource_id'] for r in data] - self.assertEqual(['resource-id'], ids) - - def test_with_user_non_admin(self): - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id2', - 'project-id2', - 'resource-id-alternate', - timestamp=datetime.datetime(2012, 7, 2, 10, 41), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample1', - }, - source='not-test', - ) - msg2 = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg2) - - data = self.get_json('/resources', - headers={"X-Roles": "Member", - "X-Project-Id": "project-id2"}) - ids = set(r['resource_id'] for r in data) - self.assertEqual(set(['resource-id-alternate']), ids) - - def test_with_user_wrong_tenant(self): - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id2', - 'project-id2', - 'resource-id-alternate', - timestamp=datetime.datetime(2012, 7, 2, 10, 41), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample1', - }, - source='not-test', - ) - msg2 = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg2) - - data = self.get_json('/resources', - headers={"X-Roles": "Member", - 
"X-Project-Id": "project-wrong"}) - ids = set(r['resource_id'] for r in data) - self.assertEqual(set(), ids) - - def test_metadata(self): - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - 'dict_properties': {'key.$1': {'$key': 'val'}}, - 'not_ignored_list': ['returned'], - }, - source='test', - ) - msg = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - data = self.get_json('/resources') - metadata = data[0]['metadata'] - self.assertEqual([(u'dict_properties.key:$1:$key', u'val'), - (u'display_name', u'test-server'), - (u'not_ignored_list', u"['returned']"), - (u'tag', u'self.sample')], - list(sorted(six.iteritems(metadata)))) - - def test_resource_meter_links(self): - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - }, - source='test_list_resources', - ) - msg = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - data = self.get_json('/resources') - links = data[0]['links'] - self.assertEqual(2, len(links)) - self.assertEqual('self', links[0]['rel']) - self.assertTrue((self.PATH_PREFIX + '/resources/resource-id') - in links[0]['href']) - self.assertEqual('instance', links[1]['rel']) - self.assertTrue((self.PATH_PREFIX + '/meters/instance?' - 'q.field=resource_id&q.value=resource-id') - in links[1]['href']) - - def test_resource_skip_meter_links(self): - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - }, - source='test_list_resources', - ) - msg = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - data = self.get_json('/resources?meter_links=0') - links = data[0]['links'] - self.assertEqual(len(links), 1) - self.assertEqual(links[0]['rel'], 'self') - self.assertTrue((self.PATH_PREFIX + '/resources/resource-id') - in links[0]['href']) - - -class TestListResourcesRestriction(v2.FunctionalTest, - tests_db.MixinTestsWithBackendScenarios): - - def setUp(self): - super(TestListResourcesRestriction, self).setUp() - self.CONF.set_override('default_api_return_limit', 10, group='api') - for i in range(20): - s = sample.Sample( - 'volume.size', - 'gauge', - 'GiB', - 5 + i, - 'user-id', - 'project1', - 'resource-id%s' % i, - timestamp=(datetime.datetime(2012, 9, 25, 10, 30) + - datetime.timedelta(seconds=i)), - resource_metadata={'display_name': 'test-volume', - 'tag': 'self.sample', - }, - source='source1', - ) - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def test_resource_limit(self): - data = self.get_json('/resources?limit=1') - self.assertEqual(1, len(data)) - - def test_resource_limit_negative(self): - self.assertRaises(webtest.app.AppError, self.get_json, - '/resources?limit=-2') - - def test_resource_limit_bigger(self): - data = self.get_json('/resources?limit=42') - self.assertEqual(20, len(data)) - - def 
test_resource_default_limit(self):
-        data = self.get_json('/resources')
-        self.assertEqual(10, len(data))
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/api/v2/test_post_samples_scenarios.py ceilometer-5.0.0~b3/ceilometer/tests/api/v2/test_post_samples_scenarios.py
--- ceilometer-5.0.0~b2/ceilometer/tests/api/v2/test_post_samples_scenarios.py 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/api/v2/test_post_samples_scenarios.py 1970-01-01 00:00:00.000000000 +0000
@@ -1,348 +0,0 @@
-#
-# Copyright 2013 Red Hat, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Test posting samples."""
-
-import copy
-import datetime
-
-import mock
-from oslo_utils import timeutils
-from oslotest import mockpatch
-
-from ceilometer.tests.api import v2
-from ceilometer.tests import db as tests_db
-
-
-class TestPostSamples(v2.FunctionalTest,
-                      tests_db.MixinTestsWithBackendScenarios):
-    def fake_notifier_sample(self, ctxt, event_type, payload):
-        for m in payload:
-            del m['message_signature']
-        self.published.append(payload)
-
-    def setUp(self):
-        self.published = []
-        notifier = mock.Mock()
-        notifier.info.side_effect = self.fake_notifier_sample
-        self.useFixture(mockpatch.Patch('oslo_messaging.Notifier',
-                                        return_value=notifier))
-        super(TestPostSamples, self).setUp()
-
-    def test_one(self):
-        s1 = [{'counter_name': 'apples',
-               'counter_type': 'gauge',
-               'counter_unit': 'instance',
-               'counter_volume': 1,
-               'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
-               'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
-               'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
-               'resource_metadata': {'name1': 'value1',
-                                     'name2': 'value2'}}]
-        data = self.post_json('/meters/apples/', s1)
-
-        # timestamp not given so it is generated.
-        s1[0]['timestamp'] = data.json[0]['timestamp']
-        # Ignore message id that is randomly generated
-        s1[0]['message_id'] = data.json[0]['message_id']
-        # source is generated if not provided.
-        s1[0]['source'] = '%s:openstack' % s1[0]['project_id']
-
-        self.assertEqual(s1, data.json)
-        self.assertEqual(s1[0], self.published[0][0])
-
-    def test_nested_metadata(self):
-        s1 = [{'counter_name': 'apples',
-               'counter_type': 'gauge',
-               'counter_unit': 'instance',
-               'counter_volume': 1,
-               'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
-               'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
-               'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
-               'resource_metadata': {'nest.name1': 'value1',
-                                     'name2': 'value2',
-                                     'nest.name2': 'value3'}}]
-
-        data = self.post_json('/meters/apples/', s1)
-
-        # timestamp not given so it is generated.
-        s1[0]['timestamp'] = data.json[0]['timestamp']
-        # Ignore message id that is randomly generated
-        s1[0]['message_id'] = data.json[0]['message_id']
-        # source is generated if not provided.
-        s1[0]['source'] = '%s:openstack' % s1[0]['project_id']
-
-        unwound = copy.copy(s1[0])
-        unwound['resource_metadata'] = {'nest': {'name1': 'value1',
-                                                 'name2': 'value3'},
-                                        'name2': 'value2'}
-        # only the published sample should be unwound, not the representation
-        # in the API response
-        self.assertEqual(s1[0], data.json[0])
-        self.assertEqual(unwound, self.published[0][0])
-
-    def test_invalid_counter_type(self):
-        s1 = [{'counter_name': 'my_counter_name',
-               'counter_type': 'INVALID_TYPE',
-               'counter_unit': 'instance',
-               'counter_volume': 1,
-               'source': 'closedstack',
-               'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
-               'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
-               'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
-               'resource_metadata': {'name1': 'value1',
-                                     'name2': 'value2'}}]
-
-        data = self.post_json('/meters/my_counter_name/', s1,
-                              expect_errors=True)
-
-        self.assertEqual(400, data.status_int)
-        self.assertEqual(0, len(self.published))
-
-    def test_messsage_id_provided(self):
-        """Do not accept a sample with message_id."""
-        s1 = [{'counter_name': 'my_counter_name',
-               'counter_type': 'gauge',
-               'counter_unit': 'instance',
-               'counter_volume': 1,
-               'message_id': 'evil',
-               'source': 'closedstack',
-               'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
-               'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
-               'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
-               'resource_metadata': {'name1': 'value1',
-                                     'name2': 'value2'}}]
-
-        data = self.post_json('/meters/my_counter_name/', s1,
-                              expect_errors=True)
-
-        self.assertEqual(400, data.status_int)
-        self.assertEqual(0, len(self.published))
-
-    def test_wrong_project_id(self):
-        """Do not accept cross-posting samples to different projects."""
-        s1 = [{'counter_name': 'my_counter_name',
-               'counter_type': 'gauge',
-               'counter_unit': 'instance',
-               'counter_volume': 1,
-               'source': 'closedstack',
-               'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
-               'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
-               'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
-               'resource_metadata': {'name1': 'value1',
-                                     'name2': 'value2'}}]
-
-        data = self.post_json('/meters/my_counter_name/', s1,
-                              expect_errors=True,
-                              headers={
-                                  "X-Roles": "Member",
-                                  "X-Tenant-Name": "lu-tenant",
-                                  "X-Project-Id":
-                                  "bc23a9d531064583ace8f67dad60f6bb",
-                              })
-
-        self.assertEqual(400, data.status_int)
-        self.assertEqual(0, len(self.published))
-
-    def test_multiple_samples(self):
-        """Send multiple samples.
-
-        The use case here is to reduce the chatter and send the counters
-        at a slower cadence.
-        """
-        samples = []
-        for x in range(6):
-            dt = datetime.datetime(2012, 8, 27, x, 0, tzinfo=None)
-            s = {'counter_name': 'apples',
-                 'counter_type': 'gauge',
-                 'counter_unit': 'instance',
-                 'counter_volume': float(x * 3),
-                 'source': 'evil',
-                 'timestamp': dt.isoformat(),
-                 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
-                 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
-                 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
-                 'resource_metadata': {'name1': str(x),
-                                       'name2': str(x + 4)}}
-            samples.append(s)
-
-        data = self.post_json('/meters/apples/', samples)
-
-        for x, s in enumerate(samples):
-            # source is modified to include the project_id.
-            s['source'] = '%s:%s' % (s['project_id'],
-                                     s['source'])
-            # Ignore message id that is randomly generated
-            s['message_id'] = data.json[x]['message_id']
-
-            # remove tzinfo to compare generated timestamp
-            # with the provided one
-            c = data.json[x]
-            timestamp = timeutils.parse_isotime(c['timestamp'])
-            c['timestamp'] = timestamp.replace(tzinfo=None).isoformat()
-
-            # do the same on the pipeline
-            msg = self.published[0][x]
-            timestamp = timeutils.parse_isotime(msg['timestamp'])
-            msg['timestamp'] = timestamp.replace(tzinfo=None).isoformat()
-
-            self.assertEqual(s, c)
-            self.assertEqual(s, self.published[0][x])
-
-    def test_missing_mandatory_fields(self):
-        """Do not accept posting samples with missing mandatory fields."""
-        s1 = [{'counter_name': 'my_counter_name',
-               'counter_type': 'gauge',
-               'counter_unit': 'instance',
-               'counter_volume': 1,
-               'source': 'closedstack',
-               'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
-               'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
-               'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
-               'resource_metadata': {'name1': 'value1',
-                                     'name2': 'value2'}}]
-
-        # one by one try posting without a mandatory field.
-        for m in ['counter_volume', 'counter_unit', 'counter_type',
-                  'resource_id', 'counter_name']:
-            s_broke = copy.copy(s1)
-            del s_broke[0][m]
-            print('posting without %s' % m)
-            data = self.post_json('/meters/my_counter_name', s_broke,
-                                  expect_errors=True)
-            self.assertEqual(400, data.status_int)
-
-    def test_multiple_project_id_and_admin(self):
-        """Allow admin to set multiple project_ids."""
-        s1 = [{'counter_name': 'my_counter_name',
-               'counter_type': 'gauge',
-               'counter_unit': 'instance',
-               'counter_volume': 1,
-               'source': 'closedstack',
-               'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
-               'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
-               'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
-               },
-              {'counter_name': 'my_counter_name',
-               'counter_type': 'gauge',
-               'counter_unit': 'instance',
-               'counter_volume': 2,
-               'source': 'closedstack',
-               'project_id': '4af38dca-f6fc-11e2-94f5-14dae9283f29',
-               'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
-               'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
-               'resource_metadata': {'name1': 'value1',
-                                     'name2': 'value2'}}]
-        data = self.post_json('/meters/my_counter_name/', s1,
-                              headers={"X-Roles": "admin"})
-
-        self.assertEqual(201, data.status_int)
-        for x, s in enumerate(s1):
-            # source is modified to include the project_id.
-            s['source'] = '%s:%s' % (s['project_id'],
-                                     'closedstack')
-            # Ignore message id that is randomly generated
-            s['message_id'] = data.json[x]['message_id']
-            # timestamp not given so it is generated.
-            s['timestamp'] = data.json[x]['timestamp']
-            s.setdefault('resource_metadata', dict())
-            self.assertEqual(s, data.json[x])
-            self.assertEqual(s, self.published[0][x])
-
-    def test_multiple_samples_multiple_sources(self):
-        """Test posting with special conditions.
-
-        Accept a single post with multiple sources, some of them null.
- """ - s1 = [{'counter_name': 'my_counter_name', - 'counter_type': 'gauge', - 'counter_unit': 'instance', - 'counter_volume': 1, - 'source': 'paperstack', - 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68', - 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff', - 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', - }, - {'counter_name': 'my_counter_name', - 'counter_type': 'gauge', - 'counter_unit': 'instance', - 'counter_volume': 5, - 'source': 'waterstack', - 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68', - 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff', - 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', - }, - {'counter_name': 'my_counter_name', - 'counter_type': 'gauge', - 'counter_unit': 'instance', - 'counter_volume': 2, - 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68', - 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff', - 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', - 'resource_metadata': {'name1': 'value1', - 'name2': 'value2'}}] - data = self.post_json('/meters/my_counter_name/', s1, - expect_errors=True) - self.assertEqual(201, data.status_int) - for x, s in enumerate(s1): - # source is modified to include the project_id. - s['source'] = '%s:%s' % ( - s['project_id'], - s.get('source', self.CONF.sample_source) - ) - # Ignore message id that is randomly generated - s['message_id'] = data.json[x]['message_id'] - # timestamp not given so it is generated. - s['timestamp'] = data.json[x]['timestamp'] - s.setdefault('resource_metadata', dict()) - self.assertEqual(s, data.json[x]) - self.assertEqual(s, self.published[0][x]) - - def test_missing_project_user_id(self): - """Ensure missing project & user IDs are defaulted appropriately.""" - s1 = [{'counter_name': 'my_counter_name', - 'counter_type': 'gauge', - 'counter_unit': 'instance', - 'counter_volume': 1, - 'source': 'closedstack', - 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', - 'resource_metadata': {'name1': 'value1', - 'name2': 'value2'}}] - - project_id = 'bc23a9d531064583ace8f67dad60f6bb' - user_id = 'fd87807-12d2-4b38-9c70-5f5c2ac427ff' - data = self.post_json('/meters/my_counter_name/', s1, - expect_errors=True, - headers={ - 'X-Roles': 'chief-bottle-washer', - 'X-Project-Id': project_id, - 'X-User-Id': user_id, - }) - - self.assertEqual(201, data.status_int) - for x, s in enumerate(s1): - # source is modified to include the project_id. - s['source'] = '%s:%s' % (project_id, - s['source']) - # Ignore message id that is randomly generated - s['message_id'] = data.json[x]['message_id'] - # timestamp not given so it is generated. - s['timestamp'] = data.json[x]['timestamp'] - s['user_id'] = user_id - s['project_id'] = project_id - - self.assertEqual(s, data.json[x]) - self.assertEqual(s, self.published[0][x]) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/api/v2/test_query.py ceilometer-5.0.0~b3/ceilometer/tests/api/v2/test_query.py --- ceilometer-5.0.0~b2/ceilometer/tests/api/v2/test_query.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/api/v2/test_query.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,417 +0,0 @@ -# Copyright 2013 OpenStack Foundation. -# All Rights Reserved. -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Test the methods related to query.""" -import datetime - -import fixtures -import mock -from oslo_utils import timeutils -from oslotest import base -from oslotest import mockpatch -import wsme - -from ceilometer.alarm.storage import base as alarm_storage_base -from ceilometer.api.controllers.v2 import base as v2_base -from ceilometer.api.controllers.v2 import meters -from ceilometer.api.controllers.v2 import utils -from ceilometer import storage -from ceilometer.storage import base as storage_base -from ceilometer.tests import base as tests_base - - -class TestQuery(base.BaseTestCase): - def setUp(self): - super(TestQuery, self).setUp() - self.useFixture(fixtures.MonkeyPatch( - 'pecan.response', mock.MagicMock())) - - def test_get_value_as_type_with_integer(self): - query = v2_base.Query(field='metadata.size', - op='eq', - value='123', - type='integer') - expected = 123 - self.assertEqual(expected, query._get_value_as_type()) - - def test_get_value_as_type_with_float(self): - query = v2_base.Query(field='metadata.size', - op='eq', - value='123.456', - type='float') - expected = 123.456 - self.assertEqual(expected, query._get_value_as_type()) - - def test_get_value_as_type_with_boolean(self): - query = v2_base.Query(field='metadata.is_public', - op='eq', - value='True', - type='boolean') - expected = True - self.assertEqual(expected, query._get_value_as_type()) - - def test_get_value_as_type_with_string(self): - query = v2_base.Query(field='metadata.name', - op='eq', - value='linux', - type='string') - expected = 'linux' - self.assertEqual(expected, query._get_value_as_type()) - - def test_get_value_as_type_with_datetime(self): - query = v2_base.Query(field='metadata.date', - op='eq', - value='2014-01-01T05:00:00', - type='datetime') - self.assertIsInstance(query._get_value_as_type(), datetime.datetime) - self.assertIsNone(query._get_value_as_type().tzinfo) - - def test_get_value_as_type_with_integer_without_type(self): - query = v2_base.Query(field='metadata.size', - op='eq', - value='123') - expected = 123 - self.assertEqual(expected, query._get_value_as_type()) - - def test_get_value_as_type_with_float_without_type(self): - query = v2_base.Query(field='metadata.size', - op='eq', - value='123.456') - expected = 123.456 - self.assertEqual(expected, query._get_value_as_type()) - - def test_get_value_as_type_with_boolean_without_type(self): - query = v2_base.Query(field='metadata.is_public', - op='eq', - value='True') - expected = True - self.assertEqual(expected, query._get_value_as_type()) - - def test_get_value_as_type_with_string_without_type(self): - query = v2_base.Query(field='metadata.name', - op='eq', - value='linux') - expected = 'linux' - self.assertEqual(expected, query._get_value_as_type()) - - def test_get_value_as_type_with_bad_type(self): - query = v2_base.Query(field='metadata.size', - op='eq', - value='123.456', - type='blob') - self.assertRaises(wsme.exc.ClientSideError, query._get_value_as_type) - - def test_get_value_as_type_with_bad_value(self): - query = v2_base.Query(field='metadata.size', - op='eq', - value='fake', - type='integer') - 
self.assertRaises(wsme.exc.ClientSideError, query._get_value_as_type) - - def test_get_value_as_type_integer_expression_without_type(self): - # bug 1221736 - query = v2_base.Query(field='should_be_a_string', - op='eq', - value='WWW-Layer-4a80714f') - expected = 'WWW-Layer-4a80714f' - self.assertEqual(expected, query._get_value_as_type()) - - def test_get_value_as_type_boolean_expression_without_type(self): - # bug 1221736 - query = v2_base.Query(field='should_be_a_string', - op='eq', - value='True or False') - expected = 'True or False' - self.assertEqual(expected, query._get_value_as_type()) - - def test_get_value_as_type_with_syntax_error(self): - # bug 1221736 - value = 'WWW-Layer-4a80714f-0232-4580-aa5e-81494d1a4147-uolhh25p5xxm' - query = v2_base.Query(field='group_id', - op='eq', - value=value) - expected = value - self.assertEqual(expected, query._get_value_as_type()) - - def test_get_value_as_type_with_syntax_error_colons(self): - # bug 1221736 - value = 'Ref::StackId' - query = v2_base.Query(field='field_name', - op='eq', - value=value) - expected = value - self.assertEqual(expected, query._get_value_as_type()) - - -class TestValidateGroupByFields(base.BaseTestCase): - - def test_valid_field(self): - result = meters._validate_groupby_fields(['user_id']) - self.assertEqual(['user_id'], result) - - def test_valid_fields_multiple(self): - result = set(meters._validate_groupby_fields( - ['user_id', 'project_id', 'source'])) - self.assertEqual(set(['user_id', 'project_id', 'source']), result) - - def test_invalid_field(self): - self.assertRaises(wsme.exc.UnknownArgument, - meters._validate_groupby_fields, - ['wtf']) - - def test_invalid_field_multiple(self): - self.assertRaises(wsme.exc.UnknownArgument, - meters._validate_groupby_fields, - ['user_id', 'wtf', 'project_id', 'source']) - - def test_duplicate_fields(self): - result = set( - meters._validate_groupby_fields(['user_id', 'source', 'user_id']) - ) - self.assertEqual(set(['user_id', 'source']), result) - - -class TestQueryToKwArgs(tests_base.BaseTestCase): - def setUp(self): - super(TestQueryToKwArgs, self).setUp() - self.useFixture(mockpatch.PatchObject( - utils, 'sanitize_query', side_effect=lambda x, y, **z: x)) - self.useFixture(mockpatch.PatchObject( - utils, '_verify_query_segregation', side_effect=lambda x, **z: x)) - - def test_sample_filter_single(self): - q = [v2_base.Query(field='user_id', - op='eq', - value='uid')] - kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) - self.assertIn('user', kwargs) - self.assertEqual(1, len(kwargs)) - self.assertEqual('uid', kwargs['user']) - - def test_sample_filter_multi(self): - q = [v2_base.Query(field='user_id', - op='eq', - value='uid'), - v2_base.Query(field='project_id', - op='eq', - value='pid'), - v2_base.Query(field='resource_id', - op='eq', - value='rid'), - v2_base.Query(field='source', - op='eq', - value='source_name'), - v2_base.Query(field='meter', - op='eq', - value='meter_name')] - kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) - self.assertEqual(5, len(kwargs)) - self.assertEqual('uid', kwargs['user']) - self.assertEqual('pid', kwargs['project']) - self.assertEqual('rid', kwargs['resource']) - self.assertEqual('source_name', kwargs['source']) - self.assertEqual('meter_name', kwargs['meter']) - - def test_sample_filter_timestamp(self): - ts_start = timeutils.utcnow() - ts_end = ts_start + datetime.timedelta(minutes=5) - q = [v2_base.Query(field='timestamp', - op='lt', - value=str(ts_end)), - v2_base.Query(field='timestamp', - op='gt', 
- value=str(ts_start))] - kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) - self.assertEqual(4, len(kwargs)) - self.assertTimestampEqual(kwargs['start_timestamp'], ts_start) - self.assertTimestampEqual(kwargs['end_timestamp'], ts_end) - self.assertEqual('gt', kwargs['start_timestamp_op']) - self.assertEqual('lt', kwargs['end_timestamp_op']) - - def test_sample_filter_meta(self): - q = [v2_base.Query(field='metadata.size', - op='eq', - value='20'), - v2_base.Query(field='resource_metadata.id', - op='eq', - value='meta_id')] - kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) - self.assertEqual(1, len(kwargs)) - self.assertEqual(2, len(kwargs['metaquery'])) - self.assertEqual(20, kwargs['metaquery']['metadata.size']) - self.assertEqual('meta_id', kwargs['metaquery']['metadata.id']) - - def test_sample_filter_non_equality_on_metadata(self): - queries = [v2_base.Query(field='resource_metadata.image_id', - op='gt', - value='image', - type='string'), - v2_base.Query(field='metadata.ramdisk_id', - op='le', - value='ramdisk', - type='string')] - with mock.patch('pecan.request') as request: - request.headers.return_value = {'X-ProjectId': 'foobar'} - self.assertRaises( - wsme.exc.InvalidInput, - utils.query_to_kwargs, - queries, - storage.SampleFilter.__init__) - - def test_sample_filter_invalid_field(self): - q = [v2_base.Query(field='invalid', - op='eq', - value='20')] - self.assertRaises( - wsme.exc.UnknownArgument, - utils.query_to_kwargs, q, storage.SampleFilter.__init__) - - def test_sample_filter_invalid_op(self): - q = [v2_base.Query(field='user_id', - op='lt', - value='20')] - self.assertRaises( - wsme.exc.InvalidInput, - utils.query_to_kwargs, q, storage.SampleFilter.__init__) - - def test_sample_filter_timestamp_invalid_op(self): - ts_start = timeutils.utcnow() - q = [v2_base.Query(field='timestamp', - op='eq', - value=str(ts_start))] - self.assertRaises( - wsme.exc.InvalidInput, - utils.query_to_kwargs, q, storage.SampleFilter.__init__) - - def test_sample_filter_exclude_internal(self): - queries = [v2_base.Query(field=f, - op='eq', - value='fake', - type='string') - for f in ['y', 'on_behalf_of', 'x']] - with mock.patch('pecan.request') as request: - request.headers.return_value = {'X-ProjectId': 'foobar'} - self.assertRaises(wsme.exc.ClientSideError, - utils.query_to_kwargs, - queries, - storage.SampleFilter.__init__, - internal_keys=['on_behalf_of']) - - def test_sample_filter_self_always_excluded(self): - queries = [v2_base.Query(field='user_id', - op='eq', - value='20')] - with mock.patch('pecan.request') as request: - request.headers.return_value = {'X-ProjectId': 'foobar'} - kwargs = utils.query_to_kwargs(queries, - storage.SampleFilter.__init__) - self.assertFalse('self' in kwargs) - - def test_sample_filter_translation(self): - queries = [v2_base.Query(field=f, - op='eq', - value='fake_%s' % f, - type='string') for f in ['user_id', - 'project_id', - 'resource_id']] - with mock.patch('pecan.request') as request: - request.headers.return_value = {'X-ProjectId': 'foobar'} - kwargs = utils.query_to_kwargs(queries, - storage.SampleFilter.__init__) - for o in ['user', 'project', 'resource']: - self.assertEqual('fake_%s_id' % o, kwargs.get(o)) - - def test_timestamp_validation(self): - q = [v2_base.Query(field='timestamp', - op='le', - value='123')] - - exc = self.assertRaises( - wsme.exc.InvalidInput, - utils.query_to_kwargs, q, storage.SampleFilter.__init__) - expected_exc = wsme.exc.InvalidInput('timestamp', '123', - 'invalid timestamp format') - 
self.assertEqual(str(expected_exc), str(exc)) - - def test_get_alarm_changes_filter_valid_fields(self): - q = [v2_base.Query(field='abc', - op='eq', - value='abc')] - exc = self.assertRaises( - wsme.exc.UnknownArgument, - utils.query_to_kwargs, q, - alarm_storage_base.Connection.get_alarm_changes) - valid_keys = ['alarm_id', 'on_behalf_of', 'project', 'search_offset', - 'severity', 'timestamp', 'type', 'user'] - msg = ("unrecognized field in query: %s, " - "valid keys: %s") % (q, valid_keys) - expected_exc = wsme.exc.UnknownArgument('abc', msg) - self.assertEqual(str(expected_exc), str(exc)) - - def test_sample_filter_valid_fields(self): - q = [v2_base.Query(field='abc', - op='eq', - value='abc')] - exc = self.assertRaises( - wsme.exc.UnknownArgument, - utils.query_to_kwargs, q, storage.SampleFilter.__init__) - valid_keys = ['message_id', 'meter', 'project', 'resource', - 'search_offset', 'source', 'timestamp', 'user'] - msg = ("unrecognized field in query: %s, " - "valid keys: %s") % (q, valid_keys) - expected_exc = wsme.exc.UnknownArgument('abc', msg) - self.assertEqual(str(expected_exc), str(exc)) - - def test_get_meters_filter_valid_fields(self): - q = [v2_base.Query(field='abc', - op='eq', - value='abc')] - exc = self.assertRaises( - wsme.exc.UnknownArgument, - utils.query_to_kwargs, q, storage_base.Connection.get_meters) - valid_keys = ['project', 'resource', 'source', 'user'] - msg = ("unrecognized field in query: %s, " - "valid keys: %s") % (q, valid_keys) - expected_exc = wsme.exc.UnknownArgument('abc', msg) - self.assertEqual(str(expected_exc), str(exc)) - - def test_get_resources_filter_valid_fields(self): - q = [v2_base.Query(field='abc', - op='eq', - value='abc')] - exc = self.assertRaises( - wsme.exc.UnknownArgument, - utils.query_to_kwargs, q, storage_base.Connection.get_resources) - valid_keys = ['project', 'resource', - 'search_offset', 'source', 'timestamp', 'user'] - msg = ("unrecognized field in query: %s, " - "valid keys: %s") % (q, valid_keys) - expected_exc = wsme.exc.UnknownArgument('abc', msg) - self.assertEqual(str(expected_exc), str(exc)) - - def test_get_alarms_filter_valid_fields(self): - q = [v2_base.Query(field='abc', - op='eq', - value='abc')] - exc = self.assertRaises( - wsme.exc.UnknownArgument, - utils.query_to_kwargs, q, - alarm_storage_base.Connection.get_alarms) - valid_keys = ['alarm_id', 'enabled', 'meter', 'name', - 'project', 'severity', 'state', 'type', 'user'] - msg = ("unrecognized field in query: %s, " - "valid keys: %s") % (q, valid_keys) - expected_exc = wsme.exc.UnknownArgument('abc', msg) - self.assertEqual(str(expected_exc), str(exc)) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/api/v2/test_statistics.py ceilometer-5.0.0~b3/ceilometer/tests/api/v2/test_statistics.py --- ceilometer-5.0.0~b2/ceilometer/tests/api/v2/test_statistics.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/api/v2/test_statistics.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,105 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Test statistics objects."""
-
-import datetime
-
-from oslotest import base
-
-from ceilometer.api.controllers.v2 import meters
-
-
-class TestStatisticsDuration(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestStatisticsDuration, self).setUp()
-
-        # Create events relative to the range and pretend
-        # that the intervening events exist.
-
-        self.early1 = datetime.datetime(2012, 8, 27, 7, 0)
-        self.early2 = datetime.datetime(2012, 8, 27, 17, 0)
-
-        self.start = datetime.datetime(2012, 8, 28, 0, 0)
-
-        self.middle1 = datetime.datetime(2012, 8, 28, 8, 0)
-        self.middle2 = datetime.datetime(2012, 8, 28, 18, 0)
-
-        self.end = datetime.datetime(2012, 8, 28, 23, 59)
-
-        self.late1 = datetime.datetime(2012, 8, 29, 9, 0)
-        self.late2 = datetime.datetime(2012, 8, 29, 19, 0)
-
-    def test_nulls(self):
-        s = meters.Statistics(duration_start=None,
-                              duration_end=None,
-                              start_timestamp=None,
-                              end_timestamp=None)
-        self.assertIsNone(s.duration_start)
-        self.assertIsNone(s.duration_end)
-        self.assertIsNone(s.duration)
-
-    def test_overlap_range_start(self):
-        s = meters.Statistics(duration_start=self.early1,
-                              duration_end=self.middle1,
-                              start_timestamp=self.start,
-                              end_timestamp=self.end)
-        self.assertEqual(self.start, s.duration_start)
-        self.assertEqual(self.middle1, s.duration_end)
-        self.assertEqual(8 * 60 * 60, s.duration)
-
-    def test_within_range(self):
-        s = meters.Statistics(duration_start=self.middle1,
-                              duration_end=self.middle2,
-                              start_timestamp=self.start,
-                              end_timestamp=self.end)
-        self.assertEqual(self.middle1, s.duration_start)
-        self.assertEqual(self.middle2, s.duration_end)
-        self.assertEqual(10 * 60 * 60, s.duration)
-
-    def test_within_range_zero_duration(self):
-        s = meters.Statistics(duration_start=self.middle1,
-                              duration_end=self.middle1,
-                              start_timestamp=self.start,
-                              end_timestamp=self.end)
-        self.assertEqual(self.middle1, s.duration_start)
-        self.assertEqual(self.middle1, s.duration_end)
-        self.assertEqual(0, s.duration)
-
-    def test_overlap_range_end(self):
-        s = meters.Statistics(duration_start=self.middle2,
-                              duration_end=self.late1,
-                              start_timestamp=self.start,
-                              end_timestamp=self.end)
-        self.assertEqual(self.middle2, s.duration_start)
-        self.assertEqual(self.end, s.duration_end)
-        self.assertEqual(((6 * 60) - 1) * 60, s.duration)
-
-    def test_after_range(self):
-        s = meters.Statistics(duration_start=self.late1,
-                              duration_end=self.late2,
-                              start_timestamp=self.start,
-                              end_timestamp=self.end)
-        self.assertIsNone(s.duration_start)
-        self.assertIsNone(s.duration_end)
-        self.assertIsNone(s.duration)
-
-    def test_without_timestamp(self):
-        s = meters.Statistics(duration_start=self.late1,
-                              duration_end=self.late2,
-                              start_timestamp=None,
-                              end_timestamp=None)
-        self.assertEqual(self.late1, s.duration_start)
-        self.assertEqual(self.late2, s.duration_end)
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/api/v2/test_statistics_scenarios.py ceilometer-5.0.0~b3/ceilometer/tests/api/v2/test_statistics_scenarios.py
--- ceilometer-5.0.0~b2/ceilometer/tests/api/v2/test_statistics_scenarios.py 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/api/v2/test_statistics_scenarios.py 1970-01-01 00:00:00.000000000 +0000
@@ -1,1661 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Test events statistics retrieval.""" - -import datetime - -from ceilometer.publisher import utils -from ceilometer import sample -from ceilometer.tests.api import v2 -from ceilometer.tests import db as tests_db - - -class TestMaxProjectVolume(v2.FunctionalTest, - tests_db.MixinTestsWithBackendScenarios): - - PATH = '/meters/volume.size/statistics' - - def setUp(self): - super(TestMaxProjectVolume, self).setUp() - for i in range(3): - s = sample.Sample( - 'volume.size', - 'gauge', - 'GiB', - 5 + i, - 'user-id', - 'project1', - 'resource-id-%s' % i, - timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), - resource_metadata={'display_name': 'test-volume', - 'tag': 'self.sample', - }, - source='source1', - ) - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def test_no_time_bounds(self): - data = self.get_json(self.PATH, q=[{'field': 'project_id', - 'value': 'project1', - }]) - self.assertEqual(7, data[0]['max']) - self.assertEqual(3, data[0]['count']) - - def test_start_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'project_id', - 'value': 'project1', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T11:30:00', - }, - ]) - self.assertEqual(7, data[0]['max']) - self.assertEqual(2, data[0]['count']) - - def test_start_timestamp_after(self): - data = self.get_json(self.PATH, q=[{'field': 'project_id', - 'value': 'project1', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T12:34:00', - }, - ]) - self.assertEqual([], data) - - def test_end_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'project_id', - 'value': 'project1', - }, - {'field': 'timestamp', - 'op': 'le', - 'value': '2012-09-25T11:30:00', - }, - ]) - self.assertEqual(5, data[0]['max']) - self.assertEqual(1, data[0]['count']) - - def test_end_timestamp_before(self): - data = self.get_json(self.PATH, q=[{'field': 'project_id', - 'value': 'project1', - }, - {'field': 'timestamp', - 'op': 'le', - 'value': '2012-09-25T09:54:00', - }, - ]) - self.assertEqual([], data) - - def test_start_end_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'project_id', - 'value': 'project1', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T11:30:00', - }, - {'field': 'timestamp', - 'op': 'le', - 'value': '2012-09-25T11:32:00', - }, - ]) - self.assertEqual(6, data[0]['max']) - self.assertEqual(1, data[0]['count']) - - -class TestMaxResourceVolume(v2.FunctionalTest, - tests_db.MixinTestsWithBackendScenarios): - - PATH = '/meters/volume.size/statistics' - - def setUp(self): - super(TestMaxResourceVolume, self).setUp() - for i in range(3): - s = sample.Sample( - 'volume.size', - 'gauge', - 'GiB', - 5 + i, - 'user-id', - 'project1', - 'resource-id', - timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), - resource_metadata={'display_name': 'test-volume', - 'tag': 'self.sample', - }, - source='source1', - ) - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def 
test_no_time_bounds(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }]) - self.assertEqual(7, data[0]['max']) - self.assertEqual(3, data[0]['count']) - - def test_no_time_bounds_with_period(self): - data = self.get_json(self.PATH, - q=[{'field': 'resource_id', - 'value': 'resource-id'}], - period=3600) - self.assertEqual(3, len(data)) - self.assertEqual(set([u'2012-09-25T10:30:00', - u'2012-09-25T12:32:00', - u'2012-09-25T11:31:00']), - set(x['duration_start'] for x in data)) - self.assertEqual(3600, data[0]['period']) - self.assertEqual(set([u'2012-09-25T10:30:00', - u'2012-09-25T11:30:00', - u'2012-09-25T12:30:00']), - set(x['period_start'] for x in data)) - - def test_period_with_negative_value(self): - resp = self.get_json(self.PATH, expect_errors=True, - q=[{'field': 'resource_id', - 'value': 'resource-id'}], - period=-1) - self.assertEqual(400, resp.status_code) - - @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'hbase', 'db2') - def test_period_with_large_value(self): - resp = self.get_json(self.PATH, expect_errors=True, - q=[{'field': 'user_id', - 'value': 'user-id'}], - period=10000000000000) - self.assertEqual(400, resp.status_code) - self.assertIn(b"Invalid period", resp.body) - - def test_start_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T11:30:00', - }, - ]) - self.assertEqual(7, data[0]['max']) - self.assertEqual(2, data[0]['count']) - - def test_start_timestamp_after(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T12:34:00', - }, - ]) - self.assertEqual([], data) - - def test_end_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }, - {'field': 'timestamp', - 'op': 'le', - 'value': '2012-09-25T11:30:00', - }, - ]) - self.assertEqual(5, data[0]['max']) - self.assertEqual(1, data[0]['count']) - - def test_end_timestamp_before(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }, - {'field': 'timestamp', - 'op': 'le', - 'value': '2012-09-25T09:54:00', - }, - ]) - self.assertEqual([], data) - - def test_start_end_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T11:30:00', - }, - {'field': 'timestamp', - 'op': 'le', - 'value': '2012-09-25T11:32:00', - }, - ]) - self.assertEqual(6, data[0]['max']) - self.assertEqual(1, data[0]['count']) - - -class TestSumProjectVolume(v2.FunctionalTest, - tests_db.MixinTestsWithBackendScenarios): - - PATH = '/meters/volume.size/statistics' - - def setUp(self): - super(TestSumProjectVolume, self).setUp() - for i in range(3): - s = sample.Sample( - 'volume.size', - 'gauge', - 'GiB', - 5 + i, - 'user-id', - 'project1', - 'resource-id-%s' % i, - timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), - resource_metadata={'display_name': 'test-volume', - 'tag': 'self.sample', - }, - source='source1', - ) - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def test_no_time_bounds(self): - data = self.get_json(self.PATH, q=[{'field': 'project_id', - 'value': 'project1', - }]) - expected = 5 + 6 + 7 - self.assertEqual(expected, data[0]['sum']) - self.assertEqual(3, 
data[0]['count']) - - def test_start_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'project_id', - 'value': 'project1', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T11:30:00', - }, - ]) - expected = 6 + 7 - self.assertEqual(expected, data[0]['sum']) - self.assertEqual(2, data[0]['count']) - - def test_start_timestamp_after(self): - data = self.get_json(self.PATH, q=[{'field': 'project_id', - 'value': 'project1', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T12:34:00', - }, - ]) - self.assertEqual([], data) - - def test_end_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'project_id', - 'value': 'project1', - }, - {'field': 'timestamp', - 'op': 'le', - 'value': '2012-09-25T11:30:00', - }, - ]) - self.assertEqual(5, data[0]['sum']) - self.assertEqual(1, data[0]['count']) - - def test_end_timestamp_before(self): - data = self.get_json(self.PATH, q=[{'field': 'project_id', - 'value': 'project1', - }, - {'field': 'timestamp', - 'op': 'le', - 'value': '2012-09-25T09:54:00', - }, - ]) - self.assertEqual([], data) - - def test_start_end_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'project_id', - 'value': 'project1', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T11:30:00', - }, - {'field': 'timestamp', - 'op': 'le', - 'value': '2012-09-25T11:32:00', - }, - ]) - self.assertEqual(6, data[0]['sum']) - self.assertEqual(1, data[0]['count']) - - -class TestSumResourceVolume(v2.FunctionalTest, - tests_db.MixinTestsWithBackendScenarios): - - PATH = '/meters/volume.size/statistics' - - def setUp(self): - super(TestSumResourceVolume, self).setUp() - for i in range(3): - s = sample.Sample( - 'volume.size', - 'gauge', - 'GiB', - 5 + i, - 'user-id', - 'project1', - 'resource-id', - timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), - resource_metadata={'display_name': 'test-volume', - 'tag': 'self.sample', - }, - source='source1', - ) - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def test_no_time_bounds(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }]) - self.assertEqual(5 + 6 + 7, data[0]['sum']) - self.assertEqual(3, data[0]['count']) - - def test_no_time_bounds_with_period(self): - data = self.get_json(self.PATH, - q=[{'field': 'resource_id', - 'value': 'resource-id'}], - period=1800) - self.assertEqual(3, len(data)) - self.assertEqual(set([u'2012-09-25T10:30:00', - u'2012-09-25T12:32:00', - u'2012-09-25T11:31:00']), - set(x['duration_start'] for x in data)) - self.assertEqual(1800, data[0]['period']) - self.assertEqual(set([u'2012-09-25T10:30:00', - u'2012-09-25T11:30:00', - u'2012-09-25T12:30:00']), - set(x['period_start'] for x in data)) - - def test_start_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T11:30:00', - }]) - self.assertEqual(6 + 7, data[0]['sum']) - self.assertEqual(2, data[0]['count']) - - def test_start_timestamp_with_period(self): - data = self.get_json(self.PATH, - q=[{'field': 'resource_id', - 'value': 'resource-id'}, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T10:15:00'}], - period=7200) - self.assertEqual(2, len(data)) - self.assertEqual(set([u'2012-09-25T10:30:00', - u'2012-09-25T12:32:00']), - set(x['duration_start'] for x in data)) - self.assertEqual(7200, data[0]['period']) - 
self.assertEqual(set([u'2012-09-25T10:15:00', - u'2012-09-25T12:15:00']), - set(x['period_start'] for x in data)) - - def test_start_timestamp_after(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T12:34:00', - }]) - self.assertEqual([], data) - - def test_end_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }, - {'field': 'timestamp', - 'op': 'le', - 'value': '2012-09-25T11:30:00', - }]) - self.assertEqual(5, data[0]['sum']) - self.assertEqual(1, data[0]['count']) - - def test_end_timestamp_before(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }, - {'field': 'timestamp', - 'op': 'le', - 'value': '2012-09-25T09:54:00', - }]) - self.assertEqual([], data) - - def test_start_end_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T11:30:00', - }, - {'field': 'timestamp', - 'op': 'lt', - 'value': '2012-09-25T11:32:00', - }]) - self.assertEqual(6, data[0]['sum']) - self.assertEqual(1, data[0]['count']) - - -class TestGroupByInstance(v2.FunctionalTest, - tests_db.MixinTestsWithBackendScenarios): - - PATH = '/meters/instance/statistics' - - def setUp(self): - super(TestGroupByInstance, self).setUp() - - test_sample_data = ( - {'volume': 2, 'user': 'user-1', 'project': 'project-1', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10), - 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1', - 'source': 'source-2'}, - {'volume': 2, 'user': 'user-1', 'project': 'project-2', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 15, 37), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-1', - 'source': 'source-2'}, - {'volume': 1, 'user': 'user-2', 'project': 'project-1', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 11), - 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', - 'source': 'source-1'}, - {'volume': 1, 'user': 'user-2', 'project': 'project-1', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source-1'}, - {'volume': 2, 'user': 'user-2', 'project': 'project-1', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 14, 59), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source-1'}, - {'volume': 4, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source-1'}, - {'volume': 4, 'user': 'user-3', 'project': 'project-1', - 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22), - 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', - 'source': 'source-3'}, - ) - - for test_sample in test_sample_data: - c = sample.Sample( - 'instance', - sample.TYPE_CUMULATIVE, - unit='s', - volume=test_sample['volume'], - user_id=test_sample['user'], - project_id=test_sample['project'], - resource_id=test_sample['resource'], - timestamp=datetime.datetime(*test_sample['timestamp']), - resource_metadata={'flavor': test_sample['metadata_flavor'], - 'event': test_sample['metadata_event'], }, - source=test_sample['source'], - ) - msg = utils.meter_message_from_counter( - c, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def test_group_by_user(self): - data 
= self.get_json(self.PATH, groupby=['user_id']) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['user_id']), groupby_keys_set) - self.assertEqual(set(['user-1', 'user-2', 'user-3']), groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'user_id': 'user-1'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'user_id': 'user-2'}: - self.assertEqual(4, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(8, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'user_id': 'user-3'}: - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(4, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(4, r['avg']) - - def test_group_by_resource(self): - data = self.get_json(self.PATH, groupby=['resource_id']) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['resource_id']), groupby_keys_set) - self.assertEqual(set(['resource-1', 'resource-2', 'resource-3']), - groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'resource_id': 'resource-1'}: - self.assertEqual(3, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(6, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'resource_id': 'resource-2'}: - self.assertEqual(3, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(6, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'resource_id': 'resource-3'}: - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(4, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(4, r['avg']) - - def test_group_by_project(self): - data = self.get_json(self.PATH, groupby=['project_id']) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'project_id': 'project-1'}: - self.assertEqual(5, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(10, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'project_id': 'project-2'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(6, r['sum']) - self.assertEqual(3, r['avg']) - - def test_group_by_unknown_field(self): - response = self.get_json(self.PATH, - expect_errors=True, - groupby=['wtf']) - self.assertEqual(400, response.status_code) - - def test_group_by_multiple_regular(self): - data = self.get_json(self.PATH, groupby=['user_id', 'resource_id']) - groupby_keys_set = set(x for sub_dict in data - for x in 
sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['user_id', 'resource_id']), groupby_keys_set) - self.assertEqual(set(['user-1', 'user-2', 'user-3', 'resource-1', - 'resource-2', 'resource-3']), - groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'user_id': 'user-1', - 'resource_id': 'resource-1'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'user_id': 'user-2', - 'resource_id': 'resource-1'}: - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'user_id': 'user-2', - 'resource_id': 'resource-2'}: - self.assertEqual(3, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(6, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'user_id': 'user-3', - 'resource_id': 'resource-3'}: - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(4, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(4, r['avg']) - else: - self.assertNotEqual(grp, {'user_id': 'user-1', - 'resource_id': 'resource-2'}) - self.assertNotEqual(grp, {'user_id': 'user-1', - 'resource_id': 'resource-3'}) - self.assertNotEqual(grp, {'user_id': 'user-2', - 'resource_id': 'resource-3'}) - self.assertNotEqual(grp, {'user_id': 'user-3', - 'resource_id': 'resource-1'}) - self.assertNotEqual(grp, {'user_id': 'user-3', - 'resource_id': 'resource-2'}) - - def test_group_by_with_query_filter(self): - data = self.get_json(self.PATH, - q=[{'field': 'project_id', - 'op': 'eq', - 'value': 'project-1'}], - groupby=['resource_id']) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['resource_id']), groupby_keys_set) - self.assertEqual(set(['resource-1', 'resource-2', 'resource-3']), - groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'resource_id': 'resource-1'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'resource_id': 'resource-2'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(1, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(1, r['avg']) - elif grp == {'resource_id': 'resource-3'}: - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(4, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(4, r['avg']) - - def test_group_by_with_query_filter_multiple(self): - data = self.get_json(self.PATH, - q=[{'field': 'user_id', - 'op': 'eq', - 'value': 'user-2'}, - {'field': 'source', - 'op': 'eq', - 'value': 'source-1'}], - groupby=['project_id', 'resource_id']) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id', 
'resource_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2', - 'resource-1', 'resource-2']), - groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'project_id': 'project-1', - 'resource_id': 'resource-1'}: - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'project_id': 'project-1', - 'resource_id': 'resource-2'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(1, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(1, r['avg']) - elif grp == {'project_id': 'project-2', - 'resource_id': 'resource-2'}: - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(4, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(4, r['avg']) - else: - self.assertNotEqual(grp, {'project_id': 'project-2', - 'resource_id': 'resource-1'}) - - def test_group_by_with_period(self): - data = self.get_json(self.PATH, - groupby=['project_id'], - period=7200) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - period_start_set = set(sub_dict['period_start'] for sub_dict in data) - period_start_valid = set([u'2013-08-01T10:11:00', - u'2013-08-01T14:11:00', - u'2013-08-01T16:11:00']) - self.assertEqual(period_start_valid, period_start_set) - - for r in data: - grp = r['groupby'] - period_start = r['period_start'] - if (grp == {'project_id': 'project-1'} and - period_start == u'2013-08-01T10:11:00'): - self.assertEqual(3, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(6, r['sum']) - self.assertEqual(2, r['avg']) - self.assertEqual(4260, r['duration']) - self.assertEqual(u'2013-08-01T10:11:00', r['duration_start']) - self.assertEqual(u'2013-08-01T11:22:00', r['duration_end']) - self.assertEqual(7200, r['period']) - self.assertEqual(u'2013-08-01T12:11:00', r['period_end']) - elif (grp == {'project_id': 'project-1'} and - period_start == u'2013-08-01T14:11:00'): - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(2, r['avg']) - self.assertEqual(4260, r['duration']) - self.assertEqual(u'2013-08-01T14:59:00', r['duration_start']) - self.assertEqual(u'2013-08-01T16:10:00', r['duration_end']) - self.assertEqual(7200, r['period']) - self.assertEqual(u'2013-08-01T16:11:00', r['period_end']) - elif (grp == {'project_id': 'project-2'} and - period_start == u'2013-08-01T14:11:00'): - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(2, r['avg']) - self.assertEqual(0, r['duration']) - self.assertEqual(u'2013-08-01T15:37:00', r['duration_start']) - self.assertEqual(u'2013-08-01T15:37:00', r['duration_end']) - self.assertEqual(7200, r['period']) - self.assertEqual(u'2013-08-01T16:11:00', r['period_end']) - elif (grp == {'project_id': 'project-2'} and - period_start == u'2013-08-01T16:11:00'): - 
self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(4, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(4, r['avg']) - self.assertEqual(0, r['duration']) - self.assertEqual(u'2013-08-01T17:28:00', r['duration_start']) - self.assertEqual(u'2013-08-01T17:28:00', r['duration_end']) - self.assertEqual(7200, r['period']) - self.assertEqual(u'2013-08-01T18:11:00', r['period_end']) - else: - self.assertNotEqual([grp, period_start], - [{'project_id': 'project-1'}, - u'2013-08-01T16:11:00']) - self.assertNotEqual([grp, period_start], - [{'project_id': 'project-2'}, - u'2013-08-01T10:11:00']) - - def test_group_by_with_query_filter_and_period(self): - data = self.get_json(self.PATH, - q=[{'field': 'source', - 'op': 'eq', - 'value': 'source-1'}], - groupby=['project_id'], - period=7200) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - period_start_set = set(sub_dict['period_start'] for sub_dict in data) - period_start_valid = set([u'2013-08-01T10:11:00', - u'2013-08-01T14:11:00', - u'2013-08-01T16:11:00']) - self.assertEqual(period_start_valid, period_start_set) - - for r in data: - grp = r['groupby'] - period_start = r['period_start'] - if (grp == {'project_id': 'project-1'} and - period_start == u'2013-08-01T10:11:00'): - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(1, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(1, r['avg']) - self.assertEqual(1740, r['duration']) - self.assertEqual(u'2013-08-01T10:11:00', r['duration_start']) - self.assertEqual(u'2013-08-01T10:40:00', r['duration_end']) - self.assertEqual(7200, r['period']) - self.assertEqual(u'2013-08-01T12:11:00', r['period_end']) - elif (grp == {'project_id': 'project-1'} and - period_start == u'2013-08-01T14:11:00'): - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(2, r['avg']) - self.assertEqual(0, r['duration']) - self.assertEqual(u'2013-08-01T14:59:00', r['duration_start']) - self.assertEqual(u'2013-08-01T14:59:00', r['duration_end']) - self.assertEqual(7200, r['period']) - self.assertEqual(u'2013-08-01T16:11:00', r['period_end']) - elif (grp == {'project_id': 'project-2'} and - period_start == u'2013-08-01T16:11:00'): - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(4, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(4, r['avg']) - self.assertEqual(0, r['duration']) - self.assertEqual(u'2013-08-01T17:28:00', r['duration_start']) - self.assertEqual(u'2013-08-01T17:28:00', r['duration_end']) - self.assertEqual(7200, r['period']) - self.assertEqual(u'2013-08-01T18:11:00', r['period_end']) - else: - self.assertNotEqual([grp, period_start], - [{'project_id': 'project-1'}, - u'2013-08-01T16:11:00']) - self.assertNotEqual([grp, period_start], - [{'project_id': 'project-2'}, - u'2013-08-01T10:11:00']) - self.assertNotEqual([grp, period_start], - [{'project_id': 'project-2'}, - u'2013-08-01T14:11:00']) - - def test_group_by_start_timestamp_after(self): - data = self.get_json(self.PATH, - q=[{'field': 'timestamp', - 
'op': 'ge', - 'value': '2013-08-01T17:28:01'}], - groupby=['project_id']) - self.assertEqual([], data) - - def test_group_by_end_timestamp_before(self): - data = self.get_json(self.PATH, - q=[{'field': 'timestamp', - 'op': 'le', - 'value': '2013-08-01T10:10:59'}], - groupby=['project_id']) - self.assertEqual([], data) - - def test_group_by_start_timestamp(self): - data = self.get_json(self.PATH, - q=[{'field': 'timestamp', - 'op': 'ge', - 'value': '2013-08-01T14:58:00'}], - groupby=['project_id']) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'project_id': 'project-1'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'project_id': 'project-2'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(6, r['sum']) - self.assertEqual(3, r['avg']) - - def test_group_by_end_timestamp(self): - data = self.get_json(self.PATH, - q=[{'field': 'timestamp', - 'op': 'le', - 'value': '2013-08-01T11:45:00'}], - groupby=['project_id']) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1']), groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'project_id': 'project-1'}: - self.assertEqual(3, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(6, r['sum']) - self.assertEqual(2, r['avg']) - - def test_group_by_start_end_timestamp(self): - data = self.get_json(self.PATH, - q=[{'field': 'timestamp', - 'op': 'ge', - 'value': '2013-08-01T08:17:03'}, - {'field': 'timestamp', - 'op': 'le', - 'value': '2013-08-01T23:59:59'}], - groupby=['project_id']) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'project_id': 'project-1'}: - self.assertEqual(5, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(10, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'project_id': 'project-2'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(6, r['sum']) - self.assertEqual(3, r['avg']) - - def test_group_by_start_end_timestamp_with_query_filter(self): - data = self.get_json(self.PATH, - q=[{'field': 'project_id', - 'op': 'eq', - 'value': 'project-1'}, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2013-08-01T11:01:00'}, - {'field': 'timestamp', - 'op': 'le', - 'value': '2013-08-01T20:00:00'}], - groupby=['resource_id']) - groupby_keys_set = set(x for sub_dict in data - 
for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['resource_id']), groupby_keys_set) - self.assertEqual(set(['resource-1', 'resource-3']), groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'resource_id': 'resource-1'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'resource_id': 'resource-3'}: - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(4, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(4, r['avg']) - - def test_group_by_start_end_timestamp_with_period(self): - data = self.get_json(self.PATH, - q=[{'field': 'timestamp', - 'op': 'ge', - 'value': '2013-08-01T14:00:00'}, - {'field': 'timestamp', - 'op': 'le', - 'value': '2013-08-01T17:00:00'}], - groupby=['project_id'], - period=3600) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - period_start_set = set(sub_dict['period_start'] for sub_dict in data) - period_start_valid = set([u'2013-08-01T14:00:00', - u'2013-08-01T15:00:00', - u'2013-08-01T16:00:00']) - self.assertEqual(period_start_valid, period_start_set) - - for r in data: - grp = r['groupby'] - period_start = r['period_start'] - if (grp == {'project_id': 'project-1'} and - period_start == u'2013-08-01T14:00:00'): - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(2, r['avg']) - self.assertEqual(0, r['duration']) - self.assertEqual(u'2013-08-01T14:59:00', r['duration_start']) - self.assertEqual(u'2013-08-01T14:59:00', r['duration_end']) - self.assertEqual(3600, r['period']) - self.assertEqual(u'2013-08-01T15:00:00', r['period_end']) - elif (grp == {'project_id': 'project-1'} and - period_start == u'2013-08-01T16:00:00'): - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(2, r['avg']) - self.assertEqual(0, r['duration']) - self.assertEqual(u'2013-08-01T16:10:00', r['duration_start']) - self.assertEqual(u'2013-08-01T16:10:00', r['duration_end']) - self.assertEqual(3600, r['period']) - self.assertEqual(u'2013-08-01T17:00:00', r['period_end']) - elif (grp == {'project_id': 'project-2'} and - period_start == u'2013-08-01T15:00:00'): - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(2, r['avg']) - self.assertEqual(0, r['duration']) - self.assertEqual(u'2013-08-01T15:37:00', r['duration_start']) - self.assertEqual(u'2013-08-01T15:37:00', r['duration_end']) - self.assertEqual(3600, r['period']) - self.assertEqual(u'2013-08-01T16:00:00', r['period_end']) - else: - self.assertNotEqual([grp, period_start], - [{'project_id': 'project-1'}, - u'2013-08-01T15:00:00']) - self.assertNotEqual([grp, period_start], - [{'project_id': 'project-2'}, - u'2013-08-01T14:00:00']) - 
self.assertNotEqual([grp, period_start], - [{'project_id': 'project-2'}, - u'2013-08-01T16:00:00']) - - def test_group_by_start_end_timestamp_with_query_filter_and_period(self): - data = self.get_json(self.PATH, - q=[{'field': 'source', - 'op': 'eq', - 'value': 'source-1'}, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2013-08-01T10:00:00'}, - {'field': 'timestamp', - 'op': 'le', - 'value': '2013-08-01T18:00:00'}], - groupby=['project_id'], - period=7200) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - period_start_set = set(sub_dict['period_start'] for sub_dict in data) - period_start_valid = set([u'2013-08-01T10:00:00', - u'2013-08-01T14:00:00', - u'2013-08-01T16:00:00']) - self.assertEqual(period_start_valid, period_start_set) - - for r in data: - grp = r['groupby'] - period_start = r['period_start'] - if (grp == {'project_id': 'project-1'} and - period_start == u'2013-08-01T10:00:00'): - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(1, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(1, r['avg']) - self.assertEqual(1740, r['duration']) - self.assertEqual(u'2013-08-01T10:11:00', r['duration_start']) - self.assertEqual(u'2013-08-01T10:40:00', r['duration_end']) - self.assertEqual(7200, r['period']) - self.assertEqual(u'2013-08-01T12:00:00', r['period_end']) - elif (grp == {'project_id': 'project-1'} and - period_start == u'2013-08-01T14:00:00'): - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(2, r['avg']) - self.assertEqual(0, r['duration']) - self.assertEqual(u'2013-08-01T14:59:00', r['duration_start']) - self.assertEqual(u'2013-08-01T14:59:00', r['duration_end']) - self.assertEqual(7200, r['period']) - self.assertEqual(u'2013-08-01T16:00:00', r['period_end']) - elif (grp == {'project_id': 'project-2'} and - period_start == u'2013-08-01T16:00:00'): - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(4, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(4, r['avg']) - self.assertEqual(0, r['duration']) - self.assertEqual(u'2013-08-01T17:28:00', r['duration_start']) - self.assertEqual(u'2013-08-01T17:28:00', r['duration_end']) - self.assertEqual(7200, r['period']) - self.assertEqual(u'2013-08-01T18:00:00', r['period_end']) - else: - self.assertNotEqual([grp, period_start], - [{'project_id': 'project-1'}, - u'2013-08-01T16:00:00']) - self.assertNotEqual([grp, period_start], - [{'project_id': 'project-2'}, - u'2013-08-01T10:00:00']) - self.assertNotEqual([grp, period_start], - [{'project_id': 'project-2'}, - u'2013-08-01T14:00:00']) - - -@tests_db.run_with('mongodb', 'hbase', 'db2') -class TestGroupBySource(v2.FunctionalTest, - tests_db.MixinTestsWithBackendScenarios): - - # FIXME(terriyu): We have to put test_group_by_source in its own class - # because SQLAlchemy currently doesn't support group by source statistics. - # When group by source is supported in SQLAlchemy, this test should be - # moved to TestGroupByInstance with all the other group by statistics - # tests. 
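The FIXME above means these group-by-source tests run only against the mongodb, hbase, and db2 scenarios. As a minimal sketch of the aggregation those drivers are expected to perform for GET /meters/instance/statistics?groupby=source, here is the same per-source rollup in plain Python; the (source, volume) pairs are copied from the test_sample_data defined in setUp() below, everything else is illustrative:

    # Illustrative only: reproduces the groupby=['source'] statistics that
    # test_group_by_source (below) expects the mongodb/hbase/db2 drivers
    # to return.
    from collections import defaultdict

    # (source, volume) pairs taken from test_sample_data in setUp() below.
    samples = [('source-2', 2), ('source-2', 2), ('source-1', 1),
               ('source-1', 1), ('source-1', 2), ('source-1', 4),
               ('source-3', 4)]

    volumes_by_source = defaultdict(list)
    for source, volume in samples:
        volumes_by_source[source].append(volume)

    stats = [{'groupby': {'source': source},
              'unit': 's',
              'count': len(vols),
              'min': min(vols),
              'max': max(vols),
              'sum': sum(vols),
              'avg': sum(vols) / float(len(vols))}
             for source, vols in sorted(volumes_by_source.items())]

    # Matches the assertions in test_group_by_source:
    #   source-1 -> count 4, min 1, max 4, sum 8, avg 2
    #   source-2 -> count 2, min 2, max 2, sum 4, avg 2
    #   source-3 -> count 1, min 4, max 4, sum 4, avg 4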
- - PATH = '/meters/instance/statistics' - - def setUp(self): - super(TestGroupBySource, self).setUp() - - test_sample_data = ( - {'volume': 2, 'user': 'user-1', 'project': 'project-1', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10), - 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1', - 'source': 'source-2'}, - {'volume': 2, 'user': 'user-1', 'project': 'project-2', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 15, 37), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-1', - 'source': 'source-2'}, - {'volume': 1, 'user': 'user-2', 'project': 'project-1', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 11), - 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', - 'source': 'source-1'}, - {'volume': 1, 'user': 'user-2', 'project': 'project-1', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source-1'}, - {'volume': 2, 'user': 'user-2', 'project': 'project-1', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 14, 59), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source-1'}, - {'volume': 4, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source-1'}, - {'volume': 4, 'user': 'user-3', 'project': 'project-1', - 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22), - 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', - 'source': 'source-3'}, - ) - - for test_sample in test_sample_data: - c = sample.Sample( - 'instance', - sample.TYPE_CUMULATIVE, - unit='s', - volume=test_sample['volume'], - user_id=test_sample['user'], - project_id=test_sample['project'], - resource_id=test_sample['resource'], - timestamp=datetime.datetime(*test_sample['timestamp']), - resource_metadata={'flavor': test_sample['metadata_flavor'], - 'event': test_sample['metadata_event'], }, - source=test_sample['source'], - ) - msg = utils.meter_message_from_counter( - c, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def tearDown(self): - self.conn.clear() - super(TestGroupBySource, self).tearDown() - - def test_group_by_source(self): - data = self.get_json(self.PATH, groupby=['source']) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['source']), groupby_keys_set) - self.assertEqual(set(['source-1', 'source-2', 'source-3']), - groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'source': 'source-1'}: - self.assertEqual(4, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(8, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'source': 'source-2'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'source': 'source-3'}: - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(4, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(4, r['avg']) - - -class TestSelectableAggregates(v2.FunctionalTest, - tests_db.MixinTestsWithBackendScenarios): - - PATH = '/meters/instance/statistics' - - def 
setUp(self): - super(TestSelectableAggregates, self).setUp() - - test_sample_data = ( - {'volume': 2, 'user': 'user-1', 'project': 'project-1', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10), - 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1', - 'source': 'source'}, - {'volume': 2, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 15, 37), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-1', - 'source': 'source'}, - {'volume': 1, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-5', 'timestamp': (2013, 8, 1, 10, 11), - 'metadata_flavor': 'm1.medium', 'metadata_event': 'event-2', - 'source': 'source'}, - {'volume': 2, 'user': 'user-1', 'project': 'project-1', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source'}, - {'volume': 2, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-4', 'timestamp': (2013, 8, 1, 14, 59), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source'}, - {'volume': 5, 'user': 'user-1', 'project': 'project-1', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source'}, - {'volume': 4, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source'}, - {'volume': 9, 'user': 'user-3', 'project': 'project-3', - 'resource': 'resource-4', 'timestamp': (2013, 8, 1, 11, 59), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-3', - 'source': 'source'}, - ) - - for test_sample in test_sample_data: - c = sample.Sample( - 'instance', - sample.TYPE_GAUGE, - unit='instance', - volume=test_sample['volume'], - user_id=test_sample['user'], - project_id=test_sample['project'], - resource_id=test_sample['resource'], - timestamp=datetime.datetime(*test_sample['timestamp']), - resource_metadata={'flavor': test_sample['metadata_flavor'], - 'event': test_sample['metadata_event'], }, - source=test_sample['source'], - ) - msg = utils.meter_message_from_counter( - c, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def _do_test_per_tenant_selectable_standard_aggregate(self, - aggregate, - expected_values): - agg_args = {'aggregate.func': aggregate} - data = self.get_json(self.PATH, groupby=['project_id'], **agg_args) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - projects = ['project-1', 'project-2', 'project-3'] - self.assertEqual(set(projects), groupby_vals_set) - - standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg']) - for r in data: - grp = r['groupby'] - for project in projects: - if grp == {'project_id': project}: - expected = expected_values[projects.index(project)] - self.assertEqual('instance', r['unit']) - self.assertAlmostEqual(r[aggregate], expected) - self.assertIn('aggregate', r) - self.assertIn(aggregate, r['aggregate']) - self.assertAlmostEqual(r['aggregate'][aggregate], expected) - for a in standard_aggregates - set([aggregate]): - self.assertNotIn(a, r) - - def test_per_tenant_selectable_max(self): - self._do_test_per_tenant_selectable_standard_aggregate('max', - [5, 4, 9]) - - def 
test_per_tenant_selectable_min(self): - self._do_test_per_tenant_selectable_standard_aggregate('min', - [2, 1, 9]) - - def test_per_tenant_selectable_sum(self): - self._do_test_per_tenant_selectable_standard_aggregate('sum', - [9, 9, 9]) - - def test_per_tenant_selectable_avg(self): - self._do_test_per_tenant_selectable_standard_aggregate('avg', - [3, 2.25, 9]) - - def test_per_tenant_selectable_count(self): - self._do_test_per_tenant_selectable_standard_aggregate('count', - [3, 4, 1]) - - def test_per_tenant_selectable_parameterized_aggregate(self): - agg_args = {'aggregate.func': 'cardinality', - 'aggregate.param': 'resource_id'} - data = self.get_json(self.PATH, groupby=['project_id'], **agg_args) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - projects = ['project-1', 'project-2', 'project-3'] - self.assertEqual(set(projects), groupby_vals_set) - - aggregate = 'cardinality/resource_id' - expected_values = [2.0, 3.0, 1.0] - standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg']) - for r in data: - grp = r['groupby'] - for project in projects: - if grp == {'project_id': project}: - expected = expected_values[projects.index(project)] - self.assertEqual('instance', r['unit']) - self.assertNotIn(aggregate, r) - self.assertIn('aggregate', r) - self.assertIn(aggregate, r['aggregate']) - self.assertEqual(expected, r['aggregate'][aggregate]) - for a in standard_aggregates: - self.assertNotIn(a, r) - - def test_large_quantum_selectable_parameterized_aggregate(self): - # add a large number of datapoints that won't impact on cardinality - # if the computation logic is tolerant of different DB behavior on - # larger numbers of samples per-period - for i in range(200): - s = sample.Sample( - 'instance', - sample.TYPE_GAUGE, - unit='instance', - volume=i * 1.0, - user_id='user-1', - project_id='project-1', - resource_id='resource-1', - timestamp=datetime.datetime(2013, 8, 1, 11, i % 60), - resource_metadata={'flavor': 'm1.tiny', - 'event': 'event-1', }, - source='source', - ) - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - agg_args = {'aggregate.func': 'cardinality', - 'aggregate.param': 'resource_id'} - data = self.get_json(self.PATH, **agg_args) - - aggregate = 'cardinality/resource_id' - expected_value = 5.0 - standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg']) - r = data[0] - self.assertNotIn(aggregate, r) - self.assertIn('aggregate', r) - self.assertIn(aggregate, r['aggregate']) - self.assertEqual(expected_value, r['aggregate'][aggregate]) - for a in standard_aggregates: - self.assertNotIn(a, r) - - def test_repeated_unparameterized_aggregate(self): - agg_params = 'aggregate.func=count&aggregate.func=count' - data = self.get_json(self.PATH, override_params=agg_params) - - aggregate = 'count' - expected_value = 8.0 - standard_aggregates = set(['min', 'max', 'sum', 'avg']) - r = data[0] - self.assertIn(aggregate, r) - self.assertEqual(expected_value, r[aggregate]) - self.assertIn('aggregate', r) - self.assertIn(aggregate, r['aggregate']) - self.assertEqual(expected_value, r['aggregate'][aggregate]) - for a in standard_aggregates: - self.assertNotIn(a, r) - - def test_fully_repeated_parameterized_aggregate(self): - agg_params = ('aggregate.func=cardinality&' - 'aggregate.param=resource_id&' - 
'aggregate.func=cardinality&' - 'aggregate.param=resource_id&') - data = self.get_json(self.PATH, override_params=agg_params) - - aggregate = 'cardinality/resource_id' - expected_value = 5.0 - standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg']) - r = data[0] - self.assertIn('aggregate', r) - self.assertNotIn(aggregate, r) - self.assertIn(aggregate, r['aggregate']) - self.assertEqual(expected_value, r['aggregate'][aggregate]) - for a in standard_aggregates: - self.assertNotIn(a, r) - - def test_partially_repeated_parameterized_aggregate(self): - agg_params = ('aggregate.func=cardinality&' - 'aggregate.param=resource_id&' - 'aggregate.func=cardinality&' - 'aggregate.param=project_id&') - data = self.get_json(self.PATH, override_params=agg_params) - - expected_values = {'cardinality/resource_id': 5.0, - 'cardinality/project_id': 3.0} - standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg']) - r = data[0] - self.assertIn('aggregate', r) - for aggregate in expected_values.keys(): - self.assertNotIn(aggregate, r) - self.assertIn(aggregate, r['aggregate']) - self.assertEqual(expected_values[aggregate], - r['aggregate'][aggregate]) - for a in standard_aggregates: - self.assertNotIn(a, r) - - def test_bad_selectable_parameterized_aggregate(self): - agg_args = {'aggregate.func': 'cardinality', - 'aggregate.param': 'injection_attack'} - resp = self.get_json(self.PATH, status=[400], - groupby=['project_id'], **agg_args) - self.assertIn('error_message', resp) - self.assertEqual(resp['error_message'].get('faultcode'), - 'Client') - self.assertEqual(resp['error_message'].get('faultstring'), - 'Bad aggregate: cardinality.injection_attack') - - -@tests_db.run_with('mongodb', 'hbase', 'db2') -class TestUnparameterizedAggregates(v2.FunctionalTest, - tests_db.MixinTestsWithBackendScenarios): - - # We put the stddev test case in a separate class so that we - # can easily exclude the sqlalchemy scenario, as sqlite doesn't - # support the stddev_pop function and fails ungracefully with - # OperationalError when it is used. However we still want to - # test the corresponding functionality in the mongo driver. - # For hbase & db2, the skip on NotImplementedError logic works - # in the usual way. 
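As a standalone illustration of the failure mode described in the comment above (assuming a stock Python sqlite3 build, which ships no STDDEV_POP aggregate), together with the population standard deviation the mongo driver is expected to report for project-1's volumes (2, 2, 5) in the test further down:

    # Illustrative only: why the sqlalchemy/sqlite scenario is excluded.
    import math
    import sqlite3

    conn = sqlite3.connect(':memory:')
    try:
        # Stock sqlite has no STDDEV_POP aggregate, so this raises
        # sqlite3.OperationalError ("no such function: STDDEV_POP").
        conn.execute('SELECT STDDEV_POP(1)')
    except sqlite3.OperationalError as err:
        print('sqlite: %s' % err)

    # Population stddev for project-1's volumes, the value that
    # test_per_tenant_selectable_unparameterized_aggregate expects the
    # mongo driver to compute (~1.4142).
    volumes = [2, 2, 5]
    mean = sum(volumes) / float(len(volumes))
    stddev = math.sqrt(sum((v - mean) ** 2 for v in volumes) / len(volumes))
    print(round(stddev, 4))  # 1.4142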
- - PATH = '/meters/instance/statistics' - - def setUp(self): - super(TestUnparameterizedAggregates, self).setUp() - - test_sample_data = ( - {'volume': 2, 'user': 'user-1', 'project': 'project-1', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10), - 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1', - 'source': 'source'}, - {'volume': 2, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 15, 37), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-1', - 'source': 'source'}, - {'volume': 1, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-5', 'timestamp': (2013, 8, 1, 10, 11), - 'metadata_flavor': 'm1.medium', 'metadata_event': 'event-2', - 'source': 'source'}, - {'volume': 2, 'user': 'user-1', 'project': 'project-1', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source'}, - {'volume': 2, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-4', 'timestamp': (2013, 8, 1, 14, 59), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source'}, - {'volume': 5, 'user': 'user-1', 'project': 'project-1', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source'}, - {'volume': 4, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source'}, - {'volume': 9, 'user': 'user-3', 'project': 'project-3', - 'resource': 'resource-4', 'timestamp': (2013, 8, 1, 11, 59), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-3', - 'source': 'source'}, - ) - - for test_sample in test_sample_data: - c = sample.Sample( - 'instance', - sample.TYPE_GAUGE, - unit='instance', - volume=test_sample['volume'], - user_id=test_sample['user'], - project_id=test_sample['project'], - resource_id=test_sample['resource'], - timestamp=datetime.datetime(*test_sample['timestamp']), - resource_metadata={'flavor': test_sample['metadata_flavor'], - 'event': test_sample['metadata_event'], }, - source=test_sample['source'], - ) - msg = utils.meter_message_from_counter( - c, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def test_per_tenant_selectable_unparameterized_aggregate(self): - agg_args = {'aggregate.func': 'stddev'} - data = self.get_json(self.PATH, groupby=['project_id'], **agg_args) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - projects = ['project-1', 'project-2', 'project-3'] - self.assertEqual(set(projects), groupby_vals_set) - - aggregate = 'stddev' - expected_values = [1.4142, 1.0897, 0.0] - standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg']) - for r in data: - grp = r['groupby'] - for project in projects: - if grp == {'project_id': project}: - expected = expected_values[projects.index(project)] - self.assertEqual('instance', r['unit']) - self.assertNotIn(aggregate, r) - self.assertIn('aggregate', r) - self.assertIn(aggregate, r['aggregate']) - self.assertAlmostEqual(r['aggregate'][aggregate], - expected, - places=4) - for a in standard_aggregates: - self.assertNotIn(a, r) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/api/v2/test_wsme_custom_type.py 
ceilometer-5.0.0~b3/ceilometer/tests/api/v2/test_wsme_custom_type.py --- ceilometer-5.0.0~b2/ceilometer/tests/api/v2/test_wsme_custom_type.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/api/v2/test_wsme_custom_type.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,33 +0,0 @@ -# -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from oslotest import base -import wsme - -from ceilometer.api.controllers.v2 import base as v2_base - - -class TestWsmeCustomType(base.BaseTestCase): - - def test_advenum_default(self): - class dummybase(wsme.types.Base): - ae = v2_base.AdvEnum("name", str, "one", "other", default="other") - - obj = dummybase() - self.assertEqual("other", obj.ae) - - obj = dummybase(ae="one") - self.assertEqual("one", obj.ae) - - self.assertRaises(wsme.exc.InvalidInput, dummybase, ae="not exists") diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/compute/notifications/test_cpu.py ceilometer-5.0.0~b3/ceilometer/tests/compute/notifications/test_cpu.py --- ceilometer-5.0.0~b2/ceilometer/tests/compute/notifications/test_cpu.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/compute/notifications/test_cpu.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,157 +0,0 @@ -# -# Copyright 2013 Intel -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for converters for producing compute counter messages from -notification events. 
-""" - -import copy - -from oslotest import base - -from ceilometer.compute.notifications import cpu - - -METRICS_UPDATE = { - u'_context_request_id': u'req-a8bfa89b-d28b-4b95-9e4b-7d7875275650', - u'_context_quota_class': None, - u'event_type': u'compute.metrics.update', - u'_context_service_catalog': [], - u'_context_auth_token': None, - u'_context_user_id': None, - u'payload': { - u'metrics': [ - {'timestamp': u'2013-07-29T06:51:34.472416', - 'name': 'cpu.frequency', 'value': 1600, - 'source': 'libvirt.LibvirtDriver'}, - {'timestamp': u'2013-07-29T06:51:34.472416', - 'name': 'cpu.user.time', 'value': 17421440000000, - 'source': 'libvirt.LibvirtDriver'}, - {'timestamp': u'2013-07-29T06:51:34.472416', - 'name': 'cpu.kernel.time', 'value': 7852600000000, - 'source': 'libvirt.LibvirtDriver'}, - {'timestamp': u'2013-07-29T06:51:34.472416', - 'name': 'cpu.idle.time', 'value': 1307374400000000, - 'source': 'libvirt.LibvirtDriver'}, - {'timestamp': u'2013-07-29T06:51:34.472416', - 'name': 'cpu.iowait.time', 'value': 11697470000000, - 'source': 'libvirt.LibvirtDriver'}, - {'timestamp': u'2013-07-29T06:51:34.472416', - 'name': 'cpu.user.percent', 'value': 0.012959045637294348, - 'source': 'libvirt.LibvirtDriver'}, - {'timestamp': u'2013-07-29T06:51:34.472416', - 'name': 'cpu.kernel.percent', 'value': 0.005841204961898534, - 'source': 'libvirt.LibvirtDriver'}, - {'timestamp': u'2013-07-29T06:51:34.472416', - 'name': 'cpu.idle.percent', 'value': 0.9724985141658965, - 'source': 'libvirt.LibvirtDriver'}, - {'timestamp': u'2013-07-29T06:51:34.472416', - 'name': 'cpu.iowait.percent', 'value': 0.008701235234910634, - 'source': 'libvirt.LibvirtDriver'}, - {'timestamp': u'2013-07-29T06:51:34.472416', - 'name': 'cpu.percent', 'value': 0.027501485834103515, - 'source': 'libvirt.LibvirtDriver'}], - u'nodename': u'tianst.sh.intel.com', - u'host': u'tianst', - u'host_id': u'10.0.1.1'}, - u'priority': u'INFO', - u'_context_is_admin': True, - u'_context_user': None, - u'publisher_id': u'compute.tianst.sh.intel.com', - u'message_id': u'6eccedba-120e-4db8-9735-2ad5f061e5ee', - u'_context_remote_address': None, - u'_context_roles': [], - u'timestamp': u'2013-07-29 06:51:34.474815', - u'_context_timestamp': u'2013-07-29T06:51:34.348091', - u'_unique_id': u'0ee26117077648e18d88ac76e28a72e2', - u'_context_project_name': None, - u'_context_read_deleted': u'no', - u'_context_tenant': None, - u'_context_instance_lock_checked': False, - u'_context_project_id': None, - u'_context_user_name': None -} - -RES_ID = '%s_%s' % (METRICS_UPDATE['payload']['host'], - METRICS_UPDATE['payload']['nodename']) - - -class TestMetricsNotifications(base.BaseTestCase): - def _process_notification(self, ic): - self.assertIn(METRICS_UPDATE['event_type'], - ic.event_types) - samples = list(ic.process_notification(METRICS_UPDATE)) - self.assertEqual(RES_ID, samples[0].resource_id) - return samples[0] - - def test_compute_metrics(self): - ERROR_METRICS = copy.copy(METRICS_UPDATE) - ERROR_METRICS['payload'] = {"metric_err": []} - ic = cpu.CpuFrequency(None) - info = ic._get_sample(METRICS_UPDATE, 'cpu.frequency') - info_none = ic._get_sample(METRICS_UPDATE, 'abc.efg') - info_error = ic._get_sample(ERROR_METRICS, 'cpu.frequency') - self.assertEqual('cpu.frequency', info['payload']['name']) - self.assertIsNone(info_none) - self.assertIsNone(info_error) - - def test_compute_cpu_frequency(self): - c = self._process_notification(cpu.CpuFrequency(None)) - self.assertEqual('compute.node.cpu.frequency', c.name) - self.assertEqual(1600, c.volume) - - def 
test_compute_cpu_user_time(self): - c = self._process_notification(cpu.CpuUserTime(None)) - self.assertEqual('compute.node.cpu.user.time', c.name) - self.assertEqual(17421440000000, c.volume) - - def test_compute_cpu_kernel_time(self): - c = self._process_notification(cpu.CpuKernelTime(None)) - self.assertEqual('compute.node.cpu.kernel.time', c.name) - self.assertEqual(7852600000000, c.volume) - - def test_compute_cpu_idle_time(self): - c = self._process_notification(cpu.CpuIdleTime(None)) - self.assertEqual('compute.node.cpu.idle.time', c.name) - self.assertEqual(1307374400000000, c.volume) - - def test_compute_cpu_iowait_time(self): - c = self._process_notification(cpu.CpuIowaitTime(None)) - self.assertEqual('compute.node.cpu.iowait.time', c.name) - self.assertEqual(11697470000000, c.volume) - - def test_compute_cpu_kernel_percent(self): - c = self._process_notification(cpu.CpuKernelPercent(None)) - self.assertEqual('compute.node.cpu.kernel.percent', c.name) - self.assertEqual(0.5841204961898534, c.volume) - - def test_compute_cpu_idle_percent(self): - c = self._process_notification(cpu.CpuIdlePercent(None)) - self.assertEqual('compute.node.cpu.idle.percent', c.name) - self.assertEqual(97.24985141658965, c.volume) - - def test_compute_cpu_user_percent(self): - c = self._process_notification(cpu.CpuUserPercent(None)) - self.assertEqual('compute.node.cpu.user.percent', c.name) - self.assertEqual(1.2959045637294348, c.volume) - - def test_compute_cpu_iowait_percent(self): - c = self._process_notification(cpu.CpuIowaitPercent(None)) - self.assertEqual('compute.node.cpu.iowait.percent', c.name) - self.assertEqual(0.8701235234910634, c.volume) - - def test_compute_cpu_percent(self): - c = self._process_notification(cpu.CpuPercent(None)) - self.assertEqual('compute.node.cpu.percent', c.name) - self.assertEqual(2.7501485834103515, c.volume) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/compute/notifications/test_instance.py ceilometer-5.0.0~b3/ceilometer/tests/compute/notifications/test_instance.py --- ceilometer-5.0.0~b2/ceilometer/tests/compute/notifications/test_instance.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/compute/notifications/test_instance.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,785 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for converters for producing compute counter messages from -notification events. 
-""" -from oslotest import base - -from ceilometer.compute.notifications import instance -from ceilometer import sample - - -INSTANCE_CREATE_END = { - u'_context_auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', - u'_context_is_admin': True, - u'_context_project_id': u'7c150a59fe714e6f9263774af9688f0e', - u'_context_quota_class': None, - u'_context_read_deleted': u'no', - u'_context_remote_address': u'10.0.2.15', - u'_context_request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', - u'_context_roles': [u'admin'], - u'_context_timestamp': u'2012-05-08T20:23:41.425105', - u'_context_user_id': u'1e3ce043029547f1a61c1996d1a531a2', - u'event_type': u'compute.instance.create.end', - u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', - u'payload': {u'created_at': u'2012-05-08 20:23:41', - u'deleted_at': u'', - u'disk_gb': 0, - u'display_name': u'testme', - u'fixed_ips': [{u'address': u'10.0.0.2', - u'floating_ips': [], - u'meta': {}, - u'type': u'fixed', - u'version': 4}], - u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', - u'instance_id': u'9f9d01b9-4a58-4271-9e27-398b21ab20d1', - u'instance_type': u'm1.tiny', - u'instance_type_id': 2, - u'launched_at': u'2012-05-08 20:23:47.985999', - u'memory_mb': 512, - u'state': u'active', - u'state_description': u'', - u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', - u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', - u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', - u'vcpus': 1, - u'root_gb': 0, - u'ephemeral_gb': 0, - u'host': u'compute-host-name', - u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', - u'os_type': u'linux?', - u'architecture': u'x86', - u'image_ref': u'UUID', - u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', - u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', - }, - u'priority': u'INFO', - u'publisher_id': u'compute.vagrant-precise', - u'timestamp': u'2012-05-08 20:23:48.028195', -} - -INSTANCE_DELETE_START = { - u'_context_auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', - u'_context_is_admin': True, - u'_context_project_id': u'7c150a59fe714e6f9263774af9688f0e', - u'_context_quota_class': None, - u'_context_read_deleted': u'no', - u'_context_remote_address': u'10.0.2.15', - u'_context_request_id': u'req-fb3c4546-a2e5-49b7-9fd2-a63bd658bc39', - u'_context_roles': [u'admin'], - u'_context_timestamp': u'2012-05-08T20:24:14.547374', - u'_context_user_id': u'1e3ce043029547f1a61c1996d1a531a2', - u'event_type': u'compute.instance.delete.start', - u'message_id': u'a15b94ee-cb8e-4c71-9abe-14aa80055fb4', - u'payload': {u'created_at': u'2012-05-08 20:23:41', - u'deleted_at': u'', - u'disk_gb': 0, - u'display_name': u'testme', - u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', - u'instance_id': u'9f9d01b9-4a58-4271-9e27-398b21ab20d1', - u'instance_type': u'm1.tiny', - u'instance_type_id': 2, - u'launched_at': u'2012-05-08 20:23:47', - u'memory_mb': 512, - u'state': u'active', - u'state_description': u'deleting', - u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', - u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', - u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', - u'vcpus': 1, - u'root_gb': 0, - u'ephemeral_gb': 0, - u'host': u'compute-host-name', - u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', - u'os_type': u'linux?', - u'architecture': u'x86', - u'image_ref': u'UUID', - u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', - u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', - }, - u'priority': u'INFO', - u'publisher_id': u'compute.vagrant-precise', - u'timestamp': u'2012-05-08 
20:24:14.824743', -} - -INSTANCE_EXISTS = { - u'_context_auth_token': None, - u'_context_is_admin': True, - u'_context_project_id': None, - u'_context_quota_class': None, - u'_context_read_deleted': u'no', - u'_context_remote_address': None, - u'_context_request_id': u'req-659a8eb2-4372-4c01-9028-ad6e40b0ed22', - u'_context_roles': [u'admin'], - u'_context_timestamp': u'2012-05-08T16:03:43.760204', - u'_context_user_id': None, - u'event_type': u'compute.instance.exists', - u'message_id': u'4b884c03-756d-4c06-8b42-80b6def9d302', - u'payload': {u'audit_period_beginning': u'2012-05-08 15:00:00', - u'audit_period_ending': u'2012-05-08 16:00:00', - u'bandwidth': {}, - u'created_at': u'2012-05-07 22:16:18', - u'deleted_at': u'', - u'disk_gb': 0, - u'display_name': u'testme', - u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', - u'instance_id': u'3a513875-95c9-4012-a3e7-f90c678854e5', - u'instance_type': u'm1.tiny', - u'instance_type_id': 2, - u'launched_at': u'2012-05-07 23:01:27', - u'memory_mb': 512, - u'state': u'active', - u'state_description': u'', - u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', - u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', - u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', - u'vcpus': 1, - u'root_gb': 0, - u'ephemeral_gb': 0, - u'host': u'compute-host-name', - u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', - u'os_type': u'linux?', - u'architecture': u'x86', - u'image_ref': u'UUID', - u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', - u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', - }, - u'priority': u'INFO', - u'publisher_id': u'compute.vagrant-precise', - u'timestamp': u'2012-05-08 16:03:44.122481', -} - -INSTANCE_EXISTS_METADATA_LIST = { - u'_context_auth_token': None, - u'_context_is_admin': True, - u'_context_project_id': None, - u'_context_quota_class': None, - u'_context_read_deleted': u'no', - u'_context_remote_address': None, - u'_context_request_id': u'req-659a8eb2-4372-4c01-9028-ad6e40b0ed22', - u'_context_roles': [u'admin'], - u'_context_timestamp': u'2012-05-08T16:03:43.760204', - u'_context_user_id': None, - u'event_type': u'compute.instance.exists', - u'message_id': u'4b884c03-756d-4c06-8b42-80b6def9d302', - u'payload': {u'audit_period_beginning': u'2012-05-08 15:00:00', - u'audit_period_ending': u'2012-05-08 16:00:00', - u'bandwidth': {}, - u'created_at': u'2012-05-07 22:16:18', - u'deleted_at': u'', - u'disk_gb': 0, - u'display_name': u'testme', - u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', - u'instance_id': u'3a513875-95c9-4012-a3e7-f90c678854e5', - u'instance_type': u'm1.tiny', - u'instance_type_id': 2, - u'launched_at': u'2012-05-07 23:01:27', - u'memory_mb': 512, - u'state': u'active', - u'state_description': u'', - u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', - u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', - u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', - u'vcpus': 1, - u'root_gb': 0, - u'metadata': [], - u'ephemeral_gb': 0, - u'host': u'compute-host-name', - u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', - u'os_type': u'linux?', - u'architecture': u'x86', - u'image_ref': u'UUID', - u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', - u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', - }, - u'priority': u'INFO', - u'publisher_id': u'compute.vagrant-precise', - u'timestamp': u'2012-05-08 16:03:44.122481', -} - - -INSTANCE_FINISH_RESIZE_END = { - u'_context_roles': [u'admin'], - u'_context_request_id': u'req-e3f71bb9-e9b9-418b-a9db-a5950c851b25', - 
u'_context_quota_class': None, - u'event_type': u'compute.instance.finish_resize.end', - u'_context_user_name': u'admin', - u'_context_project_name': u'admin', - u'timestamp': u'2013-01-04 15:10:17.436974', - u'_context_is_admin': True, - u'message_id': u'a2f7770d-b85d-4797-ab10-41407a44368e', - u'_context_auth_token': None, - u'_context_instance_lock_checked': False, - u'_context_project_id': u'cea4b25edb484e5392727181b7721d29', - u'_context_timestamp': u'2013-01-04T15:08:39.162612', - u'_context_read_deleted': u'no', - u'_context_user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed', - u'_context_remote_address': u'10.147.132.184', - u'publisher_id': u'compute.ip-10-147-132-184.ec2.internal', - u'payload': {u'state_description': u'', - u'availability_zone': None, - u'ephemeral_gb': 0, - u'instance_type_id': 5, - u'deleted_at': u'', - u'fixed_ips': [{u'floating_ips': [], - u'label': u'private', - u'version': 4, - u'meta': {}, - u'address': u'10.0.0.3', - u'type': u'fixed'}], - u'memory_mb': 2048, - u'user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed', - u'reservation_id': u'r-u3fvim06', - u'hostname': u's1', - u'state': u'resized', - u'launched_at': u'2013-01-04T15:10:14.923939', - u'metadata': {u'metering.server_group': u'Group_A', - u'AutoScalingGroupName': u'tyky-Group_Awste7', - u'metering.foo.bar': u'true'}, - u'ramdisk_id': u'5f23128e-5525-46d8-bc66-9c30cd87141a', - u'access_ip_v6': None, - u'disk_gb': 20, - u'access_ip_v4': None, - u'kernel_id': u'571478e0-d5e7-4c2e-95a5-2bc79443c28a', - u'host': u'ip-10-147-132-184.ec2.internal', - u'display_name': u's1', - u'image_ref_url': u'http://10.147.132.184:9292/images/' - 'a130b9d9-e00e-436e-9782-836ccef06e8a', - u'root_gb': 20, - u'tenant_id': u'cea4b25edb484e5392727181b7721d29', - u'created_at': u'2013-01-04T11:21:48.000000', - u'instance_id': u'648e8963-6886-4c3c-98f9-4511c292f86b', - u'instance_type': u'm1.small', - u'vcpus': 1, - u'image_meta': {u'kernel_id': - u'571478e0-d5e7-4c2e-95a5-2bc79443c28a', - u'ramdisk_id': - u'5f23128e-5525-46d8-bc66-9c30cd87141a', - u'base_image_ref': - u'a130b9d9-e00e-436e-9782-836ccef06e8a'}, - u'architecture': None, - u'os_type': None - }, - u'priority': u'INFO' -} - -INSTANCE_RESIZE_REVERT_END = { - u'_context_roles': [u'admin'], - u'_context_request_id': u'req-9da1d714-dabe-42fd-8baa-583e57cd4f1a', - u'_context_quota_class': None, - u'event_type': u'compute.instance.resize.revert.end', - u'_context_user_name': u'admin', - u'_context_project_name': u'admin', - u'timestamp': u'2013-01-04 15:20:32.009532', - u'_context_is_admin': True, - u'message_id': u'c48deeba-d0c3-4154-b3db-47480b52267a', - u'_context_auth_token': None, - u'_context_instance_lock_checked': False, - u'_context_project_id': u'cea4b25edb484e5392727181b7721d29', - u'_context_timestamp': u'2013-01-04T15:19:51.018218', - u'_context_read_deleted': u'no', - u'_context_user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed', - u'_context_remote_address': u'10.147.132.184', - u'publisher_id': u'compute.ip-10-147-132-184.ec2.internal', - u'payload': {u'state_description': u'resize_reverting', - u'availability_zone': None, - u'ephemeral_gb': 0, - u'instance_type_id': 2, - u'deleted_at': u'', - u'reservation_id': u'r-u3fvim06', - u'memory_mb': 512, - u'user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed', - u'hostname': u's1', - u'state': u'resized', - u'launched_at': u'2013-01-04T15:10:14.000000', - u'metadata': {u'metering.server_group': u'Group_A', - u'AutoScalingGroupName': u'tyky-Group_A-wste7', - u'metering.foo.bar': u'true'}, - u'ramdisk_id': 
u'5f23128e-5525-46d8-bc66-9c30cd87141a', - u'access_ip_v6': None, - u'disk_gb': 0, - u'access_ip_v4': None, - u'kernel_id': u'571478e0-d5e7-4c2e-95a5-2bc79443c28a', - u'host': u'ip-10-147-132-184.ec2.internal', - u'display_name': u's1', - u'image_ref_url': u'http://10.147.132.184:9292/images/' - 'a130b9d9-e00e-436e-9782-836ccef06e8a', - u'root_gb': 0, - u'tenant_id': u'cea4b25edb484e5392727181b7721d29', - u'created_at': u'2013-01-04T11:21:48.000000', - u'instance_id': u'648e8963-6886-4c3c-98f9-4511c292f86b', - u'instance_type': u'm1.tiny', - u'vcpus': 1, - u'image_meta': {u'kernel_id': - u'571478e0-d5e7-4c2e-95a5-2bc79443c28a', - u'ramdisk_id': - u'5f23128e-5525-46d8-bc66-9c30cd87141a', - u'base_image_ref': - u'a130b9d9-e00e-436e-9782-836ccef06e8a'}, - u'architecture': None, - u'os_type': None - }, - u'priority': u'INFO' -} - -INSTANCE_DELETE_SAMPLES = { - u'_context_roles': [u'admin'], - u'_context_request_id': u'req-9da1d714-dabe-42fd-8baa-583e57cd4f1a', - u'_context_quota_class': None, - u'event_type': u'compute.instance.delete.samples', - u'_context_user_name': u'admin', - u'_context_project_name': u'admin', - u'timestamp': u'2013-01-04 15:20:32.009532', - u'_context_is_admin': True, - u'message_id': u'c48deeba-d0c3-4154-b3db-47480b52267a', - u'_context_auth_token': None, - u'_context_instance_lock_checked': False, - u'_context_project_id': u'cea4b25edb484e5392727181b7721d29', - u'_context_timestamp': u'2013-01-04T15:19:51.018218', - u'_context_read_deleted': u'no', - u'_context_user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed', - u'_context_remote_address': u'10.147.132.184', - u'publisher_id': u'compute.ip-10-147-132-184.ec2.internal', - u'payload': {u'state_description': u'resize_reverting', - u'availability_zone': None, - u'ephemeral_gb': 0, - u'instance_type_id': 2, - u'deleted_at': u'', - u'reservation_id': u'r-u3fvim06', - u'memory_mb': 512, - u'user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed', - u'hostname': u's1', - u'state': u'resized', - u'launched_at': u'2013-01-04T15:10:14.000000', - u'metadata': {u'metering.server_group': u'Group_A', - u'AutoScalingGroupName': u'tyky-Group_A-wste7', - u'metering.foo.bar': u'true'}, - u'ramdisk_id': u'5f23128e-5525-46d8-bc66-9c30cd87141a', - u'access_ip_v6': None, - u'disk_gb': 0, - u'access_ip_v4': None, - u'kernel_id': u'571478e0-d5e7-4c2e-95a5-2bc79443c28a', - u'host': u'ip-10-147-132-184.ec2.internal', - u'display_name': u's1', - u'image_ref_url': u'http://10.147.132.184:9292/images/' - 'a130b9d9-e00e-436e-9782-836ccef06e8a', - u'root_gb': 0, - u'tenant_id': u'cea4b25edb484e5392727181b7721d29', - u'created_at': u'2013-01-04T11:21:48.000000', - u'instance_id': u'648e8963-6886-4c3c-98f9-4511c292f86b', - u'instance_type': u'm1.tiny', - u'vcpus': 1, - u'image_meta': {u'kernel_id': - u'571478e0-d5e7-4c2e-95a5-2bc79443c28a', - u'ramdisk_id': - u'5f23128e-5525-46d8-bc66-9c30cd87141a', - u'base_image_ref': - u'a130b9d9-e00e-436e-9782-836ccef06e8a'}, - u'architecture': None, - u'os_type': None, - u'samples': [{u'name': u'sample-name1', - u'type': u'sample-type1', - u'unit': u'sample-units1', - u'volume': 1}, - {u'name': u'sample-name2', - u'type': u'sample-type2', - u'unit': u'sample-units2', - u'volume': 2}, - ], - }, - u'priority': u'INFO' -} - -INSTANCE_SCHEDULED = { - u'_context_request_id': u'req-f28a836a-32bf-4cc3-940a-3515878c181f', - u'_context_quota_class': None, - u'event_type': u'scheduler.run_instance.scheduled', - u'_context_service_catalog': [{ - u'endpoints': [{ - u'adminURL': - 
u'http://172.16.12.21:8776/v1/2bd766a095b44486bf07cf7f666997eb', - u'region': u'RegionOne', - u'internalURL': - u'http://172.16.12.21:8776/v1/2bd766a095b44486bf07cf7f666997eb', - u'id': u'30cb904fdc294eea9b225e06b2d0d4eb', - u'publicURL': - u'http://172.16.12.21:8776/v1/2bd766a095b44486bf07cf7f666997eb'}], - u'endpoints_links': [], - u'type': u'volume', - u'name': u'cinder'}], - u'_context_auth_token': u'TOK', - u'_context_user_id': u'0a757cd896b64b65ba3784afef564116', - u'payload': { - 'instance_id': 'fake-uuid1-1', - u'weighted_host': {u'host': u'eglynn-f19-devstack3', u'weight': 1.0}, - u'request_spec': { - u'num_instances': 1, - u'block_device_mapping': [{ - u'instance_uuid': u'9206baae-c3b6-41bc-96f2-2c0726ff51c8', - u'guest_format': None, - u'boot_index': 0, - u'no_device': None, - u'connection_info': None, - u'volume_id': None, - u'volume_size': None, - u'device_name': None, - u'disk_bus': None, - u'image_id': u'0560ac3f-3bcd-434d-b012-8dd7a212b73b', - u'source_type': u'image', - u'device_type': u'disk', - u'snapshot_id': None, - u'destination_type': u'local', - u'delete_on_termination': True}], - u'image': { - u'status': u'active', - u'name': u'cirros-0.3.1-x86_64-uec', - u'deleted': False, - u'container_format': u'ami', - u'created_at': u'2014-02-18T13:16:26.000000', - u'disk_format': u'ami', - u'updated_at': u'2014-02-18T13:16:27.000000', - u'properties': { - u'kernel_id': u'c8794c1a-4158-42cc-9f97-d0d250c9c6a4', - u'ramdisk_id': u'4999726c-545c-4a9e-bfc0-917459784275'}, - u'min_disk': 0, - u'min_ram': 0, - u'checksum': u'f8a2eeee2dc65b3d9b6e63678955bd83', - u'owner': u'2bd766a095b44486bf07cf7f666997eb', - u'is_public': True, - u'deleted_at': None, - u'id': u'0560ac3f-3bcd-434d-b012-8dd7a212b73b', - u'size': 25165824}, - u'instance_type': { - u'root_gb': 1, - u'name': u'm1.tiny', - u'ephemeral_gb': 0, - u'memory_mb': 512, - u'vcpus': 1, - u'extra_specs': {}, - u'swap': 0, - u'rxtx_factor': 1.0, - u'flavorid': u'1', - u'vcpu_weight': None, - u'id': 2}, - u'instance_properties': { - u'vm_state': u'building', - u'availability_zone': None, - u'terminated_at': None, - u'ephemeral_gb': 0, - u'instance_type_id': 2, - u'user_data': None, - u'cleaned': False, - u'vm_mode': None, - u'deleted_at': None, - u'reservation_id': u'r-ven5q6om', - u'id': 15, - u'security_groups': [{ - u'deleted_at': None, - u'user_id': u'0a757cd896b64b65ba3784afef564116', - u'description': u'default', - u'deleted': False, - u'created_at': u'2014-02-19T11:02:31.000000', - u'updated_at': None, - u'project_id': u'2bd766a095b44486bf07cf7f666997eb', - u'id': 1, - u'name': u'default'}], - u'disable_terminate': False, - u'root_device_name': None, - u'display_name': u'new', - u'uuid': u'9206baae-c3b6-41bc-96f2-2c0726ff51c8', - u'default_swap_device': None, - u'info_cache': { - u'instance_uuid': u'9206baae-c3b6-41bc-96f2-2c0726ff51c8', - u'deleted': False, - u'created_at': u'2014-03-05T12:44:00.000000', - u'updated_at': None, - u'network_info': [], - u'deleted_at': None}, - u'hostname': u'new', - u'launched_on': None, - u'display_description': u'new', - u'key_data': None, - u'deleted': False, - u'config_drive': u'', - u'power_state': 0, - u'default_ephemeral_device': None, - u'progress': 0, - u'project_id': u'2bd766a095b44486bf07cf7f666997eb', - u'launched_at': None, - u'scheduled_at': None, - u'node': None, - u'ramdisk_id': u'4999726c-545c-4a9e-bfc0-917459784275', - u'access_ip_v6': None, - u'access_ip_v4': None, - u'kernel_id': u'c8794c1a-4158-42cc-9f97-d0d250c9c6a4', - u'key_name': None, - u'updated_at': None, - 
u'host': None, - u'root_gb': 1, - u'user_id': u'0a757cd896b64b65ba3784afef564116', - u'system_metadata': { - u'image_kernel_id': - u'c8794c1a-4158-42cc-9f97-d0d250c9c6a4', - u'image_min_disk': u'1', - u'instance_type_memory_mb': u'512', - u'instance_type_swap': u'0', - u'instance_type_vcpu_weight': None, - u'instance_type_root_gb': u'1', - u'instance_type_name': u'm1.tiny', - u'image_ramdisk_id': - u'4999726c-545c-4a9e-bfc0-917459784275', - u'instance_type_id': u'2', - u'instance_type_ephemeral_gb': u'0', - u'instance_type_rxtx_factor': u'1.0', - u'instance_type_flavorid': u'1', - u'instance_type_vcpus': u'1', - u'image_container_format': u'ami', - u'image_min_ram': u'0', - u'image_disk_format': u'ami', - u'image_base_image_ref': - u'0560ac3f-3bcd-434d-b012-8dd7a212b73b'}, - u'task_state': u'scheduling', - u'shutdown_terminate': False, - u'cell_name': None, - u'ephemeral_key_uuid': None, - u'locked': False, - u'name': u'instance-0000000f', - u'created_at': u'2014-03-05T12:44:00.000000', - u'locked_by': None, - u'launch_index': 0, - u'memory_mb': 512, - u'vcpus': 1, - u'image_ref': u'0560ac3f-3bcd-434d-b012-8dd7a212b73b', - u'architecture': None, - u'auto_disk_config': False, - u'os_type': None, - u'metadata': {u'metering.server_group': u'Group_A', - u'AutoScalingGroupName': u'tyky-Group_Awste7', - u'metering.foo.bar': u'true'}}, - u'security_group': [u'default'], - u'instance_uuids': [u'9206baae-c3b6-41bc-96f2-2c0726ff51c8']}}, - u'priority': u'INFO', - u'_context_is_admin': True, - u'_context_timestamp': u'2014-03-05T12:44:00.135674', - u'publisher_id': u'scheduler.eglynn-f19-devstack3', - u'message_id': u'd6c1ae63-a26b-47c7-8397-8794216e09dd', - u'_context_remote_address': u'172.16.12.21', - u'_context_roles': [u'_member_', u'admin'], - u'timestamp': u'2014-03-05 12:44:00.733758', - u'_context_user': u'0a757cd896b64b65ba3784afef564116', - u'_unique_id': u'2af47cbdde604ff794bb046f3f9db1e2', - u'_context_project_name': u'admin', - u'_context_read_deleted': u'no', - u'_context_tenant': u'2bd766a095b44486bf07cf7f666997eb', - u'_context_instance_lock_checked': False, - u'_context_project_id': u'2bd766a095b44486bf07cf7f666997eb', - u'_context_user_name': u'admin' -} - - -class TestNotifications(base.BaseTestCase): - - def test_process_notification(self): - info = list(instance.Instance(None).process_notification( - INSTANCE_CREATE_END - ))[0] - for name, actual, expected in [ - ('counter_name', info.name, 'instance'), - ('counter_type', info.type, sample.TYPE_GAUGE), - ('counter_volume', info.volume, 1), - ('timestamp', info.timestamp, - INSTANCE_CREATE_END['timestamp']), - ('resource_id', info.resource_id, - INSTANCE_CREATE_END['payload']['instance_id']), - ('instance_type_id', - info.resource_metadata['instance_type_id'], - INSTANCE_CREATE_END['payload']['instance_type_id']), - ('host', info.resource_metadata['host'], - INSTANCE_CREATE_END['publisher_id']), - ]: - self.assertEqual(expected, actual, name) - - @staticmethod - def _find_counter(counters, name): - return filter(lambda counter: counter.name == name, counters)[0] - - def _verify_user_metadata(self, metadata): - self.assertIn('user_metadata', metadata) - user_meta = metadata['user_metadata'] - self.assertEqual('Group_A', user_meta.get('server_group')) - self.assertNotIn('AutoScalingGroupName', user_meta) - self.assertIn('foo_bar', user_meta) - self.assertNotIn('foo.bar', user_meta) - - def test_instance_create_instance(self): - ic = instance.Instance(None) - counters = list(ic.process_notification(INSTANCE_CREATE_END)) - 
self.assertEqual(1, len(counters)) - c = counters[0] - self.assertEqual(1, c.volume) - - def test_instance_create_flavor(self): - ic = instance.InstanceFlavor(None) - counters = list(ic.process_notification(INSTANCE_CREATE_END)) - self.assertEqual(1, len(counters)) - c = counters[0] - self.assertEqual(1, c.volume) - - def test_instance_create_memory(self): - ic = instance.Memory(None) - counters = list(ic.process_notification(INSTANCE_CREATE_END)) - self.assertEqual(1, len(counters)) - c = counters[0] - self.assertEqual(INSTANCE_CREATE_END['payload']['memory_mb'], c.volume) - - def test_instance_create_vcpus(self): - ic = instance.VCpus(None) - counters = list(ic.process_notification(INSTANCE_CREATE_END)) - self.assertEqual(1, len(counters)) - c = counters[0] - self.assertEqual(INSTANCE_CREATE_END['payload']['vcpus'], c.volume) - - def test_instance_create_root_disk_size(self): - ic = instance.RootDiskSize(None) - counters = list(ic.process_notification(INSTANCE_CREATE_END)) - self.assertEqual(1, len(counters)) - c = counters[0] - self.assertEqual(INSTANCE_CREATE_END['payload']['root_gb'], c.volume) - - def test_instance_create_ephemeral_disk_size(self): - ic = instance.EphemeralDiskSize(None) - counters = list(ic.process_notification(INSTANCE_CREATE_END)) - self.assertEqual(1, len(counters)) - c = counters[0] - self.assertEqual(INSTANCE_CREATE_END['payload']['ephemeral_gb'], - c.volume) - - def test_instance_exists_instance(self): - ic = instance.Instance(None) - counters = list(ic.process_notification(INSTANCE_EXISTS)) - self.assertEqual(1, len(counters)) - - def test_instance_exists_metadata_list(self): - ic = instance.Instance(None) - counters = list(ic.process_notification(INSTANCE_EXISTS_METADATA_LIST)) - self.assertEqual(1, len(counters)) - - def test_instance_exists_flavor(self): - ic = instance.Instance(None) - counters = list(ic.process_notification(INSTANCE_EXISTS)) - self.assertEqual(1, len(counters)) - - def test_instance_delete_instance(self): - ic = instance.Instance(None) - counters = list(ic.process_notification(INSTANCE_DELETE_START)) - self.assertEqual(1, len(counters)) - - def test_instance_delete_flavor(self): - ic = instance.Instance(None) - counters = list(ic.process_notification(INSTANCE_DELETE_START)) - self.assertEqual(1, len(counters)) - - def test_instance_finish_resize_instance(self): - ic = instance.Instance(None) - counters = list(ic.process_notification(INSTANCE_FINISH_RESIZE_END)) - self.assertEqual(1, len(counters)) - c = counters[0] - self.assertEqual(1, c.volume) - self._verify_user_metadata(c.resource_metadata) - - def test_instance_finish_resize_flavor(self): - ic = instance.InstanceFlavor(None) - counters = list(ic.process_notification(INSTANCE_FINISH_RESIZE_END)) - self.assertEqual(1, len(counters)) - c = counters[0] - self.assertEqual(1, c.volume) - self.assertEqual('instance:m1.small', c.name) - self._verify_user_metadata(c.resource_metadata) - - def test_instance_finish_resize_memory(self): - ic = instance.Memory(None) - counters = list(ic.process_notification(INSTANCE_FINISH_RESIZE_END)) - self.assertEqual(1, len(counters)) - c = counters[0] - self.assertEqual(INSTANCE_FINISH_RESIZE_END['payload']['memory_mb'], - c.volume) - self._verify_user_metadata(c.resource_metadata) - - def test_instance_finish_resize_vcpus(self): - ic = instance.VCpus(None) - counters = list(ic.process_notification(INSTANCE_FINISH_RESIZE_END)) - self.assertEqual(1, len(counters)) - c = counters[0] - self.assertEqual(INSTANCE_FINISH_RESIZE_END['payload']['vcpus'], - 
                         c.volume)
-        self._verify_user_metadata(c.resource_metadata)
-
-    def test_instance_resize_finish_instance(self):
-        ic = instance.Instance(None)
-        counters = list(ic.process_notification(INSTANCE_FINISH_RESIZE_END))
-        self.assertEqual(1, len(counters))
-        c = counters[0]
-        self.assertEqual(1, c.volume)
-        self._verify_user_metadata(c.resource_metadata)
-
-    def test_instance_resize_finish_flavor(self):
-        ic = instance.InstanceFlavor(None)
-        counters = list(ic.process_notification(INSTANCE_RESIZE_REVERT_END))
-        self.assertEqual(1, len(counters))
-        c = counters[0]
-        self.assertEqual(1, c.volume)
-        self.assertEqual('instance:m1.tiny', c.name)
-        self._verify_user_metadata(c.resource_metadata)
-
-    def test_instance_resize_finish_memory(self):
-        ic = instance.Memory(None)
-        counters = list(ic.process_notification(INSTANCE_RESIZE_REVERT_END))
-        self.assertEqual(1, len(counters))
-        c = counters[0]
-        self.assertEqual(INSTANCE_RESIZE_REVERT_END['payload']['memory_mb'],
-                         c.volume)
-        self._verify_user_metadata(c.resource_metadata)
-
-    def test_instance_resize_finish_vcpus(self):
-        ic = instance.VCpus(None)
-        counters = list(ic.process_notification(INSTANCE_RESIZE_REVERT_END))
-        self.assertEqual(1, len(counters))
-        c = counters[0]
-        self.assertEqual(INSTANCE_RESIZE_REVERT_END['payload']['vcpus'],
-                         c.volume)
-        self._verify_user_metadata(c.resource_metadata)
-
-    def test_instance_delete_samples(self):
-        ic = instance.InstanceDelete(None)
-        counters = list(ic.process_notification(INSTANCE_DELETE_SAMPLES))
-        self.assertEqual(2, len(counters))
-        names = [c.name for c in counters]
-        self.assertEqual(['sample-name1', 'sample-name2'], names)
-        c = counters[0]
-        self._verify_user_metadata(c.resource_metadata)
-
-    def test_instance_scheduled(self):
-        ic = instance.InstanceScheduled(None)
-
-        self.assertIn(INSTANCE_SCHEDULED['event_type'],
-                      ic.event_types)
-
-        counters = list(ic.process_notification(INSTANCE_SCHEDULED))
-        self.assertEqual(1, len(counters))
-        names = [c.name for c in counters]
-        self.assertEqual(['instance.scheduled'], names)
-        rid = [c.resource_id for c in counters]
-        self.assertEqual(['fake-uuid1-1'], rid)
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/compute/pollsters/base.py ceilometer-5.0.0~b3/ceilometer/tests/compute/pollsters/base.py
--- ceilometer-5.0.0~b2/ceilometer/tests/compute/pollsters/base.py 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/compute/pollsters/base.py 1970-01-01 00:00:00.000000000 +0000
@@ -1,54 +0,0 @@
-#
-# Copyright 2012 eNovance
-# Copyright 2012 Red Hat, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
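# The deleted tests/compute/pollsters/base.py below provides the shared
# fixture for the compute pollster tests: it patches the lazily created
# hypervisor inspector singleton with a mock and builds a canned instance.
# A minimal sketch of how a pollster test builds on that fixture -- the
# module, class and meter names are taken from the files in this diff, but
# the test body itself is illustrative only:

import mock

from ceilometer.agent import manager
from ceilometer.compute.pollsters import cpu
from ceilometer.compute.virt import inspector as virt_inspector
from ceilometer.tests.compute.pollsters import base


class TestCPUSketch(base.TestPollsterBase):

    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
    def test_single_sample(self):
        # self.inspector is the mock installed by TestPollsterBase.setUp()
        self.inspector.inspect_cpus = mock.Mock(
            return_value=virt_inspector.CPUStats(time=10 ** 6, number=2))
        mgr = manager.AgentManager()
        samples = list(
            cpu.CPUPollster().get_samples(mgr, {}, [self.instance]))
        self.assertEqual(1, len(samples))
        self.assertEqual('cpu', samples[0].name)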
-
-import mock
-from oslotest import mockpatch
-
-import ceilometer.tests.base as base
-
-
-class TestPollsterBase(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestPollsterBase, self).setUp()
-
-        self.inspector = mock.Mock()
-        self.instance = mock.MagicMock()
-        self.instance.name = 'instance-00000001'
-        setattr(self.instance, 'OS-EXT-SRV-ATTR:instance_name',
-                self.instance.name)
-        self.instance.id = 1
-        self.instance.flavor = {'name': 'm1.small', 'id': 2, 'vcpus': 1,
-                                'ram': 512, 'disk': 20, 'ephemeral': 0}
-        self.instance.status = 'active'
-        self.instance.metadata = {
-            'fqdn': 'vm_fqdn',
-            'metering.stack': '2cadc4b4-8789-123c-b4eg-edd2f0a9c128',
-            'project_cos': 'dev'}
-
-        patch_virt = mockpatch.Patch(
-            'ceilometer.compute.virt.inspector.get_hypervisor_inspector',
-            new=mock.Mock(return_value=self.inspector))
-        self.useFixture(patch_virt)
-
-        # as we're having lazy hypervisor inspector singleton object in the
-        # base compute pollster class, that leads to the fact that we
-        # need to mock all this class property to avoid context sharing between
-        # the tests
-        patch_inspector = mockpatch.Patch(
-            'ceilometer.compute.pollsters.BaseComputePollster.inspector',
-            self.inspector)
-        self.useFixture(patch_inspector)
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/compute/pollsters/test_cpu.py ceilometer-5.0.0~b3/ceilometer/tests/compute/pollsters/test_cpu.py
--- ceilometer-5.0.0~b2/ceilometer/tests/compute/pollsters/test_cpu.py 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/compute/pollsters/test_cpu.py 1970-01-01 00:00:00.000000000 +0000
@@ -1,108 +0,0 @@
-#
-# Copyright 2012 eNovance
-# Copyright 2012 Red Hat, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
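# The CPU pollster tests below feed cumulative cpu_time readings of 1e6,
# 3e6 and then 2e6 nanoseconds; the drop mimics the counter reset that
# happens when an instance restarts. A self-contained sketch of the delta
# logic a rate transformer needs for such a counter (cpu_time_delta is a
# hypothetical helper, not a ceilometer API):

def cpu_time_delta(prev_ns, curr_ns):
    # A reading below its predecessor means the counter was reset, so the
    # new reading itself is the best available estimate of the delta.
    return curr_ns if curr_ns < prev_ns else curr_ns - prev_ns

assert cpu_time_delta(1 * 10 ** 6, 3 * 10 ** 6) == 2 * 10 ** 6
assert cpu_time_delta(3 * 10 ** 6, 2 * 10 ** 6) == 2 * 10 ** 6  # after reset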
-
-import time
-
-import mock
-
-from ceilometer.agent import manager
-from ceilometer.compute.pollsters import cpu
-from ceilometer.compute.virt import inspector as virt_inspector
-from ceilometer.tests.compute.pollsters import base
-
-
-class TestCPUPollster(base.TestPollsterBase):
-
-    def setUp(self):
-        super(TestCPUPollster, self).setUp()
-
-    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
-    def test_get_samples(self):
-        next_value = iter((
-            virt_inspector.CPUStats(time=1 * (10 ** 6), number=2),
-            virt_inspector.CPUStats(time=3 * (10 ** 6), number=2),
-            # cpu_time resets on instance restart
-            virt_inspector.CPUStats(time=2 * (10 ** 6), number=2),
-        ))
-
-        def inspect_cpus(name):
-            return next(next_value)
-
-        self.inspector.inspect_cpus = mock.Mock(side_effect=inspect_cpus)
-
-        mgr = manager.AgentManager()
-        pollster = cpu.CPUPollster()
-
-        def _verify_cpu_metering(expected_time):
-            cache = {}
-            samples = list(pollster.get_samples(mgr, cache, [self.instance]))
-            self.assertEqual(1, len(samples))
-            self.assertEqual(set(['cpu']), set([s.name for s in samples]))
-            self.assertEqual(expected_time, samples[0].volume)
-            self.assertEqual(2, samples[0].resource_metadata.get('cpu_number'))
-            # ensure elapsed time between polling cycles is non-zero
-            time.sleep(0.001)
-
-        _verify_cpu_metering(1 * (10 ** 6))
-        _verify_cpu_metering(3 * (10 ** 6))
-        _verify_cpu_metering(2 * (10 ** 6))
-
-    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
-    def test_get_samples_no_caching(self):
-        cpu_stats = virt_inspector.CPUStats(time=1 * (10 ** 6), number=2)
-        self.inspector.inspect_cpus = mock.Mock(return_value=cpu_stats)
-
-        mgr = manager.AgentManager()
-        pollster = cpu.CPUPollster()
-
-        cache = {}
-        samples = list(pollster.get_samples(mgr, cache, [self.instance]))
-        self.assertEqual(1, len(samples))
-        self.assertEqual(10 ** 6, samples[0].volume)
-        self.assertEqual(0, len(cache))
-
-
-class TestCPUUtilPollster(base.TestPollsterBase):
-
-    def setUp(self):
-        super(TestCPUUtilPollster, self).setUp()
-
-    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
-    def test_get_samples(self):
-        next_value = iter((
-            virt_inspector.CPUUtilStats(util=40),
-            virt_inspector.CPUUtilStats(util=60),
-        ))
-
-        def inspect_cpu_util(name, duration):
-            return next(next_value)
-
-        self.inspector.inspect_cpu_util = (mock.
-                                           Mock(side_effect=inspect_cpu_util))
-
-        mgr = manager.AgentManager()
-        pollster = cpu.CPUUtilPollster()
-
-        def _verify_cpu_util_metering(expected_util):
-            cache = {}
-            samples = list(pollster.get_samples(mgr, cache, [self.instance]))
-            self.assertEqual(1, len(samples))
-            self.assertEqual(set(['cpu_util']),
-                             set([s.name for s in samples]))
-            self.assertEqual(expected_util, samples[0].volume)
-
-        _verify_cpu_util_metering(40)
-        _verify_cpu_util_metering(60)
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/compute/pollsters/test_diskio.py ceilometer-5.0.0~b3/ceilometer/tests/compute/pollsters/test_diskio.py
--- ceilometer-5.0.0~b2/ceilometer/tests/compute/pollsters/test_diskio.py 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/compute/pollsters/test_diskio.py 1970-01-01 00:00:00.000000000 +0000
@@ -1,361 +0,0 @@
-#
-# Copyright 2012 eNovance
-# Copyright 2012 Red Hat, Inc
-# Copyright 2014 Cisco Systems, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import mock -from oslotest import mockpatch - -from ceilometer.agent import manager -from ceilometer.compute.pollsters import disk -from ceilometer.compute.virt import inspector as virt_inspector -import ceilometer.tests.base as base - - -class TestBaseDiskIO(base.BaseTestCase): - - TYPE = 'cumulative' - - def setUp(self): - super(TestBaseDiskIO, self).setUp() - - self.inspector = mock.Mock() - self.instance = self._get_fake_instances() - patch_virt = mockpatch.Patch( - 'ceilometer.compute.virt.inspector.get_hypervisor_inspector', - new=mock.Mock(return_value=self.inspector)) - self.useFixture(patch_virt) - - # as we're having lazy hypervisor inspector singleton object in the - # base compute pollster class, that leads to the fact that we - # need to mock all this class property to avoid context sharing between - # the tests - patch_inspector = mockpatch.Patch( - 'ceilometer.compute.pollsters.BaseComputePollster.inspector', - self.inspector) - self.useFixture(patch_inspector) - - @staticmethod - def _get_fake_instances(): - instances = [] - for i in [1, 2]: - instance = mock.MagicMock() - instance.name = 'instance-%s' % i - setattr(instance, 'OS-EXT-SRV-ATTR:instance_name', - instance.name) - instance.id = i - instance.flavor = {'name': 'm1.small', 'id': 2, 'vcpus': 1, - 'ram': 512, 'disk': 20, 'ephemeral': 0} - instance.status = 'active' - instances.append(instance) - return instances - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def _check_get_samples(self, factory, name, expected_count=2): - pollster = factory() - - mgr = manager.AgentManager() - cache = {} - samples = list(pollster.get_samples(mgr, cache, self.instance)) - self.assertIsNotEmpty(samples) - cache_key = getattr(pollster, self.CACHE_KEY) - self.assertIn(cache_key, cache) - for instance in self.instance: - self.assertIn(instance.id, cache[cache_key]) - self.assertEqual(set([name]), set([s.name for s in samples])) - - match = [s for s in samples if s.name == name] - self.assertEqual(len(match), expected_count, - 'missing counter %s' % name) - return match - - def _check_aggregate_samples(self, factory, name, - expected_volume, - expected_device=None): - match = self._check_get_samples(factory, name) - self.assertEqual(expected_volume, match[0].volume) - self.assertEqual(self.TYPE, match[0].type) - if expected_device is not None: - self.assertEqual(set(expected_device), - set(match[0].resource_metadata.get('device'))) - instances = [i.id for i in self.instance] - for m in match: - self.assertIn(m.resource_id, instances) - - def _check_per_device_samples(self, factory, name, - expected_volume, - expected_device=None): - match = self._check_get_samples(factory, name, expected_count=4) - match_dict = {} - for m in match: - match_dict[m.resource_id] = m - for instance in self.instance: - key = "%s-%s" % (instance.id, expected_device) - self.assertEqual(expected_volume, - match_dict[key].volume) - self.assertEqual(self.TYPE, match_dict[key].type) - - self.assertEqual(key, match_dict[key].resource_id) - - -class TestDiskPollsters(TestBaseDiskIO): - - DISKS = [ - 
(virt_inspector.Disk(device='vda1'), - virt_inspector.DiskStats(read_bytes=1, read_requests=2, - write_bytes=3, write_requests=4, - errors=-1)), - (virt_inspector.Disk(device='vda2'), - virt_inspector.DiskStats(read_bytes=2, read_requests=3, - write_bytes=5, write_requests=7, - errors=-1)), - ] - CACHE_KEY = "CACHE_KEY_DISK" - - def setUp(self): - super(TestDiskPollsters, self).setUp() - self.inspector.inspect_disks = mock.Mock(return_value=self.DISKS) - - def test_disk_read_requests(self): - self._check_aggregate_samples(disk.ReadRequestsPollster, - 'disk.read.requests', 5, - expected_device=['vda1', 'vda2']) - - def test_disk_read_bytes(self): - self._check_aggregate_samples(disk.ReadBytesPollster, - 'disk.read.bytes', 3, - expected_device=['vda1', 'vda2']) - - def test_disk_write_requests(self): - self._check_aggregate_samples(disk.WriteRequestsPollster, - 'disk.write.requests', 11, - expected_device=['vda1', 'vda2']) - - def test_disk_write_bytes(self): - self._check_aggregate_samples(disk.WriteBytesPollster, - 'disk.write.bytes', 8, - expected_device=['vda1', 'vda2']) - - def test_per_disk_read_requests(self): - self._check_per_device_samples(disk.PerDeviceReadRequestsPollster, - 'disk.device.read.requests', 2, - 'vda1') - self._check_per_device_samples(disk.PerDeviceReadRequestsPollster, - 'disk.device.read.requests', 3, - 'vda2') - - def test_per_disk_write_requests(self): - self._check_per_device_samples(disk.PerDeviceWriteRequestsPollster, - 'disk.device.write.requests', 4, - 'vda1') - self._check_per_device_samples(disk.PerDeviceWriteRequestsPollster, - 'disk.device.write.requests', 7, - 'vda2') - - def test_per_disk_read_bytes(self): - self._check_per_device_samples(disk.PerDeviceReadBytesPollster, - 'disk.device.read.bytes', 1, - 'vda1') - self._check_per_device_samples(disk.PerDeviceReadBytesPollster, - 'disk.device.read.bytes', 2, - 'vda2') - - def test_per_disk_write_bytes(self): - self._check_per_device_samples(disk.PerDeviceWriteBytesPollster, - 'disk.device.write.bytes', 3, - 'vda1') - self._check_per_device_samples(disk.PerDeviceWriteBytesPollster, - 'disk.device.write.bytes', 5, - 'vda2') - - -class TestDiskRatePollsters(TestBaseDiskIO): - - DISKS = [ - (virt_inspector.Disk(device='disk1'), - virt_inspector.DiskRateStats(1024, 300, 5120, 700)), - - (virt_inspector.Disk(device='disk2'), - virt_inspector.DiskRateStats(2048, 400, 6144, 800)) - ] - TYPE = 'gauge' - CACHE_KEY = "CACHE_KEY_DISK_RATE" - - def setUp(self): - super(TestDiskRatePollsters, self).setUp() - self.inspector.inspect_disk_rates = mock.Mock(return_value=self.DISKS) - - def test_disk_read_bytes_rate(self): - self._check_aggregate_samples(disk.ReadBytesRatePollster, - 'disk.read.bytes.rate', 3072, - expected_device=['disk1', 'disk2']) - - def test_disk_read_requests_rate(self): - self._check_aggregate_samples(disk.ReadRequestsRatePollster, - 'disk.read.requests.rate', 700, - expected_device=['disk1', 'disk2']) - - def test_disk_write_bytes_rate(self): - self._check_aggregate_samples(disk.WriteBytesRatePollster, - 'disk.write.bytes.rate', 11264, - expected_device=['disk1', 'disk2']) - - def test_disk_write_requests_rate(self): - self._check_aggregate_samples(disk.WriteRequestsRatePollster, - 'disk.write.requests.rate', 1500, - expected_device=['disk1', 'disk2']) - - def test_per_disk_read_bytes_rate(self): - self._check_per_device_samples(disk.PerDeviceReadBytesRatePollster, - 'disk.device.read.bytes.rate', - 1024, 'disk1') - self._check_per_device_samples(disk.PerDeviceReadBytesRatePollster, - 
'disk.device.read.bytes.rate', - 2048, 'disk2') - - def test_per_disk_read_requests_rate(self): - self._check_per_device_samples(disk.PerDeviceReadRequestsRatePollster, - 'disk.device.read.requests.rate', - 300, 'disk1') - self._check_per_device_samples(disk.PerDeviceReadRequestsRatePollster, - 'disk.device.read.requests.rate', - 400, 'disk2') - - def test_per_disk_write_bytes_rate(self): - self._check_per_device_samples(disk.PerDeviceWriteBytesRatePollster, - 'disk.device.write.bytes.rate', - 5120, 'disk1') - self._check_per_device_samples(disk.PerDeviceWriteBytesRatePollster, - 'disk.device.write.bytes.rate', 6144, - 'disk2') - - def test_per_disk_write_requests_rate(self): - self._check_per_device_samples(disk.PerDeviceWriteRequestsRatePollster, - 'disk.device.write.requests.rate', 700, - 'disk1') - self._check_per_device_samples(disk.PerDeviceWriteRequestsRatePollster, - 'disk.device.write.requests.rate', 800, - 'disk2') - - -class TestDiskLatencyPollsters(TestBaseDiskIO): - - DISKS = [ - (virt_inspector.Disk(device='disk1'), - virt_inspector.DiskLatencyStats(1000)), - - (virt_inspector.Disk(device='disk2'), - virt_inspector.DiskLatencyStats(2000)) - ] - TYPE = 'gauge' - CACHE_KEY = "CACHE_KEY_DISK_LATENCY" - - def setUp(self): - super(TestDiskLatencyPollsters, self).setUp() - self.inspector.inspect_disk_latency = mock.Mock( - return_value=self.DISKS) - - def test_disk_latency(self): - self._check_aggregate_samples(disk.DiskLatencyPollster, - 'disk.latency', 3) - - def test_per_device_latency(self): - self._check_per_device_samples(disk.PerDeviceDiskLatencyPollster, - 'disk.device.latency', 1, 'disk1') - - self._check_per_device_samples(disk.PerDeviceDiskLatencyPollster, - 'disk.device.latency', 2, 'disk2') - - -class TestDiskIOPSPollsters(TestBaseDiskIO): - - DISKS = [ - (virt_inspector.Disk(device='disk1'), - virt_inspector.DiskIOPSStats(10)), - - (virt_inspector.Disk(device='disk2'), - virt_inspector.DiskIOPSStats(20)), - ] - TYPE = 'gauge' - CACHE_KEY = "CACHE_KEY_DISK_IOPS" - - def setUp(self): - super(TestDiskIOPSPollsters, self).setUp() - self.inspector.inspect_disk_iops = mock.Mock(return_value=self.DISKS) - - def test_disk_iops(self): - self._check_aggregate_samples(disk.DiskIOPSPollster, - 'disk.iops', 30) - - def test_per_device_iops(self): - self._check_per_device_samples(disk.PerDeviceDiskIOPSPollster, - 'disk.device.iops', 10, 'disk1') - - self._check_per_device_samples(disk.PerDeviceDiskIOPSPollster, - 'disk.device.iops', 20, 'disk2') - - -class TestDiskInfoPollsters(TestBaseDiskIO): - - DISKS = [ - (virt_inspector.Disk(device='vda1'), - virt_inspector.DiskInfo(capacity=3, allocation=2, physical=1)), - (virt_inspector.Disk(device='vda2'), - virt_inspector.DiskInfo(capacity=4, allocation=3, physical=2)), - ] - TYPE = 'gauge' - CACHE_KEY = "CACHE_KEY_DISK_INFO" - - def setUp(self): - super(TestDiskInfoPollsters, self).setUp() - self.inspector.inspect_disk_info = mock.Mock(return_value=self.DISKS) - - def test_disk_capacity(self): - self._check_aggregate_samples(disk.CapacityPollster, - 'disk.capacity', 7, - expected_device=['vda1', 'vda2']) - - def test_disk_allocation(self): - self._check_aggregate_samples(disk.AllocationPollster, - 'disk.allocation', 5, - expected_device=['vda1', 'vda2']) - - def test_disk_physical(self): - self._check_aggregate_samples(disk.PhysicalPollster, - 'disk.usage', 3, - expected_device=['vda1', 'vda2']) - - def test_per_disk_capacity(self): - self._check_per_device_samples(disk.PerDeviceCapacityPollster, - 'disk.device.capacity', 3, - 'vda1') - 
self._check_per_device_samples(disk.PerDeviceCapacityPollster, - 'disk.device.capacity', 4, - 'vda2') - - def test_per_disk_allocation(self): - self._check_per_device_samples(disk.PerDeviceAllocationPollster, - 'disk.device.allocation', 2, - 'vda1') - self._check_per_device_samples(disk.PerDeviceAllocationPollster, - 'disk.device.allocation', 3, - 'vda2') - - def test_per_disk_physical(self): - self._check_per_device_samples(disk.PerDevicePhysicalPollster, - 'disk.device.usage', 1, - 'vda1') - self._check_per_device_samples(disk.PerDevicePhysicalPollster, - 'disk.device.usage', 2, - 'vda2') diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/compute/pollsters/test_instance.py ceilometer-5.0.0~b3/ceilometer/tests/compute/pollsters/test_instance.py --- ceilometer-5.0.0~b2/ceilometer/tests/compute/pollsters/test_instance.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/compute/pollsters/test_instance.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,77 +0,0 @@ -# -# Copyright 2012 eNovance -# Copyright 2012 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_config import fixture as fixture_config - -from ceilometer.agent import manager -from ceilometer.compute.pollsters import instance as pollsters_instance -from ceilometer.tests.compute.pollsters import base - - -class TestInstancePollster(base.TestPollsterBase): - - def setUp(self): - super(TestInstancePollster, self).setUp() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples_instance(self): - mgr = manager.AgentManager() - pollster = pollsters_instance.InstancePollster() - samples = list(pollster.get_samples(mgr, {}, [self.instance])) - self.assertEqual(1, len(samples)) - self.assertEqual('instance', samples[0].name) - self.assertEqual(1, samples[0].resource_metadata['vcpus']) - self.assertEqual(512, samples[0].resource_metadata['memory_mb']) - self.assertEqual(20, samples[0].resource_metadata['disk_gb']) - self.assertEqual(20, samples[0].resource_metadata['root_gb']) - self.assertEqual(0, samples[0].resource_metadata['ephemeral_gb']) - self.assertEqual('active', samples[0].resource_metadata['status']) - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples_instance_flavor(self): - mgr = manager.AgentManager() - pollster = pollsters_instance.InstanceFlavorPollster() - samples = list(pollster.get_samples(mgr, {}, [self.instance])) - self.assertEqual(1, len(samples)) - self.assertEqual('instance:m1.small', samples[0].name) - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_reserved_metadata_with_keys(self): - self.CONF = self.useFixture(fixture_config.Config()).conf - self.CONF.set_override('reserved_metadata_keys', ['fqdn']) - - mgr = manager.AgentManager() - pollster = pollsters_instance.InstancePollster() - samples = list(pollster.get_samples(mgr, {}, [self.instance])) - self.assertEqual({'fqdn': 'vm_fqdn', - 'stack': '2cadc4b4-8789-123c-b4eg-edd2f0a9c128'}, - 
samples[0].resource_metadata['user_metadata']) - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_reserved_metadata_with_namespace(self): - mgr = manager.AgentManager() - pollster = pollsters_instance.InstancePollster() - samples = list(pollster.get_samples(mgr, {}, [self.instance])) - self.assertEqual({'stack': '2cadc4b4-8789-123c-b4eg-edd2f0a9c128'}, - samples[0].resource_metadata['user_metadata']) - - self.CONF = self.useFixture(fixture_config.Config()).conf - self.CONF.set_override('reserved_metadata_namespace', []) - mgr = manager.AgentManager() - pollster = pollsters_instance.InstancePollster() - samples = list(pollster.get_samples(mgr, {}, [self.instance])) - self.assertNotIn('user_metadata', samples[0].resource_metadata) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/compute/pollsters/test_location_metadata.py ceilometer-5.0.0~b3/ceilometer/tests/compute/pollsters/test_location_metadata.py --- ceilometer-5.0.0~b2/ceilometer/tests/compute/pollsters/test_location_metadata.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/compute/pollsters/test_location_metadata.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,112 +0,0 @@ -# -# Copyright 2012 eNovance -# Copyright 2012 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for the compute pollsters. 
-""" - -import mock -from oslotest import base -import six - -from ceilometer.agent import manager -from ceilometer.compute.pollsters import util - - -class FauxInstance(object): - - def __init__(self, **kwds): - for name, value in kwds.items(): - setattr(self, name, value) - - def __getitem__(self, key): - return getattr(self, key) - - def get(self, key, default): - try: - return getattr(self, key) - except AttributeError: - return default - - -class TestLocationMetadata(base.BaseTestCase): - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def setUp(self): - self.manager = manager.AgentManager() - super(TestLocationMetadata, self).setUp() - - # Mimics an instance returned from nova api call - self.INSTANCE_PROPERTIES = {'name': 'display name', - 'OS-EXT-SRV-ATTR:instance_name': - 'instance-000001', - 'OS-EXT-AZ:availability_zone': - 'foo-zone', - 'reservation_id': 'reservation id', - 'architecture': 'x86_64', - 'kernel_id': 'kernel id', - 'os_type': 'linux', - 'ramdisk_id': 'ramdisk id', - 'status': 'active', - 'ephemeral_gb': 0, - 'root_gb': 20, - 'disk_gb': 20, - 'image': {'id': 1, - 'links': [{"rel": "bookmark", - 'href': 2}]}, - 'hostId': '1234-5678', - 'flavor': {'id': 1, - 'disk': 20, - 'ram': 512, - 'vcpus': 2, - 'ephemeral': 0}, - 'metadata': {'metering.autoscale.group': - 'X' * 512, - 'metering.ephemeral_gb': 42}} - - self.instance = FauxInstance(**self.INSTANCE_PROPERTIES) - - def test_metadata(self): - md = util._get_metadata_from_object(self.instance) - for prop, value in six.iteritems(self.INSTANCE_PROPERTIES): - if prop not in ("metadata"): - # Special cases - if prop == 'name': - prop = 'display_name' - elif prop == 'hostId': - prop = "host" - elif prop == 'OS-EXT-SRV-ATTR:instance_name': - prop = 'name' - self.assertEqual(value, md[prop]) - user_metadata = md['user_metadata'] - expected = self.INSTANCE_PROPERTIES[ - 'metadata']['metering.autoscale.group'][:256] - self.assertEqual(expected, user_metadata['autoscale_group']) - self.assertEqual(1, len(user_metadata)) - - def test_metadata_empty_image(self): - self.INSTANCE_PROPERTIES['image'] = None - self.instance = FauxInstance(**self.INSTANCE_PROPERTIES) - md = util._get_metadata_from_object(self.instance) - self.assertIsNone(md['image']) - self.assertIsNone(md['image_ref']) - self.assertIsNone(md['image_ref_url']) - - def test_metadata_image_through_conductor(self): - # There should be no links here, should default to None - self.INSTANCE_PROPERTIES['image'] = {'id': 1} - self.instance = FauxInstance(**self.INSTANCE_PROPERTIES) - md = util._get_metadata_from_object(self.instance) - self.assertEqual(1, md['image_ref']) - self.assertIsNone(md['image_ref_url']) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/compute/pollsters/test_memory.py ceilometer-5.0.0~b3/ceilometer/tests/compute/pollsters/test_memory.py --- ceilometer-5.0.0~b2/ceilometer/tests/compute/pollsters/test_memory.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/compute/pollsters/test_memory.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,114 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from ceilometer.agent import manager -from ceilometer.compute.pollsters import memory -from ceilometer.compute.virt import inspector as virt_inspector -from ceilometer.tests.compute.pollsters import base - - -class TestMemoryPollster(base.TestPollsterBase): - - def setUp(self): - super(TestMemoryPollster, self).setUp() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - next_value = iter(( - virt_inspector.MemoryUsageStats(usage=1.0), - virt_inspector.MemoryUsageStats(usage=2.0), - virt_inspector.NoDataException(), - virt_inspector.InstanceShutOffException(), - )) - - def inspect_memory_usage(instance, duration): - value = next(next_value) - if isinstance(value, virt_inspector.MemoryUsageStats): - return value - else: - raise value - - self.inspector.inspect_memory_usage = mock.Mock( - side_effect=inspect_memory_usage) - - mgr = manager.AgentManager() - pollster = memory.MemoryUsagePollster() - - @mock.patch('ceilometer.compute.pollsters.memory.LOG') - def _verify_memory_metering(expected_count, expected_memory_mb, mylog): - samples = list(pollster.get_samples(mgr, {}, [self.instance])) - self.assertEqual(expected_count, len(samples)) - if expected_count > 0: - self.assertEqual(set(['memory.usage']), - set([s.name for s in samples])) - self.assertEqual(expected_memory_mb, samples[0].volume) - else: - self.assertEqual(1, mylog.warn.call_count) - self.assertEqual(0, mylog.exception.call_count) - - _verify_memory_metering(1, 1.0) - _verify_memory_metering(1, 2.0) - _verify_memory_metering(0, 0) - _verify_memory_metering(0, 0) - - -class TestResidentMemoryPollster(base.TestPollsterBase): - - def setUp(self): - super(TestResidentMemoryPollster, self).setUp() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - next_value = iter(( - virt_inspector.MemoryResidentStats(resident=1.0), - virt_inspector.MemoryResidentStats(resident=2.0), - virt_inspector.NoDataException(), - virt_inspector.InstanceShutOffException(), - )) - - def inspect_memory_resident(instance, duration): - value = next(next_value) - if isinstance(value, virt_inspector.MemoryResidentStats): - return value - else: - raise value - - self.inspector.inspect_memory_resident = mock.Mock( - side_effect=inspect_memory_resident) - - mgr = manager.AgentManager() - pollster = memory.MemoryResidentPollster() - - @mock.patch('ceilometer.compute.pollsters.memory.LOG') - def _verify_resident_memory_metering(expected_count, - expected_resident_memory_mb, - mylog): - samples = list(pollster.get_samples(mgr, {}, [self.instance])) - self.assertEqual(expected_count, len(samples)) - if expected_count > 0: - self.assertEqual(set(['memory.resident']), - set([s.name for s in samples])) - self.assertEqual(expected_resident_memory_mb, - samples[0].volume) - else: - self.assertEqual(1, mylog.warn.call_count) - self.assertEqual(0, mylog.exception.call_count) - - _verify_resident_memory_metering(1, 1.0) - _verify_resident_memory_metering(1, 2.0) - _verify_resident_memory_metering(0, 0) - _verify_resident_memory_metering(0, 0) diff 
-Nru ceilometer-5.0.0~b2/ceilometer/tests/compute/pollsters/test_net.py ceilometer-5.0.0~b3/ceilometer/tests/compute/pollsters/test_net.py --- ceilometer-5.0.0~b2/ceilometer/tests/compute/pollsters/test_net.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/compute/pollsters/test_net.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,317 +0,0 @@ -# -# Copyright 2012 eNovance -# Copyright 2012 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from ceilometer.agent import manager -from ceilometer.compute.pollsters import net -from ceilometer.compute.virt import inspector as virt_inspector -from ceilometer import sample -from ceilometer.tests.compute.pollsters import base - - -class FauxInstance(object): - - def __init__(self, **kwargs): - for name, value in kwargs.items(): - setattr(self, name, value) - - def __getitem__(self, key): - return getattr(self, key) - - def get(self, key, default): - return getattr(self, key, default) - - -class TestNetPollster(base.TestPollsterBase): - - def setUp(self): - super(TestNetPollster, self).setUp() - self.vnic0 = virt_inspector.Interface( - name='vnet0', - fref='fa163e71ec6e', - mac='fa:16:3e:71:ec:6d', - parameters=dict(ip='10.0.0.2', - projmask='255.255.255.0', - projnet='proj1', - dhcp_server='10.0.0.1')) - stats0 = virt_inspector.InterfaceStats(rx_bytes=1, rx_packets=2, - tx_bytes=3, tx_packets=4) - self.vnic1 = virt_inspector.Interface( - name='vnet1', - fref='fa163e71ec6f', - mac='fa:16:3e:71:ec:6e', - parameters=dict(ip='192.168.0.3', - projmask='255.255.255.0', - projnet='proj2', - dhcp_server='10.0.0.2')) - stats1 = virt_inspector.InterfaceStats(rx_bytes=5, rx_packets=6, - tx_bytes=7, tx_packets=8) - self.vnic2 = virt_inspector.Interface( - name='vnet2', - fref=None, - mac='fa:18:4e:72:fc:7e', - parameters=dict(ip='192.168.0.4', - projmask='255.255.255.0', - projnet='proj3', - dhcp_server='10.0.0.3')) - stats2 = virt_inspector.InterfaceStats(rx_bytes=9, rx_packets=10, - tx_bytes=11, tx_packets=12) - - vnics = [ - (self.vnic0, stats0), - (self.vnic1, stats1), - (self.vnic2, stats2), - ] - self.inspector.inspect_vnics = mock.Mock(return_value=vnics) - - self.INSTANCE_PROPERTIES = {'name': 'display name', - 'OS-EXT-SRV-ATTR:instance_name': - 'instance-000001', - 'OS-EXT-AZ:availability_zone': 'foo-zone', - 'reservation_id': 'reservation id', - 'id': 'instance id', - 'user_id': 'user id', - 'tenant_id': 'tenant id', - 'architecture': 'x86_64', - 'kernel_id': 'kernel id', - 'os_type': 'linux', - 'ramdisk_id': 'ramdisk id', - 'status': 'active', - 'ephemeral_gb': 0, - 'root_gb': 20, - 'disk_gb': 20, - 'image': {'id': 1, - 'links': [{"rel": "bookmark", - 'href': 2}]}, - 'hostId': '1234-5678', - 'flavor': {'id': 1, - 'disk': 20, - 'ram': 512, - 'vcpus': 2, - 'ephemeral': 0}, - 'metadata': {'metering.autoscale.group': - 'X' * 512, - 'metering.ephemeral_gb': 42}} - - self.faux_instance = FauxInstance(**self.INSTANCE_PROPERTIES) - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def 
_check_get_samples(self, factory, expected): - mgr = manager.AgentManager() - pollster = factory() - samples = list(pollster.get_samples(mgr, {}, [self.instance])) - self.assertEqual(3, len(samples)) # one for each nic - self.assertEqual(set([samples[0].name]), - set([s.name for s in samples])) - - def _verify_vnic_metering(ip, expected_volume, expected_rid): - match = [s for s in samples - if s.resource_metadata['parameters']['ip'] == ip - ] - self.assertEqual(len(match), 1, 'missing ip %s' % ip) - self.assertEqual(expected_volume, match[0].volume) - self.assertEqual('cumulative', match[0].type) - self.assertEqual(expected_rid, match[0].resource_id) - - for ip, volume, rid in expected: - _verify_vnic_metering(ip, volume, rid) - - def test_incoming_bytes(self): - instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) - self._check_get_samples( - net.IncomingBytesPollster, - [('10.0.0.2', 1, self.vnic0.fref), - ('192.168.0.3', 5, self.vnic1.fref), - ('192.168.0.4', 9, - "%s-%s" % (instance_name_id, self.vnic2.name)), - ], - ) - - def test_outgoing_bytes(self): - instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) - self._check_get_samples( - net.OutgoingBytesPollster, - [('10.0.0.2', 3, self.vnic0.fref), - ('192.168.0.3', 7, self.vnic1.fref), - ('192.168.0.4', 11, - "%s-%s" % (instance_name_id, self.vnic2.name)), - ], - ) - - def test_incoming_packets(self): - instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) - self._check_get_samples( - net.IncomingPacketsPollster, - [('10.0.0.2', 2, self.vnic0.fref), - ('192.168.0.3', 6, self.vnic1.fref), - ('192.168.0.4', 10, - "%s-%s" % (instance_name_id, self.vnic2.name)), - ], - ) - - def test_outgoing_packets(self): - instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) - self._check_get_samples( - net.OutgoingPacketsPollster, - [('10.0.0.2', 4, self.vnic0.fref), - ('192.168.0.3', 8, self.vnic1.fref), - ('192.168.0.4', 12, - "%s-%s" % (instance_name_id, self.vnic2.name)), - ], - ) - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_metadata(self): - factory = net.OutgoingBytesPollster - pollster = factory() - sm = pollster.make_vnic_sample(self.faux_instance, - name='network.outgoing.bytes', - type=sample.TYPE_CUMULATIVE, - unit='B', - volume=100, - vnic_data=self.vnic0) - - user_metadata = sm.resource_metadata['user_metadata'] - expected = self.INSTANCE_PROPERTIES[ - 'metadata']['metering.autoscale.group'][:256] - self.assertEqual(expected, user_metadata['autoscale_group']) - self.assertEqual(2, len(user_metadata)) - - -class TestNetPollsterCache(base.TestPollsterBase): - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def _check_get_samples_cache(self, factory): - vnic0 = virt_inspector.Interface( - name='vnet0', - fref='fa163e71ec6e', - mac='fa:16:3e:71:ec:6d', - parameters=dict(ip='10.0.0.2', - projmask='255.255.255.0', - projnet='proj1', - dhcp_server='10.0.0.1')) - stats0 = virt_inspector.InterfaceStats(rx_bytes=1, rx_packets=2, - tx_bytes=3, tx_packets=4) - vnics = [(vnic0, stats0)] - - mgr = manager.AgentManager() - pollster = factory() - cache = { - pollster.CACHE_KEY_VNIC: { - self.instance.id: vnics, - }, - } - samples = list(pollster.get_samples(mgr, cache, [self.instance])) - self.assertEqual(1, len(samples)) - - def test_incoming_bytes(self): - self._check_get_samples_cache(net.IncomingBytesPollster) - - def test_outgoing_bytes(self): - self._check_get_samples_cache(net.OutgoingBytesPollster) - - def 
test_incoming_packets(self): - self._check_get_samples_cache(net.IncomingPacketsPollster) - - def test_outgoing_packets(self): - self._check_get_samples_cache(net.OutgoingPacketsPollster) - - -class TestNetRatesPollster(base.TestPollsterBase): - - def setUp(self): - super(TestNetRatesPollster, self).setUp() - self.vnic0 = virt_inspector.Interface( - name='vnet0', - fref='fa163e71ec6e', - mac='fa:16:3e:71:ec:6d', - parameters=dict(ip='10.0.0.2', - projmask='255.255.255.0', - projnet='proj1', - dhcp_server='10.0.0.1')) - stats0 = virt_inspector.InterfaceRateStats(rx_bytes_rate=1, - tx_bytes_rate=2) - self.vnic1 = virt_inspector.Interface( - name='vnet1', - fref='fa163e71ec6f', - mac='fa:16:3e:71:ec:6e', - parameters=dict(ip='192.168.0.3', - projmask='255.255.255.0', - projnet='proj2', - dhcp_server='10.0.0.2')) - stats1 = virt_inspector.InterfaceRateStats(rx_bytes_rate=3, - tx_bytes_rate=4) - self.vnic2 = virt_inspector.Interface( - name='vnet2', - fref=None, - mac='fa:18:4e:72:fc:7e', - parameters=dict(ip='192.168.0.4', - projmask='255.255.255.0', - projnet='proj3', - dhcp_server='10.0.0.3')) - stats2 = virt_inspector.InterfaceRateStats(rx_bytes_rate=5, - tx_bytes_rate=6) - - vnics = [ - (self.vnic0, stats0), - (self.vnic1, stats1), - (self.vnic2, stats2), - ] - self.inspector.inspect_vnic_rates = mock.Mock(return_value=vnics) - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def _check_get_samples(self, factory, expected): - mgr = manager.AgentManager() - pollster = factory() - samples = list(pollster.get_samples(mgr, {}, [self.instance])) - self.assertEqual(3, len(samples)) # one for each nic - self.assertEqual(set([samples[0].name]), - set([s.name for s in samples])) - - def _verify_vnic_metering(ip, expected_volume, expected_rid): - match = [s for s in samples - if s.resource_metadata['parameters']['ip'] == ip - ] - self.assertEqual(1, len(match), 'missing ip %s' % ip) - self.assertEqual(expected_volume, match[0].volume) - self.assertEqual('gauge', match[0].type) - self.assertEqual(expected_rid, match[0].resource_id) - - for ip, volume, rid in expected: - _verify_vnic_metering(ip, volume, rid) - - def test_incoming_bytes_rate(self): - instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) - self._check_get_samples( - net.IncomingBytesRatePollster, - [('10.0.0.2', 1, self.vnic0.fref), - ('192.168.0.3', 3, self.vnic1.fref), - ('192.168.0.4', 5, - "%s-%s" % (instance_name_id, self.vnic2.name)), - ], - ) - - def test_outgoing_bytes_rate(self): - instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) - self._check_get_samples( - net.OutgoingBytesRatePollster, - [('10.0.0.2', 2, self.vnic0.fref), - ('192.168.0.3', 4, self.vnic1.fref), - ('192.168.0.4', 6, - "%s-%s" % (instance_name_id, self.vnic2.name)), - ], - ) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/compute/virt/hyperv/test_inspector.py ceilometer-5.0.0~b3/ceilometer/tests/compute/virt/hyperv/test_inspector.py --- ceilometer-5.0.0~b2/ceilometer/tests/compute/virt/hyperv/test_inspector.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/compute/virt/hyperv/test_inspector.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,156 +0,0 @@ -# Copyright 2013 Cloudbase Solutions Srl -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests for Hyper-V inspector. -""" - -import mock -from oslo_utils import units -from oslotest import base - -from ceilometer.compute.virt.hyperv import inspector as hyperv_inspector - - -class TestHyperVInspection(base.BaseTestCase): - - def setUp(self): - self._inspector = hyperv_inspector.HyperVInspector() - self._inspector._utils = mock.MagicMock() - - super(TestHyperVInspection, self).setUp() - - def test_inspect_cpus(self): - fake_instance_name = 'fake_instance_name' - fake_host_cpu_clock = 1000 - fake_host_cpu_count = 2 - fake_cpu_clock_used = 2000 - fake_cpu_count = 3000 - fake_uptime = 4000 - - fake_cpu_percent_used = (fake_cpu_clock_used / - float(fake_host_cpu_clock * fake_cpu_count)) - fake_cpu_time = (int(fake_uptime * fake_cpu_percent_used) * - 1000) - - self._inspector._utils.get_host_cpu_info.return_value = ( - fake_host_cpu_clock, fake_host_cpu_count) - - self._inspector._utils.get_cpu_metrics.return_value = ( - fake_cpu_clock_used, fake_cpu_count, fake_uptime) - - cpu_stats = self._inspector.inspect_cpus(fake_instance_name) - - self.assertEqual(fake_cpu_count, cpu_stats.number) - self.assertEqual(fake_cpu_time, cpu_stats.time) - - @mock.patch('ceilometer.compute.virt.hyperv.utilsv2.UtilsV2.' - 'get_memory_metrics') - def test_inspect_memory_usage(self, mock_get_memory_metrics): - fake_usage = self._inspector._utils.get_memory_metrics.return_value - usage = self._inspector.inspect_memory_usage( - mock.sentinel.FAKE_INSTANCE, mock.sentinel.FAKE_DURATION) - self.assertEqual(fake_usage, usage.usage) - - def test_inspect_vnics(self): - fake_instance_name = 'fake_instance_name' - fake_rx_mb = 1000 - fake_tx_mb = 2000 - fake_element_name = 'fake_element_name' - fake_address = 'fake_address' - - self._inspector._utils.get_vnic_metrics.return_value = [{ - 'rx_mb': fake_rx_mb, - 'tx_mb': fake_tx_mb, - 'element_name': fake_element_name, - 'address': fake_address}] - - inspected_vnics = list(self._inspector.inspect_vnics( - fake_instance_name)) - - self.assertEqual(1, len(inspected_vnics)) - self.assertEqual(2, len(inspected_vnics[0])) - - inspected_vnic, inspected_stats = inspected_vnics[0] - - self.assertEqual(fake_element_name, inspected_vnic.name) - self.assertEqual(fake_address, inspected_vnic.mac) - - self.assertEqual(fake_rx_mb * units.Mi, inspected_stats.rx_bytes) - self.assertEqual(fake_tx_mb * units.Mi, inspected_stats.tx_bytes) - - def test_inspect_disks(self): - fake_instance_name = 'fake_instance_name' - fake_read_mb = 1000 - fake_write_mb = 2000 - fake_instance_id = "fake_fake_instance_id" - fake_host_resource = "fake_host_resource" - - self._inspector._utils.get_disk_metrics.return_value = [{ - 'read_mb': fake_read_mb, - 'write_mb': fake_write_mb, - 'instance_id': fake_instance_id, - 'host_resource': fake_host_resource}] - - inspected_disks = list(self._inspector.inspect_disks( - fake_instance_name)) - - self.assertEqual(1, len(inspected_disks)) - self.assertEqual(2, len(inspected_disks[0])) - - inspected_disk, inspected_stats = inspected_disks[0] - - self.assertEqual(fake_instance_id, inspected_disk.device) - - self.assertEqual(fake_read_mb * units.Mi, 
inspected_stats.read_bytes) - self.assertEqual(fake_write_mb * units.Mi, inspected_stats.write_bytes) - - def test_inspect_disk_latency(self): - fake_instance_name = mock.sentinel.INSTANCE_NAME - fake_disk_latency = mock.sentinel.DISK_LATENCY - fake_instance_id = mock.sentinel.INSTANCE_ID - - self._inspector._utils.get_disk_latency_metrics.return_value = [{ - 'disk_latency': fake_disk_latency, - 'instance_id': fake_instance_id}] - - inspected_disks = list(self._inspector.inspect_disk_latency( - fake_instance_name)) - - self.assertEqual(1, len(inspected_disks)) - self.assertEqual(2, len(inspected_disks[0])) - - inspected_disk, inspected_stats = inspected_disks[0] - - self.assertEqual(fake_instance_id, inspected_disk.device) - self.assertEqual(fake_disk_latency, inspected_stats.disk_latency) - - def test_inspect_disk_iops_count(self): - fake_instance_name = mock.sentinel.INSTANCE_NAME - fake_disk_iops_count = mock.sentinel.DISK_IOPS_COUNT - fake_instance_id = mock.sentinel.INSTANCE_ID - - self._inspector._utils.get_disk_iops_count.return_value = [{ - 'iops_count': fake_disk_iops_count, - 'instance_id': fake_instance_id}] - - inspected_disks = list(self._inspector.inspect_disk_iops( - fake_instance_name)) - - self.assertEqual(1, len(inspected_disks)) - self.assertEqual(2, len(inspected_disks[0])) - - inspected_disk, inspected_stats = inspected_disks[0] - - self.assertEqual(fake_instance_id, inspected_disk.device) - self.assertEqual(fake_disk_iops_count, inspected_stats.iops_count) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/compute/virt/hyperv/test_utilsv2.py ceilometer-5.0.0~b3/ceilometer/tests/compute/virt/hyperv/test_utilsv2.py --- ceilometer-5.0.0~b2/ceilometer/tests/compute/virt/hyperv/test_utilsv2.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/compute/virt/hyperv/test_utilsv2.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,283 +0,0 @@ -# Copyright 2013 Cloudbase Solutions Srl -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests for Hyper-V utilsv2. 
-""" - -import mock -from oslotest import base - -from ceilometer.compute.virt.hyperv import utilsv2 as utilsv2 -from ceilometer.compute.virt import inspector - - -class TestUtilsV2(base.BaseTestCase): - - _FAKE_RETURN_CLASS = 'fake_return_class' - - def setUp(self): - self._utils = utilsv2.UtilsV2() - self._utils._conn = mock.MagicMock() - self._utils._conn_cimv2 = mock.MagicMock() - - super(TestUtilsV2, self).setUp() - - @mock.patch.object(utilsv2.UtilsV2, '_get_metrics') - @mock.patch.object(utilsv2.UtilsV2, '_get_metric_def') - @mock.patch.object(utilsv2.UtilsV2, '_lookup_vm') - def test_get_memory_metrics(self, mock_lookup_vm, mock_get_metric_def, - mock_get_metrics): - mock_vm = mock_lookup_vm.return_value - - mock_metric_def = mock_get_metric_def.return_value - - metric_memory = mock.MagicMock() - metric_memory.MetricValue = 3 - mock_get_metrics.return_value = [metric_memory] - - response = self._utils.get_memory_metrics(mock.sentinel._FAKE_INSTANCE) - - mock_lookup_vm.assert_called_once_with(mock.sentinel._FAKE_INSTANCE) - mock_get_metric_def.assert_called_once_with( - self._utils._MEMORY_METRIC_NAME) - mock_get_metrics.assert_called_once_with(mock_vm, mock_metric_def) - - self.assertEqual(3, response) - - def test_get_host_cpu_info(self): - _fake_clock_speed = 1000 - _fake_cpu_count = 2 - - mock_cpu = mock.MagicMock() - mock_cpu.MaxClockSpeed = _fake_clock_speed - - self._utils._conn_cimv2.Win32_Processor.return_value = [mock_cpu, - mock_cpu] - cpu_info = self._utils.get_host_cpu_info() - - self.assertEqual(_fake_clock_speed, cpu_info[0]) - self.assertEqual(_fake_cpu_count, cpu_info[1]) - - def test_get_all_vms(self): - fake_vm_element_name = "fake_vm_element_name" - fake_vm_name = "fake_vm_name" - - mock_vm = mock.MagicMock() - mock_vm.ElementName = fake_vm_element_name - mock_vm.Name = fake_vm_name - self._utils._conn.Msvm_ComputerSystem.return_value = [mock_vm] - - vms = self._utils.get_all_vms() - - self.assertEqual((fake_vm_element_name, fake_vm_name), vms[0]) - - def test_get_cpu_metrics(self): - fake_vm_element_name = "fake_vm_element_name" - fake_cpu_count = 2 - fake_uptime = 1000 - fake_cpu_metric_val = 2000 - - self._utils._lookup_vm = mock.MagicMock() - self._utils._lookup_vm().OnTimeInMilliseconds = fake_uptime - - self._utils._get_vm_resources = mock.MagicMock() - mock_res = self._utils._get_vm_resources()[0] - mock_res.VirtualQuantity = fake_cpu_count - - self._utils._get_metrics = mock.MagicMock() - self._utils._get_metrics()[0].MetricValue = fake_cpu_metric_val - - cpu_metrics = self._utils.get_cpu_metrics(fake_vm_element_name) - - self.assertEqual(3, len(cpu_metrics)) - self.assertEqual(fake_cpu_metric_val, cpu_metrics[0]) - self.assertEqual(fake_cpu_count, cpu_metrics[1]) - self.assertEqual(fake_uptime, cpu_metrics[2]) - - @mock.patch('ceilometer.compute.virt.hyperv.utilsv2.UtilsV2' - '._sum_metric_values_by_defs') - @mock.patch('ceilometer.compute.virt.hyperv.utilsv2.UtilsV2' - '._get_metric_value_instances') - def test_get_vnic_metrics(self, mock_get_instances, mock_get_by_defs): - fake_vm_element_name = "fake_vm_element_name" - fake_vnic_element_name = "fake_vnic_name" - fake_vnic_address = "fake_vnic_address" - fake_vnic_path = "fake_vnic_path" - fake_rx_mb = 1000 - fake_tx_mb = 2000 - - self._utils._lookup_vm = mock.MagicMock() - self._utils._get_vm_resources = mock.MagicMock() - - mock_port = mock.MagicMock() - mock_port.Parent = fake_vnic_path - - mock_vnic = mock.MagicMock() - mock_vnic.path_.return_value = fake_vnic_path - mock_vnic.ElementName = 
fake_vnic_element_name - mock_vnic.Address = fake_vnic_address - - self._utils._get_vm_resources.side_effect = [[mock_port], [mock_vnic]] - - self._utils._get_metric_def = mock.MagicMock() - - mock_get_by_defs.return_value = [fake_rx_mb, fake_tx_mb] - - vnic_metrics = list(self._utils.get_vnic_metrics(fake_vm_element_name)) - - self.assertEqual(1, len(vnic_metrics)) - self.assertEqual(fake_rx_mb, vnic_metrics[0]['rx_mb']) - self.assertEqual(fake_tx_mb, vnic_metrics[0]['tx_mb']) - self.assertEqual(fake_vnic_element_name, - vnic_metrics[0]['element_name']) - self.assertEqual(fake_vnic_address, vnic_metrics[0]['address']) - - def test_get_disk_metrics(self): - fake_vm_element_name = "fake_vm_element_name" - fake_host_resource = "fake_host_resource" - fake_instance_id = "fake_instance_id" - fake_read_mb = 1000 - fake_write_mb = 2000 - - self._utils._lookup_vm = mock.MagicMock() - - mock_disk = mock.MagicMock() - mock_disk.HostResource = [fake_host_resource] - mock_disk.InstanceID = fake_instance_id - self._utils._get_vm_resources = mock.MagicMock( - return_value=[mock_disk]) - - self._utils._get_metric_def = mock.MagicMock() - - self._utils._get_metric_values = mock.MagicMock() - self._utils._get_metric_values.return_value = [fake_read_mb, - fake_write_mb] - - disk_metrics = list(self._utils.get_disk_metrics(fake_vm_element_name)) - - self.assertEqual(1, len(disk_metrics)) - self.assertEqual(fake_read_mb, disk_metrics[0]['read_mb']) - self.assertEqual(fake_write_mb, disk_metrics[0]['write_mb']) - self.assertEqual(fake_instance_id, disk_metrics[0]['instance_id']) - self.assertEqual(fake_host_resource, disk_metrics[0]['host_resource']) - - def test_get_disk_latency(self): - fake_vm_name = mock.sentinel.VM_NAME - fake_instance_id = mock.sentinel.FAKE_INSTANCE_ID - fake_latency = mock.sentinel.FAKE_LATENCY - - self._utils._lookup_vm = mock.MagicMock() - - mock_disk = mock.MagicMock() - mock_disk.InstanceID = fake_instance_id - self._utils._get_vm_resources = mock.MagicMock( - return_value=[mock_disk]) - - self._utils._get_metric_values = mock.MagicMock( - return_value=[fake_latency]) - - disk_metrics = list(self._utils.get_disk_latency_metrics(fake_vm_name)) - - self.assertEqual(1, len(disk_metrics)) - self.assertEqual(fake_latency, disk_metrics[0]['disk_latency']) - self.assertEqual(fake_instance_id, disk_metrics[0]['instance_id']) - - def test_get_disk_iops_metrics(self): - fake_vm_name = mock.sentinel.VM_NAME - fake_instance_id = mock.sentinel.FAKE_INSTANCE_ID - fake_iops_count = mock.sentinel.FAKE_IOPS_COUNT - - self._utils._lookup_vm = mock.MagicMock() - - mock_disk = mock.MagicMock() - mock_disk.InstanceID = fake_instance_id - self._utils._get_vm_resources = mock.MagicMock( - return_value=[mock_disk]) - - self._utils._get_metric_values = mock.MagicMock( - return_value=[fake_iops_count]) - - disk_metrics = list(self._utils.get_disk_iops_count(fake_vm_name)) - - self.assertEqual(1, len(disk_metrics)) - self.assertEqual(fake_iops_count, disk_metrics[0]['iops_count']) - self.assertEqual(fake_instance_id, disk_metrics[0]['instance_id']) - - def test_get_metric_value_instances(self): - mock_el1 = mock.MagicMock() - mock_associator = mock.MagicMock() - mock_el1.associators.return_value = [mock_associator] - - mock_el2 = mock.MagicMock() - mock_el2.associators.return_value = [] - - returned = self._utils._get_metric_value_instances( - [mock_el1, mock_el2], self._FAKE_RETURN_CLASS) - - self.assertEqual([mock_associator], returned) - - def test_lookup_vm(self): - fake_vm_element_name = 
"fake_vm_element_name" - fake_vm = "fake_vm" - self._utils._conn.Msvm_ComputerSystem.return_value = [fake_vm] - - vm = self._utils._lookup_vm(fake_vm_element_name) - - self.assertEqual(fake_vm, vm) - - def test_lookup_vm_not_found(self): - fake_vm_element_name = "fake_vm_element_name" - self._utils._conn.Msvm_ComputerSystem.return_value = [] - - self.assertRaises(inspector.InstanceNotFoundException, - self._utils._lookup_vm, fake_vm_element_name) - - def test_lookup_vm_duplicate_found(self): - fake_vm_element_name = "fake_vm_element_name" - fake_vm = "fake_vm" - self._utils._conn.Msvm_ComputerSystem.return_value = [fake_vm, fake_vm] - - self.assertRaises(utilsv2.HyperVException, - self._utils._lookup_vm, fake_vm_element_name) - - def test_get_metric_values(self): - fake_metric_def_id = "fake_metric_def_id" - fake_metric_value = "1000" - - mock_metric = mock.MagicMock() - mock_metric.MetricDefinitionId = fake_metric_def_id - mock_metric.MetricValue = fake_metric_value - - mock_element = mock.MagicMock() - mock_element.associators.return_value = [mock_metric] - - mock_metric_def = mock.MagicMock() - mock_metric_def.Id = fake_metric_def_id - - metric_values = self._utils._get_metric_values(mock_element, - [mock_metric_def]) - - self.assertEqual(1, len(metric_values)) - self.assertEqual(int(fake_metric_value), metric_values[0]) - - def test_get_vm_setting_data(self): - mock_vm_s = mock.MagicMock() - mock_vm_s.VirtualSystemType = self._utils._VIRTUAL_SYSTEM_TYPE_REALIZED - - mock_vm = mock.MagicMock() - mock_vm.associators.return_value = [mock_vm_s] - - vm_setting_data = self._utils._get_vm_setting_data(mock_vm) - - self.assertEqual(mock_vm_s, vm_setting_data) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/compute/virt/libvirt/test_inspector.py ceilometer-5.0.0~b3/ceilometer/tests/compute/virt/libvirt/test_inspector.py --- ceilometer-5.0.0~b2/ceilometer/tests/compute/virt/libvirt/test_inspector.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/compute/virt/libvirt/test_inspector.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,343 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2012 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for libvirt inspector. 
-""" - -try: - import contextlib2 as contextlib # for Python < 3.3 -except ImportError: - import contextlib - -import fixtures -import mock -from oslo_utils import units -from oslotest import base - -from ceilometer.compute.virt import inspector as virt_inspector -from ceilometer.compute.virt.libvirt import inspector as libvirt_inspector - - -class TestLibvirtInspection(base.BaseTestCase): - - class fakeLibvirtError(Exception): - pass - - def setUp(self): - super(TestLibvirtInspection, self).setUp() - - class VMInstance(object): - id = 'ff58e738-12f4-4c58-acde-77617b68da56' - name = 'instance-00000001' - self.instance = VMInstance - self.inspector = libvirt_inspector.LibvirtInspector() - self.inspector.connection = mock.Mock() - libvirt_inspector.libvirt = mock.Mock() - libvirt_inspector.libvirt.VIR_DOMAIN_SHUTOFF = 5 - libvirt_inspector.libvirt.libvirtError = self.fakeLibvirtError - self.domain = mock.Mock() - self.addCleanup(mock.patch.stopall) - - def test_inspect_cpus(self): - with contextlib.ExitStack() as stack: - stack.enter_context(mock.patch.object(self.inspector.connection, - 'lookupByUUIDString', - return_value=self.domain)) - stack.enter_context(mock.patch.object(self.domain, 'info', - return_value=(0, 0, 0, - 2, 999999))) - cpu_info = self.inspector.inspect_cpus(self.instance) - self.assertEqual(2, cpu_info.number) - self.assertEqual(999999, cpu_info.time) - - def test_inspect_vnics(self): - dom_xml = """ - - - - - - - -
-        [fixture XML stripped during extraction: three <interface> elements
-         (vnet0, vnet1, vnet2) with <mac>, <target> and <filterref> children;
-         a hedged reconstruction follows]
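The interface XML above did not survive extraction. Judging from the
assertions later in this test, the fixture defined three bridge interfaces
roughly as sketched below. This is a reconstruction inferred from the
asserted names, MACs, filter references and parameters, not the verbatim
fixture; children such as <source>, <model> and <address> are omitted. The
libvirt inspector apparently lower-cases <parameter> names, which is why the
assertions read 'dhcpserver', 'ip', 'projmask' and 'projnet'.

    <domain type='kvm'>
      <devices>
        <interface type='bridge'>
          <mac address='fa:16:3e:71:ec:6d'/>
          <target dev='vnet0'/>
          <filterref filter='nova-instance-00000001-fa163e71ec6d'>
            <parameter name='DHCPSERVER' value='10.0.0.1'/>
            <parameter name='IP' value='10.0.0.2'/>
            <parameter name='PROJMASK' value='255.255.255.0'/>
            <parameter name='PROJNET' value='10.0.0.0'/>
          </filterref>
        </interface>
        <interface type='bridge'>
          <mac address='fa:16:3e:71:ec:6e'/>
          <target dev='vnet1'/>
          <filterref filter='nova-instance-00000001-fa163e71ec6e'>
            <parameter name='DHCPSERVER' value='192.168.0.1'/>
            <parameter name='IP' value='192.168.0.2'/>
            <parameter name='PROJMASK' value='255.255.255.0'/>
            <parameter name='PROJNET' value='192.168.0.0'/>
          </filterref>
        </interface>
        <interface type='bridge'>
          <mac address='fa:16:3e:96:33:f0'/>
          <target dev='vnet2'/>
        </interface>
      </devices>
    </domain>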
- - - - """ - - interface_stats = { - 'vnet0': (1, 2, 0, 0, 3, 4, 0, 0), - 'vnet1': (5, 6, 0, 0, 7, 8, 0, 0), - 'vnet2': (9, 10, 0, 0, 11, 12, 0, 0), - } - interfaceStats = interface_stats.__getitem__ - - connection = self.inspector.connection - with contextlib.ExitStack() as stack: - stack.enter_context(mock.patch.object(connection, - 'lookupByUUIDString', - return_value=self.domain)) - stack.enter_context(mock.patch.object(self.domain, 'XMLDesc', - return_value=dom_xml)) - stack.enter_context(mock.patch.object(self.domain, - 'interfaceStats', - side_effect=interfaceStats)) - stack.enter_context(mock.patch.object(self.domain, 'info', - return_value=(0, 0, 0, - 2, 999999))) - interfaces = list(self.inspector.inspect_vnics(self.instance)) - - self.assertEqual(3, len(interfaces)) - vnic0, info0 = interfaces[0] - self.assertEqual('vnet0', vnic0.name) - self.assertEqual('fa:16:3e:71:ec:6d', vnic0.mac) - self.assertEqual('nova-instance-00000001-fa163e71ec6d', vnic0.fref) - self.assertEqual('255.255.255.0', vnic0.parameters.get('projmask')) - self.assertEqual('10.0.0.2', vnic0.parameters.get('ip')) - self.assertEqual('10.0.0.0', vnic0.parameters.get('projnet')) - self.assertEqual('10.0.0.1', vnic0.parameters.get('dhcpserver')) - self.assertEqual(1, info0.rx_bytes) - self.assertEqual(2, info0.rx_packets) - self.assertEqual(3, info0.tx_bytes) - self.assertEqual(4, info0.tx_packets) - - vnic1, info1 = interfaces[1] - self.assertEqual('vnet1', vnic1.name) - self.assertEqual('fa:16:3e:71:ec:6e', vnic1.mac) - self.assertEqual('nova-instance-00000001-fa163e71ec6e', vnic1.fref) - self.assertEqual('255.255.255.0', vnic1.parameters.get('projmask')) - self.assertEqual('192.168.0.2', vnic1.parameters.get('ip')) - self.assertEqual('192.168.0.0', vnic1.parameters.get('projnet')) - self.assertEqual('192.168.0.1', vnic1.parameters.get('dhcpserver')) - self.assertEqual(5, info1.rx_bytes) - self.assertEqual(6, info1.rx_packets) - self.assertEqual(7, info1.tx_bytes) - self.assertEqual(8, info1.tx_packets) - - vnic2, info2 = interfaces[2] - self.assertEqual('vnet2', vnic2.name) - self.assertEqual('fa:16:3e:96:33:f0', vnic2.mac) - self.assertIsNone(vnic2.fref) - self.assertEqual(dict(), vnic2.parameters) - self.assertEqual(9, info2.rx_bytes) - self.assertEqual(10, info2.rx_packets) - self.assertEqual(11, info2.tx_bytes) - self.assertEqual(12, info2.tx_packets) - - def test_inspect_vnics_with_domain_shutoff(self): - connection = self.inspector.connection - with contextlib.ExitStack() as stack: - stack.enter_context(mock.patch.object(connection, - 'lookupByUUIDString', - return_value=self.domain)) - stack.enter_context(mock.patch.object(self.domain, 'info', - return_value=(5, 0, 0, - 2, 999999))) - inspect = self.inspector.inspect_vnics - self.assertRaises(virt_inspector.InstanceShutOffException, - list, inspect(self.instance)) - - def test_inspect_disks(self): - dom_xml = """ - - - - - - - -
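The disk fixture XML for test_inspect_disks was stripped here as well. From
the assertion that disk0.device equals 'vda', it contained at least one disk
definition along these lines (a minimal sketch, not the verbatim fixture):

    <domain type='kvm'>
      <devices>
        <disk type='file' device='disk'>
          <target dev='vda' bus='virtio'/>
        </disk>
      </devices>
    </domain>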
- - - - """ - - with contextlib.ExitStack() as stack: - stack.enter_context(mock.patch.object(self.inspector.connection, - 'lookupByUUIDString', - return_value=self.domain)) - stack.enter_context(mock.patch.object(self.domain, 'XMLDesc', - return_value=dom_xml)) - stack.enter_context(mock.patch.object(self.domain, 'blockStats', - return_value=(1, 2, 3, - 4, -1))) - stack.enter_context(mock.patch.object(self.domain, 'info', - return_value=(0, 0, 0, - 2, 999999))) - disks = list(self.inspector.inspect_disks(self.instance)) - - self.assertEqual(1, len(disks)) - disk0, info0 = disks[0] - self.assertEqual('vda', disk0.device) - self.assertEqual(1, info0.read_requests) - self.assertEqual(2, info0.read_bytes) - self.assertEqual(3, info0.write_requests) - self.assertEqual(4, info0.write_bytes) - - def test_inspect_disks_with_domain_shutoff(self): - connection = self.inspector.connection - with contextlib.ExitStack() as stack: - stack.enter_context(mock.patch.object(connection, - 'lookupByUUIDString', - return_value=self.domain)) - stack.enter_context(mock.patch.object(self.domain, 'info', - return_value=(5, 0, 0, - 2, 999999))) - inspect = self.inspector.inspect_disks - self.assertRaises(virt_inspector.InstanceShutOffException, - list, inspect(self.instance)) - - def test_inspect_memory_usage(self): - fake_memory_stats = {'available': 51200, 'unused': 25600} - connection = self.inspector.connection - with mock.patch.object(connection, 'lookupByUUIDString', - return_value=self.domain): - with mock.patch.object(self.domain, 'info', - return_value=(0, 0, 51200, - 2, 999999)): - with mock.patch.object(self.domain, 'memoryStats', - return_value=fake_memory_stats): - memory = self.inspector.inspect_memory_usage( - self.instance) - self.assertEqual(25600 / units.Ki, memory.usage) - - def test_inspect_disk_info(self): - dom_xml = """ - - - - - - - -
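The test_inspect_disk_info fixture was stripped in the same way; given the
identical 'vda' device assertion below, it presumably mirrored the disk XML
sketched above.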
- - - - """ - - with contextlib.ExitStack() as stack: - stack.enter_context(mock.patch.object(self.inspector.connection, - 'lookupByUUIDString', - return_value=self.domain)) - stack.enter_context(mock.patch.object(self.domain, 'XMLDesc', - return_value=dom_xml)) - stack.enter_context(mock.patch.object(self.domain, 'blockInfo', - return_value=(1, 2, 3, - -1))) - stack.enter_context(mock.patch.object(self.domain, 'info', - return_value=(0, 0, 0, - 2, 999999))) - disks = list(self.inspector.inspect_disk_info(self.instance)) - - self.assertEqual(1, len(disks)) - disk0, info0 = disks[0] - self.assertEqual('vda', disk0.device) - self.assertEqual(1, info0.capacity) - self.assertEqual(2, info0.allocation) - self.assertEqual(3, info0.physical) - - def test_inspect_memory_usage_with_domain_shutoff(self): - connection = self.inspector.connection - with mock.patch.object(connection, 'lookupByUUIDString', - return_value=self.domain): - with mock.patch.object(self.domain, 'info', - return_value=(5, 0, 0, - 2, 999999)): - self.assertRaises(virt_inspector.InstanceShutOffException, - self.inspector.inspect_memory_usage, - self.instance) - - def test_inspect_memory_usage_with_empty_stats(self): - connection = self.inspector.connection - with mock.patch.object(connection, 'lookupByUUIDString', - return_value=self.domain): - with mock.patch.object(self.domain, 'info', - return_value=(0, 0, 51200, - 2, 999999)): - with mock.patch.object(self.domain, 'memoryStats', - return_value={}): - self.assertRaises(virt_inspector.NoDataException, - self.inspector.inspect_memory_usage, - self.instance) - - -class TestLibvirtInspectionWithError(base.BaseTestCase): - - class fakeLibvirtError(Exception): - pass - - def setUp(self): - super(TestLibvirtInspectionWithError, self).setUp() - self.inspector = libvirt_inspector.LibvirtInspector() - self.useFixture(fixtures.MonkeyPatch( - 'ceilometer.compute.virt.libvirt.inspector.' - 'LibvirtInspector._get_connection', - self._dummy_get_connection)) - libvirt_inspector.libvirt = mock.Mock() - libvirt_inspector.libvirt.libvirtError = self.fakeLibvirtError - - @staticmethod - def _dummy_get_connection(*args, **kwargs): - raise Exception('dummy') - - def test_inspect_unknown_error(self): - self.assertRaises(virt_inspector.InspectorException, - self.inspector.inspect_cpus, 'foo') diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/compute/virt/vmware/test_inspector.py ceilometer-5.0.0~b3/ceilometer/tests/compute/virt/vmware/test_inspector.py --- ceilometer-5.0.0~b2/ceilometer/tests/compute/virt/vmware/test_inspector.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/compute/virt/vmware/test_inspector.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,165 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests for VMware vSphere inspector. 
-""" - -import mock -from oslo_vmware import api -from oslotest import base - -from ceilometer.compute.virt import inspector as virt_inspector -from ceilometer.compute.virt.vmware import inspector as vsphere_inspector - - -class TestVsphereInspection(base.BaseTestCase): - - def setUp(self): - api_session = api.VMwareAPISession("test_server", "test_user", - "test_password", 0, None, - create_session=False, port=7443) - vsphere_inspector.get_api_session = mock.Mock( - return_value=api_session) - self._inspector = vsphere_inspector.VsphereInspector() - self._inspector._ops = mock.MagicMock() - - super(TestVsphereInspection, self).setUp() - - def test_inspect_memory_usage(self): - fake_instance_moid = 'fake_instance_moid' - fake_instance_id = 'fake_instance_id' - fake_perf_counter_id = 'fake_perf_counter_id' - fake_memory_value = 1024.0 - fake_stat = virt_inspector.MemoryUsageStats(usage=1.0) - - def construct_mock_instance_object(fake_instance_id): - instance_object = mock.MagicMock() - instance_object.id = fake_instance_id - return instance_object - - fake_instance = construct_mock_instance_object(fake_instance_id) - self._inspector._ops.get_vm_moid.return_value = fake_instance_moid - (self._inspector._ops. - get_perf_counter_id.return_value) = fake_perf_counter_id - (self._inspector._ops.query_vm_aggregate_stats. - return_value) = fake_memory_value - memory_stat = self._inspector.inspect_memory_usage(fake_instance) - self.assertEqual(fake_stat, memory_stat) - - def test_inspect_cpu_util(self): - fake_instance_moid = 'fake_instance_moid' - fake_instance_id = 'fake_instance_id' - fake_perf_counter_id = 'fake_perf_counter_id' - fake_cpu_util_value = 60 - fake_stat = virt_inspector.CPUUtilStats(util=60) - - def construct_mock_instance_object(fake_instance_id): - instance_object = mock.MagicMock() - instance_object.id = fake_instance_id - return instance_object - - fake_instance = construct_mock_instance_object(fake_instance_id) - self._inspector._ops.get_vm_moid.return_value = fake_instance_moid - (self._inspector._ops.get_perf_counter_id. - return_value) = fake_perf_counter_id - (self._inspector._ops.query_vm_aggregate_stats. 
- return_value) = fake_cpu_util_value * 100 - cpu_util_stat = self._inspector.inspect_cpu_util(fake_instance) - self.assertEqual(fake_stat, cpu_util_stat) - - def test_inspect_vnic_rates(self): - - # construct test data - test_vm_moid = "vm-21" - vnic1 = "vnic-1" - vnic2 = "vnic-2" - counter_name_to_id_map = { - vsphere_inspector.VC_NETWORK_RX_COUNTER: 1, - vsphere_inspector.VC_NETWORK_TX_COUNTER: 2 - } - counter_id_to_stats_map = { - 1: {vnic1: 1, vnic2: 3}, - 2: {vnic1: 2, vnic2: 4}, - } - - def get_counter_id_side_effect(counter_full_name): - return counter_name_to_id_map[counter_full_name] - - def query_stat_side_effect(vm_moid, counter_id, duration): - # assert inputs - self.assertEqual(test_vm_moid, vm_moid) - self.assertIn(counter_id, counter_id_to_stats_map) - return counter_id_to_stats_map[counter_id] - - # configure vsphere operations mock with the test data - ops_mock = self._inspector._ops - ops_mock.get_vm_moid.return_value = test_vm_moid - ops_mock.get_perf_counter_id.side_effect = get_counter_id_side_effect - ops_mock.query_vm_device_stats.side_effect = query_stat_side_effect - result = self._inspector.inspect_vnic_rates(mock.MagicMock()) - - # validate result - expected_stats = { - vnic1: virt_inspector.InterfaceRateStats(1024, 2048), - vnic2: virt_inspector.InterfaceRateStats(3072, 4096) - } - - for vnic, rates_info in result: - self.assertEqual(expected_stats[vnic.name], rates_info) - - def test_inspect_disk_rates(self): - - # construct test data - test_vm_moid = "vm-21" - disk1 = "disk-1" - disk2 = "disk-2" - counter_name_to_id_map = { - vsphere_inspector.VC_DISK_READ_RATE_CNTR: 1, - vsphere_inspector.VC_DISK_READ_REQUESTS_RATE_CNTR: 2, - vsphere_inspector.VC_DISK_WRITE_RATE_CNTR: 3, - vsphere_inspector.VC_DISK_WRITE_REQUESTS_RATE_CNTR: 4 - } - counter_id_to_stats_map = { - 1: {disk1: 1, disk2: 2}, - 2: {disk1: 300, disk2: 400}, - 3: {disk1: 5, disk2: 6}, - 4: {disk1: 700}, - } - - def get_counter_id_side_effect(counter_full_name): - return counter_name_to_id_map[counter_full_name] - - def query_stat_side_effect(vm_moid, counter_id, duration): - # assert inputs - self.assertEqual(test_vm_moid, vm_moid) - self.assertIn(counter_id, counter_id_to_stats_map) - return counter_id_to_stats_map[counter_id] - - # configure vsphere operations mock with the test data - ops_mock = self._inspector._ops - ops_mock.get_vm_moid.return_value = test_vm_moid - ops_mock.get_perf_counter_id.side_effect = get_counter_id_side_effect - ops_mock.query_vm_device_stats.side_effect = query_stat_side_effect - - result = self._inspector.inspect_disk_rates(mock.MagicMock()) - - # validate result - expected_stats = { - disk1: virt_inspector.DiskRateStats(1024, 300, 5120, 700), - disk2: virt_inspector.DiskRateStats(2048, 400, 6144, 0) - } - - actual_stats = dict((disk.device, rates) for (disk, rates) in result) - self.assertEqual(expected_stats, actual_stats) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/compute/virt/vmware/test_vsphere_operations.py ceilometer-5.0.0~b3/ceilometer/tests/compute/virt/vmware/test_vsphere_operations.py --- ceilometer-5.0.0~b2/ceilometer/tests/compute/virt/vmware/test_vsphere_operations.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/compute/virt/vmware/test_vsphere_operations.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,174 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_vmware import api -from oslotest import base - -from ceilometer.compute.virt.vmware import vsphere_operations - - -class VsphereOperationsTest(base.BaseTestCase): - - def setUp(self): - api_session = api.VMwareAPISession("test_server", "test_user", - "test_password", 0, None, - create_session=False) - api_session._vim = mock.MagicMock() - self._vsphere_ops = vsphere_operations.VsphereOperations(api_session, - 1000) - super(VsphereOperationsTest, self).setUp() - - def test_get_vm_moid(self): - - vm1_moid = "vm-1" - vm2_moid = "vm-2" - vm1_instance = "0a651a71-142c-4813-aaa6-42e5d5c80d85" - vm2_instance = "db1d2533-6bef-4cb2-aef3-920e109f5693" - - def construct_mock_vm_object(vm_moid, vm_instance): - vm_object = mock.MagicMock() - vm_object.obj.value = vm_moid - vm_object.propSet[0].val = vm_instance - return vm_object - - def retrieve_props_side_effect(pc, specSet, options): - # assert inputs - self.assertEqual(self._vsphere_ops._max_objects, - options.maxObjects) - self.assertEqual(vsphere_operations.VM_INSTANCE_ID_PROPERTY, - specSet[0].pathSet[0]) - - # mock return result - vm1 = construct_mock_vm_object(vm1_moid, vm1_instance) - vm2 = construct_mock_vm_object(vm2_moid, vm2_instance) - result = mock.MagicMock() - result.objects.__iter__.return_value = [vm1, vm2] - return result - - vim_mock = self._vsphere_ops._api_session._vim - vim_mock.RetrievePropertiesEx.side_effect = retrieve_props_side_effect - vim_mock.ContinueRetrievePropertiesEx.return_value = None - - vm_moid = self._vsphere_ops.get_vm_moid(vm1_instance) - self.assertEqual(vm1_moid, vm_moid) - - vm_moid = self._vsphere_ops.get_vm_moid(vm2_instance) - self.assertEqual(vm2_moid, vm_moid) - - def test_query_vm_property(self): - - vm_moid = "vm-21" - vm_property_name = "runtime.powerState" - vm_property_val = "poweredON" - - def retrieve_props_side_effect(pc, specSet, options): - # assert inputs - self.assertEqual(vm_moid, specSet[0].obj.value) - self.assertEqual(vm_property_name, specSet[0].pathSet[0]) - - # mock return result - result = mock.MagicMock() - result.objects[0].propSet[0].val = vm_property_val - return result - - vim_mock = self._vsphere_ops._api_session._vim - vim_mock.RetrievePropertiesEx.side_effect = retrieve_props_side_effect - - actual_val = self._vsphere_ops.query_vm_property(vm_moid, - vm_property_name) - self.assertEqual(vm_property_val, actual_val) - - def test_get_perf_counter_id(self): - - def construct_mock_counter_info(group_name, counter_name, rollup_type, - counter_id): - counter_info = mock.MagicMock() - counter_info.groupInfo.key = group_name - counter_info.nameInfo.key = counter_name - counter_info.rollupType = rollup_type - counter_info.key = counter_id - return counter_info - - def retrieve_props_side_effect(pc, specSet, options): - # assert inputs - self.assertEqual(vsphere_operations.PERF_COUNTER_PROPERTY, - specSet[0].pathSet[0]) - - # mock return result - counter_info1 = construct_mock_counter_info("a", "b", "c", 1) - counter_info2 = construct_mock_counter_info("x", "y", "z", 2) - result = mock.MagicMock() - 
(result.objects[0].propSet[0].val.PerfCounterInfo.__iter__. - return_value) = [counter_info1, counter_info2] - return result - - vim_mock = self._vsphere_ops._api_session._vim - vim_mock.RetrievePropertiesEx.side_effect = retrieve_props_side_effect - - counter_id = self._vsphere_ops.get_perf_counter_id("a:b:c") - self.assertEqual(1, counter_id) - - counter_id = self._vsphere_ops.get_perf_counter_id("x:y:z") - self.assertEqual(2, counter_id) - - def test_query_vm_stats(self): - - vm_moid = "vm-21" - device1 = "device-1" - device2 = "device-2" - device3 = "device-3" - counter_id = 5 - - def construct_mock_metric_series(device_name, stat_values): - metric_series = mock.MagicMock() - metric_series.value = stat_values - metric_series.id.instance = device_name - return metric_series - - def vim_query_perf_side_effect(perf_manager, querySpec): - # assert inputs - self.assertEqual(vm_moid, querySpec[0].entity.value) - self.assertEqual(counter_id, querySpec[0].metricId[0].counterId) - self.assertEqual(vsphere_operations.VC_REAL_TIME_SAMPLING_INTERVAL, - querySpec[0].intervalId) - - # mock return result - perf_stats = mock.MagicMock() - perf_stats[0].sampleInfo = ["s1", "s2", "s3"] - perf_stats[0].value.__iter__.return_value = [ - construct_mock_metric_series(None, [111, 222, 333]), - construct_mock_metric_series(device1, [100, 200, 300]), - construct_mock_metric_series(device2, [10, 20, 30]), - construct_mock_metric_series(device3, [1, 2, 3]) - ] - return perf_stats - - vim_mock = self._vsphere_ops._api_session._vim - vim_mock.QueryPerf.side_effect = vim_query_perf_side_effect - ops = self._vsphere_ops - - # test aggregate stat - stat_val = ops.query_vm_aggregate_stats(vm_moid, counter_id, 60) - self.assertEqual(222, stat_val) - - # test per-device(non-aggregate) stats - expected_device_stats = { - device1: 200, - device2: 20, - device3: 2 - } - stats = ops.query_vm_device_stats(vm_moid, counter_id, 60) - self.assertEqual(expected_device_stats, stats) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/compute/virt/xenapi/test_inspector.py ceilometer-5.0.0~b3/ceilometer/tests/compute/virt/xenapi/test_inspector.py --- ceilometer-5.0.0~b2/ceilometer/tests/compute/virt/xenapi/test_inspector.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/compute/virt/xenapi/test_inspector.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,160 +0,0 @@ -# Copyright 2014 Intel -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for xenapi inspector. 
-""" - -import mock -from oslotest import base - -from ceilometer.compute.virt import inspector as virt_inspector -from ceilometer.compute.virt.xenapi import inspector as xenapi_inspector - - -class TestXenapiInspection(base.BaseTestCase): - - def setUp(self): - api_session = mock.Mock() - xenapi_inspector.get_api_session = mock.Mock(return_value=api_session) - self.inspector = xenapi_inspector.XenapiInspector() - - super(TestXenapiInspection, self).setUp() - - def test_inspect_cpu_util(self): - fake_instance = {'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name', - 'id': 'fake_instance_id'} - fake_stat = virt_inspector.CPUUtilStats(util=40) - - def fake_xenapi_request(method, args): - metrics_rec = { - 'memory_actual': '536870912', - 'VCPUs_number': '1', - 'VCPUs_utilisation': {'0': 0.4, } - } - - if method == 'VM.get_by_name_label': - return ['vm_ref'] - elif method == 'VM.get_metrics': - return 'metrics_ref' - elif method == 'VM_metrics.get_record': - return metrics_rec - else: - return None - - session = self.inspector.session - with mock.patch.object(session, 'xenapi_request', - side_effect=fake_xenapi_request): - cpu_util_stat = self.inspector.inspect_cpu_util(fake_instance) - self.assertEqual(fake_stat, cpu_util_stat) - - def test_inspect_memory_usage(self): - fake_instance = {'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name', - 'id': 'fake_instance_id'} - fake_stat = virt_inspector.MemoryUsageStats(usage=128) - - def fake_xenapi_request(method, args): - metrics_rec = { - 'memory_actual': '134217728', - } - - if method == 'VM.get_by_name_label': - return ['vm_ref'] - elif method == 'VM.get_metrics': - return 'metrics_ref' - elif method == 'VM_metrics.get_record': - return metrics_rec - else: - return None - - session = self.inspector.session - with mock.patch.object(session, 'xenapi_request', - side_effect=fake_xenapi_request): - memory_stat = self.inspector.inspect_memory_usage(fake_instance) - self.assertEqual(fake_stat, memory_stat) - - def test_inspect_vnic_rates(self): - fake_instance = {'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name', - 'id': 'fake_instance_id'} - - def fake_xenapi_request(method, args): - vif_rec = { - 'metrics': 'vif_metrics_ref', - 'uuid': 'vif_uuid', - 'MAC': 'vif_mac', - } - - vif_metrics_rec = { - 'io_read_kbs': '1', - 'io_write_kbs': '2', - } - if method == 'VM.get_by_name_label': - return ['vm_ref'] - elif method == 'VM.get_VIFs': - return ['vif_ref'] - elif method == 'VIF.get_record': - return vif_rec - elif method == 'VIF.get_metrics': - return 'vif_metrics_ref' - elif method == 'VIF_metrics.get_record': - return vif_metrics_rec - else: - return None - - session = self.inspector.session - with mock.patch.object(session, 'xenapi_request', - side_effect=fake_xenapi_request): - interfaces = list(self.inspector.inspect_vnic_rates(fake_instance)) - - self.assertEqual(1, len(interfaces)) - vnic0, info0 = interfaces[0] - self.assertEqual('vif_uuid', vnic0.name) - self.assertEqual('vif_mac', vnic0.mac) - self.assertEqual(1024, info0.rx_bytes_rate) - self.assertEqual(2048, info0.tx_bytes_rate) - - def test_inspect_disk_rates(self): - fake_instance = {'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name', - 'id': 'fake_instance_id'} - - def fake_xenapi_request(method, args): - vbd_rec = { - 'device': 'xvdd' - } - - vbd_metrics_rec = { - 'io_read_kbs': '1', - 'io_write_kbs': '2' - } - if method == 'VM.get_by_name_label': - return ['vm_ref'] - elif method == 'VM.get_VBDs': - return ['vbd_ref'] - elif method == 'VBD.get_record': - return vbd_rec - 
elif method == 'VBD.get_metrics': - return 'vbd_metrics_ref' - elif method == 'VBD_metrics.get_record': - return vbd_metrics_rec - else: - return None - - session = self.inspector.session - with mock.patch.object(session, 'xenapi_request', - side_effect=fake_xenapi_request): - disks = list(self.inspector.inspect_disk_rates(fake_instance)) - - self.assertEqual(1, len(disks)) - disk0, info0 = disks[0] - self.assertEqual('xvdd', disk0.device) - self.assertEqual(1024, info0.read_bytes_rate) - self.assertEqual(2048, info0.write_bytes_rate) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/database/test_notifications.py ceilometer-5.0.0~b3/ceilometer/tests/database/test_notifications.py --- ceilometer-5.0.0~b2/ceilometer/tests/database/test_notifications.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/database/test_notifications.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,90 +0,0 @@ -# -# Copyright 2015 Hewlett Packard -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import mock -from oslo_utils import timeutils -from oslotest import base - -from ceilometer.database import notifications -from ceilometer import sample - -NOW = timeutils.utcnow().isoformat() - -TENANT_ID = u'76538754af6548f5b53cf9af2d35d582' -USER_ID = u'b70ece400e4e45c187168c40fa42ff7a' -INSTANCE_STATE = u'active' -INSTANCE_TYPE = u'm1.rd-tiny' -RESOURCE_ID = u'a8b55824-e731-40a3-a32d-de81474d74b2' -SERVICE_ID = u'2f3ff068-2bfb-4f70-9a9d-a6bb65bc084b' -NOVA_INSTANCE_ID = u'1cf6ce1b-708b-4e6a-8ecf-2b60c8ccd435' -PUBLISHER_ID = u'trove' - - -def _trove_notification_for(operation): - return { - u'event_type': '%s.instance.%s' % (notifications.SERVICE, - operation), - u'priority': u'INFO', - u'timestamp': NOW, - u'publisher_id': PUBLISHER_ID, - u'message_id': u'67ba0a2a-32bd-4cdf-9bfb-ef9cefcd0f63', - u'payload': { - u'state_description': INSTANCE_STATE, - u'user_id': USER_ID, - u'audit_period_beginning': u'2015-07-10T20:05:29.870091Z', - u'tenant_id': TENANT_ID, - u'created_at': u'2015-06-29T20:52:12.000000', - u'instance_type_id': u'7', - u'launched_at': u'2015-06-29T20:52:12.000000', - u'instance_id': RESOURCE_ID, - u'instance_type': INSTANCE_TYPE, - u'state': INSTANCE_STATE, - u'service_id': SERVICE_ID, - u'nova_instance_id': NOVA_INSTANCE_ID, - u'display_name': u'test', - u'instance_name': u'test', - u'region': u'LOCAL_DEV', - u'audit_period_ending': u'2015-07-10T21:05:29.870091Z' - }, - - } - - -class TestNotification(base.BaseTestCase): - def _verify_common_sample(self, actual, operation): - self.assertIsNotNone(actual) - self.assertEqual('%s.instance.%s' % (notifications.SERVICE, operation), - actual.name) - self.assertEqual(NOW, actual.timestamp) - self.assertEqual(sample.TYPE_CUMULATIVE, actual.type) - self.assertEqual(TENANT_ID, actual.project_id) - self.assertEqual(RESOURCE_ID, actual.resource_id) - self.assertEqual(USER_ID, actual.user_id) - self.assertEqual(3600, actual.volume) - self.assertEqual('s', actual.unit) - - metadata = actual.resource_metadata - self.assertEqual(PUBLISHER_ID, 
metadata.get('host')) - - def _test_operation(self, operation): - notif = _trove_notification_for(operation) - handler = notifications.InstanceExists(mock.Mock()) - data = list(handler.process_notification(notif)) - self.assertEqual(1, len(data)) - self._verify_common_sample(data[0], operation) - - def test_exists(self): - self._test_operation('exists') diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/data_processing/test_notifications.py ceilometer-5.0.0~b3/ceilometer/tests/data_processing/test_notifications.py --- ceilometer-5.0.0~b2/ceilometer/tests/data_processing/test_notifications.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/data_processing/test_notifications.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,101 +0,0 @@ -# Copyright (c) 2014 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import datetime - -import mock -from oslo_config import cfg -from oslo_log import log -from oslotest import base - -from ceilometer.data_processing import notifications -from ceilometer import sample - -NOW = datetime.datetime.isoformat(datetime.datetime.utcnow()) - -TENANT_ID = u'4c35985848bf4419b3f3d52c22e5792d' -CLUSTER_NAME = u'AS1-ASGroup-53sqbo7sor7i' -CLUSTER_ID = u'cb4a6fd1-1f5d-4002-ae91-9b91573cfb03' -USER_NAME = u'demo' -USER_ID = u'2e61f25ec63a4f6c954a6245421448a4' -CLUSTER_STATUS = u'Active' -PROJECT_ID = TENANT_ID -RESOURCE_ID = CLUSTER_ID -PUBLISHER_ID = u'data_processing.node-n5x66lxdy67d' - - -CONF = cfg.CONF -log.register_options(CONF) -CONF.set_override('use_stderr', True) - -LOG = log.getLogger(__name__) - - -def _dp_notification_for(operation): - - return { - u'event_type': '%s.cluster.%s' % (notifications.SERVICE, - operation), - u'_context_roles': [ - u'Member', - ], - u'_context_auth_uri': u'http://0.1.0.1:1010/v2.0', - u'timestamp': NOW, - u'_context_tenant_id': TENANT_ID, - u'payload': { - u'cluster_id': CLUSTER_ID, - u'cluster_name': CLUSTER_NAME, - u'cluster_status': CLUSTER_STATUS, - u'project_id': TENANT_ID, - u'user_id': USER_ID, - }, - u'_context_username': USER_NAME, - u'_context_token': u'MIISAwYJKoZIhvcNAQcCoII...', - u'_context_user_id': USER_ID, - u'_context_tenant_name': USER_NAME, - u'priority': u'INFO', - u'_context_is_admin': False, - u'publisher_id': PUBLISHER_ID, - u'message_id': u'ef921faa-7f7b-4854-8b86-a424ab93c96e', - } - - -class TestNotification(base.BaseTestCase): - def _verify_common_sample(self, actual, operation): - self.assertIsNotNone(actual) - self.assertEqual('cluster.%s' % operation, actual.name) - self.assertEqual(NOW, actual.timestamp) - self.assertEqual(sample.TYPE_DELTA, actual.type) - self.assertEqual(PROJECT_ID, actual.project_id) - self.assertEqual(RESOURCE_ID, actual.resource_id) - self.assertEqual(USER_ID, actual.user_id) - metadata = actual.resource_metadata - self.assertEqual(PUBLISHER_ID, metadata.get('host')) - - def _test_operation(self, operation): - notif = _dp_notification_for(operation) - handler = notifications.DataProcessing(mock.Mock()) - data = 
list(handler.process_notification(notif)) - self.assertEqual(1, len(data)) - self._verify_common_sample(data[0], operation) - - def test_create(self): - self._test_operation('create') - - def test_update(self): - self._test_operation('update') - - def test_delete(self): - self._test_operation('delete') diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/dispatcher/test_db.py ceilometer-5.0.0~b3/ceilometer/tests/dispatcher/test_db.py --- ceilometer-5.0.0~b2/ceilometer/tests/dispatcher/test_db.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/dispatcher/test_db.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,117 +0,0 @@ -# -# Copyright 2013 IBM Corp -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import datetime -import uuid - -import mock -from oslo_config import fixture as fixture_config -from oslotest import base - -from ceilometer.dispatcher import database -from ceilometer.event.storage import models as event_models -from ceilometer.publisher import utils - - -class TestDispatcherDB(base.BaseTestCase): - - def setUp(self): - super(TestDispatcherDB, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.CONF.set_override('connection', 'sqlite://', group='database') - self.dispatcher = database.DatabaseDispatcher(self.CONF) - self.ctx = None - - def test_event_conn(self): - event = event_models.Event(uuid.uuid4(), 'test', - datetime.datetime(2012, 7, 2, 13, 53, 40), - [], {}).serialize() - with mock.patch.object(self.dispatcher.event_conn, - 'record_events') as record_events: - self.dispatcher.record_events(event) - self.assertTrue(record_events.called) - - def test_valid_message(self): - msg = {'counter_name': 'test', - 'resource_id': self.id(), - 'counter_volume': 1, - } - msg['message_signature'] = utils.compute_signature( - msg, self.CONF.publisher.telemetry_secret, - ) - - with mock.patch.object(self.dispatcher.meter_conn, - 'record_metering_data') as record_metering_data: - self.dispatcher.record_metering_data(msg) - - record_metering_data.assert_called_once_with(msg) - - def test_invalid_message(self): - msg = {'counter_name': 'test', - 'resource_id': self.id(), - 'counter_volume': 1, - 'message_signature': 'invalid-signature'} - - class ErrorConnection(object): - - called = False - - def record_metering_data(self, data): - self.called = True - - self.dispatcher._meter_conn = ErrorConnection() - - self.dispatcher.record_metering_data(msg) - - if self.dispatcher.meter_conn.called: - self.fail('Should not have called the storage connection') - - def test_timestamp_conversion(self): - msg = {'counter_name': 'test', - 'resource_id': self.id(), - 'counter_volume': 1, - 'timestamp': '2012-07-02T13:53:40Z', - } - msg['message_signature'] = utils.compute_signature( - msg, self.CONF.publisher.telemetry_secret, - ) - - expected = msg.copy() - expected['timestamp'] = datetime.datetime(2012, 7, 2, 13, 53, 40) - - with mock.patch.object(self.dispatcher.meter_conn, - 'record_metering_data') as record_metering_data: - 
self.dispatcher.record_metering_data(msg) - - record_metering_data.assert_called_once_with(expected) - - def test_timestamp_tzinfo_conversion(self): - msg = {'counter_name': 'test', - 'resource_id': self.id(), - 'counter_volume': 1, - 'timestamp': '2012-09-30T15:31:50.262-08:00', - } - msg['message_signature'] = utils.compute_signature( - msg, self.CONF.publisher.telemetry_secret, - ) - - expected = msg.copy() - expected['timestamp'] = datetime.datetime(2012, 9, 30, 23, - 31, 50, 262000) - - with mock.patch.object(self.dispatcher.meter_conn, - 'record_metering_data') as record_metering_data: - self.dispatcher.record_metering_data(msg) - - record_metering_data.assert_called_once_with(expected) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/dispatcher/test_file.py ceilometer-5.0.0~b3/ceilometer/tests/dispatcher/test_file.py --- ceilometer-5.0.0~b2/ceilometer/tests/dispatcher/test_file.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/dispatcher/test_file.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,99 +0,0 @@ -# -# Copyright 2013 IBM Corp -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import logging.handlers -import os -import tempfile - -from oslo_config import fixture as fixture_config -from oslotest import base - -from ceilometer.dispatcher import file -from ceilometer.publisher import utils - - -class TestDispatcherFile(base.BaseTestCase): - - def setUp(self): - super(TestDispatcherFile, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - - def test_file_dispatcher_with_all_config(self): - # Create a temporaryFile to get a file name - tf = tempfile.NamedTemporaryFile('r') - filename = tf.name - tf.close() - - self.CONF.dispatcher_file.file_path = filename - self.CONF.dispatcher_file.max_bytes = 50 - self.CONF.dispatcher_file.backup_count = 5 - dispatcher = file.FileDispatcher(self.CONF) - - # The number of the handlers should be 1 - self.assertEqual(1, len(dispatcher.log.handlers)) - # The handler should be RotatingFileHandler - handler = dispatcher.log.handlers[0] - self.assertIsInstance(handler, - logging.handlers.RotatingFileHandler) - - msg = {'counter_name': 'test', - 'resource_id': self.id(), - 'counter_volume': 1, - } - msg['message_signature'] = utils.compute_signature( - msg, self.CONF.publisher.telemetry_secret, - ) - - # The record_metering_data method should exist and not produce errors. - dispatcher.record_metering_data(msg) - # After the method call above, the file should have been created. 
- self.assertTrue(os.path.exists(handler.baseFilename)) - - def test_file_dispatcher_with_path_only(self): - # Create a temporaryFile to get a file name - tf = tempfile.NamedTemporaryFile('r') - filename = tf.name - tf.close() - - self.CONF.dispatcher_file.file_path = filename - self.CONF.dispatcher_file.max_bytes = 0 - self.CONF.dispatcher_file.backup_count = 0 - dispatcher = file.FileDispatcher(self.CONF) - - # The number of the handlers should be 1 - self.assertEqual(1, len(dispatcher.log.handlers)) - # The handler should be RotatingFileHandler - handler = dispatcher.log.handlers[0] - self.assertIsInstance(handler, - logging.FileHandler) - - msg = {'counter_name': 'test', - 'resource_id': self.id(), - 'counter_volume': 1, - } - msg['message_signature'] = utils.compute_signature( - msg, self.CONF.publisher.telemetry_secret, - ) - - # The record_metering_data method should exist and not produce errors. - dispatcher.record_metering_data(msg) - # After the method call above, the file should have been created. - self.assertTrue(os.path.exists(handler.baseFilename)) - - def test_file_dispatcher_with_no_path(self): - self.CONF.dispatcher_file.file_path = None - dispatcher = file.FileDispatcher(self.CONF) - - # The log should be None - self.assertIsNone(dispatcher.log) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/dispatcher/test_gnocchi.py ceilometer-5.0.0~b3/ceilometer/tests/dispatcher/test_gnocchi.py --- ceilometer-5.0.0~b2/ceilometer/tests/dispatcher/test_gnocchi.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/dispatcher/test_gnocchi.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,427 +0,0 @@ -# -# Copyright 2014 eNovance -# -# Authors: Mehdi Abaakouk -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import json -import os -import uuid - -import mock -from oslo_config import fixture as config_fixture -from oslo_utils import fileutils -from oslotest import mockpatch -import requests -import six -import six.moves.urllib.parse as urlparse -import tempfile -import testscenarios -import yaml - -from ceilometer.dispatcher import gnocchi -from ceilometer import service as ceilometer_service -from ceilometer.tests import base - -load_tests = testscenarios.load_tests_apply_scenarios - - -class json_matcher(object): - def __init__(self, ref): - self.ref = ref - - def __eq__(self, obj): - return self.ref == json.loads(obj) - - def __repr__(self): - return "" % self.ref - - -class DispatcherTest(base.BaseTestCase): - - def setUp(self): - super(DispatcherTest, self).setUp() - self.conf = self.useFixture(config_fixture.Config()) - ceilometer_service.prepare_service([]) - self.conf.config( - resources_definition_file=self.path_get( - 'etc/ceilometer/gnocchi_resources.yaml'), - group="dispatcher_gnocchi" - ) - self.resource_id = str(uuid.uuid4()) - self.samples = [{ - 'counter_name': 'disk.root.size', - 'counter_type': 'gauge', - 'counter_volume': '2', - 'user_id': 'test_user', - 'project_id': 'test_project', - 'source': 'openstack', - 'timestamp': '2012-05-08 20:23:48.028195', - 'resource_id': self.resource_id, - 'resource_metadata': { - 'host': 'foo', - 'image_ref_url': 'imageref!', - 'instance_flavor_id': 1234, - 'display_name': 'myinstance', - }}, - { - 'counter_name': 'disk.root.size', - 'counter_type': 'gauge', - 'counter_volume': '2', - 'user_id': 'test_user', - 'project_id': 'test_project', - 'source': 'openstack', - 'timestamp': '2014-05-08 20:23:48.028195', - 'resource_id': self.resource_id, - 'resource_metadata': { - 'host': 'foo', - 'image_ref_url': 'imageref!', - 'instance_flavor_id': 1234, - 'display_name': 'myinstance', - } - }] - - ks_client = mock.Mock(auth_token='fake_token') - ks_client.tenants.find.return_value = mock.Mock( - name='gnocchi', id='a2d42c23-d518-46b6-96ab-3fba2e146859') - self.useFixture(mockpatch.Patch( - 'ceilometer.keystone_client.get_client', - return_value=ks_client)) - self.conf.conf.dispatcher_gnocchi.filter_service_activity = True - - def test_config_load(self): - self.conf.config(filter_service_activity=False, - group='dispatcher_gnocchi') - d = gnocchi.GnocchiDispatcher(self.conf.conf) - names = [rd.cfg['resource_type'] for rd in d.resources_definition] - self.assertIn('instance', names) - self.assertIn('volume', names) - - def test_broken_config_load(self): - contents = [("---\n" - "resources:\n" - " - resource_type: foobar\n"), - ("---\n" - "resources:\n" - " - resource_type: 0\n"), - ("---\n" - "resources:\n" - " - sample_types: ['foo', 'bar']\n"), - ("---\n" - "resources:\n" - " - sample_types: foobar\n" - " - resource_type: foobar\n"), - ] - - for content in contents: - if six.PY3: - content = content.encode('utf-8') - - temp = fileutils.write_to_tempfile(content=content, - prefix='gnocchi_resources', - suffix='.yaml') - self.addCleanup(os.remove, temp) - self.conf.config(filter_service_activity=False, - resources_definition_file=temp, - group='dispatcher_gnocchi') - self.assertRaises(gnocchi.ResourcesDefinitionException, - gnocchi.GnocchiDispatcher, self.conf.conf) - - @mock.patch('ceilometer.dispatcher.gnocchi.GnocchiDispatcher' - '._process_samples') - def _do_test_activity_filter(self, expected_samples, fake_process_samples): - d = gnocchi.GnocchiDispatcher(self.conf.conf) - d.record_metering_data(self.samples) - - 
fake_process_samples.assert_called_with( - mock.ANY, self.resource_id, 'disk.root.size', - expected_samples, True, - ) - - def test_archive_policy_map_config(self): - archive_policy_map = yaml.dump({ - 'foo.*': 'low' - }) - archive_policy_cfg_file = tempfile.NamedTemporaryFile( - mode='w+b', prefix="foo", suffix=".yaml") - archive_policy_cfg_file.write(archive_policy_map.encode()) - archive_policy_cfg_file.seek(0) - self.conf.conf.dispatcher_gnocchi.archive_policy_file = ( - archive_policy_cfg_file.name) - d = gnocchi.GnocchiDispatcher(self.conf.conf) - legacy = d._load_archive_policy(self.conf.conf) - self.assertEqual(legacy.get('foo.disk.rate'), "low") - archive_policy_cfg_file.close() - - def test_activity_filter_match_project_id(self): - self.samples[0]['project_id'] = ( - 'a2d42c23-d518-46b6-96ab-3fba2e146859') - self._do_test_activity_filter([self.samples[1]]) - - def test_activity_filter_match_swift_event(self): - self.samples[0]['counter_name'] = 'storage.api.request' - self.samples[0]['resource_id'] = 'a2d42c23-d518-46b6-96ab-3fba2e146859' - self._do_test_activity_filter([self.samples[1]]) - - def test_activity_filter_nomatch(self): - self._do_test_activity_filter(self.samples) - - -class MockResponse(mock.NonCallableMock): - def __init__(self, code): - text = {500: 'Internal Server Error', - 404: 'Not Found', - 204: 'Created', - 409: 'Conflict', - }.get(code) - super(MockResponse, self).__init__(spec=requests.Response, - status_code=code, - text=text) - - -class DispatcherWorkflowTest(base.BaseTestCase, - testscenarios.TestWithScenarios): - - sample_scenarios = [ - ('disk.root.size', dict( - sample={ - 'counter_name': 'disk.root.size', - 'counter_type': 'gauge', - 'counter_volume': '2', - 'user_id': 'test_user', - 'project_id': 'test_project', - 'source': 'openstack', - 'timestamp': '2012-05-08 20:23:48.028195', - 'resource_metadata': { - 'host': 'foo', - 'image_ref_url': 'imageref!', - 'instance_flavor_id': 1234, - 'display_name': 'myinstance', - } - }, - measures_attributes=[{ - 'timestamp': '2012-05-08 20:23:48.028195', - 'value': '2' - }], - postable_attributes={ - 'user_id': 'test_user', - 'project_id': 'test_project', - }, - patchable_attributes={ - 'host': 'foo', - 'image_ref': 'imageref!', - 'flavor_id': 1234, - 'display_name': 'myinstance', - }, - metric_names=[ - 'instance', 'disk.root.size', 'disk.ephemeral.size', - 'memory', 'vcpus', 'memory.usage', 'cpu', 'cpu_util'], - resource_type='instance')), - ('hardware.ipmi.node.power', dict( - sample={ - 'counter_name': 'hardware.ipmi.node.power', - 'counter_type': 'gauge', - 'counter_volume': '2', - 'user_id': 'test_user', - 'project_id': 'test_project', - 'source': 'openstack', - 'timestamp': '2012-05-08 20:23:48.028195', - 'resource_metadata': { - 'useless': 'not_used', - } - }, - measures_attributes=[{ - 'timestamp': '2012-05-08 20:23:48.028195', - 'value': '2' - }], - postable_attributes={ - 'user_id': 'test_user', - 'project_id': 'test_project', - }, - patchable_attributes={ - }, - metric_names=[ - 'hardware.ipmi.node.power', 'hardware.ipmi.node.temperature', - 'hardware.ipmi.node.fan', 'hardware.ipmi.node.current', - 'hardware.ipmi.node.voltage', - ], - resource_type='ipmi')), - ] - - worflow_scenarios = [ - ('normal_workflow', dict(measure=204, post_resource=None, metric=None, - measure_retry=None, patch_resource=204)), - ('new_resource', dict(measure=404, post_resource=204, metric=None, - measure_retry=204, patch_resource=None)), - ('new_resource_fail', dict(measure=404, post_resource=500, metric=None, - 
measure_retry=None, patch_resource=None)), - ('resource_update_fail', dict(measure=204, post_resource=None, - metric=None, measure_retry=None, - patch_resource=500)), - ('new_metric', dict(measure=404, post_resource=409, metric=204, - measure_retry=204, patch_resource=204)), - ('new_metric_fail', dict(measure=404, post_resource=409, metric=500, - measure_retry=None, patch_resource=None)), - ('retry_fail', dict(measure=404, post_resource=409, metric=409, - measure_retry=500, patch_resource=None)), - ('measure_fail', dict(measure=500, post_resource=None, metric=None, - measure_retry=None, patch_resource=None)), - ] - - @classmethod - def generate_scenarios(cls): - cls.scenarios = testscenarios.multiply_scenarios(cls.sample_scenarios, - cls.worflow_scenarios) - - def setUp(self): - super(DispatcherWorkflowTest, self).setUp() - self.conf = self.useFixture(config_fixture.Config()) - # Set this explicitly to avoid conflicts with any existing - # configuration. - self.conf.config(url='http://localhost:8041', - group='dispatcher_gnocchi') - ks_client = mock.Mock(auth_token='fake_token') - ks_client.tenants.find.return_value = mock.Mock( - name='gnocchi', id='a2d42c23-d518-46b6-96ab-3fba2e146859') - self.useFixture(mockpatch.Patch( - 'ceilometer.keystone_client.get_client', - return_value=ks_client)) - - ceilometer_service.prepare_service([]) - self.conf.config( - resources_definition_file=self.path_get( - 'etc/ceilometer/gnocchi_resources.yaml'), - group="dispatcher_gnocchi" - ) - - self.dispatcher = gnocchi.GnocchiDispatcher(self.conf.conf) - self.sample['resource_id'] = str(uuid.uuid4()) - - @mock.patch('ceilometer.dispatcher.gnocchi.LOG') - @mock.patch('ceilometer.dispatcher.gnocchi.requests') - def test_workflow(self, fake_requests, logger): - base_url = self.dispatcher.conf.dispatcher_gnocchi.url - url_params = { - 'url': urlparse.urljoin(base_url, '/v1/resource'), - 'resource_id': self.sample['resource_id'], - 'resource_type': self.resource_type, - 'metric_name': self.sample['counter_name'] - } - headers = {'Content-Type': 'application/json', - 'X-Auth-Token': 'fake_token'} - - expected_calls = [] - patch_responses = [] - post_responses = [] - - # This is needed to mock Exception in py3 - fake_requests.ConnectionError = requests.ConnectionError - - expected_calls.extend([ - mock.call.session(), - mock.call.adapters.HTTPAdapter(pool_block=True), - mock.call.session().mount('http://', mock.ANY), - mock.call.session().mount('https://', mock.ANY), - mock.call.session().post( - "%(url)s/%(resource_type)s/%(resource_id)s/" - "metric/%(metric_name)s/measures" % url_params, - headers=headers, - data=json_matcher(self.measures_attributes)) - ]) - post_responses.append(MockResponse(self.measure)) - - if self.post_resource: - attributes = self.postable_attributes.copy() - attributes.update(self.patchable_attributes) - attributes['id'] = self.sample['resource_id'] - attributes['metrics'] = dict((metric_name, - {'archive_policy_name': 'low'}) - for metric_name in self.metric_names) - expected_calls.append(mock.call.session().post( - "%(url)s/%(resource_type)s" % url_params, - headers=headers, - data=json_matcher(attributes)), - ) - post_responses.append(MockResponse(self.post_resource)) - - if self.metric: - expected_calls.append(mock.call.session().post( - "%(url)s/%(resource_type)s/%(resource_id)s/metric" - % url_params, - headers=headers, - data=json_matcher({self.sample['counter_name']: - {'archive_policy_name': 'low'}}) - )) - post_responses.append(MockResponse(self.metric)) - - if 
self.measure_retry: - expected_calls.append(mock.call.session().post( - "%(url)s/%(resource_type)s/%(resource_id)s/" - "metric/%(metric_name)s/measures" % url_params, - headers=headers, - data=json_matcher(self.measures_attributes)) - ) - post_responses.append(MockResponse(self.measure_retry)) - - if self.patch_resource and self.patchable_attributes: - expected_calls.append(mock.call.session().patch( - "%(url)s/%(resource_type)s/%(resource_id)s" % url_params, - headers=headers, - data=json_matcher(self.patchable_attributes)), - ) - patch_responses.append(MockResponse(self.patch_resource)) - - s = fake_requests.session.return_value - s.patch.side_effect = patch_responses - s.post.side_effect = post_responses - - self.dispatcher.record_metering_data([self.sample]) - - # Check that the last log message is the expected one - if self.measure == 500 or self.measure_retry == 500: - logger.error.assert_called_with( - "Fail to post measure on metric %s of resource %s " - "with status: %d: Internal Server Error" % - (self.sample['counter_name'], - self.sample['resource_id'], - 500)) - - elif self.post_resource == 500 or (self.patch_resource == 500 and - self.patchable_attributes): - logger.error.assert_called_with( - "Resource %s %s failed with status: " - "%d: Internal Server Error" % - (self.sample['resource_id'], - 'update' if self.patch_resource else 'creation', - 500)) - elif self.metric == 500: - logger.error.assert_called_with( - "Fail to create metric %s of resource %s " - "with status: %d: Internal Server Error" % - (self.sample['counter_name'], - self.sample['resource_id'], - 500)) - elif self.patch_resource == 204 and self.patchable_attributes: - logger.debug.assert_called_with( - 'Resource %s updated', self.sample['resource_id']) - else: - logger.debug.assert_called_with( - "Measure posted on metric %s of resource %s", - self.sample['counter_name'], - self.sample['resource_id']) - - self.assertEqual(expected_calls, fake_requests.mock_calls) - - -DispatcherWorkflowTest.generate_scenarios() diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/dispatcher/test_http.py ceilometer-5.0.0~b3/ceilometer/tests/dispatcher/test_http.py --- ceilometer-5.0.0~b2/ceilometer/tests/dispatcher/test_http.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/dispatcher/test_http.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,170 +0,0 @@ -# -# Copyright 2013 IBM Corp -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
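The DispatcherWorkflowTest deleted above drives one test method through every (sample, workflow) combination by crossing sample_scenarios with worflow_scenarios. For readers unfamiliar with that pattern, here is a minimal self-contained sketch; the class and scenario names are illustrative, not taken from the diff.

import testscenarios
import unittest

load_tests = testscenarios.load_tests_apply_scenarios


class ExampleScenarioTest(testscenarios.TestWithScenarios, unittest.TestCase):
    # 1 sample scenario x 2 workflow scenarios -> the test runs twice,
    # with each scenario dict's keys injected as instance attributes.
    scenarios = testscenarios.multiply_scenarios(
        [('disk.root.size', dict(sample='disk.root.size'))],
        [('normal_workflow', dict(measure=204)),
         ('measure_fail', dict(measure=500))])

    def test_attributes_injected(self):
        self.assertEqual('disk.root.size', self.sample)
        self.assertIn(self.measure, (204, 500))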
- -import datetime -import uuid - -import mock -from oslo_config import fixture as fixture_config -from oslotest import base -import requests - -from ceilometer.dispatcher import http -from ceilometer.event.storage import models as event_models -from ceilometer.publisher import utils - - -class TestDispatcherHttp(base.BaseTestCase): - - def setUp(self): - super(TestDispatcherHttp, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.msg = {'counter_name': 'test', - 'resource_id': self.id(), - 'counter_volume': 1, - } - self.msg['message_signature'] = utils.compute_signature( - self.msg, self.CONF.publisher.telemetry_secret, - ) - - def test_http_dispatcher_config_options(self): - self.CONF.dispatcher_http.target = 'fake' - self.CONF.dispatcher_http.timeout = 2 - self.CONF.dispatcher_http.cadf_only = True - dispatcher = http.HttpDispatcher(self.CONF) - - self.assertEqual('fake', dispatcher.target) - self.assertEqual(2, dispatcher.timeout) - self.assertEqual(True, dispatcher.cadf_only) - - def test_http_dispatcher_with_no_target(self): - self.CONF.dispatcher_http.target = '' - dispatcher = http.HttpDispatcher(self.CONF) - - # The target should be None - self.assertEqual('', dispatcher.target) - - with mock.patch.object(requests, 'post') as post: - dispatcher.record_metering_data(self.msg) - - # Since the target is not set, no http post should occur, thus the - # call_count should be zero. - self.assertEqual(0, post.call_count) - - def test_http_dispatcher_with_no_metadata(self): - self.CONF.dispatcher_http.target = 'fake' - self.CONF.dispatcher_http.cadf_only = True - dispatcher = http.HttpDispatcher(self.CONF) - - with mock.patch.object(requests, 'post') as post: - dispatcher.record_metering_data(self.msg) - - self.assertEqual(0, post.call_count) - - def test_http_dispatcher_without_cadf_event(self): - self.CONF.dispatcher_http.target = 'fake' - self.CONF.dispatcher_http.cadf_only = True - dispatcher = http.HttpDispatcher(self.CONF) - - self.msg['resource_metadata'] = {'request': {'NONE_CADF_EVENT': { - 'q1': 'v1', 'q2': 'v2'}, }, } - self.msg['message_signature'] = utils.compute_signature( - self.msg, self.CONF.publisher.telemetry_secret, - ) - - with mock.patch.object(requests, 'post') as post: - dispatcher.record_metering_data(self.msg) - - # Since the meter does not have metadata or CADF_EVENT, the method - # call count should be zero - self.assertEqual(0, post.call_count) - - def test_http_dispatcher_with_cadf_event(self): - self.CONF.dispatcher_http.target = 'fake' - self.CONF.dispatcher_http.cadf_only = True - dispatcher = http.HttpDispatcher(self.CONF) - - self.msg['resource_metadata'] = {'request': {'CADF_EVENT': { - 'q1': 'v1', 'q2': 'v2'}, }, } - self.msg['message_signature'] = utils.compute_signature( - self.msg, self.CONF.publisher.telemetry_secret, - ) - - with mock.patch.object(requests, 'post') as post: - dispatcher.record_metering_data(self.msg) - - self.assertEqual(1, post.call_count) - - def test_http_dispatcher_with_none_cadf_event(self): - self.CONF.dispatcher_http.target = 'fake' - self.CONF.dispatcher_http.cadf_only = False - dispatcher = http.HttpDispatcher(self.CONF) - - self.msg['resource_metadata'] = {'any': {'thing1': 'v1', - 'thing2': 'v2', }, } - self.msg['message_signature'] = utils.compute_signature( - self.msg, self.CONF.publisher.telemetry_secret, - ) - - with mock.patch.object(requests, 'post') as post: - dispatcher.record_metering_data(self.msg) - - self.assertEqual(1, post.call_count) - - -class 
TestEventDispatcherHttp(base.BaseTestCase): - - def setUp(self): - super(TestEventDispatcherHttp, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - - def test_http_dispatcher(self): - self.CONF.dispatcher_http.event_target = 'fake' - dispatcher = http.HttpDispatcher(self.CONF) - - event = event_models.Event(uuid.uuid4(), 'test', - datetime.datetime(2012, 7, 2, 13, 53, 40), - [], {}).serialize() - - with mock.patch.object(requests, 'post') as post: - dispatcher.record_events(event) - - self.assertEqual(1, post.call_count) - - def test_http_dispatcher_bad(self): - self.CONF.dispatcher_http.event_target = '' - dispatcher = http.HttpDispatcher(self.CONF) - - event = event_models.Event(uuid.uuid4(), 'test', - datetime.datetime(2012, 7, 2, 13, 53, 40), - [], {}).serialize() - - with mock.patch('ceilometer.dispatcher.http.LOG', - mock.MagicMock()) as LOG: - dispatcher.record_events(event) - self.assertTrue(LOG.exception.called) - - def test_http_dispatcher_share_target(self): - self.CONF.dispatcher_http.target = 'fake' - dispatcher = http.HttpDispatcher(self.CONF) - - event = event_models.Event(uuid.uuid4(), 'test', - datetime.datetime(2012, 7, 2, 13, 53, 40), - [], {}).serialize() - - with mock.patch.object(requests, 'post') as post: - dispatcher.record_events(event) - - self.assertEqual('fake', post.call_args[0][0]) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/dns/test_notifications.py ceilometer-5.0.0~b3/ceilometer/tests/dns/test_notifications.py --- ceilometer-5.0.0~b2/ceilometer/tests/dns/test_notifications.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/dns/test_notifications.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,102 +0,0 @@ -# -# Copyright (c) 2015 Hewlett Packard Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
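The test_http.py removal above covered the HTTP dispatcher's cadf_only filtering: with cadf_only=True a meter is posted only when its resource metadata carries a request.CADF_EVENT entry, with cadf_only=False any signed meter is posted, and an empty target suppresses posting entirely. A rough, illustration-only reduction of the decision logic those tests assert (a stand-in function, not the ceilometer implementation):

import requests


def maybe_post(target, meter, cadf_only=False, timeout=5):
    # No configured target: the dispatcher never calls requests.post.
    if not target:
        return None
    if cadf_only:
        event = (meter.get('resource_metadata') or {}).get(
            'request', {}).get('CADF_EVENT')
        if not event:
            return None  # cadf_only drops meters without a CADF_EVENT
        body = event
    else:
        body = meter
    return requests.post(target, json=body, timeout=timeout)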
- -import mock -from oslo_utils import timeutils -from oslotest import base - -from ceilometer.dns import notifications -from ceilometer import sample - -NOW = timeutils.utcnow().isoformat() - -TENANT_ID = u'76538754af6548f5b53cf9af2d35d582' -USER_ID = u'b70ece400e4e45c187168c40fa42ff7a' -DOMAIN_STATUS = u'ACTIVE' -RESOURCE_ID = u'a8b55824-e731-40a3-a32d-de81474d74b2' -PUBLISHER_ID = u'central.ubuntu' -POOL_ID = u'794ccc2c-d751-44fe-b57f-8894c9f5c842' - - -def _dns_notification_for(operation): - - return { - u'event_type': '%s.domain.%s' % (notifications.SERVICE, - operation), - u'_context_roles': [u'admin'], - u'timestamp': NOW, - u'_context_tenant': TENANT_ID, - u'payload': { - u'status': DOMAIN_STATUS, - u'retry': 600, - u'description': None, - u'expire': 86400, - u'deleted': u'0', - u'tenant_id': TENANT_ID, - u'created_at': u'2015-07-10T20:05:29.870091Z', - u'updated_at': None, - u'refresh': 3600, - u'pool_id': POOL_ID, - u'email': u'admin@hpcloud.com', - u'minimum': 3600, - u'parent_domain_id': None, - u'version': 1, - u'ttl': 3600, - u'action': operation.upper(), - u'serial': 1426295326, - u'deleted_at': None, - u'id': RESOURCE_ID, - u'name': u'paas.hpcloud.com.', - u'audit_period_beginning': u'2015-07-10T20:05:29.870091Z', - u'audit_period_ending': u'2015-07-10T21:05:29.870091Z' - }, - u'_context_user': USER_ID, - u'_context_auth_token': u'b95d4fc3bb2e4a5487cad06af65ffcfc', - u'_context_tenant': TENANT_ID, - u'priority': u'INFO', - u'_context_is_admin': False, - u'publisher_id': PUBLISHER_ID, - u'message_id': u'67ba0a2a-32bd-4cdf-9bfb-ef9cefcd0f63', - } - - -class TestNotification(base.BaseTestCase): - def _verify_common_sample(self, actual, operation): - self.assertIsNotNone(actual) - self.assertEqual('%s.domain.%s' % (notifications.SERVICE, operation), - actual.name) - self.assertEqual(NOW, actual.timestamp) - self.assertEqual(sample.TYPE_CUMULATIVE, actual.type) - self.assertEqual(TENANT_ID, actual.project_id) - self.assertEqual(RESOURCE_ID, actual.resource_id) - self.assertEqual(USER_ID, actual.user_id) - metadata = actual.resource_metadata - self.assertEqual(PUBLISHER_ID, metadata.get('host')) - self.assertEqual(operation.upper(), metadata.get('action')) - self.assertEqual(DOMAIN_STATUS, metadata.get('status')) - - self.assertEqual(3600, actual.volume) - self.assertEqual('s', actual.unit) - - def _test_operation(self, operation): - notif = _dns_notification_for(operation) - handler = notifications.DomainExists(mock.Mock()) - data = list(handler.process_notification(notif)) - self.assertEqual(1, len(data)) - self._verify_common_sample(data[0], operation) - - def test_exists(self): - self._test_operation('exists') diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/energy/test_kwapi.py ceilometer-5.0.0~b3/ceilometer/tests/energy/test_kwapi.py --- ceilometer-5.0.0~b2/ceilometer/tests/energy/test_kwapi.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/energy/test_kwapi.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,144 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - -from keystoneclient import exceptions -import mock -from oslo_context import context -from oslotest import base -from oslotest import mockpatch -import six - -from ceilometer.agent import manager -from ceilometer.energy import kwapi - - -PROBE_DICT = { - "probes": { - "A": { - "timestamp": 1357730232.68754, - "w": 107.3, - "kwh": 0.001058255421506034 - }, - "B": { - "timestamp": 1357730232.048158, - "w": 15.0, - "kwh": 0.029019045026169896 - }, - "C": { - "timestamp": 1357730232.223375, - "w": 95.0, - "kwh": 0.17361822634312918 - } - } -} - -ENDPOINT = 'end://point' - - -class TestManager(manager.AgentManager): - - @mock.patch('keystoneclient.v2_0.client', mock.MagicMock()) - def __init__(self): - super(TestManager, self).__init__() - self.keystone = mock.Mock() - - -class _BaseTestCase(base.BaseTestCase): - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def setUp(self): - super(_BaseTestCase, self).setUp() - self.context = context.get_admin_context() - self.manager = TestManager() - - -class TestKwapi(_BaseTestCase): - - @staticmethod - def fake_get_kwapi_client(ksclient, endpoint): - raise exceptions.EndpointNotFound("fake keystone exception") - - def test_endpoint_not_exist(self): - with mockpatch.PatchObject(kwapi._Base, 'get_kwapi_client', - side_effect=self.fake_get_kwapi_client): - pollster = kwapi.EnergyPollster() - samples = list(pollster.get_samples(self.manager, {}, - [ENDPOINT])) - - self.assertEqual(0, len(samples)) - - -class TestEnergyPollster(_BaseTestCase): - pollster_cls = kwapi.EnergyPollster - unit = 'kwh' - - def setUp(self): - super(TestEnergyPollster, self).setUp() - self.useFixture(mockpatch.PatchObject( - kwapi._Base, '_iter_probes', side_effect=self.fake_iter_probes)) - - @staticmethod - def fake_iter_probes(ksclient, cache, endpoint): - probes = PROBE_DICT['probes'] - for key, value in six.iteritems(probes): - probe_dict = value - probe_dict['id'] = key - yield probe_dict - - def test_default_discovery(self): - pollster = kwapi.EnergyPollster() - self.assertEqual('endpoint:energy', pollster.default_discovery) - - def test_sample(self): - cache = {} - samples = list(self.pollster_cls().get_samples(self.manager, cache, - [ENDPOINT])) - self.assertEqual(len(PROBE_DICT['probes']), len(samples)) - samples_by_name = dict((s.resource_id, s) for s in samples) - for name, probe in PROBE_DICT['probes'].items(): - sample = samples_by_name[name] - expected = datetime.datetime.fromtimestamp( - probe['timestamp'] - ).isoformat() - self.assertEqual(expected, sample.timestamp) - self.assertEqual(probe[self.unit], sample.volume) - - -class TestPowerPollster(TestEnergyPollster): - pollster_cls = kwapi.PowerPollster - unit = 'w' - - -class TestEnergyPollsterCache(_BaseTestCase): - pollster_cls = kwapi.EnergyPollster - - def test_get_samples_cached(self): - probe = {'id': 'A'} - probe.update(PROBE_DICT['probes']['A']) - cache = { - '%s-%s' % (ENDPOINT, self.pollster_cls.CACHE_KEY_PROBE): [probe], - } - self.manager.keystone = mock.Mock() - pollster = self.pollster_cls() - with mock.patch.object(pollster, '_get_probes') as do_not_call: - do_not_call.side_effect = AssertionError('should not be called') - samples = list(pollster.get_samples(self.manager, cache, - [ENDPOINT])) - self.assertEqual(1, len(samples)) - - -class TestPowerPollsterCache(TestEnergyPollsterCache): - pollster_cls = kwapi.PowerPollster diff -Nru 
ceilometer-5.0.0~b2/ceilometer/tests/event/test_converter.py ceilometer-5.0.0~b3/ceilometer/tests/event/test_converter.py --- ceilometer-5.0.0~b2/ceilometer/tests/event/test_converter.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/event/test_converter.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,777 +0,0 @@ -# -# Copyright 2013 Rackspace Hosting. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - -import jsonpath_rw -import mock -from oslo_config import fixture as fixture_config -import six - -from ceilometer.event import converter -from ceilometer.event.storage import models -from ceilometer.tests import base - - -class ConverterBase(base.BaseTestCase): - @staticmethod - def _create_test_notification(event_type, message_id, **kw): - return dict(event_type=event_type, - message_id=message_id, - priority="INFO", - publisher_id="compute.host-1-2-3", - timestamp="2013-08-08 21:06:37.803826", - payload=kw, - ) - - def assertIsValidEvent(self, event, notification): - self.assertIsNot( - None, event, - "Notification dropped unexpectedly:" - " %s" % str(notification)) - self.assertIsInstance(event, models.Event) - - def assertIsNotValidEvent(self, event, notification): - self.assertIs( - None, event, - "Notification NOT dropped when expected to be dropped:" - " %s" % str(notification)) - - def assertHasTrait(self, event, name, value=None, dtype=None): - traits = [trait for trait in event.traits if trait.name == name] - self.assertTrue( - len(traits) > 0, - "Trait %s not found in event %s" % (name, event)) - trait = traits[0] - if value is not None: - self.assertEqual(value, trait.value) - if dtype is not None: - self.assertEqual(dtype, trait.dtype) - if dtype == models.Trait.INT_TYPE: - self.assertIsInstance(trait.value, int) - elif dtype == models.Trait.FLOAT_TYPE: - self.assertIsInstance(trait.value, float) - elif dtype == models.Trait.DATETIME_TYPE: - self.assertIsInstance(trait.value, datetime.datetime) - elif dtype == models.Trait.TEXT_TYPE: - self.assertIsInstance(trait.value, six.string_types) - - def assertDoesNotHaveTrait(self, event, name): - traits = [trait for trait in event.traits if trait.name == name] - self.assertEqual( - len(traits), 0, - "Extra Trait %s found in event %s" % (name, event)) - - def assertHasDefaultTraits(self, event): - text = models.Trait.TEXT_TYPE - self.assertHasTrait(event, 'service', dtype=text) - - def _cmp_tree(self, this, other): - if hasattr(this, 'right') and hasattr(other, 'right'): - return (self._cmp_tree(this.right, other.right) and - self._cmp_tree(this.left, other.left)) - if not hasattr(this, 'right') and not hasattr(other, 'right'): - return this == other - return False - - def assertPathsEqual(self, path1, path2): - self.assertTrue(self._cmp_tree(path1, path2), - 'JSONPaths not equivalent %s %s' % (path1, path2)) - - -class TestTraitDefinition(ConverterBase): - - def setUp(self): - super(TestTraitDefinition, self).setUp() - self.n1 = self._create_test_notification( - "test.thing", - 
"uuid-for-notif-0001", - instance_uuid="uuid-for-instance-0001", - instance_id="id-for-instance-0001", - instance_uuid2=None, - instance_id2=None, - host='host-1-2-3', - bogus_date='', - image_meta=dict( - disk_gb='20', - thing='whatzit'), - foobar=50) - - self.ext1 = mock.MagicMock(name='mock_test_plugin') - self.test_plugin_class = self.ext1.plugin - self.test_plugin = self.test_plugin_class() - self.test_plugin.trait_value.return_value = 'foobar' - self.ext1.reset_mock() - - self.ext2 = mock.MagicMock(name='mock_nothing_plugin') - self.nothing_plugin_class = self.ext2.plugin - self.nothing_plugin = self.nothing_plugin_class() - self.nothing_plugin.trait_value.return_value = None - self.ext2.reset_mock() - - self.fake_plugin_mgr = dict(test=self.ext1, nothing=self.ext2) - - def test_to_trait_with_plugin(self): - cfg = dict(type='text', - fields=['payload.instance_id', 'payload.instance_uuid'], - plugin=dict(name='test')) - - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual('test_trait', t.name) - self.assertEqual(models.Trait.TEXT_TYPE, t.dtype) - self.assertEqual('foobar', t.value) - self.test_plugin_class.assert_called_once_with() - self.test_plugin.trait_value.assert_called_once_with([ - ('payload.instance_id', 'id-for-instance-0001'), - ('payload.instance_uuid', 'uuid-for-instance-0001')]) - - def test_to_trait_null_match_with_plugin(self): - cfg = dict(type='text', - fields=['payload.nothere', 'payload.bogus'], - plugin=dict(name='test')) - - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual('test_trait', t.name) - self.assertEqual(models.Trait.TEXT_TYPE, t.dtype) - self.assertEqual('foobar', t.value) - self.test_plugin_class.assert_called_once_with() - self.test_plugin.trait_value.assert_called_once_with([]) - - def test_to_trait_with_plugin_null(self): - cfg = dict(type='text', - fields=['payload.instance_id', 'payload.instance_uuid'], - plugin=dict(name='nothing')) - - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIs(None, t) - self.nothing_plugin_class.assert_called_once_with() - self.nothing_plugin.trait_value.assert_called_once_with([ - ('payload.instance_id', 'id-for-instance-0001'), - ('payload.instance_uuid', 'uuid-for-instance-0001')]) - - def test_to_trait_with_plugin_with_parameters(self): - cfg = dict(type='text', - fields=['payload.instance_id', 'payload.instance_uuid'], - plugin=dict(name='test', parameters=dict(a=1, b='foo'))) - - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual('test_trait', t.name) - self.assertEqual(models.Trait.TEXT_TYPE, t.dtype) - self.assertEqual('foobar', t.value) - self.test_plugin_class.assert_called_once_with(a=1, b='foo') - self.test_plugin.trait_value.assert_called_once_with([ - ('payload.instance_id', 'id-for-instance-0001'), - ('payload.instance_uuid', 'uuid-for-instance-0001')]) - - def test_to_trait(self): - cfg = dict(type='text', fields='payload.instance_id') - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual('test_trait', t.name) - self.assertEqual(models.Trait.TEXT_TYPE, t.dtype) - 
self.assertEqual('id-for-instance-0001', t.value) - - cfg = dict(type='int', fields='payload.image_meta.disk_gb') - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual('test_trait', t.name) - self.assertEqual(models.Trait.INT_TYPE, t.dtype) - self.assertEqual(20, t.value) - - def test_to_trait_multiple(self): - cfg = dict(type='text', fields=['payload.instance_id', - 'payload.instance_uuid']) - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual('id-for-instance-0001', t.value) - - cfg = dict(type='text', fields=['payload.instance_uuid', - 'payload.instance_id']) - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual('uuid-for-instance-0001', t.value) - - def test_to_trait_multiple_different_nesting(self): - cfg = dict(type='int', fields=['payload.foobar', - 'payload.image_meta.disk_gb']) - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual(50, t.value) - - cfg = dict(type='int', fields=['payload.image_meta.disk_gb', - 'payload.foobar']) - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual(20, t.value) - - def test_to_trait_some_null_multiple(self): - cfg = dict(type='text', fields=['payload.instance_id2', - 'payload.instance_uuid']) - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual('uuid-for-instance-0001', t.value) - - def test_to_trait_some_missing_multiple(self): - cfg = dict(type='text', fields=['payload.not_here_boss', - 'payload.instance_uuid']) - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual('uuid-for-instance-0001', t.value) - - def test_to_trait_missing(self): - cfg = dict(type='text', fields='payload.not_here_boss') - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIs(None, t) - - def test_to_trait_null(self): - cfg = dict(type='text', fields='payload.instance_id2') - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIs(None, t) - - def test_to_trait_empty_nontext(self): - cfg = dict(type='datetime', fields='payload.bogus_date') - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIs(None, t) - - def test_to_trait_multiple_null_missing(self): - cfg = dict(type='text', fields=['payload.not_here_boss', - 'payload.instance_id2']) - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIs(None, t) - - def test_missing_fields_config(self): - self.assertRaises(converter.EventDefinitionException, - converter.TraitDefinition, - 'bogus_trait', - dict(), - self.fake_plugin_mgr) - - def test_string_fields_config(self): - cfg = dict(fields='payload.test') - t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) - 
self.assertPathsEqual(t.fields, jsonpath_rw.parse('payload.test')) - - def test_list_fields_config(self): - cfg = dict(fields=['payload.test', 'payload.other']) - t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) - self.assertPathsEqual( - t.fields, - jsonpath_rw.parse('(payload.test)|(payload.other)')) - - def test_invalid_path_config(self): - # test invalid jsonpath... - cfg = dict(fields='payload.bogus(') - self.assertRaises(converter.EventDefinitionException, - converter.TraitDefinition, - 'bogus_trait', - cfg, - self.fake_plugin_mgr) - - def test_invalid_plugin_config(self): - # test invalid jsonpath... - cfg = dict(fields='payload.test', plugin=dict(bogus="true")) - self.assertRaises(converter.EventDefinitionException, - converter.TraitDefinition, - 'test_trait', - cfg, - self.fake_plugin_mgr) - - def test_unknown_plugin(self): - # test invalid jsonpath... - cfg = dict(fields='payload.test', plugin=dict(name='bogus')) - self.assertRaises(converter.EventDefinitionException, - converter.TraitDefinition, - 'test_trait', - cfg, - self.fake_plugin_mgr) - - def test_type_config(self): - cfg = dict(type='text', fields='payload.test') - t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) - self.assertEqual(models.Trait.TEXT_TYPE, t.trait_type) - - cfg = dict(type='int', fields='payload.test') - t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) - self.assertEqual(models.Trait.INT_TYPE, t.trait_type) - - cfg = dict(type='float', fields='payload.test') - t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) - self.assertEqual(models.Trait.FLOAT_TYPE, t.trait_type) - - cfg = dict(type='datetime', fields='payload.test') - t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) - self.assertEqual(models.Trait.DATETIME_TYPE, t.trait_type) - - def test_invalid_type_config(self): - # test invalid jsonpath... 
- cfg = dict(type='bogus', fields='payload.test') - self.assertRaises(converter.EventDefinitionException, - converter.TraitDefinition, - 'bogus_trait', - cfg, - self.fake_plugin_mgr) - - -class TestEventDefinition(ConverterBase): - - def setUp(self): - super(TestEventDefinition, self).setUp() - - self.traits_cfg = { - 'instance_id': { - 'type': 'text', - 'fields': ['payload.instance_uuid', - 'payload.instance_id'], - }, - 'host': { - 'type': 'text', - 'fields': 'payload.host', - }, - } - - self.test_notification1 = self._create_test_notification( - "test.thing", - "uuid-for-notif-0001", - instance_id="uuid-for-instance-0001", - host='host-1-2-3') - - self.test_notification2 = self._create_test_notification( - "test.thing", - "uuid-for-notif-0002", - instance_id="uuid-for-instance-0002") - - self.test_notification3 = self._create_test_notification( - "test.thing", - "uuid-for-notif-0003", - instance_id="uuid-for-instance-0003", - host=None) - self.fake_plugin_mgr = {} - - def test_to_event(self): - dtype = models.Trait.TEXT_TYPE - cfg = dict(event_type='test.thing', traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - - e = edef.to_event(self.test_notification1) - self.assertEqual('test.thing', e.event_type) - self.assertEqual(datetime.datetime(2013, 8, 8, 21, 6, 37, 803826), - e.generated) - - self.assertHasDefaultTraits(e) - self.assertHasTrait(e, 'host', value='host-1-2-3', dtype=dtype) - self.assertHasTrait(e, 'instance_id', - value='uuid-for-instance-0001', - dtype=dtype) - - def test_to_event_missing_trait(self): - dtype = models.Trait.TEXT_TYPE - cfg = dict(event_type='test.thing', traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - - e = edef.to_event(self.test_notification2) - - self.assertHasDefaultTraits(e) - self.assertHasTrait(e, 'instance_id', - value='uuid-for-instance-0002', - dtype=dtype) - self.assertDoesNotHaveTrait(e, 'host') - - def test_to_event_null_trait(self): - dtype = models.Trait.TEXT_TYPE - cfg = dict(event_type='test.thing', traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - - e = edef.to_event(self.test_notification3) - - self.assertHasDefaultTraits(e) - self.assertHasTrait(e, 'instance_id', - value='uuid-for-instance-0003', - dtype=dtype) - self.assertDoesNotHaveTrait(e, 'host') - - def test_bogus_cfg_no_traits(self): - bogus = dict(event_type='test.foo') - self.assertRaises(converter.EventDefinitionException, - converter.EventDefinition, - bogus, - self.fake_plugin_mgr) - - def test_bogus_cfg_no_type(self): - bogus = dict(traits=self.traits_cfg) - self.assertRaises(converter.EventDefinitionException, - converter.EventDefinition, - bogus, - self.fake_plugin_mgr) - - def test_included_type_string(self): - cfg = dict(event_type='test.thing', traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertEqual(1, len(edef._included_types)) - self.assertEqual('test.thing', edef._included_types[0]) - self.assertEqual(0, len(edef._excluded_types)) - self.assertTrue(edef.included_type('test.thing')) - self.assertFalse(edef.excluded_type('test.thing')) - self.assertTrue(edef.match_type('test.thing')) - self.assertFalse(edef.match_type('random.thing')) - - def test_included_type_list(self): - cfg = dict(event_type=['test.thing', 'other.thing'], - traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertEqual(2, len(edef._included_types)) - self.assertEqual(0, len(edef._excluded_types)) 
- self.assertTrue(edef.included_type('test.thing')) - self.assertTrue(edef.included_type('other.thing')) - self.assertFalse(edef.excluded_type('test.thing')) - self.assertTrue(edef.match_type('test.thing')) - self.assertTrue(edef.match_type('other.thing')) - self.assertFalse(edef.match_type('random.thing')) - - def test_excluded_type_string(self): - cfg = dict(event_type='!test.thing', traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertEqual(1, len(edef._included_types)) - self.assertEqual('*', edef._included_types[0]) - self.assertEqual('test.thing', edef._excluded_types[0]) - self.assertEqual(1, len(edef._excluded_types)) - self.assertEqual('test.thing', edef._excluded_types[0]) - self.assertTrue(edef.excluded_type('test.thing')) - self.assertTrue(edef.included_type('random.thing')) - self.assertFalse(edef.match_type('test.thing')) - self.assertTrue(edef.match_type('random.thing')) - - def test_excluded_type_list(self): - cfg = dict(event_type=['!test.thing', '!other.thing'], - traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertEqual(1, len(edef._included_types)) - self.assertEqual(2, len(edef._excluded_types)) - self.assertTrue(edef.excluded_type('test.thing')) - self.assertTrue(edef.excluded_type('other.thing')) - self.assertFalse(edef.excluded_type('random.thing')) - self.assertFalse(edef.match_type('test.thing')) - self.assertFalse(edef.match_type('other.thing')) - self.assertTrue(edef.match_type('random.thing')) - - def test_mixed_type_list(self): - cfg = dict(event_type=['*.thing', '!test.thing', '!other.thing'], - traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertEqual(1, len(edef._included_types)) - self.assertEqual(2, len(edef._excluded_types)) - self.assertTrue(edef.excluded_type('test.thing')) - self.assertTrue(edef.excluded_type('other.thing')) - self.assertFalse(edef.excluded_type('random.thing')) - self.assertFalse(edef.match_type('test.thing')) - self.assertFalse(edef.match_type('other.thing')) - self.assertFalse(edef.match_type('random.whatzit')) - self.assertTrue(edef.match_type('random.thing')) - - def test_catchall(self): - cfg = dict(event_type=['*.thing', '!test.thing', '!other.thing'], - traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertFalse(edef.is_catchall) - - cfg = dict(event_type=['!other.thing'], - traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertFalse(edef.is_catchall) - - cfg = dict(event_type=['other.thing'], - traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertFalse(edef.is_catchall) - - cfg = dict(event_type=['*', '!other.thing'], - traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertFalse(edef.is_catchall) - - cfg = dict(event_type=['*'], - traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertTrue(edef.is_catchall) - - cfg = dict(event_type=['*', 'foo'], - traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertTrue(edef.is_catchall) - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_extract_when(self, mock_utcnow): - now = datetime.datetime.utcnow() - modified = now + datetime.timedelta(minutes=1) - mock_utcnow.return_value = now - - body = {"timestamp": str(modified)} - when = converter.EventDefinition._extract_when(body) - 
self.assertTimestampEqual(modified, when) - - body = {"_context_timestamp": str(modified)} - when = converter.EventDefinition._extract_when(body) - self.assertTimestampEqual(modified, when) - - then = now + datetime.timedelta(hours=1) - body = {"timestamp": str(modified), "_context_timestamp": str(then)} - when = converter.EventDefinition._extract_when(body) - self.assertTimestampEqual(modified, when) - - when = converter.EventDefinition._extract_when({}) - self.assertTimestampEqual(now, when) - - def test_default_traits(self): - cfg = dict(event_type='test.thing', traits={}) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - default_traits = converter.EventDefinition.DEFAULT_TRAITS.keys() - traits = set(edef.traits.keys()) - for dt in default_traits: - self.assertIn(dt, traits) - self.assertEqual(len(converter.EventDefinition.DEFAULT_TRAITS), - len(edef.traits)) - - def test_traits(self): - cfg = dict(event_type='test.thing', traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - default_traits = converter.EventDefinition.DEFAULT_TRAITS.keys() - traits = set(edef.traits.keys()) - for dt in default_traits: - self.assertIn(dt, traits) - self.assertIn('host', traits) - self.assertIn('instance_id', traits) - self.assertEqual(len(converter.EventDefinition.DEFAULT_TRAITS) + 2, - len(edef.traits)) - - -class TestNotificationConverter(ConverterBase): - - def setUp(self): - super(TestNotificationConverter, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.valid_event_def1 = [{ - 'event_type': 'compute.instance.create.*', - 'traits': { - 'instance_id': { - 'type': 'text', - 'fields': ['payload.instance_uuid', - 'payload.instance_id'], - }, - 'host': { - 'type': 'text', - 'fields': 'payload.host', - }, - }, - }] - - self.test_notification1 = self._create_test_notification( - "compute.instance.create.start", - "uuid-for-notif-0001", - instance_id="uuid-for-instance-0001", - host='host-1-2-3') - self.test_notification2 = self._create_test_notification( - "bogus.notification.from.mars", - "uuid-for-notif-0002", - weird='true', - host='cydonia') - self.fake_plugin_mgr = {} - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_converter_missing_keys(self, mock_utcnow): - # test a malformed notification - now = datetime.datetime.utcnow() - mock_utcnow.return_value = now - c = converter.NotificationEventsConverter( - [], - self.fake_plugin_mgr, - add_catchall=True) - message = {'event_type': "foo", - 'message_id': "abc", - 'publisher_id': "1"} - e = c.to_event(message) - self.assertIsValidEvent(e, message) - self.assertEqual(1, len(e.traits)) - self.assertEqual("foo", e.event_type) - self.assertEqual(now, e.generated) - - def test_converter_with_catchall(self): - c = converter.NotificationEventsConverter( - self.valid_event_def1, - self.fake_plugin_mgr, - add_catchall=True) - self.assertEqual(2, len(c.definitions)) - e = c.to_event(self.test_notification1) - self.assertIsValidEvent(e, self.test_notification1) - self.assertEqual(3, len(e.traits)) - self.assertHasDefaultTraits(e) - self.assertHasTrait(e, 'instance_id') - self.assertHasTrait(e, 'host') - - e = c.to_event(self.test_notification2) - self.assertIsValidEvent(e, self.test_notification2) - self.assertEqual(1, len(e.traits)) - self.assertHasDefaultTraits(e) - self.assertDoesNotHaveTrait(e, 'instance_id') - self.assertDoesNotHaveTrait(e, 'host') - - def test_converter_without_catchall(self): - c = converter.NotificationEventsConverter( - self.valid_event_def1, - 
self.fake_plugin_mgr, - add_catchall=False) - self.assertEqual(1, len(c.definitions)) - e = c.to_event(self.test_notification1) - self.assertIsValidEvent(e, self.test_notification1) - self.assertEqual(3, len(e.traits)) - self.assertHasDefaultTraits(e) - self.assertHasTrait(e, 'instance_id') - self.assertHasTrait(e, 'host') - - e = c.to_event(self.test_notification2) - self.assertIsNotValidEvent(e, self.test_notification2) - - def test_converter_empty_cfg_with_catchall(self): - c = converter.NotificationEventsConverter( - [], - self.fake_plugin_mgr, - add_catchall=True) - self.assertEqual(1, len(c.definitions)) - e = c.to_event(self.test_notification1) - self.assertIsValidEvent(e, self.test_notification1) - self.assertEqual(1, len(e.traits)) - self.assertHasDefaultTraits(e) - - e = c.to_event(self.test_notification2) - self.assertIsValidEvent(e, self.test_notification2) - self.assertEqual(1, len(e.traits)) - self.assertHasDefaultTraits(e) - - def test_converter_empty_cfg_without_catchall(self): - c = converter.NotificationEventsConverter( - [], - self.fake_plugin_mgr, - add_catchall=False) - self.assertEqual(0, len(c.definitions)) - e = c.to_event(self.test_notification1) - self.assertIsNotValidEvent(e, self.test_notification1) - - e = c.to_event(self.test_notification2) - self.assertIsNotValidEvent(e, self.test_notification2) - - @staticmethod - def _convert_message(convert, level): - message = {'priority': level, 'event_type': "foo", - 'message_id': "abc", 'publisher_id': "1"} - return convert.to_event(message) - - def test_store_raw_all(self): - self.CONF.event.store_raw = ['info', 'error'] - c = converter.NotificationEventsConverter( - [], self.fake_plugin_mgr) - self.assertTrue(self._convert_message(c, 'info').raw) - self.assertTrue(self._convert_message(c, 'error').raw) - - def test_store_raw_info_only(self): - self.CONF.event.store_raw = ['info'] - c = converter.NotificationEventsConverter( - [], self.fake_plugin_mgr) - self.assertTrue(self._convert_message(c, 'info').raw) - self.assertFalse(self._convert_message(c, 'error').raw) - - def test_store_raw_error_only(self): - self.CONF.event.store_raw = ['error'] - c = converter.NotificationEventsConverter( - [], self.fake_plugin_mgr) - self.assertFalse(self._convert_message(c, 'info').raw) - self.assertTrue(self._convert_message(c, 'error').raw) - - def test_store_raw_skip_all(self): - c = converter.NotificationEventsConverter( - [], self.fake_plugin_mgr) - self.assertFalse(self._convert_message(c, 'info').raw) - self.assertFalse(self._convert_message(c, 'error').raw) - - def test_store_raw_info_only_no_case(self): - self.CONF.event.store_raw = ['INFO'] - c = converter.NotificationEventsConverter( - [], self.fake_plugin_mgr) - self.assertTrue(self._convert_message(c, 'info').raw) - self.assertFalse(self._convert_message(c, 'error').raw) - - def test_store_raw_bad_skip_all(self): - self.CONF.event.store_raw = ['unknown'] - c = converter.NotificationEventsConverter( - [], self.fake_plugin_mgr) - self.assertFalse(self._convert_message(c, 'info').raw) - self.assertFalse(self._convert_message(c, 'error').raw) - - def test_store_raw_bad_and_good(self): - self.CONF.event.store_raw = ['info', 'unknown'] - c = converter.NotificationEventsConverter( - [], self.fake_plugin_mgr) - self.assertTrue(self._convert_message(c, 'info').raw) - self.assertFalse(self._convert_message(c, 'error').raw) - - @mock.patch('ceilometer.event.converter.get_config_file', - mock.Mock(return_value=None)) - def test_setup_events_default_config(self): - 
self.CONF.set_override('drop_unmatched_notifications', - False, group='event') - - c = converter.setup_events(self.fake_plugin_mgr) - self.assertIsInstance(c, converter.NotificationEventsConverter) - self.assertEqual(1, len(c.definitions)) - self.assertTrue(c.definitions[0].is_catchall) - - self.CONF.set_override('drop_unmatched_notifications', - True, group='event') - - c = converter.setup_events(self.fake_plugin_mgr) - self.assertIsInstance(c, converter.NotificationEventsConverter) - self.assertEqual(0, len(c.definitions)) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/event/test_endpoint.py ceilometer-5.0.0~b3/ceilometer/tests/event/test_endpoint.py --- ceilometer-5.0.0~b2/ceilometer/tests/event/test_endpoint.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/event/test_endpoint.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,108 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for Ceilometer notify daemon.""" - -import mock -from oslo_config import cfg -from oslo_config import fixture as fixture_config -import oslo_messaging - -from ceilometer.event import endpoint as event_endpoint -from ceilometer.tests import base as tests_base - -TEST_NOTICE_CTXT = { - u'auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', - u'is_admin': True, - u'project_id': u'7c150a59fe714e6f9263774af9688f0e', - u'quota_class': None, - u'read_deleted': u'no', - u'remote_address': u'10.0.2.15', - u'request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', - u'roles': [u'admin'], - u'timestamp': u'2012-05-08T20:23:41.425105', - u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', -} - -TEST_NOTICE_METADATA = { - u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', - u'timestamp': u'2012-05-08 20:23:48.028195', -} - -TEST_NOTICE_PAYLOAD = { - u'created_at': u'2012-05-08 20:23:41', - u'deleted_at': u'', - u'disk_gb': 0, - u'display_name': u'testme', - u'fixed_ips': [{u'address': u'10.0.0.2', - u'floating_ips': [], - u'meta': {}, - u'type': u'fixed', - u'version': 4}], - u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', - u'instance_id': u'9f9d01b9-4a58-4271-9e27-398b21ab20d1', - u'instance_type': u'm1.tiny', - u'instance_type_id': 2, - u'launched_at': u'2012-05-08 20:23:47.985999', - u'memory_mb': 512, - u'state': u'active', - u'state_description': u'', - u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', - u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', - u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', - u'vcpus': 1, - u'root_gb': 0, - u'ephemeral_gb': 0, - u'host': u'compute-host-name', - u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', - u'os_type': u'linux?', - u'architecture': u'x86', - u'image_ref': u'UUID', - u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', - u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', -} - - -cfg.CONF.import_opt('store_events', 'ceilometer.notification', - group='notification') - - -class TestEventEndpoint(tests_base.BaseTestCase): - - def setUp(self): - 
super(TestEventEndpoint, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.CONF([]) - self.CONF.set_override("connection", "log://", group='database') - self.CONF.set_override("store_events", True, group="notification") - self.setup_messaging(self.CONF) - - self.mock_pm = mock.MagicMock() - self.endpoint = event_endpoint.EventsNotificationEndpoint(self.mock_pm) - self.endpoint.event_converter = mock.MagicMock() - self.endpoint.event_converter.to_event.return_value = mock.MagicMock( - event_type='test.test') - - def test_message_to_event(self): - self.endpoint.info(TEST_NOTICE_CTXT, 'compute.vagrant-precise', - 'compute.instance.create.end', - TEST_NOTICE_PAYLOAD, TEST_NOTICE_METADATA) - - def test_message_to_event_bad_event(self): - self.CONF.set_override("ack_on_event_error", False, - group="notification") - self.mock_pm.publisher.side_effect = Exception - message = {'event_type': "foo", 'message_id': "abc"} - ret = self.endpoint.process_notification(message) - self.assertEqual(oslo_messaging.NotificationResult.REQUEUE, ret) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/event/test_trait_plugins.py ceilometer-5.0.0~b3/ceilometer/tests/event/test_trait_plugins.py --- ceilometer-5.0.0~b2/ceilometer/tests/event/test_trait_plugins.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/event/test_trait_plugins.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,115 +0,0 @@ -# -# Copyright 2013 Rackspace Hosting. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
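The trait-plugin tests deleted below exercise SplitterTraitPlugin: it splits the first matched field value on a separator and returns one segment, yielding None when nothing matched or the requested segment does not exist. A hedged sketch of that behaviour (a stand-in function, not the real class):

def split_trait_value(match_list, separator='-', segment=0, max_split=None):
    if not match_list:
        return None  # no matched fields -> no trait
    _, value = match_list[0]
    parts = (value.split(separator, max_split) if max_split is not None
             else value.split(separator))
    try:
        return parts[segment]
    except IndexError:
        return None  # requested segment is out of range


# Mirrors test_split: with max_split=1, segment 1 keeps the tail intact.
assert split_trait_value([('test.thing', 'test-foobar-baz')],
                         segment=1, max_split=1) == 'foobar-baz'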
-from oslotest import base - -from ceilometer.event import trait_plugins - - -class TestSplitterPlugin(base.BaseTestCase): - - def setUp(self): - super(TestSplitterPlugin, self).setUp() - self.pclass = trait_plugins.SplitterTraitPlugin - - def test_split(self): - param = dict(separator='-', segment=0) - plugin = self.pclass(**param) - match_list = [('test.thing', 'test-foobar-baz')] - value = plugin.trait_value(match_list) - self.assertEqual('test', value) - - param = dict(separator='-', segment=1) - plugin = self.pclass(**param) - match_list = [('test.thing', 'test-foobar-baz')] - value = plugin.trait_value(match_list) - self.assertEqual('foobar', value) - - param = dict(separator='-', segment=1, max_split=1) - plugin = self.pclass(**param) - match_list = [('test.thing', 'test-foobar-baz')] - value = plugin.trait_value(match_list) - self.assertEqual('foobar-baz', value) - - def test_no_sep(self): - param = dict(separator='-', segment=0) - plugin = self.pclass(**param) - match_list = [('test.thing', 'test.foobar.baz')] - value = plugin.trait_value(match_list) - self.assertEqual('test.foobar.baz', value) - - def test_no_segment(self): - param = dict(separator='-', segment=5) - plugin = self.pclass(**param) - match_list = [('test.thing', 'test-foobar-baz')] - value = plugin.trait_value(match_list) - self.assertIs(None, value) - - def test_no_match(self): - param = dict(separator='-', segment=0) - plugin = self.pclass(**param) - match_list = [] - value = plugin.trait_value(match_list) - self.assertIs(None, value) - - -class TestBitfieldPlugin(base.BaseTestCase): - - def setUp(self): - super(TestBitfieldPlugin, self).setUp() - self.pclass = trait_plugins.BitfieldTraitPlugin - self.init = 0 - self.params = dict(initial_bitfield=self.init, - flags=[dict(path='payload.foo', bit=0, value=42), - dict(path='payload.foo', bit=1, value=12), - dict(path='payload.thud', bit=1, value=23), - dict(path='thingy.boink', bit=4), - dict(path='thingy.quux', bit=6, - value="wokka"), - dict(path='payload.bar', bit=10, - value='test')]) - - def test_bitfield(self): - match_list = [('payload.foo', 12), - ('payload.bar', 'test'), - ('thingy.boink', 'testagain')] - - plugin = self.pclass(**self.params) - value = plugin.trait_value(match_list) - self.assertEqual(0x412, value) - - def test_initial(self): - match_list = [('payload.foo', 12), - ('payload.bar', 'test'), - ('thingy.boink', 'testagain')] - self.params['initial_bitfield'] = 0x2000 - plugin = self.pclass(**self.params) - value = plugin.trait_value(match_list) - self.assertEqual(0x2412, value) - - def test_no_match(self): - match_list = [] - plugin = self.pclass(**self.params) - value = plugin.trait_value(match_list) - self.assertEqual(self.init, value) - - def test_multi(self): - match_list = [('payload.foo', 12), - ('payload.thud', 23), - ('payload.bar', 'test'), - ('thingy.boink', 'testagain')] - - plugin = self.pclass(**self.params) - value = plugin.trait_value(match_list) - self.assertEqual(0x412, value) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/api/__init__.py ceilometer-5.0.0~b3/ceilometer/tests/functional/api/__init__.py --- ceilometer-5.0.0~b2/ceilometer/tests/functional/api/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/api/__init__.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,185 @@ +# +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Base classes for API tests. +""" + +from oslo_config import cfg +from oslo_config import fixture as fixture_config +from oslo_policy import opts +import pecan +import pecan.testing + +from ceilometer.tests import db as db_test_base + +OPT_GROUP_NAME = 'keystone_authtoken' +cfg.CONF.import_group(OPT_GROUP_NAME, "keystonemiddleware.auth_token") +cfg.CONF.import_group('api', 'ceilometer.api.controllers.v2.root') + + +class FunctionalTest(db_test_base.TestBase): + """Used for functional tests of Pecan controllers. + + Used in case when you need to test your literal application and its + integration with the framework. + """ + + PATH_PREFIX = '' + + def setUp(self): + super(FunctionalTest, self).setUp() + self.CONF = self.useFixture(fixture_config.Config()).conf + self.setup_messaging(self.CONF) + opts.set_defaults(self.CONF) + + self.CONF.set_override("auth_version", "v2.0", + group=OPT_GROUP_NAME) + self.CONF.set_override("policy_file", + self.path_get('etc/ceilometer/policy.json'), + group='oslo_policy') + + self.CONF.set_override('gnocchi_is_enabled', False, group='api') + self.CONF.set_override('aodh_is_enabled', False, group='api') + + self.app = self._make_app() + + def _make_app(self, enable_acl=False): + self.config = { + 'app': { + 'root': 'ceilometer.api.controllers.root.RootController', + 'modules': ['ceilometer.api'], + 'enable_acl': enable_acl, + }, + 'wsme': { + 'debug': True, + }, + } + + return pecan.testing.load_test_app(self.config) + + def tearDown(self): + super(FunctionalTest, self).tearDown() + pecan.set_config({}, overwrite=True) + + def put_json(self, path, params, expect_errors=False, headers=None, + extra_environ=None, status=None): + """Sends simulated HTTP PUT request to Pecan test app. + + :param path: url path of target service + :param params: content for wsgi.input of request + :param expect_errors: boolean value whether an error is expected based + on request + :param headers: A dictionary of headers to send along with the request + :param extra_environ: A dictionary of environ variables to send along + with the request + :param status: Expected status code of response + """ + return self.post_json(path=path, params=params, + expect_errors=expect_errors, + headers=headers, extra_environ=extra_environ, + status=status, method="put") + + def post_json(self, path, params, expect_errors=False, headers=None, + method="post", extra_environ=None, status=None): + """Sends simulated HTTP POST request to Pecan test app. + + :param path: url path of target service + :param params: content for wsgi.input of request + :param expect_errors: boolean value whether an error is expected based + on request + :param headers: A dictionary of headers to send along with the request + :param method: Request method type. Appropriate method function call + should be used rather than passing attribute in. 
+ :param extra_environ: A dictionary of environ variables to send along + with the request + :param status: Expected status code of response + """ + full_path = self.PATH_PREFIX + path + response = getattr(self.app, "%s_json" % method)( + str(full_path), + params=params, + headers=headers, + status=status, + extra_environ=extra_environ, + expect_errors=expect_errors + ) + return response + + def delete(self, path, expect_errors=False, headers=None, + extra_environ=None, status=None): + """Sends simulated HTTP DELETE request to Pecan test app. + + :param path: url path of target service + :param expect_errors: boolean value whether an error is expected based + on request + :param headers: A dictionary of headers to send along with the request + :param extra_environ: A dictionary of environ variables to send along + with the request + :param status: Expected status code of response + """ + full_path = self.PATH_PREFIX + path + response = self.app.delete(str(full_path), + headers=headers, + status=status, + extra_environ=extra_environ, + expect_errors=expect_errors) + return response + + def get_json(self, path, expect_errors=False, headers=None, + extra_environ=None, q=None, groupby=None, status=None, + override_params=None, **params): + """Sends simulated HTTP GET request to Pecan test app. + + :param path: url path of target service + :param expect_errors: boolean value whether an error is expected based + on request + :param headers: A dictionary of headers to send along with the request + :param extra_environ: A dictionary of environ variables to send along + with the request + :param q: list of queries consisting of: field, value, op, and type + keys + :param groupby: list of fields to group by + :param status: Expected status code of response + :param override_params: literally encoded query param string + :param params: content for wsgi.input of request + """ + q = q or [] + groupby = groupby or [] + full_path = self.PATH_PREFIX + path + if override_params: + all_params = override_params + else: + query_params = {'q.field': [], + 'q.value': [], + 'q.op': [], + 'q.type': [], + } + for query in q: + for name in ['field', 'op', 'value', 'type']: + query_params['q.%s' % name].append(query.get(name, '')) + all_params = {} + all_params.update(params) + if q: + all_params.update(query_params) + if groupby: + all_params.update({'groupby': groupby}) + response = self.app.get(full_path, + params=all_params, + headers=headers, + extra_environ=extra_environ, + expect_errors=expect_errors, + status=status) + if not expect_errors: + response = response.json + return response diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/api/v2/__init__.py ceilometer-5.0.0~b3/ceilometer/tests/functional/api/v2/__init__.py --- ceilometer-5.0.0~b2/ceilometer/tests/functional/api/v2/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/api/v2/__init__.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,20 @@ +# +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from ceilometer.tests.functional import api + + +class FunctionalTest(api.FunctionalTest): + PATH_PREFIX = '/v2' diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/api/v2/test_acl_scenarios.py ceilometer-5.0.0~b3/ceilometer/tests/functional/api/v2/test_acl_scenarios.py --- ceilometer-5.0.0~b2/ceilometer/tests/functional/api/v2/test_acl_scenarios.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/api/v2/test_acl_scenarios.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,296 @@ +# +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Test ACL.""" + +import datetime +import hashlib +import json +import os + +from oslo_utils import fileutils +from oslo_utils import timeutils +import six +import webtest + +from ceilometer.api import app +from ceilometer.publisher import utils +from ceilometer import sample +from ceilometer.tests import db as tests_db +from ceilometer.tests.functional import api as acl +from ceilometer.tests.functional.api import v2 + +VALID_TOKEN = '4562138218392831' +VALID_TOKEN2 = '4562138218392832' + + +class FakeMemcache(object): + + TOKEN_HASH = hashlib.sha256(VALID_TOKEN.encode('utf-8')).hexdigest() + TOKEN2_HASH = hashlib.sha256(VALID_TOKEN2.encode('utf-8')).hexdigest() + + def get(self, key): + if (key == "tokens/%s" % VALID_TOKEN or + key == "tokens/%s" % self.TOKEN_HASH): + dt = timeutils.utcnow() + datetime.timedelta(minutes=5) + dt_isoformat = dt.isoformat() + return json.dumps(({'access': { + 'token': {'id': VALID_TOKEN, + 'expires': dt_isoformat}, + 'user': { + 'id': 'user_id1', + 'name': 'user_name1', + 'tenantId': '123i2910', + 'tenantName': 'mytenant', + 'roles': [ + {'name': 'admin'}, + ]}, + }}, dt_isoformat)) + if (key == "tokens/%s" % VALID_TOKEN2 or + key == "tokens/%s" % self.TOKEN2_HASH): + dt = timeutils.utcnow() + datetime.timedelta(minutes=5) + dt_isoformat = dt.isoformat() + return json.dumps(({'access': { + 'token': {'id': VALID_TOKEN2, + 'expires': dt_isoformat}, + 'user': { + 'id': 'user_id2', + 'name': 'user-good', + 'tenantId': 'project-good', + 'tenantName': 'goodies', + 'roles': [ + {'name': 'Member'}, + ]}, + }}, dt_isoformat)) + + @staticmethod + def set(key, value, **kwargs): + pass + + +class TestAPIACL(v2.FunctionalTest, + tests_db.MixinTestsWithBackendScenarios): + + def setUp(self): + super(TestAPIACL, self).setUp() + self.environ = {'fake.cache': FakeMemcache()} + + for cnt in [ + sample.Sample( + 'meter.test', + 'cumulative', + '', + 1, + 'user-good', + 'project-good', + 'resource-good', + timestamp=datetime.datetime(2012, 7, 2, 10, 40), + resource_metadata={'display_name': 'test-server', + 'tag': 'self.sample'}, + source='test_source'), + sample.Sample( + 'meter.mine', + 'gauge', + '', + 1, + 'user-fred', + 'project-good', + 'resource-56', + timestamp=datetime.datetime(2012, 7, 2, 10, 43), + resource_metadata={'display_name': 
'test-server', + 'tag': 'self.sample4'}, + source='test_source')]: + msg = utils.meter_message_from_counter( + cnt, self.CONF.publisher.telemetry_secret) + self.conn.record_metering_data(msg) + + def get_json(self, path, expect_errors=False, headers=None, + q=None, **params): + return super(TestAPIACL, self).get_json(path, + expect_errors=expect_errors, + headers=headers, + q=q or [], + extra_environ=self.environ, + **params) + + def _make_app(self): + self.CONF.set_override("cache", "fake.cache", group=acl.OPT_GROUP_NAME) + file_name = self.path_get('etc/ceilometer/api_paste.ini') + self.CONF.set_override("api_paste_config", file_name) + return webtest.TestApp(app.load_app()) + + def test_non_authenticated(self): + response = self.get_json('/meters', expect_errors=True) + self.assertEqual(401, response.status_int) + + def test_authenticated_wrong_role(self): + response = self.get_json('/meters', + expect_errors=True, + headers={ + "X-Roles": "Member", + "X-Tenant-Name": "admin", + "X-Project-Id": + "bc23a9d531064583ace8f67dad60f6bb", + }) + self.assertEqual(401, response.status_int) + + # FIXME(dhellmann): This test is not properly looking at the tenant + # info. We do not correctly detect the improper tenant. That's + # really something the keystone middleware would have to do using + # the incoming token, which we aren't providing. + # + # def test_authenticated_wrong_tenant(self): + # response = self.get_json('/meters', + # expect_errors=True, + # headers={ + # "X-Roles": "admin", + # "X-Tenant-Name": "achoo", + # "X-Project-Id": "bc23a9d531064583ace8f67dad60f6bb", + # }) + # self.assertEqual(401, response.status_int) + + def test_authenticated(self): + data = self.get_json('/meters', + headers={"X-Auth-Token": VALID_TOKEN, + "X-Roles": "admin", + "X-Tenant-Name": "admin", + "X-Project-Id": + "bc23a9d531064583ace8f67dad60f6bb", + }) + ids = set(r['resource_id'] for r in data) + self.assertEqual(set(['resource-good', 'resource-56']), ids) + + def test_with_non_admin_missing_project_query(self): + data = self.get_json('/meters', + headers={"X-Roles": "Member", + "X-Auth-Token": VALID_TOKEN2, + "X-Project-Id": "project-good"}) + ids = set(r['resource_id'] for r in data) + self.assertEqual(set(['resource-good', 'resource-56']), ids) + + def test_with_non_admin(self): + data = self.get_json('/meters', + headers={"X-Roles": "Member", + "X-Auth-Token": VALID_TOKEN2, + "X-Project-Id": "project-good"}, + q=[{'field': 'project_id', + 'value': 'project-good', + }]) + ids = set(r['resource_id'] for r in data) + self.assertEqual(set(['resource-good', 'resource-56']), ids) + + def test_non_admin_wrong_project(self): + data = self.get_json('/meters', + expect_errors=True, + headers={"X-Roles": "Member", + "X-Auth-Token": VALID_TOKEN2, + "X-Project-Id": "project-good"}, + q=[{'field': 'project_id', + 'value': 'project-wrong', + }]) + self.assertEqual(401, data.status_int) + + def test_non_admin_two_projects(self): + data = self.get_json('/meters', + expect_errors=True, + headers={"X-Roles": "Member", + "X-Auth-Token": VALID_TOKEN2, + "X-Project-Id": "project-good"}, + q=[{'field': 'project_id', + 'value': 'project-good', + }, + {'field': 'project_id', + 'value': 'project-naughty', + }]) + self.assertEqual(401, data.status_int) + + +class TestAPIEventACL(TestAPIACL): + + PATH = '/events' + + def test_non_admin_get_event_types(self): + data = self.get_json('/event_types', expect_errors=True, + headers={"X-Roles": "Member", + "X-Auth-Token": VALID_TOKEN2, + "X-Project-Id": "project-good"}) + 
self.assertEqual(401, data.status_int) + + +class TestApiEventRBAC(v2.FunctionalTest, + tests_db.MixinTestsWithBackendScenarios): + + PATH = '/events' + + def setUp(self): + super(TestApiEventRBAC, self).setUp() + content = ('{"context_is_admin": "role:admin",' + '"segregation": "rule:context_is_admin",' + '"default" : "!",' + '"telemetry:events:index": "rule:context_is_admin",' + '"telemetry:events:show": "rule:context_is_admin"}') + if six.PY3: + content = content.encode('utf-8') + self.tempfile = fileutils.write_to_tempfile(content=content, + prefix='policy', + suffix='.json') + + self.CONF.set_override("policy_file", + self.path_get(self.tempfile), + group='oslo_policy') + self.app = self._make_app() + + def tearDown(self): + os.remove(self.tempfile) + super(TestApiEventRBAC, self).tearDown() + + def test_get_event_by_message_rbac(self): + headers_rbac = {"X-Roles": "non-admin"} + data = self.get_json(self.PATH + "/100", + expect_errors=True, + headers=headers_rbac, + status=403) + self.assertEqual(u'403 Forbidden\n\nAccess was denied to this ' + 'resource.\n\n RBAC Authorization Failed ', + data.json['error_message']) + + def test_get_events_rbac(self): + headers_rbac = {"X-Roles": "non-admin"} + data = self.get_json(self.PATH, + expect_errors=True, + headers=headers_rbac, + status=403) + self.assertEqual(u'403 Forbidden\n\nAccess was denied to this ' + 'resource.\n\n RBAC Authorization Failed ', + data.json['error_message']) + + def test_get_events_without_project(self): + headers_no_proj = {"X-Roles": "admin", "X-User-Id": "user-good"} + resp = self.get_json(self.PATH, expect_errors=True, + headers=headers_no_proj, status=403) + self.assertEqual(403, resp.status_int) + + def test_get_events_without_user(self): + headers_no_user = {"X-Roles": "admin", "X-Project-Id": "project-good"} + resp = self.get_json(self.PATH, expect_errors=True, + headers=headers_no_user, status=403) + self.assertEqual(403, resp.status_int) + + def test_get_events_without_scope(self): + headers_no_user_proj = {"X-Roles": "admin"} + resp = self.get_json(self.PATH, + expect_errors=True, + headers=headers_no_user_proj, + status=403) + self.assertEqual(403, resp.status_int) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/api/v2/test_alarm_scenarios.py ceilometer-5.0.0~b3/ceilometer/tests/functional/api/v2/test_alarm_scenarios.py --- ceilometer-5.0.0~b2/ceilometer/tests/functional/api/v2/test_alarm_scenarios.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/api/v2/test_alarm_scenarios.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,2929 @@ +# +# Copyright 2013 eNovance +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
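+# These scenarios drive the /v2/alarms REST API through the Pecan test
+# app; MixinTestsWithBackendScenarios repeats each test against the
+# configured storage backends. An illustrative sketch of the shape most
+# of these tests take (mirroring test_post_alarm_defaults below; the
+# alarm name and values here are made up):
+#
+#     body = {'name': 'demo', 'type': 'threshold',
+#             'threshold_rule': {'meter_name': 'ameter',
+#                                'threshold': 2.0}}
+#     self.post_json('/alarms', params=body, status=201,
+#                    headers=self.auth_headers)
+#     alarms = list(self.alarm_conn.get_alarms())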
+"""Tests alarm operation.""" + +import datetime +import uuid + +import mock +import oslo_messaging.conffixture +from oslo_serialization import jsonutils +import requests + +import six +from six import moves +import six.moves.urllib.parse as urlparse + +from ceilometer.alarm.storage import models +from ceilometer import messaging +from ceilometer.tests import constants +from ceilometer.tests import db as tests_db +from ceilometer.tests.functional.api import v2 + + +class TestListEmptyAlarms(v2.FunctionalTest, + tests_db.MixinTestsWithBackendScenarios): + + def test_empty(self): + data = self.get_json('/alarms') + self.assertEqual([], data) + + +class TestAlarms(v2.FunctionalTest, + tests_db.MixinTestsWithBackendScenarios): + + def setUp(self): + super(TestAlarms, self).setUp() + self.auth_headers = {'X-User-Id': str(uuid.uuid4()), + 'X-Project-Id': str(uuid.uuid4())} + for alarm in [ + models.Alarm(name='name1', + type='threshold', + enabled=True, + alarm_id='a', + description='a', + state='insufficient data', + severity='critical', + state_timestamp=constants.MIN_DATETIME, + timestamp=constants.MIN_DATETIME, + ok_actions=[], + insufficient_data_actions=[], + alarm_actions=[], + repeat_actions=True, + user_id=self.auth_headers['X-User-Id'], + project_id=self.auth_headers['X-Project-Id'], + time_constraints=[dict(name='testcons', + start='0 11 * * *', + duration=300)], + rule=dict(comparison_operator='gt', + threshold=2.0, + statistic='avg', + evaluation_periods=60, + period=1, + meter_name='meter.test', + query=[{'field': 'project_id', + 'op': 'eq', 'value': + self.auth_headers['X-Project-Id']} + ]), + ), + models.Alarm(name='name2', + type='threshold', + enabled=True, + alarm_id='b', + description='b', + state='insufficient data', + severity='critical', + state_timestamp=constants.MIN_DATETIME, + timestamp=constants.MIN_DATETIME, + ok_actions=[], + insufficient_data_actions=[], + alarm_actions=[], + repeat_actions=False, + user_id=self.auth_headers['X-User-Id'], + project_id=self.auth_headers['X-Project-Id'], + time_constraints=[], + rule=dict(comparison_operator='gt', + threshold=4.0, + statistic='avg', + evaluation_periods=60, + period=1, + meter_name='meter.test', + query=[{'field': 'project_id', + 'op': 'eq', 'value': + self.auth_headers['X-Project-Id']} + ]), + ), + models.Alarm(name='name3', + type='threshold', + enabled=True, + alarm_id='c', + description='c', + state='insufficient data', + severity='moderate', + state_timestamp=constants.MIN_DATETIME, + timestamp=constants.MIN_DATETIME, + ok_actions=[], + insufficient_data_actions=[], + alarm_actions=[], + repeat_actions=False, + user_id=self.auth_headers['X-User-Id'], + project_id=self.auth_headers['X-Project-Id'], + time_constraints=[], + rule=dict(comparison_operator='gt', + threshold=3.0, + statistic='avg', + evaluation_periods=60, + period=1, + meter_name='meter.mine', + query=[{'field': 'project_id', + 'op': 'eq', 'value': + self.auth_headers['X-Project-Id']} + ]), + ), + models.Alarm(name='name4', + type='combination', + enabled=True, + alarm_id='d', + description='d', + state='insufficient data', + severity='low', + state_timestamp=constants.MIN_DATETIME, + timestamp=constants.MIN_DATETIME, + ok_actions=[], + insufficient_data_actions=[], + alarm_actions=[], + repeat_actions=False, + user_id=self.auth_headers['X-User-Id'], + project_id=self.auth_headers['X-Project-Id'], + time_constraints=[], + rule=dict(alarm_ids=['a', 'b'], + operator='or'), + ), + models.Alarm(name='name5', + type='gnocchi_resources_threshold', + 
enabled=True, + alarm_id='e', + description='e', + state='insufficient data', + severity='critical', + state_timestamp=constants.MIN_DATETIME, + timestamp=constants.MIN_DATETIME, + ok_actions=[], + insufficient_data_actions=[], + alarm_actions=[], + repeat_actions=True, + user_id=self.auth_headers['X-User-Id'], + project_id=self.auth_headers['X-Project-Id'], + time_constraints=[], + rule=dict(comparison_operator='gt', + threshold=2.0, + aggregation_method='mean', + granularity=60, + evaluation_periods=1, + metric='meter.test', + resource_type='instance', + resource_id=( + '6841c175-d7c4-4bc2-bc7a-1c7832271b8f'), + ) + ), + models.Alarm(name='name6', + type='gnocchi_aggregation_by_metrics_threshold', + enabled=True, + alarm_id='f', + description='f', + state='insufficient data', + severity='critical', + state_timestamp=constants.MIN_DATETIME, + timestamp=constants.MIN_DATETIME, + ok_actions=[], + insufficient_data_actions=[], + alarm_actions=[], + repeat_actions=True, + user_id=self.auth_headers['X-User-Id'], + project_id=self.auth_headers['X-Project-Id'], + time_constraints=[], + rule=dict(comparison_operator='gt', + threshold=2.0, + aggregation_method='mean', + evaluation_periods=1, + granularity=60, + metrics=[ + '41869681-5776-46d6-91ed-cccc43b6e4e3', + 'a1fb80f4-c242-4f57-87c6-68f47521059e'] + ), + ), + models.Alarm(name='name7', + type='gnocchi_aggregation_by_resources_threshold', + enabled=True, + alarm_id='g', + description='f', + state='insufficient data', + severity='critical', + state_timestamp=constants.MIN_DATETIME, + timestamp=constants.MIN_DATETIME, + ok_actions=[], + insufficient_data_actions=[], + alarm_actions=[], + repeat_actions=True, + user_id=self.auth_headers['X-User-Id'], + project_id=self.auth_headers['X-Project-Id'], + time_constraints=[], + rule=dict(comparison_operator='gt', + threshold=2.0, + aggregation_method='mean', + granularity=60, + evaluation_periods=1, + metric='meter.test', + resource_type='instance', + query='{"=": {"server_group": ' + '"my_autoscaling_group"}}') + ), + + ]: + + self.alarm_conn.update_alarm(alarm) + + @staticmethod + def _add_default_threshold_rule(alarm): + if (alarm['type'] == 'threshold' and + 'exclude_outliers' not in alarm['threshold_rule']): + alarm['threshold_rule']['exclude_outliers'] = False + + def _verify_alarm(self, json, alarm, expected_name=None): + if expected_name and alarm.name != expected_name: + self.fail("Alarm not found") + self._add_default_threshold_rule(json) + for key in json: + if key.endswith('_rule'): + storage_key = 'rule' + else: + storage_key = key + self.assertEqual(json[key], getattr(alarm, storage_key)) + + def test_list_alarms(self): + data = self.get_json('/alarms') + self.assertEqual(7, len(data)) + self.assertEqual(set(['name1', 'name2', 'name3', 'name4', 'name5', + 'name6', 'name7']), + set(r['name'] for r in data)) + self.assertEqual(set(['meter.test', 'meter.mine']), + set(r['threshold_rule']['meter_name'] + for r in data if 'threshold_rule' in r)) + self.assertEqual(set(['or']), + set(r['combination_rule']['operator'] + for r in data if 'combination_rule' in r)) + self.assertEqual(set(['meter.test']), + set(r['gnocchi_resources_threshold_rule']['metric'] + for r in data + if 'gnocchi_resources_threshold_rule' in r)) + + def test_alarms_query_with_timestamp(self): + date_time = datetime.datetime(2012, 7, 2, 10, 41) + isotime = date_time.isoformat() + resp = self.get_json('/alarms', + q=[{'field': 'timestamp', + 'op': 'gt', + 'value': isotime}], + expect_errors=True) + 
self.assertEqual(resp.status_code, 400) + self.assertEqual(jsonutils.loads(resp.body)['error_message'] + ['faultstring'], + 'Unknown argument: "timestamp": ' + 'not valid for this resource') + + def test_alarms_query_with_meter(self): + resp = self.get_json('/alarms', + q=[{'field': 'meter', + 'op': 'eq', + 'value': 'meter.mine'}], + ) + self.assertEqual(1, len(resp)) + self.assertEqual('c', + resp[0]['alarm_id']) + self.assertEqual('meter.mine', + resp[0] + ['threshold_rule'] + ['meter_name']) + + def test_alarms_query_with_state(self): + alarm = models.Alarm(name='disabled', + type='combination', + enabled=False, + alarm_id='d', + description='d', + state='ok', + state_timestamp=constants.MIN_DATETIME, + timestamp=constants.MIN_DATETIME, + ok_actions=[], + insufficient_data_actions=[], + alarm_actions=[], + repeat_actions=False, + user_id=self.auth_headers['X-User-Id'], + project_id=self.auth_headers['X-Project-Id'], + time_constraints=[], + rule=dict(alarm_ids=['a', 'b'], operator='or'), + severity='critical') + self.alarm_conn.update_alarm(alarm) + resp = self.get_json('/alarms', + q=[{'field': 'state', + 'op': 'eq', + 'value': 'ok'}], + ) + self.assertEqual(1, len(resp)) + self.assertEqual('ok', resp[0]['state']) + + def test_list_alarms_by_type(self): + alarms = self.get_json('/alarms', + q=[{'field': 'type', + 'op': 'eq', + 'value': 'threshold'}]) + self.assertEqual(3, len(alarms)) + self.assertEqual(set(['threshold']), + set(alarm['type'] for alarm in alarms)) + + def test_get_not_existing_alarm(self): + resp = self.get_json('/alarms/alarm-id-3', expect_errors=True) + self.assertEqual(404, resp.status_code) + self.assertEqual('Alarm alarm-id-3 not found', + jsonutils.loads(resp.body)['error_message'] + ['faultstring']) + + def test_get_alarm(self): + alarms = self.get_json('/alarms', + q=[{'field': 'name', + 'value': 'name1', + }]) + self.assertEqual('name1', alarms[0]['name']) + self.assertEqual('meter.test', + alarms[0]['threshold_rule']['meter_name']) + + one = self.get_json('/alarms/%s' % alarms[0]['alarm_id']) + self.assertEqual('name1', one['name']) + self.assertEqual('meter.test', one['threshold_rule']['meter_name']) + self.assertEqual(alarms[0]['alarm_id'], one['alarm_id']) + self.assertEqual(alarms[0]['repeat_actions'], one['repeat_actions']) + self.assertEqual(alarms[0]['time_constraints'], + one['time_constraints']) + + def test_get_alarm_disabled(self): + alarm = models.Alarm(name='disabled', + type='combination', + enabled=False, + alarm_id='d', + description='d', + state='insufficient data', + state_timestamp=constants.MIN_DATETIME, + timestamp=constants.MIN_DATETIME, + ok_actions=[], + insufficient_data_actions=[], + alarm_actions=[], + repeat_actions=False, + user_id=self.auth_headers['X-User-Id'], + project_id=self.auth_headers['X-Project-Id'], + time_constraints=[], + rule=dict(alarm_ids=['a', 'b'], operator='or'), + severity='critical') + self.alarm_conn.update_alarm(alarm) + + alarms = self.get_json('/alarms', + q=[{'field': 'enabled', + 'value': 'False'}]) + self.assertEqual(1, len(alarms)) + self.assertEqual('disabled', alarms[0]['name']) + + one = self.get_json('/alarms/%s' % alarms[0]['alarm_id']) + self.assertEqual('disabled', one['name']) + + def test_get_alarm_combination(self): + alarms = self.get_json('/alarms', + q=[{'field': 'name', + 'value': 'name4', + }]) + self.assertEqual('name4', alarms[0]['name']) + self.assertEqual(['a', 'b'], + alarms[0]['combination_rule']['alarm_ids']) + self.assertEqual('or', alarms[0]['combination_rule']['operator']) + + 
one = self.get_json('/alarms/%s' % alarms[0]['alarm_id']) + self.assertEqual('name4', one['name']) + self.assertEqual(['a', 'b'], + alarms[0]['combination_rule']['alarm_ids']) + self.assertEqual('or', alarms[0]['combination_rule']['operator']) + self.assertEqual(alarms[0]['alarm_id'], one['alarm_id']) + self.assertEqual(alarms[0]['repeat_actions'], one['repeat_actions']) + + def test_get_alarm_project_filter_wrong_op_normal_user(self): + project = self.auth_headers['X-Project-Id'] + + def _test(field, op): + response = self.get_json('/alarms', + q=[{'field': field, + 'op': op, + 'value': project}], + expect_errors=True, + status=400, + headers=self.auth_headers) + faultstring = ('Invalid input for field/attribute op. ' + 'Value: \'%(op)s\'. unimplemented operator ' + 'for %(field)s' % {'field': field, 'op': op}) + self.assertEqual(faultstring, + response.json['error_message']['faultstring']) + + _test('project', 'ne') + _test('project_id', 'ne') + + def test_get_alarm_project_filter_normal_user(self): + project = self.auth_headers['X-Project-Id'] + + def _test(field): + alarms = self.get_json('/alarms', + q=[{'field': field, + 'op': 'eq', + 'value': project}]) + self.assertEqual(7, len(alarms)) + + _test('project') + _test('project_id') + + def test_get_alarm_other_project_normal_user(self): + def _test(field): + response = self.get_json('/alarms', + q=[{'field': field, + 'op': 'eq', + 'value': 'other-project'}], + expect_errors=True, + status=401, + headers=self.auth_headers) + faultstring = 'Not Authorized to access project other-project' + self.assertEqual(faultstring, + response.json['error_message']['faultstring']) + + _test('project') + _test('project_id') + + def test_post_alarm_wsme_workaround(self): + jsons = { + 'type': { + 'name': 'missing type', + 'threshold_rule': { + 'meter_name': 'ameter', + 'threshold': 2.0, + } + }, + 'name': { + 'type': 'threshold', + 'threshold_rule': { + 'meter_name': 'ameter', + 'threshold': 2.0, + } + }, + 'threshold_rule/meter_name': { + 'name': 'missing meter_name', + 'type': 'threshold', + 'threshold_rule': { + 'threshold': 2.0, + } + }, + 'threshold_rule/threshold': { + 'name': 'missing threshold', + 'type': 'threshold', + 'threshold_rule': { + 'meter_name': 'ameter', + } + }, + 'combination_rule/alarm_ids': { + 'name': 'missing alarm_ids', + 'type': 'combination', + 'combination_rule': {} + } + } + for field, json in six.iteritems(jsons): + resp = self.post_json('/alarms', params=json, expect_errors=True, + status=400, headers=self.auth_headers) + self.assertEqual("Invalid input for field/attribute %s." + " Value: \'None\'. Mandatory field missing." 
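+ # (field.split('/', 1)[-1] reduces a key such as
+ # 'threshold_rule/meter_name' to the bare attribute name that
+ # WSME reports in its fault string.)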
+ % field.split('/', 1)[-1], + resp.json['error_message']['faultstring']) + alarms = list(self.alarm_conn.get_alarms()) + self.assertEqual(7, len(alarms)) + + def test_post_invalid_alarm_time_constraint_start(self): + json = { + 'name': 'added_alarm_invalid_constraint_duration', + 'type': 'threshold', + 'time_constraints': [ + { + 'name': 'testcons', + 'start': '11:00am', + 'duration': 10 + } + ], + 'threshold_rule': { + 'meter_name': 'ameter', + 'threshold': 300.0 + } + } + self.post_json('/alarms', params=json, expect_errors=True, status=400, + headers=self.auth_headers) + alarms = list(self.alarm_conn.get_alarms()) + self.assertEqual(7, len(alarms)) + + def test_post_duplicate_time_constraint_name(self): + json = { + 'name': 'added_alarm_duplicate_constraint_name', + 'type': 'threshold', + 'time_constraints': [ + { + 'name': 'testcons', + 'start': '* 11 * * *', + 'duration': 10 + }, + { + 'name': 'testcons', + 'start': '* * * * *', + 'duration': 20 + } + ], + 'threshold_rule': { + 'meter_name': 'ameter', + 'threshold': 300.0 + } + } + resp = self.post_json('/alarms', params=json, expect_errors=True, + status=400, headers=self.auth_headers) + self.assertEqual( + "Time constraint names must be unique for a given alarm.", + resp.json['error_message']['faultstring']) + alarms = list(self.alarm_conn.get_alarms()) + self.assertEqual(7, len(alarms)) + + def test_post_alarm_null_time_constraint(self): + json = { + 'name': 'added_alarm_invalid_constraint_duration', + 'type': 'threshold', + 'time_constraints': None, + 'threshold_rule': { + 'meter_name': 'ameter', + 'threshold': 300.0 + } + } + self.post_json('/alarms', params=json, status=201, + headers=self.auth_headers) + + def test_post_invalid_alarm_time_constraint_duration(self): + json = { + 'name': 'added_alarm_invalid_constraint_duration', + 'type': 'threshold', + 'time_constraints': [ + { + 'name': 'testcons', + 'start': '* 11 * * *', + 'duration': -1, + } + ], + 'threshold_rule': { + 'meter_name': 'ameter', + 'threshold': 300.0 + } + } + self.post_json('/alarms', params=json, expect_errors=True, status=400, + headers=self.auth_headers) + alarms = list(self.alarm_conn.get_alarms()) + self.assertEqual(7, len(alarms)) + + def test_post_invalid_alarm_time_constraint_timezone(self): + json = { + 'name': 'added_alarm_invalid_constraint_timezone', + 'type': 'threshold', + 'time_constraints': [ + { + 'name': 'testcons', + 'start': '* 11 * * *', + 'duration': 10, + 'timezone': 'aaaa' + } + ], + 'threshold_rule': { + 'meter_name': 'ameter', + 'threshold': 300.0 + } + } + self.post_json('/alarms', params=json, expect_errors=True, status=400, + headers=self.auth_headers) + alarms = list(self.alarm_conn.get_alarms()) + self.assertEqual(7, len(alarms)) + + def test_post_invalid_alarm_period(self): + json = { + 'name': 'added_alarm_invalid_period', + 'type': 'threshold', + 'threshold_rule': { + 'meter_name': 'ameter', + 'comparison_operator': 'gt', + 'threshold': 2.0, + 'statistic': 'avg', + 'period': -1, + } + + } + self.post_json('/alarms', params=json, expect_errors=True, status=400, + headers=self.auth_headers) + alarms = list(self.alarm_conn.get_alarms()) + self.assertEqual(7, len(alarms)) + + def test_post_null_threshold_rule(self): + json = { + 'name': 'added_alarm_invalid_threshold_rule', + 'type': 'threshold', + 'threshold_rule': None, + 'combination_rule': None, + } + resp = self.post_json('/alarms', params=json, expect_errors=True, + status=400, headers=self.auth_headers) + self.assertEqual( + "threshold_rule must be set for threshold type 
alarm", + resp.json['error_message']['faultstring']) + + def test_post_invalid_alarm_statistic(self): + json = { + 'name': 'added_alarm', + 'type': 'threshold', + 'threshold_rule': { + 'meter_name': 'ameter', + 'comparison_operator': 'gt', + 'threshold': 2.0, + 'statistic': 'magic', + } + } + resp = self.post_json('/alarms', params=json, expect_errors=True, + status=400, headers=self.auth_headers) + expected_err_msg = ("Invalid input for field/attribute" + " statistic. Value: 'magic'.") + self.assertIn(expected_err_msg, + resp.json['error_message']['faultstring']) + alarms = list(self.alarm_conn.get_alarms()) + self.assertEqual(7, len(alarms)) + + def test_post_invalid_alarm_input_state(self): + json = { + 'name': 'alarm1', + 'state': 'bad_state', + 'type': 'threshold', + 'threshold_rule': { + 'meter_name': 'ameter', + 'comparison_operator': 'gt', + 'threshold': 50.0 + } + } + resp = self.post_json('/alarms', params=json, expect_errors=True, + status=400, headers=self.auth_headers) + expected_err_msg = ("Invalid input for field/attribute state." + " Value: 'bad_state'.") + self.assertIn(expected_err_msg, + resp.json['error_message']['faultstring']) + alarms = list(self.alarm_conn.get_alarms()) + self.assertEqual(7, len(alarms)) + + def test_post_invalid_alarm_input_severity(self): + json = { + 'name': 'alarm1', + 'state': 'ok', + 'severity': 'bad_value', + 'type': 'threshold', + 'threshold_rule': { + 'meter_name': 'ameter', + 'comparison_operator': 'gt', + 'threshold': 50.0 + } + } + resp = self.post_json('/alarms', params=json, expect_errors=True, + status=400, headers=self.auth_headers) + expected_err_msg = ("Invalid input for field/attribute severity." + " Value: 'bad_value'.") + self.assertIn(expected_err_msg, + resp.json['error_message']['faultstring']) + alarms = list(self.alarm_conn.get_alarms()) + self.assertEqual(7, len(alarms)) + + def test_post_invalid_alarm_input_comparison_operator(self): + json = { + 'name': 'alarm2', + 'state': 'ok', + 'type': 'threshold', + 'threshold_rule': { + 'meter_name': 'ameter', + 'comparison_operator': 'bad_co', + 'threshold': 50.0 + } + } + resp = self.post_json('/alarms', params=json, expect_errors=True, + status=400, headers=self.auth_headers) + expected_err_msg = ("Invalid input for field/attribute" + " comparison_operator." + " Value: 'bad_co'.") + self.assertIn(expected_err_msg, + resp.json['error_message']['faultstring']) + alarms = list(self.alarm_conn.get_alarms()) + self.assertEqual(7, len(alarms)) + + def test_post_invalid_alarm_input_type(self): + json = { + 'name': 'alarm3', + 'state': 'ok', + 'type': 'bad_type', + 'threshold_rule': { + 'meter_name': 'ameter', + 'comparison_operator': 'gt', + 'threshold': 50.0 + } + } + resp = self.post_json('/alarms', params=json, expect_errors=True, + status=400, headers=self.auth_headers) + expected_err_msg = ("Invalid input for field/attribute" + " type." 
+ " Value: 'bad_type'.") + self.assertIn(expected_err_msg, + resp.json['error_message']['faultstring']) + alarms = list(self.alarm_conn.get_alarms()) + self.assertEqual(7, len(alarms)) + + def test_post_invalid_alarm_input_enabled_str(self): + json = { + 'name': 'alarm5', + 'enabled': 'bad_enabled', + 'state': 'ok', + 'type': 'threshold', + 'threshold_rule': { + 'meter_name': 'ameter', + 'comparison_operator': 'gt', + 'threshold': 50.0 + } + } + resp = self.post_json('/alarms', params=json, expect_errors=True, + status=400, headers=self.auth_headers) + expected_err_msg = "Value not an unambiguous boolean: bad_enabled" + self.assertIn(expected_err_msg, + resp.json['error_message']['faultstring']) + alarms = list(self.alarm_conn.get_alarms()) + self.assertEqual(7, len(alarms)) + + def test_post_invalid_alarm_input_enabled_int(self): + json = { + 'name': 'alarm6', + 'enabled': 0, + 'state': 'ok', + 'type': 'threshold', + 'threshold_rule': { + 'meter_name': 'ameter', + 'comparison_operator': 'gt', + 'threshold': 50.0 + } + } + resp = self.post_json('/alarms', params=json, + headers=self.auth_headers) + self.assertFalse(resp.json['enabled']) + alarms = list(self.alarm_conn.get_alarms()) + self.assertEqual(8, len(alarms)) + + def test_post_invalid_combination_alarm_input_operator(self): + json = { + 'enabled': False, + 'name': 'alarm6', + 'state': 'ok', + 'type': 'combination', + 'ok_actions': ['http://something/ok'], + 'alarm_actions': ['http://something/alarm'], + 'insufficient_data_actions': ['http://something/no'], + 'repeat_actions': True, + 'combination_rule': { + 'alarm_ids': ['a', + 'b'], + 'operator': 'bad_operator', + } + } + resp = self.post_json('/alarms', params=json, expect_errors=True, + status=400, headers=self.auth_headers) + expected_err_msg = ("Invalid input for field/attribute" + " operator." + " Value: 'bad_operator'.") + self.assertIn(expected_err_msg, + resp.json['error_message']['faultstring']) + alarms = list(self.alarm_conn.get_alarms()) + self.assertEqual(7, len(alarms)) + + def test_post_invalid_alarm_query(self): + json = { + 'name': 'added_alarm', + 'type': 'threshold', + 'threshold_rule': { + 'meter_name': 'ameter', + 'query': [{'field': 'metadata.invalid', + 'field': 'gt', + 'value': 'value'}], + 'comparison_operator': 'gt', + 'threshold': 2.0, + 'statistic': 'avg', + } + } + self.post_json('/alarms', params=json, expect_errors=True, status=400, + headers=self.auth_headers) + alarms = list(self.alarm_conn.get_alarms()) + self.assertEqual(7, len(alarms)) + + def test_post_invalid_alarm_query_field_type(self): + json = { + 'name': 'added_alarm', + 'type': 'threshold', + 'threshold_rule': { + 'meter_name': 'ameter', + 'query': [{'field': 'metadata.valid', + 'op': 'eq', + 'value': 'value', + 'type': 'blob'}], + 'comparison_operator': 'gt', + 'threshold': 2.0, + 'statistic': 'avg', + } + } + resp = self.post_json('/alarms', params=json, expect_errors=True, + status=400, headers=self.auth_headers) + expected_error_message = 'The data type blob is not supported.' 
+ resp_string = jsonutils.loads(resp.body) + fault_string = resp_string['error_message']['faultstring'] + self.assertTrue(fault_string.startswith(expected_error_message)) + alarms = list(self.alarm_conn.get_alarms()) + self.assertEqual(7, len(alarms)) + + def test_post_invalid_alarm_query_non_field(self): + json = { + 'name': 'added_alarm', + 'type': 'threshold', + 'threshold_rule': { + 'meter_name': 'ameter', + 'query': [{'q.field': 'metadata.valid', + 'value': 'value'}], + 'threshold': 2.0, + } + } + resp = self.post_json('/alarms', params=json, expect_errors=True, + status=400, headers=self.auth_headers) + expected_error_message = ("Unknown attribute for argument " + "data.threshold_rule.query: q.field") + fault_string = resp.json['error_message']['faultstring'] + self.assertEqual(expected_error_message, fault_string) + alarms = list(self.alarm_conn.get_alarms()) + self.assertEqual(7, len(alarms)) + + def test_post_invalid_alarm_query_non_value(self): + json = { + 'name': 'added_alarm', + 'type': 'threshold', + 'threshold_rule': { + 'meter_name': 'ameter', + 'query': [{'field': 'metadata.valid', + 'q.value': 'value'}], + 'threshold': 2.0, + } + } + resp = self.post_json('/alarms', params=json, expect_errors=True, + status=400, headers=self.auth_headers) + expected_error_message = ("Unknown attribute for argument " + "data.threshold_rule.query: q.value") + fault_string = resp.json['error_message']['faultstring'] + self.assertEqual(expected_error_message, fault_string) + alarms = list(self.alarm_conn.get_alarms()) + self.assertEqual(7, len(alarms)) + + def test_post_invalid_alarm_have_multiple_rules(self): + json = { + 'name': 'added_alarm', + 'type': 'threshold', + 'threshold_rule': { + 'meter_name': 'ameter', + 'query': [{'field': 'meter', + 'value': 'ameter'}], + 'comparison_operator': 'gt', + 'threshold': 2.0, + }, + 'combination_rule': { + 'alarm_ids': ['a', 'b'], + + } + } + resp = self.post_json('/alarms', params=json, expect_errors=True, + status=400, headers=self.auth_headers) + alarms = list(self.alarm_conn.get_alarms()) + self.assertEqual(7, len(alarms)) + + # threshold_rule and combination_rule order is not + # predictable so it is not possible to do an exact match + # here + error_faultstring = resp.json['error_message']['faultstring'] + for expected_string in ['threshold_rule', 'combination_rule', + 'cannot be set at the same time']: + self.assertIn(expected_string, error_faultstring) + + def test_post_invalid_alarm_timestamp_in_threshold_rule(self): + date_time = datetime.datetime(2012, 7, 2, 10, 41) + isotime = date_time.isoformat() + + json = { + 'name': 'invalid_alarm', + 'type': 'threshold', + 'threshold_rule': { + 'meter_name': 'ameter', + 'query': [{'field': 'timestamp', + 'op': 'gt', + 'value': isotime}], + 'comparison_operator': 'gt', + 'threshold': 2.0, + } + } + resp = self.post_json('/alarms', params=json, expect_errors=True, + status=400, headers=self.auth_headers) + alarms = list(self.alarm_conn.get_alarms()) + self.assertEqual(7, len(alarms)) + self.assertEqual( + 'Unknown argument: "timestamp": ' + 'not valid for this resource', + resp.json['error_message']['faultstring']) + + def _do_post_alarm_invalid_action(self, ok_actions=None, + alarm_actions=None, + insufficient_data_actions=None, + error_message=None): + + ok_actions = ok_actions or [] + alarm_actions = alarm_actions or [] + insufficient_data_actions = insufficient_data_actions or [] + json = { + 'enabled': False, + 'name': 'added_alarm', + 'state': 'ok', + 'type': 'threshold', + 'ok_actions': 
ok_actions, + 'alarm_actions': alarm_actions, + 'insufficient_data_actions': insufficient_data_actions, + 'repeat_actions': True, + 'threshold_rule': { + 'meter_name': 'ameter', + 'query': [{'field': 'metadata.field', + 'op': 'eq', + 'value': '5', + 'type': 'string'}], + 'comparison_operator': 'le', + 'statistic': 'count', + 'threshold': 50, + 'evaluation_periods': '3', + 'period': '180', + } + } + resp = self.post_json('/alarms', params=json, status=400, + headers=self.auth_headers) + alarms = list(self.alarm_conn.get_alarms()) + self.assertEqual(7, len(alarms)) + self.assertEqual(error_message, + resp.json['error_message']['faultstring']) + + def test_post_invalid_alarm_ok_actions(self): + self._do_post_alarm_invalid_action( + ok_actions=['spam://something/ok'], + error_message='Unsupported action spam://something/ok') + + def test_post_invalid_alarm_alarm_actions(self): + self._do_post_alarm_invalid_action( + alarm_actions=['spam://something/alarm'], + error_message='Unsupported action spam://something/alarm') + + def test_post_invalid_alarm_insufficient_data_actions(self): + self._do_post_alarm_invalid_action( + insufficient_data_actions=['spam://something/insufficient'], + error_message='Unsupported action spam://something/insufficient') + + @staticmethod + def _fake_urlsplit(*args, **kwargs): + raise Exception("Evil urlsplit!") + + def test_post_invalid_alarm_actions_format(self): + with mock.patch('oslo_utils.netutils.urlsplit', + self._fake_urlsplit): + self._do_post_alarm_invalid_action( + alarm_actions=['http://[::1'], + error_message='Unable to parse action http://[::1') + + def test_post_alarm_defaults(self): + to_check = { + 'enabled': True, + 'name': 'added_alarm_defaults', + 'state': 'insufficient data', + 'description': ('Alarm when ameter is eq a avg of ' + '300.0 over 60 seconds'), + 'type': 'threshold', + 'ok_actions': [], + 'alarm_actions': [], + 'insufficient_data_actions': [], + 'repeat_actions': False, + 'threshold_rule': { + 'meter_name': 'ameter', + 'query': [{'field': 'project_id', + 'op': 'eq', + 'value': self.auth_headers['X-Project-Id']}], + 'threshold': 300.0, + 'comparison_operator': 'eq', + 'statistic': 'avg', + 'evaluation_periods': 1, + 'period': 60, + } + + } + self._add_default_threshold_rule(to_check) + + json = { + 'name': 'added_alarm_defaults', + 'type': 'threshold', + 'threshold_rule': { + 'meter_name': 'ameter', + 'threshold': 300.0 + } + } + self.post_json('/alarms', params=json, status=201, + headers=self.auth_headers) + alarms = list(self.alarm_conn.get_alarms()) + self.assertEqual(8, len(alarms)) + for alarm in alarms: + if alarm.name == 'added_alarm_defaults': + for key in to_check: + if key.endswith('_rule'): + storage_key = 'rule' + else: + storage_key = key + self.assertEqual(to_check[key], + getattr(alarm, storage_key)) + break + else: + self.fail("Alarm not found") + + def test_post_conflict(self): + json = { + 'enabled': False, + 'name': 'added_alarm', + 'state': 'ok', + 'type': 'threshold', + 'ok_actions': ['http://something/ok'], + 'alarm_actions': ['http://something/alarm'], + 'insufficient_data_actions': ['http://something/no'], + 'repeat_actions': True, + 'threshold_rule': { + 'meter_name': 'ameter', + 'query': [{'field': 'metadata.field', + 'op': 'eq', + 'value': '5', + 'type': 'string'}], + 'comparison_operator': 'le', + 'statistic': 'count', + 'threshold': 50, + 'evaluation_periods': '3', + 'period': '180', + } + } + + self.post_json('/alarms', params=json, status=201, + headers=self.auth_headers) + self.post_json('/alarms', 
params=json, status=409, + headers=self.auth_headers) + + def _do_test_post_alarm(self, exclude_outliers=None): + json = { + 'enabled': False, + 'name': 'added_alarm', + 'state': 'ok', + 'type': 'threshold', + 'severity': 'low', + 'ok_actions': ['http://something/ok'], + 'alarm_actions': ['http://something/alarm'], + 'insufficient_data_actions': ['http://something/no'], + 'repeat_actions': True, + 'threshold_rule': { + 'meter_name': 'ameter', + 'query': [{'field': 'metadata.field', + 'op': 'eq', + 'value': '5', + 'type': 'string'}], + 'comparison_operator': 'le', + 'statistic': 'count', + 'threshold': 50, + 'evaluation_periods': '3', + 'period': '180', + } + } + if exclude_outliers is not None: + json['threshold_rule']['exclude_outliers'] = exclude_outliers + + self.post_json('/alarms', params=json, status=201, + headers=self.auth_headers) + alarms = list(self.alarm_conn.get_alarms(enabled=False)) + self.assertEqual(1, len(alarms)) + json['threshold_rule']['query'].append({ + 'field': 'project_id', 'op': 'eq', + 'value': self.auth_headers['X-Project-Id']}) + # to check to IntegerType type conversion + json['threshold_rule']['evaluation_periods'] = 3 + json['threshold_rule']['period'] = 180 + self._verify_alarm(json, alarms[0], 'added_alarm') + + def test_post_alarm_outlier_exclusion_set(self): + self._do_test_post_alarm(True) + + def test_post_alarm_outlier_exclusion_clear(self): + self._do_test_post_alarm(False) + + def test_post_alarm_outlier_exclusion_defaulted(self): + self._do_test_post_alarm() + + def test_post_alarm_noauth(self): + json = { + 'enabled': False, + 'name': 'added_alarm', + 'state': 'ok', + 'type': 'threshold', + 'severity': 'low', + 'ok_actions': ['http://something/ok'], + 'alarm_actions': ['http://something/alarm'], + 'insufficient_data_actions': ['http://something/no'], + 'repeat_actions': True, + 'threshold_rule': { + 'meter_name': 'ameter', + 'query': [{'field': 'metadata.field', + 'op': 'eq', + 'value': '5', + 'type': 'string'}], + 'comparison_operator': 'le', + 'statistic': 'count', + 'threshold': 50, + 'evaluation_periods': '3', + 'exclude_outliers': False, + 'period': '180', + } + } + self.post_json('/alarms', params=json, status=201) + alarms = list(self.alarm_conn.get_alarms(enabled=False)) + self.assertEqual(1, len(alarms)) + # to check to BoundedInt type conversion + json['threshold_rule']['evaluation_periods'] = 3 + json['threshold_rule']['period'] = 180 + if alarms[0].name == 'added_alarm': + for key in json: + if key.endswith('_rule'): + storage_key = 'rule' + else: + storage_key = key + self.assertEqual(getattr(alarms[0], storage_key), + json[key]) + else: + self.fail("Alarm not found") + + def _do_test_post_alarm_as_admin(self, explicit_project_constraint): + """Test the creation of an alarm as admin for another project.""" + json = { + 'enabled': False, + 'name': 'added_alarm', + 'state': 'ok', + 'type': 'threshold', + 'user_id': 'auseridthatisnotmine', + 'project_id': 'aprojectidthatisnotmine', + 'threshold_rule': { + 'meter_name': 'ameter', + 'query': [{'field': 'metadata.field', + 'op': 'eq', + 'value': '5', + 'type': 'string'}], + 'comparison_operator': 'le', + 'statistic': 'count', + 'threshold': 50, + 'evaluation_periods': 3, + 'period': 180, + } + } + if explicit_project_constraint: + project_constraint = {'field': 'project_id', 'op': 'eq', + 'value': 'aprojectidthatisnotmine'} + json['threshold_rule']['query'].append(project_constraint) + headers = {} + headers.update(self.auth_headers) + headers['X-Roles'] = 'admin' + 
self.post_json('/alarms', params=json, status=201, + headers=headers) + alarms = list(self.alarm_conn.get_alarms(enabled=False)) + self.assertEqual(1, len(alarms)) + self.assertEqual('auseridthatisnotmine', alarms[0].user_id) + self.assertEqual('aprojectidthatisnotmine', alarms[0].project_id) + self._add_default_threshold_rule(json) + if alarms[0].name == 'added_alarm': + for key in json: + if key.endswith('_rule'): + storage_key = 'rule' + if explicit_project_constraint: + self.assertEqual(json[key], + getattr(alarms[0], storage_key)) + else: + query = getattr(alarms[0], storage_key).get('query') + self.assertEqual(2, len(query)) + implicit_constraint = { + u'field': u'project_id', + u'value': u'aprojectidthatisnotmine', + u'op': u'eq' + } + self.assertEqual(implicit_constraint, query[1]) + else: + self.assertEqual(json[key], getattr(alarms[0], key)) + else: + self.fail("Alarm not found") + + def test_post_alarm_as_admin_explicit_project_constraint(self): + """Test the creation of an alarm as admin for another project. + + With an explicit query constraint on the owner's project ID. + """ + self._do_test_post_alarm_as_admin(True) + + def test_post_alarm_as_admin_implicit_project_constraint(self): + """Test the creation of an alarm as admin for another project. + + Test without an explicit query constraint on the owner's project ID. + """ + self._do_test_post_alarm_as_admin(False) + + def test_post_alarm_as_admin_no_user(self): + """Test the creation of an alarm. + + Test the creation of an alarm as admin for another project but + forgetting to set the values. + """ + json = { + 'enabled': False, + 'name': 'added_alarm', + 'state': 'ok', + 'type': 'threshold', + 'project_id': 'aprojectidthatisnotmine', + 'threshold_rule': { + 'meter_name': 'ameter', + 'query': [{'field': 'metadata.field', + 'op': 'eq', + 'value': '5', + 'type': 'string'}, + {'field': 'project_id', 'op': 'eq', + 'value': 'aprojectidthatisnotmine'}], + 'comparison_operator': 'le', + 'statistic': 'count', + 'threshold': 50, + 'evaluation_periods': 3, + 'period': 180, + } + } + headers = {} + headers.update(self.auth_headers) + headers['X-Roles'] = 'admin' + self.post_json('/alarms', params=json, status=201, + headers=headers) + alarms = list(self.alarm_conn.get_alarms(enabled=False)) + self.assertEqual(1, len(alarms)) + self.assertEqual(self.auth_headers['X-User-Id'], alarms[0].user_id) + self.assertEqual('aprojectidthatisnotmine', alarms[0].project_id) + self._verify_alarm(json, alarms[0], 'added_alarm') + + def test_post_alarm_as_admin_no_project(self): + """Test the creation of an alarm. + + Test the creation of an alarm as admin for another project but + forgetting to set the values. 
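+ The missing user_id should then default to the requestor's identity.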
+ """ + json = { + 'enabled': False, + 'name': 'added_alarm', + 'state': 'ok', + 'type': 'threshold', + 'user_id': 'auseridthatisnotmine', + 'threshold_rule': { + 'meter_name': 'ameter', + 'query': [{'field': 'metadata.field', + 'op': 'eq', + 'value': '5', + 'type': 'string'}, + {'field': 'project_id', 'op': 'eq', + 'value': 'aprojectidthatisnotmine'}], + 'comparison_operator': 'le', + 'statistic': 'count', + 'threshold': 50, + 'evaluation_periods': 3, + 'period': 180, + } + } + headers = {} + headers.update(self.auth_headers) + headers['X-Roles'] = 'admin' + self.post_json('/alarms', params=json, status=201, + headers=headers) + alarms = list(self.alarm_conn.get_alarms(enabled=False)) + self.assertEqual(1, len(alarms)) + self.assertEqual('auseridthatisnotmine', alarms[0].user_id) + self.assertEqual(self.auth_headers['X-Project-Id'], + alarms[0].project_id) + self._verify_alarm(json, alarms[0], 'added_alarm') + + @staticmethod + def _alarm_representation_owned_by(identifiers): + json = { + 'name': 'added_alarm', + 'enabled': False, + 'type': 'threshold', + 'ok_actions': ['http://something/ok'], + 'threshold_rule': { + 'meter_name': 'ameter', + 'query': [{'field': 'metadata.field', + 'op': 'eq', + 'value': '5', + 'type': 'string'}], + 'comparison_operator': 'le', + 'statistic': 'count', + 'threshold': 50, + 'evaluation_periods': 3, + 'period': 180, + } + } + for aspect, id in six.iteritems(identifiers): + json['%s_id' % aspect] = id + return json + + def _do_test_post_alarm_as_nonadmin_on_behalf_of_another(self, + identifiers): + """Test posting an alarm. + + Test that posting an alarm as non-admin on behalf of another + user/project fails with an explicit 401 instead of reverting + to the requestor's identity. + """ + json = self._alarm_representation_owned_by(identifiers) + headers = {} + headers.update(self.auth_headers) + headers['X-Roles'] = 'demo' + resp = self.post_json('/alarms', params=json, status=401, + headers=headers) + aspect = 'user' if 'user' in identifiers else 'project' + params = dict(aspect=aspect, id=identifiers[aspect]) + self.assertEqual("Not Authorized to access %(aspect)s %(id)s" % params, + jsonutils.loads(resp.body)['error_message'] + ['faultstring']) + + def test_post_alarm_as_nonadmin_on_behalf_of_another_user(self): + identifiers = dict(user='auseridthatisnotmine') + self._do_test_post_alarm_as_nonadmin_on_behalf_of_another(identifiers) + + def test_post_alarm_as_nonadmin_on_behalf_of_another_project(self): + identifiers = dict(project='aprojectidthatisnotmine') + self._do_test_post_alarm_as_nonadmin_on_behalf_of_another(identifiers) + + def test_post_alarm_as_nonadmin_on_behalf_of_another_creds(self): + identifiers = dict(user='auseridthatisnotmine', + project='aprojectidthatisnotmine') + self._do_test_post_alarm_as_nonadmin_on_behalf_of_another(identifiers) + + def _do_test_post_alarm_as_nonadmin_on_behalf_of_self(self, identifiers): + """Test posting an alarm. + + Test posting an alarm as non-admin on behalf of own user/project + creates alarm associated with the requestor's identity. 
+ """ + json = self._alarm_representation_owned_by(identifiers) + headers = {} + headers.update(self.auth_headers) + headers['X-Roles'] = 'demo' + self.post_json('/alarms', params=json, status=201, headers=headers) + alarms = list(self.alarm_conn.get_alarms(enabled=False)) + self.assertEqual(1, len(alarms)) + self.assertEqual(alarms[0].user_id, + self.auth_headers['X-User-Id']) + self.assertEqual(alarms[0].project_id, + self.auth_headers['X-Project-Id']) + + def test_post_alarm_as_nonadmin_on_behalf_of_own_user(self): + identifiers = dict(user=self.auth_headers['X-User-Id']) + self._do_test_post_alarm_as_nonadmin_on_behalf_of_self(identifiers) + + def test_post_alarm_as_nonadmin_on_behalf_of_own_project(self): + identifiers = dict(project=self.auth_headers['X-Project-Id']) + self._do_test_post_alarm_as_nonadmin_on_behalf_of_self(identifiers) + + def test_post_alarm_as_nonadmin_on_behalf_of_own_creds(self): + identifiers = dict(user=self.auth_headers['X-User-Id'], + project=self.auth_headers['X-Project-Id']) + self._do_test_post_alarm_as_nonadmin_on_behalf_of_self(identifiers) + + def test_post_alarm_combination(self): + json = { + 'enabled': False, + 'name': 'added_alarm', + 'state': 'ok', + 'type': 'combination', + 'ok_actions': ['http://something/ok'], + 'alarm_actions': ['http://something/alarm'], + 'insufficient_data_actions': ['http://something/no'], + 'repeat_actions': True, + 'combination_rule': { + 'alarm_ids': ['a', + 'b'], + 'operator': 'and', + } + } + self.post_json('/alarms', params=json, status=201, + headers=self.auth_headers) + alarms = list(self.alarm_conn.get_alarms(enabled=False)) + self.assertEqual(1, len(alarms)) + if alarms[0].name == 'added_alarm': + for key in json: + if key.endswith('_rule'): + storage_key = 'rule' + else: + storage_key = key + self.assertEqual(json[key], getattr(alarms[0], storage_key)) + else: + self.fail("Alarm not found") + + def test_post_combination_alarm_as_user_with_unauthorized_alarm(self): + """Test posting a combination alarm. + + Test that post a combination alarm as normal user/project + with an alarm_id unauthorized for this project/user + """ + json = { + 'enabled': False, + 'name': 'added_alarm', + 'state': 'ok', + 'type': 'combination', + 'ok_actions': ['http://something/ok'], + 'alarm_actions': ['http://something/alarm'], + 'insufficient_data_actions': ['http://something/no'], + 'repeat_actions': True, + 'combination_rule': { + 'alarm_ids': ['a', + 'b'], + 'operator': 'and', + } + } + an_other_user_auth = {'X-User-Id': str(uuid.uuid4()), + 'X-Project-Id': str(uuid.uuid4())} + resp = self.post_json('/alarms', params=json, status=404, + headers=an_other_user_auth) + self.assertEqual("Alarm a not found in project " + "%s" % + an_other_user_auth['X-Project-Id'], + jsonutils.loads(resp.body)['error_message'] + ['faultstring']) + + def test_post_combination_alarm_as_admin_on_behalf_of_an_other_user(self): + """Test posting a combination alarm. 
+ + Test that post a combination alarm as admin on behalf of an other + user/project with an alarm_id unauthorized for this project/user + """ + json = { + 'enabled': False, + 'name': 'added_alarm', + 'state': 'ok', + 'user_id': 'auseridthatisnotmine', + 'project_id': 'aprojectidthatisnotmine', + 'type': 'combination', + 'ok_actions': ['http://something/ok'], + 'alarm_actions': ['http://something/alarm'], + 'insufficient_data_actions': ['http://something/no'], + 'repeat_actions': True, + 'combination_rule': { + 'alarm_ids': ['a', + 'b'], + 'operator': 'and', + } + } + + headers = {} + headers.update(self.auth_headers) + headers['X-Roles'] = 'admin' + resp = self.post_json('/alarms', params=json, status=404, + headers=headers) + self.assertEqual("Alarm a not found in project " + "aprojectidthatisnotmine", + jsonutils.loads(resp.body)['error_message'] + ['faultstring']) + + def test_post_combination_alarm_with_reasonable_description(self): + """Test posting a combination alarm. + + Test that post a combination alarm with two blanks around the + operator in alarm description. + """ + json = { + 'enabled': False, + 'name': 'added_alarm', + 'state': 'ok', + 'type': 'combination', + 'ok_actions': ['http://something/ok'], + 'alarm_actions': ['http://something/alarm'], + 'insufficient_data_actions': ['http://something/no'], + 'repeat_actions': True, + 'combination_rule': { + 'alarm_ids': ['a', + 'b'], + 'operator': 'and', + } + } + self.post_json('/alarms', params=json, status=201, + headers=self.auth_headers) + alarms = list(self.alarm_conn.get_alarms(enabled=False)) + self.assertEqual(1, len(alarms)) + self.assertEqual(u'Combined state of alarms a and b', + alarms[0].description) + + def test_post_combination_alarm_as_admin_success_owner_unset(self): + self._do_post_combination_alarm_as_admin_success(False) + + def test_post_combination_alarm_as_admin_success_owner_set(self): + self._do_post_combination_alarm_as_admin_success(True) + + def test_post_combination_alarm_with_threshold_rule(self): + """Test the creation of an combination alarm with threshold rule.""" + json = { + 'enabled': False, + 'name': 'added_alarm', + 'state': 'ok', + 'type': 'combination', + 'ok_actions': ['http://something/ok'], + 'alarm_actions': ['http://something/alarm'], + 'insufficient_data_actions': ['http://something/no'], + 'repeat_actions': True, + 'threshold_rule': { + 'meter_name': 'ameter', + 'query': [{'field': 'metadata.field', + 'op': 'eq', + 'value': '5', + 'type': 'string'}], + 'comparison_operator': 'le', + 'statistic': 'count', + 'threshold': 50, + 'evaluation_periods': '3', + 'period': '180', + } + } + resp = self.post_json('/alarms', params=json, + expect_errors=True, status=400, + headers=self.auth_headers) + self.assertEqual( + "combination_rule must be set for combination type alarm", + resp.json['error_message']['faultstring']) + + def test_post_threshold_alarm_with_combination_rule(self): + """Test the creation of an threshold alarm with combination rule.""" + json = { + 'enabled': False, + 'name': 'added_alarm', + 'state': 'ok', + 'type': 'threshold', + 'ok_actions': ['http://something/ok'], + 'alarm_actions': ['http://something/alarm'], + 'insufficient_data_actions': ['http://something/no'], + 'repeat_actions': True, + 'combination_rule': { + 'alarm_ids': ['a', + 'b'], + 'operator': 'and', + } + } + resp = self.post_json('/alarms', params=json, + expect_errors=True, status=400, + headers=self.auth_headers) + self.assertEqual( + "threshold_rule must be set for threshold type alarm", + 
resp.json['error_message']['faultstring']) + + def _do_post_combination_alarm_as_admin_success(self, owner_is_set): + """Test posting a combination alarm. + + Test that post a combination alarm as admin on behalf of nobody + with an alarm_id of someone else, with owner set or not + """ + json = { + 'enabled': False, + 'name': 'added_alarm', + 'state': 'ok', + 'type': 'combination', + 'ok_actions': ['http://something/ok'], + 'alarm_actions': ['http://something/alarm'], + 'insufficient_data_actions': ['http://something/no'], + 'repeat_actions': True, + 'combination_rule': { + 'alarm_ids': ['a', + 'b'], + 'operator': 'and', + } + } + an_other_admin_auth = {'X-User-Id': str(uuid.uuid4()), + 'X-Project-Id': str(uuid.uuid4()), + 'X-Roles': 'admin'} + if owner_is_set: + json['project_id'] = an_other_admin_auth['X-Project-Id'] + json['user_id'] = an_other_admin_auth['X-User-Id'] + + self.post_json('/alarms', params=json, status=201, + headers=an_other_admin_auth) + alarms = list(self.alarm_conn.get_alarms(enabled=False)) + if alarms[0].name == 'added_alarm': + for key in json: + if key.endswith('_rule'): + storage_key = 'rule' + else: + storage_key = key + self.assertEqual(json[key], getattr(alarms[0], storage_key)) + else: + self.fail("Alarm not found") + + def test_post_invalid_alarm_combination(self): + """Test that post a combination alarm with a not existing alarm id.""" + json = { + 'enabled': False, + 'name': 'added_alarm', + 'state': 'ok', + 'type': 'combination', + 'ok_actions': ['http://something/ok'], + 'alarm_actions': ['http://something/alarm'], + 'insufficient_data_actions': ['http://something/no'], + 'repeat_actions': True, + 'combination_rule': { + 'alarm_ids': ['not_exists', + 'b'], + 'operator': 'and', + } + } + self.post_json('/alarms', params=json, status=404, + headers=self.auth_headers) + alarms = list(self.alarm_conn.get_alarms(enabled=False)) + self.assertEqual(0, len(alarms)) + + def test_post_alarm_combination_duplicate_alarm_ids(self): + """Test combination alarm doesn't allow duplicate alarm ids.""" + json_body = { + 'name': 'dup_alarm_id', + 'type': 'combination', + 'combination_rule': { + 'alarm_ids': ['a', 'a', 'd', 'a', 'c', 'c', 'b'], + } + } + self.post_json('/alarms', params=json_body, status=201, + headers=self.auth_headers) + alarms = list(self.alarm_conn.get_alarms(name='dup_alarm_id')) + self.assertEqual(1, len(alarms)) + self.assertEqual(['a', 'd', 'c', 'b'], + alarms[0].rule.get('alarm_ids')) + + def _test_post_alarm_combination_rule_less_than_two_alarms(self, + alarm_ids=None): + json_body = { + 'name': 'one_alarm_in_combination_rule', + 'type': 'combination', + 'combination_rule': { + 'alarm_ids': alarm_ids or [] + } + } + + resp = self.post_json('/alarms', params=json_body, + expect_errors=True, status=400, + headers=self.auth_headers) + self.assertEqual( + 'Alarm combination rule should contain at' + ' least two different alarm ids.', + resp.json['error_message']['faultstring']) + + def test_post_alarm_combination_rule_with_no_alarm(self): + self._test_post_alarm_combination_rule_less_than_two_alarms() + + def test_post_alarm_combination_rule_with_one_alarm(self): + self._test_post_alarm_combination_rule_less_than_two_alarms(['a']) + + def test_post_alarm_combination_rule_with_two_same_alarms(self): + self._test_post_alarm_combination_rule_less_than_two_alarms(['a', + 'a']) + + def test_post_alarm_with_duplicate_actions(self): + body = { + 'name': 'dup-alarm-actions', + 'type': 'combination', + 'combination_rule': { + 'alarm_ids': ['a', 'b'], + }, + 
'alarm_actions': ['http://no.where', 'http://no.where'] + } + resp = self.post_json('/alarms', params=body, + headers=self.auth_headers) + self.assertEqual(201, resp.status_code) + alarms = list(self.alarm_conn.get_alarms(name='dup-alarm-actions')) + self.assertEqual(1, len(alarms)) + self.assertEqual(['http://no.where'], alarms[0].alarm_actions) + + def test_post_alarm_with_too_many_actions(self): + self.CONF.set_override('alarm_max_actions', 1, group='alarm') + body = { + 'name': 'alarm-with-many-actions', + 'type': 'combination', + 'combination_rule': { + 'alarm_ids': ['a', 'b'], + }, + 'alarm_actions': ['http://no.where', 'http://no.where2'] + } + resp = self.post_json('/alarms', params=body, expect_errors=True, + headers=self.auth_headers) + self.assertEqual(400, resp.status_code) + self.assertEqual("alarm_actions count exceeds maximum value 1", + resp.json['error_message']['faultstring']) + + def test_post_alarm_normal_user_set_log_actions(self): + body = { + 'name': 'log_alarm_actions', + 'type': 'combination', + 'combination_rule': { + 'alarm_ids': ['a', 'b'], + }, + 'alarm_actions': ['log://'] + } + resp = self.post_json('/alarms', params=body, expect_errors=True, + headers=self.auth_headers) + self.assertEqual(401, resp.status_code) + expected_msg = ("You are not authorized to create action: log://") + self.assertEqual(expected_msg, + resp.json['error_message']['faultstring']) + + def test_post_alarm_normal_user_set_test_actions(self): + body = { + 'name': 'test_alarm_actions', + 'type': 'combination', + 'combination_rule': { + 'alarm_ids': ['a', 'b'], + }, + 'alarm_actions': ['test://'] + } + resp = self.post_json('/alarms', params=body, expect_errors=True, + headers=self.auth_headers) + self.assertEqual(401, resp.status_code) + expected_msg = ("You are not authorized to create action: test://") + self.assertEqual(expected_msg, + resp.json['error_message']['faultstring']) + + def test_post_alarm_admin_user_set_log_test_actions(self): + body = { + 'name': 'admin_alarm_actions', + 'type': 'combination', + 'combination_rule': { + 'alarm_ids': ['a', 'b'], + }, + 'alarm_actions': ['test://', 'log://'] + } + headers = self.auth_headers + headers['X-Roles'] = 'admin' + self.post_json('/alarms', params=body, status=201, + headers=headers) + alarms = list(self.alarm_conn.get_alarms(name='admin_alarm_actions')) + self.assertEqual(1, len(alarms)) + self.assertEqual(['test://', 'log://'], + alarms[0].alarm_actions) + + def test_post_alarm_without_actions(self): + body = { + 'name': 'alarm_actions_none', + 'type': 'combination', + 'combination_rule': { + 'alarm_ids': ['a', 'b'], + }, + 'alarm_actions': None + } + headers = self.auth_headers + headers['X-Roles'] = 'admin' + self.post_json('/alarms', params=body, status=201, + headers=headers) + alarms = list(self.alarm_conn.get_alarms(name='alarm_actions_none')) + self.assertEqual(1, len(alarms)) + + # FIXME(sileht): This should really returns [] not None + # but the mongodb and sql just store the json dict as is... + # migration script for sql will be a mess because we have + # to parse all JSON :( + # I guess we assume that wsme convert the None input to [] + # because of the array type, but it won't... 
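The FIXME above is worth spelling out: wsme's array-typed attribute does not coerce a JSON null into an empty list, so None is persisted verbatim by both the mongodb and sql drivers, and the assertion just below pins that current behavior. A sketch of the normalization the FIXME asks for, were it applied at the API layer before storage (the helper name is invented for illustration):

    def normalize_actions(alarm_actions):
        # Coerce a missing/None action list to [] so every backend
        # stores the same canonical value.
        return list(alarm_actions) if alarm_actions else []

    assert normalize_actions(None) == []
    assert normalize_actions(['log://']) == ['log://']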
+ self.assertIsNone(alarms[0].alarm_actions) + + def test_post_alarm_trust(self): + json = { + 'name': 'added_alarm_defaults', + 'type': 'threshold', + 'ok_actions': ['trust+http://my.server:1234/foo'], + 'threshold_rule': { + 'meter_name': 'ameter', + 'threshold': 300.0 + } + } + auth = mock.Mock() + trust_client = mock.Mock() + with mock.patch('ceilometer.keystone_client.get_v3_client') as client: + client.return_value = mock.Mock( + auth_ref=mock.Mock(user_id='my_user')) + with mock.patch('keystoneclient.v3.client.Client') as sub_client: + sub_client.return_value = trust_client + trust_client.trusts.create.return_value = mock.Mock(id='5678') + self.post_json('/alarms', params=json, status=201, + headers=self.auth_headers, + extra_environ={'keystone.token_auth': auth}) + trust_client.trusts.create.assert_called_once_with( + trustor_user=self.auth_headers['X-User-Id'], + trustee_user='my_user', + project=self.auth_headers['X-Project-Id'], + impersonation=True, + role_names=[]) + alarms = list(self.alarm_conn.get_alarms()) + for alarm in alarms: + if alarm.name == 'added_alarm_defaults': + self.assertEqual( + ['trust+http://5678:delete@my.server:1234/foo'], + alarm.ok_actions) + break + else: + self.fail("Alarm not found") + + with mock.patch('ceilometer.keystone_client.get_v3_client') as client: + client.return_value = mock.Mock( + auth_ref=mock.Mock(user_id='my_user')) + with mock.patch('keystoneclient.v3.client.Client') as sub_client: + sub_client.return_value = trust_client + self.delete('/alarms/%s' % alarm.alarm_id, + headers=self.auth_headers, + status=204, + extra_environ={'keystone.token_auth': auth}) + trust_client.trusts.delete.assert_called_once_with('5678') + + def test_put_alarm(self): + json = { + 'enabled': False, + 'name': 'name_put', + 'state': 'ok', + 'type': 'threshold', + 'severity': 'critical', + 'ok_actions': ['http://something/ok'], + 'alarm_actions': ['http://something/alarm'], + 'insufficient_data_actions': ['http://something/no'], + 'repeat_actions': True, + 'threshold_rule': { + 'meter_name': 'ameter', + 'query': [{'field': 'metadata.field', + 'op': 'eq', + 'value': '5', + 'type': 'string'}], + 'comparison_operator': 'le', + 'statistic': 'count', + 'threshold': 50, + 'evaluation_periods': 3, + 'period': 180, + } + } + data = self.get_json('/alarms', + q=[{'field': 'name', + 'value': 'name1', + }]) + self.assertEqual(1, len(data)) + alarm_id = data[0]['alarm_id'] + + self.put_json('/alarms/%s' % alarm_id, + params=json, + headers=self.auth_headers) + alarm = list(self.alarm_conn.get_alarms(alarm_id=alarm_id, + enabled=False))[0] + json['threshold_rule']['query'].append({ + 'field': 'project_id', 'op': 'eq', + 'value': self.auth_headers['X-Project-Id']}) + self._verify_alarm(json, alarm) + + def test_put_alarm_as_admin(self): + json = { + 'user_id': 'myuserid', + 'project_id': 'myprojectid', + 'enabled': False, + 'name': 'name_put', + 'state': 'ok', + 'type': 'threshold', + 'severity': 'critical', + 'ok_actions': ['http://something/ok'], + 'alarm_actions': ['http://something/alarm'], + 'insufficient_data_actions': ['http://something/no'], + 'repeat_actions': True, + 'threshold_rule': { + 'meter_name': 'ameter', + 'query': [{'field': 'metadata.field', + 'op': 'eq', + 'value': '5', + 'type': 'string'}, + {'field': 'project_id', 'op': 'eq', + 'value': 'myprojectid'}], + 'comparison_operator': 'le', + 'statistic': 'count', + 'threshold': 50, + 'evaluation_periods': 3, + 'period': 180, + } + } + headers = {} + headers.update(self.auth_headers) + headers['X-Roles'] = 
'admin' + + data = self.get_json('/alarms', + headers=headers, + q=[{'field': 'name', + 'value': 'name1', + }]) + self.assertEqual(1, len(data)) + alarm_id = data[0]['alarm_id'] + + self.put_json('/alarms/%s' % alarm_id, + params=json, + headers=headers) + alarm = list(self.alarm_conn.get_alarms(alarm_id=alarm_id, + enabled=False))[0] + self.assertEqual('myuserid', alarm.user_id) + self.assertEqual('myprojectid', alarm.project_id) + self._verify_alarm(json, alarm) + + def test_put_alarm_wrong_field(self): + json = { + 'this_can_not_be_correct': 'ha', + 'enabled': False, + 'name': 'name1', + 'state': 'ok', + 'type': 'threshold', + 'severity': 'critical', + 'ok_actions': ['http://something/ok'], + 'alarm_actions': ['http://something/alarm'], + 'insufficient_data_actions': ['http://something/no'], + 'repeat_actions': True, + 'threshold_rule': { + 'meter_name': 'ameter', + 'query': [{'field': 'metadata.field', + 'op': 'eq', + 'value': '5', + 'type': 'string'}], + 'comparison_operator': 'le', + 'statistic': 'count', + 'threshold': 50, + 'evaluation_periods': 3, + 'period': 180, + } + } + data = self.get_json('/alarms', + q=[{'field': 'name', + 'value': 'name1', + }]) + self.assertEqual(1, len(data)) + alarm_id = data[0]['alarm_id'] + + resp = self.put_json('/alarms/%s' % alarm_id, + expect_errors=True, + params=json, + headers=self.auth_headers) + self.assertEqual(400, resp.status_code) + + def test_put_alarm_with_existing_name(self): + """Test that update a threshold alarm with an existing name.""" + json = { + 'enabled': False, + 'name': 'name1', + 'state': 'ok', + 'type': 'threshold', + 'severity': 'critical', + 'ok_actions': ['http://something/ok'], + 'alarm_actions': ['http://something/alarm'], + 'insufficient_data_actions': ['http://something/no'], + 'repeat_actions': True, + 'threshold_rule': { + 'meter_name': 'ameter', + 'query': [{'field': 'metadata.field', + 'op': 'eq', + 'value': '5', + 'type': 'string'}], + 'comparison_operator': 'le', + 'statistic': 'count', + 'threshold': 50, + 'evaluation_periods': 3, + 'period': 180, + } + } + data = self.get_json('/alarms', + q=[{'field': 'name', + 'value': 'name2', + }]) + self.assertEqual(1, len(data)) + alarm_id = data[0]['alarm_id'] + + resp = self.put_json('/alarms/%s' % alarm_id, + expect_errors=True, status=409, + params=json, + headers=self.auth_headers) + self.assertEqual( + 'Alarm with name=name1 exists', + resp.json['error_message']['faultstring']) + + def test_put_invalid_alarm_actions(self): + json = { + 'enabled': False, + 'name': 'name1', + 'state': 'ok', + 'type': 'threshold', + 'severity': 'critical', + 'ok_actions': ['spam://something/ok'], + 'alarm_actions': ['http://something/alarm'], + 'insufficient_data_actions': ['http://something/no'], + 'repeat_actions': True, + 'threshold_rule': { + 'meter_name': 'ameter', + 'query': [{'field': 'metadata.field', + 'op': 'eq', + 'value': '5', + 'type': 'string'}], + 'comparison_operator': 'le', + 'statistic': 'count', + 'threshold': 50, + 'evaluation_periods': 3, + 'period': 180, + } + } + data = self.get_json('/alarms', + q=[{'field': 'name', + 'value': 'name2', + }]) + self.assertEqual(1, len(data)) + alarm_id = data[0]['alarm_id'] + + resp = self.put_json('/alarms/%s' % alarm_id, + expect_errors=True, status=400, + params=json, + headers=self.auth_headers) + self.assertEqual( + 'Unsupported action spam://something/ok', + resp.json['error_message']['faultstring']) + + def test_put_alarm_combination_cannot_specify_itself(self): + json = { + 'name': 'name4', + 'type': 'combination', + 
'combination_rule': { + 'alarm_ids': ['d', 'a'], + } + } + + data = self.get_json('/alarms', + q=[{'field': 'name', + 'value': 'name4', + }]) + self.assertEqual(1, len(data)) + alarm_id = data[0]['alarm_id'] + + resp = self.put_json('/alarms/%s' % alarm_id, + expect_errors=True, status=400, + params=json, + headers=self.auth_headers) + + msg = 'Cannot specify alarm %s itself in combination rule' % alarm_id + self.assertEqual(msg, resp.json['error_message']['faultstring']) + + def _test_put_alarm_combination_rule_less_than_two_alarms(self, + alarm_ids=None): + json_body = { + 'name': 'name4', + 'type': 'combination', + 'combination_rule': { + 'alarm_ids': alarm_ids or [] + } + } + + data = self.get_json('/alarms', + q=[{'field': 'name', + 'value': 'name4', + }]) + self.assertEqual(1, len(data)) + alarm_id = data[0]['alarm_id'] + + resp = self.put_json('/alarms/%s' % alarm_id, params=json_body, + expect_errors=True, status=400, + headers=self.auth_headers) + self.assertEqual( + 'Alarm combination rule should contain at' + ' least two different alarm ids.', + resp.json['error_message']['faultstring']) + + def test_put_alarm_combination_rule_with_no_alarm(self): + self._test_put_alarm_combination_rule_less_than_two_alarms() + + def test_put_alarm_combination_rule_with_one_alarm(self): + self._test_put_alarm_combination_rule_less_than_two_alarms(['a']) + + def test_put_alarm_combination_rule_with_two_same_alarm_itself(self): + self._test_put_alarm_combination_rule_less_than_two_alarms(['d', + 'd']) + + def test_put_combination_alarm_with_duplicate_ids(self): + """Test combination alarm doesn't allow duplicate alarm ids.""" + alarms = self.get_json('/alarms', + q=[{'field': 'name', + 'value': 'name4', + }]) + self.assertEqual(1, len(alarms)) + alarm_id = alarms[0]['alarm_id'] + + json_body = { + 'name': 'name4', + 'type': 'combination', + 'combination_rule': { + 'alarm_ids': ['c', 'a', 'b', 'a', 'c', 'b'], + } + } + self.put_json('/alarms/%s' % alarm_id, + params=json_body, status=200, + headers=self.auth_headers) + + alarms = list(self.alarm_conn.get_alarms(alarm_id=alarm_id)) + self.assertEqual(1, len(alarms)) + self.assertEqual(['c', 'a', 'b'], alarms[0].rule.get('alarm_ids')) + + def test_put_alarm_trust(self): + data = self._get_alarm('a') + data.update({'ok_actions': ['trust+http://something/ok']}) + trust_client = mock.Mock() + with mock.patch('ceilometer.keystone_client.get_v3_client') as client: + client.return_value = mock.Mock( + auth_ref=mock.Mock(user_id='my_user')) + with mock.patch('keystoneclient.v3.client.Client') as sub_client: + sub_client.return_value = trust_client + trust_client.trusts.create.return_value = mock.Mock(id='5678') + self.put_json('/alarms/%s' % data['alarm_id'], + params=data, + headers=self.auth_headers) + data = self._get_alarm('a') + self.assertEqual( + ['trust+http://5678:delete@something/ok'], data['ok_actions']) + + data.update({'ok_actions': ['http://no-trust-something/ok']}) + + with mock.patch('ceilometer.keystone_client.get_v3_client') as client: + client.return_value = mock.Mock( + auth_ref=mock.Mock(user_id='my_user')) + with mock.patch('keystoneclient.v3.client.Client') as sub_client: + sub_client.return_value = trust_client + self.put_json('/alarms/%s' % data['alarm_id'], + params=data, + headers=self.auth_headers) + trust_client.trusts.delete.assert_called_once_with('5678') + + data = self._get_alarm('a') + self.assertEqual( + ['http://no-trust-something/ok'], data['ok_actions']) + + def test_delete_alarm(self): + data = self.get_json('/alarms') 
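The two trust tests above assert a round trip: on POST/PUT the API replaces a trust+http action with a URL that embeds the freshly created trust id as userinfo ('5678' in the mocks), and removing or replacing the action later deletes that trust. A rough sketch of the URL rewrite using only standard URL tooling (an illustrative helper, not ceilometer's actual code):

    from six.moves.urllib import parse as urlparse

    def embed_trust(action_url, trust_id):
        # 'trust+http://host/path' -> 'trust+http://<id>:delete@host/path'
        parts = urlparse.urlparse(action_url)
        netloc = '%s:delete@%s' % (trust_id, parts.netloc)
        return urlparse.urlunparse(parts._replace(netloc=netloc))

    # embed_trust('trust+http://my.server:1234/foo', '5678')
    # -> 'trust+http://5678:delete@my.server:1234/foo'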
+ self.assertEqual(7, len(data)) + + resp = self.delete('/alarms/%s' % data[0]['alarm_id'], + headers=self.auth_headers, + status=204) + self.assertEqual(b'', resp.body) + alarms = list(self.alarm_conn.get_alarms()) + self.assertEqual(6, len(alarms)) + + def test_get_state_alarm(self): + data = self.get_json('/alarms') + self.assertEqual(7, len(data)) + + resp = self.get_json('/alarms/%s/state' % data[0]['alarm_id'], + headers=self.auth_headers) + self.assertEqual(resp, data[0]['state']) + + def test_set_state_alarm(self): + data = self.get_json('/alarms') + self.assertEqual(7, len(data)) + + resp = self.put_json('/alarms/%s/state' % data[0]['alarm_id'], + headers=self.auth_headers, + params='alarm') + alarms = list(self.alarm_conn.get_alarms(alarm_id=data[0]['alarm_id'])) + self.assertEqual(1, len(alarms)) + self.assertEqual('alarm', alarms[0].state) + self.assertEqual('alarm', resp.json) + + def test_set_invalid_state_alarm(self): + data = self.get_json('/alarms') + self.assertEqual(7, len(data)) + + self.put_json('/alarms/%s/state' % data[0]['alarm_id'], + headers=self.auth_headers, + params='not valid', + status=400) + + def _get_alarm(self, id): + data = self.get_json('/alarms') + match = [a for a in data if a['alarm_id'] == id] + self.assertEqual(1, len(match), 'alarm %s not found' % id) + return match[0] + + def _get_alarm_history(self, alarm, auth_headers=None, query=None, + expect_errors=False, status=200): + url = '/alarms/%s/history' % alarm['alarm_id'] + if query: + url += '?q.op=%(op)s&q.value=%(value)s&q.field=%(field)s' % query + resp = self.get_json(url, + headers=auth_headers or self.auth_headers, + expect_errors=expect_errors) + if expect_errors: + self.assertEqual(status, resp.status_code) + return resp + + def _update_alarm(self, alarm, updated_data, auth_headers=None): + data = self._get_alarm(alarm['alarm_id']) + data.update(updated_data) + self.put_json('/alarms/%s' % alarm['alarm_id'], + params=data, + headers=auth_headers or self.auth_headers) + + def _delete_alarm(self, alarm, auth_headers=None): + self.delete('/alarms/%s' % alarm['alarm_id'], + headers=auth_headers or self.auth_headers, + status=204) + + def _assert_is_subset(self, expected, actual): + for k, v in six.iteritems(expected): + self.assertEqual(v, actual.get(k), 'mismatched field: %s' % k) + self.assertIsNotNone(actual['event_id']) + + def _assert_in_json(self, expected, actual): + actual = jsonutils.dumps(jsonutils.loads(actual), sort_keys=True) + for k, v in six.iteritems(expected): + fragment = jsonutils.dumps({k: v}, sort_keys=True)[1:-1] + self.assertIn(fragment, actual, + '%s not in %s' % (fragment, actual)) + + def test_record_alarm_history_config(self): + self.CONF.set_override('record_history', False, group='alarm') + alarm = self._get_alarm('a') + history = self._get_alarm_history(alarm) + self.assertEqual([], history) + self._update_alarm(alarm, dict(name='renamed')) + history = self._get_alarm_history(alarm) + self.assertEqual([], history) + self.CONF.set_override('record_history', True, group='alarm') + self._update_alarm(alarm, dict(name='foobar')) + history = self._get_alarm_history(alarm) + self.assertEqual(1, len(history)) + + def test_record_alarm_history_severity(self): + alarm = self._get_alarm('a') + history = self._get_alarm_history(alarm) + self.assertEqual([], history) + self.assertEqual('critical', alarm['severity']) + + self._update_alarm(alarm, dict(severity='low')) + new_alarm = self._get_alarm('a') + history = self._get_alarm_history(alarm) + self.assertEqual(1, 
len(history)) + self.assertEqual(jsonutils.dumps({'severity': 'low'}), + history[0]['detail']) + self.assertEqual('low', new_alarm['severity']) + + def test_redundant_update_alarm_property_no_history_change(self): + alarm = self._get_alarm('a') + history = self._get_alarm_history(alarm) + self.assertEqual([], history) + self.assertEqual('critical', alarm['severity']) + + self._update_alarm(alarm, dict(severity='low')) + new_alarm = self._get_alarm('a') + history = self._get_alarm_history(alarm) + self.assertEqual(1, len(history)) + self.assertEqual(jsonutils.dumps({'severity': 'low'}), + history[0]['detail']) + self.assertEqual('low', new_alarm['severity']) + + self._update_alarm(alarm, dict(severity='low')) + updated_alarm = self._get_alarm('a') + updated_history = self._get_alarm_history(updated_alarm) + self.assertEqual(1, len(updated_history)) + self.assertEqual(jsonutils.dumps({'severity': 'low'}), + updated_history[0]['detail']) + self.assertEqual(history, updated_history) + + def test_get_recorded_alarm_history_on_create(self): + new_alarm = { + 'name': 'new_alarm', + 'type': 'threshold', + 'threshold_rule': { + 'meter_name': 'ameter', + 'query': [], + 'comparison_operator': 'le', + 'statistic': 'max', + 'threshold': 42.0, + 'period': 60, + 'evaluation_periods': 1, + } + } + self.post_json('/alarms', params=new_alarm, status=201, + headers=self.auth_headers) + + alarms = self.get_json('/alarms', + q=[{'field': 'name', + 'value': 'new_alarm', + }]) + self.assertEqual(1, len(alarms)) + alarm = alarms[0] + + history = self._get_alarm_history(alarm) + self.assertEqual(1, len(history)) + self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], + on_behalf_of=alarm['project_id'], + project_id=alarm['project_id'], + type='creation', + user_id=alarm['user_id']), + history[0]) + self._add_default_threshold_rule(new_alarm) + new_alarm['rule'] = new_alarm['threshold_rule'] + del new_alarm['threshold_rule'] + new_alarm['rule']['query'].append({ + 'field': 'project_id', 'op': 'eq', + 'value': self.auth_headers['X-Project-Id']}) + self._assert_in_json(new_alarm, history[0]['detail']) + + def _do_test_get_recorded_alarm_history_on_update(self, + data, + type, + detail, + auth=None): + alarm = self._get_alarm('a') + history = self._get_alarm_history(alarm) + self.assertEqual([], history) + self._update_alarm(alarm, data, auth) + history = self._get_alarm_history(alarm) + self.assertEqual(1, len(history)) + project_id = auth['X-Project-Id'] if auth else alarm['project_id'] + user_id = auth['X-User-Id'] if auth else alarm['user_id'] + self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], + detail=detail, + on_behalf_of=alarm['project_id'], + project_id=project_id, + type=type, + user_id=user_id), + history[0]) + + def test_get_recorded_alarm_history_rule_change(self): + data = dict(name='renamed') + detail = '{"name": "renamed"}' + self._do_test_get_recorded_alarm_history_on_update(data, + 'rule change', + detail) + + def test_get_recorded_alarm_history_state_transition_on_behalf_of(self): + # credentials for new non-admin user, on who's behalf the alarm + # is created + member_user = str(uuid.uuid4()) + member_project = str(uuid.uuid4()) + member_auth = {'X-Roles': 'member', + 'X-User-Id': member_user, + 'X-Project-Id': member_project} + new_alarm = { + 'name': 'new_alarm', + 'type': 'threshold', + 'state': 'ok', + 'threshold_rule': { + 'meter_name': 'other_meter', + 'query': [{'field': 'project_id', + 'op': 'eq', + 'value': member_project}], + 'comparison_operator': 'le', + 'statistic': 'max', + 
'threshold': 42.0, + 'evaluation_periods': 1, + 'period': 60 + } + } + self.post_json('/alarms', params=new_alarm, status=201, + headers=member_auth) + alarm = self.get_json('/alarms', headers=member_auth)[0] + + # effect a state transition as a new administrative user + admin_user = str(uuid.uuid4()) + admin_project = str(uuid.uuid4()) + admin_auth = {'X-Roles': 'admin', + 'X-User-Id': admin_user, + 'X-Project-Id': admin_project} + data = dict(state='alarm') + self._update_alarm(alarm, data, auth_headers=admin_auth) + + self._add_default_threshold_rule(new_alarm) + new_alarm['rule'] = new_alarm['threshold_rule'] + del new_alarm['threshold_rule'] + + # ensure that both the creation event and state transition + # are visible to the non-admin alarm owner and admin user alike + for auth in [member_auth, admin_auth]: + history = self._get_alarm_history(alarm, auth_headers=auth) + self.assertEqual(2, len(history), 'hist: %s' % history) + self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], + detail='{"state": "alarm"}', + on_behalf_of=alarm['project_id'], + project_id=admin_project, + type='rule change', + user_id=admin_user), + history[0]) + self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], + on_behalf_of=alarm['project_id'], + project_id=member_project, + type='creation', + user_id=member_user), + history[1]) + self._assert_in_json(new_alarm, history[1]['detail']) + + # ensure on_behalf_of cannot be constrained in an API call + query = dict(field='on_behalf_of', + op='eq', + value=alarm['project_id']) + self._get_alarm_history(alarm, auth_headers=auth, query=query, + expect_errors=True, status=400) + + def test_get_recorded_alarm_history_segregation(self): + data = dict(name='renamed') + detail = '{"name": "renamed"}' + self._do_test_get_recorded_alarm_history_on_update(data, + 'rule change', + detail) + auth = {'X-Roles': 'member', + 'X-User-Id': str(uuid.uuid4()), + 'X-Project-Id': str(uuid.uuid4())} + history = self._get_alarm_history(self._get_alarm('a'), auth) + self.assertEqual([], history) + + def test_get_recorded_alarm_history_preserved_after_deletion(self): + alarm = self._get_alarm('a') + history = self._get_alarm_history(alarm) + self.assertEqual([], history) + self._update_alarm(alarm, dict(name='renamed')) + history = self._get_alarm_history(alarm) + self.assertEqual(1, len(history)) + alarm = self._get_alarm('a') + self.delete('/alarms/%s' % alarm['alarm_id'], + headers=self.auth_headers, + status=204) + history = self._get_alarm_history(alarm) + self.assertEqual(2, len(history)) + self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], + on_behalf_of=alarm['project_id'], + project_id=alarm['project_id'], + type='deletion', + user_id=alarm['user_id']), + history[0]) + alarm['rule'] = alarm['threshold_rule'] + del alarm['threshold_rule'] + self._assert_in_json(alarm, history[0]['detail']) + detail = '{"name": "renamed"}' + self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], + detail=detail, + on_behalf_of=alarm['project_id'], + project_id=alarm['project_id'], + type='rule change', + user_id=alarm['user_id']), + history[1]) + + def test_get_alarm_history_ordered_by_recentness(self): + alarm = self._get_alarm('a') + for i in moves.xrange(10): + self._update_alarm(alarm, dict(name='%s' % i)) + alarm = self._get_alarm('a') + self._delete_alarm(alarm) + history = self._get_alarm_history(alarm) + self.assertEqual(11, len(history), 'hist: %s' % history) + self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], + type='deletion'), + history[0]) + alarm['rule'] = 
alarm['threshold_rule'] + del alarm['threshold_rule'] + self._assert_in_json(alarm, history[0]['detail']) + for i in moves.xrange(1, 10): + detail = '{"name": "%s"}' % (10 - i) + self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], + detail=detail, + type='rule change'), + history[i]) + + def test_get_alarm_history_constrained_by_timestamp(self): + alarm = self._get_alarm('a') + self._update_alarm(alarm, dict(name='renamed')) + after = datetime.datetime.utcnow().isoformat() + query = dict(field='timestamp', op='gt', value=after) + history = self._get_alarm_history(alarm, query=query) + self.assertEqual(0, len(history)) + query['op'] = 'le' + history = self._get_alarm_history(alarm, query=query) + self.assertEqual(1, len(history)) + detail = '{"name": "renamed"}' + self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], + detail=detail, + on_behalf_of=alarm['project_id'], + project_id=alarm['project_id'], + type='rule change', + user_id=alarm['user_id']), + history[0]) + + def test_get_alarm_history_constrained_by_type(self): + alarm = self._get_alarm('a') + self._delete_alarm(alarm) + query = dict(field='type', op='eq', value='deletion') + history = self._get_alarm_history(alarm, query=query) + self.assertEqual(1, len(history)) + self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], + on_behalf_of=alarm['project_id'], + project_id=alarm['project_id'], + type='deletion', + user_id=alarm['user_id']), + history[0]) + alarm['rule'] = alarm['threshold_rule'] + del alarm['threshold_rule'] + self._assert_in_json(alarm, history[0]['detail']) + + def test_get_alarm_history_constrained_by_alarm_id_failed(self): + alarm = self._get_alarm('b') + query = dict(field='alarm_id', op='eq', value='b') + resp = self._get_alarm_history(alarm, query=query, + expect_errors=True, status=400) + msg = ('Unknown argument: "alarm_id": unrecognized' + " field in query: [], valid keys: ['project', " + "'search_offset', 'severity', 'timestamp'," + " 'type', 'user']") + msg = msg.format(key=u'alarm_id', value=u'b') + self.assertEqual(msg, + resp.json['error_message']['faultstring']) + + def test_get_alarm_history_constrained_by_not_supported_rule(self): + alarm = self._get_alarm('b') + query = dict(field='abcd', op='eq', value='abcd') + resp = self._get_alarm_history(alarm, query=query, + expect_errors=True, status=400) + msg = ('Unknown argument: "abcd": unrecognized' + " field in query: [], valid keys: ['project', " + "'search_offset', 'severity', 'timestamp'," + " 'type', 'user']") + msg = msg.format(key=u'abcd', value=u'abcd') + self.assertEqual(msg, + resp.json['error_message']['faultstring']) + + def test_get_nonexistent_alarm_history(self): + # the existence of alarm history is independent of the + # continued existence of the alarm itself + history = self._get_alarm_history(dict(alarm_id='foobar')) + self.assertEqual([], history) + + def test_alarms_sends_notification(self): + # Hit the AlarmsController ... 
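The notification test below leans on two idioms worth noting: the mock endpoint's side_effect stops the listener as soon as the first notification arrives, and the payload is verified with a matcher object whose __eq__ encodes the assertion, so it can be passed straight to assert_called_once_with. The matcher idiom in miniature (a standalone illustration, not the PayloadMatcher class defined below):

    class HasKeys(object):
        # Compares equal to any mapping that contains the expected
        # keys, letting it stand in for the payload argument in a
        # mock assertion.
        def __init__(self, *keys):
            self.keys = set(keys)

        def __eq__(self, other):
            return self.keys.issubset(set(other.keys()))

    assert {'alarm_id': 'x', 'type': 'creation'} == HasKeys('alarm_id')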
+ json = { + 'name': 'sent_notification', + 'type': 'threshold', + 'severity': 'low', + 'threshold_rule': { + 'meter_name': 'ameter', + 'comparison_operator': 'gt', + 'threshold': 2.0, + 'statistic': 'avg', + } + + } + endpoint = mock.MagicMock() + target = oslo_messaging.Target(topic="notifications") + listener = messaging.get_notification_listener( + self.transport, [target], [endpoint]) + listener.start() + endpoint.info.side_effect = lambda *args: listener.stop() + self.post_json('/alarms', params=json, headers=self.auth_headers) + listener.wait() + + class PayloadMatcher(object): + def __eq__(self, payload): + return (payload['detail']['name'] == 'sent_notification' and + payload['type'] == 'creation' and + payload['detail']['rule']['meter_name'] == 'ameter' and + set(['alarm_id', 'detail', 'event_id', 'on_behalf_of', + 'project_id', 'timestamp', + 'user_id']).issubset(payload.keys())) + + endpoint.info.assert_called_once_with( + {'resource_uuid': None, + 'domain': None, + 'project_domain': None, + 'auth_token': None, + 'is_admin': False, + 'user': None, + 'tenant': None, + 'read_only': False, + 'show_deleted': False, + 'user_identity': '- - - - -', + 'request_id': mock.ANY, + 'user_domain': None}, + 'ceilometer.api', 'alarm.creation', + PayloadMatcher(), mock.ANY) + + def test_alarm_sends_notification(self): + # Hit the AlarmController (with alarm_id supplied) ... + data = self.get_json('/alarms') + del_alarm_name = "name1" + for d in data: + if d['name'] == del_alarm_name: + del_alarm_id = d['alarm_id'] + + with mock.patch.object(messaging, 'get_notifier') as get_notifier: + notifier = get_notifier.return_value + + self.delete('/alarms/%s' % del_alarm_id, + headers=self.auth_headers, status=204) + get_notifier.assert_called_once_with(mock.ANY, + publisher_id='ceilometer.api') + calls = notifier.info.call_args_list + self.assertEqual(1, len(calls)) + args, _ = calls[0] + context, event_type, payload = args + self.assertEqual('alarm.deletion', event_type) + self.assertEqual(del_alarm_name, payload['detail']['name']) + self.assertTrue(set(['alarm_id', 'detail', 'event_id', 'on_behalf_of', + 'project_id', 'timestamp', 'type', + 'user_id']).issubset(payload.keys())) + + @mock.patch('ceilometer.keystone_client.get_client') + def test_post_gnocchi_resources_alarm(self, __): + json = { + 'enabled': False, + 'name': 'name_post', + 'state': 'ok', + 'type': 'gnocchi_resources_threshold', + 'severity': 'critical', + 'ok_actions': ['http://something/ok'], + 'alarm_actions': ['http://something/alarm'], + 'insufficient_data_actions': ['http://something/no'], + 'repeat_actions': True, + 'gnocchi_resources_threshold_rule': { + 'metric': 'ameter', + 'comparison_operator': 'le', + 'aggregation_method': 'count', + 'threshold': 50, + 'evaluation_periods': 3, + 'granularity': 180, + 'resource_type': 'instance', + 'resource_id': '209ef69c-c10c-4efb-90ff-46f4b2d90d2e', + } + } + + with mock.patch('requests.get', + side_effect=requests.ConnectionError()): + resp = self.post_json('/alarms', params=json, + headers=self.auth_headers, + expect_errors=True) + self.assertEqual(503, resp.status_code, resp.body) + + with mock.patch('requests.get', + return_value=mock.Mock(status_code=500, + body="my_custom_error", + text="my_custom_error")): + resp = self.post_json('/alarms', params=json, + headers=self.auth_headers, + expect_errors=True) + self.assertEqual(503, resp.status_code, resp.body) + self.assertIn('my_custom_error', + resp.json['error_message']['faultstring']) + + cap_result = mock.Mock(status_code=201, + 
text=jsonutils.dumps( + {'aggregation_methods': ['count']})) + resource_result = mock.Mock(status_code=200, text="blob") + with mock.patch('requests.get', side_effect=[cap_result, + resource_result] + ) as gnocchi_get: + self.post_json('/alarms', params=json, headers=self.auth_headers) + + gnocchi_url = self.CONF.alarms.gnocchi_url + capabilities_url = urlparse.urljoin(gnocchi_url, + '/v1/capabilities') + resource_url = urlparse.urljoin( + gnocchi_url, + '/v1/resource/instance/209ef69c-c10c-4efb-90ff-46f4b2d90d2e' + ) + + expected = [mock.call(capabilities_url, + headers=mock.ANY), + mock.call(resource_url, + headers=mock.ANY)] + self.assertEqual(expected, gnocchi_get.mock_calls) + + alarms = list(self.alarm_conn.get_alarms(enabled=False)) + self.assertEqual(1, len(alarms)) + self._verify_alarm(json, alarms[0]) + + @mock.patch('ceilometer.keystone_client.get_client') + def test_post_gnocchi_metrics_alarm(self, __): + json = { + 'enabled': False, + 'name': 'name_post', + 'state': 'ok', + 'type': 'gnocchi_aggregation_by_metrics_threshold', + 'severity': 'critical', + 'ok_actions': ['http://something/ok'], + 'alarm_actions': ['http://something/alarm'], + 'insufficient_data_actions': ['http://something/no'], + 'repeat_actions': True, + 'gnocchi_aggregation_by_metrics_threshold_rule': { + 'metrics': ['b3d9d8ab-05e8-439f-89ad-5e978dd2a5eb', + '009d4faf-c275-46f0-8f2d-670b15bac2b0'], + 'comparison_operator': 'le', + 'aggregation_method': 'count', + 'threshold': 50, + 'evaluation_periods': 3, + 'granularity': 180, + } + } + + cap_result = mock.Mock(status_code=200, + text=jsonutils.dumps( + {'aggregation_methods': ['count']})) + with mock.patch('requests.get', return_value=cap_result): + self.post_json('/alarms', params=json, headers=self.auth_headers) + + alarms = list(self.alarm_conn.get_alarms(enabled=False)) + self.assertEqual(1, len(alarms)) + self._verify_alarm(json, alarms[0]) + + @mock.patch('ceilometer.keystone_client.get_client') + def test_post_gnocchi_aggregation_alarm_project_constraint(self, __): + self.CONF.set_override('gnocchi_url', 'http://localhost:8041', + group='alarms') + json = { + 'enabled': False, + 'name': 'project_constraint', + 'state': 'ok', + 'type': 'gnocchi_aggregation_by_resources_threshold', + 'severity': 'critical', + 'ok_actions': ['http://something/ok'], + 'alarm_actions': ['http://something/alarm'], + 'insufficient_data_actions': ['http://something/no'], + 'repeat_actions': True, + 'gnocchi_aggregation_by_resources_threshold_rule': { + 'metric': 'ameter', + 'comparison_operator': 'le', + 'aggregation_method': 'count', + 'threshold': 50, + 'evaluation_periods': 3, + 'granularity': 180, + 'resource_type': 'instance', + 'query': '{"=": {"server_group": "my_autoscaling_group"}}', + } + } + + cap_result = mock.Mock(status_code=201, + text=jsonutils.dumps( + {'aggregation_methods': ['count']})) + resource_result = mock.Mock(status_code=200, text="blob") + query_check_result = mock.Mock(status_code=200, text="blob") + + expected_query = ('{"and": [{"=": {"created_by_project_id": "%s"}}, ' + '{"=": {"server_group": "my_autoscaling_group"}}]}' % + self.auth_headers['X-Project-Id']) + + with mock.patch('requests.get', + side_effect=[cap_result, resource_result]): + with mock.patch('requests.post', + side_effect=[query_check_result]) as fake_post: + + self.post_json('/alarms', params=json, + headers=self.auth_headers) + + self.assertEqual([mock.call( + url=('http://localhost:8041/v1/aggregation/' + 'resource/instance/metric/ameter'), + headers={'Content-Type': 
'application/json', + 'X-Auth-Token': mock.ANY}, + params={'aggregation': 'count'}, + data=expected_query)], + fake_post.mock_calls), + + alarms = list(self.alarm_conn.get_alarms(enabled=False)) + self.assertEqual(1, len(alarms)) + + json['gnocchi_aggregation_by_resources_threshold_rule']['query'] = ( + expected_query) + self._verify_alarm(json, alarms[0]) + + +class TestAlarmsQuotas(v2.FunctionalTest, + tests_db.MixinTestsWithBackendScenarios): + + def setUp(self): + super(TestAlarmsQuotas, self).setUp() + + self.auth_headers = {'X-User-Id': str(uuid.uuid4()), + 'X-Project-Id': str(uuid.uuid4())} + + def _test_alarm_quota(self): + alarm = { + 'name': 'alarm', + 'type': 'threshold', + 'user_id': self.auth_headers['X-User-Id'], + 'project_id': self.auth_headers['X-Project-Id'], + 'threshold_rule': { + 'meter_name': 'testmeter', + 'query': [], + 'comparison_operator': 'le', + 'statistic': 'max', + 'threshold': 42.0, + 'period': 60, + 'evaluation_periods': 1, + } + } + + resp = self.post_json('/alarms', params=alarm, + headers=self.auth_headers) + self.assertEqual(201, resp.status_code) + alarms = self.get_json('/alarms') + self.assertEqual(1, len(alarms)) + + alarm['name'] = 'another_user_alarm' + resp = self.post_json('/alarms', params=alarm, + expect_errors=True, + headers=self.auth_headers) + self.assertEqual(403, resp.status_code) + faultstring = 'Alarm quota exceeded for user' + self.assertIn(faultstring, + resp.json['error_message']['faultstring']) + + alarms = self.get_json('/alarms') + self.assertEqual(1, len(alarms)) + + def test_alarms_quotas(self): + self.CONF.set_override('user_alarm_quota', 1, group='alarm') + self.CONF.set_override('project_alarm_quota', 1, group='alarm') + self._test_alarm_quota() + + def test_project_alarms_quotas(self): + self.CONF.set_override('project_alarm_quota', 1, group='alarm') + self._test_alarm_quota() + + def test_user_alarms_quotas(self): + self.CONF.set_override('user_alarm_quota', 1, group='alarm') + self._test_alarm_quota() + + def test_larger_limit_project_alarms_quotas(self): + self.CONF.set_override('user_alarm_quota', 1, group='alarm') + self.CONF.set_override('project_alarm_quota', 2, group='alarm') + self._test_alarm_quota() + + def test_larger_limit_user_alarms_quotas(self): + self.CONF.set_override('user_alarm_quota', 2, group='alarm') + self.CONF.set_override('project_alarm_quota', 1, group='alarm') + self._test_alarm_quota() + + def test_larger_limit_user_alarm_quotas_multitenant_user(self): + self.CONF.set_override('user_alarm_quota', 2, group='alarm') + self.CONF.set_override('project_alarm_quota', 1, group='alarm') + + def _test(field, value): + query = [{ + 'field': field, + 'op': 'eq', + 'value': value + }] + alarms = self.get_json('/alarms', q=query) + self.assertEqual(1, len(alarms)) + + alarm = { + 'name': 'alarm', + 'type': 'threshold', + 'user_id': self.auth_headers['X-User-Id'], + 'project_id': self.auth_headers['X-Project-Id'], + 'threshold_rule': { + 'meter_name': 'testmeter', + 'query': [], + 'comparison_operator': 'le', + 'statistic': 'max', + 'threshold': 42.0, + 'period': 60, + 'evaluation_periods': 1, + } + } + + resp = self.post_json('/alarms', params=alarm, + headers=self.auth_headers) + + self.assertEqual(201, resp.status_code) + _test('project_id', self.auth_headers['X-Project-Id']) + + self.auth_headers['X-Project-Id'] = str(uuid.uuid4()) + alarm['name'] = 'another_user_alarm' + alarm['project_id'] = self.auth_headers['X-Project-Id'] + resp = self.post_json('/alarms', params=alarm, + headers=self.auth_headers) + 
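These quota scenarios pin down how the two knobs compose: a POST is refused when either the per-user or the per-project alarm count has reached its configured limit, so with user_alarm_quota=2 and project_alarm_quota=1 the same user can still create a second alarm by switching to a fresh project, which is exactly what the remainder of this test does. The decision rule as a standalone predicate (the counts come from storage in the real API; this is only a model):

    def quota_exceeded(user_count, project_count,
                       user_quota=2, project_quota=1):
        # Refuse when either configured limit is already reached.
        return user_count >= user_quota or project_count >= project_quota

    assert quota_exceeded(user_count=1, project_count=1)      # same project
    assert not quota_exceeded(user_count=1, project_count=0)  # new project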
+ self.assertEqual(201, resp.status_code) + _test('project_id', self.auth_headers['X-Project-Id']) + + alarms = self.get_json('/alarms') + self.assertEqual(2, len(alarms)) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/api/v2/test_api_upgrade.py ceilometer-5.0.0~b3/ceilometer/tests/functional/api/v2/test_api_upgrade.py --- ceilometer-5.0.0~b2/ceilometer/tests/functional/api/v2/test_api_upgrade.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/api/v2/test_api_upgrade.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,119 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from keystoneclient import exceptions +import mock +from oslotest import mockpatch + +from ceilometer.tests.functional.api import v2 + + +class TestAPIUpgradePath(v2.FunctionalTest): + def _setup_osloconfig_options(self): + self.CONF.set_override('gnocchi_is_enabled', True, group='api') + self.CONF.set_override('aodh_is_enabled', True, group='api') + self.CONF.set_override('aodh_url', 'http://alarm-endpoint:8008/', + group='api') + + def _setup_keystone_mock(self): + self.CONF.set_override('gnocchi_is_enabled', None, group='api') + self.CONF.set_override('aodh_is_enabled', None, group='api') + self.CONF.set_override('aodh_url', None, group='api') + self.ks = mock.Mock() + self.ks.service_catalog.url_for.side_effect = self._url_for + self.useFixture(mockpatch.Patch( + 'ceilometer.keystone_client.get_client', return_value=self.ks)) + + @staticmethod + def _url_for(service_type=None): + if service_type == 'metric': + return 'http://gnocchi/' + elif service_type == 'alarming': + return 'http://alarm-endpoint:8008/' + raise exceptions.EndpointNotFound() + + def _do_test_gnocchi_enabled_without_database_backend(self): + self.CONF.set_override('dispatcher', 'gnocchi') + for endpoint in ['meters', 'samples', 'resources']: + response = self.app.get(self.PATH_PREFIX + '/' + endpoint, + status=410) + self.assertIn(b'Gnocchi API', response.body) + + headers_events = {"X-Roles": "admin", + "X-User-Id": "user1", + "X-Project-Id": "project1"} + for endpoint in ['events', 'event_types']: + self.app.get(self.PATH_PREFIX + '/' + endpoint, + headers=headers_events, + status=200) + + response = self.post_json('/query/samples', + params={ + "filter": '{"=": {"type": "creation"}}', + "orderby": '[{"timestamp": "DESC"}]', + "limit": 3 + }, status=410) + self.assertIn(b'Gnocchi API', response.body) + + def _do_test_alarm_redirect(self): + response = self.app.get(self.PATH_PREFIX + '/alarms', + expect_errors=True) + + self.assertEqual(307, response.status_code) + self.assertEqual("http://alarm-endpoint:8008/v2/alarms", + response.headers['Location']) + + response = self.app.get(self.PATH_PREFIX + '/alarms/uuid', + expect_errors=True) + + self.assertEqual(307, response.status_code) + self.assertEqual("http://alarm-endpoint:8008/v2/alarms/uuid", + response.headers['Location']) + + response = self.app.delete(self.PATH_PREFIX + '/alarms/uuid', + expect_errors=True) + + self.assertEqual(307, 
response.status_code) + self.assertEqual("http://alarm-endpoint:8008/v2/alarms/uuid", + response.headers['Location']) + + response = self.post_json('/query/alarms', + params={ + "filter": '{"=": {"type": "creation"}}', + "orderby": '[{"timestamp": "DESC"}]', + "limit": 3 + }, status=307) + self.assertEqual("http://alarm-endpoint:8008/v2/query/alarms", + response.headers['Location']) + + def test_gnocchi_enabled_without_database_backend_keystone(self): + self._setup_keystone_mock() + self._do_test_gnocchi_enabled_without_database_backend() + self.ks.service_catalog.url_for.assert_has_calls([ + mock.call(service_type="alarming"), + mock.call(service_type="metric")], + any_order=True) + + def test_gnocchi_enabled_without_database_backend_configoptions(self): + self._setup_osloconfig_options() + self._do_test_gnocchi_enabled_without_database_backend() + + def test_alarm_redirect_keystone(self): + self._setup_keystone_mock() + self._do_test_alarm_redirect() + self.assertEqual([mock.call(service_type="alarming")], + self.ks.service_catalog.url_for.mock_calls) + + def test_alarm_redirect_configoptions(self): + self._setup_osloconfig_options() + self._do_test_alarm_redirect() diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/api/v2/test_app.py ceilometer-5.0.0~b3/ceilometer/tests/functional/api/v2/test_app.py --- ceilometer-5.0.0~b2/ceilometer/tests/functional/api/v2/test_app.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/api/v2/test_app.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,182 @@ +# +# Copyright 2013 IBM Corp. +# Copyright 2013 Julien Danjou +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
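The upgrade tests above exercise the same behavior through two detection paths: explicit config overrides, and a probe of the keystone service catalog, with metering endpoints answering 410 Gone once Gnocchi owns the data and alarm endpoints answering 307 redirects toward aodh. A condensed sketch of the catalog probe (a hypothetical helper; the in-tree logic lives in the v2 root controller):

    from keystoneclient import exceptions

    def aodh_url_from_catalog(service_catalog):
        # Return the alarming endpoint if one is registered, else None
        # so the API falls back to the in-tree alarm controllers.
        try:
            return service_catalog.url_for(service_type='alarming')
        except exceptions.EndpointNotFound:
            return None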
+"""Test basic ceilometer-api app +""" +import json + +import mock +import six +import wsme + +from ceilometer import i18n +from ceilometer.tests.functional.api import v2 + + +class TestPecanApp(v2.FunctionalTest): + + def test_pecan_extension_guessing_unset(self): + # check Pecan does not assume .jpg is an extension + response = self.app.get(self.PATH_PREFIX + '/meters/meter.jpg') + self.assertEqual('application/json', response.content_type) + + +class TestApiMiddleware(v2.FunctionalTest): + + no_lang_translated_error = 'No lang translated error' + en_US_translated_error = 'en-US translated error' + + def _fake_translate(self, message, user_locale): + if user_locale is None: + return self.no_lang_translated_error + else: + return self.en_US_translated_error + + def test_json_parsable_error_middleware_404(self): + response = self.get_json('/invalid_path', + expect_errors=True, + headers={"Accept": + "application/json"} + ) + self.assertEqual(404, response.status_int) + self.assertEqual("application/json", response.content_type) + self.assertTrue(response.json['error_message']) + response = self.get_json('/invalid_path', + expect_errors=True, + headers={"Accept": + "application/json,application/xml"} + ) + self.assertEqual(404, response.status_int) + self.assertEqual("application/json", response.content_type) + self.assertTrue(response.json['error_message']) + response = self.get_json('/invalid_path', + expect_errors=True, + headers={"Accept": + "application/xml;q=0.8, \ + application/json"} + ) + self.assertEqual(404, response.status_int) + self.assertEqual("application/json", response.content_type) + self.assertTrue(response.json['error_message']) + response = self.get_json('/invalid_path', + expect_errors=True + ) + self.assertEqual(404, response.status_int) + self.assertEqual("application/json", response.content_type) + self.assertTrue(response.json['error_message']) + response = self.get_json('/invalid_path', + expect_errors=True, + headers={"Accept": + "text/html,*/*"} + ) + self.assertEqual(404, response.status_int) + self.assertEqual("application/json", response.content_type) + self.assertTrue(response.json['error_message']) + + def test_json_parsable_error_middleware_translation_400(self): + # Ensure translated messages get placed properly into json faults + with mock.patch.object(i18n, 'translate', + side_effect=self._fake_translate): + response = self.post_json('/alarms', params={'name': 'foobar', + 'type': 'threshold'}, + expect_errors=True, + headers={"Accept": + "application/json"} + ) + self.assertEqual(400, response.status_int) + self.assertEqual("application/json", response.content_type) + self.assertTrue(response.json['error_message']) + self.assertEqual(self.no_lang_translated_error, + response.json['error_message']['faultstring']) + + def test_xml_parsable_error_middleware_404(self): + response = self.get_json('/invalid_path', + expect_errors=True, + headers={"Accept": + "application/xml,*/*"} + ) + self.assertEqual(404, response.status_int) + self.assertEqual("application/xml", response.content_type) + self.assertEqual('error_message', response.xml.tag) + response = self.get_json('/invalid_path', + expect_errors=True, + headers={"Accept": + "application/json;q=0.8 \ + ,application/xml"} + ) + self.assertEqual(404, response.status_int) + self.assertEqual("application/xml", response.content_type) + self.assertEqual('error_message', response.xml.tag) + + def test_xml_parsable_error_middleware_translation_400(self): + # Ensure translated messages get placed properly into 
xml faults + with mock.patch.object(i18n, 'translate', + side_effect=self._fake_translate): + response = self.post_json('/alarms', params={'name': 'foobar', + 'type': 'threshold'}, + expect_errors=True, + headers={"Accept": + "application/xml,*/*"} + ) + self.assertEqual(400, response.status_int) + self.assertEqual("application/xml", response.content_type) + self.assertEqual('error_message', response.xml.tag) + fault = response.xml.findall('./error/faultstring') + for fault_string in fault: + self.assertEqual(self.no_lang_translated_error, fault_string.text) + + def test_best_match_language(self): + # Ensure that we are actually invoking language negotiation + with mock.patch.object(i18n, 'translate', + side_effect=self._fake_translate): + response = self.post_json('/alarms', params={'name': 'foobar', + 'type': 'threshold'}, + expect_errors=True, + headers={"Accept": + "application/xml,*/*", + "Accept-Language": + "en-US"} + ) + + self.assertEqual(400, response.status_int) + self.assertEqual("application/xml", response.content_type) + self.assertEqual('error_message', response.xml.tag) + fault = response.xml.findall('./error/faultstring') + for fault_string in fault: + self.assertEqual(self.en_US_translated_error, fault_string.text) + + def test_translated_then_untranslated_error(self): + resp = self.get_json('/alarms/alarm-id-3', expect_errors=True) + self.assertEqual(404, resp.status_code) + body = resp.body + if six.PY3: + body = body.decode('utf-8') + self.assertEqual("Alarm alarm-id-3 not found", + json.loads(body)['error_message'] + ['faultstring']) + + with mock.patch('ceilometer.api.controllers.' + 'v2.base.AlarmNotFound') as CustomErrorClass: + CustomErrorClass.return_value = wsme.exc.ClientSideError( + "untranslated_error", status_code=404) + resp = self.get_json('/alarms/alarm-id-5', expect_errors=True) + + self.assertEqual(404, resp.status_code) + body = resp.body + if six.PY3: + body = body.decode('utf-8') + self.assertEqual("untranslated_error", + json.loads(body)['error_message'] + ['faultstring']) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/api/v2/test_capabilities.py ceilometer-5.0.0~b3/ceilometer/tests/functional/api/v2/test_capabilities.py --- ceilometer-5.0.0~b2/ceilometer/tests/functional/api/v2/test_capabilities.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/api/v2/test_capabilities.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,39 @@ +# +# Copyright Ericsson AB 2014. All rights reserved +# +# Authors: Ildiko Vancsa +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
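The middleware tests in test_app.py above all reduce to one negotiation rule: faults are rendered as JSON unless the Accept header gives application/xml a strictly higher quality value. A toy negotiator with the same observable behavior as those assertions (the real middleware relies on webob's Accept handling, not this sketch):

    def error_content_type(accept_header):
        # Pick XML only when its q-value beats JSON's.
        best, best_q = 'application/json', 0.0
        for item in (accept_header or 'application/json').split(','):
            parts = item.strip().split(';')
            mtype, q = parts[0].strip(), 1.0
            for param in parts[1:]:
                if param.strip().startswith('q='):
                    q = float(param.strip()[2:])
            if mtype in ('application/json', 'application/xml') \
                    and q > best_q:
                best, best_q = mtype, q
        return best

    assert error_content_type(
        'application/xml;q=0.8, application/json') == 'application/json'
    assert error_content_type('application/xml,*/*') == 'application/xml'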
+ +import testscenarios + +from ceilometer.tests import db as tests_db +from ceilometer.tests.functional.api import v2 as tests_api + +load_tests = testscenarios.load_tests_apply_scenarios + + +class TestCapabilitiesController(tests_api.FunctionalTest, + tests_db.MixinTestsWithBackendScenarios): + + def setUp(self): + super(TestCapabilitiesController, self).setUp() + self.url = '/capabilities' + + def test_capabilities(self): + data = self.get_json(self.url) + # check that capabilities data contains both 'api' and 'storage' fields + self.assertIsNotNone(data) + self.assertNotEqual({}, data) + self.assertIn('api', data) + self.assertIn('storage', data) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/api/v2/test_complex_query_scenarios.py ceilometer-5.0.0~b3/ceilometer/tests/functional/api/v2/test_complex_query_scenarios.py --- ceilometer-5.0.0~b2/ceilometer/tests/functional/api/v2/test_complex_query_scenarios.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/api/v2/test_complex_query_scenarios.py 2015-09-03 13:06:00.000000000 +0000 @@ -0,0 +1,615 @@ +# +# Copyright Ericsson AB 2013. All rights reserved +# +# Authors: Ildiko Vancsa +# Balazs Gibizer +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Tests complex queries for samples +""" + +import datetime + +from oslo_utils import timeutils + +from ceilometer.alarm.storage import models +from ceilometer.publisher import utils +from ceilometer import sample +from ceilometer.tests import db as tests_db +from ceilometer.tests.functional.api import v2 as tests_api + + +admin_header = {"X-Roles": "admin", + "X-Project-Id": + "project-id1"} +non_admin_header = {"X-Roles": "Member", + "X-Project-Id": + "project-id1"} + + +class TestQueryMetersController(tests_api.FunctionalTest, + tests_db.MixinTestsWithBackendScenarios): + + def setUp(self): + super(TestQueryMetersController, self).setUp() + self.url = '/query/samples' + + for cnt in [ + sample.Sample('meter.test', + 'cumulative', + '', + 1, + 'user-id1', + 'project-id1', + 'resource-id1', + timestamp=datetime.datetime(2012, 7, 2, 10, 40), + resource_metadata={'display_name': 'test-server1', + 'tag': 'self.sample', + 'size': 456, + 'util': 0.25, + 'is_public': True}, + source='test_source'), + sample.Sample('meter.test', + 'cumulative', + '', + 2, + 'user-id2', + 'project-id2', + 'resource-id2', + timestamp=datetime.datetime(2012, 7, 2, 10, 41), + resource_metadata={'display_name': 'test-server2', + 'tag': 'self.sample', + 'size': 123, + 'util': 0.75, + 'is_public': True}, + source='test_source'), + sample.Sample('meter.test', + 'cumulative', + '', + 3, + 'user-id3', + 'project-id3', + 'resource-id3', + timestamp=datetime.datetime(2012, 7, 2, 10, 42), + resource_metadata={'display_name': 'test-server3', + 'tag': 'self.sample', + 'size': 789, + 'util': 0.95, + 'is_public': True}, + source='test_source')]: + + msg = utils.meter_message_from_counter( + cnt, self.CONF.publisher.telemetry_secret) + self.conn.record_metering_data(msg) + + def 
test_query_fields_are_optional(self): + data = self.post_json(self.url, params={}) + self.assertEqual(3, len(data.json)) + + def test_query_with_isotime(self): + date_time = datetime.datetime(2012, 7, 2, 10, 41) + isotime = date_time.isoformat() + + data = self.post_json(self.url, + params={"filter": + '{">=": {"timestamp": "' + + isotime + '"}}'}) + + self.assertEqual(2, len(data.json)) + for sample_item in data.json: + result_time = timeutils.parse_isotime(sample_item['timestamp']) + result_time = result_time.replace(tzinfo=None) + self.assertTrue(result_time >= date_time) + + def test_non_admin_tenant_sees_only_its_own_project(self): + data = self.post_json(self.url, + params={}, + headers=non_admin_header) + for sample_item in data.json: + self.assertEqual("project-id1", sample_item['project_id']) + + def test_non_admin_tenant_cannot_query_others_project(self): + data = self.post_json(self.url, + params={"filter": + '{"=": {"project_id": "project-id2"}}'}, + expect_errors=True, + headers=non_admin_header) + + self.assertEqual(401, data.status_int) + self.assertIn(b"Not Authorized to access project project-id2", + data.body) + + def test_non_admin_tenant_can_explicitly_filter_for_own_project(self): + data = self.post_json(self.url, + params={"filter": + '{"=": {"project_id": "project-id1"}}'}, + headers=non_admin_header) + + for sample_item in data.json: + self.assertEqual("project-id1", sample_item['project_id']) + + def test_admin_tenant_sees_every_project(self): + data = self.post_json(self.url, + params={}, + headers=admin_header) + + self.assertEqual(3, len(data.json)) + for sample_item in data.json: + self.assertIn(sample_item['project_id'], + (["project-id1", "project-id2", "project-id3"])) + + def test_admin_tenant_sees_every_project_with_complex_filter(self): + filter = ('{"OR": ' + + '[{"=": {"project_id": "project-id1"}}, ' + + '{"=": {"project_id": "project-id2"}}]}') + data = self.post_json(self.url, + params={"filter": filter}, + headers=admin_header) + + self.assertEqual(2, len(data.json)) + for sample_item in data.json: + self.assertIn(sample_item['project_id'], + (["project-id1", "project-id2"])) + + def test_admin_tenant_sees_every_project_with_in_filter(self): + filter = ('{"In": ' + + '{"project_id": ["project-id1", "project-id2"]}}') + data = self.post_json(self.url, + params={"filter": filter}, + headers=admin_header) + + self.assertEqual(2, len(data.json)) + for sample_item in data.json: + self.assertIn(sample_item['project_id'], + (["project-id1", "project-id2"])) + + def test_admin_tenant_can_query_any_project(self): + data = self.post_json(self.url, + params={"filter": + '{"=": {"project_id": "project-id2"}}'}, + headers=admin_header) + + self.assertEqual(1, len(data.json)) + for sample_item in data.json: + self.assertIn(sample_item['project_id'], set(["project-id2"])) + + def test_query_with_orderby(self): + data = self.post_json(self.url, + params={"orderby": '[{"project_id": "DESC"}]'}) + + self.assertEqual(3, len(data.json)) + self.assertEqual(["project-id3", "project-id2", "project-id1"], + [s["project_id"] for s in data.json]) + + def test_query_with_field_name_project(self): + data = self.post_json(self.url, + params={"filter": + '{"=": {"project": "project-id2"}}'}) + + self.assertEqual(1, len(data.json)) + for sample_item in data.json: + self.assertIn(sample_item['project_id'], set(["project-id2"])) + + def test_query_with_field_name_resource(self): + data = self.post_json(self.url, + params={"filter": + '{"=": {"resource": "resource-id2"}}'}) + + 
self.assertEqual(1, len(data.json)) + for sample_item in data.json: + self.assertIn(sample_item['resource_id'], set(["resource-id2"])) + + def test_query_with_wrong_field_name(self): + data = self.post_json(self.url, + params={"filter": + '{"=": {"unknown": "resource-id2"}}'}, + expect_errors=True) + + self.assertEqual(400, data.status_int) + self.assertIn(b"is not valid under any of the given schemas", + data.body) + + def test_query_with_wrong_json(self): + data = self.post_json(self.url, + params={"filter": + '{"=": "resource": "resource-id2"}}'}, + expect_errors=True) + + self.assertEqual(400, data.status_int) + self.assertIn(b"Filter expression not valid", data.body) + + def test_query_with_field_name_user(self): + data = self.post_json(self.url, + params={"filter": + '{"=": {"user": "user-id2"}}'}) + + self.assertEqual(1, len(data.json)) + for sample_item in data.json: + self.assertIn(sample_item['user_id'], set(["user-id2"])) + + def test_query_with_field_name_meter(self): + data = self.post_json(self.url, + params={"filter": + '{"=": {"meter": "meter.test"}}'}) + + self.assertEqual(3, len(data.json)) + for sample_item in data.json: + self.assertIn(sample_item['meter'], set(["meter.test"])) + + def test_query_with_lower_and_upper_case_orderby(self): + data = self.post_json(self.url, + params={"orderby": '[{"project_id": "DeSc"}]'}) + + self.assertEqual(3, len(data.json)) + self.assertEqual(["project-id3", "project-id2", "project-id1"], + [s["project_id"] for s in data.json]) + + def test_query_with_user_field_name_orderby(self): + data = self.post_json(self.url, + params={"orderby": '[{"user": "aSc"}]'}) + + self.assertEqual(3, len(data.json)) + self.assertEqual(["user-id1", "user-id2", "user-id3"], + [s["user_id"] for s in data.json]) + + def test_query_with_volume_field_name_orderby(self): + data = self.post_json(self.url, + params={"orderby": '[{"volume": "deSc"}]'}) + + self.assertEqual(3, len(data.json)) + self.assertEqual([3, 2, 1], + [s["volume"] for s in data.json]) + + def test_query_with_missing_order_in_orderby(self): + data = self.post_json(self.url, + params={"orderby": '[{"project_id": ""}]'}, + expect_errors=True) + + self.assertEqual(400, data.status_int) + self.assertIn(b"does not match '(?i)^asc$|^desc$'", data.body) + + def test_query_with_wrong_json_in_orderby(self): + data = self.post_json(self.url, + params={"orderby": '{"project_id": "desc"}]'}, + expect_errors=True) + + self.assertEqual(400, data.status_int) + self.assertIn(b"Order-by expression not valid: Extra data", data.body) + + def test_filter_with_metadata(self): + data = self.post_json(self.url, + params={"filter": + '{">=": {"metadata.util": 0.5}}'}) + + self.assertEqual(2, len(data.json)) + for sample_item in data.json: + self.assertTrue(float(sample_item["metadata"]["util"]) >= 0.5) + + def test_filter_with_negation(self): + filter_expr = '{"not": {">=": {"metadata.util": 0.5}}}' + data = self.post_json(self.url, + params={"filter": filter_expr}) + + self.assertEqual(1, len(data.json)) + for sample_item in data.json: + self.assertTrue(float(sample_item["metadata"]["util"]) < 0.5) + + def test_limit_must_be_positive(self): + data = self.post_json(self.url, + params={"limit": 0}, + expect_errors=True) + + self.assertEqual(400, data.status_int) + self.assertIn(b"Limit must be positive", data.body) + + def test_default_limit(self): + self.CONF.set_override('default_api_return_limit', 1, group='api') + data = self.post_json(self.url, params={}) + self.assertEqual(1, len(data.json)) + + +class 
TestQueryAlarmsController(tests_api.FunctionalTest, + tests_db.MixinTestsWithBackendScenarios): + + def setUp(self): + super(TestQueryAlarmsController, self).setUp() + self.alarm_url = '/query/alarms' + + for state in ['ok', 'alarm', 'insufficient data']: + for date in [datetime.datetime(2013, 1, 1), + datetime.datetime(2013, 2, 2)]: + for id in [1, 2]: + alarm_id = "-".join([state, date.isoformat(), str(id)]) + project_id = "project-id%d" % id + alarm = models.Alarm(name=alarm_id, + type='threshold', + enabled=True, + alarm_id=alarm_id, + description='a', + state=state, + state_timestamp=date, + timestamp=date, + ok_actions=[], + insufficient_data_actions=[], + alarm_actions=[], + repeat_actions=True, + user_id="user-id%d" % id, + project_id=project_id, + time_constraints=[], + rule=dict(comparison_operator='gt', + threshold=2.0, + statistic='avg', + evaluation_periods=60, + period=1, + meter_name='meter.test', + query=[{'field': + 'project_id', + 'op': 'eq', + 'value': + project_id}]), + severity='critical') + self.alarm_conn.update_alarm(alarm) + + def test_query_all(self): + data = self.post_json(self.alarm_url, + params={}) + + self.assertEqual(12, len(data.json)) + + def test_filter_with_isotime_timestamp(self): + date_time = datetime.datetime(2013, 1, 1) + isotime = date_time.isoformat() + + data = self.post_json(self.alarm_url, + params={"filter": + '{">": {"timestamp": "' + + isotime + '"}}'}) + + self.assertEqual(6, len(data.json)) + for alarm in data.json: + result_time = timeutils.parse_isotime(alarm['timestamp']) + result_time = result_time.replace(tzinfo=None) + self.assertTrue(result_time > date_time) + + def test_filter_with_isotime_state_timestamp(self): + date_time = datetime.datetime(2013, 1, 1) + isotime = date_time.isoformat() + + data = self.post_json(self.alarm_url, + params={"filter": + '{">": {"state_timestamp": "' + + isotime + '"}}'}) + + self.assertEqual(6, len(data.json)) + for alarm in data.json: + result_time = timeutils.parse_isotime(alarm['state_timestamp']) + result_time = result_time.replace(tzinfo=None) + self.assertTrue(result_time > date_time) + + def test_non_admin_tenant_sees_only_its_own_project(self): + data = self.post_json(self.alarm_url, + params={}, + headers=non_admin_header) + for alarm in data.json: + self.assertEqual("project-id1", alarm['project_id']) + + def test_non_admin_tenant_cannot_query_others_project(self): + data = self.post_json(self.alarm_url, + params={"filter": + '{"=": {"project_id": "project-id2"}}'}, + expect_errors=True, + headers=non_admin_header) + + self.assertEqual(401, data.status_int) + self.assertIn(b"Not Authorized to access project project-id2", + data.body) + + def test_non_admin_tenant_can_explicitly_filter_for_own_project(self): + data = self.post_json(self.alarm_url, + params={"filter": + '{"=": {"project_id": "project-id1"}}'}, + headers=non_admin_header) + + for alarm in data.json: + self.assertEqual("project-id1", alarm['project_id']) + + def test_admin_tenant_sees_every_project(self): + data = self.post_json(self.alarm_url, + params={}, + headers=admin_header) + + self.assertEqual(12, len(data.json)) + for alarm in data.json: + self.assertIn(alarm['project_id'], + (["project-id1", "project-id2"])) + + def test_admin_tenant_can_query_any_project(self): + data = self.post_json(self.alarm_url, + params={"filter": + '{"=": {"project_id": "project-id2"}}'}, + headers=admin_header) + + self.assertEqual(6, len(data.json)) + for alarm in data.json: + self.assertIn(alarm['project_id'], set(["project-id2"])) + + def 
test_query_with_field_project(self): + data = self.post_json(self.alarm_url, + params={"filter": + '{"=": {"project": "project-id2"}}'}) + + self.assertEqual(6, len(data.json)) + for sample_item in data.json: + self.assertIn(sample_item['project_id'], set(["project-id2"])) + + def test_query_with_field_user_in_orderby(self): + data = self.post_json(self.alarm_url, + params={"filter": '{"=": {"state": "alarm"}}', + "orderby": '[{"user": "DESC"}]'}) + + self.assertEqual(4, len(data.json)) + self.assertEqual(["user-id2", "user-id2", "user-id1", "user-id1"], + [s["user_id"] for s in data.json]) + + def test_query_with_filter_orderby_and_limit(self): + orderby = '[{"state_timestamp": "DESC"}]' + data = self.post_json(self.alarm_url, + params={"filter": '{"=": {"state": "alarm"}}', + "orderby": orderby, + "limit": 3}) + + self.assertEqual(3, len(data.json)) + self.assertEqual(["2013-02-02T00:00:00", + "2013-02-02T00:00:00", + "2013-01-01T00:00:00"], + [a["state_timestamp"] for a in data.json]) + for alarm in data.json: + self.assertEqual("alarm", alarm["state"]) + + def test_limit_must_be_positive(self): + data = self.post_json(self.alarm_url, + params={"limit": 0}, + expect_errors=True) + + self.assertEqual(400, data.status_int) + self.assertIn(b"Limit must be positive", data.body) + + def test_default_limit(self): + self.CONF.set_override('default_api_return_limit', 1, group='api') + data = self.post_json(self.alarm_url, params={}) + self.assertEqual(1, len(data.json)) + + +class TestQueryAlarmsHistoryController( + tests_api.FunctionalTest, tests_db.MixinTestsWithBackendScenarios): + + def setUp(self): + super(TestQueryAlarmsHistoryController, self).setUp() + self.url = '/query/alarms/history' + for id in [1, 2]: + for type in ["creation", "state transition"]: + for date in [datetime.datetime(2013, 1, 1), + datetime.datetime(2013, 2, 2)]: + event_id = "-".join([str(id), type, date.isoformat()]) + alarm_change = {"event_id": event_id, + "alarm_id": "alarm-id%d" % id, + "type": type, + "detail": "", + "user_id": "user-id%d" % id, + "project_id": "project-id%d" % id, + "on_behalf_of": "project-id%d" % id, + "timestamp": date} + + self.alarm_conn.record_alarm_change(alarm_change) + + def test_query_all(self): + data = self.post_json(self.url, + params={}) + + self.assertEqual(8, len(data.json)) + + def test_filter_with_isotime(self): + date_time = datetime.datetime(2013, 1, 1) + isotime = date_time.isoformat() + + data = self.post_json(self.url, + params={"filter": + '{">": {"timestamp":"' + + isotime + '"}}'}) + + self.assertEqual(4, len(data.json)) + for history in data.json: + result_time = timeutils.parse_isotime(history['timestamp']) + result_time = result_time.replace(tzinfo=None) + self.assertTrue(result_time > date_time) + + def test_non_admin_tenant_sees_only_its_own_project(self): + data = self.post_json(self.url, + params={}, + headers=non_admin_header) + for history in data.json: + self.assertEqual("project-id1", history['on_behalf_of']) + + def test_non_admin_tenant_cannot_query_others_project(self): + data = self.post_json(self.url, + params={"filter": + '{"=": {"on_behalf_of":' + + ' "project-id2"}}'}, + expect_errors=True, + headers=non_admin_header) + + self.assertEqual(401, data.status_int) + self.assertIn(b"Not Authorized to access project project-id2", + data.body) + + def test_non_admin_tenant_can_explicitly_filter_for_own_project(self): + data = self.post_json(self.url, + params={"filter": + '{"=": {"on_behalf_of":' + + ' "project-id1"}}'}, + headers=non_admin_header) + + for 
history in data.json:
+            self.assertEqual("project-id1", history['on_behalf_of'])
+
+    def test_admin_tenant_sees_every_project(self):
+        data = self.post_json(self.url,
+                              params={},
+                              headers=admin_header)
+
+        self.assertEqual(8, len(data.json))
+        for history in data.json:
+            self.assertIn(history['on_behalf_of'],
+                          (["project-id1", "project-id2"]))
+
+    def test_query_with_filter_for_project_orderby_with_user(self):
+        data = self.post_json(self.url,
+                              params={"filter":
+                                      '{"=": {"project": "project-id1"}}',
+                                      "orderby": '[{"user": "DESC"}]',
+                                      "limit": 3})
+
+        self.assertEqual(3, len(data.json))
+        self.assertEqual(["user-id1",
+                          "user-id1",
+                          "user-id1"],
+                         [h["user_id"] for h in data.json])
+        for history in data.json:
+            self.assertEqual("project-id1", history['project_id'])
+
+    def test_query_with_filter_orderby_and_limit(self):
+        data = self.post_json(self.url,
+                              params={"filter": '{"=": {"type": "creation"}}',
+                                      "orderby": '[{"timestamp": "DESC"}]',
+                                      "limit": 3})
+
+        self.assertEqual(3, len(data.json))
+        self.assertEqual(["2013-02-02T00:00:00",
+                          "2013-02-02T00:00:00",
+                          "2013-01-01T00:00:00"],
+                         [h["timestamp"] for h in data.json])
+        for history in data.json:
+            self.assertEqual("creation", history['type'])
+
+    def test_limit_must_be_positive(self):
+        data = self.post_json(self.url,
+                              params={"limit": 0},
+                              expect_errors=True)
+
+        self.assertEqual(400, data.status_int)
+        self.assertIn(b"Limit must be positive", data.body)
+
+    def test_default_limit(self):
+        self.CONF.set_override('default_api_return_limit', 1, group='api')
+        data = self.post_json(self.url, params={})
+        self.assertEqual(1, len(data.json))
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/api/v2/test_compute_duration_by_resource_scenarios.py ceilometer-5.0.0~b3/ceilometer/tests/functional/api/v2/test_compute_duration_by_resource_scenarios.py
--- ceilometer-5.0.0~b2/ceilometer/tests/functional/api/v2/test_compute_duration_by_resource_scenarios.py 1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/functional/api/v2/test_compute_duration_by_resource_scenarios.py 2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,195 @@
+#
+# Copyright 2012 New Dream Network, LLC (DreamHost)
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Test computing duration by resource.
+"""
+
+import datetime
+
+import mock
+from oslo_utils import timeutils
+
+from ceilometer.storage import models
+from ceilometer.tests import db as tests_db
+from ceilometer.tests.functional.api import v2
+
+
+class TestComputeDurationByResource(v2.FunctionalTest,
+                                    tests_db.MixinTestsWithBackendScenarios):
+
+    def setUp(self):
+        super(TestComputeDurationByResource, self).setUp()
+        # Create events relative to the range and pretend
+        # that the intervening events exist.
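+        # Timeline used by these tests: early1/early2 fall before the
+        # queried range, start and end bound the range (2012-08-28),
+        # middle1/middle2 fall inside it, and late1/late2 fall after it.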
+ + self.early1 = datetime.datetime(2012, 8, 27, 7, 0) + self.early2 = datetime.datetime(2012, 8, 27, 17, 0) + + self.start = datetime.datetime(2012, 8, 28, 0, 0) + + self.middle1 = datetime.datetime(2012, 8, 28, 8, 0) + self.middle2 = datetime.datetime(2012, 8, 28, 18, 0) + + self.end = datetime.datetime(2012, 8, 28, 23, 59) + + self.late1 = datetime.datetime(2012, 8, 29, 9, 0) + self.late2 = datetime.datetime(2012, 8, 29, 19, 0) + + def _patch_get_interval(self, start, end): + def get_interval(sample_filter, period, groupby, aggregate): + self.assertIsNotNone(sample_filter.start_timestamp) + self.assertIsNotNone(sample_filter.end_timestamp) + if (sample_filter.start_timestamp > end or + sample_filter.end_timestamp < start): + return [] + duration_start = max(sample_filter.start_timestamp, start) + duration_end = min(sample_filter.end_timestamp, end) + duration = timeutils.delta_seconds(duration_start, duration_end) + return [ + models.Statistics( + unit='', + min=0, + max=0, + avg=0, + sum=0, + count=0, + period=None, + period_start=None, + period_end=None, + duration=duration, + duration_start=duration_start, + duration_end=duration_end, + groupby=None, + ) + ] + return mock.patch.object(type(self.conn), 'get_meter_statistics', + side_effect=get_interval) + + def _invoke_api(self): + return self.get_json('/meters/instance:m1.tiny/statistics', + q=[{'field': 'timestamp', + 'op': 'ge', + 'value': self.start.isoformat()}, + {'field': 'timestamp', + 'op': 'le', + 'value': self.end.isoformat()}, + {'field': 'search_offset', + 'value': 10}]) + + def test_before_range(self): + with self._patch_get_interval(self.early1, self.early2): + data = self._invoke_api() + self.assertEqual([], data) + + def _assert_times_match(self, actual, expected): + if actual: + actual = timeutils.parse_isotime(actual) + actual = actual.replace(tzinfo=None) + self.assertEqual(expected, actual) + + def test_overlap_range_start(self): + with self._patch_get_interval(self.early1, self.middle1): + data = self._invoke_api() + self._assert_times_match(data[0]['duration_start'], self.start) + self._assert_times_match(data[0]['duration_end'], self.middle1) + self.assertEqual(8 * 60 * 60, data[0]['duration']) + + def test_within_range(self): + with self._patch_get_interval(self.middle1, self.middle2): + data = self._invoke_api() + self._assert_times_match(data[0]['duration_start'], self.middle1) + self._assert_times_match(data[0]['duration_end'], self.middle2) + self.assertEqual(10 * 60 * 60, data[0]['duration']) + + def test_within_range_zero_duration(self): + with self._patch_get_interval(self.middle1, self.middle1): + data = self._invoke_api() + self._assert_times_match(data[0]['duration_start'], self.middle1) + self._assert_times_match(data[0]['duration_end'], self.middle1) + self.assertEqual(0, data[0]['duration']) + + def test_overlap_range_end(self): + with self._patch_get_interval(self.middle2, self.late1): + data = self._invoke_api() + self._assert_times_match(data[0]['duration_start'], self.middle2) + self._assert_times_match(data[0]['duration_end'], self.end) + self.assertEqual(((6 * 60) - 1) * 60, data[0]['duration']) + + def test_after_range(self): + with self._patch_get_interval(self.late1, self.late2): + data = self._invoke_api() + self.assertEqual([], data) + + def test_without_end_timestamp(self): + statistics = [ + models.Statistics( + unit=None, + count=0, + min=None, + max=None, + avg=None, + duration=None, + duration_start=self.late1, + duration_end=self.late2, + sum=0, + period=None, + period_start=None, 
+ period_end=None, + groupby=None, + ) + ] + with mock.patch.object(type(self.conn), 'get_meter_statistics', + return_value=statistics): + data = self.get_json('/meters/instance:m1.tiny/statistics', + q=[{'field': 'timestamp', + 'op': 'ge', + 'value': self.late1.isoformat()}, + {'field': 'resource_id', + 'value': 'resource-id'}, + {'field': 'search_offset', + 'value': 10}]) + self._assert_times_match(data[0]['duration_start'], self.late1) + self._assert_times_match(data[0]['duration_end'], self.late2) + + def test_without_start_timestamp(self): + statistics = [ + models.Statistics( + unit=None, + count=0, + min=None, + max=None, + avg=None, + duration=None, + duration_start=self.early1, + duration_end=self.early2, + sum=0, + period=None, + period_start=None, + period_end=None, + groupby=None, + ) + ] + + with mock.patch.object(type(self.conn), 'get_meter_statistics', + return_value=statistics): + data = self.get_json('/meters/instance:m1.tiny/statistics', + q=[{'field': 'timestamp', + 'op': 'le', + 'value': self.early2.isoformat()}, + {'field': 'resource_id', + 'value': 'resource-id'}, + {'field': 'search_offset', + 'value': 10}]) + self._assert_times_match(data[0]['duration_start'], self.early1) + self._assert_times_match(data[0]['duration_end'], self.early2) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/api/v2/test_event_scenarios.py ceilometer-5.0.0~b3/ceilometer/tests/functional/api/v2/test_event_scenarios.py --- ceilometer-5.0.0~b2/ceilometer/tests/functional/api/v2/test_event_scenarios.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/api/v2/test_event_scenarios.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,595 @@ +# +# Copyright 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Test event, event_type and trait retrieval.""" + +import datetime +import uuid + +import webtest.app + +from ceilometer.event.storage import models +from ceilometer.tests import db as tests_db +from ceilometer.tests.functional.api import v2 + +USER_ID = uuid.uuid4().hex +PROJ_ID = uuid.uuid4().hex +HEADERS = {"X-Roles": "admin", + "X-User-Id": USER_ID, + "X-Project-Id": PROJ_ID} + + +class EventTestBase(v2.FunctionalTest, + tests_db.MixinTestsWithBackendScenarios): + + def setUp(self): + super(EventTestBase, self).setUp() + self._generate_models() + + def _generate_models(self): + event_models = [] + base = 0 + self.s_time = datetime.datetime(2013, 12, 31, 5, 0) + self.trait_time = datetime.datetime(2013, 12, 31, 5, 0) + for event_type in ['Foo', 'Bar', 'Zoo']: + trait_models = [models.Trait(name, type, value) + for name, type, value in [ + ('trait_A', models.Trait.TEXT_TYPE, + "my_%s_text" % event_type), + ('trait_B', models.Trait.INT_TYPE, + base + 1), + ('trait_C', models.Trait.FLOAT_TYPE, + float(base) + 0.123456), + ('trait_D', models.Trait.DATETIME_TYPE, + self.trait_time)]] + + # Message ID for test will be 'base'. So, message ID for the first + # event will be '0', the second '100', and so on. 
+ # trait_time in first event will be equal to self.trait_time + # (datetime.datetime(2013, 12, 31, 5, 0)), next will add 1 day, so + # second will be (datetime.datetime(2014, 01, 01, 5, 0)) and so on. + event_models.append( + models.Event(message_id=str(base), + event_type=event_type, + generated=self.trait_time, + traits=trait_models, + raw={'status': {'nested': 'started'}})) + base += 100 + self.trait_time += datetime.timedelta(days=1) + self.event_conn.record_events(event_models) + + +class TestEventTypeAPI(EventTestBase): + + PATH = '/event_types' + + def test_event_types(self): + data = self.get_json(self.PATH, headers=HEADERS) + for event_type in ['Foo', 'Bar', 'Zoo']: + self.assertIn(event_type, data) + + +class TestTraitAPI(EventTestBase): + + PATH = '/event_types/%s/traits' + + def test_get_traits_for_event(self): + path = self.PATH % "Foo" + data = self.get_json(path, headers=HEADERS) + + self.assertEqual(4, len(data)) + + def test_get_event_invalid_path(self): + data = self.get_json('/event_types/trait_A/', headers=HEADERS, + expect_errors=True) + self.assertEqual(404, data.status_int) + + def test_get_traits_for_non_existent_event(self): + path = self.PATH % "NO_SUCH_EVENT_TYPE" + data = self.get_json(path, headers=HEADERS) + + self.assertEqual([], data) + + def test_get_trait_data_for_event(self): + path = (self.PATH % "Foo") + "/trait_A" + data = self.get_json(path, headers=HEADERS) + self.assertEqual(1, len(data)) + self.assertEqual("trait_A", data[0]['name']) + + path = (self.PATH % "Foo") + "/trait_B" + data = self.get_json(path, headers=HEADERS) + self.assertEqual(1, len(data)) + self.assertEqual("trait_B", data[0]['name']) + self.assertEqual("1", data[0]['value']) + + path = (self.PATH % "Foo") + "/trait_D" + data = self.get_json(path, headers=HEADERS) + self.assertEqual(1, len(data)) + self.assertEqual("trait_D", data[0]['name']) + self.assertEqual((self.trait_time - datetime.timedelta(days=3)). 
+                         isoformat(), data[0]['value'])
+
+    def test_get_trait_data_for_non_existent_event(self):
+        path = (self.PATH % "NO_SUCH_EVENT") + "/trait_A"
+        data = self.get_json(path, headers=HEADERS)
+
+        self.assertEqual([], data)
+
+    def test_get_trait_data_for_non_existent_trait(self):
+        path = (self.PATH % "Foo") + "/no_such_trait"
+        data = self.get_json(path, headers=HEADERS)
+
+        self.assertEqual([], data)
+
+
+class TestEventAPI(EventTestBase):
+
+    PATH = '/events'
+
+    def test_get_events(self):
+        data = self.get_json(self.PATH, headers=HEADERS)
+        self.assertEqual(3, len(data))
+        # We expect to get native UTC generated time back
+        trait_time = self.s_time
+        for event in data:
+            expected_generated = trait_time.isoformat()
+            self.assertIn(event['event_type'], ['Foo', 'Bar', 'Zoo'])
+            self.assertEqual(4, len(event['traits']))
+            self.assertEqual({'status': {'nested': 'started'}}, event['raw'])
+            self.assertEqual(expected_generated, event['generated'])
+            for trait_name in ['trait_A', 'trait_B',
+                               'trait_C', 'trait_D']:
+                self.assertIn(trait_name, map(lambda x: x['name'],
+                                              event['traits']))
+            trait_time += datetime.timedelta(days=1)
+
+    def test_get_event_by_message_id(self):
+        event = self.get_json(self.PATH + "/100", headers=HEADERS)
+        expected_traits = [{'name': 'trait_A',
+                            'type': 'string',
+                            'value': 'my_Bar_text'},
+                           {'name': 'trait_B',
+                            'type': 'integer',
+                            'value': '101'},
+                           {'name': 'trait_C',
+                            'type': 'float',
+                            'value': '100.123456'},
+                           {'name': 'trait_D',
+                            'type': 'datetime',
+                            'value': '2014-01-01T05:00:00'}]
+        self.assertEqual('100', event['message_id'])
+        self.assertEqual('Bar', event['event_type'])
+        self.assertEqual('2014-01-01T05:00:00', event['generated'])
+        self.assertEqual(expected_traits, event['traits'])
+
+    def test_get_event_by_message_id_no_such_id(self):
+        data = self.get_json(self.PATH + "/DNE", headers=HEADERS,
+                             expect_errors=True)
+        self.assertEqual(404, data.status_int)
+
+    def test_get_events_filter_event_type(self):
+        data = self.get_json(self.PATH, headers=HEADERS,
+                             q=[{'field': 'event_type',
+                                 'value': 'Foo'}])
+        self.assertEqual(1, len(data))
+
+    def test_get_events_filter_trait_no_type(self):
+        data = self.get_json(self.PATH, headers=HEADERS,
+                             q=[{'field': 'trait_A',
+                                 'value': 'my_Foo_text'}])
+        self.assertEqual(1, len(data))
+        self.assertEqual('Foo', data[0]['event_type'])
+
+    def test_get_events_filter_trait_empty_type(self):
+        data = self.get_json(self.PATH, headers=HEADERS,
+                             q=[{'field': 'trait_A',
+                                 'value': 'my_Foo_text',
+                                 'type': ''}])
+        self.assertEqual(1, len(data))
+        self.assertEqual('Foo', data[0]['event_type'])
+
+    def test_get_events_filter_trait_invalid_type(self):
+        resp = self.get_json(self.PATH, headers=HEADERS,
+                             q=[{'field': 'trait_A',
+                                 'value': 'my_Foo_text',
+                                 'type': 'whats-up'}],
+                             expect_errors=True)
+        self.assertEqual(400, resp.status_code)
+        self.assertEqual("The data type whats-up is not supported. The "
+                         "supported data type list is: [\'integer\', "
+                         "\'float\', \'string\', \'datetime\']",
+                         resp.json['error_message']['faultstring'])
+
+    def test_get_events_filter_operator_invalid_type(self):
+        resp = self.get_json(self.PATH, headers=HEADERS,
+                             q=[{'field': 'trait_A',
+                                 'value': 'my_Foo_text',
+                                 'op': 'whats-up'}],
+                             expect_errors=True)
+        self.assertEqual(400, resp.status_code)
+        self.assertEqual("operator whats-up is not supported.
the " + "supported operators are: (\'lt\', \'le\', " + "\'eq\', \'ne\', \'ge\', \'gt\')", + resp.json['error_message']['faultstring']) + + def test_get_events_filter_text_trait(self): + data = self.get_json(self.PATH, headers=HEADERS, + q=[{'field': 'trait_A', + 'value': 'my_Foo_text', + 'type': 'string'}]) + self.assertEqual(1, len(data)) + self.assertEqual('Foo', data[0]['event_type']) + + def test_get_events_filter_int_trait(self): + data = self.get_json(self.PATH, headers=HEADERS, + q=[{'field': 'trait_B', + 'value': '101', + 'type': 'integer'}]) + self.assertEqual(1, len(data)) + self.assertEqual('Bar', data[0]['event_type']) + + traits = [x for x in data[0]['traits'] if x['name'] == 'trait_B'] + self.assertEqual(1, len(traits)) + self.assertEqual('integer', traits[0]['type']) + self.assertEqual('101', traits[0]['value']) + + def test_get_events_filter_float_trait(self): + data = self.get_json(self.PATH, headers=HEADERS, + q=[{'field': 'trait_C', + 'value': '200.123456', + 'type': 'float'}]) + self.assertEqual(1, len(data)) + self.assertEqual('Zoo', data[0]['event_type']) + + traits = [x for x in data[0]['traits'] if x['name'] == 'trait_C'] + self.assertEqual(1, len(traits)) + self.assertEqual('float', traits[0]['type']) + self.assertEqual('200.123456', traits[0]['value']) + + def test_get_events_filter_datetime_trait(self): + data = self.get_json(self.PATH, headers=HEADERS, + q=[{'field': 'trait_D', + 'value': '2014-01-01T05:00:00', + 'type': 'datetime'}]) + self.assertEqual(1, len(data)) + traits = [x for x in data[0]['traits'] if x['name'] == 'trait_D'] + self.assertEqual(1, len(traits)) + self.assertEqual('datetime', traits[0]['type']) + self.assertEqual('2014-01-01T05:00:00', traits[0]['value']) + + def test_get_events_multiple_filters(self): + data = self.get_json(self.PATH, headers=HEADERS, + q=[{'field': 'trait_B', + 'value': '1', + 'type': 'integer'}, + {'field': 'trait_A', + 'value': 'my_Foo_text', + 'type': 'string'}]) + self.assertEqual(1, len(data)) + self.assertEqual('Foo', data[0]['event_type']) + + def test_get_events_multiple_filters_no_matches(self): + data = self.get_json(self.PATH, headers=HEADERS, + q=[{'field': 'trait_B', + 'value': '101', + 'type': 'integer'}, + {'field': 'trait_A', + 'value': 'my_Foo_text', + 'type': 'string'}]) + + self.assertEqual(0, len(data)) + + def test_get_events_not_filters(self): + data = self.get_json(self.PATH, headers=HEADERS, + q=[]) + self.assertEqual(3, len(data)) + + def test_get_events_filter_op_string(self): + data = self.get_json(self.PATH, headers=HEADERS, + q=[{'field': 'trait_A', + 'value': 'my_Foo_text', + 'type': 'string', + 'op': 'eq'}]) + self.assertEqual(1, len(data)) + data = self.get_json(self.PATH, headers=HEADERS, + q=[{'field': 'trait_A', + 'value': 'my_Bar_text', + 'type': 'string', + 'op': 'lt'}]) + self.assertEqual(0, len(data)) + data = self.get_json(self.PATH, headers=HEADERS, + q=[{'field': 'trait_A', + 'value': 'my_Zoo_text', + 'type': 'string', + 'op': 'le'}]) + self.assertEqual(3, len(data)) + data = self.get_json(self.PATH, headers=HEADERS, + q=[{'field': 'trait_A', + 'value': 'my_Foo_text', + 'type': 'string', + 'op': 'ne'}]) + self.assertEqual(2, len(data)) + data = self.get_json(self.PATH, headers=HEADERS, + q=[{'field': 'trait_A', + 'value': 'my_Bar_text', + 'type': 'string', + 'op': 'gt'}]) + self.assertEqual(2, len(data)) + data = self.get_json(self.PATH, headers=HEADERS, + q=[{'field': 'trait_A', + 'value': 'my_Zoo_text', + 'type': 'string', + 'op': 'ge'}]) + self.assertEqual(1, len(data)) + + 
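+    # NOTE: _generate_models() creates exactly one event per event type,
+    # and the trait_A values order lexicographically as
+    # 'my_Bar_text' < 'my_Foo_text' < 'my_Zoo_text'; the counts asserted
+    # for the string operators above follow from that ordering.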
def test_get_events_filter_op_integer(self):
+        data = self.get_json(self.PATH, headers=HEADERS,
+                             q=[{'field': 'trait_B',
+                                 'value': '101',
+                                 'type': 'integer',
+                                 'op': 'eq'}])
+        self.assertEqual(1, len(data))
+        data = self.get_json(self.PATH, headers=HEADERS,
+                             q=[{'field': 'trait_B',
+                                 'value': '201',
+                                 'type': 'integer',
+                                 'op': 'lt'}])
+        self.assertEqual(2, len(data))
+        data = self.get_json(self.PATH, headers=HEADERS,
+                             q=[{'field': 'trait_B',
+                                 'value': '1',
+                                 'type': 'integer',
+                                 'op': 'le'}])
+        self.assertEqual(1, len(data))
+        data = self.get_json(self.PATH, headers=HEADERS,
+                             q=[{'field': 'trait_B',
+                                 'value': '101',
+                                 'type': 'integer',
+                                 'op': 'ne'}])
+        self.assertEqual(2, len(data))
+        data = self.get_json(self.PATH, headers=HEADERS,
+                             q=[{'field': 'trait_B',
+                                 'value': '201',
+                                 'type': 'integer',
+                                 'op': 'gt'}])
+        self.assertEqual(0, len(data))
+        data = self.get_json(self.PATH, headers=HEADERS,
+                             q=[{'field': 'trait_B',
+                                 'value': '1',
+                                 'type': 'integer',
+                                 'op': 'ge'}])
+        self.assertEqual(3, len(data))
+
+    def test_get_events_filter_op_float(self):
+        data = self.get_json(self.PATH, headers=HEADERS,
+                             q=[{'field': 'trait_C',
+                                 'value': '100.123456',
+                                 'type': 'float',
+                                 'op': 'eq'}])
+        self.assertEqual(1, len(data))
+        data = self.get_json(self.PATH, headers=HEADERS,
+                             q=[{'field': 'trait_C',
+                                 'value': '200.123456',
+                                 'type': 'float',
+                                 'op': 'lt'}])
+        self.assertEqual(2, len(data))
+        data = self.get_json(self.PATH, headers=HEADERS,
+                             q=[{'field': 'trait_C',
+                                 'value': '0.123456',
+                                 'type': 'float',
+                                 'op': 'le'}])
+        self.assertEqual(1, len(data))
+        data = self.get_json(self.PATH, headers=HEADERS,
+                             q=[{'field': 'trait_C',
+                                 'value': '100.123456',
+                                 'type': 'float',
+                                 'op': 'ne'}])
+        self.assertEqual(2, len(data))
+        data = self.get_json(self.PATH, headers=HEADERS,
+                             q=[{'field': 'trait_C',
+                                 'value': '200.123456',
+                                 'type': 'float',
+                                 'op': 'gt'}])
+        self.assertEqual(0, len(data))
+        data = self.get_json(self.PATH, headers=HEADERS,
+                             q=[{'field': 'trait_C',
+                                 'value': '0.123456',
+                                 'type': 'float',
+                                 'op': 'ge'}])
+        self.assertEqual(3, len(data))
+
+    def test_get_events_filter_op_datetime(self):
+        data = self.get_json(self.PATH, headers=HEADERS,
+                             q=[{'field': 'trait_D',
+                                 'value': '2014-01-01T05:00:00',
+                                 'type': 'datetime',
+                                 'op': 'eq'}])
+        self.assertEqual(1, len(data))
+        data = self.get_json(self.PATH, headers=HEADERS,
+                             q=[{'field': 'trait_D',
+                                 'value': '2014-01-02T05:00:00',
+                                 'type': 'datetime',
+                                 'op': 'lt'}])
+        self.assertEqual(2, len(data))
+        data = self.get_json(self.PATH, headers=HEADERS,
+                             q=[{'field': 'trait_D',
+                                 'value': '2013-12-31T05:00:00',
+                                 'type': 'datetime',
+                                 'op': 'le'}])
+        self.assertEqual(1, len(data))
+        data = self.get_json(self.PATH, headers=HEADERS,
+                             q=[{'field': 'trait_D',
+                                 'value': '2014-01-01T05:00:00',
+                                 'type': 'datetime',
+                                 'op': 'ne'}])
+        self.assertEqual(2, len(data))
+        data = self.get_json(self.PATH, headers=HEADERS,
+                             q=[{'field': 'trait_D',
+                                 'value': '2014-01-02T05:00:00',
+                                 'type': 'datetime',
+                                 'op': 'gt'}])
+        self.assertEqual(0, len(data))
+        data = self.get_json(self.PATH, headers=HEADERS,
+                             q=[{'field': 'trait_D',
+                                 'value': '2013-12-31T05:00:00',
+                                 'type': 'datetime',
+                                 'op': 'ge'}])
+        self.assertEqual(3, len(data))
+
+    def test_get_events_filter_wrong_op(self):
+        self.assertRaises(webtest.app.AppError,
+                          self.get_json, self.PATH, headers=HEADERS,
+                          q=[{'field': 'trait_B',
+                              'value': '1',
+                              'type': 'integer',
+                              'op': 'el'}])
+
+
+class AclRestrictedEventTestBase(v2.FunctionalTest,
+                                 tests_db.MixinTestsWithBackendScenarios):
+
def setUp(self): + super(AclRestrictedEventTestBase, self).setUp() + self.admin_user_id = uuid.uuid4().hex + self.admin_proj_id = uuid.uuid4().hex + self.user_id = uuid.uuid4().hex + self.proj_id = uuid.uuid4().hex + self._generate_models() + + def _generate_models(self): + event_models = [] + self.s_time = datetime.datetime(2013, 12, 31, 5, 0) + event_models.append( + models.Event(message_id='1', + event_type='empty_ev', + generated=self.s_time, + traits=[models.Trait('random', + models.Trait.TEXT_TYPE, + 'blah')], + raw={})) + event_models.append( + models.Event(message_id='2', + event_type='admin_ev', + generated=self.s_time, + traits=[models.Trait('project_id', + models.Trait.TEXT_TYPE, + self.admin_proj_id), + models.Trait('user_id', + models.Trait.TEXT_TYPE, + self.admin_user_id)], + raw={})) + event_models.append( + models.Event(message_id='3', + event_type='user_ev', + generated=self.s_time, + traits=[models.Trait('project_id', + models.Trait.TEXT_TYPE, + self.proj_id), + models.Trait('user_id', + models.Trait.TEXT_TYPE, + self.user_id)], + raw={})) + self.event_conn.record_events(event_models) + + def test_non_admin_access(self): + a_headers = {"X-Roles": "member", + "X-User-Id": self.user_id, + "X-Project-Id": self.proj_id} + data = self.get_json('/events', headers=a_headers) + self.assertEqual(1, len(data)) + self.assertEqual('user_ev', data[0]['event_type']) + + def test_non_admin_access_single(self): + a_headers = {"X-Roles": "member", + "X-User-Id": self.user_id, + "X-Project-Id": self.proj_id} + data = self.get_json('/events/3', headers=a_headers) + self.assertEqual('user_ev', data['event_type']) + + def test_non_admin_access_incorrect_user(self): + a_headers = {"X-Roles": "member", + "X-User-Id": 'blah', + "X-Project-Id": self.proj_id} + data = self.get_json('/events', headers=a_headers) + self.assertEqual(0, len(data)) + + def test_non_admin_access_incorrect_proj(self): + a_headers = {"X-Roles": "member", + "X-User-Id": self.user_id, + "X-Project-Id": 'blah'} + data = self.get_json('/events', headers=a_headers) + self.assertEqual(0, len(data)) + + def test_non_admin_access_single_invalid(self): + a_headers = {"X-Roles": "member", + "X-User-Id": self.user_id, + "X-Project-Id": self.proj_id} + data = self.get_json('/events/1', headers=a_headers, + expect_errors=True) + self.assertEqual(404, data.status_int) + + +class EventRestrictionTestBase(v2.FunctionalTest, + tests_db.MixinTestsWithBackendScenarios): + + def setUp(self): + super(EventRestrictionTestBase, self).setUp() + self.CONF.set_override('default_api_return_limit', 10, group='api') + self._generate_models() + + def _generate_models(self): + event_models = [] + base = 0 + self.s_time = datetime.datetime(2013, 12, 31, 5, 0) + self.trait_time = datetime.datetime(2013, 12, 31, 5, 0) + for i in range(20): + trait_models = [models.Trait(name, type, value) + for name, type, value in [ + ('trait_A', models.Trait.TEXT_TYPE, + "my_text"), + ('trait_B', models.Trait.INT_TYPE, + base + 1), + ('trait_C', models.Trait.FLOAT_TYPE, + float(base) + 0.123456), + ('trait_D', models.Trait.DATETIME_TYPE, + self.trait_time)]] + + event_models.append( + models.Event(message_id=str(uuid.uuid4()), + event_type='foo.bar', + generated=self.trait_time, + traits=trait_models, + raw={'status': {'nested': 'started'}})) + self.trait_time += datetime.timedelta(seconds=1) + self.event_conn.record_events(event_models) + + +class TestEventRestriction(EventRestrictionTestBase): + + def test_get_limit(self): + data = self.get_json('/events?limit=1', 
headers=HEADERS) + self.assertEqual(1, len(data)) + + def test_get_limit_negative(self): + self.assertRaises(webtest.app.AppError, + self.get_json, '/events?limit=-2', headers=HEADERS) + + def test_get_limit_bigger(self): + data = self.get_json('/events?limit=100', headers=HEADERS) + self.assertEqual(20, len(data)) + + def test_get_default_limit(self): + data = self.get_json('/events', headers=HEADERS) + self.assertEqual(10, len(data)) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/api/v2/test_list_events_scenarios.py ceilometer-5.0.0~b3/ceilometer/tests/functional/api/v2/test_list_events_scenarios.py --- ceilometer-5.0.0~b2/ceilometer/tests/functional/api/v2/test_list_events_scenarios.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/api/v2/test_list_events_scenarios.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,158 @@ +# +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Test listing raw events. +""" + +import datetime + +import mock +from oslo_utils import timeutils +import six + +from ceilometer.publisher import utils +from ceilometer import sample +from ceilometer.tests import db as tests_db +from ceilometer.tests.functional.api import v2 + + +class TestListEvents(v2.FunctionalTest, + tests_db.MixinTestsWithBackendScenarios): + + def setUp(self): + super(TestListEvents, self).setUp() + patcher = mock.patch.object(timeutils, 'utcnow') + self.addCleanup(patcher.stop) + self.mock_utcnow = patcher.start() + self.mock_utcnow.return_value = datetime.datetime(2014, 2, 11, 16, 42) + self.sample1 = sample.Sample( + 'instance', + 'cumulative', + '', + 1, + 'user-id', + 'project1', + 'resource-id', + timestamp=datetime.datetime(2012, 7, 2, 10, 40), + resource_metadata={'display_name': 'test-server', + 'tag': 'self.sample', + 'dict_properties': {'key': 'value'}, + 'not_ignored_list': ['returned'], + }, + source='test_source', + ) + msg = utils.meter_message_from_counter( + self.sample1, self.CONF.publisher.telemetry_secret, + ) + self.conn.record_metering_data(msg) + + self.sample2 = sample.Sample( + 'instance', + 'cumulative', + '', + 1, + 'user-id2', + 'project2', + 'resource-id-alternate', + timestamp=datetime.datetime(2012, 7, 2, 10, 41), + resource_metadata={'display_name': 'test-server', + 'tag': 'self.sample2', + }, + source='source2', + ) + msg2 = utils.meter_message_from_counter( + self.sample2, self.CONF.publisher.telemetry_secret, + ) + self.conn.record_metering_data(msg2) + + def test_all(self): + data = self.get_json('/meters/instance') + self.assertEqual(2, len(data)) + for s in data: + self.assertEqual(timeutils.utcnow().isoformat(), s['recorded_at']) + + def test_all_trailing_slash(self): + data = self.get_json('/meters/instance/') + self.assertEqual(2, len(data)) + + def test_empty_project(self): + data = self.get_json('/meters/instance', + q=[{'field': 'project_id', + 'value': 'no-such-project', + }]) + self.assertEqual([], data) + + def test_by_project(self): + data = 
self.get_json('/meters/instance', + q=[{'field': 'project_id', + 'value': 'project1', + }]) + self.assertEqual(1, len(data)) + + def test_empty_resource(self): + data = self.get_json('/meters/instance', + q=[{'field': 'resource_id', + 'value': 'no-such-resource', + }]) + self.assertEqual([], data) + + def test_by_resource(self): + data = self.get_json('/meters/instance', + q=[{'field': 'resource_id', + 'value': 'resource-id', + }]) + self.assertEqual(1, len(data)) + + def test_empty_source(self): + data = self.get_json('/meters/instance', + q=[{'field': 'source', + 'value': 'no-such-source', + }]) + self.assertEqual(0, len(data)) + + def test_by_source(self): + data = self.get_json('/meters/instance', + q=[{'field': 'source', + 'value': 'test_source', + }]) + self.assertEqual(1, len(data)) + + def test_empty_user(self): + data = self.get_json('/meters/instance', + q=[{'field': 'user_id', + 'value': 'no-such-user', + }]) + self.assertEqual([], data) + + def test_by_user(self): + data = self.get_json('/meters/instance', + q=[{'field': 'user_id', + 'value': 'user-id', + }]) + self.assertEqual(1, len(data)) + + def test_metadata(self): + data = self.get_json('/meters/instance', + q=[{'field': 'resource_id', + 'value': 'resource-id', + }]) + sample = data[0] + self.assertIn('resource_metadata', sample) + self.assertEqual( + [('dict_properties.key', 'value'), + ('display_name', 'test-server'), + ('not_ignored_list', "['returned']"), + ('tag', 'self.sample'), + ], + list(sorted(six.iteritems(sample['resource_metadata'])))) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/api/v2/test_list_meters_scenarios.py ceilometer-5.0.0~b3/ceilometer/tests/functional/api/v2/test_list_meters_scenarios.py --- ceilometer-5.0.0~b2/ceilometer/tests/functional/api/v2/test_list_meters_scenarios.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/api/v2/test_list_meters_scenarios.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,791 @@ +# +# Copyright 2012 Red Hat, Inc. +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Test listing meters. 
+""" + +import base64 +import datetime + +from oslo_serialization import jsonutils +import six +import webtest.app + +from ceilometer.publisher import utils +from ceilometer import sample +from ceilometer.tests import db as tests_db +from ceilometer.tests.functional.api import v2 + + +class TestListEmptyMeters(v2.FunctionalTest, + tests_db.MixinTestsWithBackendScenarios): + + def test_empty(self): + data = self.get_json('/meters') + self.assertEqual([], data) + + +class TestValidateUserInput(v2.FunctionalTest, + tests_db.MixinTestsWithBackendScenarios): + + def test_list_meters_query_float_metadata(self): + self.assertRaises(webtest.app.AppError, self.get_json, + '/meters/meter.test', + q=[{'field': 'metadata.util', + 'op': 'eq', + 'value': '0.7.5', + 'type': 'float'}]) + self.assertRaises(webtest.app.AppError, self.get_json, + '/meters/meter.test', + q=[{'field': 'metadata.util', + 'op': 'eq', + 'value': 'abacaba', + 'type': 'boolean'}]) + self.assertRaises(webtest.app.AppError, self.get_json, + '/meters/meter.test', + q=[{'field': 'metadata.util', + 'op': 'eq', + 'value': '45.765', + 'type': 'integer'}]) + + +class TestListMetersRestriction(v2.FunctionalTest, + tests_db.MixinTestsWithBackendScenarios): + + def setUp(self): + super(TestListMetersRestriction, self).setUp() + self.CONF.set_override('default_api_return_limit', 3, group='api') + for x in range(5): + for i in range(5): + s = sample.Sample( + 'volume.size%s' % x, + 'gauge', + 'GiB', + 5 + i, + 'user-id', + 'project1', + 'resource-id', + timestamp=(datetime.datetime(2012, 9, 25, 10, 30) + + datetime.timedelta(seconds=i)), + resource_metadata={'display_name': 'test-volume', + 'tag': 'self.sample', + }, + source='source1', + ) + msg = utils.meter_message_from_counter( + s, self.CONF.publisher.telemetry_secret, + ) + self.conn.record_metering_data(msg) + + def test_meter_limit(self): + data = self.get_json('/meters?limit=1') + self.assertEqual(1, len(data)) + + def test_meter_limit_negative(self): + self.assertRaises(webtest.app.AppError, + self.get_json, + '/meters?limit=-2') + + def test_meter_limit_bigger(self): + data = self.get_json('/meters?limit=42') + self.assertEqual(5, len(data)) + + def test_meter_default_limit(self): + data = self.get_json('/meters') + self.assertEqual(3, len(data)) + + def test_old_sample_limit(self): + data = self.get_json('/meters/volume.size0?limit=1') + self.assertEqual(1, len(data)) + + def test_old_sample_limit_negative(self): + self.assertRaises(webtest.app.AppError, + self.get_json, + '/meters/volume.size0?limit=-2') + + def test_old_sample_limit_bigger(self): + data = self.get_json('/meters/volume.size0?limit=42') + self.assertEqual(5, len(data)) + + def test_old_sample_default_limit(self): + data = self.get_json('/meters/volume.size0') + self.assertEqual(3, len(data)) + + def test_sample_limit(self): + data = self.get_json('/samples?limit=1') + self.assertEqual(1, len(data)) + + def test_sample_limit_negative(self): + self.assertRaises(webtest.app.AppError, + self.get_json, + '/samples?limit=-2') + + def test_sample_limit_bigger(self): + data = self.get_json('/samples?limit=42') + self.assertEqual(25, len(data)) + + def test_sample_default_limit(self): + data = self.get_json('/samples') + self.assertEqual(3, len(data)) + + +class TestListMeters(v2.FunctionalTest, + tests_db.MixinTestsWithBackendScenarios): + + def setUp(self): + super(TestListMeters, self).setUp() + self.messages = [] + for cnt in [ + sample.Sample( + 'meter.test', + 'cumulative', + '', + 1, + 'user-id', + 'project-id', + 
'resource-id', + timestamp=datetime.datetime(2012, 7, 2, 10, 40), + resource_metadata={'display_name': 'test-server', + 'tag': 'self.sample', + 'size': 123, + 'util': 0.75, + 'is_public': True}, + source='test_source'), + sample.Sample( + 'meter.test', + 'cumulative', + '', + 3, + 'user-id', + 'project-id', + 'resource-id', + timestamp=datetime.datetime(2012, 7, 2, 11, 40), + resource_metadata={'display_name': 'test-server', + 'tag': 'self.sample1', + 'size': 0, + 'util': 0.47, + 'is_public': False}, + source='test_source'), + sample.Sample( + 'meter.mine', + 'gauge', + '', + 1, + 'user-id', + 'project-id', + 'resource-id2', + timestamp=datetime.datetime(2012, 7, 2, 10, 41), + resource_metadata={'display_name': 'test-server', + 'tag': 'self.sample2', + 'size': 456, + 'util': 0.64, + 'is_public': False}, + source='test_source'), + sample.Sample( + 'meter.test', + 'cumulative', + '', + 1, + 'user-id2', + 'project-id2', + 'resource-id3', + timestamp=datetime.datetime(2012, 7, 2, 10, 42), + resource_metadata={'display_name': 'test-server', + 'tag': 'self.sample3', + 'size': 0, + 'util': 0.75, + 'is_public': False}, + source='test_source'), + sample.Sample( + 'meter.test.new', + 'cumulative', + '', + 1, + 'user-id', + 'project-id', + 'resource-id', + timestamp=datetime.datetime(2012, 7, 2, 10, 40), + resource_metadata={'display_name': 'test-server', + 'tag': 'self.sample3', + 'size': 0, + 'util': 0.75, + 'is_public': False}, + source='test_source'), + + sample.Sample( + 'meter.mine', + 'gauge', + '', + 1, + 'user-id4', + 'project-id2', + 'resource-id4', + timestamp=datetime.datetime(2012, 7, 2, 10, 43), + resource_metadata={'display_name': 'test-server', + 'tag': 'self.sample4', + 'properties': { + 'prop_1': 'prop_value', + 'prop_2': {'sub_prop_1': + 'sub_prop_value'}, + 'prop.3': {'$sub_prop.2': + 'sub_prop_value2'} + }, + 'size': 0, + 'util': 0.58, + 'is_public': True}, + source='test_source1'), + sample.Sample( + u'meter.accent\xe9\u0437', + 'gauge', + '', + 1, + 'user-id4', + 'project-id2', + 'resource-id4', + timestamp=datetime.datetime(2014, 7, 2, 10, 43), + resource_metadata={}, + source='test_source1')]: + msg = utils.meter_message_from_counter( + cnt, self.CONF.publisher.telemetry_secret) + self.messages.append(msg) + self.conn.record_metering_data(msg) + + def test_list_meters(self): + data = self.get_json('/meters') + self.assertEqual(6, len(data)) + self.assertEqual(set(['resource-id', + 'resource-id2', + 'resource-id3', + 'resource-id4']), + set(r['resource_id'] for r in data)) + self.assertEqual(set(['meter.test', 'meter.mine', 'meter.test.new', + u'meter.accent\xe9\u0437']), + set(r['name'] for r in data)) + self.assertEqual(set(['test_source', 'test_source1']), + set(r['source'] for r in data)) + + def test_meters_query_with_timestamp(self): + date_time = datetime.datetime(2012, 7, 2, 10, 41) + isotime = date_time.isoformat() + resp = self.get_json('/meters', + q=[{'field': 'timestamp', + 'op': 'gt', + 'value': isotime}], + expect_errors=True) + self.assertEqual(400, resp.status_code) + self.assertEqual('Unknown argument: "timestamp": ' + 'not valid for this resource', + jsonutils.loads(resp.body)['error_message'] + ['faultstring']) + + def test_list_samples(self): + data = self.get_json('/samples') + self.assertEqual(7, len(data)) + + def test_query_samples_with_invalid_field_name_and_non_eq_operator(self): + resp = self.get_json('/samples', + q=[{'field': 'non_valid_field_name', + 'op': 'gt', + 'value': 3}], + expect_errors=True) + resp_string = jsonutils.loads(resp.body) + 
fault_string = resp_string['error_message']['faultstring'] + msg = ('Unknown argument: "non_valid_field_name"' + ': unrecognized field in query: ' + '[= res['first_sample_timestamp']) + self.assertIn('last_sample_timestamp', res) + self.assertTrue(last.isoformat() <= res['last_sample_timestamp']) + + def test_instance_no_metadata(self): + timestamp = datetime.datetime(2012, 7, 2, 10, 40) + sample1 = sample.Sample( + 'instance', + 'cumulative', + '', + 1, + 'user-id', + 'project-id', + 'resource-id', + timestamp=timestamp, + resource_metadata=None, + source='test', + ) + msg = utils.meter_message_from_counter( + sample1, self.CONF.publisher.telemetry_secret, + ) + self.conn.record_metering_data(msg) + + data = self.get_json('/resources') + self.assertEqual(1, len(data)) + self._verify_resource_timestamps(data[0], timestamp, timestamp) + + def test_instances(self): + timestamps = { + 'resource-id': datetime.datetime(2012, 7, 2, 10, 40), + 'resource-id-alternate': datetime.datetime(2012, 7, 2, 10, 41), + } + sample1 = sample.Sample( + 'instance', + 'cumulative', + '', + 1, + 'user-id', + 'project-id', + 'resource-id', + timestamp=timestamps['resource-id'], + resource_metadata={'display_name': 'test-server', + 'tag': 'self.sample', + }, + source='test', + ) + msg = utils.meter_message_from_counter( + sample1, self.CONF.publisher.telemetry_secret, + ) + self.conn.record_metering_data(msg) + + sample2 = sample.Sample( + 'instance', + 'cumulative', + '', + 1, + 'user-id', + 'project-id', + 'resource-id-alternate', + timestamp=timestamps['resource-id-alternate'], + resource_metadata={'display_name': 'test-server', + 'tag': 'self.sample2', + }, + source='test', + ) + msg2 = utils.meter_message_from_counter( + sample2, self.CONF.publisher.telemetry_secret, + ) + self.conn.record_metering_data(msg2) + + data = self.get_json('/resources') + self.assertEqual(2, len(data)) + for res in data: + timestamp = timestamps.get(res['resource_id']) + self._verify_resource_timestamps(res, timestamp, timestamp) + + def test_instance_multiple_samples(self): + timestamps = [ + datetime.datetime(2012, 7, 2, 10, 41), + datetime.datetime(2012, 7, 2, 10, 42), + datetime.datetime(2012, 7, 2, 10, 40), + ] + for timestamp in timestamps: + datapoint = sample.Sample( + 'instance', + 'cumulative', + '', + 1, + 'user-id', + 'project-id', + 'resource-id', + timestamp=timestamp, + resource_metadata={'display_name': 'test-server', + 'tag': 'self.sample-%s' % timestamp, + }, + source='test', + ) + msg = utils.meter_message_from_counter( + datapoint, + self.CONF.publisher.telemetry_secret, + ) + self.conn.record_metering_data(msg) + + data = self.get_json('/resources') + self.assertEqual(1, len(data)) + self._verify_resource_timestamps(data[0], + timestamps[-1], timestamps[1]) + + def test_instances_one(self): + sample1 = sample.Sample( + 'instance', + 'cumulative', + '', + 1, + 'user-id', + 'project-id', + 'resource-id', + timestamp=datetime.datetime(2012, 7, 2, 10, 40), + resource_metadata={'display_name': 'test-server', + 'tag': 'self.sample', + }, + source='test', + ) + msg = utils.meter_message_from_counter( + sample1, self.CONF.publisher.telemetry_secret, + ) + self.conn.record_metering_data(msg) + + sample2 = sample.Sample( + 'instance', + 'cumulative', + '', + 1, + 'user-id', + 'project-id', + 'resource-id-alternate', + timestamp=datetime.datetime(2012, 7, 2, 10, 41), + resource_metadata={'display_name': 'test-server', + 'tag': 'self.sample2', + }, + source='test', + ) + msg2 = utils.meter_message_from_counter( + sample2, 
self.CONF.publisher.telemetry_secret, + ) + self.conn.record_metering_data(msg2) + + data = self.get_json('/resources/resource-id') + self.assertEqual('resource-id', data['resource_id']) + + def test_with_source(self): + sample1 = sample.Sample( + 'instance', + 'cumulative', + '', + 1, + 'user-id', + 'project-id', + 'resource-id', + timestamp=datetime.datetime(2012, 7, 2, 10, 40), + resource_metadata={'display_name': 'test-server', + 'tag': 'self.sample', + }, + source='test_list_resources', + ) + msg = utils.meter_message_from_counter( + sample1, self.CONF.publisher.telemetry_secret, + ) + self.conn.record_metering_data(msg) + + sample2 = sample.Sample( + 'instance', + 'cumulative', + '', + 1, + 'user-id2', + 'project-id', + 'resource-id-alternate', + timestamp=datetime.datetime(2012, 7, 2, 10, 41), + resource_metadata={'display_name': 'test-server', + 'tag': 'self.sample2', + }, + source='not-test', + ) + msg2 = utils.meter_message_from_counter( + sample2, self.CONF.publisher.telemetry_secret, + ) + self.conn.record_metering_data(msg2) + + data = self.get_json('/resources', q=[{'field': 'source', + 'value': 'test_list_resources', + }]) + ids = [r['resource_id'] for r in data] + self.assertEqual(['resource-id'], ids) + sources = [r['source'] for r in data] + self.assertEqual(['test_list_resources'], sources) + + def test_with_invalid_resource_id(self): + sample1 = sample.Sample( + 'instance', + 'cumulative', + '', + 1, + 'user-id', + 'project-id', + 'resource-id-1', + timestamp=datetime.datetime(2012, 7, 2, 10, 40), + resource_metadata={'display_name': 'test-server', + 'tag': 'self.sample', + }, + source='test_list_resources', + ) + msg = utils.meter_message_from_counter( + sample1, self.CONF.publisher.telemetry_secret, + ) + self.conn.record_metering_data(msg) + + sample2 = sample.Sample( + 'instance', + 'cumulative', + '', + 1, + 'user-id2', + 'project-id', + 'resource-id-2', + timestamp=datetime.datetime(2012, 7, 2, 10, 41), + resource_metadata={'display_name': 'test-server', + 'tag': 'self.sample2', + }, + source='test_list_resources', + ) + msg2 = utils.meter_message_from_counter( + sample2, self.CONF.publisher.telemetry_secret, + ) + self.conn.record_metering_data(msg2) + + resp1 = self.get_json('/resources/resource-id-1') + self.assertEqual("resource-id-1", resp1["resource_id"]) + + resp2 = self.get_json('/resources/resource-id-2') + self.assertEqual("resource-id-2", resp2["resource_id"]) + + resp3 = self.get_json('/resources/resource-id-3', expect_errors=True) + self.assertEqual(404, resp3.status_code) + json_data = resp3.body + if six.PY3: + json_data = json_data.decode('utf-8') + self.assertEqual("Resource resource-id-3 Not Found", + json.loads(json_data)['error_message'] + ['faultstring']) + + def test_with_user(self): + sample1 = sample.Sample( + 'instance', + 'cumulative', + '', + 1, + 'user-id', + 'project-id', + 'resource-id', + timestamp=datetime.datetime(2012, 7, 2, 10, 40), + resource_metadata={'display_name': 'test-server', + 'tag': 'self.sample', + }, + source='test_list_resources', + ) + msg = utils.meter_message_from_counter( + sample1, self.CONF.publisher.telemetry_secret, + ) + self.conn.record_metering_data(msg) + + sample2 = sample.Sample( + 'instance', + 'cumulative', + '', + 1, + 'user-id2', + 'project-id', + 'resource-id-alternate', + timestamp=datetime.datetime(2012, 7, 2, 10, 41), + resource_metadata={'display_name': 'test-server', + 'tag': 'self.sample2', + }, + source='not-test', + ) + msg2 = utils.meter_message_from_counter( + sample2, 
self.CONF.publisher.telemetry_secret, + ) + self.conn.record_metering_data(msg2) + + data = self.get_json('/resources', q=[{'field': 'user_id', + 'value': 'user-id', + }]) + ids = [r['resource_id'] for r in data] + self.assertEqual(['resource-id'], ids) + + def test_with_project(self): + sample1 = sample.Sample( + 'instance', + 'cumulative', + '', + 1, + 'user-id', + 'project-id', + 'resource-id', + timestamp=datetime.datetime(2012, 7, 2, 10, 40), + resource_metadata={'display_name': 'test-server', + 'tag': 'self.sample', + }, + source='test_list_resources', + ) + msg = utils.meter_message_from_counter( + sample1, self.CONF.publisher.telemetry_secret, + ) + self.conn.record_metering_data(msg) + + sample2 = sample.Sample( + 'instance', + 'cumulative', + '', + 1, + 'user-id2', + 'project-id2', + 'resource-id-alternate', + timestamp=datetime.datetime(2012, 7, 2, 10, 41), + resource_metadata={'display_name': 'test-server', + 'tag': 'self.sample2', + }, + source='not-test', + ) + msg2 = utils.meter_message_from_counter( + sample2, self.CONF.publisher.telemetry_secret, + ) + self.conn.record_metering_data(msg2) + + data = self.get_json('/resources', q=[{'field': 'project_id', + 'value': 'project-id', + }]) + ids = [r['resource_id'] for r in data] + self.assertEqual(['resource-id'], ids) + + def test_with_user_non_admin(self): + sample1 = sample.Sample( + 'instance', + 'cumulative', + '', + 1, + 'user-id2', + 'project-id2', + 'resource-id-alternate', + timestamp=datetime.datetime(2012, 7, 2, 10, 41), + resource_metadata={'display_name': 'test-server', + 'tag': 'self.sample1', + }, + source='not-test', + ) + msg2 = utils.meter_message_from_counter( + sample1, self.CONF.publisher.telemetry_secret, + ) + self.conn.record_metering_data(msg2) + + data = self.get_json('/resources', + headers={"X-Roles": "Member", + "X-Project-Id": "project-id2"}) + ids = set(r['resource_id'] for r in data) + self.assertEqual(set(['resource-id-alternate']), ids) + + def test_with_user_wrong_tenant(self): + sample1 = sample.Sample( + 'instance', + 'cumulative', + '', + 1, + 'user-id2', + 'project-id2', + 'resource-id-alternate', + timestamp=datetime.datetime(2012, 7, 2, 10, 41), + resource_metadata={'display_name': 'test-server', + 'tag': 'self.sample1', + }, + source='not-test', + ) + msg2 = utils.meter_message_from_counter( + sample1, self.CONF.publisher.telemetry_secret, + ) + self.conn.record_metering_data(msg2) + + data = self.get_json('/resources', + headers={"X-Roles": "Member", + "X-Project-Id": "project-wrong"}) + ids = set(r['resource_id'] for r in data) + self.assertEqual(set(), ids) + + def test_metadata(self): + sample1 = sample.Sample( + 'instance', + 'cumulative', + '', + 1, + 'user-id', + 'project-id', + 'resource-id', + timestamp=datetime.datetime(2012, 7, 2, 10, 40), + resource_metadata={'display_name': 'test-server', + 'tag': 'self.sample', + 'dict_properties': {'key.$1': {'$key': 'val'}}, + 'not_ignored_list': ['returned'], + }, + source='test', + ) + msg = utils.meter_message_from_counter( + sample1, self.CONF.publisher.telemetry_secret, + ) + self.conn.record_metering_data(msg) + + data = self.get_json('/resources') + metadata = data[0]['metadata'] + self.assertEqual([(u'dict_properties.key:$1:$key', u'val'), + (u'display_name', u'test-server'), + (u'not_ignored_list', u"['returned']"), + (u'tag', u'self.sample')], + list(sorted(six.iteritems(metadata)))) + + def test_resource_meter_links(self): + sample1 = sample.Sample( + 'instance', + 'cumulative', + '', + 1, + 'user-id', + 'project-id', + 
'resource-id',
+            timestamp=datetime.datetime(2012, 7, 2, 10, 40),
+            resource_metadata={'display_name': 'test-server',
+                               'tag': 'self.sample',
+                               },
+            source='test_list_resources',
+        )
+        msg = utils.meter_message_from_counter(
+            sample1, self.CONF.publisher.telemetry_secret,
+        )
+        self.conn.record_metering_data(msg)
+
+        data = self.get_json('/resources')
+        links = data[0]['links']
+        self.assertEqual(2, len(links))
+        self.assertEqual('self', links[0]['rel'])
+        self.assertTrue((self.PATH_PREFIX + '/resources/resource-id')
+                        in links[0]['href'])
+        self.assertEqual('instance', links[1]['rel'])
+        self.assertTrue((self.PATH_PREFIX + '/meters/instance?'
+                         'q.field=resource_id&q.value=resource-id')
+                        in links[1]['href'])
+
+    def test_resource_skip_meter_links(self):
+        sample1 = sample.Sample(
+            'instance',
+            'cumulative',
+            '',
+            1,
+            'user-id',
+            'project-id',
+            'resource-id',
+            timestamp=datetime.datetime(2012, 7, 2, 10, 40),
+            resource_metadata={'display_name': 'test-server',
+                               'tag': 'self.sample',
+                               },
+            source='test_list_resources',
+        )
+        msg = utils.meter_message_from_counter(
+            sample1, self.CONF.publisher.telemetry_secret,
+        )
+        self.conn.record_metering_data(msg)
+
+        data = self.get_json('/resources?meter_links=0')
+        links = data[0]['links']
+        self.assertEqual(1, len(links))
+        self.assertEqual('self', links[0]['rel'])
+        self.assertTrue((self.PATH_PREFIX + '/resources/resource-id')
+                        in links[0]['href'])
+
+
+class TestListResourcesRestriction(v2.FunctionalTest,
+                                   tests_db.MixinTestsWithBackendScenarios):
+
+    def setUp(self):
+        super(TestListResourcesRestriction, self).setUp()
+        self.CONF.set_override('default_api_return_limit', 10, group='api')
+        for i in range(20):
+            s = sample.Sample(
+                'volume.size',
+                'gauge',
+                'GiB',
+                5 + i,
+                'user-id',
+                'project1',
+                'resource-id%s' % i,
+                timestamp=(datetime.datetime(2012, 9, 25, 10, 30) +
+                           datetime.timedelta(seconds=i)),
+                resource_metadata={'display_name': 'test-volume',
+                                   'tag': 'self.sample',
+                                   },
+                source='source1',
+            )
+            msg = utils.meter_message_from_counter(
+                s, self.CONF.publisher.telemetry_secret,
+            )
+            self.conn.record_metering_data(msg)
+
+    def test_resource_limit(self):
+        data = self.get_json('/resources?limit=1')
+        self.assertEqual(1, len(data))
+
+    def test_resource_limit_negative(self):
+        self.assertRaises(webtest.app.AppError, self.get_json,
+                          '/resources?limit=-2')
+
+    def test_resource_limit_bigger(self):
+        data = self.get_json('/resources?limit=42')
+        self.assertEqual(20, len(data))
+
+    def test_resource_default_limit(self):
+        data = self.get_json('/resources')
+        self.assertEqual(10, len(data))
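Taken together, the four restriction tests pin down the listing limit semantics: an explicit limit always wins (limit=1 returns one resource, limit=42 returns all 20 recorded), a negative limit is rejected outright, and a GET with no limit falls back to the configured default. As a minimal sketch of the deployment-side equivalent of the set_override() call above, assuming the usual oslo.config layout in which default_api_return_limit lives in the [api] section:

    [api]
    # cap for unpaginated API listings; an explicit ?limit= overrides it
    default_api_return_limit = 10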
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/api/v2/test_post_samples_scenarios.py ceilometer-5.0.0~b3/ceilometer/tests/functional/api/v2/test_post_samples_scenarios.py
--- ceilometer-5.0.0~b2/ceilometer/tests/functional/api/v2/test_post_samples_scenarios.py 1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/functional/api/v2/test_post_samples_scenarios.py 2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,349 @@
+#
+# Copyright 2013 Red Hat, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Test posting samples."""
+
+import copy
+import datetime
+
+import mock
+from oslo_utils import timeutils
+from oslotest import mockpatch
+
+from ceilometer.tests import db as tests_db
+from ceilometer.tests.functional.api import v2
+
+
+class TestPostSamples(v2.FunctionalTest,
+                      tests_db.MixinTestsWithBackendScenarios):
+    def fake_notifier_sample(self, ctxt, event_type, payload):
+        samples = payload['samples']
+        for m in samples:
+            del m['message_signature']
+        self.published.append(samples)
+
+    def setUp(self):
+        self.published = []
+        notifier = mock.Mock()
+        notifier.info.side_effect = self.fake_notifier_sample
+        self.useFixture(mockpatch.Patch('oslo_messaging.Notifier',
+                                        return_value=notifier))
+        super(TestPostSamples, self).setUp()
+
+    def test_one(self):
+        s1 = [{'counter_name': 'apples',
+               'counter_type': 'gauge',
+               'counter_unit': 'instance',
+               'counter_volume': 1,
+               'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
+               'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
+               'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
+               'resource_metadata': {'name1': 'value1',
+                                     'name2': 'value2'}}]
+        data = self.post_json('/meters/apples/', s1)
+
+        # timestamp not given so it is generated.
+        s1[0]['timestamp'] = data.json[0]['timestamp']
+        # Ignore message id that is randomly generated
+        s1[0]['message_id'] = data.json[0]['message_id']
+        # source is generated if not provided.
+        s1[0]['source'] = '%s:openstack' % s1[0]['project_id']
+
+        self.assertEqual(s1, data.json)
+        self.assertEqual(s1[0], self.published[0][0])
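    # A minimal sketch of the capture path used by these tests, assuming
    # only what fake_notifier_sample() above shows: the API publishes a
    # posted batch through the (mocked) oslo_messaging.Notifier roughly as
    #
    #     notifier.info(ctxt, event_type, {'samples': [sample_dict, ...]})
    #
    # so the mocked side effect unpacks payload['samples'], deletes the
    # randomly keyed 'message_signature' from each dict, and appends the
    # whole batch to self.published. That is why the assertions index
    # self.published[0][x]: one POST yields one captured batch.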
+    def test_nested_metadata(self):
+        s1 = [{'counter_name': 'apples',
+               'counter_type': 'gauge',
+               'counter_unit': 'instance',
+               'counter_volume': 1,
+               'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
+               'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
+               'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
+               'resource_metadata': {'nest.name1': 'value1',
+                                     'name2': 'value2',
+                                     'nest.name2': 'value3'}}]
+
+        data = self.post_json('/meters/apples/', s1)
+
+        # timestamp not given so it is generated.
+        s1[0]['timestamp'] = data.json[0]['timestamp']
+        # Ignore message id that is randomly generated
+        s1[0]['message_id'] = data.json[0]['message_id']
+        # source is generated if not provided.
+        s1[0]['source'] = '%s:openstack' % s1[0]['project_id']
+
+        unwound = copy.copy(s1[0])
+        unwound['resource_metadata'] = {'nest': {'name1': 'value1',
+                                                 'name2': 'value3'},
+                                        'name2': 'value2'}
+        # only the published sample should be unwound, not the representation
+        # in the API response
+        self.assertEqual(s1[0], data.json[0])
+        self.assertEqual(unwound, self.published[0][0])
+
+    def test_invalid_counter_type(self):
+        s1 = [{'counter_name': 'my_counter_name',
+               'counter_type': 'INVALID_TYPE',
+               'counter_unit': 'instance',
+               'counter_volume': 1,
+               'source': 'closedstack',
+               'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
+               'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
+               'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
+               'resource_metadata': {'name1': 'value1',
+                                     'name2': 'value2'}}]
+
+        data = self.post_json('/meters/my_counter_name/', s1,
+                              expect_errors=True)
+
+        self.assertEqual(400, data.status_int)
+        self.assertEqual(0, len(self.published))
+
+    def test_message_id_provided(self):
+        """Do not accept a sample with a message_id."""
+        s1 = [{'counter_name': 'my_counter_name',
+               'counter_type': 'gauge',
+               'counter_unit': 'instance',
+               'counter_volume': 1,
+               'message_id': 'evil',
+               'source': 'closedstack',
+               'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
+               'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
+               'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
+               'resource_metadata': {'name1': 'value1',
+                                     'name2': 'value2'}}]
+
+        data = self.post_json('/meters/my_counter_name/', s1,
+                              expect_errors=True)
+
+        self.assertEqual(400, data.status_int)
+        self.assertEqual(0, len(self.published))
+
+    def test_wrong_project_id(self):
+        """Do not accept cross posting samples to different projects."""
+        s1 = [{'counter_name': 'my_counter_name',
+               'counter_type': 'gauge',
+               'counter_unit': 'instance',
+               'counter_volume': 1,
+               'source': 'closedstack',
+               'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
+               'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
+               'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
+               'resource_metadata': {'name1': 'value1',
+                                     'name2': 'value2'}}]
+
+        data = self.post_json('/meters/my_counter_name/', s1,
+                              expect_errors=True,
+                              headers={
+                                  "X-Roles": "Member",
+                                  "X-Tenant-Name": "lu-tenant",
+                                  "X-Project-Id":
+                                  "bc23a9d531064583ace8f67dad60f6bb",
+                              })
+
+        self.assertEqual(400, data.status_int)
+        self.assertEqual(0, len(self.published))
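    # The expected values built below all apply the same rewrite rule,
    # visible in the assertions of this file: before publishing, the API
    # prefixes each sample's source with its project id, and a missing
    # source falls back to the configured sample_source ('openstack' in
    # these tests). A hypothetical helper showing the rule:
    #
    #     def published_source(s, default='openstack'):
    #         return '%s:%s' % (s['project_id'], s.get('source', default))
    #
    #     published_source({'project_id': 'p1'})                # 'p1:openstack'
    #     published_source({'project_id': 'p1',
    #                       'source': 'evil'})                  # 'p1:evil'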
+    def test_multiple_samples(self):
+        """Send multiple samples.
+
+        The use case here is to reduce the chatter and send the counters
+        at a slower cadence.
+        """
+        samples = []
+        for x in range(6):
+            dt = datetime.datetime(2012, 8, 27, x, 0, tzinfo=None)
+            s = {'counter_name': 'apples',
+                 'counter_type': 'gauge',
+                 'counter_unit': 'instance',
+                 'counter_volume': float(x * 3),
+                 'source': 'evil',
+                 'timestamp': dt.isoformat(),
+                 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
+                 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
+                 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
+                 'resource_metadata': {'name1': str(x),
+                                       'name2': str(x + 4)}}
+            samples.append(s)
+
+        data = self.post_json('/meters/apples/', samples)
+
+        for x, s in enumerate(samples):
+            # source is modified to include the project_id.
+            s['source'] = '%s:%s' % (s['project_id'],
+                                     s['source'])
+            # Ignore message id that is randomly generated
+            s['message_id'] = data.json[x]['message_id']
+
+            # remove tzinfo to compare generated timestamp
+            # with the provided one
+            c = data.json[x]
+            timestamp = timeutils.parse_isotime(c['timestamp'])
+            c['timestamp'] = timestamp.replace(tzinfo=None).isoformat()
+
+            # do the same on the pipeline
+            msg = self.published[0][x]
+            timestamp = timeutils.parse_isotime(msg['timestamp'])
+            msg['timestamp'] = timestamp.replace(tzinfo=None).isoformat()
+
+            self.assertEqual(s, c)
+            self.assertEqual(s, self.published[0][x])
+
+    def test_missing_mandatory_fields(self):
+        """Do not accept posting samples with missing mandatory fields."""
+        s1 = [{'counter_name': 'my_counter_name',
+               'counter_type': 'gauge',
+               'counter_unit': 'instance',
+               'counter_volume': 1,
+               'source': 'closedstack',
+               'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
+               'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
+               'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
+               'resource_metadata': {'name1': 'value1',
+                                     'name2': 'value2'}}]
+
+        # one by one try posting without a mandatory field.
+        for m in ['counter_volume', 'counter_unit', 'counter_type',
+                  'resource_id', 'counter_name']:
+            s_broke = copy.copy(s1)
+            del s_broke[0][m]
+            print('posting without %s' % m)
+            data = self.post_json('/meters/my_counter_name', s_broke,
+                                  expect_errors=True)
+            self.assertEqual(400, data.status_int)
+
+    def test_multiple_project_id_and_admin(self):
+        """Allow an admin to set multiple project_ids."""
+        s1 = [{'counter_name': 'my_counter_name',
+               'counter_type': 'gauge',
+               'counter_unit': 'instance',
+               'counter_volume': 1,
+               'source': 'closedstack',
+               'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
+               'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
+               'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
+               },
+              {'counter_name': 'my_counter_name',
+               'counter_type': 'gauge',
+               'counter_unit': 'instance',
+               'counter_volume': 2,
+               'source': 'closedstack',
+               'project_id': '4af38dca-f6fc-11e2-94f5-14dae9283f29',
+               'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
+               'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
+               'resource_metadata': {'name1': 'value1',
+                                     'name2': 'value2'}}]
+        data = self.post_json('/meters/my_counter_name/', s1,
+                              headers={"X-Roles": "admin"})
+
+        self.assertEqual(201, data.status_int)
+        for x, s in enumerate(s1):
+            # source is modified to include the project_id.
+            s['source'] = '%s:%s' % (s['project_id'],
+                                     'closedstack')
+            # Ignore message id that is randomly generated
+            s['message_id'] = data.json[x]['message_id']
+            # timestamp not given so it is generated.
+            s['timestamp'] = data.json[x]['timestamp']
+            s.setdefault('resource_metadata', dict())
+            self.assertEqual(s, data.json[x])
+            self.assertEqual(s, self.published[0][x])
+
+    def test_multiple_samples_multiple_sources(self):
+        """Test posting with special conditions.
+
+        Do accept a single post with multiple sources, some of them null.
+ """ + s1 = [{'counter_name': 'my_counter_name', + 'counter_type': 'gauge', + 'counter_unit': 'instance', + 'counter_volume': 1, + 'source': 'paperstack', + 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68', + 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff', + 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', + }, + {'counter_name': 'my_counter_name', + 'counter_type': 'gauge', + 'counter_unit': 'instance', + 'counter_volume': 5, + 'source': 'waterstack', + 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68', + 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff', + 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', + }, + {'counter_name': 'my_counter_name', + 'counter_type': 'gauge', + 'counter_unit': 'instance', + 'counter_volume': 2, + 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68', + 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff', + 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', + 'resource_metadata': {'name1': 'value1', + 'name2': 'value2'}}] + data = self.post_json('/meters/my_counter_name/', s1, + expect_errors=True) + self.assertEqual(201, data.status_int) + for x, s in enumerate(s1): + # source is modified to include the project_id. + s['source'] = '%s:%s' % ( + s['project_id'], + s.get('source', self.CONF.sample_source) + ) + # Ignore message id that is randomly generated + s['message_id'] = data.json[x]['message_id'] + # timestamp not given so it is generated. + s['timestamp'] = data.json[x]['timestamp'] + s.setdefault('resource_metadata', dict()) + self.assertEqual(s, data.json[x]) + self.assertEqual(s, self.published[0][x]) + + def test_missing_project_user_id(self): + """Ensure missing project & user IDs are defaulted appropriately.""" + s1 = [{'counter_name': 'my_counter_name', + 'counter_type': 'gauge', + 'counter_unit': 'instance', + 'counter_volume': 1, + 'source': 'closedstack', + 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', + 'resource_metadata': {'name1': 'value1', + 'name2': 'value2'}}] + + project_id = 'bc23a9d531064583ace8f67dad60f6bb' + user_id = 'fd87807-12d2-4b38-9c70-5f5c2ac427ff' + data = self.post_json('/meters/my_counter_name/', s1, + expect_errors=True, + headers={ + 'X-Roles': 'chief-bottle-washer', + 'X-Project-Id': project_id, + 'X-User-Id': user_id, + }) + + self.assertEqual(201, data.status_int) + for x, s in enumerate(s1): + # source is modified to include the project_id. + s['source'] = '%s:%s' % (project_id, + s['source']) + # Ignore message id that is randomly generated + s['message_id'] = data.json[x]['message_id'] + # timestamp not given so it is generated. + s['timestamp'] = data.json[x]['timestamp'] + s['user_id'] = user_id + s['project_id'] = project_id + + self.assertEqual(s, data.json[x]) + self.assertEqual(s, self.published[0][x]) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/api/v2/test_statistics_scenarios.py ceilometer-5.0.0~b3/ceilometer/tests/functional/api/v2/test_statistics_scenarios.py --- ceilometer-5.0.0~b2/ceilometer/tests/functional/api/v2/test_statistics_scenarios.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/api/v2/test_statistics_scenarios.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,1661 @@ +# +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Test events statistics retrieval.""" + +import datetime + +from ceilometer.publisher import utils +from ceilometer import sample +from ceilometer.tests import db as tests_db +from ceilometer.tests.functional.api import v2 + + +class TestMaxProjectVolume(v2.FunctionalTest, + tests_db.MixinTestsWithBackendScenarios): + + PATH = '/meters/volume.size/statistics' + + def setUp(self): + super(TestMaxProjectVolume, self).setUp() + for i in range(3): + s = sample.Sample( + 'volume.size', + 'gauge', + 'GiB', + 5 + i, + 'user-id', + 'project1', + 'resource-id-%s' % i, + timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), + resource_metadata={'display_name': 'test-volume', + 'tag': 'self.sample', + }, + source='source1', + ) + msg = utils.meter_message_from_counter( + s, self.CONF.publisher.telemetry_secret, + ) + self.conn.record_metering_data(msg) + + def test_no_time_bounds(self): + data = self.get_json(self.PATH, q=[{'field': 'project_id', + 'value': 'project1', + }]) + self.assertEqual(7, data[0]['max']) + self.assertEqual(3, data[0]['count']) + + def test_start_timestamp(self): + data = self.get_json(self.PATH, q=[{'field': 'project_id', + 'value': 'project1', + }, + {'field': 'timestamp', + 'op': 'ge', + 'value': '2012-09-25T11:30:00', + }, + ]) + self.assertEqual(7, data[0]['max']) + self.assertEqual(2, data[0]['count']) + + def test_start_timestamp_after(self): + data = self.get_json(self.PATH, q=[{'field': 'project_id', + 'value': 'project1', + }, + {'field': 'timestamp', + 'op': 'ge', + 'value': '2012-09-25T12:34:00', + }, + ]) + self.assertEqual([], data) + + def test_end_timestamp(self): + data = self.get_json(self.PATH, q=[{'field': 'project_id', + 'value': 'project1', + }, + {'field': 'timestamp', + 'op': 'le', + 'value': '2012-09-25T11:30:00', + }, + ]) + self.assertEqual(5, data[0]['max']) + self.assertEqual(1, data[0]['count']) + + def test_end_timestamp_before(self): + data = self.get_json(self.PATH, q=[{'field': 'project_id', + 'value': 'project1', + }, + {'field': 'timestamp', + 'op': 'le', + 'value': '2012-09-25T09:54:00', + }, + ]) + self.assertEqual([], data) + + def test_start_end_timestamp(self): + data = self.get_json(self.PATH, q=[{'field': 'project_id', + 'value': 'project1', + }, + {'field': 'timestamp', + 'op': 'ge', + 'value': '2012-09-25T11:30:00', + }, + {'field': 'timestamp', + 'op': 'le', + 'value': '2012-09-25T11:32:00', + }, + ]) + self.assertEqual(6, data[0]['max']) + self.assertEqual(1, data[0]['count']) + + +class TestMaxResourceVolume(v2.FunctionalTest, + tests_db.MixinTestsWithBackendScenarios): + + PATH = '/meters/volume.size/statistics' + + def setUp(self): + super(TestMaxResourceVolume, self).setUp() + for i in range(3): + s = sample.Sample( + 'volume.size', + 'gauge', + 'GiB', + 5 + i, + 'user-id', + 'project1', + 'resource-id', + timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), + resource_metadata={'display_name': 'test-volume', + 'tag': 'self.sample', + }, + source='source1', + ) + msg = utils.meter_message_from_counter( + s, self.CONF.publisher.telemetry_secret, + ) + self.conn.record_metering_data(msg) + + def 
test_no_time_bounds(self): + data = self.get_json(self.PATH, q=[{'field': 'resource_id', + 'value': 'resource-id', + }]) + self.assertEqual(7, data[0]['max']) + self.assertEqual(3, data[0]['count']) + + def test_no_time_bounds_with_period(self): + data = self.get_json(self.PATH, + q=[{'field': 'resource_id', + 'value': 'resource-id'}], + period=3600) + self.assertEqual(3, len(data)) + self.assertEqual(set([u'2012-09-25T10:30:00', + u'2012-09-25T12:32:00', + u'2012-09-25T11:31:00']), + set(x['duration_start'] for x in data)) + self.assertEqual(3600, data[0]['period']) + self.assertEqual(set([u'2012-09-25T10:30:00', + u'2012-09-25T11:30:00', + u'2012-09-25T12:30:00']), + set(x['period_start'] for x in data)) + + def test_period_with_negative_value(self): + resp = self.get_json(self.PATH, expect_errors=True, + q=[{'field': 'resource_id', + 'value': 'resource-id'}], + period=-1) + self.assertEqual(400, resp.status_code) + + @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'hbase', 'db2') + def test_period_with_large_value(self): + resp = self.get_json(self.PATH, expect_errors=True, + q=[{'field': 'user_id', + 'value': 'user-id'}], + period=10000000000000) + self.assertEqual(400, resp.status_code) + self.assertIn(b"Invalid period", resp.body) + + def test_start_timestamp(self): + data = self.get_json(self.PATH, q=[{'field': 'resource_id', + 'value': 'resource-id', + }, + {'field': 'timestamp', + 'op': 'ge', + 'value': '2012-09-25T11:30:00', + }, + ]) + self.assertEqual(7, data[0]['max']) + self.assertEqual(2, data[0]['count']) + + def test_start_timestamp_after(self): + data = self.get_json(self.PATH, q=[{'field': 'resource_id', + 'value': 'resource-id', + }, + {'field': 'timestamp', + 'op': 'ge', + 'value': '2012-09-25T12:34:00', + }, + ]) + self.assertEqual([], data) + + def test_end_timestamp(self): + data = self.get_json(self.PATH, q=[{'field': 'resource_id', + 'value': 'resource-id', + }, + {'field': 'timestamp', + 'op': 'le', + 'value': '2012-09-25T11:30:00', + }, + ]) + self.assertEqual(5, data[0]['max']) + self.assertEqual(1, data[0]['count']) + + def test_end_timestamp_before(self): + data = self.get_json(self.PATH, q=[{'field': 'resource_id', + 'value': 'resource-id', + }, + {'field': 'timestamp', + 'op': 'le', + 'value': '2012-09-25T09:54:00', + }, + ]) + self.assertEqual([], data) + + def test_start_end_timestamp(self): + data = self.get_json(self.PATH, q=[{'field': 'resource_id', + 'value': 'resource-id', + }, + {'field': 'timestamp', + 'op': 'ge', + 'value': '2012-09-25T11:30:00', + }, + {'field': 'timestamp', + 'op': 'le', + 'value': '2012-09-25T11:32:00', + }, + ]) + self.assertEqual(6, data[0]['max']) + self.assertEqual(1, data[0]['count']) + + +class TestSumProjectVolume(v2.FunctionalTest, + tests_db.MixinTestsWithBackendScenarios): + + PATH = '/meters/volume.size/statistics' + + def setUp(self): + super(TestSumProjectVolume, self).setUp() + for i in range(3): + s = sample.Sample( + 'volume.size', + 'gauge', + 'GiB', + 5 + i, + 'user-id', + 'project1', + 'resource-id-%s' % i, + timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), + resource_metadata={'display_name': 'test-volume', + 'tag': 'self.sample', + }, + source='source1', + ) + msg = utils.meter_message_from_counter( + s, self.CONF.publisher.telemetry_secret, + ) + self.conn.record_metering_data(msg) + + def test_no_time_bounds(self): + data = self.get_json(self.PATH, q=[{'field': 'project_id', + 'value': 'project1', + }]) + expected = 5 + 6 + 7 + self.assertEqual(expected, data[0]['sum']) + self.assertEqual(3, 
data[0]['count']) + + def test_start_timestamp(self): + data = self.get_json(self.PATH, q=[{'field': 'project_id', + 'value': 'project1', + }, + {'field': 'timestamp', + 'op': 'ge', + 'value': '2012-09-25T11:30:00', + }, + ]) + expected = 6 + 7 + self.assertEqual(expected, data[0]['sum']) + self.assertEqual(2, data[0]['count']) + + def test_start_timestamp_after(self): + data = self.get_json(self.PATH, q=[{'field': 'project_id', + 'value': 'project1', + }, + {'field': 'timestamp', + 'op': 'ge', + 'value': '2012-09-25T12:34:00', + }, + ]) + self.assertEqual([], data) + + def test_end_timestamp(self): + data = self.get_json(self.PATH, q=[{'field': 'project_id', + 'value': 'project1', + }, + {'field': 'timestamp', + 'op': 'le', + 'value': '2012-09-25T11:30:00', + }, + ]) + self.assertEqual(5, data[0]['sum']) + self.assertEqual(1, data[0]['count']) + + def test_end_timestamp_before(self): + data = self.get_json(self.PATH, q=[{'field': 'project_id', + 'value': 'project1', + }, + {'field': 'timestamp', + 'op': 'le', + 'value': '2012-09-25T09:54:00', + }, + ]) + self.assertEqual([], data) + + def test_start_end_timestamp(self): + data = self.get_json(self.PATH, q=[{'field': 'project_id', + 'value': 'project1', + }, + {'field': 'timestamp', + 'op': 'ge', + 'value': '2012-09-25T11:30:00', + }, + {'field': 'timestamp', + 'op': 'le', + 'value': '2012-09-25T11:32:00', + }, + ]) + self.assertEqual(6, data[0]['sum']) + self.assertEqual(1, data[0]['count']) + + +class TestSumResourceVolume(v2.FunctionalTest, + tests_db.MixinTestsWithBackendScenarios): + + PATH = '/meters/volume.size/statistics' + + def setUp(self): + super(TestSumResourceVolume, self).setUp() + for i in range(3): + s = sample.Sample( + 'volume.size', + 'gauge', + 'GiB', + 5 + i, + 'user-id', + 'project1', + 'resource-id', + timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), + resource_metadata={'display_name': 'test-volume', + 'tag': 'self.sample', + }, + source='source1', + ) + msg = utils.meter_message_from_counter( + s, self.CONF.publisher.telemetry_secret, + ) + self.conn.record_metering_data(msg) + + def test_no_time_bounds(self): + data = self.get_json(self.PATH, q=[{'field': 'resource_id', + 'value': 'resource-id', + }]) + self.assertEqual(5 + 6 + 7, data[0]['sum']) + self.assertEqual(3, data[0]['count']) + + def test_no_time_bounds_with_period(self): + data = self.get_json(self.PATH, + q=[{'field': 'resource_id', + 'value': 'resource-id'}], + period=1800) + self.assertEqual(3, len(data)) + self.assertEqual(set([u'2012-09-25T10:30:00', + u'2012-09-25T12:32:00', + u'2012-09-25T11:31:00']), + set(x['duration_start'] for x in data)) + self.assertEqual(1800, data[0]['period']) + self.assertEqual(set([u'2012-09-25T10:30:00', + u'2012-09-25T11:30:00', + u'2012-09-25T12:30:00']), + set(x['period_start'] for x in data)) + + def test_start_timestamp(self): + data = self.get_json(self.PATH, q=[{'field': 'resource_id', + 'value': 'resource-id', + }, + {'field': 'timestamp', + 'op': 'ge', + 'value': '2012-09-25T11:30:00', + }]) + self.assertEqual(6 + 7, data[0]['sum']) + self.assertEqual(2, data[0]['count']) + + def test_start_timestamp_with_period(self): + data = self.get_json(self.PATH, + q=[{'field': 'resource_id', + 'value': 'resource-id'}, + {'field': 'timestamp', + 'op': 'ge', + 'value': '2012-09-25T10:15:00'}], + period=7200) + self.assertEqual(2, len(data)) + self.assertEqual(set([u'2012-09-25T10:30:00', + u'2012-09-25T12:32:00']), + set(x['duration_start'] for x in data)) + self.assertEqual(7200, data[0]['period']) + 
self.assertEqual(set([u'2012-09-25T10:15:00', + u'2012-09-25T12:15:00']), + set(x['period_start'] for x in data)) + + def test_start_timestamp_after(self): + data = self.get_json(self.PATH, q=[{'field': 'resource_id', + 'value': 'resource-id', + }, + {'field': 'timestamp', + 'op': 'ge', + 'value': '2012-09-25T12:34:00', + }]) + self.assertEqual([], data) + + def test_end_timestamp(self): + data = self.get_json(self.PATH, q=[{'field': 'resource_id', + 'value': 'resource-id', + }, + {'field': 'timestamp', + 'op': 'le', + 'value': '2012-09-25T11:30:00', + }]) + self.assertEqual(5, data[0]['sum']) + self.assertEqual(1, data[0]['count']) + + def test_end_timestamp_before(self): + data = self.get_json(self.PATH, q=[{'field': 'resource_id', + 'value': 'resource-id', + }, + {'field': 'timestamp', + 'op': 'le', + 'value': '2012-09-25T09:54:00', + }]) + self.assertEqual([], data) + + def test_start_end_timestamp(self): + data = self.get_json(self.PATH, q=[{'field': 'resource_id', + 'value': 'resource-id', + }, + {'field': 'timestamp', + 'op': 'ge', + 'value': '2012-09-25T11:30:00', + }, + {'field': 'timestamp', + 'op': 'lt', + 'value': '2012-09-25T11:32:00', + }]) + self.assertEqual(6, data[0]['sum']) + self.assertEqual(1, data[0]['count']) + + +class TestGroupByInstance(v2.FunctionalTest, + tests_db.MixinTestsWithBackendScenarios): + + PATH = '/meters/instance/statistics' + + def setUp(self): + super(TestGroupByInstance, self).setUp() + + test_sample_data = ( + {'volume': 2, 'user': 'user-1', 'project': 'project-1', + 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10), + 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1', + 'source': 'source-2'}, + {'volume': 2, 'user': 'user-1', 'project': 'project-2', + 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 15, 37), + 'metadata_flavor': 'm1.large', 'metadata_event': 'event-1', + 'source': 'source-2'}, + {'volume': 1, 'user': 'user-2', 'project': 'project-1', + 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 11), + 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', + 'source': 'source-1'}, + {'volume': 1, 'user': 'user-2', 'project': 'project-1', + 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40), + 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', + 'source': 'source-1'}, + {'volume': 2, 'user': 'user-2', 'project': 'project-1', + 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 14, 59), + 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', + 'source': 'source-1'}, + {'volume': 4, 'user': 'user-2', 'project': 'project-2', + 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28), + 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', + 'source': 'source-1'}, + {'volume': 4, 'user': 'user-3', 'project': 'project-1', + 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22), + 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', + 'source': 'source-3'}, + ) + + for test_sample in test_sample_data: + c = sample.Sample( + 'instance', + sample.TYPE_CUMULATIVE, + unit='s', + volume=test_sample['volume'], + user_id=test_sample['user'], + project_id=test_sample['project'], + resource_id=test_sample['resource'], + timestamp=datetime.datetime(*test_sample['timestamp']), + resource_metadata={'flavor': test_sample['metadata_flavor'], + 'event': test_sample['metadata_event'], }, + source=test_sample['source'], + ) + msg = utils.meter_message_from_counter( + c, self.CONF.publisher.telemetry_secret, + ) + self.conn.record_metering_data(msg) + + def test_group_by_user(self): + data 
= self.get_json(self.PATH, groupby=['user_id']) + groupby_keys_set = set(x for sub_dict in data + for x in sub_dict['groupby'].keys()) + groupby_vals_set = set(x for sub_dict in data + for x in sub_dict['groupby'].values()) + self.assertEqual(set(['user_id']), groupby_keys_set) + self.assertEqual(set(['user-1', 'user-2', 'user-3']), groupby_vals_set) + + for r in data: + grp = r['groupby'] + if grp == {'user_id': 'user-1'}: + self.assertEqual(2, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(2, r['min']) + self.assertEqual(2, r['max']) + self.assertEqual(4, r['sum']) + self.assertEqual(2, r['avg']) + elif grp == {'user_id': 'user-2'}: + self.assertEqual(4, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(1, r['min']) + self.assertEqual(4, r['max']) + self.assertEqual(8, r['sum']) + self.assertEqual(2, r['avg']) + elif grp == {'user_id': 'user-3'}: + self.assertEqual(1, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(4, r['min']) + self.assertEqual(4, r['max']) + self.assertEqual(4, r['sum']) + self.assertEqual(4, r['avg']) + + def test_group_by_resource(self): + data = self.get_json(self.PATH, groupby=['resource_id']) + groupby_keys_set = set(x for sub_dict in data + for x in sub_dict['groupby'].keys()) + groupby_vals_set = set(x for sub_dict in data + for x in sub_dict['groupby'].values()) + self.assertEqual(set(['resource_id']), groupby_keys_set) + self.assertEqual(set(['resource-1', 'resource-2', 'resource-3']), + groupby_vals_set) + + for r in data: + grp = r['groupby'] + if grp == {'resource_id': 'resource-1'}: + self.assertEqual(3, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(2, r['min']) + self.assertEqual(2, r['max']) + self.assertEqual(6, r['sum']) + self.assertEqual(2, r['avg']) + elif grp == {'resource_id': 'resource-2'}: + self.assertEqual(3, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(1, r['min']) + self.assertEqual(4, r['max']) + self.assertEqual(6, r['sum']) + self.assertEqual(2, r['avg']) + elif grp == {'resource_id': 'resource-3'}: + self.assertEqual(1, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(4, r['min']) + self.assertEqual(4, r['max']) + self.assertEqual(4, r['sum']) + self.assertEqual(4, r['avg']) + + def test_group_by_project(self): + data = self.get_json(self.PATH, groupby=['project_id']) + groupby_keys_set = set(x for sub_dict in data + for x in sub_dict['groupby'].keys()) + groupby_vals_set = set(x for sub_dict in data + for x in sub_dict['groupby'].values()) + self.assertEqual(set(['project_id']), groupby_keys_set) + self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) + + for r in data: + grp = r['groupby'] + if grp == {'project_id': 'project-1'}: + self.assertEqual(5, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(1, r['min']) + self.assertEqual(4, r['max']) + self.assertEqual(10, r['sum']) + self.assertEqual(2, r['avg']) + elif grp == {'project_id': 'project-2'}: + self.assertEqual(2, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(2, r['min']) + self.assertEqual(4, r['max']) + self.assertEqual(6, r['sum']) + self.assertEqual(3, r['avg']) + + def test_group_by_unknown_field(self): + response = self.get_json(self.PATH, + expect_errors=True, + groupby=['wtf']) + self.assertEqual(400, response.status_code) + + def test_group_by_multiple_regular(self): + data = self.get_json(self.PATH, groupby=['user_id', 'resource_id']) + groupby_keys_set = set(x for sub_dict in data + for x in 
sub_dict['groupby'].keys()) + groupby_vals_set = set(x for sub_dict in data + for x in sub_dict['groupby'].values()) + self.assertEqual(set(['user_id', 'resource_id']), groupby_keys_set) + self.assertEqual(set(['user-1', 'user-2', 'user-3', 'resource-1', + 'resource-2', 'resource-3']), + groupby_vals_set) + + for r in data: + grp = r['groupby'] + if grp == {'user_id': 'user-1', + 'resource_id': 'resource-1'}: + self.assertEqual(2, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(2, r['min']) + self.assertEqual(2, r['max']) + self.assertEqual(4, r['sum']) + self.assertEqual(2, r['avg']) + elif grp == {'user_id': 'user-2', + 'resource_id': 'resource-1'}: + self.assertEqual(1, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(2, r['min']) + self.assertEqual(2, r['max']) + self.assertEqual(2, r['sum']) + self.assertEqual(2, r['avg']) + elif grp == {'user_id': 'user-2', + 'resource_id': 'resource-2'}: + self.assertEqual(3, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(1, r['min']) + self.assertEqual(4, r['max']) + self.assertEqual(6, r['sum']) + self.assertEqual(2, r['avg']) + elif grp == {'user_id': 'user-3', + 'resource_id': 'resource-3'}: + self.assertEqual(1, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(4, r['min']) + self.assertEqual(4, r['max']) + self.assertEqual(4, r['sum']) + self.assertEqual(4, r['avg']) + else: + self.assertNotEqual(grp, {'user_id': 'user-1', + 'resource_id': 'resource-2'}) + self.assertNotEqual(grp, {'user_id': 'user-1', + 'resource_id': 'resource-3'}) + self.assertNotEqual(grp, {'user_id': 'user-2', + 'resource_id': 'resource-3'}) + self.assertNotEqual(grp, {'user_id': 'user-3', + 'resource_id': 'resource-1'}) + self.assertNotEqual(grp, {'user_id': 'user-3', + 'resource_id': 'resource-2'}) + + def test_group_by_with_query_filter(self): + data = self.get_json(self.PATH, + q=[{'field': 'project_id', + 'op': 'eq', + 'value': 'project-1'}], + groupby=['resource_id']) + groupby_keys_set = set(x for sub_dict in data + for x in sub_dict['groupby'].keys()) + groupby_vals_set = set(x for sub_dict in data + for x in sub_dict['groupby'].values()) + self.assertEqual(set(['resource_id']), groupby_keys_set) + self.assertEqual(set(['resource-1', 'resource-2', 'resource-3']), + groupby_vals_set) + + for r in data: + grp = r['groupby'] + if grp == {'resource_id': 'resource-1'}: + self.assertEqual(2, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(2, r['min']) + self.assertEqual(2, r['max']) + self.assertEqual(4, r['sum']) + self.assertEqual(2, r['avg']) + elif grp == {'resource_id': 'resource-2'}: + self.assertEqual(2, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(1, r['min']) + self.assertEqual(1, r['max']) + self.assertEqual(2, r['sum']) + self.assertEqual(1, r['avg']) + elif grp == {'resource_id': 'resource-3'}: + self.assertEqual(1, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(4, r['min']) + self.assertEqual(4, r['max']) + self.assertEqual(4, r['sum']) + self.assertEqual(4, r['avg']) + + def test_group_by_with_query_filter_multiple(self): + data = self.get_json(self.PATH, + q=[{'field': 'user_id', + 'op': 'eq', + 'value': 'user-2'}, + {'field': 'source', + 'op': 'eq', + 'value': 'source-1'}], + groupby=['project_id', 'resource_id']) + groupby_keys_set = set(x for sub_dict in data + for x in sub_dict['groupby'].keys()) + groupby_vals_set = set(x for sub_dict in data + for x in sub_dict['groupby'].values()) + self.assertEqual(set(['project_id', 
'resource_id']), groupby_keys_set) + self.assertEqual(set(['project-1', 'project-2', + 'resource-1', 'resource-2']), + groupby_vals_set) + + for r in data: + grp = r['groupby'] + if grp == {'project_id': 'project-1', + 'resource_id': 'resource-1'}: + self.assertEqual(1, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(2, r['min']) + self.assertEqual(2, r['max']) + self.assertEqual(2, r['sum']) + self.assertEqual(2, r['avg']) + elif grp == {'project_id': 'project-1', + 'resource_id': 'resource-2'}: + self.assertEqual(2, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(1, r['min']) + self.assertEqual(1, r['max']) + self.assertEqual(2, r['sum']) + self.assertEqual(1, r['avg']) + elif grp == {'project_id': 'project-2', + 'resource_id': 'resource-2'}: + self.assertEqual(1, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(4, r['min']) + self.assertEqual(4, r['max']) + self.assertEqual(4, r['sum']) + self.assertEqual(4, r['avg']) + else: + self.assertNotEqual(grp, {'project_id': 'project-2', + 'resource_id': 'resource-1'}) + + def test_group_by_with_period(self): + data = self.get_json(self.PATH, + groupby=['project_id'], + period=7200) + groupby_keys_set = set(x for sub_dict in data + for x in sub_dict['groupby'].keys()) + groupby_vals_set = set(x for sub_dict in data + for x in sub_dict['groupby'].values()) + self.assertEqual(set(['project_id']), groupby_keys_set) + self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) + period_start_set = set(sub_dict['period_start'] for sub_dict in data) + period_start_valid = set([u'2013-08-01T10:11:00', + u'2013-08-01T14:11:00', + u'2013-08-01T16:11:00']) + self.assertEqual(period_start_valid, period_start_set) + + for r in data: + grp = r['groupby'] + period_start = r['period_start'] + if (grp == {'project_id': 'project-1'} and + period_start == u'2013-08-01T10:11:00'): + self.assertEqual(3, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(1, r['min']) + self.assertEqual(4, r['max']) + self.assertEqual(6, r['sum']) + self.assertEqual(2, r['avg']) + self.assertEqual(4260, r['duration']) + self.assertEqual(u'2013-08-01T10:11:00', r['duration_start']) + self.assertEqual(u'2013-08-01T11:22:00', r['duration_end']) + self.assertEqual(7200, r['period']) + self.assertEqual(u'2013-08-01T12:11:00', r['period_end']) + elif (grp == {'project_id': 'project-1'} and + period_start == u'2013-08-01T14:11:00'): + self.assertEqual(2, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(2, r['min']) + self.assertEqual(2, r['max']) + self.assertEqual(4, r['sum']) + self.assertEqual(2, r['avg']) + self.assertEqual(4260, r['duration']) + self.assertEqual(u'2013-08-01T14:59:00', r['duration_start']) + self.assertEqual(u'2013-08-01T16:10:00', r['duration_end']) + self.assertEqual(7200, r['period']) + self.assertEqual(u'2013-08-01T16:11:00', r['period_end']) + elif (grp == {'project_id': 'project-2'} and + period_start == u'2013-08-01T14:11:00'): + self.assertEqual(1, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(2, r['min']) + self.assertEqual(2, r['max']) + self.assertEqual(2, r['sum']) + self.assertEqual(2, r['avg']) + self.assertEqual(0, r['duration']) + self.assertEqual(u'2013-08-01T15:37:00', r['duration_start']) + self.assertEqual(u'2013-08-01T15:37:00', r['duration_end']) + self.assertEqual(7200, r['period']) + self.assertEqual(u'2013-08-01T16:11:00', r['period_end']) + elif (grp == {'project_id': 'project-2'} and + period_start == u'2013-08-01T16:11:00'): + 
self.assertEqual(1, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(4, r['min']) + self.assertEqual(4, r['max']) + self.assertEqual(4, r['sum']) + self.assertEqual(4, r['avg']) + self.assertEqual(0, r['duration']) + self.assertEqual(u'2013-08-01T17:28:00', r['duration_start']) + self.assertEqual(u'2013-08-01T17:28:00', r['duration_end']) + self.assertEqual(7200, r['period']) + self.assertEqual(u'2013-08-01T18:11:00', r['period_end']) + else: + self.assertNotEqual([grp, period_start], + [{'project_id': 'project-1'}, + u'2013-08-01T16:11:00']) + self.assertNotEqual([grp, period_start], + [{'project_id': 'project-2'}, + u'2013-08-01T10:11:00']) + + def test_group_by_with_query_filter_and_period(self): + data = self.get_json(self.PATH, + q=[{'field': 'source', + 'op': 'eq', + 'value': 'source-1'}], + groupby=['project_id'], + period=7200) + groupby_keys_set = set(x for sub_dict in data + for x in sub_dict['groupby'].keys()) + groupby_vals_set = set(x for sub_dict in data + for x in sub_dict['groupby'].values()) + self.assertEqual(set(['project_id']), groupby_keys_set) + self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) + period_start_set = set(sub_dict['period_start'] for sub_dict in data) + period_start_valid = set([u'2013-08-01T10:11:00', + u'2013-08-01T14:11:00', + u'2013-08-01T16:11:00']) + self.assertEqual(period_start_valid, period_start_set) + + for r in data: + grp = r['groupby'] + period_start = r['period_start'] + if (grp == {'project_id': 'project-1'} and + period_start == u'2013-08-01T10:11:00'): + self.assertEqual(2, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(1, r['min']) + self.assertEqual(1, r['max']) + self.assertEqual(2, r['sum']) + self.assertEqual(1, r['avg']) + self.assertEqual(1740, r['duration']) + self.assertEqual(u'2013-08-01T10:11:00', r['duration_start']) + self.assertEqual(u'2013-08-01T10:40:00', r['duration_end']) + self.assertEqual(7200, r['period']) + self.assertEqual(u'2013-08-01T12:11:00', r['period_end']) + elif (grp == {'project_id': 'project-1'} and + period_start == u'2013-08-01T14:11:00'): + self.assertEqual(1, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(2, r['min']) + self.assertEqual(2, r['max']) + self.assertEqual(2, r['sum']) + self.assertEqual(2, r['avg']) + self.assertEqual(0, r['duration']) + self.assertEqual(u'2013-08-01T14:59:00', r['duration_start']) + self.assertEqual(u'2013-08-01T14:59:00', r['duration_end']) + self.assertEqual(7200, r['period']) + self.assertEqual(u'2013-08-01T16:11:00', r['period_end']) + elif (grp == {'project_id': 'project-2'} and + period_start == u'2013-08-01T16:11:00'): + self.assertEqual(1, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(4, r['min']) + self.assertEqual(4, r['max']) + self.assertEqual(4, r['sum']) + self.assertEqual(4, r['avg']) + self.assertEqual(0, r['duration']) + self.assertEqual(u'2013-08-01T17:28:00', r['duration_start']) + self.assertEqual(u'2013-08-01T17:28:00', r['duration_end']) + self.assertEqual(7200, r['period']) + self.assertEqual(u'2013-08-01T18:11:00', r['period_end']) + else: + self.assertNotEqual([grp, period_start], + [{'project_id': 'project-1'}, + u'2013-08-01T16:11:00']) + self.assertNotEqual([grp, period_start], + [{'project_id': 'project-2'}, + u'2013-08-01T10:11:00']) + self.assertNotEqual([grp, period_start], + [{'project_id': 'project-2'}, + u'2013-08-01T14:11:00']) + + def test_group_by_start_timestamp_after(self): + data = self.get_json(self.PATH, + q=[{'field': 'timestamp', + 
'op': 'ge', + 'value': '2013-08-01T17:28:01'}], + groupby=['project_id']) + self.assertEqual([], data) + + def test_group_by_end_timestamp_before(self): + data = self.get_json(self.PATH, + q=[{'field': 'timestamp', + 'op': 'le', + 'value': '2013-08-01T10:10:59'}], + groupby=['project_id']) + self.assertEqual([], data) + + def test_group_by_start_timestamp(self): + data = self.get_json(self.PATH, + q=[{'field': 'timestamp', + 'op': 'ge', + 'value': '2013-08-01T14:58:00'}], + groupby=['project_id']) + groupby_keys_set = set(x for sub_dict in data + for x in sub_dict['groupby'].keys()) + groupby_vals_set = set(x for sub_dict in data + for x in sub_dict['groupby'].values()) + self.assertEqual(set(['project_id']), groupby_keys_set) + self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) + + for r in data: + grp = r['groupby'] + if grp == {'project_id': 'project-1'}: + self.assertEqual(2, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(2, r['min']) + self.assertEqual(2, r['max']) + self.assertEqual(4, r['sum']) + self.assertEqual(2, r['avg']) + elif grp == {'project_id': 'project-2'}: + self.assertEqual(2, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(2, r['min']) + self.assertEqual(4, r['max']) + self.assertEqual(6, r['sum']) + self.assertEqual(3, r['avg']) + + def test_group_by_end_timestamp(self): + data = self.get_json(self.PATH, + q=[{'field': 'timestamp', + 'op': 'le', + 'value': '2013-08-01T11:45:00'}], + groupby=['project_id']) + groupby_keys_set = set(x for sub_dict in data + for x in sub_dict['groupby'].keys()) + groupby_vals_set = set(x for sub_dict in data + for x in sub_dict['groupby'].values()) + self.assertEqual(set(['project_id']), groupby_keys_set) + self.assertEqual(set(['project-1']), groupby_vals_set) + + for r in data: + grp = r['groupby'] + if grp == {'project_id': 'project-1'}: + self.assertEqual(3, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(1, r['min']) + self.assertEqual(4, r['max']) + self.assertEqual(6, r['sum']) + self.assertEqual(2, r['avg']) + + def test_group_by_start_end_timestamp(self): + data = self.get_json(self.PATH, + q=[{'field': 'timestamp', + 'op': 'ge', + 'value': '2013-08-01T08:17:03'}, + {'field': 'timestamp', + 'op': 'le', + 'value': '2013-08-01T23:59:59'}], + groupby=['project_id']) + groupby_keys_set = set(x for sub_dict in data + for x in sub_dict['groupby'].keys()) + groupby_vals_set = set(x for sub_dict in data + for x in sub_dict['groupby'].values()) + self.assertEqual(set(['project_id']), groupby_keys_set) + self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) + + for r in data: + grp = r['groupby'] + if grp == {'project_id': 'project-1'}: + self.assertEqual(5, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(1, r['min']) + self.assertEqual(4, r['max']) + self.assertEqual(10, r['sum']) + self.assertEqual(2, r['avg']) + elif grp == {'project_id': 'project-2'}: + self.assertEqual(2, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(2, r['min']) + self.assertEqual(4, r['max']) + self.assertEqual(6, r['sum']) + self.assertEqual(3, r['avg']) + + def test_group_by_start_end_timestamp_with_query_filter(self): + data = self.get_json(self.PATH, + q=[{'field': 'project_id', + 'op': 'eq', + 'value': 'project-1'}, + {'field': 'timestamp', + 'op': 'ge', + 'value': '2013-08-01T11:01:00'}, + {'field': 'timestamp', + 'op': 'le', + 'value': '2013-08-01T20:00:00'}], + groupby=['resource_id']) + groupby_keys_set = set(x for sub_dict in data + 
for x in sub_dict['groupby'].keys()) + groupby_vals_set = set(x for sub_dict in data + for x in sub_dict['groupby'].values()) + self.assertEqual(set(['resource_id']), groupby_keys_set) + self.assertEqual(set(['resource-1', 'resource-3']), groupby_vals_set) + + for r in data: + grp = r['groupby'] + if grp == {'resource_id': 'resource-1'}: + self.assertEqual(2, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(2, r['min']) + self.assertEqual(2, r['max']) + self.assertEqual(4, r['sum']) + self.assertEqual(2, r['avg']) + elif grp == {'resource_id': 'resource-3'}: + self.assertEqual(1, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(4, r['min']) + self.assertEqual(4, r['max']) + self.assertEqual(4, r['sum']) + self.assertEqual(4, r['avg']) + + def test_group_by_start_end_timestamp_with_period(self): + data = self.get_json(self.PATH, + q=[{'field': 'timestamp', + 'op': 'ge', + 'value': '2013-08-01T14:00:00'}, + {'field': 'timestamp', + 'op': 'le', + 'value': '2013-08-01T17:00:00'}], + groupby=['project_id'], + period=3600) + groupby_keys_set = set(x for sub_dict in data + for x in sub_dict['groupby'].keys()) + groupby_vals_set = set(x for sub_dict in data + for x in sub_dict['groupby'].values()) + self.assertEqual(set(['project_id']), groupby_keys_set) + self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) + period_start_set = set(sub_dict['period_start'] for sub_dict in data) + period_start_valid = set([u'2013-08-01T14:00:00', + u'2013-08-01T15:00:00', + u'2013-08-01T16:00:00']) + self.assertEqual(period_start_valid, period_start_set) + + for r in data: + grp = r['groupby'] + period_start = r['period_start'] + if (grp == {'project_id': 'project-1'} and + period_start == u'2013-08-01T14:00:00'): + self.assertEqual(1, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(2, r['min']) + self.assertEqual(2, r['max']) + self.assertEqual(2, r['sum']) + self.assertEqual(2, r['avg']) + self.assertEqual(0, r['duration']) + self.assertEqual(u'2013-08-01T14:59:00', r['duration_start']) + self.assertEqual(u'2013-08-01T14:59:00', r['duration_end']) + self.assertEqual(3600, r['period']) + self.assertEqual(u'2013-08-01T15:00:00', r['period_end']) + elif (grp == {'project_id': 'project-1'} and + period_start == u'2013-08-01T16:00:00'): + self.assertEqual(1, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(2, r['min']) + self.assertEqual(2, r['max']) + self.assertEqual(2, r['sum']) + self.assertEqual(2, r['avg']) + self.assertEqual(0, r['duration']) + self.assertEqual(u'2013-08-01T16:10:00', r['duration_start']) + self.assertEqual(u'2013-08-01T16:10:00', r['duration_end']) + self.assertEqual(3600, r['period']) + self.assertEqual(u'2013-08-01T17:00:00', r['period_end']) + elif (grp == {'project_id': 'project-2'} and + period_start == u'2013-08-01T15:00:00'): + self.assertEqual(1, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(2, r['min']) + self.assertEqual(2, r['max']) + self.assertEqual(2, r['sum']) + self.assertEqual(2, r['avg']) + self.assertEqual(0, r['duration']) + self.assertEqual(u'2013-08-01T15:37:00', r['duration_start']) + self.assertEqual(u'2013-08-01T15:37:00', r['duration_end']) + self.assertEqual(3600, r['period']) + self.assertEqual(u'2013-08-01T16:00:00', r['period_end']) + else: + self.assertNotEqual([grp, period_start], + [{'project_id': 'project-1'}, + u'2013-08-01T15:00:00']) + self.assertNotEqual([grp, period_start], + [{'project_id': 'project-2'}, + u'2013-08-01T14:00:00']) + 
self.assertNotEqual([grp, period_start], + [{'project_id': 'project-2'}, + u'2013-08-01T16:00:00']) + + def test_group_by_start_end_timestamp_with_query_filter_and_period(self): + data = self.get_json(self.PATH, + q=[{'field': 'source', + 'op': 'eq', + 'value': 'source-1'}, + {'field': 'timestamp', + 'op': 'ge', + 'value': '2013-08-01T10:00:00'}, + {'field': 'timestamp', + 'op': 'le', + 'value': '2013-08-01T18:00:00'}], + groupby=['project_id'], + period=7200) + groupby_keys_set = set(x for sub_dict in data + for x in sub_dict['groupby'].keys()) + groupby_vals_set = set(x for sub_dict in data + for x in sub_dict['groupby'].values()) + self.assertEqual(set(['project_id']), groupby_keys_set) + self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) + period_start_set = set(sub_dict['period_start'] for sub_dict in data) + period_start_valid = set([u'2013-08-01T10:00:00', + u'2013-08-01T14:00:00', + u'2013-08-01T16:00:00']) + self.assertEqual(period_start_valid, period_start_set) + + for r in data: + grp = r['groupby'] + period_start = r['period_start'] + if (grp == {'project_id': 'project-1'} and + period_start == u'2013-08-01T10:00:00'): + self.assertEqual(2, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(1, r['min']) + self.assertEqual(1, r['max']) + self.assertEqual(2, r['sum']) + self.assertEqual(1, r['avg']) + self.assertEqual(1740, r['duration']) + self.assertEqual(u'2013-08-01T10:11:00', r['duration_start']) + self.assertEqual(u'2013-08-01T10:40:00', r['duration_end']) + self.assertEqual(7200, r['period']) + self.assertEqual(u'2013-08-01T12:00:00', r['period_end']) + elif (grp == {'project_id': 'project-1'} and + period_start == u'2013-08-01T14:00:00'): + self.assertEqual(1, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(2, r['min']) + self.assertEqual(2, r['max']) + self.assertEqual(2, r['sum']) + self.assertEqual(2, r['avg']) + self.assertEqual(0, r['duration']) + self.assertEqual(u'2013-08-01T14:59:00', r['duration_start']) + self.assertEqual(u'2013-08-01T14:59:00', r['duration_end']) + self.assertEqual(7200, r['period']) + self.assertEqual(u'2013-08-01T16:00:00', r['period_end']) + elif (grp == {'project_id': 'project-2'} and + period_start == u'2013-08-01T16:00:00'): + self.assertEqual(1, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(4, r['min']) + self.assertEqual(4, r['max']) + self.assertEqual(4, r['sum']) + self.assertEqual(4, r['avg']) + self.assertEqual(0, r['duration']) + self.assertEqual(u'2013-08-01T17:28:00', r['duration_start']) + self.assertEqual(u'2013-08-01T17:28:00', r['duration_end']) + self.assertEqual(7200, r['period']) + self.assertEqual(u'2013-08-01T18:00:00', r['period_end']) + else: + self.assertNotEqual([grp, period_start], + [{'project_id': 'project-1'}, + u'2013-08-01T16:00:00']) + self.assertNotEqual([grp, period_start], + [{'project_id': 'project-2'}, + u'2013-08-01T10:00:00']) + self.assertNotEqual([grp, period_start], + [{'project_id': 'project-2'}, + u'2013-08-01T14:00:00']) + + +@tests_db.run_with('mongodb', 'hbase', 'db2') +class TestGroupBySource(v2.FunctionalTest, + tests_db.MixinTestsWithBackendScenarios): + + # FIXME(terriyu): We have to put test_group_by_source in its own class + # because SQLAlchemy currently doesn't support group by source statistics. + # When group by source is supported in SQLAlchemy, this test should be + # moved to TestGroupByInstance with all the other group by statistics + # tests. 
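The @tests_db.run_with decorator above is what keeps this class off the sqlalchemy scenario. As a rough sketch of that gating pattern (the decorator and attribute names here are illustrative assumptions, not the exact ceilometer implementation), the decorator only records the supported backends and a setUp hook skips the test when the active scenario is not listed:

    import unittest

    def run_with(*backends):
        """Tag a test class with the storage backends it supports."""
        def decorator(cls):
            cls._supported_backends = backends
            return cls
        return decorator

    @run_with('mongodb', 'hbase', 'db2')
    class GroupBySourceScenario(unittest.TestCase):
        backend = 'sqlalchemy'  # a scenario runner would set this per backend

        def setUp(self):
            super(GroupBySourceScenario, self).setUp()
            if self.backend not in self._supported_backends:
                self.skipTest('group by source is not supported on %s'
                              % self.backend)

        def test_noop(self):
            pass  # would be skipped under the sqlalchemy scenario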
+ + PATH = '/meters/instance/statistics' + + def setUp(self): + super(TestGroupBySource, self).setUp() + + test_sample_data = ( + {'volume': 2, 'user': 'user-1', 'project': 'project-1', + 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10), + 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1', + 'source': 'source-2'}, + {'volume': 2, 'user': 'user-1', 'project': 'project-2', + 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 15, 37), + 'metadata_flavor': 'm1.large', 'metadata_event': 'event-1', + 'source': 'source-2'}, + {'volume': 1, 'user': 'user-2', 'project': 'project-1', + 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 11), + 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', + 'source': 'source-1'}, + {'volume': 1, 'user': 'user-2', 'project': 'project-1', + 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40), + 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', + 'source': 'source-1'}, + {'volume': 2, 'user': 'user-2', 'project': 'project-1', + 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 14, 59), + 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', + 'source': 'source-1'}, + {'volume': 4, 'user': 'user-2', 'project': 'project-2', + 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28), + 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', + 'source': 'source-1'}, + {'volume': 4, 'user': 'user-3', 'project': 'project-1', + 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22), + 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', + 'source': 'source-3'}, + ) + + for test_sample in test_sample_data: + c = sample.Sample( + 'instance', + sample.TYPE_CUMULATIVE, + unit='s', + volume=test_sample['volume'], + user_id=test_sample['user'], + project_id=test_sample['project'], + resource_id=test_sample['resource'], + timestamp=datetime.datetime(*test_sample['timestamp']), + resource_metadata={'flavor': test_sample['metadata_flavor'], + 'event': test_sample['metadata_event'], }, + source=test_sample['source'], + ) + msg = utils.meter_message_from_counter( + c, self.CONF.publisher.telemetry_secret, + ) + self.conn.record_metering_data(msg) + + def tearDown(self): + self.conn.clear() + super(TestGroupBySource, self).tearDown() + + def test_group_by_source(self): + data = self.get_json(self.PATH, groupby=['source']) + groupby_keys_set = set(x for sub_dict in data + for x in sub_dict['groupby'].keys()) + groupby_vals_set = set(x for sub_dict in data + for x in sub_dict['groupby'].values()) + self.assertEqual(set(['source']), groupby_keys_set) + self.assertEqual(set(['source-1', 'source-2', 'source-3']), + groupby_vals_set) + + for r in data: + grp = r['groupby'] + if grp == {'source': 'source-1'}: + self.assertEqual(4, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(1, r['min']) + self.assertEqual(4, r['max']) + self.assertEqual(8, r['sum']) + self.assertEqual(2, r['avg']) + elif grp == {'source': 'source-2'}: + self.assertEqual(2, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(2, r['min']) + self.assertEqual(2, r['max']) + self.assertEqual(4, r['sum']) + self.assertEqual(2, r['avg']) + elif grp == {'source': 'source-3'}: + self.assertEqual(1, r['count']) + self.assertEqual('s', r['unit']) + self.assertEqual(4, r['min']) + self.assertEqual(4, r['max']) + self.assertEqual(4, r['sum']) + self.assertEqual(4, r['avg']) + + +class TestSelectableAggregates(v2.FunctionalTest, + tests_db.MixinTestsWithBackendScenarios): + + PATH = '/meters/instance/statistics' + + def 
setUp(self): + super(TestSelectableAggregates, self).setUp() + + test_sample_data = ( + {'volume': 2, 'user': 'user-1', 'project': 'project-1', + 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10), + 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1', + 'source': 'source'}, + {'volume': 2, 'user': 'user-2', 'project': 'project-2', + 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 15, 37), + 'metadata_flavor': 'm1.large', 'metadata_event': 'event-1', + 'source': 'source'}, + {'volume': 1, 'user': 'user-2', 'project': 'project-2', + 'resource': 'resource-5', 'timestamp': (2013, 8, 1, 10, 11), + 'metadata_flavor': 'm1.medium', 'metadata_event': 'event-2', + 'source': 'source'}, + {'volume': 2, 'user': 'user-1', 'project': 'project-1', + 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40), + 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', + 'source': 'source'}, + {'volume': 2, 'user': 'user-2', 'project': 'project-2', + 'resource': 'resource-4', 'timestamp': (2013, 8, 1, 14, 59), + 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', + 'source': 'source'}, + {'volume': 5, 'user': 'user-1', 'project': 'project-1', + 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28), + 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', + 'source': 'source'}, + {'volume': 4, 'user': 'user-2', 'project': 'project-2', + 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22), + 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', + 'source': 'source'}, + {'volume': 9, 'user': 'user-3', 'project': 'project-3', + 'resource': 'resource-4', 'timestamp': (2013, 8, 1, 11, 59), + 'metadata_flavor': 'm1.large', 'metadata_event': 'event-3', + 'source': 'source'}, + ) + + for test_sample in test_sample_data: + c = sample.Sample( + 'instance', + sample.TYPE_GAUGE, + unit='instance', + volume=test_sample['volume'], + user_id=test_sample['user'], + project_id=test_sample['project'], + resource_id=test_sample['resource'], + timestamp=datetime.datetime(*test_sample['timestamp']), + resource_metadata={'flavor': test_sample['metadata_flavor'], + 'event': test_sample['metadata_event'], }, + source=test_sample['source'], + ) + msg = utils.meter_message_from_counter( + c, self.CONF.publisher.telemetry_secret, + ) + self.conn.record_metering_data(msg) + + def _do_test_per_tenant_selectable_standard_aggregate(self, + aggregate, + expected_values): + agg_args = {'aggregate.func': aggregate} + data = self.get_json(self.PATH, groupby=['project_id'], **agg_args) + groupby_keys_set = set(x for sub_dict in data + for x in sub_dict['groupby'].keys()) + groupby_vals_set = set(x for sub_dict in data + for x in sub_dict['groupby'].values()) + self.assertEqual(set(['project_id']), groupby_keys_set) + projects = ['project-1', 'project-2', 'project-3'] + self.assertEqual(set(projects), groupby_vals_set) + + standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg']) + for r in data: + grp = r['groupby'] + for project in projects: + if grp == {'project_id': project}: + expected = expected_values[projects.index(project)] + self.assertEqual('instance', r['unit']) + self.assertAlmostEqual(r[aggregate], expected) + self.assertIn('aggregate', r) + self.assertIn(aggregate, r['aggregate']) + self.assertAlmostEqual(r['aggregate'][aggregate], expected) + for a in standard_aggregates - set([aggregate]): + self.assertNotIn(a, r) + + def test_per_tenant_selectable_max(self): + self._do_test_per_tenant_selectable_standard_aggregate('max', + [5, 4, 9]) + + def 
test_per_tenant_selectable_min(self): + self._do_test_per_tenant_selectable_standard_aggregate('min', + [2, 1, 9]) + + def test_per_tenant_selectable_sum(self): + self._do_test_per_tenant_selectable_standard_aggregate('sum', + [9, 9, 9]) + + def test_per_tenant_selectable_avg(self): + self._do_test_per_tenant_selectable_standard_aggregate('avg', + [3, 2.25, 9]) + + def test_per_tenant_selectable_count(self): + self._do_test_per_tenant_selectable_standard_aggregate('count', + [3, 4, 1]) + + def test_per_tenant_selectable_parameterized_aggregate(self): + agg_args = {'aggregate.func': 'cardinality', + 'aggregate.param': 'resource_id'} + data = self.get_json(self.PATH, groupby=['project_id'], **agg_args) + groupby_keys_set = set(x for sub_dict in data + for x in sub_dict['groupby'].keys()) + groupby_vals_set = set(x for sub_dict in data + for x in sub_dict['groupby'].values()) + self.assertEqual(set(['project_id']), groupby_keys_set) + projects = ['project-1', 'project-2', 'project-3'] + self.assertEqual(set(projects), groupby_vals_set) + + aggregate = 'cardinality/resource_id' + expected_values = [2.0, 3.0, 1.0] + standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg']) + for r in data: + grp = r['groupby'] + for project in projects: + if grp == {'project_id': project}: + expected = expected_values[projects.index(project)] + self.assertEqual('instance', r['unit']) + self.assertNotIn(aggregate, r) + self.assertIn('aggregate', r) + self.assertIn(aggregate, r['aggregate']) + self.assertEqual(expected, r['aggregate'][aggregate]) + for a in standard_aggregates: + self.assertNotIn(a, r) + + def test_large_quantum_selectable_parameterized_aggregate(self): + # add a large number of datapoints that won't affect cardinality + # if the computation logic is tolerant of different DB behavior on + # larger numbers of samples per-period + for i in range(200): + s = sample.Sample( + 'instance', + sample.TYPE_GAUGE, + unit='instance', + volume=i * 1.0, + user_id='user-1', + project_id='project-1', + resource_id='resource-1', + timestamp=datetime.datetime(2013, 8, 1, 11, i % 60), + resource_metadata={'flavor': 'm1.tiny', + 'event': 'event-1', }, + source='source', + ) + msg = utils.meter_message_from_counter( + s, self.CONF.publisher.telemetry_secret, + ) + self.conn.record_metering_data(msg) + + agg_args = {'aggregate.func': 'cardinality', + 'aggregate.param': 'resource_id'} + data = self.get_json(self.PATH, **agg_args) + + aggregate = 'cardinality/resource_id' + expected_value = 5.0 + standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg']) + r = data[0] + self.assertNotIn(aggregate, r) + self.assertIn('aggregate', r) + self.assertIn(aggregate, r['aggregate']) + self.assertEqual(expected_value, r['aggregate'][aggregate]) + for a in standard_aggregates: + self.assertNotIn(a, r) + + def test_repeated_unparameterized_aggregate(self): + agg_params = 'aggregate.func=count&aggregate.func=count' + data = self.get_json(self.PATH, override_params=agg_params) + + aggregate = 'count' + expected_value = 8.0 + standard_aggregates = set(['min', 'max', 'sum', 'avg']) + r = data[0] + self.assertIn(aggregate, r) + self.assertEqual(expected_value, r[aggregate]) + self.assertIn('aggregate', r) + self.assertIn(aggregate, r['aggregate']) + self.assertEqual(expected_value, r['aggregate'][aggregate]) + for a in standard_aggregates: + self.assertNotIn(a, r) + + def test_fully_repeated_parameterized_aggregate(self): + agg_params = ('aggregate.func=cardinality&' + 'aggregate.param=resource_id&' +
'aggregate.func=cardinality&' + 'aggregate.param=resource_id&') + data = self.get_json(self.PATH, override_params=agg_params) + + aggregate = 'cardinality/resource_id' + expected_value = 5.0 + standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg']) + r = data[0] + self.assertIn('aggregate', r) + self.assertNotIn(aggregate, r) + self.assertIn(aggregate, r['aggregate']) + self.assertEqual(expected_value, r['aggregate'][aggregate]) + for a in standard_aggregates: + self.assertNotIn(a, r) + + def test_partially_repeated_parameterized_aggregate(self): + agg_params = ('aggregate.func=cardinality&' + 'aggregate.param=resource_id&' + 'aggregate.func=cardinality&' + 'aggregate.param=project_id&') + data = self.get_json(self.PATH, override_params=agg_params) + + expected_values = {'cardinality/resource_id': 5.0, + 'cardinality/project_id': 3.0} + standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg']) + r = data[0] + self.assertIn('aggregate', r) + for aggregate in expected_values.keys(): + self.assertNotIn(aggregate, r) + self.assertIn(aggregate, r['aggregate']) + self.assertEqual(expected_values[aggregate], + r['aggregate'][aggregate]) + for a in standard_aggregates: + self.assertNotIn(a, r) + + def test_bad_selectable_parameterized_aggregate(self): + agg_args = {'aggregate.func': 'cardinality', + 'aggregate.param': 'injection_attack'} + resp = self.get_json(self.PATH, status=[400], + groupby=['project_id'], **agg_args) + self.assertIn('error_message', resp) + self.assertEqual(resp['error_message'].get('faultcode'), + 'Client') + self.assertEqual(resp['error_message'].get('faultstring'), + 'Bad aggregate: cardinality.injection_attack') + + +@tests_db.run_with('mongodb', 'hbase', 'db2') +class TestUnparameterizedAggregates(v2.FunctionalTest, + tests_db.MixinTestsWithBackendScenarios): + + # We put the stddev test case in a separate class so that we + # can easily exclude the sqlalchemy scenario, as sqlite doesn't + # support the stddev_pop function and fails ungracefully with + # OperationalError when it is used. However we still want to + # test the corresponding functionality in the mongo driver. + # For hbase & db2, the skip on NotImplementedError logic works + # in the usual way. 
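The stddev expectations asserted at the end of this class follow directly from the population standard deviation of each project's sample volumes defined in setUp below. A quick standalone check (plain Python, independent of any storage driver):

    import math

    def stddev_pop(values):
        # Population standard deviation, the same quantity as SQL's stddev_pop.
        mean = sum(values) / float(len(values))
        return math.sqrt(sum((v - mean) ** 2 for v in values) / len(values))

    print(round(stddev_pop([2, 2, 5]), 4))     # project-1 -> 1.4142
    print(round(stddev_pop([2, 1, 2, 4]), 4))  # project-2 -> 1.0897
    print(round(stddev_pop([9]), 4))           # project-3 -> 0.0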
+ + PATH = '/meters/instance/statistics' + + def setUp(self): + super(TestUnparameterizedAggregates, self).setUp() + + test_sample_data = ( + {'volume': 2, 'user': 'user-1', 'project': 'project-1', + 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10), + 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1', + 'source': 'source'}, + {'volume': 2, 'user': 'user-2', 'project': 'project-2', + 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 15, 37), + 'metadata_flavor': 'm1.large', 'metadata_event': 'event-1', + 'source': 'source'}, + {'volume': 1, 'user': 'user-2', 'project': 'project-2', + 'resource': 'resource-5', 'timestamp': (2013, 8, 1, 10, 11), + 'metadata_flavor': 'm1.medium', 'metadata_event': 'event-2', + 'source': 'source'}, + {'volume': 2, 'user': 'user-1', 'project': 'project-1', + 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40), + 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', + 'source': 'source'}, + {'volume': 2, 'user': 'user-2', 'project': 'project-2', + 'resource': 'resource-4', 'timestamp': (2013, 8, 1, 14, 59), + 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', + 'source': 'source'}, + {'volume': 5, 'user': 'user-1', 'project': 'project-1', + 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28), + 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', + 'source': 'source'}, + {'volume': 4, 'user': 'user-2', 'project': 'project-2', + 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22), + 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', + 'source': 'source'}, + {'volume': 9, 'user': 'user-3', 'project': 'project-3', + 'resource': 'resource-4', 'timestamp': (2013, 8, 1, 11, 59), + 'metadata_flavor': 'm1.large', 'metadata_event': 'event-3', + 'source': 'source'}, + ) + + for test_sample in test_sample_data: + c = sample.Sample( + 'instance', + sample.TYPE_GAUGE, + unit='instance', + volume=test_sample['volume'], + user_id=test_sample['user'], + project_id=test_sample['project'], + resource_id=test_sample['resource'], + timestamp=datetime.datetime(*test_sample['timestamp']), + resource_metadata={'flavor': test_sample['metadata_flavor'], + 'event': test_sample['metadata_event'], }, + source=test_sample['source'], + ) + msg = utils.meter_message_from_counter( + c, self.CONF.publisher.telemetry_secret, + ) + self.conn.record_metering_data(msg) + + def test_per_tenant_selectable_unparameterized_aggregate(self): + agg_args = {'aggregate.func': 'stddev'} + data = self.get_json(self.PATH, groupby=['project_id'], **agg_args) + groupby_keys_set = set(x for sub_dict in data + for x in sub_dict['groupby'].keys()) + groupby_vals_set = set(x for sub_dict in data + for x in sub_dict['groupby'].values()) + self.assertEqual(set(['project_id']), groupby_keys_set) + projects = ['project-1', 'project-2', 'project-3'] + self.assertEqual(set(projects), groupby_vals_set) + + aggregate = 'stddev' + expected_values = [1.4142, 1.0897, 0.0] + standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg']) + for r in data: + grp = r['groupby'] + for project in projects: + if grp == {'project_id': project}: + expected = expected_values[projects.index(project)] + self.assertEqual('instance', r['unit']) + self.assertNotIn(aggregate, r) + self.assertIn('aggregate', r) + self.assertIn(aggregate, r['aggregate']) + self.assertAlmostEqual(r['aggregate'][aggregate], + expected, + places=4) + for a in standard_aggregates: + self.assertNotIn(a, r) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/fixtures.py 
ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/fixtures.py --- ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/fixtures.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/fixtures.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,153 @@ +# +# Copyright 2015 Red Hat. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Fixtures used during Gabbi-based test runs.""" + +import datetime +import os +import random +from unittest import case +import uuid + +from gabbi import fixture +from oslo_config import fixture as fixture_config +from oslo_policy import opts + +from ceilometer.event.storage import models +from ceilometer.publisher import utils +from ceilometer import sample +from ceilometer import service +from ceilometer import storage + + +# TODO(chdent): For now only MongoDB is supported, because of easy +# database name handling and intentional focus on the API, not the +# data store. +ENGINES = ['MONGODB'] + + +class ConfigFixture(fixture.GabbiFixture): + """Establish the relevant configuration for a test run.""" + + def start_fixture(self): + """Set up config.""" + + self.conf = None + + # Determine the database connection. + db_url = None + for engine in ENGINES: + try: + db_url = os.environ['CEILOMETER_TEST_%s_URL' % engine] + except KeyError: + pass + if db_url is None: + raise case.SkipTest('No database connection configured') + + service.prepare_service(argv=[], config_files=[]) + conf = fixture_config.Config().conf + self.conf = conf + opts.set_defaults(self.conf) + conf.import_group('api', 'ceilometer.api.controllers.v2.root') + conf.import_opt('store_events', 'ceilometer.notification', + group='notification') + conf.set_override('policy_file', + os.path.abspath('etc/ceilometer/policy.json'), + group='oslo_policy') + + # A special pipeline is required to use the direct publisher. 
+ conf.set_override('pipeline_cfg_file', + 'etc/ceilometer/gabbi_pipeline.yaml') + + database_name = '%s-%s' % (db_url, str(uuid.uuid4())) + conf.set_override('connection', database_name, group='database') + conf.set_override('metering_connection', '', group='database') + conf.set_override('event_connection', '', group='database') + conf.set_override('alarm_connection', '', group='database') + + conf.set_override('pecan_debug', True, group='api') + conf.set_override('gnocchi_is_enabled', False, group='api') + conf.set_override('aodh_is_enabled', False, group='api') + + conf.set_override('store_events', True, group='notification') + + def stop_fixture(self): + """Reset the config and remove data.""" + if self.conf: + storage.get_connection_from_config(self.conf).clear() + self.conf.reset() + + +class SampleDataFixture(fixture.GabbiFixture): + """Instantiate some sample data for use in testing.""" + + def start_fixture(self): + """Create some samples.""" + conf = fixture_config.Config().conf + self.conn = storage.get_connection_from_config(conf) + timestamp = datetime.datetime.utcnow() + project_id = str(uuid.uuid4()) + self.source = str(uuid.uuid4()) + resource_metadata = {'farmed_by': 'nancy'} + + for name in ['cow', 'pig', 'sheep']: + resource_metadata.update({'breed': name}) + c = sample.Sample(name='livestock', + type='gauge', + unit='head', + volume=int(10 * random.random()), + user_id='farmerjon', + project_id=project_id, + resource_id=project_id, + timestamp=timestamp, + resource_metadata=resource_metadata, + source=self.source) + data = utils.meter_message_from_counter( + c, conf.publisher.telemetry_secret) + self.conn.record_metering_data(data) + + def stop_fixture(self): + """Destroy the samples.""" + # NOTE(chdent): print here for the sake of info during testing. + # This will go away eventually. + print('resource', + self.conn.db.resource.remove({'source': self.source})) + print('meter', self.conn.db.meter.remove({'source': self.source})) + + +class EventDataFixture(fixture.GabbiFixture): + """Instantiate some sample event data for use in testing.""" + + def start_fixture(self): + """Create some events.""" + conf = fixture_config.Config().conf + self.conn = storage.get_connection_from_config(conf, 'event') + events = [] + name_list = ['chocolate.chip', 'peanut.butter', 'sugar'] + for ix, name in enumerate(name_list): + timestamp = datetime.datetime.utcnow() + message_id = 'fea1b15a-1d47-4175-85a5-a4bb2c72924{}'.format(ix) + traits = [models.Trait('type', 1, name), + models.Trait('ate', 2, ix)] + event = models.Event(message_id, + 'cookies_{}'.format(name), + timestamp, + traits, {'nested': {'inside': 'value'}}) + events.append(event) + self.conn.record_events(events) + + def stop_fixture(self): + """Destroy the events.""" + self.conn.db.event.remove({'event_type': {'$regex': '^cookies_'}}) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/gabbits/alarms.yaml ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/gabbits/alarms.yaml --- ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/gabbits/alarms.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/gabbits/alarms.yaml 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,139 @@ +# Requests to cover the basic endpoints for alarms.
+ +fixtures: + - ConfigFixture + +tests: +- name: list alarms none + desc: Lists alarms, none yet exist + url: /v2/alarms + method: GET + response_strings: + - "[]" + +- name: try to PUT an alarm + desc: what does PUT do + url: /v2/alarms + method: PUT + request_headers: + content-type: application/json + data: + name: added_alarm_defaults2 + type: threshold + threshold_rule: + meter_name: ameter + threshold: 300.0 + status: 405 + response_headers: + allow: GET, POST + +# TODO(chdent): A POST should return a location header. +- name: createAlarm + xfail: true + desc: Creates an alarm. + url: /v2/alarms + method: POST + request_headers: + content-type: application/json + data: + ok_actions: null + name: added_alarm_defaults + type: threshold + threshold_rule: + meter_name: ameter + threshold: 300.0 + status: 201 + response_headers: + location: /$SCHEME://$NETLOC/v2/alarms/ + content-type: application/json; charset=UTF-8 + response_json_paths: + $.severity: low + $.threshold_rule.threshold: 300.0 + $.threshold_rule.comparison_operator: eq + +- name: showAlarm + desc: Shows information for a specified alarm. + url: /v2/alarms/$RESPONSE['$.alarm_id'] + method: GET + response_json_paths: + $.severity: low + $.alarm_id: $RESPONSE['$.alarm_id'] + $.threshold_rule.threshold: 300.0 + $.threshold_rule.comparison_operator: eq + response_headers: + content-type: application/json; charset=UTF-8 + +- name: updateAlarm + desc: Updates a specified alarm. + url: /v2/alarms/$RESPONSE['$.alarm_id'] + method: PUT + request_headers: + content-type: application/json + data: + name: added_alarm_defaults + type: threshold + severity: moderate + threshold_rule: + meter_name: ameter + threshold: 200.0 +# TODO(chdent): why do we have a response, why not status: 204? +# status: 204 + response_json_paths: + $.threshold_rule.threshold: 200.0 + $.severity: moderate + $.state: insufficient data + +- name: showAlarmHistory + desc: Assembles the history for a specified alarm. + url: /v2/alarms/$RESPONSE['$.alarm_id']/history?q.field=type&q.op=eq&q.value=rule%20change + method: GET + response_json_paths: + $[0].type: rule change + +- name: updateAlarmState + desc: Sets the state of a specified alarm. + url: /v2/alarms/$RESPONSE['$[0].alarm_id']/state + request_headers: + content-type: application/json + data: '"alarm"' + method: PUT +# TODO(chdent): really? Of what possible use is this? + response_json_paths: + $: alarm + +# Get a list of alarms so we can extract an id for the next test +- name: list alarms for data + desc: Lists alarms, only one + url: /v2/alarms + method: GET + response_json_paths: + $[0].name: added_alarm_defaults + +- name: showAlarmState + desc: Gets the state of a specified alarm. + url: /v2/alarms/$RESPONSE['$[0].alarm_id']/state + method: GET + response_headers: + content-type: application/json; charset=UTF-8 + response_json_paths: + $: alarm + +- name: list alarms one + desc: Lists alarms, only one + url: /v2/alarms + method: GET + response_json_paths: + $[0].name: added_alarm_defaults + +- name: deleteAlarm + desc: Deletes a specified alarm. 
+ url: /v2/alarms/$RESPONSE['$[0].alarm_id'] + method: DELETE + status: 204 + +- name: list alarms none end + desc: Lists alarms, none now exist + url: /v2/alarms + method: GET + response_strings: + - "[]" diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/gabbits/api_events_no_data.yaml ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/gabbits/api_events_no_data.yaml --- ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/gabbits/api_events_no_data.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/gabbits/api_events_no_data.yaml 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,218 @@ +# These tests run against the Events API with no data preloaded into the +# datastore. This allows us to verify that requests are still processed +# normally even if data is missing for that endpoint. +fixtures: +- ConfigFixture + +tests: + +# this attempts to get all the events and expects an empty list back +- name: get all events + url: /v2/events + request_headers: + X-Roles: admin + X-User-Id: user1 + X-Project-Id: project1 + response_headers: + content-type: application/json; charset=UTF-8 + content-location: $SCHEME://$NETLOC/v2/events + response_strings: + - "[]" + +# this attempts to get all the events with no role/user/project +# info in header and expects a 403 +- name: get events with bad headers + url: /v2/events + status: 403 + +# this attempts to get all the events with no user/project +# info in header and expects a 403 +- name: get events with admin only header + url: /v2/events + request_headers: + X-Roles: admin + status: 403 + +# this attempts to get all the events with no project +# info in header and expects a 403 +- name: get events with no project header + url: /v2/events + request_headers: + X-Roles: admin + X-User-Id: user1 + status: 403 + +# this attempts to get all the events with no user +# info in header and expects a 403 +- name: get events with no user header + url: /v2/events + request_headers: + X-Roles: admin + X-Project-Id: project1 + status: 403 + +# this attempts to get all the events with invalid parameters and expects a 400 +- name: get events with bad params + url: /v2/events?bad_Stuff_here + request_headers: + X-Roles: admin + X-User-Id: user1 + X-Project-Id: project1 + status: 400 + +# this attempts to query the events with the correct parameterized query syntax +# and expects an empty list +- name: get events that match query + url: /v2/events?q.field=event_type&q.op=eq&q.type=string&q.value=cookies_chocolate.chip + request_headers: + X-Roles: admin + X-User-Id: user1 + X-Project-Id: project1 + response_headers: + content-type: application/json; charset=UTF-8 + content-location: $SCHEME://$NETLOC/v2/events?q.field=event_type&q.op=eq&q.type=string&q.value=cookies_chocolate.chip + response_strings: + - "[]" + +# this attempts to query the events with the correct data query syntax and +# expects an empty list +- name: get events that match query via request data + url: /v2/events + request_headers: + content-type: application/json; charset=UTF-8 + X-Roles: admin + X-User-Id: user1 + X-Project-Id: project1 + data: + q: + - field: event_type + op: eq + type: string + value: cookies_chocolate.chip + response_headers: + content-type: application/json; charset=UTF-8 + content-location: $SCHEME://$NETLOC/v2/events + response_strings: + - "[]" + +# this attempts to query the events with the correct parameterized query syntax +# but a bad field name and expects an empty list +- name: get events that match bad
query + url: /v2/events?q.field=bad_field&q.op=eq&q.type=string&q.value=cookies_chocolate.chip + request_headers: + X-Roles: admin + X-User-Id: user1 + X-Project-Id: project1 + response_headers: + content-type: application/json; charset=UTF-8 + content-location: $SCHEME://$NETLOC/v2/events?q.field=bad_field&q.op=eq&q.type=string&q.value=cookies_chocolate.chip + response_strings: + - "[]" + +# this attempts to query the events with the correct data query syntax and +# a bad field name and expects an empty list +- name: get events that match bad query via request data + url: /v2/events + request_headers: + content-type: application/json; charset=UTF-8 + X-Roles: admin + X-User-Id: user1 + X-Project-Id: project1 + data: + q: + - field: bad_field + op: eq + type: string + value: cookies_chocolate.chip + response_headers: + content-type: application/json; charset=UTF-8 + content-location: $SCHEME://$NETLOC/v2/events + response_strings: + - "[]" + +# this attempts to query the events with the wrong data query syntax missing the +# q object but supplying the field list and a bad field name and expects a 400 +- name: get events that match bad query via request data malformed list + url: /v2/events + request_headers: + content-type: application/json; charset=UTF-8 + X-Roles: admin + X-User-Id: user1 + X-Project-Id: project1 + data: + - field: bad_field + op: eq + type: string + value: cookies_chocolate.chip + status: 400 + +# this attempts to query the events with the wrong data query syntax missing the +# q object but supplying the field list along with a bad content-type. Should +# return a 415 +- name: get events that match bad query via request data wrong type + url: /v2/events + request_headers: + content-type: text/plain + X-Roles: admin + X-User-Id: user1 + X-Project-Id: project1 + data: + "field: bad_field op: eq type: string value: cookies_chocolate.chip xfail: True" + status: 415 + +# Get a single event by message_id; no data is present, so it should return a 404 +- name: get a single event + url: /v2/events/fea1b15a-1d47-4175-85a5-a4bb2c729240 + request_headers: + X-Roles: admin + X-User-Id: user1 + X-Project-Id: project1 + status: 404 + +# Get all the event types should return an empty list +- name: get all event types + url: /v2/event_types + request_headers: + X-Roles: admin + X-User-Id: user1 + X-Project-Id: project1 + response_headers: + content-type: application/json; charset=UTF-8 + content-location: $SCHEME://$NETLOC/v2/event_types + response_strings: + - "[]" + +# Get a single event type by name, this API is unused and should return a 404 +- name: get event types for good event_type unused api + url: /v2/event_types/cookies_chocolate.chip + request_headers: + X-Roles: admin + X-User-Id: user1 + X-Project-Id: project1 + status: 404 + +# Get all traits for an event type should return an empty list +- name: get all traits for event type + url: /v2/event_types/cookies_chocolate.chip/traits + request_headers: + X-Roles: admin + X-User-Id: user1 + X-Project-Id: project1 + response_headers: + content-type: application/json; charset=UTF-8 + content-location: $SCHEME://$NETLOC/v2/event_types/cookies_chocolate.chip/traits + response_strings: + - "[]" + +# Get all traits named ate for an event type should return an empty list +- name: get all traits named ate for event type + url: /v2/event_types/cookies_chocolate.chip/traits/ate + request_headers: + X-Roles: admin + X-User-Id: user1 + X-Project-Id: project1 + response_headers: + content-type: application/json; charset=UTF-8 + content-location:
$SCHEME://$NETLOC/v2/event_types/cookies_chocolate.chip/traits/ate + response_strings: + - "[]" diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/gabbits/api_events_with_data.yaml ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/gabbits/api_events_with_data.yaml --- ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/gabbits/api_events_with_data.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/gabbits/api_events_with_data.yaml 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,254 @@ +# These tests run against the Events API with data preloaded into the datastore. +fixtures: +- ConfigFixture +- EventDataFixture + +tests: + +# this attempts to get all the events and checks to make sure they are valid +- name: get all events + url: /v2/events + request_headers: + X-Roles: admin + X-User-Id: user1 + X-Project-Id: project1 + response_headers: + content-type: application/json; charset=UTF-8 + content-location: $SCHEME://$NETLOC/v2/events + response_json_paths: + $.[0].event_type: cookies_chocolate.chip + $.[0].traits.[0].value: chocolate.chip + $.[0].traits.[1].value: '0' + $.[0].raw.nested.inside: value + $.[1].event_type: cookies_peanut.butter + $.[1].traits.[0].name: type + $.[1].traits.[1].name: ate + $.[1].raw.nested.inside: value + $.[2].event_type: cookies_sugar + $.[2].traits.[0].type: string + $.[2].traits.[1].type: integer + $.[2].raw.nested.inside: value + +# this attempts to get all the events with invalid parameters and expects a 400 +- name: get events with bad params + url: /v2/events?bad_Stuff_here + request_headers: + X-Roles: admin + X-User-Id: user1 + X-Project-Id: project1 + status: 400 + +# this attempts to query the events with the correct parameterized query syntax +# and expects a matching event +- name: get events that match query + url: /v2/events?q.field=event_type&q.op=eq&q.type=string&q.value=cookies_chocolate.chip + request_headers: + X-Roles: admin + X-User-Id: user1 + X-Project-Id: project1 + response_headers: + content-type: application/json; charset=UTF-8 + content-location: $SCHEME://$NETLOC/v2/events?q.field=event_type&q.op=eq&q.type=string&q.value=cookies_chocolate.chip + response_json_paths: + $.[0].event_type: cookies_chocolate.chip + $.[0].traits.[0].value: chocolate.chip + +# this attempts to query the events with the correct data query syntax and +# expects a matching event +- name: get events that match query via data + url: /v2/events + request_headers: + content-type: application/json; charset=UTF-8 + X-Roles: admin + X-User-Id: user1 + X-Project-Id: project1 + data: + q: + - field: event_type + op: eq + type: string + value: cookies_chocolate.chip + response_headers: + content-type: application/json; charset=UTF-8 + content-location: $SCHEME://$NETLOC/v2/events + response_json_paths: + $.[0].event_type: cookies_chocolate.chip + $.[0].traits.[0].value: chocolate.chip + +# this attempts to query the events with the correct parameterized query syntax +# but a bad field name and expects an empty list +- name: get events that match bad query + url: /v2/events?q.field=bad_field&q.op=eq&q.type=string&q.value=cookies_chocolate.chip + request_headers: + X-Roles: admin + X-User-Id: user1 + X-Project-Id: project1 + response_headers: + content-type: application/json; charset=UTF-8 + content-location: $SCHEME://$NETLOC/v2/events?q.field=bad_field&q.op=eq&q.type=string&q.value=cookies_chocolate.chip + response_strings: + - "[]" + +# this attempts to query the events with the correct data query
syntax and +# a bad field name and expects an empty list +- name: get events that match bad query via data + url: /v2/events + request_headers: + content-type: application/json; charset=UTF-8 + X-Roles: admin + X-User-Id: user1 + X-Project-Id: project1 + data: + q: + - field: bad_field + op: eq + type: string + value: cookies_chocolate.chip + response_headers: + content-type: application/json; charset=UTF-8 + content-location: $SCHEME://$NETLOC/v2/events + response_strings: + - "[]" + +# this attempts to query the events with the wrong data query syntax missing the +# q object but supplying the field list and a bad field name and expects a 400 +- name: get events that match bad query via data list + url: /v2/events + request_headers: + content-type: application/json; charset=UTF-8 + X-Roles: admin + X-User-Id: user1 + X-Project-Id: project1 + data: + - field: bad_field + op: eq + type: string + value: cookies_chocolate.chip + status: 400 + +# Get a single event by message_id should return an event +- name: get a single event + url: /v2/events/fea1b15a-1d47-4175-85a5-a4bb2c729240 + request_headers: + X-Roles: admin + X-User-Id: user1 + X-Project-Id: project1 + response_headers: + content-type: application/json; charset=UTF-8 + content-location: $SCHEME://$NETLOC/v2/events/fea1b15a-1d47-4175-85a5-a4bb2c729240 + response_json_paths: + $.event_type: cookies_chocolate.chip + $.traits.[0].value: chocolate.chip + $.traits.[1].value: '0' + +# Get a single event by an unknown message_id; it should return a 404 +- name: get a single event that does not exist + url: /v2/events/bad-id + request_headers: + X-Roles: admin + X-User-Id: user1 + X-Project-Id: project1 + status: 404 + +# Get all the event types should return a list of event types +- name: get all event types + url: /v2/event_types + request_headers: + X-Roles: admin + X-User-Id: user1 + X-Project-Id: project1 + response_headers: + content-type: application/json; charset=UTF-8 + content-location: $SCHEME://$NETLOC/v2/event_types + response_strings: + - cookies_chocolate.chip + - cookies_peanut.butter + - cookies_sugar + +# Get a single event type by valid name, this API is unused and should return a 404 +- name: get event types for good event_type unused api + url: /v2/event_types/cookies_chocolate.chip + request_headers: + X-Roles: admin + X-User-Id: user1 + X-Project-Id: project1 + status: 404 + +# Get a single event type by invalid name, this API is unused and should return a 404 +- name: get event types for bad event_type unused api + url: /v2/event_types/bad_event_type + request_headers: + X-Roles: admin + X-User-Id: user1 + X-Project-Id: project1 + status: 404 + +# Get all traits for a valid event type should return a list of traits +- name: get all traits for event type + url: /v2/event_types/cookies_chocolate.chip/traits + request_headers: + X-Roles: admin + X-User-Id: user1 + X-Project-Id: project1 + response_headers: + content-type: application/json; charset=UTF-8 + content-location: $SCHEME://$NETLOC/v2/event_types/cookies_chocolate.chip/traits + response_json_paths: + $.[0].type: string + $.[1].name: ate + +# Get all traits for an invalid event type should return an empty list +- name: get all traits names for event type bad event type + url: /v2/event_types/bad_event_type/traits + request_headers: + X-Roles: admin + X-User-Id: user1 + X-Project-Id: project1 + response_headers: + content-type: application/json; charset=UTF-8 + content-location: $SCHEME://$NETLOC/v2/event_types/bad_event_type/traits + response_strings: + -
"[]" + +# Get all traits of type ate for a valid event type should return an list of +# traits +- name: get all traits of type ate for event type + url: /v2/event_types/cookies_chocolate.chip/traits/ate + request_headers: + X-Roles: admin + X-User-Id: user1 + X-Project-Id: project1 + response_headers: + content-type: application/json; charset=UTF-8 + content-location: $SCHEME://$NETLOC/v2/event_types/cookies_chocolate.chip/traits/ate + response_json_paths: + $.[0].name: ate + $.[0].value: '0' + +# Get all traits of type ate for a invalid event type should return an empty +# list +- name: get all traits of type for event type bad event type + url: /v2/event_types/bad_event_type/traits/ate + request_headers: + X-Roles: admin + X-User-Id: user1 + X-Project-Id: project1 + response_headers: + content-type: application/json; charset=UTF-8 + content-location: $SCHEME://$NETLOC/v2/event_types/bad_event_type/traits/ate + response_strings: + - "[]" + +# Get all traits of type bad_trait_name for a valid event type should return an +# empty list +- name: get all traits of type instances for event type bad trait name + url: /v2/event_types/cookies_chocolate.chip/traits/bad_trait_name + request_headers: + X-Roles: admin + X-User-Id: user1 + X-Project-Id: project1 + response_headers: + content-type: application/json; charset=UTF-8 + content-location: $SCHEME://$NETLOC/v2/event_types/cookies_chocolate.chip/traits/bad_trait_name + response_strings: + - "[]" diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/gabbits/basic.yaml ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/gabbits/basic.yaml --- ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/gabbits/basic.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/gabbits/basic.yaml 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,24 @@ +# +# Some simple tests just to confirm that the system works. +# +fixtures: + - ConfigFixture + +tests: + +# Root gives us some information on where to go from here. +- name: quick root check + url: / + response_headers: + content-type: application/json; charset=UTF-8 + response_strings: + - '"base": "application/json"' + response_json_paths: + versions.values.[0].status: stable + versions.values.[0].media-types.[0].base: application/json + +# NOTE(chdent): Ideally since / has a links ref to /v2, /v2 ought not 404! 
+- name: v2 visit + desc: this demonstrates a bug in the info in / + url: $RESPONSE['versions.values.[0].links.[0].href'] + status: 404 diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/gabbits/capabilities.yaml ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/gabbits/capabilities.yaml --- ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/gabbits/capabilities.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/gabbits/capabilities.yaml 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,15 @@ +# +# Explore the capabilities API +# +fixtures: + - ConfigFixture + +tests: + +- name: get capabilities + desc: retrieve capabilities for the mongo store + url: /v2/capabilities + response_json_paths: + $.alarm_storage.['storage:production_ready']: true + $.event_storage.['storage:production_ready']: true + $.storage.['storage:production_ready']: true diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/gabbits/clean-samples.yaml ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/gabbits/clean-samples.yaml --- ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/gabbits/clean-samples.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/gabbits/clean-samples.yaml 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,104 @@ +# Post a simple sample, sir, and then retrieve it in various ways. +fixtures: + - ConfigFixture + +tests: + +# POST one sample and verify its existence. + + - name: post sample for meter + desc: post a single sample + url: /v2/meters/apples?direct=True + method: POST + request_headers: + content-type: application/json + data: | + [ + { + "counter_name": "apples", + "project_id": "35b17138-b364-4e6a-a131-8f3099c5be68", + "user_id": "efd87807-12d2-4b38-9c70-5f5c2ac427ff", + "counter_unit": "instance", + "counter_volume": 1, + "resource_id": "bd9431c1-8d69-4ad3-803a-8d4a6b89fd36", + "resource_metadata": { + "name2": "value2", + "name1": "value1" + }, + "counter_type": "gauge" + } + ] + + response_json_paths: + $.[0].counter_name: apples + status: 201 + response_headers: + content-type: application/json; charset=UTF-8 + +# When POSTing a sample perhaps we should get back a location header +# with the URI of the posted sample + + - name: post a sample expect location + desc: https://bugs.launchpad.net/ceilometer/+bug/1426426 + xfail: true + url: /v2/meters/apples?direct=True + method: POST + request_headers: + content-type: application/json + data: + - counter_name: apples + project_id: 35b17138-b364-4e6a-a131-8f3099c5be68 + user_id: efd87807-12d2-4b38-9c70-5f5c2ac427ff + counter_unit: instance + counter_volume: 1 + resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 + resource_metadata: + name2: value2 + name1: value1 + counter_type: gauge + response_headers: + location: /$SCHEME://$NETLOC/ + +# GET all the samples created for the apples meter + + - name: get samples for meter + desc: get all the samples at that meter + url: /v2/meters/apples + response_json_paths: + $.[0].counter_name: apples + $.[0].counter_volume: 1 + $.[0].resource_metadata.name2: value2 + +# POSTing a sample to a meter will implicitly create a resource + + - name: get resources + desc: get the resources that exist because of the sample + url: /v2/resources + response_json_paths: + $.[0].metadata.name2: value2 + +# NOTE(chdent): We assume that the first item in links is self. +# Need to determine how to express the more correct JSONPath here +# (if possible).
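Client code that does not want to rely on that ordering can search the links list for the entry whose rel is 'self' instead of indexing position 0; a hedged Python sketch over the resource payloads returned by /v2/resources:

    def self_href(resource):
        # Prefer the link explicitly marked rel=self; fall back to the first.
        for link in resource.get('links', []):
            if link.get('rel') == 'self':
                return link['href']
        return resource['links'][0]['href']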
+ + - name: get resource + desc: get just one of those resources via self + url: $RESPONSE['$[0].links[0].href'] + response_json_paths: + $.metadata.name2: value2 + +# GET the created samples + + - name: get samples + desc: get all the created samples + url: /v2/samples + response_json_paths: + $.[0].metadata.name2: value2 + $.[0].meter: apples + + - name: get one sample + desc: get the one sample that exists + url: /v2/samples/$RESPONSE['$[0].id'] + response_json_paths: + $.metadata.name2: value2 + $.meter: apples diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/gabbits/fixture-samples.yaml ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/gabbits/fixture-samples.yaml --- ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/gabbits/fixture-samples.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/gabbits/fixture-samples.yaml 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,18 @@ +# +# Demonstrate a simple sample fixture. +# +fixtures: + - ConfigFixture + - SampleDataFixture + +tests: +- name: get fixture samples + desc: get all the samples at livestock + url: /v2/meters/livestock + response_json_paths: + $.[0].counter_name: livestock + $.[1].counter_name: livestock + $.[2].counter_name: livestock + $.[2].user_id: farmerjon + $.[0].resource_metadata.breed: cow + $.[1].resource_metadata.farmed_by: nancy diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/gabbits/meters.yaml ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/gabbits/meters.yaml --- ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/gabbits/meters.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/gabbits/meters.yaml 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,354 @@ +# +# Tests to explore and cover the /v2/meters section of the +# Ceilometer API. +# + +fixtures: + - ConfigFixture + +tests: + +# Generic HTTP health explorations of all meters. + + - name: empty meters list + url: /v2/meters + response_headers: + content-type: /application/json/ + response_strings: + - "[]" + + - name: meters list bad accept + url: /v2/meters + request_headers: + accept: text/plain + status: 406 + + - name: meters list bad method + url: /v2/meters + method: POST + status: 405 + response_headers: + allow: GET + + - name: try to delete meters + url: /v2/meters + method: DELETE + status: 405 + response_headers: + allow: GET + +# Generic HTTP health explorations of single meter. 
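The POST/GET round trip that the meter tests below exercise can also be reproduced with a plain HTTP client; a sketch assuming a ceilometer API listening locally on its default port 8777 (host and port are assumptions, not part of the fixtures). The direct=True flag routes the sample through the direct publisher, per the ConfigFixture pipeline note earlier:

    import requests

    BASE = 'http://localhost:8777'  # assumed endpoint
    sample = [{'counter_name': 'apples',
               'counter_type': 'gauge',
               'counter_unit': 'instance',
               'counter_volume': 1,
               'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36'}]
    resp = requests.post(BASE + '/v2/meters/apples',
                         params={'direct': 'True'}, json=sample)
    assert resp.status_code == 201
    samples = requests.get(BASE + '/v2/meters/apples').json()
    assert samples[0]['counter_name'] == 'apples'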
+ + - name: get non exist meter + url: /v2/meters/noexist + response_strings: + - "[]" + + - name: meter bad accept + url: /v2/meters/noexist?direct=True + request_headers: + accept: text/plain + status: 406 + + - name: meter delete noexist + url: /v2/meters/noexist + method: DELETE + status: "404 || 405" + + - name: post meter no data + url: /v2/meters/apples?direct=True + method: POST + request_headers: + content-type: application/json + data: "" + status: 400 + + - name: post meter error is JSON + url: /v2/meters/apples?direct=True + method: POST + request_headers: + content-type: application/json + data: "" + status: 400 + response_headers: + content-type: /application/json/ + response_json_paths: + $.error_message.faultstring: "Samples should be included in request body" + + - name: post meter bad content-type + url: /v2/meters/apples?direct=True + method: POST + request_headers: + content-type: text/plain + data: hello + status: 415 + + - name: post bad samples to meter + url: /v2/meters/apples?direct=True + method: POST + request_headers: + content-type: application/json + data: + samples: + - red + - blue + - yellow + status: 400 + +# POST variations on a malformed sample + + - name: post limited counter to meter + url: /v2/meters/apples?direct=True + method: POST + request_headers: + content-type: application/json + data: + - counter_unit: instance + counter_volume: 1 + resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 + status: 400 + response_strings: + - "Invalid input for field/attribute counter_name" + + - name: post mismatched counter name to meter + url: /v2/meters/apples?direct=True + method: POST + request_headers: + content-type: application/json + data: + - counter_name: cars + counter_type: gauge + counter_unit: instance + counter_volume: 1 + resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 + status: 400 + response_strings: + - "Invalid input for field/attribute counter_name" + - "should be apples" + + - name: post counter no resource to meter + url: /v2/meters/apples?direct=True + method: POST + request_headers: + content-type: application/json + data: + - counter_name: apples + counter_type: gauge + counter_unit: instance + counter_volume: 1 + status: 400 + response_strings: + - "Invalid input for field/attribute resource_id" + - "Mandatory field missing." + + - name: post counter bad type to meter + url: /v2/meters/apples?direct=True + method: POST + request_headers: + content-type: application/json + data: + - counter_name: apples + counter_type: elevation + counter_unit: instance + counter_volume: 1 + resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 + status: 400 + response_strings: + - "Invalid input for field/attribute counter_type." 
+ - "The counter type must be: gauge, delta, cumulative" + +# Manipulate samples + + - name: post counter to meter + url: /v2/meters/apples?direct=True + method: POST + request_headers: + content-type: application/json + data: + - counter_name: apples + counter_type: gauge + counter_unit: instance + counter_volume: 1 + resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 + status: 201 + + - name: list apple samples + url: /v2/meters/apples + response_json_paths: + $[0].counter_volume: 1.0 + $[0].counter_name: apples + $[0].resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 + + - name: list meters + url: /v2/meters + response_json_paths: + $[0].name: apples + $[0].resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 + $[0].type: gauge + $[-1].name: apples + + - name: negative limit on meter list + url: /v2/meters/apples?limit=-5 + status: 400 + response_strings: + - Limit must be positive + + - name: nan limit on meter list + url: /v2/meters/apples?limit=NaN + status: 400 + response_strings: + - unable to convert to int + + - name: post counter to meter different resource + url: /v2/meters/apples?direct=True + method: POST + status: 201 + request_headers: + content-type: application/json + data: + - counter_name: apples + counter_type: gauge + counter_unit: instance + counter_volume: 2 + resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa + + - name: query for resource + url: /v2/meters/apples?q.field=resource_id&q.value=aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa&q.op=eq + response_json_paths: + $[0].resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa + $[-1].resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa + +# Explore posting samples with less than perfect data. + + - name: post counter with bad timestamp + url: /v2/meters/apples?direct=True + method: POST + request_headers: + content-type: application/json + data: + - counter_name: apples + counter_type: gauge + counter_unit: instance + counter_volume: 3 + resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa + timestamp: "2013-01-bad 23:23:20" + status: 400 + response_strings: + - 'Invalid input for field/attribute samples' + + - name: post counter with good timestamp + url: /v2/meters/apples?direct=True + method: POST + status: 201 + request_headers: + content-type: application/json + data: + - counter_name: apples + counter_type: gauge + counter_unit: instance + counter_volume: 3 + resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa + timestamp: "2013-01-01 23:23:20" + + - name: post counter with wrong metadata + url: /v2/meters/apples?direct=True + method: POST + request_headers: + content-type: application/json + data: + - counter_name: apples + counter_type: gauge + counter_unit: instance + counter_volume: 3 + resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa + timestamp: "2013-01-01 23:23:20" + resource_metadata: "a string" + status: 400 + response_strings: + - "Invalid input for field/attribute samples" + + - name: post counter with empty metadata + url: /v2/meters/apples?direct=True + method: POST + status: 201 + request_headers: + content-type: application/json + data: + - counter_name: apples + counter_type: gauge + counter_unit: instance + counter_volume: 3 + resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa + timestamp: "2013-01-01 23:23:20" + resource_metadata: {} + +# Statistics + + - name: get sample statistics + url: /v2/meters/apples/statistics + response_json_paths: + $[0].groupby: null + $[0].unit: instance + $[0].sum: 9.0 + $[0].min: 1.0 + $[0].max: 3.0 + $[0].count: 4 + + - name: get incorrectly grouped sample statistics + url: 
/v2/meters/apples/statistics?groupby=house_id + status: 400 + response_strings: + - Invalid groupby fields + + - name: get grouped sample statistics + url: /v2/meters/apples/statistics?groupby=resource_id + response_json_paths: + $[1].max: 3.0 + $[0].max: 1.0 + + - name: get sample statistics bad period + url: /v2/meters/apples/statistics?period=seven + status: 400 + response_strings: + - unable to convert to int + + - name: get sample statistics negative period + url: /v2/meters/apples/statistics?period=-7 + status: 400 + response_strings: + - Period must be positive. + + - name: get sample statistics 600 period + url: /v2/meters/apples/statistics?period=600 + response_json_paths: + $[0].period: 600 + + - name: get sample statistics time limit not time + url: /v2/meters/apples/statistics?q.field=timestamp&q.op=gt&q.value=Remember%20Remember + status: 400 + response_strings: + - invalid timestamp format + + - name: get sample statistics time limit gt + url: /v2/meters/apples/statistics?q.field=timestamp&q.op=gt&q.value=2014-01-01 + response_json_paths: + $[0].count: 2 + + - name: get sample statistics time limit lt + url: /v2/meters/apples/statistics?q.field=timestamp&q.op=lt&q.value=2014-01-01 + response_json_paths: + $[0].count: 2 + + - name: get sample statistics time limit bounded + url: /v2/meters/apples/statistics?q.field=timestamp&q.op=gt&q.value=2013-06-01&q.field=timestamp&q.op=lt&q.value=2014-01-01 + response_strings: + - "[]" + + - name: get sample statistics select aggregate bad format + url: /v2/meters/apples/statistics?aggregate=max + status: 400 + + - name: get sample statistics select aggregate + url: /v2/meters/apples/statistics?aggregate.func=max + response_json_paths: + $[0].aggregate.max: 3.0 diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/gabbits/resources-empty.yaml ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/gabbits/resources-empty.yaml --- ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/gabbits/resources-empty.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/gabbits/resources-empty.yaml 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,59 @@ +# +# Explore and cover resources API with gabbi tests when there are no +# resources. +# + +fixtures: + - ConfigFixture + +tests: + +# Check for a list of resources, modifying the request in various +# ways. 
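
A brief orientation before the tests: each YAML entry below compiles into one HTTP request plus assertions on the response, with gabbi supplying defaults (GET, expected status 200) for anything omitted. A rough stand-alone rendering of the first test, sketched with the requests library purely for illustration (the real suite runs against an in-process WSGI intercept, so the host and port here are assumptions):

    # Approximate stand-alone equivalent of "list resources no extra".
    import requests

    resp = requests.get("http://localhost:8777/v2/resources")  # assumed host
    assert resp.status_code == 200                  # gabbi's implicit default
    assert "application/json" in resp.headers["content-type"]
    assert "[]" in resp.text                        # the response_strings check
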
+ + - name: list resources no extra + desc: Provide no additional header guidelines + url: /v2/resources + response_headers: + content-type: /application/json/ + response_strings: + - "[]" + + - name: list resources but get url wrong + url: /v2/resrces + status: 404 + + - name: list resources explicit accept + url: /v2/resources + request_headers: + accept: application/json + response_strings: + - "[]" + + - name: list resources bad accept + url: /v2/resources + request_headers: + accept: text/plain + status: 406 + + - name: list resources with bad query field + url: /v2/resources?q.field=id&q.value=cars + status: 400 + response_strings: + - unrecognized field in query + + - name: list resources with query + url: /v2/resources?q.field=resource&q.value=cars + response_strings: + - "[]" + + - name: list resource bad type meter links + url: /v2/resources?meter_links=yes%20please + status: 400 + response_strings: + - unable to convert to int + + - name: list resource meter links int + url: /v2/resources?meter_links=0 + response_strings: + - "[]" diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/gabbits/resources-fixtured.yaml ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/gabbits/resources-fixtured.yaml --- ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/gabbits/resources-fixtured.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/gabbits/resources-fixtured.yaml 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,57 @@ +# +# Explore and cover resources API with gabbi tests when there are a +# small number of pre-existing resources +# + +fixtures: + - ConfigFixture + - SampleDataFixture + +tests: + + - name: list all resources + url: /v2/resources + response_json_paths: + $[0].user_id: farmerjon + $[0].links[1].rel: livestock + + - name: get one resource + desc: get a resource via the links in the first resource listed above + url: $RESPONSE['$[0].links[0].href'] + response_json_paths: + $.resource_id: $RESPONSE['$[0].resource_id'] + + - name: list resources limit user_id + url: /v2/resources?q.field=user_id&q.value=farmerjon + response_json_paths: + $[0].user_id: farmerjon + $[0].links[1].rel: livestock + + - name: list resources limit metadata + url: /v2/resources?q.field=metadata.breed&q.value=sheep + response_json_paths: + $[0].user_id: farmerjon + $[0].links[1].rel: livestock + + - name: list resources limit metadata no match + url: /v2/resources?q.field=metadata.breed&q.value=llamma + response_strings: + - "[]" + + - name: fail to get one resource + url: /v2/resources/nosirnothere + status: 404 + + - name: list resource meter links present + url: /v2/resources?meter_links=1 + response_json_paths: + $[0].links[0].rel: self + $[0].links[1].rel: livestock + $[0].links[-1].rel: livestock + + - name: list resource meter links not present + url: /v2/resources?meter_links=0 + desc: there is only one links entry when meter_links is 0 + response_json_paths: + $[0].links[0].rel: self + $[0].links[-1].rel: self diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/gabbits/samples.yaml ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/gabbits/samples.yaml --- ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/gabbits/samples.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/gabbits/samples.yaml 2015-09-03 13:06:00.000000000 +0000 @@ -0,0 +1,155 @@ +# +# Explore and test the samples controller, using samples supplied by +# the SampleDataFixture. 
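
SampleDataFixture is defined in the fixtures module loaded by the gabbi test driver; judging from the assertions in this file, it seeds three "livestock" samples whose metadata.breed values are cow, pig and sheep, owned by user farmerjon. A hedged sketch of such seeding with the ceilometer sample model (the unit, volume, project and resource ids below are invented for illustration, not copied from the real fixture):

    # Illustrative approximation of what SampleDataFixture appears to seed.
    import datetime

    from ceilometer import sample

    def livestock_samples():
        return [
            sample.Sample(
                name='livestock', type=sample.TYPE_GAUGE,
                unit='head',                     # unit is an assumption
                volume=1, user_id='farmerjon',
                project_id='farm',               # project id is an assumption
                resource_id='herd',              # resource id is an assumption
                timestamp=datetime.datetime.utcnow().isoformat(),
                resource_metadata={'breed': breed})
            for breed in ('cow', 'pig', 'sheep')]
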
+# + +fixtures: + - ConfigFixture + - SampleDataFixture + +tests: + +# Confirm all the samples are there and expected requests behave. +# TODO(chdent): There's a danger here that the ordering of multiple +# samples will not be consistent. + + - name: lists samples + url: /v2/samples + response_headers: + content-type: /application/json/ + response_json_paths: + $[0].meter: livestock + $[0].metadata.breed: cow + $[1].metadata.breed: pig + $[2].metadata.breed: sheep + + - name: get just one + url: /v2/samples/$RESPONSE['$[0].id'] + response_json_paths: + $.meter: livestock + $.metadata.breed: cow + + - name: list samples with limit + url: /v2/samples?limit=1 + response_json_paths: + $[0].meter: livestock + $[0].metadata.breed: cow + $[-1].metadata.breed: cow + + - name: list zero samples with zero limit + url: /v2/samples?limit=0 + status: 400 + + - name: list samples with query + url: /v2/samples?q.field=resource_metadata.breed&q.value=cow&q.op=eq + response_json_paths: + $[0].meter: livestock + $[0].metadata.breed: cow + $[-1].metadata.breed: cow + + - name: query by user + url: /v2/samples?q.field=user&q.value=$RESPONSE['$[0].user_id']&q.op=eq + response_json_paths: + $[0].user_id: $RESPONSE['$[0].user_id'] + + - name: query by user_id + url: /v2/samples?q.field=user_id&q.value=$RESPONSE['$[0].user_id']&q.op=eq + response_json_paths: + $[0].user_id: $RESPONSE['$[0].user_id'] + + - name: query by project + url: /v2/samples?q.field=project&q.value=$RESPONSE['$[0].project_id']&q.op=eq + response_json_paths: + $[0].project_id: $RESPONSE['$[0].project_id'] + + - name: query by project_id + url: /v2/samples?q.field=project_id&q.value=$RESPONSE['$[0].project_id']&q.op=eq + response_json_paths: + $[0].project_id: $RESPONSE['$[0].project_id'] + +# Explore failure modes for listing samples + + - name: list samples with bad field + url: /v2/samples?q.field=harpoon&q.value=cow&q.op=eq + status: 400 + response_strings: + - timestamp + - project + - unrecognized field in query + + - name: list samples with bad metaquery field + url: /v2/samples?q.field=metaquery&q.value=cow&q.op=eq + status: 400 + response_strings: + - unrecognized field in query + + - name: bad limit value + url: /v2/samples?limit=happiness + status: 400 + response_strings: + - Invalid input for field/attribute limit + + - name: negative limit value 400 + url: /v2/samples?limit=-99 + status: 400 + + - name: negative limit value error message + url: /v2/samples?limit=-99 + status: 400 + response_headers: + content-type: /application/json/ + response_json_paths: + $.error_message.faultstring: Limit must be positive + + - name: bad accept + desc: try an unexpected content type + url: /v2/samples + request_headers: + accept: text/plain + status: 406 + + - name: complex good accept + desc: client sends complex accept do we adapt + url: /v2/samples + request_headers: + accept: text/plain, application/json; q=0.8 + + - name: complex bad accept + desc: client sends complex accept do we adapt + url: /v2/samples + request_headers: + accept: text/plain, application/binary; q=0.8 + status: 406 + + - name: bad method + url: /v2/samples + method: POST + status: 405 + response_headers: + allow: GET + +# Work with just one sample. 
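
The chained tests below rely on gabbi's $RESPONSE substitution: a JSONPath expression is evaluated against the body of the previous test's response and spliced into the next URL. Unrolled into plain Python (assumed host, illustration only):

    # How the "retrieve one sample" chain resolves, step by step.
    import requests

    base = "http://localhost:8777"  # assumed host
    listing = requests.get(base + "/v2/samples", params={"limit": 1}).json()
    sample_id = listing[0]["id"]    # what $RESPONSE['$[0].id'] evaluates to
    one = requests.get("%s/v2/samples/%s" % (base, sample_id)).json()
    assert one["meter"] == "livestock"
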
+
+    - name: list one of the samples
+      url: /v2/samples?limit=1
+
+    - name: retrieve one sample
+      url: /v2/samples/$RESPONSE['$[0].id']
+      response_headers:
+          content-type: /application/json/
+      response_json_paths:
+          $.meter: livestock
+
+    - name: retrieve sample with useless query
+      url: /v2/samples/$RESPONSE['$.id']?limit=5
+      status: 400
+      response_strings:
+          - "Unknown argument:"
+
+    - name: attempt missing sample
+      url: /v2/samples/davesnothere
+      status: 404
+      response_headers:
+          content-type: /application/json/
+      response_json_paths:
+          $.error_message.faultstring: Sample davesnothere Not Found
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/gabbits_prefix/api_events_with_data.yaml ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/gabbits_prefix/api_events_with_data.yaml
--- ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/gabbits_prefix/api_events_with_data.yaml	1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/gabbits_prefix/api_events_with_data.yaml	2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,17 @@
+# This test runs against the Events API and confirms the
+# content-location header includes a prefix.
+fixtures:
+- ConfigFixture
+- EventDataFixture
+
+tests:
+
+- name: get all events
+  url: /v2/events
+  request_headers:
+    X-Roles: admin
+    X-User-Id: user1
+    X-Project-Id: project1
+  response_headers:
+    content-type: application/json; charset=UTF-8
+    content-location: /$SCHEME://.*/telemetry/v2/events/
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/gabbits_prefix/basic.yaml ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/gabbits_prefix/basic.yaml
--- ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/gabbits_prefix/basic.yaml	1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/gabbits_prefix/basic.yaml	2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,19 @@
+#
+# Confirm root reports the right data including a prefixed URL
+#
+fixtures:
+    - ConfigFixture
+
+tests:
+
+# Root gives us some information on where to go from here.
+- name: quick root check
+  url: /
+  response_headers:
+      content-type: application/json; charset=UTF-8
+  response_strings:
+      - '"base": "application/json"'
+      - /telemetry/
+  response_json_paths:
+      versions.values.[0].status: stable
+      versions.values.[0].media-types.[0].base: application/json
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/gabbits_prefix/clean-samples.yaml ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/gabbits_prefix/clean-samples.yaml
--- ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/gabbits_prefix/clean-samples.yaml	1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/gabbits_prefix/clean-samples.yaml	2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,51 @@
+# Post a simple sample and confirm the created resource has
+# reasonable URLs
+fixtures:
+    - ConfigFixture
+
+tests:
+
+# POST one sample and verify its existence.
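
The ?direct=True flag used on these POSTs asks the API to write the samples straight to the storage backend rather than publish them through the notification pipeline, so the following GETs can read them back immediately. Outside gabbi, the same request might look like this sketch (assumed host; the body is a reduced version of the sample posted below):

    # Hypothetical stand-alone version of the POST exercised below.
    import requests

    body = [{"counter_name": "apples", "counter_type": "gauge",
             "counter_unit": "instance", "counter_volume": 1,
             "resource_id": "bd9431c1-8d69-4ad3-803a-8d4a6b89fd36"}]
    resp = requests.post("http://localhost:8777/v2/meters/apples",  # assumed host
                         params={"direct": "True"}, json=body)
    assert resp.status_code == 201
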
+ + - name: post sample for meter + desc: post a single sample + url: /v2/meters/apples?direct=True + method: POST + request_headers: + content-type: application/json + data: | + [ + { + "counter_name": "apples", + "project_id": "35b17138-b364-4e6a-a131-8f3099c5be68", + "user_id": "efd87807-12d2-4b38-9c70-5f5c2ac427ff", + "counter_unit": "instance", + "counter_volume": 1, + "resource_id": "bd9431c1-8d69-4ad3-803a-8d4a6b89fd36", + "resource_metadata": { + "name2": "value2", + "name1": "value1" + }, + "counter_type": "gauge" + } + ] + + response_json_paths: + $.[0].counter_name: apples + status: 201 + response_headers: + content-type: application/json; charset=UTF-8 + + - name: get resources + desc: get the resources that exist because of the sample + url: /v2/resources + response_json_paths: + $.[0].metadata.name2: value2 + + - name: get resource + desc: get just one of those resources via self + url: $RESPONSE['$[0].links[0].href'] + response_json_paths: + $.metadata.name2: value2 + response_strings: + - /telemetry/ diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/gabbits_prefix/resources-fixtured.yaml ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/gabbits_prefix/resources-fixtured.yaml --- ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/gabbits_prefix/resources-fixtured.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/gabbits_prefix/resources-fixtured.yaml 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,24 @@ +# +# Explore and cover resources API with gabbi tests when there are a +# small number of pre-existing resources +# + +fixtures: + - ConfigFixture + - SampleDataFixture + +tests: + + - name: list all resources + url: /v2/resources + response_json_paths: + $[0].user_id: farmerjon + $[0].links[1].rel: livestock + response_strings: + - /telemetry/ + + - name: get one resource + desc: get a resource via the links in the first resource listed above + url: $RESPONSE['$[0].links[0].href'] + response_json_paths: + $.resource_id: $RESPONSE['$[0].resource_id'] diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/test_gabbi_prefix.py ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/test_gabbi_prefix.py --- ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/test_gabbi_prefix.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/test_gabbi_prefix.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,34 @@ +# +# Copyright 2015 Red Hat. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""A module to exercise the Ceilometer API with gabbi with a URL prefix""" + +import os + +from gabbi import driver + +from ceilometer.api import app +from ceilometer.tests.functional.gabbi import fixtures as fixture_module + +TESTS_DIR = 'gabbits_prefix' + + +def load_tests(loader, tests, pattern): + """Provide a TestSuite to the discovery process.""" + test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR) + return driver.build_tests(test_dir, loader, host=None, + prefix='/telemetry', + intercept=app.VersionSelectorApplication, + fixture_module=fixture_module) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/test_gabbi.py ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/test_gabbi.py --- ceilometer-5.0.0~b2/ceilometer/tests/functional/gabbi/test_gabbi.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/gabbi/test_gabbi.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,36 @@ +# +# Copyright 2015 Red Hat. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""A test module to exercise the Ceilometer API with gabbi + +For the sake of exploratory development. +""" + +import os + +from gabbi import driver + +from ceilometer.api import app +from ceilometer.tests.functional.gabbi import fixtures as fixture_module + +TESTS_DIR = 'gabbits' + + +def load_tests(loader, tests, pattern): + """Provide a TestSuite to the discovery process.""" + test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR) + return driver.build_tests(test_dir, loader, host=None, + intercept=app.VersionSelectorApplication, + fixture_module=fixture_module) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/hooks/post_test_hook.sh ceilometer-5.0.0~b3/ceilometer/tests/functional/hooks/post_test_hook.sh --- ceilometer-5.0.0~b2/ceilometer/tests/functional/hooks/post_test_hook.sh 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/hooks/post_test_hook.sh 2015-09-03 13:05:55.000000000 +0000 @@ -18,7 +18,7 @@ if [ -f .testrepository/0 ]; then sudo .tox/functional/bin/testr last --subunit > $WORKSPACE/testrepository.subunit sudo mv $WORKSPACE/testrepository.subunit $BASE/logs/testrepository.subunit - sudo .tox/functional/bin/python /usr/local/jenkins/slave_scripts/subunit2html.py $BASE/logs/testrepository.subunit $BASE/logs/testr_results.html + sudo /usr/os-testr-env/bin/subunit2html $BASE/logs/testrepository.subunit $BASE/logs/testr_results.html sudo gzip -9 $BASE/logs/testrepository.subunit sudo gzip -9 $BASE/logs/testr_results.html sudo chown jenkins:jenkins $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz @@ -31,14 +31,18 @@ # Go to the ceilometer dir cd $CEILOMETER_DIR -sudo chown -R jenkins:stack $CEILOMETER_DIR +if [[ -z "$STACK_USER" ]]; then + export STACK_USER=stack +fi + +sudo chown -R $STACK_USER:stack $CEILOMETER_DIR # Run tests echo "Running ceilometer functional test suite" set +e # NOTE(ityaptin) Expected a script param which contains a backend name 
-CEILOMETER_TEST_BACKEND="$1" sudo -E -H -u jenkins tox -efunctional
+CEILOMETER_TEST_BACKEND="$1" sudo -E -H -u ${STACK_USER:-${USER}} tox -efunctional
 EXIT_CODE=$?
 set -e
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/publisher/test_direct.py ceilometer-5.0.0~b3/ceilometer/tests/functional/publisher/test_direct.py
--- ceilometer-5.0.0~b2/ceilometer/tests/functional/publisher/test_direct.py	1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/functional/publisher/test_direct.py	2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,103 @@
+#
+# Copyright 2015 Red Hat
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Tests for ceilometer/publisher/direct.py
+"""
+
+import datetime
+import uuid
+
+from oslo_utils import netutils
+
+from ceilometer.event.storage import models as event
+from ceilometer.publisher import direct
+from ceilometer import sample
+from ceilometer.tests import db as tests_db
+
+
+class TestDirectPublisher(tests_db.TestBase,
+                          tests_db.MixinTestsWithBackendScenarios):
+
+    resource_id = str(uuid.uuid4())
+
+    test_data = [
+        sample.Sample(
+            name='alpha',
+            type=sample.TYPE_CUMULATIVE,
+            unit='',
+            volume=1,
+            user_id='test',
+            project_id='test',
+            resource_id=resource_id,
+            timestamp=datetime.datetime.utcnow().isoformat(),
+            resource_metadata={'name': 'TestPublish'},
+        ),
+        sample.Sample(
+            name='beta',
+            type=sample.TYPE_CUMULATIVE,
+            unit='',
+            volume=1,
+            user_id='test',
+            project_id='test',
+            resource_id=resource_id,
+            timestamp=datetime.datetime.utcnow().isoformat(),
+            resource_metadata={'name': 'TestPublish'},
+        ),
+        sample.Sample(
+            name='gamma',
+            type=sample.TYPE_CUMULATIVE,
+            unit='',
+            volume=1,
+            user_id='test',
+            project_id='test',
+            resource_id=resource_id,
+            timestamp=datetime.datetime.utcnow().isoformat(),
+            resource_metadata={'name': 'TestPublish'},
+        ),
+    ]
+
+    def test_direct_publisher(self):
+        """Test samples are saved."""
+        self.CONF.set_override('connection', self.db_manager.url,
+                               group='database')
+        parsed_url = netutils.urlsplit('direct://')
+        publisher = direct.DirectPublisher(parsed_url)
+        publisher.publish_samples(None,
+                                  self.test_data)
+
+        meters = list(self.conn.get_meters(resource=self.resource_id))
+        names = sorted([meter.name for meter in meters])
+
+        self.assertEqual(3, len(meters), 'There should be 3 samples')
+        self.assertEqual(['alpha', 'beta', 'gamma'], names)
+
+
+class TestEventDirectPublisher(tests_db.TestBase,
+                               tests_db.MixinTestsWithBackendScenarios):
+
+    test_data = [event.Event(message_id=str(uuid.uuid4()),
+                             event_type='event_%d' % i,
+                             generated=datetime.datetime.utcnow(),
+                             traits=[], raw={})
+                 for i in range(0, 5)]
+
+    def test_direct_publisher(self):
+        parsed_url = netutils.urlsplit('direct://')
+        publisher = direct.DirectPublisher(parsed_url)
+        publisher.publish_events(None, self.test_data)
+
+        e_types = list(self.event_conn.get_event_types())
+        self.assertEqual(5, len(e_types))
+        self.assertEqual(['event_%d' % i for i in range(0, 5)],
+                         sorted(e_types))
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/storage/test_impl_db2.py ceilometer-5.0.0~b3/ceilometer/tests/functional/storage/test_impl_db2.py
--- ceilometer-5.0.0~b2/ceilometer/tests/functional/storage/test_impl_db2.py	1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/functional/storage/test_impl_db2.py	2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,157 @@
+#
+# Copyright Ericsson AB 2014. All rights reserved
+#
+# Authors: Ildiko Vancsa
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Tests for ceilometer/storage/impl_db2.py
+
+.. note::
+  In order to run the tests against a real DB2 server, set the
+  environment variable CEILOMETER_TEST_DB2_URL to point to that DB2
+  server before running the tests.
+
+"""
+
+import bson
+import mock
+from oslo_config import cfg
+from oslo_utils import timeutils
+
+from ceilometer.alarm.storage import impl_db2 as impl_db2_alarm
+from ceilometer.event.storage import impl_db2 as impl_db2_event
+from ceilometer.storage import impl_db2
+from ceilometer.storage.mongo import utils as pymongo_utils
+from ceilometer.tests import base as test_base
+
+
+class CapabilitiesTest(test_base.BaseTestCase):
+    # Check the returned capabilities list, which is specific to each DB
+    # driver
+
+    def test_capabilities(self):
+        expected_capabilities = {
+            'meters': {'query': {'simple': True,
+                                 'metadata': True,
+                                 'complex': False}},
+            'resources': {'query': {'simple': True,
+                                    'metadata': True,
+                                    'complex': False}},
+            'samples': {'query': {'simple': True,
+                                  'metadata': True,
+                                  'complex': True}},
+            'statistics': {'groupby': True,
+                           'query': {'simple': True,
+                                     'metadata': True,
+                                     'complex': False},
+                           'aggregation': {'standard': True,
+                                           'selectable': {
+                                               'max': False,
+                                               'min': False,
+                                               'sum': False,
+                                               'avg': False,
+                                               'count': False,
+                                               'stddev': False,
+                                               'cardinality': False}}
+                           },
+        }
+
+        actual_capabilities = impl_db2.Connection.get_capabilities()
+        self.assertEqual(expected_capabilities, actual_capabilities)
+
+    def test_event_capabilities(self):
+        expected_capabilities = {
+            'events': {'query': {'simple': True}},
+        }
+        actual_capabilities = impl_db2_event.Connection.get_capabilities()
+        self.assertEqual(expected_capabilities, actual_capabilities)
+
+    def test_alarm_capabilities(self):
+        expected_capabilities = {
+            'alarms': {'query': {'simple': True,
+                                 'complex': True},
+                       'history': {'query': {'simple': True,
+                                             'complex': True}}},
+        }
+
+        actual_capabilities = impl_db2_alarm.Connection.get_capabilities()
+        self.assertEqual(expected_capabilities, actual_capabilities)
+
+    def test_storage_capabilities(self):
+        expected_capabilities = {
+            'storage': {'production_ready': True},
+        }
+        actual_capabilities = impl_db2.Connection.get_storage_capabilities()
+        self.assertEqual(expected_capabilities, actual_capabilities)
+
+
+class ConnectionTest(test_base.BaseTestCase):
+    @mock.patch.object(impl_db2.Connection, '_generate_random_str')
+    @mock.patch.object(pymongo_utils.ConnectionPool, 'connect')
+    @mock.patch.object(timeutils, 'utcnow')
+ 
@mock.patch.object(bson.objectid, 'ObjectId') + def test_upgrade(self, meter_id, timestamp, mongo_connect, + _generate_random_str): + conn_mock = mock.MagicMock() + conn_mock.server_info.return_value = {} + _generate_random_str.return_value = 'wew' * 247 + 'x' * 3 + conn_mock.ceilodb2.resource.index_information.return_value = {} + mongo_connect.return_value = conn_mock + meter_id.return_value = '54b8860d75bfe43b54e84ce7' + timestamp.return_value = 'timestamp' + cfg.CONF.set_override('db2nosql_resource_id_maxlen', + 256, + group='database') + impl_db2.Connection('db2://user:pwd@localhost:27017/ceilodb2') + resource_id = 'wew' * 247 + 'x' * 3 + conn_mock.ceilodb2.resource.insert_one.assert_called_with( + {'_id': resource_id, + 'no_key': resource_id}) + conn_mock.ceilodb2.meter.insert_one.assert_called_with( + {'_id': '54b8860d75bfe43b54e84ce7', + 'no_key': '54b8860d75bfe43b54e84ce7', + 'timestamp': 'timestamp'}) + + @mock.patch.object(pymongo_utils.ConnectionPool, 'connect') + @mock.patch.object(bson.objectid, 'ObjectId') + def test_generate_random_str_with_less_config_len(self, objectid, + mongo_connect): + fake_str = '54b8860d75bfe43b54e84ce7' + conn_mock = mock.MagicMock() + conn_mock.server_info.return_value = {} + mongo_connect.return_value = conn_mock + objectid.return_value = fake_str + cfg.CONF.set_override('db2nosql_resource_id_maxlen', + 20, + group='database') + conn = impl_db2.Connection('db2://user:pwd@localhost:27017/ceilodb2') + rand_str = conn._generate_random_str(20) + self.assertEqual(fake_str, rand_str) + + @mock.patch.object(pymongo_utils.ConnectionPool, 'connect') + @mock.patch.object(bson.objectid, 'ObjectId') + def test_generate_random_str_with_default_config_len(self, objectid, + mongo_connect): + fake_str = '54b8860d75bfe43b54e84ce7' + conn_mock = mock.MagicMock() + conn_mock.server_info.return_value = {} + mongo_connect.return_value = conn_mock + objectid.return_value = fake_str + cfg.CONF.set_override('db2nosql_resource_id_maxlen', + 512, + group='database') + conn = impl_db2.Connection('db2://user:pwd@localhost:27017/ceilodb2') + rand_str = conn._generate_random_str(512) + str_len = len(str(fake_str)) + expect_str = fake_str * int(512 / str_len) + 'x' * (512 % str_len) + self.assertEqual(expect_str, rand_str) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/storage/test_impl_hbase.py ceilometer-5.0.0~b3/ceilometer/tests/functional/storage/test_impl_hbase.py --- ceilometer-5.0.0~b2/ceilometer/tests/functional/storage/test_impl_hbase.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/storage/test_impl_hbase.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,119 @@ +# +# Copyright 2012, 2013 Dell Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Tests for ceilometer/storage/impl_hbase.py + +.. note:: + In order to run the tests against real HBase server set the environment + variable CEILOMETER_TEST_HBASE_URL to point to that HBase instance before + running the tests. 
Make sure the Thrift server is running on that server. + +""" +import mock + + +try: + import happybase # noqa +except ImportError: + import testtools.testcase + raise testtools.testcase.TestSkipped("happybase is needed") + +from ceilometer.alarm.storage import impl_hbase as hbase_alarm +from ceilometer.event.storage import impl_hbase as hbase_event +from ceilometer.storage import impl_hbase as hbase +from ceilometer.tests import base as test_base +from ceilometer.tests import db as tests_db + + +class ConnectionTest(tests_db.TestBase, + tests_db.MixinTestsWithBackendScenarios): + + @tests_db.run_with('hbase') + def test_hbase_connection(self): + + class TestConn(object): + def __init__(self, host, port): + self.netloc = '%s:%s' % (host, port) + + def open(self): + pass + + def get_connection_pool(conf): + return TestConn(conf['host'], conf['port']) + + with mock.patch.object(hbase.Connection, '_get_connection_pool', + side_effect=get_connection_pool): + conn = hbase.Connection('hbase://test_hbase:9090') + self.assertIsInstance(conn.conn_pool, TestConn) + + +class CapabilitiesTest(test_base.BaseTestCase): + # Check the returned capabilities list, which is specific to each DB + # driver + + def test_capabilities(self): + expected_capabilities = { + 'meters': {'query': {'simple': True, + 'metadata': True, + 'complex': False}}, + 'resources': {'query': {'simple': True, + 'metadata': True, + 'complex': False}}, + 'samples': {'query': {'simple': True, + 'metadata': True, + 'complex': False}}, + 'statistics': {'groupby': False, + 'query': {'simple': True, + 'metadata': True, + 'complex': False}, + 'aggregation': {'standard': True, + 'selectable': { + 'max': False, + 'min': False, + 'sum': False, + 'avg': False, + 'count': False, + 'stddev': False, + 'cardinality': False}} + }, + } + + actual_capabilities = hbase.Connection.get_capabilities() + self.assertEqual(expected_capabilities, actual_capabilities) + + def test_alarm_capabilities(self): + expected_capabilities = { + 'alarms': {'query': {'simple': True, + 'complex': False}, + 'history': {'query': {'simple': True, + 'complex': False}}}, + } + + actual_capabilities = hbase_alarm.Connection.get_capabilities() + self.assertEqual(expected_capabilities, actual_capabilities) + + def test_event_capabilities(self): + expected_capabilities = { + 'events': {'query': {'simple': True}}, + } + + actual_capabilities = hbase_event.Connection.get_capabilities() + self.assertEqual(expected_capabilities, actual_capabilities) + + def test_storage_capabilities(self): + expected_capabilities = { + 'storage': {'production_ready': True}, + } + actual_capabilities = hbase.Connection.get_storage_capabilities() + self.assertEqual(expected_capabilities, actual_capabilities) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/storage/test_impl_log.py ceilometer-5.0.0~b3/ceilometer/tests/functional/storage/test_impl_log.py --- ceilometer-5.0.0~b2/ceilometer/tests/functional/storage/test_impl_log.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/storage/test_impl_log.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,29 @@ +# +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Tests for ceilometer/storage/impl_log.py +""" +from oslotest import base + +from ceilometer.storage import impl_log + + +class ConnectionTest(base.BaseTestCase): + @staticmethod + def test_get_connection(): + conn = impl_log.Connection(None) + conn.record_metering_data({'counter_name': 'test', + 'resource_id': __name__, + 'counter_volume': 1, + }) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/storage/test_impl_mongodb.py ceilometer-5.0.0~b3/ceilometer/tests/functional/storage/test_impl_mongodb.py --- ceilometer-5.0.0~b2/ceilometer/tests/functional/storage/test_impl_mongodb.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/storage/test_impl_mongodb.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,168 @@ +# +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Tests for ceilometer/storage/impl_mongodb.py + +.. note:: + In order to run the tests against another MongoDB server set the + environment variable CEILOMETER_TEST_MONGODB_URL to point to a MongoDB + server before running the tests. 
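
In practice that means exporting the URL before invoking the suite; a minimal sketch (the server URL is an assumption, and the gate normally drives the run through tox, as post_test_hook.sh above shows):

    # Illustrative only: point the suite at a live MongoDB, then run it.
    import os
    import subprocess

    os.environ["CEILOMETER_TEST_MONGODB_URL"] = (
        "mongodb://localhost:27017/ceilometer")   # assumed server location
    subprocess.check_call(["tox", "-efunctional"])
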
+ +""" + +from ceilometer.alarm.storage import impl_mongodb as impl_mongodb_alarm +from ceilometer.event.storage import impl_mongodb as impl_mongodb_event +from ceilometer.storage import impl_mongodb +from ceilometer.tests import base as test_base +from ceilometer.tests import db as tests_db + + +@tests_db.run_with('mongodb') +class MongoDBConnection(tests_db.TestBase, + tests_db.MixinTestsWithBackendScenarios): + def test_connection_pooling(self): + test_conn = impl_mongodb.Connection(self.db_manager.url) + self.assertEqual(self.conn.conn, test_conn.conn) + + def test_replica_set(self): + url = self.db_manager._url + '?replicaSet=foobar' + conn = impl_mongodb.Connection(url) + self.assertTrue(conn.conn) + + def test_recurse_sort_keys(self): + sort_keys = ['k1', 'k2', 'k3'] + marker = {'k1': 'v1', 'k2': 'v2', 'k3': 'v3'} + flag = '$lt' + ret = impl_mongodb.Connection._recurse_sort_keys(sort_keys=sort_keys, + marker=marker, + flag=flag) + expect = {'k3': {'$lt': 'v3'}, 'k2': {'eq': 'v2'}, 'k1': {'eq': 'v1'}} + self.assertEqual(expect, ret) + + +@tests_db.run_with('mongodb') +class IndexTest(tests_db.TestBase, + tests_db.MixinTestsWithBackendScenarios): + + def _test_ttl_index_absent(self, conn, coll_name, ttl_opt): + # create a fake index and check it is deleted + coll = getattr(conn.db, coll_name) + index_name = '%s_ttl' % coll_name + self.CONF.set_override(ttl_opt, -1, group='database') + conn.upgrade() + self.assertNotIn(index_name, coll.index_information()) + + self.CONF.set_override(ttl_opt, 456789, group='database') + conn.upgrade() + self.assertEqual(456789, + coll.index_information() + [index_name]['expireAfterSeconds']) + + def test_meter_ttl_index_absent(self): + self._test_ttl_index_absent(self.conn, 'meter', + 'metering_time_to_live') + + def test_event_ttl_index_absent(self): + self._test_ttl_index_absent(self.event_conn, 'event', + 'event_time_to_live') + + def test_alarm_history_ttl_index_absent(self): + self._test_ttl_index_absent(self.alarm_conn, 'alarm_history', + 'alarm_history_time_to_live') + + def _test_ttl_index_present(self, conn, coll_name, ttl_opt): + coll = getattr(conn.db, coll_name) + self.CONF.set_override(ttl_opt, 456789, group='database') + conn.upgrade() + index_name = '%s_ttl' % coll_name + self.assertEqual(456789, + coll.index_information() + [index_name]['expireAfterSeconds']) + + self.CONF.set_override(ttl_opt, -1, group='database') + conn.upgrade() + self.assertNotIn(index_name, coll.index_information()) + + def test_meter_ttl_index_present(self): + self._test_ttl_index_present(self.conn, 'meter', + 'metering_time_to_live') + + def test_event_ttl_index_present(self): + self._test_ttl_index_present(self.event_conn, 'event', + 'event_time_to_live') + + def test_alarm_history_ttl_index_present(self): + self._test_ttl_index_present(self.alarm_conn, 'alarm_history', + 'alarm_history_time_to_live') + + +class CapabilitiesTest(test_base.BaseTestCase): + # Check the returned capabilities list, which is specific to each DB + # driver + + def test_capabilities(self): + expected_capabilities = { + 'meters': {'query': {'simple': True, + 'metadata': True, + 'complex': False}}, + 'resources': {'query': {'simple': True, + 'metadata': True, + 'complex': False}}, + 'samples': {'query': {'simple': True, + 'metadata': True, + 'complex': True}}, + 'statistics': {'groupby': True, + 'query': {'simple': True, + 'metadata': True, + 'complex': False}, + 'aggregation': {'standard': True, + 'selectable': { + 'max': True, + 'min': True, + 'sum': True, + 'avg': True, + 'count': 
True, + 'stddev': True, + 'cardinality': True}} + }, + } + + actual_capabilities = impl_mongodb.Connection.get_capabilities() + self.assertEqual(expected_capabilities, actual_capabilities) + + def test_event_capabilities(self): + expected_capabilities = { + 'events': {'query': {'simple': True}}, + } + actual_capabilities = impl_mongodb_event.Connection.get_capabilities() + self.assertEqual(expected_capabilities, actual_capabilities) + + def test_alarm_capabilities(self): + expected_capabilities = { + 'alarms': {'query': {'simple': True, + 'complex': True}, + 'history': {'query': {'simple': True, + 'complex': True}}}, + } + + actual_capabilities = impl_mongodb_alarm.Connection.get_capabilities() + self.assertEqual(expected_capabilities, actual_capabilities) + + def test_storage_capabilities(self): + expected_capabilities = { + 'storage': {'production_ready': True}, + } + actual_capabilities = (impl_mongodb.Connection. + get_storage_capabilities()) + self.assertEqual(expected_capabilities, actual_capabilities) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/storage/test_impl_sqlalchemy.py ceilometer-5.0.0~b3/ceilometer/tests/functional/storage/test_impl_sqlalchemy.py --- ceilometer-5.0.0~b2/ceilometer/tests/functional/storage/test_impl_sqlalchemy.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/storage/test_impl_sqlalchemy.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,191 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Tests for ceilometer/storage/impl_sqlalchemy.py + +.. note:: + In order to run the tests against real SQL server set the environment + variable CEILOMETER_TEST_SQL_URL to point to a SQL server before running + the tests. + +""" + +import datetime + +import mock +from oslo_utils import timeutils +from six.moves import reprlib + +from ceilometer.alarm.storage import impl_sqlalchemy as impl_sqla_alarm +from ceilometer.event.storage import impl_sqlalchemy as impl_sqla_event +from ceilometer.event.storage import models +from ceilometer.storage import impl_sqlalchemy +from ceilometer.storage.sqlalchemy import models as sql_models +from ceilometer.tests import base as test_base +from ceilometer.tests import db as tests_db +from ceilometer.tests.functional.storage \ + import test_storage_scenarios as scenarios + + +@tests_db.run_with('sqlite', 'mysql', 'pgsql') +class CeilometerBaseTest(tests_db.TestBase): + + def test_ceilometer_base(self): + base = sql_models.CeilometerBase() + base['key'] = 'value' + self.assertEqual('value', base['key']) + + +@tests_db.run_with('sqlite', 'mysql', 'pgsql') +class EventTypeTest(tests_db.TestBase): + # EventType is a construct specific to sqlalchemy + # Not applicable to other drivers. 
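
The first two tests below pin down get-or-create semantics: requesting the same type name twice must return the same row, while distinct names produce distinct rows. Generically the pattern looks like this sketch (not the driver's actual _get_or_create_event_type, just the shape it is expected to have):

    # Generic get-or-create shape; illustrative, not the driver's code.
    def get_or_create(session, model, **kwargs):
        obj = session.query(model).filter_by(**kwargs).first()
        if obj is None:
            obj = model(**kwargs)
            session.add(obj)
            session.flush()  # assigns the autoincrement id
        return obj
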
+ + def test_event_type_exists(self): + et1 = self.event_conn._get_or_create_event_type("foo") + self.assertTrue(et1.id >= 0) + et2 = self.event_conn._get_or_create_event_type("foo") + self.assertEqual(et2.id, et1.id) + self.assertEqual(et2.desc, et1.desc) + + def test_event_type_unique(self): + et1 = self.event_conn._get_or_create_event_type("foo") + self.assertTrue(et1.id >= 0) + et2 = self.event_conn._get_or_create_event_type("blah") + self.assertNotEqual(et1.id, et2.id) + self.assertNotEqual(et1.desc, et2.desc) + # Test the method __repr__ returns a string + self.assertTrue(reprlib.repr(et2)) + + +@tests_db.run_with('sqlite', 'mysql', 'pgsql') +class EventTest(tests_db.TestBase): + def _verify_data(self, trait, trait_table): + now = datetime.datetime.utcnow() + ev = models.Event('1', 'name', now, [trait], {}) + self.event_conn.record_events([ev]) + session = self.event_conn._engine_facade.get_session() + t_tables = [sql_models.TraitText, sql_models.TraitFloat, + sql_models.TraitInt, sql_models.TraitDatetime] + for table in t_tables: + if table == trait_table: + self.assertEqual(1, session.query(table).count()) + else: + self.assertEqual(0, session.query(table).count()) + + def test_string_traits(self): + model = models.Trait("Foo", models.Trait.TEXT_TYPE, "my_text") + self._verify_data(model, sql_models.TraitText) + + def test_int_traits(self): + model = models.Trait("Foo", models.Trait.INT_TYPE, 100) + self._verify_data(model, sql_models.TraitInt) + + def test_float_traits(self): + model = models.Trait("Foo", models.Trait.FLOAT_TYPE, 123.456) + self._verify_data(model, sql_models.TraitFloat) + + def test_datetime_traits(self): + now = datetime.datetime.utcnow() + model = models.Trait("Foo", models.Trait.DATETIME_TYPE, now) + self._verify_data(model, sql_models.TraitDatetime) + + def test_event_repr(self): + ev = sql_models.Event('msg_id', None, False, {}) + ev.id = 100 + self.assertTrue(reprlib.repr(ev)) + + +@tests_db.run_with('sqlite', 'mysql', 'pgsql') +class RelationshipTest(scenarios.DBTestBase): + # Note: Do not derive from SQLAlchemyEngineTestBase, since we + # don't want to automatically inherit all the Meter setup. 
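
The expiry test below freezes the clock so that "expired" is deterministic: timeutils.utcnow is patched to a fixed instant and the retention window is passed in seconds relative to it. The pattern in isolation:

    # Freezing time so age-based expiry is reproducible.
    import datetime

    import mock
    from oslo_utils import timeutils

    with mock.patch.object(timeutils, 'utcnow') as now:
        now.return_value = datetime.datetime(2012, 7, 2, 10, 45)
        cutoff = timeutils.utcnow() - datetime.timedelta(seconds=3 * 60)
        assert cutoff == datetime.datetime(2012, 7, 2, 10, 42)
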
+ + @mock.patch.object(timeutils, 'utcnow') + def test_clear_metering_data_meta_tables(self, mock_utcnow): + mock_utcnow.return_value = datetime.datetime(2012, 7, 2, 10, 45) + self.conn.clear_expired_metering_data(3 * 60) + + session = self.conn._engine_facade.get_session() + self.assertEqual(5, session.query(sql_models.Sample).count()) + + resource_ids = (session.query(sql_models.Resource.internal_id) + .group_by(sql_models.Resource.internal_id)) + meta_tables = [sql_models.MetaText, sql_models.MetaFloat, + sql_models.MetaBigInt, sql_models.MetaBool] + s = set() + for table in meta_tables: + self.assertEqual(0, (session.query(table) + .filter(~table.id.in_(resource_ids)).count() + )) + s.update(session.query(table.id).all()) + self.assertEqual(set(resource_ids.all()), s) + + +class CapabilitiesTest(test_base.BaseTestCase): + # Check the returned capabilities list, which is specific to each DB + # driver + + def test_capabilities(self): + expected_capabilities = { + 'meters': {'query': {'simple': True, + 'metadata': True, + 'complex': False}}, + 'resources': {'query': {'simple': True, + 'metadata': True, + 'complex': False}}, + 'samples': {'query': {'simple': True, + 'metadata': True, + 'complex': True}}, + 'statistics': {'groupby': True, + 'query': {'simple': True, + 'metadata': True, + 'complex': False}, + 'aggregation': {'standard': True, + 'selectable': { + 'max': True, + 'min': True, + 'sum': True, + 'avg': True, + 'count': True, + 'stddev': True, + 'cardinality': True}} + }, + } + + actual_capabilities = impl_sqlalchemy.Connection.get_capabilities() + self.assertEqual(expected_capabilities, actual_capabilities) + + def test_event_capabilities(self): + expected_capabilities = { + 'events': {'query': {'simple': True}}, + } + actual_capabilities = impl_sqla_event.Connection.get_capabilities() + self.assertEqual(expected_capabilities, actual_capabilities) + + def test_alarm_capabilities(self): + expected_capabilities = { + 'alarms': {'query': {'simple': True, + 'complex': True}, + 'history': {'query': {'simple': True, + 'complex': True}}}, + } + + actual_capabilities = impl_sqla_alarm.Connection.get_capabilities() + self.assertEqual(expected_capabilities, actual_capabilities) + + def test_storage_capabilities(self): + expected_capabilities = { + 'storage': {'production_ready': True}, + } + actual_capabilities = (impl_sqlalchemy. + Connection.get_storage_capabilities()) + self.assertEqual(expected_capabilities, actual_capabilities) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/storage/test_pymongo_base.py ceilometer-5.0.0~b3/ceilometer/tests/functional/storage/test_pymongo_base.py --- ceilometer-5.0.0~b2/ceilometer/tests/functional/storage/test_pymongo_base.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/storage/test_pymongo_base.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,155 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+"""Tests the mongodb and db2 common functionality +""" + +import copy +import datetime + +import mock + +from ceilometer.publisher import utils +from ceilometer import sample +from ceilometer.tests import constants +from ceilometer.tests import db as tests_db +from ceilometer.tests.functional.storage import test_storage_scenarios + + +@tests_db.run_with('mongodb', 'db2') +class CompatibilityTest(test_storage_scenarios.DBTestBase, + tests_db.MixinTestsWithBackendScenarios): + + def prepare_data(self): + def old_record_metering_data(self, data): + received_timestamp = datetime.datetime.utcnow() + self.db.resource.update( + {'_id': data['resource_id']}, + {'$set': {'project_id': data['project_id'], + 'user_id': data['user_id'], + # Current metadata being used and when it was + # last updated. + 'timestamp': data['timestamp'], + 'received_timestamp': received_timestamp, + 'metadata': data['resource_metadata'], + 'source': data['source'], + }, + '$addToSet': {'meter': {'counter_name': data['counter_name'], + 'counter_type': data['counter_type'], + }, + }, + }, + upsert=True, + ) + + record = copy.copy(data) + self.db.meter.insert(record) + + # Stubout with the old version DB schema, the one w/o 'counter_unit' + with mock.patch.object(self.conn, 'record_metering_data', + side_effect=old_record_metering_data): + self.counters = [] + c = sample.Sample( + 'volume.size', + 'gauge', + 'GiB', + 5, + 'user-id', + 'project1', + 'resource-id', + timestamp=datetime.datetime(2012, 9, 25, 10, 30), + resource_metadata={'display_name': 'test-volume', + 'tag': 'self.counter', + }, + source='test', + ) + self.counters.append(c) + msg = utils.meter_message_from_counter( + c, + secret='not-so-secret') + self.conn.record_metering_data(self.conn, msg) + + # Create the old format alarm with a dict instead of a + # array for matching_metadata + alarm = dict(alarm_id='0ld-4l3rt', + enabled=True, + name='old-alert', + description='old-alert', + timestamp=constants.MIN_DATETIME, + meter_name='cpu', + user_id='me', + project_id='and-da-boys', + comparison_operator='lt', + threshold=36, + statistic='count', + evaluation_periods=1, + period=60, + state="insufficient data", + state_timestamp=constants.MIN_DATETIME, + ok_actions=[], + alarm_actions=['http://nowhere/alarms'], + insufficient_data_actions=[], + repeat_actions=False, + matching_metadata={'key': 'value'}) + + self.alarm_conn.db.alarm.update( + {'alarm_id': alarm['alarm_id']}, + {'$set': alarm}, + upsert=True) + + alarm['alarm_id'] = 'other-kind-of-0ld-4l3rt' + alarm['name'] = 'other-old-alaert' + alarm['matching_metadata'] = [{'key': 'key1', 'value': 'value1'}, + {'key': 'key2', 'value': 'value2'}] + self.alarm_conn.db.alarm.update( + {'alarm_id': alarm['alarm_id']}, + {'$set': alarm}, + upsert=True) + + def test_alarm_get_old_format_matching_metadata_dict(self): + old = list(self.alarm_conn.get_alarms(name='old-alert'))[0] + self.assertEqual('threshold', old.type) + self.assertEqual([{'field': 'key', + 'op': 'eq', + 'value': 'value', + 'type': 'string'}], + old.rule['query']) + self.assertEqual(60, old.rule['period']) + self.assertEqual('cpu', old.rule['meter_name']) + self.assertEqual(1, old.rule['evaluation_periods']) + self.assertEqual('count', old.rule['statistic']) + self.assertEqual('lt', old.rule['comparison_operator']) + self.assertEqual(36, old.rule['threshold']) + + def test_alarm_get_old_format_matching_metadata_array(self): + old = list(self.alarm_conn.get_alarms(name='other-old-alaert'))[0] + self.assertEqual('threshold', old.type) + 
self.assertEqual(sorted([{'field': 'key1', + 'op': 'eq', + 'value': 'value1', + 'type': 'string'}, + {'field': 'key2', + 'op': 'eq', + 'value': 'value2', + 'type': 'string'}], + key=lambda obj: sorted(obj.items())), + sorted(old.rule['query'], + key=lambda obj: sorted(obj.items()))) + self.assertEqual('cpu', old.rule['meter_name']) + self.assertEqual(60, old.rule['period']) + self.assertEqual(1, old.rule['evaluation_periods']) + self.assertEqual('count', old.rule['statistic']) + self.assertEqual('lt', old.rule['comparison_operator']) + self.assertEqual(36, old.rule['threshold']) + + def test_counter_unit(self): + meters = list(self.conn.get_meters()) + self.assertEqual(1, len(meters)) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/storage/test_storage_scenarios.py ceilometer-5.0.0~b3/ceilometer/tests/functional/storage/test_storage_scenarios.py --- ceilometer-5.0.0~b2/ceilometer/tests/functional/storage/test_storage_scenarios.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/storage/test_storage_scenarios.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,3705 @@ +# +# Copyright 2013 Intel Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" Base classes for DB backend implementation test +""" + +import datetime +import operator + +import mock +from oslo_config import cfg +from oslo_db import api +from oslo_db import exception as dbexc +from oslo_utils import timeutils +import pymongo + +import ceilometer +from ceilometer.alarm.storage import models as alarm_models +from ceilometer.event.storage import models as event_models +from ceilometer.publisher import utils +from ceilometer import sample +from ceilometer import storage +from ceilometer.tests import constants +from ceilometer.tests import db as tests_db + + +class DBTestBase(tests_db.TestBase): + @staticmethod + def create_side_effect(method, exception_type, test_exception): + def side_effect(*args, **kwargs): + if test_exception.pop(): + raise exception_type + else: + return method(*args, **kwargs) + return side_effect + + def create_and_store_sample(self, timestamp=datetime.datetime.utcnow(), + metadata=None, + name='instance', + sample_type=sample.TYPE_CUMULATIVE, unit='', + volume=1, user_id='user-id', + project_id='project-id', + resource_id='resource-id', source=None): + metadata = metadata or {'display_name': 'test-server', + 'tag': 'self.counter'} + s = sample.Sample( + name, sample_type, unit=unit, volume=volume, user_id=user_id, + project_id=project_id, resource_id=resource_id, + timestamp=timestamp, + resource_metadata=metadata, source=source + ) + msg = utils.meter_message_from_counter( + s, self.CONF.publisher.telemetry_secret + ) + self.conn.record_metering_data(msg) + return msg + + def setUp(self): + super(DBTestBase, self).setUp() + patcher = mock.patch.object(timeutils, 'utcnow') + self.addCleanup(patcher.stop) + self.mock_utcnow = patcher.start() + self.mock_utcnow.return_value = datetime.datetime(2015, 7, 2, 10, 39) + self.prepare_data() + + def 
prepare_data(self): + original_timestamps = [(2012, 7, 2, 10, 40), (2012, 7, 2, 10, 41), + (2012, 7, 2, 10, 41), (2012, 7, 2, 10, 42), + (2012, 7, 2, 10, 43)] + + timestamps_for_test_samples_default_order = [(2012, 7, 2, 10, 44), + (2011, 5, 30, 18, 3), + (2012, 12, 1, 1, 25), + (2012, 2, 29, 6, 59), + (2013, 5, 31, 23, 7)] + timestamp_list = (original_timestamps + + timestamps_for_test_samples_default_order) + + self.msgs = [] + + self.msgs.append(self.create_and_store_sample( + timestamp=datetime.datetime(2012, 7, 2, 10, 39), + source='test-1') + ) + self.msgs.append(self.create_and_store_sample( + timestamp=datetime.datetime(*timestamp_list[0]), + source='test-1') + ) + self.msgs.append(self.create_and_store_sample( + timestamp=datetime.datetime(*timestamp_list[1]), + resource_id='resource-id-alternate', + metadata={'display_name': 'test-server', 'tag': 'self.counter2'}, + source='test-2') + ) + self.msgs.append(self.create_and_store_sample( + timestamp=datetime.datetime(*timestamp_list[2]), + resource_id='resource-id-alternate', + user_id='user-id-alternate', + metadata={'display_name': 'test-server', 'tag': 'self.counter3'}, + source='test-3') + ) + + start_idx = 3 + end_idx = len(timestamp_list) + + for i, ts in zip(range(start_idx - 1, end_idx - 1), + timestamp_list[start_idx:end_idx]): + self.msgs.append( + self.create_and_store_sample( + timestamp=datetime.datetime(*ts), + user_id='user-id-%s' % i, + project_id='project-id-%s' % i, + resource_id='resource-id-%s' % i, + metadata={ + 'display_name': 'test-server', + 'tag': 'counter-%s' % i + }, + source='test') + ) + + +class ResourceTest(DBTestBase, + tests_db.MixinTestsWithBackendScenarios): + def prepare_data(self): + super(ResourceTest, self).prepare_data() + + self.msgs.append(self.create_and_store_sample( + timestamp=datetime.datetime(2012, 7, 2, 10, 39), + user_id='mongodb_test', + resource_id='resource-id-mongo_bad_key', + project_id='project-id-test', + metadata={'display.name': {'name.$1': 'test-server1', + '$name_2': 'test-server2'}, + 'tag': 'self.counter'}, + source='test-4' + )) + + def test_get_resources(self): + expected_first_sample_timestamp = datetime.datetime(2012, 7, 2, 10, 39) + expected_last_sample_timestamp = datetime.datetime(2012, 7, 2, 10, 40) + msgs_sources = [msg['source'] for msg in self.msgs] + resources = list(self.conn.get_resources()) + self.assertEqual(10, len(resources)) + for resource in resources: + if resource.resource_id != 'resource-id': + continue + self.assertEqual(expected_first_sample_timestamp, + resource.first_sample_timestamp) + self.assertEqual(expected_last_sample_timestamp, + resource.last_sample_timestamp) + self.assertEqual('resource-id', resource.resource_id) + self.assertEqual('project-id', resource.project_id) + self.assertIn(resource.source, msgs_sources) + self.assertEqual('user-id', resource.user_id) + self.assertEqual('test-server', resource.metadata['display_name']) + break + else: + self.fail('Never found resource-id') + + def test_get_resources_start_timestamp(self): + timestamp = datetime.datetime(2012, 7, 2, 10, 42) + expected = set(['resource-id-2', 'resource-id-3', 'resource-id-4', + 'resource-id-6', 'resource-id-8']) + + resources = list(self.conn.get_resources(start_timestamp=timestamp)) + resource_ids = [r.resource_id for r in resources] + self.assertEqual(expected, set(resource_ids)) + + resources = list(self.conn.get_resources(start_timestamp=timestamp, + start_timestamp_op='ge')) + resource_ids = [r.resource_id for r in resources] + self.assertEqual(expected, 
set(resource_ids)) + + resources = list(self.conn.get_resources(start_timestamp=timestamp, + start_timestamp_op='gt')) + resource_ids = [r.resource_id for r in resources] + expected.remove('resource-id-2') + self.assertEqual(expected, set(resource_ids)) + + def test_get_resources_end_timestamp(self): + timestamp = datetime.datetime(2012, 7, 2, 10, 42) + expected = set(['resource-id', 'resource-id-alternate', + 'resource-id-5', 'resource-id-7', + 'resource-id-mongo_bad_key']) + + resources = list(self.conn.get_resources(end_timestamp=timestamp)) + resource_ids = [r.resource_id for r in resources] + self.assertEqual(expected, set(resource_ids)) + + resources = list(self.conn.get_resources(end_timestamp=timestamp, + end_timestamp_op='lt')) + resource_ids = [r.resource_id for r in resources] + self.assertEqual(expected, set(resource_ids)) + + resources = list(self.conn.get_resources(end_timestamp=timestamp, + end_timestamp_op='le')) + resource_ids = [r.resource_id for r in resources] + expected.add('resource-id-2') + self.assertEqual(expected, set(resource_ids)) + + def test_get_resources_both_timestamps(self): + start_ts = datetime.datetime(2012, 7, 2, 10, 42) + end_ts = datetime.datetime(2012, 7, 2, 10, 43) + + resources = list(self.conn.get_resources(start_timestamp=start_ts, + end_timestamp=end_ts)) + resource_ids = [r.resource_id for r in resources] + self.assertEqual(set(['resource-id-2']), set(resource_ids)) + + resources = list(self.conn.get_resources(start_timestamp=start_ts, + end_timestamp=end_ts, + start_timestamp_op='ge', + end_timestamp_op='lt')) + resource_ids = [r.resource_id for r in resources] + self.assertEqual(set(['resource-id-2']), set(resource_ids)) + + resources = list(self.conn.get_resources(start_timestamp=start_ts, + end_timestamp=end_ts, + start_timestamp_op='gt', + end_timestamp_op='lt')) + resource_ids = [r.resource_id for r in resources] + self.assertEqual(0, len(resource_ids)) + + resources = list(self.conn.get_resources(start_timestamp=start_ts, + end_timestamp=end_ts, + start_timestamp_op='gt', + end_timestamp_op='le')) + resource_ids = [r.resource_id for r in resources] + self.assertEqual(set(['resource-id-3']), set(resource_ids)) + + resources = list(self.conn.get_resources(start_timestamp=start_ts, + end_timestamp=end_ts, + start_timestamp_op='ge', + end_timestamp_op='le')) + resource_ids = [r.resource_id for r in resources] + self.assertEqual(set(['resource-id-2', 'resource-id-3']), + set(resource_ids)) + + def test_get_resources_by_source(self): + resources = list(self.conn.get_resources(source='test-1')) + self.assertEqual(1, len(resources)) + ids = set(r.resource_id for r in resources) + self.assertEqual(set(['resource-id']), ids) + + def test_get_resources_by_user(self): + resources = list(self.conn.get_resources(user='user-id')) + self.assertTrue(len(resources) == 2 or len(resources) == 1) + ids = set(r.resource_id for r in resources) + # tolerate storage driver only reporting latest owner of resource + resources_ever_owned_by = set(['resource-id', + 'resource-id-alternate']) + resources_now_owned_by = set(['resource-id']) + self.assertTrue(ids == resources_ever_owned_by or + ids == resources_now_owned_by, + 'unexpected resources: %s' % ids) + + def test_get_resources_by_alternate_user(self): + resources = list(self.conn.get_resources(user='user-id-alternate')) + self.assertEqual(1, len(resources)) + # only a single resource owned by this user ever + self.assertEqual('resource-id-alternate', resources[0].resource_id) + + def 
test_get_resources_by_project(self): + resources = list(self.conn.get_resources(project='project-id')) + self.assertEqual(2, len(resources)) + ids = set(r.resource_id for r in resources) + self.assertEqual(set(['resource-id', 'resource-id-alternate']), ids) + + def test_get_resources_by_metaquery(self): + q = {'metadata.display_name': 'test-server'} + resources = list(self.conn.get_resources(metaquery=q)) + self.assertEqual(9, len(resources)) + + def test_get_resources_by_metaquery_key_with_dot_in_metadata(self): + q = {'metadata.display.name.$name_2': 'test-server2', + 'metadata.display.name.name.$1': 'test-server1'} + resources = list(self.conn.get_resources(metaquery=q)) + self.assertEqual(1, len(resources)) + + def test_get_resources_by_empty_metaquery(self): + resources = list(self.conn.get_resources(metaquery={})) + self.assertEqual(10, len(resources)) + + def test_get_resources_most_recent_metadata_all(self): + resources = self.conn.get_resources() + expected_tags = ['self.counter', 'self.counter3', 'counter-2', + 'counter-3', 'counter-4', 'counter-5', 'counter-6', + 'counter-7', 'counter-8'] + + for resource in resources: + self.assertIn(resource.metadata['tag'], expected_tags) + + def test_get_resources_most_recent_metadata_single(self): + resource = list( + self.conn.get_resources(resource='resource-id-alternate') + )[0] + expected_tag = 'self.counter3' + self.assertEqual(expected_tag, resource.metadata['tag']) + + +class ResourceTestOrdering(DBTestBase, + tests_db.MixinTestsWithBackendScenarios): + def prepare_data(self): + sample_timings = [('resource-id-1', [(2013, 8, 10, 10, 43), + (2013, 8, 10, 10, 44), + (2013, 8, 10, 10, 42), + (2013, 8, 10, 10, 49), + (2013, 8, 10, 10, 47)]), + ('resource-id-2', [(2013, 8, 10, 10, 43), + (2013, 8, 10, 10, 48), + (2013, 8, 10, 10, 42), + (2013, 8, 10, 10, 48), + (2013, 8, 10, 10, 47)]), + ('resource-id-3', [(2013, 8, 10, 10, 43), + (2013, 8, 10, 10, 44), + (2013, 8, 10, 10, 50), + (2013, 8, 10, 10, 49), + (2013, 8, 10, 10, 47)])] + + counter = 0 + for resource, timestamps in sample_timings: + for timestamp in timestamps: + self.create_and_store_sample( + timestamp=datetime.datetime(*timestamp), + resource_id=resource, + user_id=str(counter % 2), + project_id=str(counter % 3), + metadata={ + 'display_name': 'test-server', + 'tag': 'sample-%s' % counter + }, + source='test' + ) + counter += 1 + + def test_get_resources_ordering_all(self): + resources = list(self.conn.get_resources()) + expected = set([ + ('resource-id-1', 'sample-3'), + ('resource-id-2', 'sample-8'), + ('resource-id-3', 'sample-12') + ]) + received = set([(r.resource_id, r.metadata['tag']) for r in resources]) + self.assertEqual(expected, received) + + def test_get_resources_ordering_single(self): + resource = list(self.conn.get_resources(resource='resource-id-2'))[0] + self.assertEqual('resource-id-2', resource.resource_id) + self.assertEqual('sample-8', resource.metadata['tag']) + + +class MeterTest(DBTestBase, + tests_db.MixinTestsWithBackendScenarios): + + def test_get_meters(self): + msgs_sources = [msg['source'] for msg in self.msgs] + results = list(self.conn.get_meters()) + self.assertEqual(9, len(results)) + for meter in results: + self.assertIn(meter.source, msgs_sources) + + def test_get_meters_by_user(self): + results = list(self.conn.get_meters(user='user-id')) + self.assertEqual(1, len(results)) + + def test_get_meters_by_project(self): + results = list(self.conn.get_meters(project='project-id')) + self.assertEqual(2, len(results)) + + def 
test_get_meters_by_metaquery(self): + q = {'metadata.display_name': 'test-server'} + results = list(self.conn.get_meters(metaquery=q)) + self.assertIsNotEmpty(results) + self.assertEqual(9, len(results)) + + def test_get_meters_by_empty_metaquery(self): + results = list(self.conn.get_meters(metaquery={})) + self.assertEqual(9, len(results)) + + +class RawSampleTest(DBTestBase, + tests_db.MixinTestsWithBackendScenarios): + + def prepare_data(self): + super(RawSampleTest, self).prepare_data() + + self.msgs.append(self.create_and_store_sample( + timestamp=datetime.datetime(2012, 7, 2, 10, 39), + user_id='mongodb_test', + resource_id='resource-id-mongo_bad_key', + project_id='project-id-test', + metadata={'display.name': {'name.$1': 'test-server1', + '$name_2': 'test-server2'}, + 'tag': 'self.counter'}, + source='test-4' + )) + + def test_get_sample_counter_volume(self): + # NOTE(idegtiarov) Because wsme expects a float type of data, this test + # checks the type of counter_volume received from the database. + f = storage.SampleFilter() + result = next(self.conn.get_samples(f, limit=1)) + self.assertIsInstance(result.counter_volume, float) + + def test_get_samples_limit_zero(self): + f = storage.SampleFilter() + results = list(self.conn.get_samples(f, limit=0)) + self.assertEqual(0, len(results)) + + def test_get_samples_limit(self): + f = storage.SampleFilter() + results = list(self.conn.get_samples(f, limit=3)) + self.assertEqual(3, len(results)) + for result in results: + self.assertTimestampEqual(timeutils.utcnow(), result.recorded_at) + + def test_get_samples_in_default_order(self): + f = storage.SampleFilter() + prev_timestamp = None + for sample_item in self.conn.get_samples(f): + if prev_timestamp is not None: + self.assertTrue(prev_timestamp >= sample_item.timestamp) + prev_timestamp = sample_item.timestamp + + def test_get_samples_by_user(self): + f = storage.SampleFilter(user='user-id') + results = list(self.conn.get_samples(f)) + self.assertEqual(3, len(results)) + for meter in results: + d = meter.as_dict() + self.assertTimestampEqual(timeutils.utcnow(), d['recorded_at']) + del d['recorded_at'] + self.assertIn(d, self.msgs[:3]) + + def test_get_samples_by_user_limit(self): + f = storage.SampleFilter(user='user-id') + results = list(self.conn.get_samples(f, limit=1)) + self.assertEqual(1, len(results)) + + def test_get_samples_by_user_limit_bigger(self): + f = storage.SampleFilter(user='user-id') + results = list(self.conn.get_samples(f, limit=42)) + self.assertEqual(3, len(results)) + + def test_get_samples_by_project(self): + f = storage.SampleFilter(project='project-id') + results = list(self.conn.get_samples(f)) + self.assertIsNotNone(results) + for meter in results: + d = meter.as_dict() + self.assertTimestampEqual(timeutils.utcnow(), d['recorded_at']) + del d['recorded_at'] + self.assertIn(d, self.msgs[:4]) + + def test_get_samples_by_resource(self): + f = storage.SampleFilter(user='user-id', resource='resource-id') + results = list(self.conn.get_samples(f)) + self.assertEqual(2, len(results)) + d = results[1].as_dict() + self.assertEqual(timeutils.utcnow(), d['recorded_at']) + del d['recorded_at'] + self.assertEqual(self.msgs[0], d) + + def test_get_samples_by_metaquery(self): + q = {'metadata.display_name': 'test-server'} + f = storage.SampleFilter(metaquery=q) + results = list(self.conn.get_samples(f)) + self.assertIsNotNone(results) + for meter in results: + d = meter.as_dict() + self.assertTimestampEqual(timeutils.utcnow(), d['recorded_at']) + del d['recorded_at'] + 
self.assertIn(d, self.msgs) + + def test_get_samples_by_metaquery_key_with_dot_in_metadata(self): + q = {'metadata.display.name.name.$1': 'test-server1', + 'metadata.display.name.$name_2': 'test-server2'} + f = storage.SampleFilter(metaquery=q) + results = list(self.conn.get_samples(f)) + self.assertIsNotNone(results) + self.assertEqual(1, len(results)) + + def test_get_samples_by_start_time(self): + timestamp = datetime.datetime(2012, 7, 2, 10, 41) + f = storage.SampleFilter( + user='user-id', + start_timestamp=timestamp, + ) + + results = list(self.conn.get_samples(f)) + self.assertEqual(1, len(results)) + self.assertEqual(timestamp, results[0].timestamp) + + f.start_timestamp_op = 'ge' + results = list(self.conn.get_samples(f)) + self.assertEqual(1, len(results)) + self.assertEqual(timestamp, results[0].timestamp) + + f.start_timestamp_op = 'gt' + results = list(self.conn.get_samples(f)) + self.assertEqual(0, len(results)) + + def test_get_samples_by_end_time(self): + timestamp = datetime.datetime(2012, 7, 2, 10, 40) + f = storage.SampleFilter( + user='user-id', + end_timestamp=timestamp, + ) + + results = list(self.conn.get_samples(f)) + self.assertEqual(1, len(results)) + + f.end_timestamp_op = 'lt' + results = list(self.conn.get_samples(f)) + self.assertEqual(1, len(results)) + + f.end_timestamp_op = 'le' + results = list(self.conn.get_samples(f)) + self.assertEqual(2, len(results)) + self.assertEqual(datetime.datetime(2012, 7, 2, 10, 39), + results[1].timestamp) + + def test_get_samples_by_both_times(self): + start_ts = datetime.datetime(2012, 7, 2, 10, 42) + end_ts = datetime.datetime(2012, 7, 2, 10, 43) + f = storage.SampleFilter( + start_timestamp=start_ts, + end_timestamp=end_ts, + ) + + results = list(self.conn.get_samples(f)) + self.assertEqual(1, len(results)) + self.assertEqual(start_ts, results[0].timestamp) + + f.start_timestamp_op = 'gt' + f.end_timestamp_op = 'lt' + results = list(self.conn.get_samples(f)) + self.assertEqual(0, len(results)) + + f.start_timestamp_op = 'ge' + f.end_timestamp_op = 'lt' + results = list(self.conn.get_samples(f)) + self.assertEqual(1, len(results)) + self.assertEqual(start_ts, results[0].timestamp) + + f.start_timestamp_op = 'gt' + f.end_timestamp_op = 'le' + results = list(self.conn.get_samples(f)) + self.assertEqual(1, len(results)) + self.assertEqual(end_ts, results[0].timestamp) + + f.start_timestamp_op = 'ge' + f.end_timestamp_op = 'le' + results = list(self.conn.get_samples(f)) + self.assertEqual(2, len(results)) + self.assertEqual(end_ts, results[0].timestamp) + self.assertEqual(start_ts, results[1].timestamp) + + def test_get_samples_by_name(self): + f = storage.SampleFilter(user='user-id', meter='no-such-meter') + results = list(self.conn.get_samples(f)) + self.assertIsEmpty(results) + + def test_get_samples_by_name2(self): + f = storage.SampleFilter(user='user-id', meter='instance') + results = list(self.conn.get_samples(f)) + self.assertIsNotEmpty(results) + + def test_get_samples_by_source(self): + f = storage.SampleFilter(source='test-1') + results = list(self.conn.get_samples(f)) + self.assertEqual(2, len(results)) + + @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'hbase', 'db2') + def test_clear_metering_data(self): + # NOTE(jd) Override this test in MongoDB because our code doesn't clear + # the collections; that is handled by the MongoDB TTL feature. 
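+ # (Illustrative sketch, not part of this change: the MongoDB driver
+ # relies on a TTL index for expiry, roughly
+ # collection.create_index('timestamp', expireAfterSeconds=ttl)
+ # in pymongo; the indexed field name is an assumption here.)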
+ + self.mock_utcnow.return_value = datetime.datetime(2012, 7, 2, 10, 45) + self.conn.clear_expired_metering_data(3 * 60) + f = storage.SampleFilter(meter='instance') + results = list(self.conn.get_samples(f)) + self.assertEqual(5, len(results)) + results = list(self.conn.get_resources()) + self.assertEqual(5, len(results)) + + @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'hbase', 'db2') + def test_clear_metering_data_no_data_to_remove(self): + # NOTE(jd) Override this test in MongoDB because our code doesn't clear + # the collections; that is handled by the MongoDB TTL feature. + + self.mock_utcnow.return_value = datetime.datetime(2010, 7, 2, 10, 45) + self.conn.clear_expired_metering_data(3 * 60) + f = storage.SampleFilter(meter='instance') + results = list(self.conn.get_samples(f)) + self.assertEqual(12, len(results)) + results = list(self.conn.get_resources()) + self.assertEqual(10, len(results)) + + @tests_db.run_with('sqlite', 'mysql', 'pgsql') + def test_clear_metering_data_expire_samples_only(self): + + cfg.CONF.set_override('sql_expire_samples_only', True) + self.mock_utcnow.return_value = datetime.datetime(2012, 7, 2, 10, 45) + self.conn.clear_expired_metering_data(4 * 60) + f = storage.SampleFilter(meter='instance') + results = list(self.conn.get_samples(f)) + self.assertEqual(7, len(results)) + results = list(self.conn.get_resources()) + self.assertEqual(6, len(results)) + + @tests_db.run_with('sqlite', 'mysql', 'pgsql') + def test_record_metering_data_retry_success_on_deadlock(self): + raise_deadlock = [False, True] + self.CONF.set_override('max_retries', 2, group='database') + + s = sample.Sample('instance', sample.TYPE_CUMULATIVE, unit='', + volume=1, user_id='user_id', + project_id='project_id', + resource_id='resource_id', + timestamp=datetime.datetime.utcnow(), + resource_metadata={'display_name': 'test-server', + 'tag': 'self.counter'}, + source=None) + + msg = utils.meter_message_from_counter( + s, self.CONF.publisher.telemetry_secret + ) + + mock_resource_create = mock.patch.object(self.conn, "_create_resource") + + mock_resource_create.side_effect = self.create_side_effect( + self.conn._create_resource, dbexc.DBDeadlock, raise_deadlock) + with mock.patch.object(api.time, 'sleep') as retry_sleep: + self.conn.record_metering_data(msg) + self.assertEqual(1, retry_sleep.call_count) + + f = storage.SampleFilter(meter='instance') + results = list(self.conn.get_samples(f)) + self.assertEqual(13, len(results)) + + @tests_db.run_with('sqlite', 'mysql', 'pgsql') + def test_record_metering_data_retry_failure_on_deadlock(self): + raise_deadlock = [True, True, True] + self.CONF.set_override('max_retries', 3, group='database') + + s = sample.Sample('instance', sample.TYPE_CUMULATIVE, unit='', + volume=1, user_id='user_id', + project_id='project_id', + resource_id='resource_id', + timestamp=datetime.datetime.utcnow(), + resource_metadata={'display_name': 'test-server', + 'tag': 'self.counter'}, + source=None) + + msg = utils.meter_message_from_counter( + s, self.CONF.publisher.telemetry_secret + ) + + mock_resource_create = mock.patch.object(self.conn, "_create_resource") + + mock_resource_create.side_effect = self.create_side_effect( + self.conn._create_resource, dbexc.DBDeadlock, raise_deadlock) + with mock.patch.object(api.time, 'sleep') as retry_sleep: + try: + self.conn.record_metering_data(msg) + except dbexc.DBError as err: + self.assertIn('DBDeadlock', str(type(err))) + self.assertEqual(3, retry_sleep.call_count) + + @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'hbase', 
'db2') + def test_clear_metering_data_with_alarms(self): + # NOTE(jd) Override this test in MongoDB because our code doesn't clear + # the collections; that is handled by the MongoDB TTL feature. + alarm = alarm_models.Alarm(alarm_id='r3d', + enabled=True, + type='threshold', + name='red-alert', + description='my red-alert', + timestamp=constants.MIN_DATETIME, + user_id='user-id', + project_id='project-id', + state="insufficient data", + state_timestamp=constants.MIN_DATETIME, + ok_actions=[], + alarm_actions=['http://nowhere/alarms'], + insufficient_data_actions=[], + repeat_actions=False, + time_constraints=[], + rule=dict(comparison_operator='eq', + threshold=36, + statistic='count', + evaluation_periods=1, + period=60, + meter_name='test.one', + query=[{'field': 'key', + 'op': 'eq', + 'value': 'value', + 'type': 'string'}]), + ) + + self.alarm_conn.create_alarm(alarm) + self.mock_utcnow.return_value = datetime.datetime(2012, 7, 2, 10, 45) + self.conn.clear_expired_metering_data(5) + f = storage.SampleFilter(meter='instance') + results = list(self.conn.get_samples(f)) + self.assertEqual(2, len(results)) + results = list(self.conn.get_resources()) + self.assertEqual(2, len(results)) + + +class ComplexSampleQueryTest(DBTestBase, + tests_db.MixinTestsWithBackendScenarios): + def setUp(self): + super(ComplexSampleQueryTest, self).setUp() + self.complex_filter = { + "and": + [{"or": + [{"=": {"resource_id": "resource-id-42"}}, + {"=": {"resource_id": "resource-id-44"}}]}, + {"and": + [{"=": {"counter_name": "cpu_util"}}, + {"and": + [{">": {"counter_volume": 0.4}}, + {"not": {">": {"counter_volume": 0.8}}}]}]}]} + or_expression = [{"=": {"resource_id": "resource-id-42"}}, + {"=": {"resource_id": "resource-id-43"}}, + {"=": {"resource_id": "resource-id-44"}}] + and_expression = [{">": {"counter_volume": 0.4}}, + {"not": {">": {"counter_volume": 0.8}}}] + self.complex_filter_list = {"and": + [{"or": or_expression}, + {"and": + [{"=": {"counter_name": "cpu_util"}}, + {"and": and_expression}]}]} + in_expression = {"in": {"resource_id": ["resource-id-42", + "resource-id-43", + "resource-id-44"]}} + self.complex_filter_in = {"and": + [in_expression, + {"and": + [{"=": {"counter_name": "cpu_util"}}, + {"and": and_expression}]}]} + + def _create_samples(self): + for resource in range(42, 45): + for volume in [0.79, 0.41, 0.4, 0.8, 0.39, 0.81]: + metadata = {'a_string_key': "meta-value" + str(volume), + 'a_float_key': volume, + 'an_int_key': resource, + 'a_bool_key': (resource == 43)} + + self.create_and_store_sample(resource_id="resource-id-%s" + % resource, + metadata=metadata, + name="cpu_util", + volume=volume) + + def test_no_filter(self): + results = list(self.conn.query_samples()) + self.assertEqual(len(self.msgs), len(results)) + for sample_item in results: + d = sample_item.as_dict() + del d['recorded_at'] + self.assertIn(d, self.msgs) + + def test_query_complex_filter_with_regexp(self): + self._create_samples() + complex_regex_filter = {"and": [ + {"=~": {"resource_id": "resource-id.*"}}, + {"=": {"counter_volume": 0.4}}]} + results = list( + self.conn.query_samples(filter_expr=complex_regex_filter)) + self.assertEqual(3, len(results)) + for sample_item in results: + self.assertIn(sample_item.resource_id, + set(["resource-id-42", + "resource-id-43", + "resource-id-44"])) + + def test_query_complex_filter_with_regexp_metadata(self): + self._create_samples() + complex_regex_filter = {"and": [ + {"=~": {"resource_metadata.a_string_key": "meta-value.*"}}, + {"=": {"counter_volume": 0.4}}]} + 
results = list( + self.conn.query_samples(filter_expr=complex_regex_filter)) + self.assertEqual(3, len(results)) + for sample_item in results: + self.assertEqual("meta-value0.4", + sample_item.resource_metadata['a_string_key']) + + def test_no_filter_with_zero_limit(self): + limit = 0 + results = list(self.conn.query_samples(limit=limit)) + self.assertEqual(limit, len(results)) + + def test_no_filter_with_limit(self): + limit = 3 + results = list(self.conn.query_samples(limit=limit)) + self.assertEqual(limit, len(results)) + + def test_query_simple_filter(self): + simple_filter = {"=": {"resource_id": "resource-id-8"}} + results = list(self.conn.query_samples(filter_expr=simple_filter)) + self.assertEqual(1, len(results)) + for sample_item in results: + self.assertEqual("resource-id-8", sample_item.resource_id) + + def test_query_simple_filter_with_not_equal_relation(self): + simple_filter = {"!=": {"resource_id": "resource-id-8"}} + results = list(self.conn.query_samples(filter_expr=simple_filter)) + self.assertEqual(len(self.msgs) - 1, len(results)) + for sample_item in results: + self.assertNotEqual("resource-id-8", sample_item.resource_id) + + def test_query_complex_filter(self): + self._create_samples() + results = list(self.conn.query_samples(filter_expr=( + self.complex_filter))) + self.assertEqual(6, len(results)) + for sample_item in results: + self.assertIn(sample_item.resource_id, + set(["resource-id-42", "resource-id-44"])) + self.assertEqual("cpu_util", sample_item.counter_name) + self.assertTrue(sample_item.counter_volume > 0.4) + self.assertTrue(sample_item.counter_volume <= 0.8) + + def test_query_complex_filter_with_limit(self): + self._create_samples() + limit = 3 + results = list(self.conn.query_samples(filter_expr=self.complex_filter, + limit=limit)) + self.assertEqual(limit, len(results)) + + def test_query_complex_filter_with_simple_orderby(self): + self._create_samples() + expected_volume_order = [0.41, 0.41, 0.79, 0.79, 0.8, 0.8] + orderby = [{"counter_volume": "asc"}] + results = list(self.conn.query_samples(filter_expr=self.complex_filter, + orderby=orderby)) + self.assertEqual(expected_volume_order, + [s.counter_volume for s in results]) + + def test_query_complex_filter_with_complex_orderby(self): + self._create_samples() + expected_volume_order = [0.41, 0.41, 0.79, 0.79, 0.8, 0.8] + expected_resource_id_order = ["resource-id-44", "resource-id-42", + "resource-id-44", "resource-id-42", + "resource-id-44", "resource-id-42"] + + orderby = [{"counter_volume": "asc"}, {"resource_id": "desc"}] + + results = list(self.conn.query_samples(filter_expr=self.complex_filter, + orderby=orderby)) + + self.assertEqual(expected_volume_order, + [s.counter_volume for s in results]) + self.assertEqual(expected_resource_id_order, + [s.resource_id for s in results]) + + def test_query_complex_filter_with_list(self): + self._create_samples() + results = list( + self.conn.query_samples(filter_expr=self.complex_filter_list)) + self.assertEqual(9, len(results)) + for sample_item in results: + self.assertIn(sample_item.resource_id, + set(["resource-id-42", + "resource-id-43", + "resource-id-44"])) + self.assertEqual("cpu_util", sample_item.counter_name) + self.assertTrue(sample_item.counter_volume > 0.4) + self.assertTrue(sample_item.counter_volume <= 0.8) + + def test_query_complex_filter_with_list_with_limit(self): + self._create_samples() + limit = 3 + results = list( + self.conn.query_samples(filter_expr=self.complex_filter_list, + limit=limit)) + self.assertEqual(limit, 
len(results)) + + def test_query_complex_filter_with_list_with_simple_orderby(self): + self._create_samples() + expected_volume_order = [0.41, 0.41, 0.41, 0.79, 0.79, + 0.79, 0.8, 0.8, 0.8] + orderby = [{"counter_volume": "asc"}] + results = list( + self.conn.query_samples(filter_expr=self.complex_filter_list, + orderby=orderby)) + self.assertEqual(expected_volume_order, + [s.counter_volume for s in results]) + + def test_query_complex_filter_with_list_with_complex_orderby(self): + self._create_samples() + expected_volume_order = [0.41, 0.41, 0.41, 0.79, 0.79, + 0.79, 0.8, 0.8, 0.8] + expected_resource_id_order = ["resource-id-44", "resource-id-43", + "resource-id-42", "resource-id-44", + "resource-id-43", "resource-id-42", + "resource-id-44", "resource-id-43", + "resource-id-42"] + + orderby = [{"counter_volume": "asc"}, {"resource_id": "desc"}] + + results = list( + self.conn.query_samples(filter_expr=self.complex_filter_list, + orderby=orderby)) + + self.assertEqual(expected_volume_order, + [s.counter_volume for s in results]) + self.assertEqual(expected_resource_id_order, + [s.resource_id for s in results]) + + def test_query_complex_filter_with_wrong_order_in_orderby(self): + self._create_samples() + + orderby = [{"counter_volume": "not valid order"}, + {"resource_id": "desc"}] + + query = lambda: list(self.conn.query_samples(filter_expr=( + self.complex_filter), + orderby=orderby)) + self.assertRaises(KeyError, query) + + def test_query_complex_filter_with_in(self): + self._create_samples() + results = list( + self.conn.query_samples(filter_expr=self.complex_filter_in)) + self.assertEqual(9, len(results)) + for sample_item in results: + self.assertIn(sample_item.resource_id, + set(["resource-id-42", + "resource-id-43", + "resource-id-44"])) + self.assertEqual("cpu_util", sample_item.counter_name) + self.assertTrue(sample_item.counter_volume > 0.4) + self.assertTrue(sample_item.counter_volume <= 0.8) + + def test_query_simple_metadata_filter(self): + self._create_samples() + + filter_expr = {"=": {"resource_metadata.a_bool_key": True}} + + results = list(self.conn.query_samples(filter_expr=filter_expr)) + + self.assertEqual(6, len(results)) + for sample_item in results: + self.assertTrue(sample_item.resource_metadata["a_bool_key"]) + + def test_query_simple_metadata_with_in_op(self): + self._create_samples() + + filter_expr = {"in": {"resource_metadata.an_int_key": [42, 43]}} + + results = list(self.conn.query_samples(filter_expr=filter_expr)) + + self.assertEqual(12, len(results)) + for sample_item in results: + self.assertIn(sample_item.resource_metadata["an_int_key"], + [42, 43]) + + def test_query_complex_metadata_filter(self): + self._create_samples() + subfilter = {"or": [{"=": {"resource_metadata.a_string_key": + "meta-value0.81"}}, + {"<=": {"resource_metadata.a_float_key": 0.41}}]} + filter_expr = {"and": [{">": {"resource_metadata.an_int_key": 42}}, + subfilter]} + + results = list(self.conn.query_samples(filter_expr=filter_expr)) + + self.assertEqual(8, len(results)) + for sample_item in results: + self.assertTrue((sample_item.resource_metadata["a_string_key"] == + "meta-value0.81" or + sample_item.resource_metadata["a_float_key"] <= + 0.41)) + self.assertTrue(sample_item.resource_metadata["an_int_key"] > 42) + + def test_query_mixed_data_and_metadata_filter(self): + self._create_samples() + subfilter = {"or": [{"=": {"resource_metadata.a_string_key": + "meta-value0.81"}}, + {"<=": {"resource_metadata.a_float_key": 0.41}}]} + + filter_expr = {"and": [{"=": {"resource_id": 
"resource-id-42"}}, + subfilter]} + + results = list(self.conn.query_samples(filter_expr=filter_expr)) + + self.assertEqual(4, len(results)) + for sample_item in results: + self.assertTrue((sample_item.resource_metadata["a_string_key"] == + "meta-value0.81" or + sample_item.resource_metadata["a_float_key"] <= + 0.41)) + self.assertEqual("resource-id-42", sample_item.resource_id) + + def test_query_non_existing_metadata_with_result(self): + self._create_samples() + + filter_expr = { + "or": [{"=": {"resource_metadata.a_string_key": + "meta-value0.81"}}, + {"<=": {"resource_metadata.key_not_exists": 0.41}}]} + + results = list(self.conn.query_samples(filter_expr=filter_expr)) + + self.assertEqual(3, len(results)) + for sample_item in results: + self.assertEqual("meta-value0.81", + sample_item.resource_metadata["a_string_key"]) + + def test_query_non_existing_metadata_without_result(self): + self._create_samples() + + filter_expr = { + "or": [{"=": {"resource_metadata.key_not_exists": + "meta-value0.81"}}, + {"<=": {"resource_metadata.key_not_exists": 0.41}}]} + + results = list(self.conn.query_samples(filter_expr=filter_expr)) + self.assertEqual(0, len(results)) + + def test_query_negated_metadata(self): + self._create_samples() + + filter_expr = { + "and": [{"=": {"resource_id": "resource-id-42"}}, + {"not": {"or": [{">": {"resource_metadata.an_int_key": + 43}}, + {"<=": {"resource_metadata.a_float_key": + 0.41}}]}}]} + + results = list(self.conn.query_samples(filter_expr=filter_expr)) + + self.assertEqual(3, len(results)) + for sample_item in results: + self.assertEqual("resource-id-42", sample_item.resource_id) + self.assertTrue(sample_item.resource_metadata["an_int_key"] <= 43) + self.assertTrue(sample_item.resource_metadata["a_float_key"] > + 0.41) + + def test_query_negated_complex_expression(self): + self._create_samples() + filter_expr = { + "and": + [{"=": {"counter_name": "cpu_util"}}, + {"not": + {"or": + [{"or": + [{"=": {"resource_id": "resource-id-42"}}, + {"=": {"resource_id": "resource-id-44"}}]}, + {"and": + [{">": {"counter_volume": 0.4}}, + {"<": {"counter_volume": 0.8}}]}]}}]} + + results = list(self.conn.query_samples(filter_expr=filter_expr)) + + self.assertEqual(4, len(results)) + for sample_item in results: + self.assertEqual("resource-id-43", sample_item.resource_id) + self.assertIn(sample_item.counter_volume, [0.39, 0.4, 0.8, 0.81]) + self.assertEqual("cpu_util", sample_item.counter_name) + + def test_query_with_double_negation(self): + self._create_samples() + filter_expr = { + "and": + [{"=": {"counter_name": "cpu_util"}}, + {"not": + {"or": + [{"or": + [{"=": {"resource_id": "resource-id-42"}}, + {"=": {"resource_id": "resource-id-44"}}]}, + {"and": [{"not": {"<=": {"counter_volume": 0.4}}}, + {"<": {"counter_volume": 0.8}}]}]}}]} + + results = list(self.conn.query_samples(filter_expr=filter_expr)) + + self.assertEqual(4, len(results)) + for sample_item in results: + self.assertEqual("resource-id-43", sample_item.resource_id) + self.assertIn(sample_item.counter_volume, [0.39, 0.4, 0.8, 0.81]) + self.assertEqual("cpu_util", sample_item.counter_name) + + def test_query_negate_not_equal(self): + self._create_samples() + filter_expr = {"not": {"!=": {"resource_id": "resource-id-43"}}} + + results = list(self.conn.query_samples(filter_expr=filter_expr)) + + self.assertEqual(6, len(results)) + for sample_item in results: + self.assertEqual("resource-id-43", sample_item.resource_id) + + def test_query_negated_in_op(self): + self._create_samples() + filter_expr = { + 
"and": [{"not": {"in": {"counter_volume": [0.39, 0.4, 0.79]}}}, + {"=": {"resource_id": "resource-id-42"}}]} + + results = list(self.conn.query_samples(filter_expr=filter_expr)) + + self.assertEqual(3, len(results)) + for sample_item in results: + self.assertIn(sample_item.counter_volume, + [0.41, 0.8, 0.81]) + + +class StatisticsTest(DBTestBase, + tests_db.MixinTestsWithBackendScenarios): + + def prepare_data(self): + for i in range(3): + c = sample.Sample( + 'volume.size', + 'gauge', + 'GiB', + 5 + i, + 'user-id', + 'project1', + 'resource-id', + timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), + resource_metadata={'display_name': 'test-volume', + 'tag': 'self.counter', + }, + source='test', + ) + msg = utils.meter_message_from_counter( + c, + secret='not-so-secret', + ) + self.conn.record_metering_data(msg) + for i in range(3): + c = sample.Sample( + 'volume.size', + 'gauge', + 'GiB', + 8 + i, + 'user-5', + 'project2', + 'resource-6', + timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), + resource_metadata={'display_name': 'test-volume', + 'tag': 'self.counter', + }, + source='test', + ) + msg = utils.meter_message_from_counter( + c, + secret='not-so-secret', + ) + self.conn.record_metering_data(msg) + for i in range(3): + c = sample.Sample( + 'memory', + 'gauge', + 'MB', + 8 + i, + 'user-5', + 'project2', + 'resource-6', + timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), + resource_metadata={}, + source='test', + ) + msg = utils.meter_message_from_counter( + c, + secret='not-so-secret', + ) + self.conn.record_metering_data(msg) + + def test_by_meter(self): + f = storage.SampleFilter( + meter='memory' + ) + results = list(self.conn.get_meter_statistics(f))[0] + self.assertEqual((datetime.datetime(2012, 9, 25, 12, 32) + - datetime.datetime(2012, 9, 25, 10, 30)).seconds, + results.duration) + self.assertEqual(3, results.count) + self.assertEqual('MB', results.unit) + self.assertEqual(8, results.min) + self.assertEqual(10, results.max) + self.assertEqual(27, results.sum) + self.assertEqual(9, results.avg) + self.assertEqual(datetime.datetime(2012, 9, 25, 10, 30), + results.period_start) + self.assertEqual(datetime.datetime(2012, 9, 25, 12, 32), + results.period_end) + + def test_by_user(self): + f = storage.SampleFilter( + user='user-5', + meter='volume.size', + ) + results = list(self.conn.get_meter_statistics(f))[0] + self.assertEqual((datetime.datetime(2012, 9, 25, 12, 32) + - datetime.datetime(2012, 9, 25, 10, 30)).seconds, + results.duration) + self.assertEqual(3, results.count) + self.assertEqual('GiB', results.unit) + self.assertEqual(8, results.min) + self.assertEqual(10, results.max) + self.assertEqual(27, results.sum) + self.assertEqual(9, results.avg) + + def test_no_period_in_query(self): + f = storage.SampleFilter( + user='user-5', + meter='volume.size', + ) + results = list(self.conn.get_meter_statistics(f))[0] + self.assertEqual(0, results.period) + + def test_period_is_int(self): + f = storage.SampleFilter( + meter='volume.size', + ) + results = list(self.conn.get_meter_statistics(f))[0] + self.assertIs(int, type(results.period)) + self.assertEqual(6, results.count) + + def test_by_user_period(self): + f = storage.SampleFilter( + user='user-5', + meter='volume.size', + start_timestamp='2012-09-25T10:28:00', + ) + results = list(self.conn.get_meter_statistics(f, period=7200)) + self.assertEqual(2, len(results)) + self.assertEqual(set([datetime.datetime(2012, 9, 25, 10, 28), + datetime.datetime(2012, 9, 25, 12, 28)]), + set(r.period_start for r in 
results)) + self.assertEqual(set([datetime.datetime(2012, 9, 25, 12, 28), + datetime.datetime(2012, 9, 25, 14, 28)]), + set(r.period_end for r in results)) + r = results[0] + self.assertEqual(datetime.datetime(2012, 9, 25, 10, 28), + r.period_start) + self.assertEqual(2, r.count) + self.assertEqual('GiB', r.unit) + self.assertEqual(8.5, r.avg) + self.assertEqual(8, r.min) + self.assertEqual(9, r.max) + self.assertEqual(17, r.sum) + self.assertEqual(7200, r.period) + self.assertIsInstance(r.period, int) + expected_end = r.period_start + datetime.timedelta(seconds=7200) + self.assertEqual(expected_end, r.period_end) + self.assertEqual(3660, r.duration) + self.assertEqual(datetime.datetime(2012, 9, 25, 10, 30), + r.duration_start) + self.assertEqual(datetime.datetime(2012, 9, 25, 11, 31), + r.duration_end) + + def test_by_user_period_with_timezone(self): + dates = [ + '2012-09-25T00:28:00-10:00', + '2012-09-25T01:28:00-09:00', + '2012-09-25T02:28:00-08:00', + '2012-09-25T03:28:00-07:00', + '2012-09-25T04:28:00-06:00', + '2012-09-25T05:28:00-05:00', + '2012-09-25T06:28:00-04:00', + '2012-09-25T07:28:00-03:00', + '2012-09-25T08:28:00-02:00', + '2012-09-25T09:28:00-01:00', + '2012-09-25T10:28:00Z', + '2012-09-25T11:28:00+01:00', + '2012-09-25T12:28:00+02:00', + '2012-09-25T13:28:00+03:00', + '2012-09-25T14:28:00+04:00', + '2012-09-25T15:28:00+05:00', + '2012-09-25T16:28:00+06:00', + '2012-09-25T17:28:00+07:00', + '2012-09-25T18:28:00+08:00', + '2012-09-25T19:28:00+09:00', + '2012-09-25T20:28:00+10:00', + '2012-09-25T21:28:00+11:00', + '2012-09-25T22:28:00+12:00', + ] + for date in dates: + f = storage.SampleFilter( + user='user-5', + meter='volume.size', + start_timestamp=date + ) + results = list(self.conn.get_meter_statistics(f, period=7200)) + self.assertEqual(2, len(results)) + self.assertEqual(set([datetime.datetime(2012, 9, 25, 10, 28), + datetime.datetime(2012, 9, 25, 12, 28)]), + set(r.period_start for r in results)) + self.assertEqual(set([datetime.datetime(2012, 9, 25, 12, 28), + datetime.datetime(2012, 9, 25, 14, 28)]), + set(r.period_end for r in results)) + + def test_by_user_period_start_end(self): + f = storage.SampleFilter( + user='user-5', + meter='volume.size', + start_timestamp='2012-09-25T10:28:00', + end_timestamp='2012-09-25T11:28:00', + ) + results = list(self.conn.get_meter_statistics(f, period=1800)) + self.assertEqual(1, len(results)) + r = results[0] + self.assertEqual(datetime.datetime(2012, 9, 25, 10, 28), + r.period_start) + self.assertEqual(1, r.count) + self.assertEqual('GiB', r.unit) + self.assertEqual(8, r.avg) + self.assertEqual(8, r.min) + self.assertEqual(8, r.max) + self.assertEqual(8, r.sum) + self.assertEqual(1800, r.period) + self.assertEqual(r.period_start + datetime.timedelta(seconds=1800), + r.period_end) + self.assertEqual(0, r.duration) + self.assertEqual(datetime.datetime(2012, 9, 25, 10, 30), + r.duration_start) + self.assertEqual(datetime.datetime(2012, 9, 25, 10, 30), + r.duration_end) + + def test_by_project(self): + f = storage.SampleFilter( + meter='volume.size', + resource='resource-id', + start_timestamp='2012-09-25T11:30:00', + end_timestamp='2012-09-25T11:32:00', + ) + results = list(self.conn.get_meter_statistics(f))[0] + self.assertEqual(0, results.duration) + self.assertEqual(1, results.count) + self.assertEqual('GiB', results.unit) + self.assertEqual(6, results.min) + self.assertEqual(6, results.max) + self.assertEqual(6, results.sum) + self.assertEqual(6, results.avg) + + def test_one_resource(self): + f = storage.SampleFilter( + 
user='user-id', + meter='volume.size', + ) + results = list(self.conn.get_meter_statistics(f))[0] + self.assertEqual((datetime.datetime(2012, 9, 25, 12, 32) + - datetime.datetime(2012, 9, 25, 10, 30)).seconds, + results.duration) + self.assertEqual(3, results.count) + self.assertEqual('GiB', results.unit) + self.assertEqual(5, results.min) + self.assertEqual(7, results.max) + self.assertEqual(18, results.sum) + self.assertEqual(6, results.avg) + + def test_with_no_sample(self): + f = storage.SampleFilter( + user='user-not-exists', + meter='volume.size', + ) + results = list(self.conn.get_meter_statistics(f, period=1800)) + self.assertEqual([], results) + + +class StatisticsGroupByTest(DBTestBase, + tests_db.MixinTestsWithBackendScenarios): + + def prepare_data(self): + test_sample_data = ( + {'volume': 2, 'user': 'user-1', 'project': 'project-1', + 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10), + 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1', + 'source': 'source-2', 'metadata_instance_type': '84'}, + {'volume': 2, 'user': 'user-1', 'project': 'project-2', + 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 15, 37), + 'metadata_flavor': 'm1.large', 'metadata_event': 'event-1', + 'source': 'source-2', 'metadata_instance_type': '83'}, + {'volume': 1, 'user': 'user-2', 'project': 'project-1', + 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 11), + 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', + 'source': 'source-1', 'metadata_instance_type': '82'}, + {'volume': 1, 'user': 'user-2', 'project': 'project-1', + 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40), + 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', + 'source': 'source-1', 'metadata_instance_type': '82'}, + {'volume': 2, 'user': 'user-2', 'project': 'project-1', + 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 14, 59), + 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', + 'source': 'source-1', 'metadata_instance_type': '84'}, + {'volume': 4, 'user': 'user-2', 'project': 'project-2', + 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28), + 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', + 'source': 'source-1', 'metadata_instance_type': '82'}, + {'volume': 4, 'user': 'user-3', 'project': 'project-1', + 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22), + 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', + 'source': 'source-3', 'metadata_instance_type': '83'}, + ) + + for test_sample in test_sample_data: + c = sample.Sample( + 'instance', + sample.TYPE_CUMULATIVE, + unit='s', + volume=test_sample['volume'], + user_id=test_sample['user'], + project_id=test_sample['project'], + resource_id=test_sample['resource'], + timestamp=datetime.datetime(*test_sample['timestamp']), + resource_metadata={'flavor': test_sample['metadata_flavor'], + 'event': test_sample['metadata_event'], + 'instance_type': + test_sample['metadata_instance_type']}, + source=test_sample['source'], + ) + msg = utils.meter_message_from_counter( + c, self.CONF.publisher.telemetry_secret, + ) + self.conn.record_metering_data(msg) + + def test_group_by_user(self): + f = storage.SampleFilter( + meter='instance', + ) + results = list(self.conn.get_meter_statistics(f, groupby=['user_id'])) + self.assertEqual(3, len(results)) + groupby_list = [r.groupby for r in results] + groupby_keys_set = set(x for sub_dict in groupby_list + for x in sub_dict.keys()) + groupby_vals_set = set(x for sub_dict in groupby_list + for x in sub_dict.values()) + 
self.assertEqual(set(['user_id']), groupby_keys_set) + self.assertEqual(set(['user-1', 'user-2', 'user-3']), groupby_vals_set) + + for r in results: + if r.groupby == {'user_id': 'user-1'}: + self.assertEqual(2, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(2, r.min) + self.assertEqual(2, r.max) + self.assertEqual(4, r.sum) + self.assertEqual(2, r.avg) + elif r.groupby == {'user_id': 'user-2'}: + self.assertEqual(4, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(1, r.min) + self.assertEqual(4, r.max) + self.assertEqual(8, r.sum) + self.assertEqual(2, r.avg) + elif r.groupby == {'user_id': 'user-3'}: + self.assertEqual(1, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(4, r.min) + self.assertEqual(4, r.max) + self.assertEqual(4, r.sum) + self.assertEqual(4, r.avg) + + def test_group_by_resource(self): + f = storage.SampleFilter( + meter='instance', + ) + results = list(self.conn.get_meter_statistics(f, + groupby=['resource_id'])) + self.assertEqual(3, len(results)) + groupby_list = [r.groupby for r in results] + groupby_keys_set = set(x for sub_dict in groupby_list + for x in sub_dict.keys()) + groupby_vals_set = set(x for sub_dict in groupby_list + for x in sub_dict.values()) + self.assertEqual(set(['resource_id']), groupby_keys_set) + self.assertEqual(set(['resource-1', 'resource-2', 'resource-3']), + groupby_vals_set) + for r in results: + if r.groupby == {'resource_id': 'resource-1'}: + self.assertEqual(3, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(2, r.min) + self.assertEqual(2, r.max) + self.assertEqual(6, r.sum) + self.assertEqual(2, r.avg) + elif r.groupby == {'resource_id': 'resource-2'}: + self.assertEqual(3, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(1, r.min) + self.assertEqual(4, r.max) + self.assertEqual(6, r.sum) + self.assertEqual(2, r.avg) + elif r.groupby == {'resource_id': 'resource-3'}: + self.assertEqual(1, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(4, r.min) + self.assertEqual(4, r.max) + self.assertEqual(4, r.sum) + self.assertEqual(4, r.avg) + + def test_group_by_project(self): + f = storage.SampleFilter( + meter='instance', + ) + results = list(self.conn.get_meter_statistics(f, + groupby=['project_id'])) + self.assertEqual(2, len(results)) + groupby_list = [r.groupby for r in results] + groupby_keys_set = set(x for sub_dict in groupby_list + for x in sub_dict.keys()) + groupby_vals_set = set(x for sub_dict in groupby_list + for x in sub_dict.values()) + self.assertEqual(set(['project_id']), groupby_keys_set) + self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) + + for r in results: + if r.groupby == {'project_id': 'project-1'}: + self.assertEqual(5, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(1, r.min) + self.assertEqual(4, r.max) + self.assertEqual(10, r.sum) + self.assertEqual(2, r.avg) + elif r.groupby == {'project_id': 'project-2'}: + self.assertEqual(2, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(2, r.min) + self.assertEqual(4, r.max) + self.assertEqual(6, r.sum) + self.assertEqual(3, r.avg) + + def test_group_by_source(self): + f = storage.SampleFilter( + meter='instance', + ) + results = list(self.conn.get_meter_statistics(f, groupby=['source'])) + self.assertEqual(3, len(results)) + groupby_list = [r.groupby for r in results] + groupby_keys_set = set(x for sub_dict in groupby_list + for x in sub_dict.keys()) + groupby_vals_set = set(x for sub_dict in groupby_list + for x in sub_dict.values()) + 
self.assertEqual(set(['source']), groupby_keys_set) + self.assertEqual(set(['source-1', 'source-2', 'source-3']), + groupby_vals_set) + + for r in results: + if r.groupby == {'source': 'source-1'}: + self.assertEqual(4, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(1, r.min) + self.assertEqual(4, r.max) + self.assertEqual(8, r.sum) + self.assertEqual(2, r.avg) + elif r.groupby == {'source': 'source-2'}: + self.assertEqual(2, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(2, r.min) + self.assertEqual(2, r.max) + self.assertEqual(4, r.sum) + self.assertEqual(2, r.avg) + elif r.groupby == {'source': 'source-3'}: + self.assertEqual(1, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(4, r.min) + self.assertEqual(4, r.max) + self.assertEqual(4, r.sum) + self.assertEqual(4, r.avg) + + def test_group_by_unknown_field(self): + f = storage.SampleFilter( + meter='instance', + ) + # NOTE(terriyu): The MongoDB get_meter_statistics() returns a list + # whereas the SQLAlchemy get_meter_statistics() returns a generator. + # You have to apply list() to the SQLAlchemy generator to get it to + # throw an error. The MongoDB get_meter_statistics() will throw an + # error before list() is called. By using lambda, we can cover both + # MongoDB and SQLAlchemy in a single test. + self.assertRaises( + ceilometer.NotImplementedError, + lambda: list(self.conn.get_meter_statistics(f, groupby=['wtf'])) + ) + + def test_group_by_metadata(self): + # This test checks grouping by a single metadata field + # (now only resource_metadata.instance_type is available). + f = storage.SampleFilter( + meter='instance', + ) + results = list( + self.conn.get_meter_statistics( + f, groupby=['resource_metadata.instance_type'])) + self.assertEqual(3, len(results)) + groupby_list = [r.groupby for r in results] + groupby_keys_set = set(x for sub_dict in groupby_list + for x in sub_dict.keys()) + groupby_vals_set = set(x for sub_dict in groupby_list + for x in sub_dict.values()) + self.assertEqual(set(['resource_metadata.instance_type']), + groupby_keys_set) + self.assertEqual(set(['82', '83', '84']), groupby_vals_set) + + for r in results: + if r.groupby == {'resource_metadata.instance_type': '82'}: + self.assertEqual(3, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(1, r.min) + self.assertEqual(4, r.max) + self.assertEqual(6, r.sum) + self.assertEqual(2, r.avg) + elif r.groupby == {'resource_metadata.instance_type': '83'}: + self.assertEqual(2, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(2, r.min) + self.assertEqual(4, r.max) + self.assertEqual(6, r.sum) + self.assertEqual(3, r.avg) + elif r.groupby == {'resource_metadata.instance_type': '84'}: + self.assertEqual(2, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(2, r.min) + self.assertEqual(2, r.max) + self.assertEqual(4, r.sum) + self.assertEqual(2, r.avg) + + def test_group_by_multiple_regular(self): + f = storage.SampleFilter( + meter='instance', + ) + results = list(self.conn.get_meter_statistics(f, + groupby=['user_id', + 'resource_id'])) + self.assertEqual(4, len(results)) + groupby_list = [r.groupby for r in results] + groupby_keys_set = set(x for sub_dict in groupby_list + for x in sub_dict.keys()) + groupby_vals_set = set(x for sub_dict in groupby_list + for x in sub_dict.values()) + self.assertEqual(set(['user_id', 'resource_id']), groupby_keys_set) + self.assertEqual(set(['user-1', 'user-2', 'user-3', 'resource-1', + 'resource-2', 'resource-3']), + groupby_vals_set) + + for r in results: + if 
r.groupby == {'user_id': 'user-1', 'resource_id': 'resource-1'}: + self.assertEqual(2, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(2, r.min) + self.assertEqual(2, r.max) + self.assertEqual(4, r.sum) + self.assertEqual(2, r.avg) + elif r.groupby == {'user_id': 'user-2', + 'resource_id': 'resource-1'}: + self.assertEqual(1, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(2, r.min) + self.assertEqual(2, r.max) + self.assertEqual(2, r.sum) + self.assertEqual(2, r.avg) + elif r.groupby == {'user_id': 'user-2', + 'resource_id': 'resource-2'}: + self.assertEqual(3, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(1, r.min) + self.assertEqual(4, r.max) + self.assertEqual(6, r.sum) + self.assertEqual(2, r.avg) + elif r.groupby == {'user_id': 'user-3', + 'resource_id': 'resource-3'}: + self.assertEqual(1, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(4, r.min) + self.assertEqual(4, r.max) + self.assertEqual(4, r.sum) + self.assertEqual(4, r.avg) + else: + self.assertNotEqual({'user_id': 'user-1', + 'resource_id': 'resource-2'}, + r.groupby) + self.assertNotEqual({'user_id': 'user-1', + 'resource_id': 'resource-3'}, + r.groupby) + self.assertNotEqual({'user_id': 'user-2', + 'resource_id': 'resource-3'}, + r.groupby) + self.assertNotEqual({'user_id': 'user-3', + 'resource_id': 'resource-1'}, + r.groupby) + self.assertNotEqual({'user_id': 'user-3', + 'resource_id': 'resource-2'}, + r.groupby, ) + + def test_group_by_multiple_metadata(self): + # TODO(terriyu): test_group_by_multiple_metadata needs to be + # implemented. + # This test should check grouping by multiple metadata fields. + pass + + def test_group_by_multiple_regular_metadata(self): + # This test checks grouping by a combination of regular and + # metadata fields. 
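+ # (For illustration: per the assertions below, each Statistics result
+ # carries a groupby dict keyed by every requested field, e.g.
+ # {'user_id': 'user-2', 'resource_metadata.instance_type': '82'}.)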
+ f = storage.SampleFilter( + meter='instance', + ) + results = list( + self.conn.get_meter_statistics( + f, groupby=['user_id', 'resource_metadata.instance_type'])) + self.assertEqual(5, len(results)) + groupby_list = [r.groupby for r in results] + groupby_keys_set = set(x for sub_dict in groupby_list + for x in sub_dict.keys()) + groupby_vals_set = set(x for sub_dict in groupby_list + for x in sub_dict.values()) + self.assertEqual(set(['user_id', 'resource_metadata.instance_type']), + groupby_keys_set) + self.assertEqual(set(['user-1', 'user-2', 'user-3', '82', + '83', '84']), + groupby_vals_set) + + for r in results: + if r.groupby == {'user_id': 'user-1', + 'resource_metadata.instance_type': '83'}: + self.assertEqual(1, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(2, r.min) + self.assertEqual(2, r.max) + self.assertEqual(2, r.sum) + self.assertEqual(2, r.avg) + elif r.groupby == {'user_id': 'user-1', + 'resource_metadata.instance_type': '84'}: + self.assertEqual(1, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(2, r.min) + self.assertEqual(2, r.max) + self.assertEqual(2, r.sum) + self.assertEqual(2, r.avg) + elif r.groupby == {'user_id': 'user-2', + 'resource_metadata.instance_type': '82'}: + self.assertEqual(3, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(1, r.min) + self.assertEqual(4, r.max) + self.assertEqual(6, r.sum) + self.assertEqual(2, r.avg) + elif r.groupby == {'user_id': 'user-2', + 'resource_metadata.instance_type': '84'}: + self.assertEqual(1, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(2, r.min) + self.assertEqual(2, r.max) + self.assertEqual(2, r.sum) + self.assertEqual(2, r.avg) + elif r.groupby == {'user_id': 'user-3', + 'resource_metadata.instance_type': '83'}: + self.assertEqual(1, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(4, r.min) + self.assertEqual(4, r.max) + self.assertEqual(4, r.sum) + self.assertEqual(4, r.avg) + else: + self.assertNotEqual({'user_id': 'user-1', + 'resource_metadata.instance_type': '82'}, + r.groupby) + self.assertNotEqual({'user_id': 'user-2', + 'resource_metadata.instance_type': '83'}, + r.groupby) + self.assertNotEqual({'user_id': 'user-3', + 'resource_metadata.instance_type': '82'}, + r.groupby) + self.assertNotEqual({'user_id': 'user-3', + 'resource_metadata.instance_type': '84'}, + r.groupby) + + def test_group_by_with_query_filter(self): + f = storage.SampleFilter( + meter='instance', + project='project-1', + ) + results = list(self.conn.get_meter_statistics( + f, + groupby=['resource_id'])) + self.assertEqual(3, len(results)) + groupby_list = [r.groupby for r in results] + groupby_keys_set = set(x for sub_dict in groupby_list + for x in sub_dict.keys()) + groupby_vals_set = set(x for sub_dict in groupby_list + for x in sub_dict.values()) + self.assertEqual(set(['resource_id']), groupby_keys_set) + self.assertEqual(set(['resource-1', 'resource-2', 'resource-3']), + groupby_vals_set) + + for r in results: + if r.groupby == {'resource_id': 'resource-1'}: + self.assertEqual(2, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(2, r.min) + self.assertEqual(2, r.max) + self.assertEqual(4, r.sum) + self.assertEqual(2, r.avg) + elif r.groupby == {'resource_id': 'resource-2'}: + self.assertEqual(2, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(1, r.min) + self.assertEqual(1, r.max) + self.assertEqual(2, r.sum) + self.assertEqual(1, r.avg) + elif r.groupby == {'resource_id': 'resource-3'}: + self.assertEqual(1, r.count) + self.assertEqual('s', 
r.unit) + self.assertEqual(4, r.min) + self.assertEqual(4, r.max) + self.assertEqual(4, r.sum) + self.assertEqual(4, r.avg) + + def test_group_by_metadata_with_query_filter(self): + # This test checks grouping by a metadata field in combination + # with a query filter. + f = storage.SampleFilter( + meter='instance', + project='project-1', + ) + results = list(self.conn.get_meter_statistics( + f, + groupby=['resource_metadata.instance_type'])) + self.assertEqual(3, len(results)) + groupby_list = [r.groupby for r in results] + groupby_keys_set = set(x for sub_dict in groupby_list + for x in sub_dict.keys()) + groupby_vals_set = set(x for sub_dict in groupby_list + for x in sub_dict.values()) + self.assertEqual(set(['resource_metadata.instance_type']), + groupby_keys_set) + self.assertEqual(set(['82', '83', '84']), + groupby_vals_set) + + for r in results: + if r.groupby == {'resource_metadata.instance_type': '82'}: + self.assertEqual(2, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(1, r.min) + self.assertEqual(1, r.max) + self.assertEqual(2, r.sum) + self.assertEqual(1, r.avg) + elif r.groupby == {'resource_metadata.instance_type': '83'}: + self.assertEqual(1, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(4, r.min) + self.assertEqual(4, r.max) + self.assertEqual(4, r.sum) + self.assertEqual(4, r.avg) + elif r.groupby == {'resource_metadata.instance_type': '84'}: + self.assertEqual(2, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(2, r.min) + self.assertEqual(2, r.max) + self.assertEqual(4, r.sum) + self.assertEqual(2, r.avg) + + def test_group_by_with_query_filter_multiple(self): + f = storage.SampleFilter( + meter='instance', + user='user-2', + source='source-1', + ) + results = list(self.conn.get_meter_statistics( + f, + groupby=['project_id', 'resource_id'])) + self.assertEqual(3, len(results)) + groupby_list = [r.groupby for r in results] + groupby_keys_set = set(x for sub_dict in groupby_list + for x in sub_dict.keys()) + groupby_vals_set = set(x for sub_dict in groupby_list + for x in sub_dict.values()) + self.assertEqual(set(['project_id', 'resource_id']), groupby_keys_set) + self.assertEqual(set(['project-1', 'project-2', + 'resource-1', 'resource-2']), + groupby_vals_set) + + for r in results: + if r.groupby == {'project_id': 'project-1', + 'resource_id': 'resource-1'}: + self.assertEqual(1, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(2, r.min) + self.assertEqual(2, r.max) + self.assertEqual(2, r.sum) + self.assertEqual(2, r.avg) + elif r.groupby == {'project_id': 'project-1', + 'resource_id': 'resource-2'}: + self.assertEqual(2, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(1, r.min) + self.assertEqual(1, r.max) + self.assertEqual(2, r.sum) + self.assertEqual(1, r.avg) + elif r.groupby == {'project_id': 'project-2', + 'resource_id': 'resource-2'}: + self.assertEqual(1, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(4, r.min) + self.assertEqual(4, r.max) + self.assertEqual(4, r.sum) + self.assertEqual(4, r.avg) + else: + self.assertNotEqual({'project_id': 'project-2', + 'resource_id': 'resource-1'}, + r.groupby) + + def test_group_by_metadata_with_query_filter_multiple(self): + # TODO(terriyu): test_group_by_metadata_with_query_filter_multiple + # needs to be implemented. + # This test should check grouping by multiple metadata fields in + # combination with a query filter. 
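+ # (A hypothetical call shape for such a test, mirroring the
+ # single-field variants above; 'resource_metadata.flavor' as a
+ # second groupby field is an assumption, nothing is implemented yet:
+ # self.conn.get_meter_statistics(
+ # f, groupby=['resource_metadata.instance_type',
+ # 'resource_metadata.flavor']))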
+ pass + + def test_group_by_with_period(self): + f = storage.SampleFilter( + meter='instance', + ) + results = list(self.conn.get_meter_statistics(f, + period=7200, + groupby=['project_id'])) + self.assertEqual(4, len(results)) + groupby_list = [r.groupby for r in results] + groupby_keys_set = set(x for sub_dict in groupby_list + for x in sub_dict.keys()) + groupby_vals_set = set(x for sub_dict in groupby_list + for x in sub_dict.values()) + self.assertEqual(set(['project_id']), groupby_keys_set) + self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) + period_start_set = set([r.period_start for r in results]) + period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 11), + datetime.datetime(2013, 8, 1, 14, 11), + datetime.datetime(2013, 8, 1, 16, 11)]) + self.assertEqual(period_start_valid, period_start_set) + + for r in results: + if (r.groupby == {'project_id': 'project-1'} and + r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): + self.assertEqual(3, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(1, r.min) + self.assertEqual(4, r.max) + self.assertEqual(6, r.sum) + self.assertEqual(2, r.avg) + self.assertEqual(4260, r.duration) + self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11), + r.duration_start) + self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22), + r.duration_end) + self.assertEqual(7200, r.period) + self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), + r.period_end) + elif (r.groupby == {'project_id': 'project-1'} and + r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): + self.assertEqual(2, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(2, r.min) + self.assertEqual(2, r.max) + self.assertEqual(4, r.sum) + self.assertEqual(2, r.avg) + self.assertEqual(4260, r.duration) + self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), + r.duration_start) + self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10), + r.duration_end) + self.assertEqual(7200, r.period) + self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), + r.period_end) + elif (r.groupby == {'project_id': 'project-2'} and + r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): + self.assertEqual(1, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(2, r.min) + self.assertEqual(2, r.max) + self.assertEqual(2, r.sum) + self.assertEqual(2, r.avg) + self.assertEqual(0, r.duration) + self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37), + r.duration_start) + self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37), + r.duration_end) + self.assertEqual(7200, r.period) + self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), + r.period_end) + elif (r.groupby == {'project_id': 'project-2'} and + r.period_start == datetime.datetime(2013, 8, 1, 16, 11)): + self.assertEqual(1, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(4, r.min) + self.assertEqual(4, r.max) + self.assertEqual(4, r.sum) + self.assertEqual(4, r.avg) + self.assertEqual(0, r.duration) + self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), + r.duration_start) + self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), + r.duration_end) + self.assertEqual(7200, r.period) + self.assertEqual(datetime.datetime(2013, 8, 1, 18, 11), + r.period_end) + else: + self.assertNotEqual([{'project_id': 'project-1'}, + datetime.datetime(2013, 8, 1, 16, 11)], + [r.groupby, r.period_start]) + self.assertNotEqual([{'project_id': 'project-2'}, + datetime.datetime(2013, 8, 1, 10, 11)], + [r.groupby, r.period_start]) + + def test_group_by_metadata_with_period(self): + # This test checks 
grouping by metadata fields in combination + # with period grouping. + f = storage.SampleFilter( + meter='instance') + + results = list(self.conn.get_meter_statistics(f, period=7200, + groupby=['resource_metadata.instance_type'])) + self.assertEqual(5, len(results)) + groupby_list = [r.groupby for r in results] + groupby_keys_set = set(x for sub_dict in groupby_list + for x in sub_dict.keys()) + groupby_vals_set = set(x for sub_dict in groupby_list + for x in sub_dict.values()) + self.assertEqual(set(['resource_metadata.instance_type']), + groupby_keys_set) + self.assertEqual(set(['82', '83', '84']), groupby_vals_set) + period_start_set = set([r.period_start for r in results]) + period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 11), + datetime.datetime(2013, 8, 1, 14, 11), + datetime.datetime(2013, 8, 1, 16, 11)]) + self.assertEqual(period_start_valid, period_start_set) + + for r in results: + if (r.groupby == {'resource_metadata.instance_type': '82'} and + r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): + self.assertEqual(2, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(1, r.min) + self.assertEqual(1, r.max) + self.assertEqual(2, r.sum) + self.assertEqual(1, r.avg) + self.assertEqual(1740, r.duration) + self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11), + r.duration_start) + self.assertEqual(datetime.datetime(2013, 8, 1, 10, 40), + r.duration_end) + self.assertEqual(7200, r.period) + self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), + r.period_end) + elif (r.groupby == {'resource_metadata.instance_type': '82'} and + r.period_start == datetime.datetime(2013, 8, 1, 16, 11)): + self.assertEqual(1, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(4, r.min) + self.assertEqual(4, r.max) + self.assertEqual(4, r.sum) + self.assertEqual(4, r.avg) + self.assertEqual(0, r.duration) + self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), + r.duration_start) + self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), + r.duration_end) + self.assertEqual(7200, r.period) + self.assertEqual(datetime.datetime(2013, 8, 1, 18, 11), + r.period_end) + elif (r.groupby == {'resource_metadata.instance_type': '83'} and + r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): + self.assertEqual(1, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(4, r.min) + self.assertEqual(4, r.max) + self.assertEqual(4, r.sum) + self.assertEqual(4, r.avg) + self.assertEqual(0, r.duration) + self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22), + r.duration_start) + self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22), + r.duration_end) + self.assertEqual(7200, r.period) + self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), + r.period_end) + elif (r.groupby == {'resource_metadata.instance_type': '83'} and + r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): + self.assertEqual(1, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(2, r.min) + self.assertEqual(2, r.max) + self.assertEqual(2, r.sum) + self.assertEqual(2, r.avg) + self.assertEqual(0, r.duration) + self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37), + r.duration_start) + self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37), + r.duration_end) + self.assertEqual(7200, r.period) + self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), + r.period_end) + elif (r.groupby == {'resource_metadata.instance_type': '84'} and + r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): + self.assertEqual(2, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(2, r.min) + 
self.assertEqual(2, r.max) + self.assertEqual(4, r.sum) + self.assertEqual(2, r.avg) + self.assertEqual(4260, r.duration) + self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), + r.duration_start) + self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10), + r.duration_end) + self.assertEqual(7200, r.period) + self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), + r.period_end) + else: + self.assertNotEqual([{'resource_metadata.instance_type': '82'}, + datetime.datetime(2013, 8, 1, 14, 11)], + [r.groupby, r.period_start]) + self.assertNotEqual([{'resource_metadata.instance_type': '83'}, + datetime.datetime(2013, 8, 1, 16, 11)], + [r.groupby, r.period_start]) + self.assertNotEqual([{'resource_metadata.instance_type': '84'}, + datetime.datetime(2013, 8, 1, 10, 11)], + [r.groupby, r.period_start]) + self.assertNotEqual([{'resource_metadata.instance_type': '84'}, + datetime.datetime(2013, 8, 1, 16, 11)], + [r.groupby, r.period_start]) + + def test_group_by_with_query_filter_and_period(self): + f = storage.SampleFilter( + meter='instance', + source='source-1', + ) + results = list(self.conn.get_meter_statistics(f, + period=7200, + groupby=['project_id'])) + self.assertEqual(3, len(results)) + groupby_list = [r.groupby for r in results] + groupby_keys_set = set(x for sub_dict in groupby_list + for x in sub_dict.keys()) + groupby_vals_set = set(x for sub_dict in groupby_list + for x in sub_dict.values()) + self.assertEqual(set(['project_id']), groupby_keys_set) + self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) + period_start_set = set([r.period_start for r in results]) + period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 11), + datetime.datetime(2013, 8, 1, 14, 11), + datetime.datetime(2013, 8, 1, 16, 11)]) + self.assertEqual(period_start_valid, period_start_set) + + for r in results: + if (r.groupby == {'project_id': 'project-1'} and + r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): + self.assertEqual(2, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(1, r.min) + self.assertEqual(1, r.max) + self.assertEqual(2, r.sum) + self.assertEqual(1, r.avg) + self.assertEqual(1740, r.duration) + self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11), + r.duration_start) + self.assertEqual(datetime.datetime(2013, 8, 1, 10, 40), + r.duration_end) + self.assertEqual(7200, r.period) + self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), + r.period_end) + elif (r.groupby == {'project_id': 'project-1'} and + r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): + self.assertEqual(1, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(2, r.min) + self.assertEqual(2, r.max) + self.assertEqual(2, r.sum) + self.assertEqual(2, r.avg) + self.assertEqual(0, r.duration) + self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), + r.duration_start) + self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), + r.duration_end) + self.assertEqual(7200, r.period) + self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), + r.period_end) + elif (r.groupby == {'project_id': 'project-2'} and + r.period_start == datetime.datetime(2013, 8, 1, 16, 11)): + self.assertEqual(1, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(4, r.min) + self.assertEqual(4, r.max) + self.assertEqual(4, r.sum) + self.assertEqual(4, r.avg) + self.assertEqual(0, r.duration) + self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), + r.duration_start) + self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), + r.duration_end) + self.assertEqual(7200, r.period) + 
self.assertEqual(datetime.datetime(2013, 8, 1, 18, 11), + r.period_end) + else: + self.assertNotEqual([{'project_id': 'project-1'}, + datetime.datetime(2013, 8, 1, 16, 11)], + [r.groupby, r.period_start]) + self.assertNotEqual([{'project_id': 'project-2'}, + datetime.datetime(2013, 8, 1, 10, 11)], + [r.groupby, r.period_start]) + + def test_group_by_metadata_with_query_filter_and_period(self): + # This test checks grouping with metadata fields in combination + # with a query filter and period grouping. + f = storage.SampleFilter( + meter='instance', + project='project-1', + ) + results = list( + self.conn.get_meter_statistics( + f, period=7200, groupby=['resource_metadata.instance_type'])) + self.assertEqual(3, len(results)) + groupby_list = [r.groupby for r in results] + groupby_keys_set = set(x for sub_dict in groupby_list + for x in sub_dict.keys()) + groupby_vals_set = set(x for sub_dict in groupby_list + for x in sub_dict.values()) + self.assertEqual(set(['resource_metadata.instance_type']), + groupby_keys_set) + self.assertEqual(set(['82', '83', '84']), groupby_vals_set) + period_start_set = set([r.period_start for r in results]) + period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 11), + datetime.datetime(2013, 8, 1, 14, 11)]) + self.assertEqual(period_start_valid, period_start_set) + + for r in results: + if (r.groupby == {'resource_metadata.instance_type': '82'} and + r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): + self.assertEqual(2, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(1, r.min) + self.assertEqual(1, r.max) + self.assertEqual(2, r.sum) + self.assertEqual(1, r.avg) + self.assertEqual(1740, r.duration) + self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11), + r.duration_start) + self.assertEqual(datetime.datetime(2013, 8, 1, 10, 40), + r.duration_end) + self.assertEqual(7200, r.period) + self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), + r.period_end) + elif (r.groupby == {'resource_metadata.instance_type': '83'} and + r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): + self.assertEqual(1, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(4, r.min) + self.assertEqual(4, r.max) + self.assertEqual(4, r.sum) + self.assertEqual(4, r.avg) + self.assertEqual(0, r.duration) + self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22), + r.duration_start) + self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22), + r.duration_end) + self.assertEqual(7200, r.period) + self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), + r.period_end) + elif (r.groupby == {'resource_metadata.instance_type': '84'} and + r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): + self.assertEqual(2, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(2, r.min) + self.assertEqual(2, r.max) + self.assertEqual(4, r.sum) + self.assertEqual(2, r.avg) + self.assertEqual(4260, r.duration) + self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), + r.duration_start) + self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10), + r.duration_end) + self.assertEqual(7200, r.period) + self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), + r.period_end) + else: + self.assertNotEqual([{'resource_metadata.instance_type': '82'}, + datetime.datetime(2013, 8, 1, 14, 11)], + [r.groupby, r.period_start]) + self.assertNotEqual([{'resource_metadata.instance_type': '83'}, + datetime.datetime(2013, 8, 1, 14, 11)], + [r.groupby, r.period_start]) + self.assertNotEqual([{'resource_metadata.instance_type': '84'}, + datetime.datetime(2013, 8, 1, 10, 11)], + 
[r.groupby, r.period_start]) + + def test_group_by_start_timestamp_after(self): + f = storage.SampleFilter( + meter='instance', + start_timestamp=datetime.datetime(2013, 8, 1, 17, 28, 1), + ) + results = list(self.conn.get_meter_statistics(f, + groupby=['project_id'])) + + self.assertEqual([], results) + + def test_group_by_end_timestamp_before(self): + f = storage.SampleFilter( + meter='instance', + end_timestamp=datetime.datetime(2013, 8, 1, 10, 10, 59), + ) + results = list(self.conn.get_meter_statistics(f, + groupby=['project_id'])) + + self.assertEqual([], results) + + def test_group_by_start_timestamp(self): + f = storage.SampleFilter( + meter='instance', + start_timestamp=datetime.datetime(2013, 8, 1, 14, 58), + ) + results = list(self.conn.get_meter_statistics(f, + groupby=['project_id'])) + self.assertEqual(2, len(results)) + groupby_list = [r.groupby for r in results] + groupby_keys_set = set(x for sub_dict in groupby_list + for x in sub_dict.keys()) + groupby_vals_set = set(x for sub_dict in groupby_list + for x in sub_dict.values()) + self.assertEqual(set(['project_id']), groupby_keys_set) + self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) + + for r in results: + if r.groupby == {'project_id': 'project-1'}: + self.assertEqual(2, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(2, r.min) + self.assertEqual(2, r.max) + self.assertEqual(4, r.sum) + self.assertEqual(2, r.avg) + elif r.groupby == {'project_id': 'project-2'}: + self.assertEqual(2, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(2, r.min) + self.assertEqual(4, r.max) + self.assertEqual(6, r.sum) + self.assertEqual(3, r.avg) + + def test_group_by_end_timestamp(self): + f = storage.SampleFilter( + meter='instance', + end_timestamp=datetime.datetime(2013, 8, 1, 11, 45), + ) + results = list(self.conn.get_meter_statistics(f, + groupby=['project_id'])) + self.assertEqual(1, len(results)) + groupby_list = [r.groupby for r in results] + groupby_keys_set = set(x for sub_dict in groupby_list + for x in sub_dict.keys()) + groupby_vals_set = set(x for sub_dict in groupby_list + for x in sub_dict.values()) + self.assertEqual(set(['project_id']), groupby_keys_set) + self.assertEqual(set(['project-1']), groupby_vals_set) + + for r in results: + if r.groupby == {'project_id': 'project-1'}: + self.assertEqual(3, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(1, r.min) + self.assertEqual(4, r.max) + self.assertEqual(6, r.sum) + self.assertEqual(2, r.avg) + + def test_group_by_start_end_timestamp(self): + f = storage.SampleFilter( + meter='instance', + start_timestamp=datetime.datetime(2013, 8, 1, 8, 17, 3), + end_timestamp=datetime.datetime(2013, 8, 1, 23, 59, 59), + ) + results = list(self.conn.get_meter_statistics(f, + groupby=['project_id'])) + self.assertEqual(2, len(results)) + groupby_list = [r.groupby for r in results] + groupby_keys_set = set(x for sub_dict in groupby_list + for x in sub_dict.keys()) + groupby_vals_set = set(x for sub_dict in groupby_list + for x in sub_dict.values()) + self.assertEqual(set(['project_id']), groupby_keys_set) + self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) + + for r in results: + if r.groupby == {'project_id': 'project-1'}: + self.assertEqual(5, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(1, r.min) + self.assertEqual(4, r.max) + self.assertEqual(10, r.sum) + self.assertEqual(2, r.avg) + elif r.groupby == {'project_id': 'project-2'}: + self.assertEqual(2, r.count) + self.assertEqual('s', r.unit) + 
self.assertEqual(2, r.min) + self.assertEqual(4, r.max) + self.assertEqual(6, r.sum) + self.assertEqual(3, r.avg) + + def test_group_by_start_end_timestamp_with_query_filter(self): + f = storage.SampleFilter( + meter='instance', + project='project-1', + start_timestamp=datetime.datetime(2013, 8, 1, 11, 1), + end_timestamp=datetime.datetime(2013, 8, 1, 20, 0), + ) + results = list(self.conn.get_meter_statistics(f, + groupby=['resource_id'])) + groupby_list = [r.groupby for r in results] + groupby_keys_set = set(x for sub_dict in groupby_list + for x in sub_dict.keys()) + groupby_vals_set = set(x for sub_dict in groupby_list + for x in sub_dict.values()) + self.assertEqual(set(['resource_id']), groupby_keys_set) + self.assertEqual(set(['resource-1', 'resource-3']), groupby_vals_set) + + for r in results: + if r.groupby == {'resource_id': 'resource-1'}: + self.assertEqual(2, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(2, r.min) + self.assertEqual(2, r.max) + self.assertEqual(4, r.sum) + self.assertEqual(2, r.avg) + elif r.groupby == {'resource_id': 'resource-3'}: + self.assertEqual(1, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(4, r.min) + self.assertEqual(4, r.max) + self.assertEqual(4, r.sum) + self.assertEqual(4, r.avg) + + def test_group_by_start_end_timestamp_with_period(self): + f = storage.SampleFilter( + meter='instance', + start_timestamp=datetime.datetime(2013, 8, 1, 14, 0), + end_timestamp=datetime.datetime(2013, 8, 1, 17, 0), + ) + results = list(self.conn.get_meter_statistics(f, + period=3600, + groupby=['project_id'])) + self.assertEqual(3, len(results)) + groupby_list = [r.groupby for r in results] + groupby_keys_set = set(x for sub_dict in groupby_list + for x in sub_dict.keys()) + groupby_vals_set = set(x for sub_dict in groupby_list + for x in sub_dict.values()) + self.assertEqual(set(['project_id']), groupby_keys_set) + self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) + period_start_set = set([r.period_start for r in results]) + period_start_valid = set([datetime.datetime(2013, 8, 1, 14, 0), + datetime.datetime(2013, 8, 1, 15, 0), + datetime.datetime(2013, 8, 1, 16, 0)]) + self.assertEqual(period_start_valid, period_start_set) + + for r in results: + if (r.groupby == {'project_id': 'project-1'} and + r.period_start == datetime.datetime(2013, 8, 1, 14, 0)): + self.assertEqual(1, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(2, r.min) + self.assertEqual(2, r.max) + self.assertEqual(2, r.sum) + self.assertEqual(2, r.avg) + self.assertEqual(0, r.duration) + self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), + r.duration_start) + self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), + r.duration_end) + self.assertEqual(3600, r.period) + self.assertEqual(datetime.datetime(2013, 8, 1, 15, 0), + r.period_end) + elif (r.groupby == {'project_id': 'project-1'} and + r.period_start == datetime.datetime(2013, 8, 1, 16, 0)): + self.assertEqual(1, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(2, r.min) + self.assertEqual(2, r.max) + self.assertEqual(2, r.sum) + self.assertEqual(2, r.avg) + self.assertEqual(0, r.duration) + self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10), + r.duration_start) + self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10), + r.duration_end) + self.assertEqual(3600, r.period) + self.assertEqual(datetime.datetime(2013, 8, 1, 17, 0), + r.period_end) + elif (r.groupby == {'project_id': 'project-2'} and + r.period_start == datetime.datetime(2013, 8, 1, 15, 0)): + 
self.assertEqual(1, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(2, r.min) + self.assertEqual(2, r.max) + self.assertEqual(2, r.sum) + self.assertEqual(2, r.avg) + self.assertEqual(0, r.duration) + self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37), + r.duration_start) + self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37), + r.duration_end) + self.assertEqual(3600, r.period) + self.assertEqual(datetime.datetime(2013, 8, 1, 16, 0), + r.period_end) + else: + self.assertNotEqual([{'project_id': 'project-1'}, + datetime.datetime(2013, 8, 1, 15, 0)], + [r.groupby, r.period_start]) + self.assertNotEqual([{'project_id': 'project-2'}, + datetime.datetime(2013, 8, 1, 14, 0)], + [r.groupby, r.period_start]) + self.assertNotEqual([{'project_id': 'project-2'}, + datetime.datetime(2013, 8, 1, 16, 0)], + [r.groupby, r.period_start]) + + def test_group_by_start_end_timestamp_with_query_filter_and_period(self): + f = storage.SampleFilter( + meter='instance', + source='source-1', + start_timestamp=datetime.datetime(2013, 8, 1, 10, 0), + end_timestamp=datetime.datetime(2013, 8, 1, 18, 0), + ) + results = list(self.conn.get_meter_statistics(f, + period=7200, + groupby=['project_id'])) + self.assertEqual(3, len(results)) + groupby_list = [r.groupby for r in results] + groupby_keys_set = set(x for sub_dict in groupby_list + for x in sub_dict.keys()) + groupby_vals_set = set(x for sub_dict in groupby_list + for x in sub_dict.values()) + self.assertEqual(set(['project_id']), groupby_keys_set) + self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) + period_start_set = set([r.period_start for r in results]) + period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 0), + datetime.datetime(2013, 8, 1, 14, 0), + datetime.datetime(2013, 8, 1, 16, 0)]) + self.assertEqual(period_start_valid, period_start_set) + + for r in results: + if (r.groupby == {'project_id': 'project-1'} and + r.period_start == datetime.datetime(2013, 8, 1, 10, 0)): + self.assertEqual(2, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(1, r.min) + self.assertEqual(1, r.max) + self.assertEqual(2, r.sum) + self.assertEqual(1, r.avg) + self.assertEqual(1740, r.duration) + self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11), + r.duration_start) + self.assertEqual(datetime.datetime(2013, 8, 1, 10, 40), + r.duration_end) + self.assertEqual(7200, r.period) + self.assertEqual(datetime.datetime(2013, 8, 1, 12, 0), + r.period_end) + elif (r.groupby == {'project_id': 'project-1'} and + r.period_start == datetime.datetime(2013, 8, 1, 14, 0)): + self.assertEqual(1, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(2, r.min) + self.assertEqual(2, r.max) + self.assertEqual(2, r.sum) + self.assertEqual(2, r.avg) + self.assertEqual(0, r.duration) + self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), + r.duration_start) + self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), + r.duration_end) + self.assertEqual(7200, r.period) + self.assertEqual(datetime.datetime(2013, 8, 1, 16, 0), + r.period_end) + elif (r.groupby == {'project_id': 'project-2'} and + r.period_start == datetime.datetime(2013, 8, 1, 16, 0)): + self.assertEqual(1, r.count) + self.assertEqual('s', r.unit) + self.assertEqual(4, r.min) + self.assertEqual(4, r.max) + self.assertEqual(4, r.sum) + self.assertEqual(4, r.avg) + self.assertEqual(0, r.duration) + self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), + r.duration_start) + self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), + r.duration_end) + self.assertEqual(7200, 
r.period) + self.assertEqual(datetime.datetime(2013, 8, 1, 18, 0), + r.period_end) + else: + self.assertNotEqual([{'project_id': 'project-1'}, + datetime.datetime(2013, 8, 1, 16, 0)], + [r.groupby, r.period_start]) + self.assertNotEqual([{'project_id': 'project-2'}, + datetime.datetime(2013, 8, 1, 10, 0)], + [r.groupby, r.period_start]) + self.assertNotEqual([{'project_id': 'project-2'}, + datetime.datetime(2013, 8, 1, 14, 0)], + [r.groupby, r.period_start]) + + +class CounterDataTypeTest(DBTestBase, + tests_db.MixinTestsWithBackendScenarios): + def prepare_data(self): + c = sample.Sample( + 'dummyBigCounter', + sample.TYPE_CUMULATIVE, + unit='', + volume=337203685477580, + user_id='user-id', + project_id='project-id', + resource_id='resource-id', + timestamp=datetime.datetime(2012, 7, 2, 10, 40), + resource_metadata={}, + source='test-1', + ) + msg = utils.meter_message_from_counter( + c, self.CONF.publisher.telemetry_secret, + ) + + self.conn.record_metering_data(msg) + + c = sample.Sample( + 'dummySmallCounter', + sample.TYPE_CUMULATIVE, + unit='', + volume=-337203685477580, + user_id='user-id', + project_id='project-id', + resource_id='resource-id', + timestamp=datetime.datetime(2012, 7, 2, 10, 40), + resource_metadata={}, + source='test-1', + ) + msg = utils.meter_message_from_counter( + c, self.CONF.publisher.telemetry_secret, + ) + self.conn.record_metering_data(msg) + + c = sample.Sample( + 'floatCounter', + sample.TYPE_CUMULATIVE, + unit='', + volume=1938495037.53697, + user_id='user-id', + project_id='project-id', + resource_id='resource-id', + timestamp=datetime.datetime(2012, 7, 2, 10, 40), + resource_metadata={}, + source='test-1', + ) + msg = utils.meter_message_from_counter( + c, self.CONF.publisher.telemetry_secret, + ) + self.conn.record_metering_data(msg) + + def test_storage_can_handle_large_values(self): + f = storage.SampleFilter( + meter='dummyBigCounter', + ) + results = list(self.conn.get_samples(f)) + self.assertEqual(337203685477580, results[0].counter_volume) + f = storage.SampleFilter( + meter='dummySmallCounter', + ) + results = list(self.conn.get_samples(f)) + observed_num = int(results[0].counter_volume) + self.assertEqual(-337203685477580, observed_num) + + def test_storage_can_handle_float_values(self): + f = storage.SampleFilter( + meter='floatCounter', + ) + results = list(self.conn.get_samples(f)) + self.assertEqual(1938495037.53697, results[0].counter_volume) + + +class AlarmTestBase(DBTestBase): + def add_some_alarms(self): + alarms = [alarm_models.Alarm(alarm_id='r3d', + enabled=True, + type='threshold', + name='red-alert', + description='my red-alert', + timestamp=datetime.datetime(2015, 7, + 2, 10, 25), + user_id='me', + project_id='and-da-boys', + state="insufficient data", + state_timestamp=constants.MIN_DATETIME, + ok_actions=[], + alarm_actions=['http://nowhere/alarms'], + insufficient_data_actions=[], + repeat_actions=False, + time_constraints=[dict(name='testcons', + start='0 11 * * *', + duration=300)], + rule=dict(comparison_operator='eq', + threshold=36, + statistic='count', + evaluation_periods=1, + period=60, + meter_name='test.one', + query=[{'field': 'key', + 'op': 'eq', + 'value': 'value', + 'type': 'string'}]), + ), + alarm_models.Alarm(alarm_id='0r4ng3', + enabled=True, + type='threshold', + name='orange-alert', + description='a orange', + timestamp=datetime.datetime(2015, 7, + 2, 10, 40), + user_id='me', + project_id='and-da-boys', + state="insufficient data", + state_timestamp=constants.MIN_DATETIME, + ok_actions=[], + 
alarm_actions=['http://nowhere/alarms'], + insufficient_data_actions=[], + repeat_actions=False, + time_constraints=[], + rule=dict(comparison_operator='gt', + threshold=75, + statistic='avg', + evaluation_periods=1, + period=60, + meter_name='test.forty', + query=[{'field': 'key2', + 'op': 'eq', + 'value': 'value2', + 'type': 'string'}]), + ), + alarm_models.Alarm(alarm_id='y3ll0w', + enabled=False, + type='threshold', + name='yellow-alert', + description='yellow', + timestamp=datetime.datetime(2015, 7, + 2, 10, 10), + user_id='me', + project_id='and-da-boys', + state="insufficient data", + state_timestamp=constants.MIN_DATETIME, + ok_actions=[], + alarm_actions=['http://nowhere/alarms'], + insufficient_data_actions=[], + repeat_actions=False, + time_constraints=[], + rule=dict(comparison_operator='lt', + threshold=10, + statistic='min', + evaluation_periods=1, + period=60, + meter_name='test.five', + query=[{'field': 'key2', + 'op': 'eq', + 'value': 'value2', + 'type': 'string'}, + {'field': + 'user_metadata.key3', + 'op': 'eq', + 'value': 'value3', + 'type': 'string'}]), + )] + + for a in alarms: + self.alarm_conn.create_alarm(a) + + +class AlarmTest(AlarmTestBase, + tests_db.MixinTestsWithBackendScenarios): + + def test_empty(self): + alarms = list(self.alarm_conn.get_alarms()) + self.assertEqual([], alarms) + + def test_list(self): + self.add_some_alarms() + alarms = list(self.alarm_conn.get_alarms()) + self.assertEqual(3, len(alarms)) + + def test_list_ordered_by_timestamp(self): + self.add_some_alarms() + alarms = list(self.alarm_conn.get_alarms()) + self.assertEqual(len(alarms), 3) + alarm_l = [a.timestamp for a in alarms] + alarm_l_ordered = [datetime.datetime(2015, 7, 2, 10, 40), + datetime.datetime(2015, 7, 2, 10, 25), + datetime.datetime(2015, 7, 2, 10, 10)] + self.assertEqual(alarm_l_ordered, alarm_l) + + def test_list_enabled(self): + self.add_some_alarms() + alarms = list(self.alarm_conn.get_alarms(enabled=True)) + self.assertEqual(2, len(alarms)) + + def test_list_disabled(self): + self.add_some_alarms() + alarms = list(self.alarm_conn.get_alarms(enabled=False)) + self.assertEqual(1, len(alarms)) + + def test_list_by_type(self): + self.add_some_alarms() + alarms = list(self.alarm_conn.get_alarms(alarm_type='threshold')) + self.assertEqual(3, len(alarms)) + alarms = list(self.alarm_conn.get_alarms(alarm_type='combination')) + self.assertEqual(0, len(alarms)) + + def test_add(self): + self.add_some_alarms() + alarms = list(self.alarm_conn.get_alarms()) + self.assertEqual(3, len(alarms)) + + meter_names = sorted([a.rule['meter_name'] for a in alarms]) + self.assertEqual(['test.five', 'test.forty', 'test.one'], meter_names) + + def test_update(self): + self.add_some_alarms() + orange = list(self.alarm_conn.get_alarms(name='orange-alert'))[0] + orange.enabled = False + orange.state = alarm_models.Alarm.ALARM_INSUFFICIENT_DATA + query = [{'field': 'metadata.group', + 'op': 'eq', + 'value': 'test.updated', + 'type': 'string'}] + orange.rule['query'] = query + orange.rule['meter_name'] = 'new_meter_name' + updated = self.alarm_conn.update_alarm(orange) + self.assertEqual(False, updated.enabled) + self.assertEqual(alarm_models.Alarm.ALARM_INSUFFICIENT_DATA, + updated.state) + self.assertEqual(query, updated.rule['query']) + self.assertEqual('new_meter_name', updated.rule['meter_name']) + + def test_update_llu(self): + llu = alarm_models.Alarm(alarm_id='llu', + enabled=True, + type='threshold', + name='llu', + description='llu', + timestamp=constants.MIN_DATETIME, + user_id='bla', + 
project_id='ffo', + state="insufficient data", + state_timestamp=constants.MIN_DATETIME, + ok_actions=[], + alarm_actions=[], + insufficient_data_actions=[], + repeat_actions=False, + time_constraints=[], + rule=dict(comparison_operator='lt', + threshold=34, + statistic='max', + evaluation_periods=1, + period=60, + meter_name='llt', + query=[]) + ) + updated = self.alarm_conn.update_alarm(llu) + updated.state = alarm_models.Alarm.ALARM_OK + updated.description = ':)' + self.alarm_conn.update_alarm(updated) + + all = list(self.alarm_conn.get_alarms()) + self.assertEqual(1, len(all)) + + def test_delete(self): + self.add_some_alarms() + victim = list(self.alarm_conn.get_alarms(name='orange-alert'))[0] + self.alarm_conn.delete_alarm(victim.alarm_id) + survivors = list(self.alarm_conn.get_alarms()) + self.assertEqual(2, len(survivors)) + for s in survivors: + self.assertNotEqual(victim.name, s.name) + + +@tests_db.run_with('sqlite', 'mysql', 'pgsql', 'hbase', 'db2') +class AlarmHistoryTest(AlarmTestBase, + tests_db.MixinTestsWithBackendScenarios): + + def setUp(self): + super(AlarmTestBase, self).setUp() + self.add_some_alarms() + self.prepare_alarm_history() + + def prepare_alarm_history(self): + alarms = list(self.alarm_conn.get_alarms()) + for alarm in alarms: + i = alarms.index(alarm) + alarm_change = { + "event_id": "3e11800c-a3ca-4991-b34b-d97efb6047d%s" % i, + "alarm_id": alarm.alarm_id, + "type": alarm_models.AlarmChange.CREATION, + "detail": "detail %s" % alarm.name, + "user_id": alarm.user_id, + "project_id": alarm.project_id, + "on_behalf_of": alarm.project_id, + "timestamp": datetime.datetime(2014, 4, 7, 7, 30 + i) + } + self.alarm_conn.record_alarm_change(alarm_change=alarm_change) + + def _clear_alarm_history(self, utcnow, ttl, count): + self.mock_utcnow.return_value = utcnow + self.alarm_conn.clear_expired_alarm_history_data(ttl) + history = list(self.alarm_conn.query_alarm_history()) + self.assertEqual(count, len(history)) + + def test_clear_alarm_history_no_data_to_remove(self): + utcnow = datetime.datetime(2013, 4, 7, 7, 30) + self._clear_alarm_history(utcnow, 1, 3) + + def test_clear_some_alarm_history(self): + utcnow = datetime.datetime(2014, 4, 7, 7, 35) + self._clear_alarm_history(utcnow, 3 * 60, 1) + + def test_clear_all_alarm_history(self): + utcnow = datetime.datetime(2014, 4, 7, 7, 45) + self._clear_alarm_history(utcnow, 3 * 60, 0) + + +class ComplexAlarmQueryTest(AlarmTestBase, + tests_db.MixinTestsWithBackendScenarios): + + def test_no_filter(self): + self.add_some_alarms() + result = list(self.alarm_conn.query_alarms()) + self.assertEqual(3, len(result)) + + def test_no_filter_with_limit(self): + self.add_some_alarms() + result = list(self.alarm_conn.query_alarms(limit=2)) + self.assertEqual(2, len(result)) + + def test_filter(self): + self.add_some_alarms() + filter_expr = {"and": + [{"or": + [{"=": {"name": "yellow-alert"}}, + {"=": {"name": "red-alert"}}]}, + {"=": {"enabled": True}}]} + + result = list(self.alarm_conn.query_alarms(filter_expr=filter_expr)) + + self.assertEqual(1, len(result)) + for a in result: + self.assertIn(a.name, set(["yellow-alert", "red-alert"])) + self.assertTrue(a.enabled) + + def test_filter_with_regexp(self): + self.add_some_alarms() + filter_expr = {"and": + [{"or": [{"=": {"name": "yellow-alert"}}, + {"=": {"name": "red-alert"}}]}, + {"=~": {"description": "yel.*"}}]} + + result = list(self.alarm_conn.query_alarms(filter_expr=filter_expr)) + + self.assertEqual(1, len(result)) + for a in result: + self.assertEqual("yellow", 
a.description) + + def test_filter_for_alarm_id(self): + self.add_some_alarms() + filter_expr = {"=": {"alarm_id": "0r4ng3"}} + + result = list(self.alarm_conn.query_alarms(filter_expr=filter_expr)) + + self.assertEqual(1, len(result)) + for a in result: + self.assertEqual("0r4ng3", a.alarm_id) + + def test_filter_and_orderby(self): + self.add_some_alarms() + result = list(self.alarm_conn.query_alarms(filter_expr=( + {"=": {"enabled": True}}), + orderby=[{"name": "asc"}])) + self.assertEqual(2, len(result)) + self.assertEqual(["orange-alert", "red-alert"], + [a.name for a in result]) + for a in result: + self.assertTrue(a.enabled) + + +class ComplexAlarmHistoryQueryTest(AlarmTestBase, + tests_db.MixinTestsWithBackendScenarios): + def setUp(self): + super(DBTestBase, self).setUp() + self.filter_expr = {"and": + [{"or": + [{"=": {"type": "rule change"}}, + {"=": {"type": "state transition"}}]}, + {"=": {"alarm_id": "0r4ng3"}}]} + self.add_some_alarms() + self.prepare_alarm_history() + + def prepare_alarm_history(self): + alarms = list(self.alarm_conn.get_alarms()) + name_index = { + 'red-alert': 0, + 'orange-alert': 1, + 'yellow-alert': 2 + } + + for alarm in alarms: + i = name_index[alarm.name] + alarm_change = dict(event_id=( + "16fd2706-8baf-433b-82eb-8c7fada847c%s" % i), + alarm_id=alarm.alarm_id, + type=alarm_models.AlarmChange.CREATION, + detail="detail %s" % alarm.name, + user_id=alarm.user_id, + project_id=alarm.project_id, + on_behalf_of=alarm.project_id, + timestamp=datetime.datetime(2012, 9, 24, + 7 + i, + 30 + i)) + self.alarm_conn.record_alarm_change(alarm_change=alarm_change) + + alarm_change2 = dict(event_id=( + "16fd2706-8baf-433b-82eb-8c7fada847d%s" % i), + alarm_id=alarm.alarm_id, + type=alarm_models.AlarmChange.RULE_CHANGE, + detail="detail %s" % i, + user_id=alarm.user_id, + project_id=alarm.project_id, + on_behalf_of=alarm.project_id, + timestamp=datetime.datetime(2012, 9, 25, + 10 + i, + 30 + i)) + self.alarm_conn.record_alarm_change(alarm_change=alarm_change2) + + alarm_change3 = dict( + event_id="16fd2706-8baf-433b-82eb-8c7fada847e%s" % i, + alarm_id=alarm.alarm_id, + type=alarm_models.AlarmChange.STATE_TRANSITION, + detail="detail %s" % (i + 1), + user_id=alarm.user_id, + project_id=alarm.project_id, + on_behalf_of=alarm.project_id, + timestamp=datetime.datetime(2012, 9, 26, 10 + i, 30 + i) + ) + + if alarm.name == "red-alert": + alarm_change3['on_behalf_of'] = 'and-da-girls' + + self.alarm_conn.record_alarm_change(alarm_change=alarm_change3) + + if alarm.name in ["red-alert", "yellow-alert"]: + alarm_change4 = dict(event_id=( + "16fd2706-8baf-433b-82eb-8c7fada847f%s" + % i), + alarm_id=alarm.alarm_id, + type=alarm_models.AlarmChange.DELETION, + detail="detail %s" % (i + 2), + user_id=alarm.user_id, + project_id=alarm.project_id, + on_behalf_of=alarm.project_id, + timestamp=datetime.datetime(2012, 9, 27, + 10 + i, + 30 + i)) + self.alarm_conn.record_alarm_change(alarm_change=alarm_change4) + + def test_alarm_history_with_no_filter(self): + history = list(self.alarm_conn.query_alarm_history()) + self.assertEqual(11, len(history)) + + def test_alarm_history_with_no_filter_and_limit(self): + history = list(self.alarm_conn.query_alarm_history(limit=3)) + self.assertEqual(3, len(history)) + + def test_alarm_history_with_filter(self): + history = list( + self.alarm_conn.query_alarm_history(filter_expr=self.filter_expr)) + self.assertEqual(2, len(history)) + + def test_alarm_history_with_regexp(self): + filter_expr = {"and": + [{"=~": {"type": "(rule)|(state)"}}, + {"=": 
{"alarm_id": "0r4ng3"}}]} + history = list( + self.alarm_conn.query_alarm_history(filter_expr=filter_expr)) + self.assertEqual(2, len(history)) + + def test_alarm_history_with_filter_and_orderby(self): + history = list( + self.alarm_conn.query_alarm_history(filter_expr=self.filter_expr, + orderby=[{"timestamp": + "asc"}])) + self.assertEqual([alarm_models.AlarmChange.RULE_CHANGE, + alarm_models.AlarmChange.STATE_TRANSITION], + [h.type for h in history]) + + def test_alarm_history_with_filter_and_orderby_and_limit(self): + history = list( + self.alarm_conn.query_alarm_history(filter_expr=self.filter_expr, + orderby=[{"timestamp": + "asc"}], + limit=1)) + self.assertEqual(alarm_models.AlarmChange.RULE_CHANGE, history[0].type) + + def test_alarm_history_with_on_behalf_of_filter(self): + filter_expr = {"=": {"on_behalf_of": "and-da-girls"}} + history = list(self.alarm_conn.query_alarm_history( + filter_expr=filter_expr)) + self.assertEqual(1, len(history)) + self.assertEqual("16fd2706-8baf-433b-82eb-8c7fada847e0", + history[0].event_id) + + def test_alarm_history_with_alarm_id_as_filter(self): + filter_expr = {"=": {"alarm_id": "r3d"}} + history = list(self.alarm_conn.query_alarm_history( + filter_expr=filter_expr, orderby=[{"timestamp": "asc"}])) + self.assertEqual(4, len(history)) + self.assertEqual([alarm_models.AlarmChange.CREATION, + alarm_models.AlarmChange.RULE_CHANGE, + alarm_models.AlarmChange.STATE_TRANSITION, + alarm_models.AlarmChange.DELETION], + [h.type for h in history]) + + +class EventTestBase(tests_db.TestBase, + tests_db.MixinTestsWithBackendScenarios): + """Separate test base class. + + We don't want to inherit all the Meter stuff. + """ + + def setUp(self): + super(EventTestBase, self).setUp() + self.prepare_data() + + def prepare_data(self): + self.event_models = [] + base = 0 + self.start = datetime.datetime(2013, 12, 31, 5, 0) + now = self.start + for event_type in ['Foo', 'Bar', 'Zoo', 'Foo', 'Bar', 'Zoo']: + trait_models = [event_models.Trait(name, dtype, value) + for name, dtype, value in [ + ('trait_A', event_models.Trait.TEXT_TYPE, + "my_%s_text" % event_type), + ('trait_B', event_models.Trait.INT_TYPE, + base + 1), + ('trait_C', event_models.Trait.FLOAT_TYPE, + float(base) + 0.123456), + ('trait_D', event_models.Trait.DATETIME_TYPE, + now)]] + self.event_models.append( + event_models.Event("id_%s_%d" % (event_type, base), + event_type, now, trait_models, + {'status': {'nested': 'started'}})) + base += 100 + now = now + datetime.timedelta(hours=1) + self.end = now + + self.event_conn.record_events(self.event_models) + + +@tests_db.run_with('sqlite', 'mysql', 'pgsql') +class EventTTLTest(EventTestBase): + + @mock.patch.object(timeutils, 'utcnow') + def test_clear_expired_event_data(self, mock_utcnow): + mock_utcnow.return_value = datetime.datetime(2013, 12, 31, 10, 0) + self.event_conn.clear_expired_event_data(3600) + + events = list(self.event_conn.get_events(storage.EventFilter())) + self.assertEqual(2, len(events)) + event_types = list(self.event_conn.get_event_types()) + self.assertEqual(['Bar', 'Zoo'], event_types) + for event_type in event_types: + trait_types = list(self.event_conn.get_trait_types(event_type)) + self.assertEqual(4, len(trait_types)) + traits = list(self.event_conn.get_traits(event_type)) + self.assertEqual(4, len(traits)) + + +@tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb', 'db2') +class EventTest(EventTestBase): + def test_duplicate_message_id(self): + now = datetime.datetime.utcnow() + m = [event_models.Event("1", "Foo", now, 
None, {}),
+             event_models.Event("1", "Zoo", now, [], {})]
+        with mock.patch('%s.LOG' %
+                        self.event_conn.record_events.__module__) as log:
+            self.event_conn.record_events(m)
+            self.assertEqual(1, log.info.call_count)
+
+    def test_bad_event(self):
+        now = datetime.datetime.utcnow()
+        broken_event = event_models.Event("1", "Foo", now, None, {})
+        del(broken_event.__dict__['raw'])
+        m = [broken_event, broken_event]
+        with mock.patch('%s.LOG' %
+                        self.event_conn.record_events.__module__) as log:
+            self.assertRaises(AttributeError, self.event_conn.record_events, m)
+            # ensure that record_events does not break on first error but
+            # delays exception and tries to record each event.
+            self.assertEqual(2, log.exception.call_count)
+
+
+class GetEventTest(EventTestBase):
+
+    def test_generated_is_datetime(self):
+        event_filter = storage.EventFilter(self.start, self.end)
+        events = [event for event in self.event_conn.get_events(event_filter)]
+        self.assertEqual(6, len(events))
+        for i, event in enumerate(events):
+            self.assertIsInstance(event.generated, datetime.datetime)
+            self.assertEqual(event.generated,
+                             self.event_models[i].generated)
+            model_traits = self.event_models[i].traits
+            for j, trait in enumerate(event.traits):
+                if trait.dtype == event_models.Trait.DATETIME_TYPE:
+                    self.assertIsInstance(trait.value, datetime.datetime)
+                    self.assertEqual(trait.value, model_traits[j].value)
+
+    def test_simple_get(self):
+        event_filter = storage.EventFilter(self.start, self.end)
+        events = [event for event in self.event_conn.get_events(event_filter)]
+        self.assertEqual(6, len(events))
+        start_time = None
+        for i, type in enumerate(['Foo', 'Bar', 'Zoo']):
+            self.assertEqual(type, events[i].event_type)
+            self.assertEqual(4, len(events[i].traits))
+            # Ensure sorted results ...
+            if start_time is not None:
+                # Python 2.6 has no assertLess :(
+                self.assertTrue(start_time < events[i].generated)
+            start_time = events[i].generated
+
+    def test_simple_get_event_type(self):
+        expected_trait_values = {
+            'id_Bar_100': {
+                'trait_A': 'my_Bar_text',
+                'trait_B': 101,
+                'trait_C': 100.123456,
+                'trait_D': self.start + datetime.timedelta(hours=1)
+            },
+            'id_Bar_400': {
+                'trait_A': 'my_Bar_text',
+                'trait_B': 401,
+                'trait_C': 400.123456,
+                'trait_D': self.start + datetime.timedelta(hours=4)
+            }
+        }
+
+        event_filter = storage.EventFilter(self.start, self.end, "Bar")
+        events = [event for event in self.event_conn.get_events(event_filter)]
+        self.assertEqual(2, len(events))
+        self.assertEqual("Bar", events[0].event_type)
+        self.assertEqual("Bar", events[1].event_type)
+        self.assertEqual(4, len(events[0].traits))
+        self.assertEqual(4, len(events[1].traits))
+        for event in events:
+            trait_values = expected_trait_values.get(event.message_id,
+                                                     None)
+            if not trait_values:
+                self.fail("Unexpected event ID returned: %s" %
+                          event.message_id)
+
+            for trait in event.traits:
+                expected_val = trait_values.get(trait.name)
+                if not expected_val:
+                    self.fail("Unexpected trait type: %s" % trait.dtype)
+                self.assertEqual(expected_val, trait.value)
+
+    def test_get_event_trait_filter(self):
+        trait_filters = [{'key': 'trait_B', 'integer': 101}]
+        event_filter = storage.EventFilter(self.start, self.end,
+                                           traits_filter=trait_filters)
+        events = [event for event in self.event_conn.get_events(event_filter)]
+        self.assertEqual(1, len(events))
+        self.assertEqual("Bar", events[0].event_type)
+        self.assertEqual(4, len(events[0].traits))
+
+    def test_get_event_trait_filter_op_string(self):
+        trait_filters = [{'key': 'trait_A', 'string': 'my_Foo_text',
+                          'op': 'eq'}]
+        event_filter = storage.EventFilter(self.start, self.end,
+                                           traits_filter=trait_filters)
+        events = [event for event in self.event_conn.get_events(event_filter)]
+        self.assertEqual(2, len(events))
+        self.assertEqual("Foo", events[0].event_type)
+        self.assertEqual(4, len(events[0].traits))
+        trait_filters[0].update({'key': 'trait_A', 'op': 'lt'})
+        event_filter = storage.EventFilter(self.start, self.end,
+                                           traits_filter=trait_filters)
+        events = [event for event in self.event_conn.get_events(event_filter)]
+        self.assertEqual(2, len(events))
+        self.assertEqual("Bar", events[0].event_type)
+        trait_filters[0].update({'key': 'trait_A', 'op': 'le'})
+        event_filter = storage.EventFilter(self.start, self.end,
+                                           traits_filter=trait_filters)
+        events = [event for event in self.event_conn.get_events(event_filter)]
+        self.assertEqual(4, len(events))
+        self.assertEqual("Bar", events[1].event_type)
+        trait_filters[0].update({'key': 'trait_A', 'op': 'ne'})
+        event_filter = storage.EventFilter(self.start, self.end,
+                                           traits_filter=trait_filters)
+        events = [event for event in self.event_conn.get_events(event_filter)]
+        self.assertEqual(4, len(events))
+        self.assertEqual("Zoo", events[3].event_type)
+        trait_filters[0].update({'key': 'trait_A', 'op': 'gt'})
+        event_filter = storage.EventFilter(self.start, self.end,
+                                           traits_filter=trait_filters)
+        events = [event for event in self.event_conn.get_events(event_filter)]
+        self.assertEqual(2, len(events))
+        self.assertEqual("Zoo", events[0].event_type)
+        trait_filters[0].update({'key': 'trait_A', 'op': 'ge'})
+        event_filter = storage.EventFilter(self.start, self.end,
+                                           traits_filter=trait_filters)
+        events = [event for event in self.event_conn.get_events(event_filter)]
+        self.assertEqual(4, len(events))
+        
self.assertEqual("Foo", events[2].event_type) + + def test_get_event_trait_filter_op_integer(self): + trait_filters = [{'key': 'trait_B', 'integer': 101, 'op': 'eq'}] + event_filter = storage.EventFilter(self.start, self.end, + traits_filter=trait_filters) + events = [event for event in self.event_conn.get_events(event_filter)] + self.assertEqual(1, len(events)) + self.assertEqual("Bar", events[0].event_type) + self.assertEqual(4, len(events[0].traits)) + trait_filters[0].update({'key': 'trait_B', 'op': 'lt'}) + event_filter = storage.EventFilter(self.start, self.end, + traits_filter=trait_filters) + events = [event for event in self.event_conn.get_events(event_filter)] + self.assertEqual(1, len(events)) + self.assertEqual("Foo", events[0].event_type) + trait_filters[0].update({'key': 'trait_B', 'op': 'le'}) + event_filter = storage.EventFilter(self.start, self.end, + traits_filter=trait_filters) + events = [event for event in self.event_conn.get_events(event_filter)] + self.assertEqual(2, len(events)) + self.assertEqual("Bar", events[1].event_type) + trait_filters[0].update({'key': 'trait_B', 'op': 'ne'}) + event_filter = storage.EventFilter(self.start, self.end, + traits_filter=trait_filters) + events = [event for event in self.event_conn.get_events(event_filter)] + self.assertEqual(5, len(events)) + self.assertEqual("Zoo", events[4].event_type) + trait_filters[0].update({'key': 'trait_B', 'op': 'gt'}) + event_filter = storage.EventFilter(self.start, self.end, + traits_filter=trait_filters) + events = [event for event in self.event_conn.get_events(event_filter)] + self.assertEqual(4, len(events)) + self.assertEqual("Zoo", events[0].event_type) + trait_filters[0].update({'key': 'trait_B', 'op': 'ge'}) + event_filter = storage.EventFilter(self.start, self.end, + traits_filter=trait_filters) + events = [event for event in self.event_conn.get_events(event_filter)] + self.assertEqual(5, len(events)) + self.assertEqual("Foo", events[2].event_type) + + def test_get_event_trait_filter_op_float(self): + trait_filters = [{'key': 'trait_C', 'float': 300.123456, 'op': 'eq'}] + event_filter = storage.EventFilter(self.start, self.end, + traits_filter=trait_filters) + events = [event for event in self.event_conn.get_events(event_filter)] + self.assertEqual(1, len(events)) + self.assertEqual("Foo", events[0].event_type) + self.assertEqual(4, len(events[0].traits)) + trait_filters[0].update({'key': 'trait_C', 'op': 'lt'}) + event_filter = storage.EventFilter(self.start, self.end, + traits_filter=trait_filters) + events = [event for event in self.event_conn.get_events(event_filter)] + self.assertEqual(3, len(events)) + self.assertEqual("Zoo", events[2].event_type) + trait_filters[0].update({'key': 'trait_C', 'op': 'le'}) + event_filter = storage.EventFilter(self.start, self.end, + traits_filter=trait_filters) + events = [event for event in self.event_conn.get_events(event_filter)] + self.assertEqual(4, len(events)) + self.assertEqual("Bar", events[1].event_type) + trait_filters[0].update({'key': 'trait_C', 'op': 'ne'}) + event_filter = storage.EventFilter(self.start, self.end, + traits_filter=trait_filters) + events = [event for event in self.event_conn.get_events(event_filter)] + self.assertEqual(5, len(events)) + self.assertEqual("Zoo", events[2].event_type) + trait_filters[0].update({'key': 'trait_C', 'op': 'gt'}) + event_filter = storage.EventFilter(self.start, self.end, + traits_filter=trait_filters) + events = [event for event in self.event_conn.get_events(event_filter)] + self.assertEqual(2, 
len(events))
+        self.assertEqual("Bar", events[0].event_type)
+        trait_filters[0].update({'key': 'trait_C', 'op': 'ge'})
+        event_filter = storage.EventFilter(self.start, self.end,
+                                           traits_filter=trait_filters)
+        events = [event for event in self.event_conn.get_events(event_filter)]
+        self.assertEqual(3, len(events))
+        self.assertEqual("Zoo", events[2].event_type)
+
+    def test_get_event_trait_filter_op_datetime(self):
+        trait_filters = [{'key': 'trait_D',
+                          'datetime': self.start + datetime.timedelta(hours=2),
+                          'op': 'eq'}]
+        event_filter = storage.EventFilter(self.start, self.end,
+                                           traits_filter=trait_filters)
+        events = [event for event in self.event_conn.get_events(event_filter)]
+        self.assertEqual(1, len(events))
+        self.assertEqual("Zoo", events[0].event_type)
+        self.assertEqual(4, len(events[0].traits))
+        trait_filters[0].update({'key': 'trait_D', 'op': 'lt'})
+        event_filter = storage.EventFilter(self.start, self.end,
+                                           traits_filter=trait_filters)
+        events = [event for event in self.event_conn.get_events(event_filter)]
+        self.assertEqual(2, len(events))
+        self.assertEqual("Bar", events[1].event_type)
+        trait_filters[0].update({'key': 'trait_D', 'op': 'le'})
+        event_filter = storage.EventFilter(self.start, self.end,
+                                           traits_filter=trait_filters)
+        events = [event for event in self.event_conn.get_events(event_filter)]
+        self.assertEqual(3, len(events))
+        self.assertEqual("Bar", events[1].event_type)
+        trait_filters[0].update({'key': 'trait_D', 'op': 'ne'})
+        event_filter = storage.EventFilter(self.start, self.end,
+                                           traits_filter=trait_filters)
+        events = [event for event in self.event_conn.get_events(event_filter)]
+        self.assertEqual(5, len(events))
+        self.assertEqual("Foo", events[2].event_type)
+        trait_filters[0].update({'key': 'trait_D', 'op': 'gt'})
+        event_filter = storage.EventFilter(self.start, self.end,
+                                           traits_filter=trait_filters)
+        events = [event for event in self.event_conn.get_events(event_filter)]
+        self.assertEqual(3, len(events))
+        self.assertEqual("Zoo", events[2].event_type)
+        trait_filters[0].update({'key': 'trait_D', 'op': 'ge'})
+        event_filter = storage.EventFilter(self.start, self.end,
+                                           traits_filter=trait_filters)
+        events = [event for event in self.event_conn.get_events(event_filter)]
+        self.assertEqual(4, len(events))
+        self.assertEqual("Bar", events[2].event_type)
+
+    def test_get_event_multiple_trait_filter(self):
+        trait_filters = [{'key': 'trait_B', 'integer': 1},
+                         {'key': 'trait_A', 'string': 'my_Foo_text'}]
+        event_filter = storage.EventFilter(self.start, self.end,
+                                           traits_filter=trait_filters)
+        events = [event for event in self.event_conn.get_events(event_filter)]
+        self.assertEqual(1, len(events))
+        self.assertEqual("Foo", events[0].event_type)
+        self.assertEqual(4, len(events[0].traits))
+
+    def test_get_event_multiple_trait_filter_expect_none(self):
+        trait_filters = [{'key': 'trait_B', 'integer': 1},
+                         {'key': 'trait_A', 'string': 'my_Zoo_text'}]
+        event_filter = storage.EventFilter(self.start, self.end,
+                                           traits_filter=trait_filters)
+        events = [event for event in self.event_conn.get_events(event_filter)]
+        self.assertEqual(0, len(events))
+
+    def test_get_event_types(self):
+        event_types = [e for e in
+                       self.event_conn.get_event_types()]
+
+        self.assertEqual(3, len(event_types))
+        self.assertIn("Bar", event_types)
+        self.assertIn("Foo", event_types)
+        self.assertIn("Zoo", event_types)
+
+    def test_get_trait_types(self):
+        trait_types = [tt for tt in
+                       self.event_conn.get_trait_types("Foo")]
+        self.assertEqual(4, len(trait_types))
+        
trait_type_names = map(lambda x: x['name'], trait_types) + self.assertIn("trait_A", trait_type_names) + self.assertIn("trait_B", trait_type_names) + self.assertIn("trait_C", trait_type_names) + self.assertIn("trait_D", trait_type_names) + + def test_get_trait_types_unknown_event(self): + trait_types = [tt for tt in + self.event_conn.get_trait_types("Moo")] + self.assertEqual(0, len(trait_types)) + + def test_get_traits(self): + traits = self.event_conn.get_traits("Bar") + # format results in a way that makes them easier to work with + trait_dict = {} + for trait in traits: + trait_dict[trait.name] = trait.dtype + + self.assertIn("trait_A", trait_dict) + self.assertEqual(event_models.Trait.TEXT_TYPE, trait_dict["trait_A"]) + self.assertIn("trait_B", trait_dict) + self.assertEqual(event_models.Trait.INT_TYPE, trait_dict["trait_B"]) + self.assertIn("trait_C", trait_dict) + self.assertEqual(event_models.Trait.FLOAT_TYPE, trait_dict["trait_C"]) + self.assertIn("trait_D", trait_dict) + self.assertEqual(event_models.Trait.DATETIME_TYPE, + trait_dict["trait_D"]) + + def test_get_all_traits(self): + traits = self.event_conn.get_traits("Foo") + traits = sorted([t for t in traits], key=operator.attrgetter('dtype')) + self.assertEqual(8, len(traits)) + trait = traits[0] + self.assertEqual("trait_A", trait.name) + self.assertEqual(event_models.Trait.TEXT_TYPE, trait.dtype) + + def test_simple_get_event_no_traits(self): + new_events = [event_models.Event("id_notraits", "NoTraits", + self.start, [], {})] + self.event_conn.record_events(new_events) + event_filter = storage.EventFilter(self.start, self.end, "NoTraits") + events = [event for event in self.event_conn.get_events(event_filter)] + self.assertEqual(1, len(events)) + self.assertEqual("id_notraits", events[0].message_id) + self.assertEqual("NoTraits", events[0].event_type) + self.assertEqual(0, len(events[0].traits)) + + def test_simple_get_no_filters(self): + event_filter = storage.EventFilter(None, None, None) + events = [event for event in self.event_conn.get_events(event_filter)] + self.assertEqual(6, len(events)) + + def test_get_by_message_id(self): + new_events = [event_models.Event("id_testid", + "MessageIDTest", + self.start, + [], {})] + + self.event_conn.record_events(new_events) + event_filter = storage.EventFilter(message_id="id_testid") + events = [event for event in self.event_conn.get_events(event_filter)] + self.assertEqual(1, len(events)) + event = events[0] + self.assertEqual("id_testid", event.message_id) + + def test_simple_get_raw(self): + event_filter = storage.EventFilter() + events = [event for event in self.event_conn.get_events(event_filter)] + self.assertTrue(events) + self.assertEqual({'status': {'nested': 'started'}}, events[0].raw) + + def test_trait_type_enforced_on_none(self): + new_events = [event_models.Event( + "id_testid", "MessageIDTest", self.start, + [event_models.Trait('text', event_models.Trait.TEXT_TYPE, ''), + event_models.Trait('int', event_models.Trait.INT_TYPE, 0), + event_models.Trait('float', event_models.Trait.FLOAT_TYPE, 0.0)], + {})] + self.event_conn.record_events(new_events) + event_filter = storage.EventFilter(message_id="id_testid") + events = [event for event in self.event_conn.get_events(event_filter)] + options = [(event_models.Trait.TEXT_TYPE, ''), + (event_models.Trait.INT_TYPE, 0.0), + (event_models.Trait.FLOAT_TYPE, 0.0)] + for trait in events[0].traits: + options.remove((trait.dtype, trait.value)) + + +class BigIntegerTest(tests_db.TestBase, + tests_db.MixinTestsWithBackendScenarios): 
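+    # The 99999999999999 metadata value below does not fit in a signed
+    # 32-bit integer, so this scenario checks that recording a sample
+    # whose metadata holds a 64-bit integer does not overflow in any
+    # backend.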
+ def test_metadata_bigint(self): + metadata = {'bigint': 99999999999999} + s = sample.Sample(name='name', + type=sample.TYPE_GAUGE, + unit='B', + volume=1, + user_id='user-id', + project_id='project-id', + resource_id='resource-id', + timestamp=datetime.datetime.utcnow(), + resource_metadata=metadata) + msg = utils.meter_message_from_counter( + s, self.CONF.publisher.telemetry_secret) + self.conn.record_metering_data(msg) + + +@tests_db.run_with('mongodb') +class MongoAutoReconnectTest(DBTestBase, + tests_db.MixinTestsWithBackendScenarios): + + def setUp(self): + super(MongoAutoReconnectTest, self).setUp() + self.CONF.set_override('retry_interval', 0, group='database') + + def test_mongo_client(self): + if cfg.CONF.database.mongodb_replica_set: + self.assertIsInstance(self.conn.conn.conn, + pymongo.MongoReplicaSetClient) + else: + self.assertIsInstance(self.conn.conn.conn, + pymongo.MongoClient) + + def test_mongo_cursor_next(self): + expected_first_sample_timestamp = datetime.datetime(2012, 7, 2, 10, 39) + raise_exc = [False, True] + method = self.conn.db.resource.find().cursor.next + with mock.patch('pymongo.cursor.Cursor.next', + mock.Mock()) as mock_next: + mock_next.side_effect = self.create_side_effect( + method, pymongo.errors.AutoReconnect, raise_exc) + resource = self.conn.db.resource.find().next() + self.assertEqual(expected_first_sample_timestamp, + resource['first_sample_timestamp']) + + def test_mongo_insert(self): + raise_exc = [False, True] + method = self.conn.db.meter.insert + + with mock.patch('pymongo.collection.Collection.insert', + mock.Mock(return_value=method)) as mock_insert: + mock_insert.side_effect = self.create_side_effect( + method, pymongo.errors.AutoReconnect, raise_exc) + mock_insert.__name__ = 'insert' + self.create_and_store_sample( + timestamp=datetime.datetime(2014, 10, 15, 14, 39), + source='test-proxy') + meters = list(self.conn.db.meter.find()) + self.assertEqual(12, len(meters)) + + def test_mongo_find_and_modify(self): + raise_exc = [False, True] + method = self.conn.db.resource.find_and_modify + + with mock.patch('pymongo.collection.Collection.find_and_modify', + mock.Mock()) as mock_fam: + mock_fam.side_effect = self.create_side_effect( + method, pymongo.errors.AutoReconnect, raise_exc) + mock_fam.__name__ = 'find_and_modify' + self.create_and_store_sample( + timestamp=datetime.datetime(2014, 10, 15, 14, 39), + source='test-proxy') + data = self.conn.db.resource.find( + {'last_sample_timestamp': + datetime.datetime(2014, 10, 15, 14, 39)})[0]['source'] + self.assertEqual('test-proxy', data) + + def test_mongo_update(self): + raise_exc = [False, True] + method = self.conn.db.resource.update + + with mock.patch('pymongo.collection.Collection.update', + mock.Mock()) as mock_update: + mock_update.side_effect = self.create_side_effect( + method, pymongo.errors.AutoReconnect, raise_exc) + mock_update.__name__ = 'update' + self.create_and_store_sample( + timestamp=datetime.datetime(2014, 10, 15, 17, 39), + source='test-proxy-update') + data = self.conn.db.resource.find( + {'last_sample_timestamp': + datetime.datetime(2014, 10, 15, 17, 39)})[0]['source'] + self.assertEqual('test-proxy-update', data) + + +@tests_db.run_with('mongodb') +class MongoTimeToLiveTest(DBTestBase, tests_db.MixinTestsWithBackendScenarios): + + def test_ensure_index(self): + cfg.CONF.set_override('metering_time_to_live', 5, group='database') + self.conn.upgrade() + self.assertEqual(5, self.conn.db.resource.index_information() + ['resource_ttl']['expireAfterSeconds']) + 
self.assertEqual(5, self.conn.db.meter.index_information() + ['meter_ttl']['expireAfterSeconds']) + + def test_modification_of_index(self): + cfg.CONF.set_override('metering_time_to_live', 5, group='database') + self.conn.upgrade() + cfg.CONF.set_override('metering_time_to_live', 15, group='database') + self.conn.upgrade() + self.assertEqual(15, self.conn.db.resource.index_information() + ['resource_ttl']['expireAfterSeconds']) + self.assertEqual(15, self.conn.db.meter.index_information() + ['meter_ttl']['expireAfterSeconds']) + + +class TestRecordUnicodeSamples(DBTestBase, + tests_db.MixinTestsWithBackendScenarios): + def prepare_data(self): + self.msgs = [] + self.msgs.append(self.create_and_store_sample( + name=u'meter.accent\xe9\u0437', + metadata={u"metadata_key\xe9\u0437": "test", + u"metadata_key": u"test\xe9\u0437"}, + )) + + def test_unicode_sample(self): + f = storage.SampleFilter() + results = list(self.conn.get_samples(f)) + self.assertEqual(1, len(results)) + expected = self.msgs[0] + actual = results[0].as_dict() + self.assertEqual(expected['counter_name'], actual['counter_name']) + self.assertEqual(expected['resource_metadata'], + actual['resource_metadata']) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/test_bin.py ceilometer-5.0.0~b3/ceilometer/tests/functional/test_bin.py --- ceilometer-5.0.0~b2/ceilometer/tests/functional/test_bin.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/test_bin.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,298 @@ +#!/usr/bin/env python +# +# Copyright 2012 eNovance +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
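The binary tests added in test_bin.py all follow one pattern: render a throwaway configuration file, spawn the console script under test with subprocess, and assert on its exit status or stderr. A minimal standalone sketch of that pattern, assuming ceilometer (and therefore the ceilometer-dbsync entry point) is installed in the environment:

    import os
    import subprocess

    from oslo_utils import fileutils
    import six

    # Render a throwaway config file, as BinTestCase.setUp does below.
    content = ("[DEFAULT]\n"
               "rpc_backend=fake\n"
               "[database]\n"
               "connection=log://localhost\n")
    if six.PY3:
        content = content.encode('utf-8')
    conf_path = fileutils.write_to_tempfile(content=content,
                                            prefix='ceilometer',
                                            suffix='.conf')
    try:
        # Spawn the console script against it and assert on the exit code.
        # Assumes ceilometer-dbsync is on PATH (i.e. ceilometer is installed).
        subp = subprocess.Popen(['ceilometer-dbsync',
                                 '--config-file=%s' % conf_path])
        assert subp.wait() == 0
    finally:
        os.remove(conf_path)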
+ +import json +import os +import random +import socket +import subprocess +import time + +import httplib2 +from oslo_utils import fileutils +import six + +from ceilometer.tests import base + + +class BinTestCase(base.BaseTestCase): + def setUp(self): + super(BinTestCase, self).setUp() + content = ("[DEFAULT]\n" + "rpc_backend=fake\n" + "[database]\n" + "connection=log://localhost\n") + if six.PY3: + content = content.encode('utf-8') + self.tempfile = fileutils.write_to_tempfile(content=content, + prefix='ceilometer', + suffix='.conf') + + def tearDown(self): + super(BinTestCase, self).tearDown() + os.remove(self.tempfile) + + def test_dbsync_run(self): + subp = subprocess.Popen(['ceilometer-dbsync', + "--config-file=%s" % self.tempfile]) + self.assertEqual(0, subp.wait()) + + def test_run_expirer_ttl_disabled(self): + subp = subprocess.Popen(['ceilometer-expirer', + '-d', + "--config-file=%s" % self.tempfile], + stderr=subprocess.PIPE) + __, err = subp.communicate() + self.assertEqual(0, subp.poll()) + self.assertIn(b"Nothing to clean, database metering " + b"time to live is disabled", err) + self.assertIn(b"Nothing to clean, database event " + b"time to live is disabled", err) + self.assertIn(b"Nothing to clean, database alarm history " + b"time to live is disabled", err) + + def _test_run_expirer_ttl_enabled(self, ttl_name, data_name): + content = ("[DEFAULT]\n" + "rpc_backend=fake\n" + "[database]\n" + "%s=1\n" + "connection=log://localhost\n" % ttl_name) + if six.PY3: + content = content.encode('utf-8') + self.tempfile = fileutils.write_to_tempfile(content=content, + prefix='ceilometer', + suffix='.conf') + subp = subprocess.Popen(['ceilometer-expirer', + '-d', + "--config-file=%s" % self.tempfile], + stderr=subprocess.PIPE) + __, err = subp.communicate() + self.assertEqual(0, subp.poll()) + msg = "Dropping %s data with TTL 1" % data_name + if six.PY3: + msg = msg.encode('utf-8') + self.assertIn(msg, err) + + def test_run_expirer_ttl_enabled(self): + self._test_run_expirer_ttl_enabled('metering_time_to_live', + 'metering') + self._test_run_expirer_ttl_enabled('time_to_live', 'metering') + self._test_run_expirer_ttl_enabled('event_time_to_live', 'event') + self._test_run_expirer_ttl_enabled('alarm_history_time_to_live', + 'alarm history') + + +class BinSendSampleTestCase(base.BaseTestCase): + def setUp(self): + super(BinSendSampleTestCase, self).setUp() + pipeline_cfg_file = self.path_get('etc/ceilometer/pipeline.yaml') + content = ("[DEFAULT]\n" + "rpc_backend=fake\n" + "pipeline_cfg_file={0}\n".format(pipeline_cfg_file)) + if six.PY3: + content = content.encode('utf-8') + + self.tempfile = fileutils.write_to_tempfile(content=content, + prefix='ceilometer', + suffix='.conf') + + def tearDown(self): + super(BinSendSampleTestCase, self).tearDown() + os.remove(self.tempfile) + + def test_send_counter_run(self): + subp = subprocess.Popen(['ceilometer-send-sample', + "--config-file=%s" % self.tempfile, + "--sample-resource=someuuid", + "--sample-name=mycounter"]) + self.assertEqual(0, subp.wait()) + + +class BinApiTestCase(base.BaseTestCase): + + def setUp(self): + super(BinApiTestCase, self).setUp() + # create api_paste.ini file without authentication + content = ("[pipeline:main]\n" + "pipeline = api-server\n" + "[app:api-server]\n" + "paste.app_factory = ceilometer.api.app:app_factory\n") + if six.PY3: + content = content.encode('utf-8') + self.paste = fileutils.write_to_tempfile(content=content, + prefix='api_paste', + suffix='.ini') + + # create ceilometer.conf file + self.api_port = 
random.randint(10000, 11000) + self.http = httplib2.Http(proxy_info=None) + self.pipeline_cfg_file = self.path_get('etc/ceilometer/pipeline.yaml') + self.policy_file = self.path_get('etc/ceilometer/policy.json') + + def tearDown(self): + super(BinApiTestCase, self).tearDown() + try: + self.subp.kill() + self.subp.wait() + except OSError: + pass + os.remove(self.tempfile) + + def get_response(self, path): + url = 'http://%s:%d/%s' % ('127.0.0.1', self.api_port, path) + + for x in range(10): + try: + r, c = self.http.request(url, 'GET') + except socket.error: + time.sleep(.5) + self.assertIsNone(self.subp.poll()) + else: + return r, c + return None, None + + def run_api(self, content, err_pipe=None): + if six.PY3: + content = content.encode('utf-8') + + self.tempfile = fileutils.write_to_tempfile(content=content, + prefix='ceilometer', + suffix='.conf') + if err_pipe: + return subprocess.Popen(['ceilometer-api', + "--config-file=%s" % self.tempfile], + stderr=subprocess.PIPE) + else: + return subprocess.Popen(['ceilometer-api', + "--config-file=%s" % self.tempfile]) + + def test_v2(self): + + content = ("[DEFAULT]\n" + "rpc_backend=fake\n" + "auth_strategy=noauth\n" + "debug=true\n" + "pipeline_cfg_file={0}\n" + "api_paste_config={2}\n" + "[api]\n" + "port={3}\n" + "[oslo_policy]\n" + "policy_file={1}\n" + "[database]\n" + "connection=log://localhost\n". + format(self.pipeline_cfg_file, + self.policy_file, + self.paste, + self.api_port)) + + self.subp = self.run_api(content) + + response, content = self.get_response('v2/meters') + self.assertEqual(200, response.status) + if six.PY3: + content = content.decode('utf-8') + self.assertEqual([], json.loads(content)) + + def test_v2_with_bad_storage_conn(self): + + content = ("[DEFAULT]\n" + "rpc_backend=fake\n" + "auth_strategy=noauth\n" + "debug=true\n" + "pipeline_cfg_file={0}\n" + "policy_file={1}\n" + "api_paste_config={2}\n" + "[api]\n" + "port={3}\n" + "[database]\n" + "max_retries=1\n" + "alarm_connection=log://localhost\n" + "connection=dummy://localhost\n". + format(self.pipeline_cfg_file, + self.policy_file, + self.paste, + self.api_port)) + + self.subp = self.run_api(content, err_pipe=True) + + response, content = self.get_response('v2/alarms') + self.assertEqual(200, response.status) + if six.PY3: + content = content.decode('utf-8') + self.assertEqual([], json.loads(content)) + + response, content = self.get_response('v2/meters') + self.assertEqual(500, response.status) + + def test_v2_with_all_bad_conns(self): + + content = ("[DEFAULT]\n" + "rpc_backend=fake\n" + "auth_strategy=noauth\n" + "debug=true\n" + "pipeline_cfg_file={0}\n" + "policy_file={1}\n" + "api_paste_config={2}\n" + "[api]\n" + "port={3}\n" + "[database]\n" + "max_retries=1\n" + "alarm_connection=dummy://localhost\n" + "connection=dummy://localhost\n" + "event_connection=dummy://localhost\n". + format(self.pipeline_cfg_file, + self.policy_file, + self.paste, + self.api_port)) + + self.subp = self.run_api(content, err_pipe=True) + + __, err = self.subp.communicate() + + self.assertIn(b"Api failed to start. 
Failed to connect to"
+                      b" databases, purpose: metering, event, alarm", err)
+
+
+class BinCeilometerPollingServiceTestCase(base.BaseTestCase):
+    def setUp(self):
+        super(BinCeilometerPollingServiceTestCase, self).setUp()
+        content = ("[DEFAULT]\n"
+                   "rpc_backend=fake\n"
+                   "[database]\n"
+                   "connection=log://localhost\n")
+        if six.PY3:
+            content = content.encode('utf-8')
+        self.tempfile = fileutils.write_to_tempfile(content=content,
+                                                    prefix='ceilometer',
+                                                    suffix='.conf')
+        self.subp = None
+
+    def tearDown(self):
+        super(BinCeilometerPollingServiceTestCase, self).tearDown()
+        if self.subp:
+            self.subp.kill()
+        os.remove(self.tempfile)
+
+    def test_starting_with_duplication_namespaces(self):
+        self.subp = subprocess.Popen(['ceilometer-polling',
+                                      "--config-file=%s" % self.tempfile,
+                                      "--polling-namespaces",
+                                      "compute",
+                                      "compute"],
+                                     stderr=subprocess.PIPE)
+        out = self.subp.stderr.read(1024)
+        self.assertIn(b'Duplicated values: [\'compute\', \'compute\'] '
+                      b'found in CLI options, auto de-duplicated', out)
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/test_collector.py ceilometer-5.0.0~b3/ceilometer/tests/functional/test_collector.py
--- ceilometer-5.0.0~b2/ceilometer/tests/functional/test_collector.py 1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/functional/test_collector.py 2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,304 @@
+#
+# Copyright 2012 New Dream Network, LLC (DreamHost)
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
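The UDP tests in test_collector.py simulate the collector's wire format, where each datagram carries a single msgpack-encoded sample dict. A minimal sending-side sketch; the 127.0.0.1:4952 endpoint is an assumption standing in for whatever the collector's udp_address/udp_port options are set to:

    import socket

    import msgpack

    # A sample dict in the flattened "counter_*" form these tests build.
    sample_dict = {
        'counter_name': 'foobar',
        'counter_type': 'bad',
        'counter_unit': 'F',
        'counter_volume': 1,
        'resource_id': 'cat',
        'source': 'mysource',
    }

    # One datagram == one msgpack-encoded sample; 4952 is assumed here as
    # the collector's UDP port, adjust to match the deployment under test.
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.sendto(msgpack.dumps(sample_dict), ('127.0.0.1', 4952))
    sock.close()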
+import socket + +import mock +import msgpack +from oslo_config import fixture as fixture_config +from oslo_context import context +import oslo_messaging +from oslo_utils import timeutils +from oslotest import mockpatch +from stevedore import extension + +from ceilometer import collector +from ceilometer import dispatcher +from ceilometer import messaging +from ceilometer.publisher import utils +from ceilometer import sample +from ceilometer.tests import base as tests_base + + +class FakeException(Exception): + pass + + +class FakeConnection(object): + def create_worker(self, topic, proxy, pool_name): + pass + + +class TestCollector(tests_base.BaseTestCase): + def setUp(self): + super(TestCollector, self).setUp() + self.CONF = self.useFixture(fixture_config.Config()).conf + self.CONF.import_opt("connection", "oslo_db.options", group="database") + self.CONF.set_override("connection", "log://", group='database') + self.CONF.set_override('telemetry_secret', 'not-so-secret', + group='publisher') + self._setup_messaging() + + self.counter = sample.Sample( + name='foobar', + type='bad', + unit='F', + volume=1, + user_id='jd', + project_id='ceilometer', + resource_id='cat', + timestamp=timeutils.utcnow().isoformat(), + resource_metadata={}, + ).as_dict() + + self.utf8_msg = utils.meter_message_from_counter( + sample.Sample( + name=u'test', + type=sample.TYPE_CUMULATIVE, + unit=u'', + volume=1, + user_id=u'test', + project_id=u'test', + resource_id=u'test_run_tasks', + timestamp=timeutils.utcnow().isoformat(), + resource_metadata={u'name': [([u'TestPublish'])]}, + source=u'testsource', + ), + 'not-so-secret') + + self.srv = collector.CollectorService() + + self.useFixture(mockpatch.PatchObject( + self.srv.tg, 'add_thread', + side_effect=self._dummy_thread_group_add_thread)) + + @staticmethod + def _dummy_thread_group_add_thread(method): + method() + + def _setup_messaging(self, enabled=True): + if enabled: + self.setup_messaging(self.CONF) + else: + self.useFixture(mockpatch.Patch( + 'ceilometer.messaging.get_transport', + return_value=None)) + + def _setup_fake_dispatcher(self): + plugin = mock.MagicMock() + fake_dispatcher = extension.ExtensionManager.make_test_instance([ + extension.Extension('test', None, None, plugin,), + ], propagate_map_exceptions=True) + self.useFixture(mockpatch.Patch( + 'ceilometer.dispatcher.load_dispatcher_manager', + return_value=fake_dispatcher)) + return plugin + + def _make_fake_socket(self, sample): + def recvfrom(size): + # Make the loop stop + self.srv.stop() + return msgpack.dumps(sample), ('127.0.0.1', 12345) + + sock = mock.Mock() + sock.recvfrom = recvfrom + return sock + + def _verify_udp_socket(self, udp_socket): + conf = self.CONF.collector + udp_socket.setsockopt.assert_called_once_with(socket.SOL_SOCKET, + socket.SO_REUSEADDR, 1) + udp_socket.bind.assert_called_once_with((conf.udp_address, + conf.udp_port)) + + def test_record_metering_data(self): + mock_dispatcher = self._setup_fake_dispatcher() + self.srv.dispatcher_manager = dispatcher.load_dispatcher_manager() + self.srv.record_metering_data(None, self.counter) + mock_dispatcher.record_metering_data.assert_called_once_with( + data=self.counter) + + def test_udp_receive_base(self): + self._setup_messaging(False) + mock_dispatcher = self._setup_fake_dispatcher() + self.counter['source'] = 'mysource' + self.counter['counter_name'] = self.counter['name'] + self.counter['counter_volume'] = self.counter['volume'] + self.counter['counter_type'] = self.counter['type'] + self.counter['counter_unit'] = 
self.counter['unit'] + + udp_socket = self._make_fake_socket(self.counter) + + with mock.patch('socket.socket') as mock_socket: + mock_socket.return_value = udp_socket + self.srv.start() + mock_socket.assert_called_with(socket.AF_INET, socket.SOCK_DGRAM) + + self._verify_udp_socket(udp_socket) + + mock_dispatcher.record_metering_data.assert_called_once_with( + self.counter) + + def test_udp_socket_ipv6(self): + self._setup_messaging(False) + self.CONF.set_override('udp_address', '::1', group='collector') + self._setup_fake_dispatcher() + sock = self._make_fake_socket('data') + + with mock.patch.object(socket, 'socket') as mock_socket: + mock_socket.return_value = sock + self.srv.start() + mock_socket.assert_called_with(socket.AF_INET6, socket.SOCK_DGRAM) + + def test_udp_receive_storage_error(self): + self._setup_messaging(False) + mock_dispatcher = self._setup_fake_dispatcher() + mock_dispatcher.record_metering_data.side_effect = self._raise_error + + self.counter['source'] = 'mysource' + self.counter['counter_name'] = self.counter['name'] + self.counter['counter_volume'] = self.counter['volume'] + self.counter['counter_type'] = self.counter['type'] + self.counter['counter_unit'] = self.counter['unit'] + + udp_socket = self._make_fake_socket(self.counter) + with mock.patch('socket.socket', return_value=udp_socket): + self.srv.start() + + self._verify_udp_socket(udp_socket) + + mock_dispatcher.record_metering_data.assert_called_once_with( + self.counter) + + @staticmethod + def _raise_error(*args, **kwargs): + raise Exception + + def test_udp_receive_bad_decoding(self): + self._setup_messaging(False) + udp_socket = self._make_fake_socket(self.counter) + with mock.patch('socket.socket', return_value=udp_socket): + with mock.patch('msgpack.loads', self._raise_error): + self.srv.start() + + self._verify_udp_socket(udp_socket) + + @mock.patch.object(oslo_messaging.MessageHandlingServer, 'start') + @mock.patch.object(collector.CollectorService, 'start_udp') + def test_only_udp(self, udp_start, rpc_start): + """Check that only UDP is started if messaging transport is unset.""" + self._setup_messaging(False) + udp_socket = self._make_fake_socket(self.counter) + with mock.patch('socket.socket', return_value=udp_socket): + self.srv.start() + self.assertEqual(0, rpc_start.call_count) + self.assertEqual(1, udp_start.call_count) + + @mock.patch.object(oslo_messaging.MessageHandlingServer, 'start') + @mock.patch.object(collector.CollectorService, 'start_udp') + def test_only_rpc(self, udp_start, rpc_start): + """Check that only RPC is started if udp_address is empty.""" + self.CONF.set_override('enable_rpc', True, group='collector') + self.CONF.set_override('udp_address', '', group='collector') + self.srv.start() + # two calls because two servers (notification and rpc) + self.assertEqual(2, rpc_start.call_count) + self.assertEqual(0, udp_start.call_count) + + def test_udp_receive_valid_encoding(self): + self._setup_messaging(False) + mock_dispatcher = self._setup_fake_dispatcher() + self.data_sent = [] + with mock.patch('socket.socket', + return_value=self._make_fake_socket(self.utf8_msg)): + self.srv.start() + self.assertTrue(utils.verify_signature( + mock_dispatcher.method_calls[0][1][0], + "not-so-secret")) + + @mock.patch('ceilometer.storage.impl_log.LOG') + def test_collector_no_mock(self, mylog): + self.CONF.set_override('enable_rpc', True, group='collector') + self.CONF.set_override('udp_address', '', group='collector') + self.srv.start() + mylog.info.side_effect = lambda *args: self.srv.stop() 
+ + client = messaging.get_rpc_client(self.transport, version='1.0') + cclient = client.prepare(topic='metering') + cclient.cast(context.RequestContext(), + 'record_metering_data', data=[self.utf8_msg]) + + self.srv.rpc_server.wait() + mylog.info.assert_called_once_with( + 'metering data test for test_run_tasks: 1') + + def _test_collector_requeue(self, listener): + + mock_dispatcher = self._setup_fake_dispatcher() + self.srv.dispatcher_manager = dispatcher.load_dispatcher_manager() + mock_dispatcher.record_metering_data.side_effect = Exception('boom') + mock_dispatcher.record_events.side_effect = Exception('boom') + + self.srv.start() + endp = getattr(self.srv, listener).dispatcher.endpoints[0] + ret = endp.sample({}, 'pub_id', 'event', {}, {}) + self.assertEqual(oslo_messaging.NotificationResult.REQUEUE, + ret) + + @mock.patch.object(oslo_messaging.MessageHandlingServer, 'start', + mock.Mock()) + @mock.patch.object(collector.CollectorService, 'start_udp', mock.Mock()) + def test_collector_sample_requeue(self): + self.CONF.set_override('requeue_sample_on_dispatcher_error', True, + group='collector') + self._test_collector_requeue('sample_listener') + + @mock.patch.object(oslo_messaging.MessageHandlingServer, 'start', + mock.Mock()) + @mock.patch.object(collector.CollectorService, 'start_udp', mock.Mock()) + def test_collector_event_requeue(self): + self.CONF.set_override('requeue_event_on_dispatcher_error', True, + group='collector') + self.CONF.set_override('store_events', True, group='notification') + self._test_collector_requeue('event_listener') + + def _test_collector_no_requeue(self, listener): + mock_dispatcher = self._setup_fake_dispatcher() + self.srv.dispatcher_manager = dispatcher.load_dispatcher_manager() + mock_dispatcher.record_metering_data.side_effect = (FakeException + ('boom')) + mock_dispatcher.record_events.side_effect = (FakeException + ('boom')) + + self.srv.start() + endp = getattr(self.srv, listener).dispatcher.endpoints[0] + self.assertRaises(FakeException, endp.sample, {}, 'pub_id', + 'event', {}, {}) + + @mock.patch.object(oslo_messaging.MessageHandlingServer, 'start', + mock.Mock()) + @mock.patch.object(collector.CollectorService, 'start_udp', mock.Mock()) + def test_collector_sample_no_requeue(self): + self.CONF.set_override('requeue_sample_on_dispatcher_error', False, + group='collector') + self._test_collector_no_requeue('sample_listener') + + @mock.patch.object(oslo_messaging.MessageHandlingServer, 'start', + mock.Mock()) + @mock.patch.object(collector.CollectorService, 'start_udp', mock.Mock()) + def test_collector_event_no_requeue(self): + self.CONF.set_override('requeue_event_on_dispatcher_error', False, + group='collector') + self.CONF.set_override('store_events', True, group='notification') + self._test_collector_no_requeue('event_listener') diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/test_empty.py ceilometer-5.0.0~b3/ceilometer/tests/functional/test_empty.py --- ceilometer-5.0.0~b2/ceilometer/tests/functional/test_empty.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/test_empty.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,23 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" This test is written to avoid failing running the functional test job -caused by `subunit-trace -f` with empty testr output -in ./tools/pretty-tox.sh""" - -from oslotest import base - - -class TestEmpty(base.BaseTestCase): - def test_empty(self): - pass diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/functional/test_notification.py ceilometer-5.0.0~b3/ceilometer/tests/functional/test_notification.py --- ceilometer-5.0.0~b2/ceilometer/tests/functional/test_notification.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/functional/test_notification.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,551 @@ +# +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Tests for Ceilometer notify daemon.""" + +import shutil + +import eventlet +import mock +from oslo_config import fixture as fixture_config +from oslo_context import context +import oslo_messaging +import oslo_messaging.conffixture +import oslo_service.service +from oslo_utils import fileutils +from oslo_utils import timeutils +import six +from stevedore import extension +import yaml + +from ceilometer.compute.notifications import instance +from ceilometer import messaging +from ceilometer import notification +from ceilometer.publisher import test as test_publisher +from ceilometer import service +from ceilometer.tests import base as tests_base + +TEST_NOTICE_CTXT = { + u'auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', + u'is_admin': True, + u'project_id': u'7c150a59fe714e6f9263774af9688f0e', + u'quota_class': None, + u'read_deleted': u'no', + u'remote_address': u'10.0.2.15', + u'request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', + u'roles': [u'admin'], + u'timestamp': u'2012-05-08T20:23:41.425105', + u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', +} + +TEST_NOTICE_METADATA = { + u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', + u'timestamp': u'2012-05-08 20:23:48.028195', +} + +TEST_NOTICE_PAYLOAD = { + u'created_at': u'2012-05-08 20:23:41', + u'deleted_at': u'', + u'disk_gb': 0, + u'display_name': u'testme', + u'fixed_ips': [{u'address': u'10.0.0.2', + u'floating_ips': [], + u'meta': {}, + u'type': u'fixed', + u'version': 4}], + u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', + u'instance_id': u'9f9d01b9-4a58-4271-9e27-398b21ab20d1', + u'instance_type': u'm1.tiny', + u'instance_type_id': 2, + u'launched_at': u'2012-05-08 20:23:47.985999', + u'memory_mb': 512, + u'state': u'active', + u'state_description': u'', + u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', + u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', + 
u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', + u'vcpus': 1, + u'root_gb': 0, + u'ephemeral_gb': 0, + u'host': u'compute-host-name', + u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', + u'os_type': u'linux?', + u'architecture': u'x86', + u'image_ref': u'UUID', + u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', + u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', +} + + +class TestNotification(tests_base.BaseTestCase): + + def setUp(self): + super(TestNotification, self).setUp() + self.CONF = self.useFixture(fixture_config.Config()).conf + self.CONF.set_override("connection", "log://", group='database') + self.CONF.set_override("store_events", False, group="notification") + self.CONF.set_override("disable_non_metric_meters", False, + group="notification") + self.setup_messaging(self.CONF) + self.srv = notification.NotificationService() + + def fake_get_notifications_manager(self, pm): + self.plugin = instance.Instance(pm) + return extension.ExtensionManager.make_test_instance( + [ + extension.Extension('test', + None, + None, + self.plugin) + ] + ) + + @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) + @mock.patch.object(oslo_messaging.MessageHandlingServer, 'start', + mock.MagicMock()) + @mock.patch('ceilometer.event.endpoint.EventsNotificationEndpoint') + def _do_process_notification_manager_start(self, + fake_event_endpoint_class): + with mock.patch.object(self.srv, + '_get_notifications_manager') as get_nm: + get_nm.side_effect = self.fake_get_notifications_manager + self.srv.start() + self.fake_event_endpoint = fake_event_endpoint_class.return_value + + def test_start_multiple_listeners(self): + urls = ["fake://vhost1", "fake://vhost2"] + self.CONF.set_override("messaging_urls", urls, group="notification") + self._do_process_notification_manager_start() + self.assertEqual(2, len(self.srv.listeners)) + + def test_process_notification(self): + self._do_process_notification_manager_start() + self.srv.pipeline_manager.pipelines[0] = mock.MagicMock() + + self.plugin.info(TEST_NOTICE_CTXT, 'compute.vagrant-precise', + 'compute.instance.create.end', + TEST_NOTICE_PAYLOAD, TEST_NOTICE_METADATA) + + self.assertEqual(1, len(self.srv.listeners[0].dispatcher.endpoints)) + self.assertTrue(self.srv.pipeline_manager.publisher.called) + + def test_process_notification_no_events(self): + self._do_process_notification_manager_start() + self.assertEqual(1, len(self.srv.listeners[0].dispatcher.endpoints)) + self.assertNotEqual(self.fake_event_endpoint, + self.srv.listeners[0].dispatcher.endpoints[0]) + + @mock.patch('ceilometer.pipeline.setup_event_pipeline', mock.MagicMock()) + def test_process_notification_with_events(self): + self.CONF.set_override("store_events", True, group="notification") + self._do_process_notification_manager_start() + self.assertEqual(2, len(self.srv.listeners[0].dispatcher.endpoints)) + self.assertEqual(self.fake_event_endpoint, + self.srv.listeners[0].dispatcher.endpoints[0]) + + @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) + @mock.patch.object(oslo_messaging.MessageHandlingServer, 'start', + mock.MagicMock()) + @mock.patch('ceilometer.event.endpoint.EventsNotificationEndpoint') + def test_unique_consumers(self, fake_event_endpoint_class): + + def fake_get_notifications_manager_dup_targets(pm): + plugin = instance.Instance(pm) + return extension.ExtensionManager.make_test_instance( + [extension.Extension('test', None, None, plugin), + extension.Extension('test', None, None, plugin)]) + + with 
mock.patch.object(self.srv, + '_get_notifications_manager') as get_nm: + get_nm.side_effect = fake_get_notifications_manager_dup_targets + self.srv.start() + self.assertEqual(1, len(self.srv.listeners[0].dispatcher.targets)) + + +class BaseRealNotification(tests_base.BaseTestCase): + def setup_pipeline(self, counter_names): + pipeline = yaml.dump({ + 'sources': [{ + 'name': 'test_pipeline', + 'interval': 5, + 'meters': counter_names, + 'sinks': ['test_sink'] + }], + 'sinks': [{ + 'name': 'test_sink', + 'transformers': [], + 'publishers': ['test://'] + }] + }) + if six.PY3: + pipeline = pipeline.encode('utf-8') + + pipeline_cfg_file = fileutils.write_to_tempfile(content=pipeline, + prefix="pipeline", + suffix="yaml") + return pipeline_cfg_file + + def setUp(self): + super(BaseRealNotification, self).setUp() + self.CONF = self.useFixture(fixture_config.Config()).conf + # Dummy config file to avoid looking for system config + service.prepare_service(argv=[], config_files=[]) + self.setup_messaging(self.CONF, 'nova') + + pipeline_cfg_file = self.setup_pipeline(['instance', 'memory']) + self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) + + self.expected_samples = 2 + + self.CONF.set_override("store_events", True, group="notification") + self.CONF.set_override("disable_non_metric_meters", False, + group="notification") + ev_pipeline = yaml.dump({ + 'sources': [{ + 'name': 'test_event', + 'events': ['compute.instance.*'], + 'sinks': ['test_sink'] + }], + 'sinks': [{ + 'name': 'test_sink', + 'publishers': ['test://'] + }] + }) + if six.PY3: + ev_pipeline = ev_pipeline.encode('utf-8') + self.expected_events = 1 + ev_pipeline_cfg_file = fileutils.write_to_tempfile( + content=ev_pipeline, prefix="event_pipeline", suffix="yaml") + self.CONF.set_override("event_pipeline_cfg_file", ev_pipeline_cfg_file) + self.CONF.set_override( + "definitions_cfg_file", + self.path_get('etc/ceilometer/event_definitions.yaml'), + group='event') + self.publisher = test_publisher.TestPublisher("") + + def _check_notification_service(self): + self.srv.start() + + notifier = messaging.get_notifier(self.transport, + "compute.vagrant-precise") + notifier.info(context.RequestContext(), 'compute.instance.create.end', + TEST_NOTICE_PAYLOAD) + start = timeutils.utcnow() + while timeutils.delta_seconds(start, timeutils.utcnow()) < 600: + if (len(self.publisher.samples) >= self.expected_samples and + len(self.publisher.events) >= self.expected_events): + break + eventlet.sleep(0) + self.assertNotEqual(self.srv.listeners, self.srv.pipeline_listeners) + self.srv.stop() + + resources = list(set(s.resource_id for s in self.publisher.samples)) + self.assertEqual(self.expected_samples, len(self.publisher.samples)) + self.assertEqual(self.expected_events, len(self.publisher.events)) + self.assertEqual(["9f9d01b9-4a58-4271-9e27-398b21ab20d1"], resources) + + +class TestRealNotificationReloadablePipeline(BaseRealNotification): + + def setUp(self): + super(TestRealNotificationReloadablePipeline, self).setUp() + self.CONF.set_override('refresh_pipeline_cfg', True) + self.CONF.set_override('pipeline_polling_interval', 1) + self.srv = notification.NotificationService() + + @mock.patch('ceilometer.publisher.test.TestPublisher') + def test_notification_pipeline_poller(self, fake_publisher_cls): + fake_publisher_cls.return_value = self.publisher + self.srv.tg = mock.MagicMock() + self.srv.start() + + pipeline_poller_call = mock.call(1, self.srv.refresh_pipeline) + self.assertIn(pipeline_poller_call, + 
self.srv.tg.add_timer.call_args_list)
+
+    @mock.patch('ceilometer.publisher.test.TestPublisher')
+    def test_notification_reloaded_pipeline(self, fake_publisher_cls):
+        fake_publisher_cls.return_value = self.publisher
+
+        pipeline_cfg_file = self.setup_pipeline(['instance'])
+        self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file)
+
+        self.expected_samples = 1
+        self.srv.start()
+
+        notifier = messaging.get_notifier(self.transport,
+                                          "compute.vagrant-precise")
+        notifier.info(context.RequestContext(), 'compute.instance.create.end',
+                      TEST_NOTICE_PAYLOAD)
+
+        start = timeutils.utcnow()
+        while timeutils.delta_seconds(start, timeutils.utcnow()) < 600:
+            if (len(self.publisher.samples) >= self.expected_samples and
+                    len(self.publisher.events) >= self.expected_events):
+                break
+            eventlet.sleep(0)
+
+        self.assertEqual(self.expected_samples, len(self.publisher.samples))
+
+        # Flush publisher samples to test reloading
+        self.publisher.samples = []
+        # Modify the collection targets
+        updated_pipeline_cfg_file = self.setup_pipeline(['vcpus',
+                                                         'disk.root.size'])
+        # Move/re-name the updated pipeline file to the original pipeline
+        # file path as recorded in oslo config
+        shutil.move(updated_pipeline_cfg_file, pipeline_cfg_file)
+
+        self.expected_samples = 2
+        # Random sleep to let the pipeline poller complete the reloading
+        eventlet.sleep(3)
+        # Send message again to verify the reload works
+        notifier = messaging.get_notifier(self.transport,
+                                          "compute.vagrant-precise")
+        notifier.info(context.RequestContext(), 'compute.instance.create.end',
+                      TEST_NOTICE_PAYLOAD)
+
+        start = timeutils.utcnow()
+        while timeutils.delta_seconds(start, timeutils.utcnow()) < 600:
+            if (len(self.publisher.samples) >= self.expected_samples and
+                    len(self.publisher.events) >= self.expected_events):
+                break
+            eventlet.sleep(0)
+
+        self.assertEqual(self.expected_samples, len(self.publisher.samples))
+
+        for sample in self.publisher.samples:
+            self.assertIn(sample.name, ['disk.root.size', 'vcpus'])
+
+
+class TestRealNotification(BaseRealNotification):
+
+    def setUp(self):
+        super(TestRealNotification, self).setUp()
+        self.srv = notification.NotificationService()
+
+    @mock.patch('ceilometer.publisher.test.TestPublisher')
+    def test_notification_service(self, fake_publisher_cls):
+        fake_publisher_cls.return_value = self.publisher
+        self._check_notification_service()
+
+    @mock.patch('ceilometer.publisher.test.TestPublisher')
+    def test_notification_service_error_topic(self, fake_publisher_cls):
+        fake_publisher_cls.return_value = self.publisher
+        self.srv.start()
+        notifier = messaging.get_notifier(self.transport,
+                                          'compute.vagrant-precise')
+        notifier.error(context.RequestContext(), 'compute.instance.error',
+                       TEST_NOTICE_PAYLOAD)
+        start = timeutils.utcnow()
+        while timeutils.delta_seconds(start, timeutils.utcnow()) < 600:
+            if len(self.publisher.events) >= self.expected_events:
+                break
+            eventlet.sleep(0)
+        self.srv.stop()
+        self.assertEqual(self.expected_events, len(self.publisher.events))
+
+    @mock.patch('ceilometer.publisher.test.TestPublisher')
+    def test_notification_disable_non_metrics(self, fake_publisher_cls):
+        self.CONF.set_override("disable_non_metric_meters", True,
+                               group="notification")
+        # instance is not a metric.
we should only get back memory + self.expected_samples = 1 + fake_publisher_cls.return_value = self.publisher + self._check_notification_service() + self.assertEqual('memory', self.publisher.samples[0].name) + + @mock.patch.object(oslo_service.service.Service, 'stop') + def test_notification_service_start_abnormal(self, mocked): + try: + self.srv.stop() + except Exception: + pass + self.assertEqual(1, mocked.call_count) + + +class TestRealNotificationHA(BaseRealNotification): + + def setUp(self): + super(TestRealNotificationHA, self).setUp() + self.CONF.set_override('workload_partitioning', True, + group='notification') + self.srv = notification.NotificationService() + + @mock.patch('ceilometer.publisher.test.TestPublisher') + def test_notification_service(self, fake_publisher_cls): + fake_publisher_cls.return_value = self.publisher + self._check_notification_service() + + def test_reset_listeners_on_refresh(self): + self.srv.start() + self.assertEqual(2, len(self.srv.pipeline_listeners)) + self.srv._refresh_listeners() + self.assertEqual(2, len(self.srv.pipeline_listeners)) + self.srv.stop() + + @mock.patch('oslo_messaging.Notifier.sample') + def test_broadcast_to_relevant_pipes_only(self, mock_notifier): + self.srv.start() + for endpoint in self.srv.listeners[0].dispatcher.endpoints: + if (hasattr(endpoint, 'filter_rule') and + not endpoint.filter_rule.match(None, None, 'nonmatching.end', + None, None)): + continue + endpoint.info(TEST_NOTICE_CTXT, 'compute.vagrant-precise', + 'nonmatching.end', + TEST_NOTICE_PAYLOAD, TEST_NOTICE_METADATA) + self.assertFalse(mock_notifier.called) + for endpoint in self.srv.listeners[0].dispatcher.endpoints: + if (hasattr(endpoint, 'filter_rule') and + not endpoint.filter_rule.match(None, None, + 'compute.instance.create.end', + None, None)): + continue + endpoint.info(TEST_NOTICE_CTXT, 'compute.vagrant-precise', + 'compute.instance.create.end', + TEST_NOTICE_PAYLOAD, TEST_NOTICE_METADATA) + self.assertTrue(mock_notifier.called) + self.assertEqual(3, mock_notifier.call_count) + self.assertEqual('pipeline.event', + mock_notifier.call_args_list[0][1]['event_type']) + self.assertEqual('ceilometer.pipeline', + mock_notifier.call_args_list[1][1]['event_type']) + self.assertEqual('ceilometer.pipeline', + mock_notifier.call_args_list[2][1]['event_type']) + self.srv.stop() + + +class TestRealNotificationMultipleAgents(tests_base.BaseTestCase): + def setup_pipeline(self, transformers): + pipeline = yaml.dump({ + 'sources': [{ + 'name': 'test_pipeline', + 'interval': 5, + 'meters': ['instance', 'memory'], + 'sinks': ['test_sink'] + }], + 'sinks': [{ + 'name': 'test_sink', + 'transformers': transformers, + 'publishers': ['test://'] + }] + }) + if six.PY3: + pipeline = pipeline.encode('utf-8') + + pipeline_cfg_file = fileutils.write_to_tempfile(content=pipeline, + prefix="pipeline", + suffix="yaml") + return pipeline_cfg_file + + def setUp(self): + super(TestRealNotificationMultipleAgents, self).setUp() + self.CONF = self.useFixture(fixture_config.Config()).conf + service.prepare_service(argv=[], config_files=[]) + self.setup_messaging(self.CONF, 'nova') + + pipeline_cfg_file = self.setup_pipeline(['instance', 'memory']) + self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) + self.CONF.set_override("store_events", False, group="notification") + self.CONF.set_override("disable_non_metric_meters", False, + group="notification") + self.CONF.set_override('workload_partitioning', True, + group='notification') + self.publisher = test_publisher.TestPublisher("") 
+ self.publisher2 = test_publisher.TestPublisher("") + + def _check_notifications(self, fake_publisher_cls): + fake_publisher_cls.side_effect = [self.publisher, self.publisher2] + + self.srv = notification.NotificationService() + self.srv2 = notification.NotificationService() + with mock.patch('ceilometer.coordination.PartitionCoordinator' + '._get_members', return_value=['harry', 'lloyd']): + with mock.patch('uuid.uuid4', return_value='harry'): + self.srv.start() + with mock.patch('uuid.uuid4', return_value='lloyd'): + self.srv2.start() + + notifier = messaging.get_notifier(self.transport, + "compute.vagrant-precise") + payload1 = TEST_NOTICE_PAYLOAD.copy() + payload1['instance_id'] = '0' + notifier.info(context.RequestContext(), 'compute.instance.create.end', + payload1) + payload2 = TEST_NOTICE_PAYLOAD.copy() + payload2['instance_id'] = '1' + notifier.info(context.RequestContext(), 'compute.instance.create.end', + payload2) + self.expected_samples = 4 + start = timeutils.utcnow() + with mock.patch('six.moves.builtins.hash', lambda x: int(x)): + while timeutils.delta_seconds(start, timeutils.utcnow()) < 60: + if (len(self.publisher.samples + self.publisher2.samples) >= + self.expected_samples): + break + eventlet.sleep(0) + self.srv.stop() + self.srv2.stop() + + self.assertEqual(2, len(self.publisher.samples)) + self.assertEqual(2, len(self.publisher2.samples)) + self.assertEqual(1, len(set( + s.resource_id for s in self.publisher.samples))) + self.assertEqual(1, len(set( + s.resource_id for s in self.publisher2.samples))) + + @mock.patch('ceilometer.publisher.test.TestPublisher') + def test_multiple_agents_no_transform(self, fake_publisher_cls): + pipeline_cfg_file = self.setup_pipeline([]) + self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) + self._check_notifications(fake_publisher_cls) + + @mock.patch('ceilometer.publisher.test.TestPublisher') + def test_multiple_agents_transform(self, fake_publisher_cls): + pipeline_cfg_file = self.setup_pipeline( + [{ + 'name': 'unit_conversion', + 'parameters': { + 'source': {}, + 'target': {'name': 'cpu_mins', + 'unit': 'min', + 'scale': 'volume'}, + } + }]) + self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) + self._check_notifications(fake_publisher_cls) + + @mock.patch('ceilometer.publisher.test.TestPublisher') + def test_multiple_agents_multiple_transform(self, fake_publisher_cls): + pipeline_cfg_file = self.setup_pipeline( + [{ + 'name': 'unit_conversion', + 'parameters': { + 'source': {}, + 'target': {'name': 'cpu_mins', + 'unit': 'min', + 'scale': 'volume'}, + } + }, { + 'name': 'unit_conversion', + 'parameters': { + 'source': {}, + 'target': {'name': 'cpu_mins', + 'unit': 'min', + 'scale': 'volume'}, + } + }]) + self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) + self._check_notifications(fake_publisher_cls) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/gabbi/fixtures.py ceilometer-5.0.0~b3/ceilometer/tests/gabbi/fixtures.py --- ceilometer-5.0.0~b2/ceilometer/tests/gabbi/fixtures.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/gabbi/fixtures.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,153 +0,0 @@ -# -# Copyright 2015 Red Hat. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Fixtures used during Gabbi-based test runs.""" - -import datetime -import os -import random -from unittest import case -import uuid - -from gabbi import fixture -from oslo_config import fixture as fixture_config -from oslo_policy import opts - -from ceilometer.event.storage import models -from ceilometer.publisher import utils -from ceilometer import sample -from ceilometer import service -from ceilometer import storage - - -# TODO(chdent): For now only MongoDB is supported, because of easy -# database name handling and intentional focus on the API, not the -# data store. -ENGINES = ['MONGODB'] - - -class ConfigFixture(fixture.GabbiFixture): - """Establish the relevant configuration for a test run.""" - - def start_fixture(self): - """Set up config.""" - - self.conf = None - - # Determine the database connection. - db_url = None - for engine in ENGINES: - try: - db_url = os.environ['CEILOMETER_TEST_%s_URL' % engine] - except KeyError: - pass - if db_url is None: - raise case.SkipTest('No database connection configured') - - service.prepare_service([]) - conf = fixture_config.Config().conf - self.conf = conf - opts.set_defaults(self.conf) - conf.import_group('api', 'ceilometer.api.controllers.v2.root') - conf.import_opt('store_events', 'ceilometer.notification', - group='notification') - conf.set_override('policy_file', - os.path.abspath('etc/ceilometer/policy.json'), - group='oslo_policy') - - # A special pipeline is required to use the direct publisher. 
- conf.set_override('pipeline_cfg_file', - 'etc/ceilometer/gabbi_pipeline.yaml') - - database_name = '%s-%s' % (db_url, str(uuid.uuid4())) - conf.set_override('connection', database_name, group='database') - conf.set_override('metering_connection', '', group='database') - conf.set_override('event_connection', '', group='database') - conf.set_override('alarm_connection', '', group='database') - - conf.set_override('pecan_debug', True, group='api') - conf.set_override('gnocchi_is_enabled', False, group='api') - conf.set_override('aodh_is_enabled', False, group='api') - - conf.set_override('store_events', True, group='notification') - - def stop_fixture(self): - """Reset the config and remove data.""" - if self.conf: - storage.get_connection_from_config(self.conf).clear() - self.conf.reset() - - -class SampleDataFixture(fixture.GabbiFixture): - """Instantiate some sample data for use in testing.""" - - def start_fixture(self): - """Create some samples.""" - conf = fixture_config.Config().conf - self.conn = storage.get_connection_from_config(conf) - timestamp = datetime.datetime.utcnow() - project_id = str(uuid.uuid4()) - self.source = str(uuid.uuid4()) - resource_metadata = {'farmed_by': 'nancy'} - - for name in ['cow', 'pig', 'sheep']: - resource_metadata.update({'breed': name}), - c = sample.Sample(name='livestock', - type='gauge', - unit='head', - volume=int(10 * random.random()), - user_id='farmerjon', - project_id=project_id, - resource_id=project_id, - timestamp=timestamp, - resource_metadata=resource_metadata, - source=self.source) - data = utils.meter_message_from_counter( - c, conf.publisher.telemetry_secret) - self.conn.record_metering_data(data) - - def stop_fixture(self): - """Destroy the samples.""" - # NOTE(chdent): print here for sake of info during testing. - # This will go away eventually. - print('resource', - self.conn.db.resource.remove({'source': self.source})) - print('meter', self.conn.db.meter.remove({'source': self.source})) - - -class EventDataFixture(fixture.GabbiFixture): - """Instantiate some sample event data for use in testing.""" - - def start_fixture(self): - """Create some events.""" - conf = fixture_config.Config().conf - self.conn = storage.get_connection_from_config(conf, 'event') - events = [] - name_list = ['chocolate.chip', 'peanut.butter', 'sugar'] - for ix, name in enumerate(name_list): - timestamp = datetime.datetime.utcnow() - message_id = 'fea1b15a-1d47-4175-85a5-a4bb2c72924{}'.format(ix) - traits = [models.Trait('type', 1, name), - models.Trait('ate', 2, ix)] - event = models.Event(message_id, - 'cookies_{}'.format(name), - timestamp, - traits, {'nested': {'inside': 'value'}}) - events.append(event) - self.conn.record_events(events) - - def stop_fixture(self): - """Destroy the events.""" - self.conn.db.event.remove({'event_type': '/^cookies_/'}) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/gabbi/gabbits/alarms.yaml ceilometer-5.0.0~b3/ceilometer/tests/gabbi/gabbits/alarms.yaml --- ceilometer-5.0.0~b2/ceilometer/tests/gabbi/gabbits/alarms.yaml 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/gabbi/gabbits/alarms.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,138 +0,0 @@ -# Requests to cover the basic endpoints for alarms. 
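Each gabbi fixture removed above follows the same lifecycle: a gabbi.fixture.GabbiFixture subclass whose start_fixture runs once before the YAML suite that declares it, and whose stop_fixture runs once after it, pass or fail. A minimal hypothetical sketch of that shape:

    from gabbi import fixture


    class ExampleDataFixture(fixture.GabbiFixture):
        """Hypothetical fixture showing the start/stop lifecycle gabbi uses."""

        def start_fixture(self):
            # Runs once, before any YAML test that declares this fixture.
            self.state = {'seeded': True}

        def stop_fixture(self):
            # Runs once afterwards, even when the YAML tests fail.
            self.state = None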
- -fixtures: - - ConfigFixture - -tests: -- name: list alarms none - desc: Lists alarms, none yet exist - url: /v2/alarms - method: GET - response_strings: - - "[]" - -- name: try to PUT an alarm - desc: what does PUT do - url: /v2/alarms - method: PUT - request_headers: - content-type: application/json - data: - name: added_alarm_defaults2 - type: threshold - threshold_rule: - meter_name: ameter - threshold: 300.0 - status: 405 - response_headers: - allow: GET, POST - -# TODO(chdent): A POST should return a location header. -- name: createAlarm - xfail: true - desc: Creates an alarm. - url: /v2/alarms - method: POST - request_headers: - content-type: application/json - data: - name: added_alarm_defaults - type: threshold - threshold_rule: - meter_name: ameter - threshold: 300.0 - status: 201 - response_headers: - location: /$SCHEME://$NETLOC/v2/alarms/ - content-type: application/json; charset=UTF-8 - response_json_paths: - $.severity: low - $.threshold_rule.threshold: 300.0 - $.threshold_rule.comparison_operator: eq - -- name: showAlarm - desc: Shows information for a specified alarm. - url: /v2/alarms/$RESPONSE['$.alarm_id'] - method: GET - response_json_paths: - $.severity: low - $.alarm_id: $RESPONSE['$.alarm_id'] - $.threshold_rule.threshold: 300.0 - $.threshold_rule.comparison_operator: eq - response_headers: - content-type: application/json; charset=UTF-8 - -- name: updateAlarm - desc: Updates a specified alarm. - url: /v2/alarms/$RESPONSE['$.alarm_id'] - method: PUT - request_headers: - content-type: application/json - data: - name: added_alarm_defaults - type: threshold - severity: moderate - threshold_rule: - meter_name: ameter - threshold: 200.0 -# TODO(chdent): why do we have a response, why not status: 204? -# status: 204 - response_json_paths: - $.threshold_rule.threshold: 200.0 - $.severity: moderate - $.state: insufficient data - -- name: showAlarmHistory - desc: Assembles the history for a specified alarm. - url: /v2/alarms/$RESPONSE['$.alarm_id']/history?q.field=type&q.op=eq&q.value=rule%20change - method: GET - response_json_paths: - $[0].type: rule change - -- name: updateAlarmState - desc: Sets the state of a specified alarm. - url: /v2/alarms/$RESPONSE['$[0].alarm_id']/state - request_headers: - content-type: application/json - data: '"alarm"' - method: PUT -# TODO(chdent): really? Of what possible use is this? - response_json_paths: - $: alarm - -# Get a list of alarms so we can extract an id for the next test -- name: list alarms for data - desc: Lists alarms, only one - url: /v2/alarms - method: GET - response_json_paths: - $[0].name: added_alarm_defaults - -- name: showAlarmState - desc: Gets the state of a specified alarm. - url: /v2/alarms/$RESPONSE['$[0].alarm_id']/state - method: GET - response_headers: - content-type: application/json; charset=UTF-8 - response_json_paths: - $: alarm - -- name: list alarms one - desc: Lists alarms, only one - url: /v2/alarms - method: GET - response_json_paths: - $[0].name: added_alarm_defaults - -- name: deleteAlarm - desc: Deletes a specified alarm. 
- url: /v2/alarms/$RESPONSE['$[0].alarm_id'] - method: DELETE - status: 204 - -- name: list alarms none end - desc: Lists alarms, none now exist - url: /v2/alarms - method: GET - response_strings: - - "[]" diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/gabbi/gabbits/api_events_no_data.yaml ceilometer-5.0.0~b3/ceilometer/tests/gabbi/gabbits/api_events_no_data.yaml --- ceilometer-5.0.0~b2/ceilometer/tests/gabbi/gabbits/api_events_no_data.yaml 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/gabbi/gabbits/api_events_no_data.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,142 +0,0 @@ -# These test run against the Events API with no data preloaded into the -# datastore. This allows us to verify that requests are still processed -# normally even if data is missing for that endpoint. -fixtures: -- ConfigFixture - -tests: - -# this attempts to get all the events and expects an empty list back -- name: get all events - url: /v2/events - response_headers: - content-type: application/json; charset=UTF-8 - content-location: $SCHEME://$NETLOC/v2/events - response_strings: - - "[]" - -# this attempts to get all the events with invalid parameters and expects a 400 -- name: get events with bad params - url: /v2/events?bad_Stuff_here - status: 400 - -# this attempts to query the events with the correct parameterized query syntax -# and expects an empty list -- name: get events that match query - url: /v2/events?q.field=event_type&q.op=eq&q.type=string&q.value=cookies_chocolate.chip - response_headers: - content-type: application/json; charset=UTF-8 - content-location: $SCHEME://$NETLOC/v2/events?q.field=event_type&q.op=eq&q.type=string&q.value=cookies_chocolate.chip - response_strings: - - "[]" - -# this attempts to query the events with the correct data query syntax and -# expects an empty list -- name: get events that match query via request data - url: /v2/events - request_headers: - content-type: application/json; charset=UTF-8 - data: - q: - - field: event_type - op: eq - type: string - value: cookies_chocolate.chip - response_headers: - content-type: application/json; charset=UTF-8 - content-location: $SCHEME://$NETLOC/v2/events - response_strings: - - "[]" - -# this attempts to query the events with the correct parameterized query syntax -# but a bad field name and expects an empty list -- name: get events that match bad query - url: /v2/events?q.field=bad_field&q.op=eq&q.type=string&q.value=cookies_chocolate.chip - response_headers: - content-type: application/json; charset=UTF-8 - content-location: $SCHEME://$NETLOC/v2/events?q.field=bad_field&q.op=eq&q.type=string&q.value=cookies_chocolate.chip - response_strings: - - "[]" - -# this attempts to query the events with the correct data query syntax and -# a bad field name and expects an empty list -- name: get events that match bad query via request data - url: /v2/events - request_headers: - content-type: application/json; charset=UTF-8 - data: - q: - - field: bad_field - op: eq - type: string - value: cookies_chocolate.chip - response_headers: - content-type: application/json; charset=UTF-8 - content-location: $SCHEME://$NETLOC/v2/events - response_strings: - - "[]" - -# this attempts to query the events with the wrong data query syntax missing the -# q object but supplying the field list and a bad field name and expects a 400 -- name: get events that match bad query via request data malformed list - desc: https://bugs.launchpad.net/ceilometer/+bug/1423634 - url: /v2/events - request_headers: - content-type: 
application/json; charset=UTF-8 - data: - - field: bad_field - op: eq - type: string - value: cookies_chocolate.chip - xfail: True - status: 400 - -# this attempts to query the events with the wrong data query syntax missing the -# q object but supplying the field list along with a bad content-type. Should -# return a 400 -- name: get events that match bad query via request data wrong type - desc: https://bugs.launchpad.net/ceilometer/+bug/1423634 and https://bugs.launchpad.net/ceilometer/+bug/1424642 - url: /v2/events - request_headers: - content-type: text/plain - data: - "field: bad_field op: eq type: string value: cookies_chocolate.chip xfail: True" - xfail: True - status: 400 - -# Get a single event by message_id no data is present so should return a 404 -- name: get a single event - url: /v2/events/fea1b15a-1d47-4175-85a5-a4bb2c729240 - status: 404 - -# Get all the event types should return an empty list -- name: get all event types - url: /v2/event_types - response_headers: - content-type: application/json; charset=UTF-8 - content-location: $SCHEME://$NETLOC/v2/event_types - response_strings: - - "[]" - -# Get a single event type by name, this API is unused and should return a 404 -- name: get event types for good event_type unused api - url: /v2/event_types/cookies_chocolate.chip - status: 404 - -# Get all traits for an event type should return an empty list -- name: get all traits for event type - url: /v2/event_types/cookies_chocolate.chip/traits - response_headers: - content-type: application/json; charset=UTF-8 - content-location: $SCHEME://$NETLOC/v2/event_types/cookies_chocolate.chip/traits - response_strings: - - "[]" - -# Get all traits named ate for an event type should return an empty list -- name: get all traits named ate for event type - url: /v2/event_types/cookies_chocolate.chip/traits/ate - response_headers: - content-type: application/json; charset=UTF-8 - content-location: $SCHEME://$NETLOC/v2/event_types/cookies_chocolate.chip/traits/ate - response_strings: - - "[]" diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/gabbi/gabbits/api_events_with_data.yaml ceilometer-5.0.0~b3/ceilometer/tests/gabbi/gabbits/api_events_with_data.yaml --- ceilometer-5.0.0~b2/ceilometer/tests/gabbi/gabbits/api_events_with_data.yaml 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/gabbi/gabbits/api_events_with_data.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,191 +0,0 @@ -# These test run against the Events API with data preloaded into the datastore. 
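The parameterized-query convention exercised above (one q.field/q.op/q.type/q.value tuple per term) also works outside gabbi. A minimal requests-based sketch; the localhost endpoint and the use of the requests library are my assumptions, not part of these files:

    import requests

    BASE = "http://localhost:8777"  # assumed Ceilometer API endpoint

    # One query term, spelled exactly as in the gabbi URLs above.
    resp = requests.get(
        BASE + "/v2/events",
        params={
            "q.field": "event_type",
            "q.op": "eq",
            "q.type": "string",
            "q.value": "cookies_chocolate.chip",
        },
    )
    assert resp.status_code == 200
    assert resp.headers["content-type"].startswith("application/json")
    print(resp.json())  # [] against an empty datastore, as the tests above expect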
-fixtures: -- ConfigFixture -- EventDataFixture - -tests: - -# this attempts to get all the events and checks to make sure they are valid -- name: get all events - url: /v2/events - response_headers: - content-type: application/json; charset=UTF-8 - content-location: $SCHEME://$NETLOC/v2/events - response_json_paths: - $.[0].event_type: cookies_chocolate.chip - $.[0].traits.[0].value: chocolate.chip - $.[0].traits.[1].value: '0' - $.[0].raw.nested.inside: value - $.[1].event_type: cookies_peanut.butter - $.[1].traits.[0].name: type - $.[1].traits.[1].name: ate - $.[1].raw.nested.inside: value - $.[2].event_type: cookies_sugar - $.[2].traits.[0].type: string - $.[2].traits.[1].type: integer - $.[2].raw.nested.inside: value - -# this attempts to get all the events with invalid parameters and expects a 400 -- name: get events with bad params - url: /v2/events?bad_Stuff_here - status: 400 - -# this attempts to query the events with the correct parameterized query syntax -# and expects a matching event -- name: get events that match query - url: /v2/events?q.field=event_type&q.op=eq&q.type=string&q.value=cookies_chocolate.chip - response_headers: - content-type: application/json; charset=UTF-8 - content-location: $SCHEME://$NETLOC/v2/events?q.field=event_type&q.op=eq&q.type=string&q.value=cookies_chocolate.chip - response_json_paths: - $.[0].event_type: cookies_chocolate.chip - $.[0].traits.[0].value: chocolate.chip - -# this attempts to query the events with the correct data query syntax and -# expects a matching event -- name: get events that match query via data - url: /v2/events - request_headers: - content-type: application/json; charset=UTF-8 - data: - q: - - field: event_type - op: eq - type: string - value: cookies_chocolate.chip - response_headers: - content-type: application/json; charset=UTF-8 - content-location: $SCHEME://$NETLOC/v2/events - response_json_paths: - $.[0].event_type: cookies_chocolate.chip - $.[0].traits.[0].value: chocolate.chip - -# this attempts to query the events with the correct parameterized query syntax -# but a bad field name and expects an empty list -- name: get events that match bad query - url: /v2/events?q.field=bad_field&q.op=eq&q.type=string&q.value=cookies_chocolate.chip - response_headers: - content-type: application/json; charset=UTF-8 - content-location: $SCHEME://$NETLOC/v2/events?q.field=bad_field&q.op=eq&q.type=string&q.value=cookies_chocolate.chip - response_strings: - - "[]" - -# this attempts to query the events with the correct data query syntax and -# a bad field name and expects an empty list -- name: get events that match bad query via data - url: /v2/events - request_headers: - content-type: application/json; charset=UTF-8 - data: - q: - - field: bad_field - op: eq - type: string - value: cookies_chocolate.chip - response_headers: - content-type: application/json; charset=UTF-8 - content-location: $SCHEME://$NETLOC/v2/events - response_strings: - - "[]" - -# this attempts to query the events with the wrong data query syntax missing the -# q object but supplying the field list and a bad field name and expects a 400 -- name: get events that match bad query via data list - desc: https://bugs.launchpad.net/ceilometer/+bug/1423634 - url: /v2/events - request_headers: - content-type: application/json; charset=UTF-8 - data: - - field: bad_field - op: eq - type: string - value: cookies_chocolate.chip - xfail: True - status: 400 - -# Get a single event by message_id should return an event -- name: get a single event - url: 
/v2/events/fea1b15a-1d47-4175-85a5-a4bb2c729240 - response_headers: - content-type: application/json; charset=UTF-8 - content-location: $SCHEME://$NETLOC/v2/events/fea1b15a-1d47-4175-85a5-a4bb2c729240 - response_json_paths: - $.event_type: cookies_chocolate.chip - $.traits.[0].value: chocolate.chip - $.traits.[1].value: '0' - -# Get a single event by message_id no data is present so should return a 404 -- name: get a single event that does not exist - url: /v2/events/bad-id - status: 404 - -# Get all the event types should return a list of event types -- name: get all event types - url: /v2/event_types - response_headers: - content-type: application/json; charset=UTF-8 - content-location: $SCHEME://$NETLOC/v2/event_types - response_strings: - - cookies_chocolate.chip - - cookies_peanut.butter - - cookies_sugar - -# Get a single event type by valid name, this API is unused and should return a 404 -- name: get event types for good event_type unused api - url: /v2/event_types/cookies_chocolate.chip - status: 404 - -# Get a single event type by invalid name, this API is unused and should return a 404 -- name: get event types for bad event_type unused api - url: /v2/event_types/bad_event_type - status: 404 - -# Get all traits for a valid event type should return an list of traits -- name: get all traits for event type - url: /v2/event_types/cookies_chocolate.chip/traits - response_headers: - content-type: application/json; charset=UTF-8 - content-location: $SCHEME://$NETLOC/v2/event_types/cookies_chocolate.chip/traits - response_json_paths: - $.[0].type: string - $.[1].name: ate - -# Get all traits for an invalid event type should return an empty list -- name: get all traits names for event type bad event type - url: /v2/event_types/bad_event_type/traits - response_headers: - content-type: application/json; charset=UTF-8 - content-location: $SCHEME://$NETLOC/v2/event_types/bad_event_type/traits - response_strings: - - "[]" - -# Get all traits of type ate for a valid event type should return an list of -# traits -- name: get all traits of type ate for event type - url: /v2/event_types/cookies_chocolate.chip/traits/ate - response_headers: - content-type: application/json; charset=UTF-8 - content-location: $SCHEME://$NETLOC/v2/event_types/cookies_chocolate.chip/traits/ate - response_json_paths: - $.[0].name: ate - $.[0].value: '0' - -# Get all traits of type ate for a invalid event type should return an empty -# list -- name: get all traits of type for event type bad event type - url: /v2/event_types/bad_event_type/traits/ate - response_headers: - content-type: application/json; charset=UTF-8 - content-location: $SCHEME://$NETLOC/v2/event_types/bad_event_type/traits/ate - response_strings: - - "[]" - -# Get all traits of type bad_trait_name for a valid event type should return an -# empty list -- name: get all traits of type instances for event type bad trait name - url: /v2/event_types/cookies_chocolate.chip/traits/bad_trait_name - response_headers: - content-type: application/json; charset=UTF-8 - content-location: $SCHEME://$NETLOC/v2/event_types/cookies_chocolate.chip/traits/bad_trait_name - response_strings: - - "[]" diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/gabbi/gabbits/basic.yaml ceilometer-5.0.0~b3/ceilometer/tests/gabbi/gabbits/basic.yaml --- ceilometer-5.0.0~b2/ceilometer/tests/gabbi/gabbits/basic.yaml 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/gabbi/gabbits/basic.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -# -# Some simple tests 
just to confirm that the system works. -# -fixtures: - - ConfigFixture - -tests: - -# Root gives us some information on where to go from here. -- name: quick root check - url: / - response_headers: - content-type: application/json; charset=UTF-8 - response_strings: - - '"base": "application/json"' - response_json_paths: - versions.values.[0].status: stable - versions.values.[0].media-types.[0].base: application/json - -# NOTE(chdent): Ideally since / has a links ref to /v2, /v2 ought not 404! -- name: v2 visit - desc: this demonstrates a bug in the info in / - url: $RESPONSE['versions.values.[0].links.[0].href'] - status: 404 diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/gabbi/gabbits/capabilities.yaml ceilometer-5.0.0~b3/ceilometer/tests/gabbi/gabbits/capabilities.yaml --- ceilometer-5.0.0~b2/ceilometer/tests/gabbi/gabbits/capabilities.yaml 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/gabbi/gabbits/capabilities.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,15 +0,0 @@ -# -# Explore the capabilities API -# -fixtures: - - ConfigFixture - -tests: - -- name: get capabilities - desc: retrieve capabilities for the mongo store - url: /v2/capabilities - response_json_paths: - $.alarm_storage.['storage:production_ready']: true - $.event_storage.['storage:production_ready']: true - $.storage.['storage:production_ready']: true diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/gabbi/gabbits/clean-samples.yaml ceilometer-5.0.0~b3/ceilometer/tests/gabbi/gabbits/clean-samples.yaml --- ceilometer-5.0.0~b2/ceilometer/tests/gabbi/gabbits/clean-samples.yaml 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/gabbi/gabbits/clean-samples.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,104 +0,0 @@ -# Post a simple sample, sir, and the retrieve it in various ways. -fixtures: - - ConfigFixture - -tests: - -# POST one sample and verify its existence. 
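The clean-samples tests below POST a JSON list of samples with ?direct=True and expect a 201. An equivalent standalone sketch, assuming a running API at localhost:8777 (endpoint and requests usage are my assumptions; the payload mirrors the test data):

    import requests

    BASE = "http://localhost:8777"  # assumed Ceilometer API endpoint

    sample = {
        "counter_name": "apples",
        "counter_type": "gauge",
        "counter_unit": "instance",
        "counter_volume": 1,
        "project_id": "35b17138-b364-4e6a-a131-8f3099c5be68",
        "user_id": "efd87807-12d2-4b38-9c70-5f5c2ac427ff",
        "resource_id": "bd9431c1-8d69-4ad3-803a-8d4a6b89fd36",
        "resource_metadata": {"name1": "value1", "name2": "value2"},
    }
    # The endpoint takes a JSON *list* of samples; direct=True asks the API
    # to write to storage directly instead of publishing on the bus.
    resp = requests.post(BASE + "/v2/meters/apples",
                         params={"direct": "True"}, json=[sample])
    assert resp.status_code == 201
    assert resp.json()[0]["counter_name"] == "apples"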
- - - name: post sample for meter - desc: post a single sample - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: | - [ - { - "counter_name": "apples", - "project_id": "35b17138-b364-4e6a-a131-8f3099c5be68", - "user_id": "efd87807-12d2-4b38-9c70-5f5c2ac427ff", - "counter_unit": "instance", - "counter_volume": 1, - "resource_id": "bd9431c1-8d69-4ad3-803a-8d4a6b89fd36", - "resource_metadata": { - "name2": "value2", - "name1": "value1" - }, - "counter_type": "gauge" - } - ] - - response_json_paths: - $.[0].counter_name: apples - status: 201 - response_headers: - content-type: application/json; charset=UTF-8 - -# When POSTing a sample perhaps we should get back a location header -# with the URI of the posted sample - - - name: post a sample expect location - desc: https://bugs.launchpad.net/ceilometer/+bug/1426426 - xfail: true - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: - - counter_name: apples - project_id: 35b17138-b364-4e6a-a131-8f3099c5be68 - user_id: efd87807-12d2-4b38-9c70-5f5c2ac427ff - counter_unit: instance - counter_volume: 1 - resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 - resource_metadata: - name2: value2 - name1: value1 - counter_type: gauge - response_headers: - location: /$SCHEME://$NETLOC/ - -# GET all the samples created for the apples meter - - - name: get samples for meter - desc: get all the samples at that meter - url: /v2/meters/apples - response_json_paths: - $.[0].counter_name: apples - $.[0].counter_volume: 1 - $.[0].resource_metadata.name2: value2 - -# POSTing a sample to a meter will implicitly create a resource - - - name: get resources - desc: get the resources that exist because of the sample - url: /v2/resources - response_json_paths: - $.[0].metadata.name2: value2 - -# NOTE(chdent): We assume that the first item in links is self. -# Need to determine how to express the more correct JSONPath here -# (if possible). - - - name: get resource - desc: get just one of those resources via self - url: $RESPONSE['$[0].links[0].href'] - response_json_paths: - $.metadata.name2: value2 - -# GET the created samples - - - name: get samples - desc: get all the created samples - url: /v2/samples - response_json_paths: - $.[0].metadata.name2: value2 - $.[0].meter: apples - - - name: get one sample - desc: get the one sample that exists - url: /v2/samples/$RESPONSE['$[0].id'] - response_json_paths: - $.metadata.name2: value2 - $.meter: apples diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/gabbi/gabbits/fixture-samples.yaml ceilometer-5.0.0~b3/ceilometer/tests/gabbi/gabbits/fixture-samples.yaml --- ceilometer-5.0.0~b2/ceilometer/tests/gabbi/gabbits/fixture-samples.yaml 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/gabbi/gabbits/fixture-samples.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,18 +0,0 @@ -# -# Demonstrate a simple sample fixture. 
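ConfigFixture and SampleDataFixture name classes in ceilometer/tests/gabbi/fixtures.py. For readers unfamiliar with the mechanism, a minimal sketch of a gabbi fixture; the class and state here are illustrative, not the real fixtures:

    from gabbi import fixture

    class MinimalDataFixture(fixture.GabbiFixture):
        """Set up shared state once per YAML file, tear it down after."""

        def start_fixture(self):
            # Called before the first test in the file runs; the real
            # SampleDataFixture loads the 'livestock' samples here.
            self.loaded = True

        def stop_fixture(self):
            # Called after the last test in the file, even on failure.
            self.loaded = False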
-# -fixtures: - - ConfigFixture - - SampleDataFixture - -tests: -- name: get fixture samples - desc: get all the samples at livestock - url: /v2/meters/livestock - response_json_paths: - $.[0].counter_name: livestock - $.[1].counter_name: livestock - $.[2].counter_name: livestock - $.[2].user_id: farmerjon - $.[0].resource_metadata.breed: cow - $.[1].resource_metadata.farmed_by: nancy diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/gabbi/gabbits/meters.yaml ceilometer-5.0.0~b3/ceilometer/tests/gabbi/gabbits/meters.yaml --- ceilometer-5.0.0~b2/ceilometer/tests/gabbi/gabbits/meters.yaml 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/gabbi/gabbits/meters.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,361 +0,0 @@ -# -# Tests to explore and cover the /v2/meters section of the -# Ceilometer API. -# - -fixtures: - - ConfigFixture - -tests: - -# Generic HTTP health explorations of all meters. - - - name: empty meters list - url: /v2/meters - response_headers: - content-type: /application/json/ - response_strings: - - "[]" - - - name: meters list bad accept - url: /v2/meters - request_headers: - accept: text/plain - status: 406 - - - name: meters list bad method - url: /v2/meters - method: POST - status: 405 - response_headers: - allow: GET - - - name: try to delete meters - url: /v2/meters - method: DELETE - status: 405 - response_headers: - allow: GET - -# Generic HTTP health explorations of single meter. - - - name: get non exist meter - url: /v2/meters/noexist - response_strings: - - "[]" - - - name: meter bad accept - url: /v2/meters/noexist?direct=True - request_headers: - accept: text/plain - status: 406 - - - name: meter delete noexist - url: /v2/meters/noexist - method: DELETE - status: "404 || 405" - - - name: post meter no data - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: "" - status: 400 - - - name: post meter error is JSON - desc: https://bugs.launchpad.net/ceilometer/+bug/1426483 - xfail: true - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: "" - status: 400 - response_headers: - content-type: /application/json/ - response_json_paths: - error_message: - faulstring: 'Missing argument: "samples"' - - - name: post meter bad content-type - desc: https://bugs.launchpad.net/wsme/+bug/1419110 - xfail: true - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: text/plain - data: hello - status: 400 - - - name: post bad samples to meter - desc: https://bugs.launchpad.net/ceilometer/+bug/1428185 - xfail: true - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: - samples: - - red - - blue - - yellow - status: 400 - -# POST variations on a malformed sample - - - name: post limited counter to meter - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: - - counter_unit: instance - counter_volume: 1 - resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 - status: 400 - response_strings: - - "Invalid input for field/attribute counter_name" - - - name: post mismatched counter name to meter - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: - - counter_name: cars - counter_type: gauge - counter_unit: instance - counter_volume: 1 - resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 - status: 400 - response_strings: - - "Invalid input for 
field/attribute counter_name" - - "should be apples" - - - name: post counter no resource to meter - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: - - counter_name: apples - counter_type: gauge - counter_unit: instance - counter_volume: 1 - status: 400 - response_strings: - - "Invalid input for field/attribute resource_id" - - "Mandatory field missing." - - - name: post counter bad type to meter - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: - - counter_name: apples - counter_type: elevation - counter_unit: instance - counter_volume: 1 - resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 - status: 400 - response_strings: - - "Invalid input for field/attribute counter_type." - - "The counter type must be: gauge, delta, cumulative" - -# Manipulate samples - - - name: post counter to meter - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: - - counter_name: apples - counter_type: gauge - counter_unit: instance - counter_volume: 1 - resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 - status: 201 - - - name: list apple samples - url: /v2/meters/apples - response_json_paths: - $[0].counter_volume: 1.0 - $[0].counter_name: apples - $[0].resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 - - - name: list meters - url: /v2/meters - response_json_paths: - $[0].name: apples - $[0].resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 - $[0].type: gauge - $[-1].name: apples - - - name: negative limit on meter list - url: /v2/meters/apples?limit=-5 - status: 400 - response_strings: - - Limit must be positive - - - name: nan limit on meter list - url: /v2/meters/apples?limit=NaN - status: 400 - response_strings: - - unable to convert to int - - - name: post counter to meter different resource - url: /v2/meters/apples?direct=True - method: POST - status: 201 - request_headers: - content-type: application/json - data: - - counter_name: apples - counter_type: gauge - counter_unit: instance - counter_volume: 2 - resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa - - - name: query for resource - url: /v2/meters/apples?q.field=resource_id&q.value=aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa&q.op=eq - response_json_paths: - $[0].resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa - $[-1].resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa - -# Explore posting samples with less than perfect data. 
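The malformed-sample tests above and the less-than-perfect-data tests below both rely on the API rejecting bad input with a 400 and a descriptive error string. A standalone sketch of the bad-counter-type case, with the endpoint again assumed:

    import requests

    BASE = "http://localhost:8777"  # assumed Ceilometer API endpoint

    bad = [{
        "counter_name": "apples",
        "counter_type": "elevation",  # invalid: must be gauge, delta or cumulative
        "counter_unit": "instance",
        "counter_volume": 1,
        "resource_id": "bd9431c1-8d69-4ad3-803a-8d4a6b89fd36",
    }]
    resp = requests.post(BASE + "/v2/meters/apples",
                         params={"direct": "True"}, json=bad)
    assert resp.status_code == 400
    assert "The counter type must be: gauge, delta, cumulative" in resp.text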
- - - name: post counter with bad timestamp - desc: https://bugs.launchpad.net/wsme/+bug/1428624 - xfail: true - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: - - counter_name: apples - counter_type: gauge - counter_unit: instance - counter_volume: 3 - resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa - timestamp: "2013-01-bad 23:23:20" - - - name: post counter with good timestamp - url: /v2/meters/apples?direct=True - method: POST - status: 201 - request_headers: - content-type: application/json - data: - - counter_name: apples - counter_type: gauge - counter_unit: instance - counter_volume: 3 - resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa - timestamp: "2013-01-01 23:23:20" - - - name: post counter with wrong metadata - desc: https://bugs.launchpad.net/ceilometer/+bug/1428628 - xfail: true - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: - - counter_name: apples - counter_type: gauge - counter_unit: instance - counter_volume: 3 - resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa - timestamp: "2013-01-01 23:23:20" - resource_metadata: "a string" - - - name: post counter with empty metadata - url: /v2/meters/apples?direct=True - method: POST - status: 201 - request_headers: - content-type: application/json - data: - - counter_name: apples - counter_type: gauge - counter_unit: instance - counter_volume: 3 - resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa - timestamp: "2013-01-01 23:23:20" - resource_metadata: {} - -# Statistics - - - name: get sample statistics - url: /v2/meters/apples/statistics - response_json_paths: - $[0].groupby: null - $[0].unit: instance - $[0].sum: 9.0 - $[0].min: 1.0 - $[0].max: 3.0 - $[0].count: 4 - - - name: get incorrectly grouped sample statistics - url: /v2/meters/apples/statistics?groupby=house_id - status: 400 - response_strings: - - Invalid groupby fields - - - name: get grouped sample statistics - url: /v2/meters/apples/statistics?groupby=resource_id - response_json_paths: - $[1].max: 3.0 - $[0].max: 1.0 - - - name: get sample statistics bad period - url: /v2/meters/apples/statistics?period=seven - status: 400 - response_strings: - - unable to convert to int - - - name: get sample statistics negative period - url: /v2/meters/apples/statistics?period=-7 - status: 400 - response_strings: - - Period must be positive. 
- - - name: get sample statistics 600 period - url: /v2/meters/apples/statistics?period=600 - response_json_paths: - $[0].period: 600 - - - name: get sample statistics time limit not time - url: /v2/meters/apples/statistics?q.field=timestamp&q.op=gt&q.value=Remember%20Remember - status: 400 - response_strings: - - invalid timestamp format - - - name: get sample statistics time limit gt - url: /v2/meters/apples/statistics?q.field=timestamp&q.op=gt&q.value=2014-01-01 - response_json_paths: - $[0].count: 2 - - - name: get sample statistics time limit lt - url: /v2/meters/apples/statistics?q.field=timestamp&q.op=lt&q.value=2014-01-01 - response_json_paths: - $[0].count: 2 - - - name: get sample statistics time limit bounded - url: /v2/meters/apples/statistics?q.field=timestamp&q.op=gt&q.value=2013-06-01&q.field=timestamp&q.op=lt&q.value=2014-01-01 - response_strings: - - "[]" - - - name: get sample statistics select aggregate bad format - desc: https://bugs.launchpad.net/wsme/+bug/1428658 - xfail: true - url: /v2/meters/apples/statistics?aggregate=max - status: 400 - - - name: get sample statistics select aggregate - url: /v2/meters/apples/statistics?aggregate.func=max - response_json_paths: - $[0].aggregate.max: 3.0 diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/gabbi/gabbits/resources-empty.yaml ceilometer-5.0.0~b3/ceilometer/tests/gabbi/gabbits/resources-empty.yaml --- ceilometer-5.0.0~b2/ceilometer/tests/gabbi/gabbits/resources-empty.yaml 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/gabbi/gabbits/resources-empty.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,59 +0,0 @@ -# -# Explore and cover resources API with gabbi tests when there are no -# resources. -# - -fixtures: - - ConfigFixture - -tests: - -# Check for a list of resources, modifying the request in various -# ways. 
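Two behaviors recur in the resource tests below: content negotiation (an unsatisfiable Accept header gets a 406) and query validation (an unrecognized q.field gets a 400). A requests-based sketch of both, with the endpoint assumed as before:

    import requests

    BASE = "http://localhost:8777"  # assumed Ceilometer API endpoint

    # Unsatisfiable Accept header: the API only produces JSON.
    resp = requests.get(BASE + "/v2/resources",
                        headers={"accept": "text/plain"})
    assert resp.status_code == 406

    # 'id' is not a queryable field on this endpoint.
    resp = requests.get(BASE + "/v2/resources",
                        params={"q.field": "id", "q.value": "cars"})
    assert resp.status_code == 400
    assert "unrecognized field in query" in resp.text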
- - - name: list resources no extra - desc: Provide no additional header guidelines - url: /v2/resources - response_headers: - content-type: /application/json/ - response_strings: - - "[]" - - - name: list resources but get url wrong - url: /v2/resrces - status: 404 - - - name: list resources explicit accept - url: /v2/resources - request_headers: - accept: application/json - response_strings: - - "[]" - - - name: list resources bad accept - url: /v2/resources - request_headers: - accept: text/plain - status: 406 - - - name: list resources with bad query field - url: /v2/resources?q.field=id&q.value=cars - status: 400 - response_strings: - - unrecognized field in query - - - name: list resources with query - url: /v2/resources?q.field=resource&q.value=cars - response_strings: - - "[]" - - - name: list resource bad type meter links - url: /v2/resources?meter_links=yes%20please - status: 400 - response_strings: - - unable to convert to int - - - name: list resource meter links int - url: /v2/resources?meter_links=0 - response_strings: - - "[]" diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/gabbi/gabbits/resources-fixtured.yaml ceilometer-5.0.0~b3/ceilometer/tests/gabbi/gabbits/resources-fixtured.yaml --- ceilometer-5.0.0~b2/ceilometer/tests/gabbi/gabbits/resources-fixtured.yaml 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/gabbi/gabbits/resources-fixtured.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,57 +0,0 @@ -# -# Explore and cover resources API with gabbi tests when there are a -# small number of pre-existing resources -# - -fixtures: - - ConfigFixture - - SampleDataFixture - -tests: - - - name: list all resources - url: /v2/resources - response_json_paths: - $[0].user_id: farmerjon - $[0].links[1].rel: livestock - - - name: get one resource - desc: get a resource via the links in the first resource listed above - url: $RESPONSE['$[0].links[0].href'] - response_json_paths: - $.resource_id: $RESPONSE['$[0].resource_id'] - - - name: list resources limit user_id - url: /v2/resources?q.field=user_id&q.value=farmerjon - response_json_paths: - $[0].user_id: farmerjon - $[0].links[1].rel: livestock - - - name: list resources limit metadata - url: /v2/resources?q.field=metadata.breed&q.value=sheep - response_json_paths: - $[0].user_id: farmerjon - $[0].links[1].rel: livestock - - - name: list resources limit metadata no match - url: /v2/resources?q.field=metadata.breed&q.value=llamma - response_strings: - - "[]" - - - name: fail to get one resource - url: /v2/resources/nosirnothere - status: 404 - - - name: list resource meter links present - url: /v2/resources?meter_links=1 - response_json_paths: - $[0].links[0].rel: self - $[0].links[1].rel: livestock - $[0].links[-1].rel: livestock - - - name: list resource meter links not present - url: /v2/resources?meter_links=0 - desc: there is only one links entry when meter_links is 0 - response_json_paths: - $[0].links[0].rel: self - $[0].links[-1].rel: self diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/gabbi/gabbits/samples.yaml ceilometer-5.0.0~b3/ceilometer/tests/gabbi/gabbits/samples.yaml --- ceilometer-5.0.0~b2/ceilometer/tests/gabbi/gabbits/samples.yaml 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/gabbi/gabbits/samples.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,160 +0,0 @@ -# -# Explore and test the samples controller, using samples supplied by -# the SampleDataFixture. 
-# - -fixtures: - - ConfigFixture - - SampleDataFixture - -tests: - -# Confirm all the samples are there and expected requests behave. -# TODO(chdent): There's a danger here that the ordering of multiple -# samples will not be consistent. - - - name: lists samples - url: /v2/samples - response_headers: - content-type: /application/json/ - response_json_paths: - $[0].meter: livestock - $[0].metadata.breed: cow - $[1].metadata.breed: pig - $[2].metadata.breed: sheep - - - name: get just one - url: /v2/samples/$RESPONSE['$[0].id'] - response_json_paths: - $.meter: livestock - $.metadata.breed: cow - - - name: list samples with limit - url: /v2/samples?limit=1 - response_json_paths: - $[0].meter: livestock - $[0].metadata.breed: cow - $[-1].metadata.breed: cow - - - name: list zero samples with zero limit - url: /v2/samples?limit=0 - response_strings: - - '[]' - - - name: list samples with query - url: /v2/samples?q.field=resource_metadata.breed&q.value=cow&q.op=eq - response_json_paths: - $[0].meter: livestock - $[0].metadata.breed: cow - $[-1].metadata.breed: cow - - - name: query by user - url: /v2/samples?q.field=user&q.value=$RESPONSE['$[0].user_id']&q.op=eq - response_json_paths: - $[0].user_id: $RESPONSE['$[0].user_id'] - - - name: query by user_id - url: /v2/samples?q.field=user_id&q.value=$RESPONSE['$[0].user_id']&q.op=eq - response_json_paths: - $[0].user_id: $RESPONSE['$[0].user_id'] - - - name: query by project - url: /v2/samples?q.field=project&q.value=$RESPONSE['$[0].project_id']&q.op=eq - response_json_paths: - $[0].project_id: $RESPONSE['$[0].project_id'] - - - name: query by project_id - url: /v2/samples?q.field=project_id&q.value=$RESPONSE['$[0].project_id']&q.op=eq - response_json_paths: - $[0].project_id: $RESPONSE['$[0].project_id'] - -# Explore failure modes for listing samples - - - name: list samples with bad field - url: /v2/samples?q.field=harpoon&q.value=cow&q.op=eq - status: 400 - response_strings: - - timestamp - - project - - unrecognized field in query - - - name: list samples with bad metaquery field - url: /v2/samples?q.field=metaquery&q.value=cow&q.op=eq - status: 400 - response_strings: - - unrecognized field in query - - - name: bad limit value - url: /v2/samples?limit=happiness - status: 400 - response_strings: - - Invalid input for field/attribute limit - - - name: negative limit value 400 - url: /v2/samples?limit=-99 - status: 400 - - - name: negative limit value error message - url: /v2/samples?limit=-99 - desc: https://bugs.launchpad.net/ceilometer/+bug/1426483 - xfail: true - status: 400 - response_headers: - content-type: /application/json/ - response_json_paths: - $.error_message.faultstring: Limit must be positive - - - name: bad accept - desc: try an unexpected content type - url: /v2/samples - request_headers: - accept: text/plain - status: 406 - - - name: complex good accept - desc: client sends complex accept do we adapt - url: /v2/samples - request_headers: - accept: text/plain, application/json; q=0.8 - - - name: complex bad accept - desc: client sends complex accept do we adapt - url: /v2/samples - request_headers: - accept: text/plain, application/binary; q=0.8 - status: 406 - - - name: bad method - url: /v2/samples - method: POST - status: 405 - response_headers: - allow: GET - -# Work with just one sample. 
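gabbi chains tests with $RESPONSE substitutions; the one-sample tests below pull the sample id out of the preceding listing. Outside gabbi, the same chaining is just two requests (endpoint assumed):

    import requests

    BASE = "http://localhost:8777"  # assumed Ceilometer API endpoint

    listing = requests.get(BASE + "/v2/samples", params={"limit": 1}).json()
    one = requests.get(BASE + "/v2/samples/" + listing[0]["id"])
    assert one.status_code == 200
    assert one.json()["meter"] == listing[0]["meter"]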
- - - name: list one of the samples - url: /v2/samples?limit=1 - - - name: retrieve one sample - url: /v2/samples/$RESPONSE['$[0].id'] - response_headers: - content-type: /application/json/ - response_json_paths: - $.meter: livestock - - - name: retrieve sample with useless query - url: /v2/samples/$RESPONSE['$.id']?limit=5 - status: 400 - response_strings: - - "Unknown argument:" - - - name: attempt missing sample - url: /v2/samples/davesnothere - desc: https://bugs.launchpad.net/ceilometer/+bug/1426483 - xfail: true - status: 404 - response_headers: - content-type: /application/json/ - response_json_paths: - $.error_message.faultstring: Sample davesnothere Not Found diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/gabbi/gabbits_prefix/api_events_with_data.yaml ceilometer-5.0.0~b3/ceilometer/tests/gabbi/gabbits_prefix/api_events_with_data.yaml --- ceilometer-5.0.0~b2/ceilometer/tests/gabbi/gabbits_prefix/api_events_with_data.yaml 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/gabbi/gabbits_prefix/api_events_with_data.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -# This test runs against the Events API and confirms the -# content-location header includes a prefix. -fixtures: -- ConfigFixture -- EventDataFixture - -tests: - -- name: get all events - url: /v2/events - response_headers: - content-type: application/json; charset=UTF-8 - content-location: /$SCHEME://.*/telemetry/v2/events/ diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/gabbi/gabbits_prefix/basic.yaml ceilometer-5.0.0~b3/ceilometer/tests/gabbi/gabbits_prefix/basic.yaml --- ceilometer-5.0.0~b2/ceilometer/tests/gabbi/gabbits_prefix/basic.yaml 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/gabbi/gabbits_prefix/basic.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,20 +0,0 @@ -# -# Confirm root reports the right data including a prefixed URL -# -fixtures: - - ConfigFixture - -tests: - -# Root gives us some information on where to go from here. -- name: quick root check - url: / - response_headers: - content-type: application/json; charset=UTF-8 - response_strings: - - '"base": "application/json"' - response_json_paths: - versions.values.[0].status: stable - versions.values.[0].media-types.[0].base: application/json - response_strings: - - /telemetry/ diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/gabbi/gabbits_prefix/clean-samples.yaml ceilometer-5.0.0~b3/ceilometer/tests/gabbi/gabbits_prefix/clean-samples.yaml --- ceilometer-5.0.0~b2/ceilometer/tests/gabbi/gabbits_prefix/clean-samples.yaml 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/gabbi/gabbits_prefix/clean-samples.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,51 +0,0 @@ -# Post a simple sample and confirm the created resource has -# reasonable URLs -fixtures: - - ConfigFixture - -tests: - -# POST one sample and verify its existence. 
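The gabbits_prefix variants re-run a subset of the suite with the API mounted under /telemetry and assert that generated links carry the prefix. A sketch of the key check; the mount point is my assumption:

    import requests

    BASE = "http://localhost:8777/telemetry"  # assumed prefixed mount point

    resp = requests.get(BASE + "/v2/resources")
    for res in resp.json():
        # links[0] is the self link in these tests; it must be prefixed too.
        assert "/telemetry/" in res["links"][0]["href"]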
- - - name: post sample for meter - desc: post a single sample - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: | - [ - { - "counter_name": "apples", - "project_id": "35b17138-b364-4e6a-a131-8f3099c5be68", - "user_id": "efd87807-12d2-4b38-9c70-5f5c2ac427ff", - "counter_unit": "instance", - "counter_volume": 1, - "resource_id": "bd9431c1-8d69-4ad3-803a-8d4a6b89fd36", - "resource_metadata": { - "name2": "value2", - "name1": "value1" - }, - "counter_type": "gauge" - } - ] - - response_json_paths: - $.[0].counter_name: apples - status: 201 - response_headers: - content-type: application/json; charset=UTF-8 - - - name: get resources - desc: get the resources that exist because of the sample - url: /v2/resources - response_json_paths: - $.[0].metadata.name2: value2 - - - name: get resource - desc: get just one of those resources via self - url: $RESPONSE['$[0].links[0].href'] - response_json_paths: - $.metadata.name2: value2 - response_strings: - - /telemetry/ diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/gabbi/gabbits_prefix/resources-fixtured.yaml ceilometer-5.0.0~b3/ceilometer/tests/gabbi/gabbits_prefix/resources-fixtured.yaml --- ceilometer-5.0.0~b2/ceilometer/tests/gabbi/gabbits_prefix/resources-fixtured.yaml 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/gabbi/gabbits_prefix/resources-fixtured.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -# -# Explore and cover resources API with gabbi tests when there are a -# small number of pre-existing resources -# - -fixtures: - - ConfigFixture - - SampleDataFixture - -tests: - - - name: list all resources - url: /v2/resources - response_json_paths: - $[0].user_id: farmerjon - $[0].links[1].rel: livestock - response_strings: - - /telemetry/ - - - name: get one resource - desc: get a resource via the links in the first resource listed above - url: $RESPONSE['$[0].links[0].href'] - response_json_paths: - $.resource_id: $RESPONSE['$[0].resource_id'] diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/gabbi/test_gabbi_prefix.py ceilometer-5.0.0~b3/ceilometer/tests/gabbi/test_gabbi_prefix.py --- ceilometer-5.0.0~b2/ceilometer/tests/gabbi/test_gabbi_prefix.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/gabbi/test_gabbi_prefix.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,35 +0,0 @@ -# -# Copyright 2015 Red Hat. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""A module to exercise the Ceilometer API with gabbi with a URL prefix""" - -import os - -from gabbi import driver - -from ceilometer.api import app -from ceilometer.tests.gabbi import fixtures as fixture_module - - -TESTS_DIR = 'gabbits_prefix' - - -def load_tests(loader, tests, pattern): - """Provide a TestSuite to the discovery process.""" - test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR) - return driver.build_tests(test_dir, loader, host=None, - prefix='/telemetry', - intercept=app.VersionSelectorApplication, - fixture_module=fixture_module) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/gabbi/test_gabbi.py ceilometer-5.0.0~b3/ceilometer/tests/gabbi/test_gabbi.py --- ceilometer-5.0.0~b2/ceilometer/tests/gabbi/test_gabbi.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/gabbi/test_gabbi.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,37 +0,0 @@ -# -# Copyright 2015 Red Hat. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""A test module to exercise the Ceilometer API with gabbi - -For the sake of exploratory development. -""" - -import os - -from gabbi import driver - -from ceilometer.api import app -from ceilometer.tests.gabbi import fixtures as fixture_module - - -TESTS_DIR = 'gabbits' - - -def load_tests(loader, tests, pattern): - """Provide a TestSuite to the discovery process.""" - test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR) - return driver.build_tests(test_dir, loader, host=None, - intercept=app.VersionSelectorApplication, - fixture_module=fixture_module) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/hardware/inspector/test_inspector.py ceilometer-5.0.0~b3/ceilometer/tests/hardware/inspector/test_inspector.py --- ceilometer-5.0.0~b2/ceilometer/tests/hardware/inspector/test_inspector.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/hardware/inspector/test_inspector.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,33 +0,0 @@ -# -# Copyright 2014 Intel Corp -# -# Authors: Lianhao Lu -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
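The inspector tests that follow dispatch on the scheme of a parsed URL. For reference, a small sketch of the parsing step they rely on (oslo_utils is already a dependency of this tree; the specific URL is illustrative):

    from oslo_utils import netutils

    url = netutils.urlsplit("snmp://localhost:161")
    assert url.scheme == "snmp"        # get_inspector() keys off this
    assert url.hostname == "localhost"
    assert url.port == 161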
-from oslo_utils import netutils - -from ceilometer.hardware import inspector -from ceilometer.tests import base - - -class TestHardwareInspector(base.BaseTestCase): - def test_get_inspector(self): - url = netutils.urlsplit("snmp://") - driver = inspector.get_inspector(url) - self.assertTrue(driver) - - def test_get_inspector_illegal(self): - url = netutils.urlsplit("illegal://") - self.assertRaises(RuntimeError, - inspector.get_inspector, - url) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/hardware/inspector/test_snmp.py ceilometer-5.0.0~b3/ceilometer/tests/hardware/inspector/test_snmp.py --- ceilometer-5.0.0~b2/ceilometer/tests/hardware/inspector/test_snmp.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/hardware/inspector/test_snmp.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,145 +0,0 @@ -# -# Copyright 2013 Intel Corp -# -# Authors: Lianhao Lu -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for ceilometer/hardware/inspector/snmp/inspector.py -""" -from oslo_utils import netutils -from oslotest import mockpatch - -from ceilometer.hardware.inspector import snmp -from ceilometer.tests import base as test_base - -ins = snmp.SNMPInspector - - -class FakeObjectName(object): - def __init__(self, name): - self.name = name - - def prettyPrint(self): - return str(self.name) - - -def faux_getCmd_new(authData, transportTarget, *oids, **kwargs): - varBinds = [(FakeObjectName(oid), - int(oid.split('.')[-1])) for oid in oids] - return (None, None, 0, varBinds) - - -def faux_bulkCmd_new(authData, transportTarget, nonRepeaters, maxRepetitions, - *oids, **kwargs): - varBindTable = [ - [(FakeObjectName(oid + ".%d" % i), i) for i in range(1, 3)] - for oid in oids - ] - return (None, None, 0, varBindTable) - - -class TestSNMPInspector(test_base.BaseTestCase): - mapping = { - 'test_exact': { - 'matching_type': snmp.EXACT, - 'metric_oid': ('1.3.6.1.4.1.2021.10.1.3.1', int), - 'metadata': { - 'meta': ('1.3.6.1.4.1.2021.10.1.3.8', int) - }, - 'post_op': '_fake_post_op', - }, - 'test_prefix': { - 'matching_type': snmp.PREFIX, - 'metric_oid': ('1.3.6.1.4.1.2021.9.1.8', int), - 'metadata': { - 'meta': ('1.3.6.1.4.1.2021.9.1.3', int) - }, - 'post_op': None, - }, - } - - def setUp(self): - super(TestSNMPInspector, self).setUp() - self.inspector = snmp.SNMPInspector() - self.host = netutils.urlsplit("snmp://localhost") - self.inspector.MAPPING = self.mapping - self.useFixture(mockpatch.PatchObject( - self.inspector._cmdGen, 'getCmd', new=faux_getCmd_new)) - self.useFixture(mockpatch.PatchObject( - self.inspector._cmdGen, 'bulkCmd', new=faux_bulkCmd_new)) - - def test_snmp_error(self): - def get_list(func, *args, **kwargs): - return list(func(*args, **kwargs)) - - def faux_parse(ret, is_bulk): - return (True, 'forced error') - - self.useFixture(mockpatch.PatchObject( - snmp, 'parse_snmp_return', new=faux_parse)) - - self.assertRaises(snmp.SNMPException, - get_list, - self.inspector.inspect_generic, - self.host, - 'test_exact', - {}) - - @staticmethod - def _fake_post_op(host, 
cache, meter_def, value, metadata, extra, suffix): - metadata.update(post_op_meta=4) - extra.update(project_id=2) - return value - - def test_inspect_generic_exact(self): - self.inspector._fake_post_op = self._fake_post_op - cache = {} - ret = list(self.inspector.inspect_generic(self.host, - 'test_exact', - cache)) - keys = cache[ins._CACHE_KEY_OID].keys() - self.assertIn('1.3.6.1.4.1.2021.10.1.3.1', keys) - self.assertIn('1.3.6.1.4.1.2021.10.1.3.8', keys) - self.assertEqual(1, len(ret)) - self.assertEqual(1, ret[0][0]) - self.assertEqual(8, ret[0][1]['meta']) - self.assertEqual(4, ret[0][1]['post_op_meta']) - self.assertEqual(2, ret[0][2]['project_id']) - - def test_inspect_generic_prefix(self): - cache = {} - ret = list(self.inspector.inspect_generic(self.host, - 'test_prefix', - cache)) - keys = cache[ins._CACHE_KEY_OID].keys() - self.assertIn('1.3.6.1.4.1.2021.9.1.8' + '.1', keys) - self.assertIn('1.3.6.1.4.1.2021.9.1.8' + '.2', keys) - self.assertIn('1.3.6.1.4.1.2021.9.1.3' + '.1', keys) - self.assertIn('1.3.6.1.4.1.2021.9.1.3' + '.2', keys) - self.assertEqual(2, len(ret)) - self.assertIn(ret[0][0], (1, 2)) - self.assertEqual(ret[0][0], ret[0][1]['meta']) - - def test_post_op_net(self): - self.useFixture(mockpatch.PatchObject( - self.inspector._cmdGen, 'bulkCmd', new=faux_bulkCmd_new)) - cache = {} - metadata = {} - ret = self.inspector._post_op_net(self.host, cache, None, - value=8, - metadata=metadata, - extra={}, - suffix=".2") - self.assertEqual(8, ret) - self.assertIn('ip', metadata) - self.assertIn("2", metadata['ip']) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/hardware/pollsters/base.py ceilometer-5.0.0~b3/ceilometer/tests/hardware/pollsters/base.py --- ceilometer-5.0.0~b2/ceilometer/tests/hardware/pollsters/base.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/hardware/pollsters/base.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,86 +0,0 @@ -# -# Copyright 2013 Intel Corp -# -# Authors: Lianhao Lu -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
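The pollster test base defined next fakes an inspector whose inspect_generic() yields (value, metadata, extra) triples. A hypothetical consumer, to make that protocol concrete; this helper is mine, not code from the patch:

    def first_volume(inspector, host, identifier):
        # Each yielded triple becomes one Sample in a real pollster; here we
        # just surface the measured value of the first one.
        for value, metadata, extra in inspector.inspect_generic(
                host, identifier, cache={}):
            return value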
- -import fixtures -import mock - -from ceilometer.agent import manager -from ceilometer.hardware.inspector import base as inspector_base -from ceilometer.tests import base as test_base - - -class FakeInspector(inspector_base.Inspector): - net_metadata = dict(name='test.teest', - mac='001122334455', - ip='10.0.0.2', - speed=1000) - disk_metadata = dict(device='/dev/sda1', path='/') - DATA = { - 'cpu.load.1min': (0.99, {}, {}), - 'cpu.load.5min': (0.77, {}, {}), - 'cpu.load.15min': (0.55, {}, {}), - 'memory.total': (1000, {}, {}), - 'memory.used': (90, {}, {}), - 'memory.buffer': (500, {}, {}), - 'memory.cached': (200, {}, {}), - 'network.incoming.bytes': (90, net_metadata, {}), - 'network.outgoing.bytes': (80, net_metadata, {}), - 'network.outgoing.errors': (1, net_metadata, {}), - 'disk.size.total': (1000, disk_metadata, {}), - 'disk.size.used': (90, disk_metadata, {}), - 'system_stats.cpu.idle': (62, {}, {}), - 'system_stats.io.outgoing.blocks': (100, {}, {}), - 'system_stats.io.incoming.blocks': (120, {}, {}), - 'network.ip.outgoing.datagrams': (200, {}, {}), - 'network.ip.incoming.datagrams': (300, {}, {}), - } - - def inspect_generic(self, host, identifier, cache, extra_metadata=None): - yield self.DATA[identifier] - - -class TestPollsterBase(test_base.BaseTestCase): - @staticmethod - def faux_get_inspector(url, namespace=None): - return FakeInspector() - - def setUp(self): - super(TestPollsterBase, self).setUp() - self.hosts = ["test://test", "test://test2"] - self.useFixture(fixtures.MonkeyPatch( - 'ceilometer.hardware.inspector.get_inspector', - self.faux_get_inspector)) - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def _check_get_samples(self, factory, name, - expected_value, expected_type, expected_unit=None): - mgr = manager.AgentManager() - pollster = factory() - cache = {} - samples = list(pollster.get_samples(mgr, cache, self.hosts)) - self.assertTrue(samples) - self.assertIn(pollster.CACHE_KEY, cache) - for host in self.hosts: - self.assertIn(host, cache[pollster.CACHE_KEY]) - - self.assertEqual(set([name]), - set([s.name for s in samples])) - match = [s for s in samples if s.name == name] - self.assertEqual(expected_value, match[0].volume) - self.assertEqual(expected_type, match[0].type) - if expected_unit: - self.assertEqual(expected_unit, match[0].unit) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/hardware/pollsters/test_cpu.py ceilometer-5.0.0~b3/ceilometer/tests/hardware/pollsters/test_cpu.py --- ceilometer-5.0.0~b2/ceilometer/tests/hardware/pollsters/test_cpu.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/hardware/pollsters/test_cpu.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,40 +0,0 @@ -# -# Copyright 2013 Intel Corp -# -# Authors: Lianhao Lu -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from ceilometer.hardware.pollsters import cpu -from ceilometer import sample -from ceilometer.tests.hardware.pollsters import base - - -class TestCPUPollsters(base.TestPollsterBase): - def test_1min(self): - self._check_get_samples(cpu.CPULoad1MinPollster, - 'hardware.cpu.load.1min', - 0.99, sample.TYPE_GAUGE, - expected_unit='process') - - def test_5min(self): - self._check_get_samples(cpu.CPULoad5MinPollster, - 'hardware.cpu.load.5min', - 0.77, sample.TYPE_GAUGE, - expected_unit='process') - - def test_15min(self): - self._check_get_samples(cpu.CPULoad15MinPollster, - 'hardware.cpu.load.15min', - 0.55, sample.TYPE_GAUGE, - expected_unit='process') diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/hardware/pollsters/test_disk.py ceilometer-5.0.0~b3/ceilometer/tests/hardware/pollsters/test_disk.py --- ceilometer-5.0.0~b2/ceilometer/tests/hardware/pollsters/test_disk.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/hardware/pollsters/test_disk.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,32 +0,0 @@ -# -# Copyright 2013 Intel Corp -# -# Authors: Lianhao Lu -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from ceilometer.hardware.pollsters import disk -from ceilometer import sample -from ceilometer.tests.hardware.pollsters import base - - -class TestDiskPollsters(base.TestPollsterBase): - def test_disk_size_total(self): - self._check_get_samples(disk.DiskTotalPollster, - 'hardware.disk.size.total', - 1000, sample.TYPE_GAUGE) - - def test_disk_size_used(self): - self._check_get_samples(disk.DiskUsedPollster, - 'hardware.disk.size.used', - 90, sample.TYPE_GAUGE) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/hardware/pollsters/test_memory.py ceilometer-5.0.0~b3/ceilometer/tests/hardware/pollsters/test_memory.py --- ceilometer-5.0.0~b2/ceilometer/tests/hardware/pollsters/test_memory.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/hardware/pollsters/test_memory.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,42 +0,0 @@ -# -# Copyright 2013 Intel Corp -# -# Authors: Lianhao Lu -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from ceilometer.hardware.pollsters import memory -from ceilometer import sample -from ceilometer.tests.hardware.pollsters import base - - -class TestMemoryPollsters(base.TestPollsterBase): - def test_memory_size_total(self): - self._check_get_samples(memory.MemoryTotalPollster, - 'hardware.memory.total', - 1000, sample.TYPE_GAUGE) - - def test_memory_size_used(self): - self._check_get_samples(memory.MemoryUsedPollster, - 'hardware.memory.used', - 90, sample.TYPE_GAUGE) - - def test_memory_size_buffer(self): - self._check_get_samples(memory.MemoryBufferPollster, - 'hardware.memory.buffer', - 500, sample.TYPE_GAUGE) - - def test_memory_size_cached(self): - self._check_get_samples(memory.MemoryCachedPollster, - 'hardware.memory.cached', - 200, sample.TYPE_GAUGE) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/hardware/pollsters/test_net.py ceilometer-5.0.0~b3/ceilometer/tests/hardware/pollsters/test_net.py --- ceilometer-5.0.0~b2/ceilometer/tests/hardware/pollsters/test_net.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/hardware/pollsters/test_net.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,40 +0,0 @@ -# -# Copyright 2013 Intel Corp -# -# Authors: Lianhao Lu -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from ceilometer.hardware.pollsters import net -from ceilometer import sample -from ceilometer.tests.hardware.pollsters import base - - -class TestNetPollsters(base.TestPollsterBase): - def test_incoming(self): - self._check_get_samples(net.IncomingBytesPollster, - 'hardware.network.incoming.bytes', - 90, sample.TYPE_CUMULATIVE, - expected_unit='B') - - def test_outgoing(self): - self._check_get_samples(net.OutgoingBytesPollster, - 'hardware.network.outgoing.bytes', - 80, sample.TYPE_CUMULATIVE, - expected_unit='B') - - def test_error(self): - self._check_get_samples(net.OutgoingErrorsPollster, - 'hardware.network.outgoing.errors', - 1, sample.TYPE_CUMULATIVE, - expected_unit='packet') diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/hardware/pollsters/test_network_aggregated.py ceilometer-5.0.0~b3/ceilometer/tests/hardware/pollsters/test_network_aggregated.py --- ceilometer-5.0.0~b2/ceilometer/tests/hardware/pollsters/test_network_aggregated.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/hardware/pollsters/test_network_aggregated.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,31 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from ceilometer.hardware.pollsters import network_aggregated -from ceilometer import sample -from ceilometer.tests.hardware.pollsters import base - - -class TestNetworkAggregatedPollsters(base.TestPollsterBase): - def test_incoming(self): - self._check_get_samples(network_aggregated. - NetworkAggregatedIPOutRequests, - 'hardware.network.ip.outgoing.datagrams', - 200, sample.TYPE_CUMULATIVE, - expected_unit='datagrams') - - def test_outgoing(self): - self._check_get_samples(network_aggregated. - NetworkAggregatedIPInReceives, - 'hardware.network.ip.incoming.datagrams', - 300, sample.TYPE_CUMULATIVE, - expected_unit='datagrams') diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/hardware/pollsters/test_system.py ceilometer-5.0.0~b3/ceilometer/tests/hardware/pollsters/test_system.py --- ceilometer-5.0.0~b2/ceilometer/tests/hardware/pollsters/test_system.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/hardware/pollsters/test_system.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,35 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from ceilometer.hardware.pollsters import system -from ceilometer import sample -from ceilometer.tests.hardware.pollsters import base - - -class TestSystemPollsters(base.TestPollsterBase): - def test_cpu_idle(self): - self._check_get_samples(system.SystemCpuIdlePollster, - 'hardware.system_stats.cpu.idle', - 62, sample.TYPE_GAUGE, - expected_unit='%') - - def test_io_outgoing(self): - self._check_get_samples(system.SystemIORawSentPollster, - 'hardware.system_stats.io.outgoing.blocks', - 100, sample.TYPE_CUMULATIVE, - expected_unit='blocks') - - def test_io_incoming(self): - self._check_get_samples(system.SystemIORawReceivedPollster, - 'hardware.system_stats.io.incoming.blocks', - 120, sample.TYPE_CUMULATIVE, - expected_unit='blocks') diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/hardware/pollsters/test_util.py ceilometer-5.0.0~b3/ceilometer/tests/hardware/pollsters/test_util.py --- ceilometer-5.0.0~b2/ceilometer/tests/hardware/pollsters/test_util.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/hardware/pollsters/test_util.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,61 +0,0 @@ -# -# Copyright 2013 Intel Corp -# -# Authors: Lianhao Lu -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_utils import netutils - -from ceilometer.hardware.pollsters import util -from ceilometer import sample -from ceilometer.tests import base as test_base - - -class TestPollsterUtils(test_base.BaseTestCase): - def setUp(self): - super(TestPollsterUtils, self).setUp() - self.host_url = netutils.urlsplit("snmp://127.0.0.1:161") - - def test_make_sample(self): - s = util.make_sample_from_host(self.host_url, - name='test', - sample_type=sample.TYPE_GAUGE, - unit='B', - volume=1, - res_metadata={ - 'metakey': 'metaval', - }) - self.assertEqual('127.0.0.1', s.resource_id) - self.assertIn('snmp://127.0.0.1:161', s.resource_metadata.values()) - self.assertIn('metakey', s.resource_metadata.keys()) - - def test_make_sample_extra(self): - extra = { - 'project_id': 'project', - 'resource_id': 'resource' - } - s = util.make_sample_from_host(self.host_url, - name='test', - sample_type=sample.TYPE_GAUGE, - unit='B', - volume=1, - extra=extra) - self.assertIsNone(s.user_id) - self.assertEqual('project', s.project_id) - self.assertEqual('resource', s.resource_id) - self.assertEqual({'resource_url': 'snmp://127.0.0.1:161', - 'project_id': 'project', - 'resource_id': - 'resource'}, - s.resource_metadata) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/identity/test_notifications.py ceilometer-5.0.0~b3/ceilometer/tests/identity/test_notifications.py --- ceilometer-5.0.0~b2/ceilometer/tests/identity/test_notifications.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/identity/test_notifications.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,310 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import datetime - -import mock -from oslotest import base - -from ceilometer.identity import notifications -from ceilometer import sample - - -NOW = datetime.datetime.isoformat(datetime.datetime.utcnow()) - -PROJECT_ID = u'project_id' -DOMAIN_ID = u'domain_id' -USER_ID = u'user_id' -ROLE_ID = u'role_id' -GROUP_ID = u'group_id' -TRUST_ID = u'trust_id' -PUBLISHER_ID = u'identity.node-n5x66lxdy67d' -ROLE_ASSIGNMENT = 'role_assignment' - - -def notification_for(resource_type, operation, resource_id): - - return { - u'event_type': '%s.%s.%s' % (notifications.SERVICE, resource_type, - operation), - u'message_id': u'ef921faa-7f7b-4854-8b86-a424ab93c96e', - u'payload': { - u'resource_info': resource_id - }, - u'priority': u'INFO', - u'publisher_id': PUBLISHER_ID, - u'timestamp': NOW - } - - -def cadf_format_notification(): - return { - u'event_type': u'some_identity_event', - u'message_id': u'1371a590-d5fd-448f-b3bb-a14dead6f4cb', - u'payload': { - u'typeURI': u'http://schemas.dmtf.org/cloud/audit/1.0/event', - u'initiator': { - u'typeURI': u'service/security/account/user', - u'host': { - u'agent': u'python-keystoneclient', - u'address': u'10.0.2.15' - }, - u'id': USER_ID, - u'name': u'openstack:demo_user' - }, - u'target': { - u'typeURI': u'service/security/account/user', - u'id': u'openstack:44b3d8cb-5f16-46e9-9b1b-ac90b64c2530' - }, - u'observer': { - u'typeURI': u'service/security', - u'id': u'openstack:55a9e88c-a4b1-4864-9339-62b7e6ecb6a7' - }, - u'eventType': u'activity', - u'eventTime': u'2014-08-04T05:38:59.978898+0000', - u'action': u'action_name', - u'outcome': 'success', - u'id': u'openstack:eca02fef-9394-4008-8fb3-c434133ca4b2' - }, - u'priority': u'INFO', - u'publisher_id': PUBLISHER_ID, - u'timestamp': NOW - } - - -def cadf_crud_notification_for(resource_type, operation, resource_id): - base = cadf_format_notification() - event_type = '%s.%s.%s' % (notifications.SERVICE, resource_type, - operation) - base['event_type'] = event_type - base['payload']['action'] = '%s.%s' % (operation, resource_type) - base['payload']['resource_info'] = resource_id - return base - - -def authn_notification_for(outcome): - base = cadf_format_notification() - base['event_type'] = 'identity.authenticate' - base['payload']['action'] = 'authenticate' - base['payload']['outcome'] = outcome - return base - - -def notification_for_role_change(action, project, user): - """Create a notifications for a role_assignment - - In this case, action is either 'created' or 'deleted'. Also - in a role_assignment notifications, in the payload portion, - there may be a 'domain' key or a 'project' key, never both. - The same holds for the 'user' key and 'group' key. - There must always be a 'role'. - """ - - base = cadf_format_notification() - - # NOTE(stevemar): i.e. created.role_assignment - action_name = '%s.%s' % (action, ROLE_ASSIGNMENT) - event, resource_type = action_name.split(".") - - # NOTE(stevemar): i.e. 
identity.role_assignment.created - event_name = '%s.%s.%s' % (notifications.SERVICE, resource_type, event) - - base['event_type'] = event_name - base['payload']['action'] = action_name - base['payload']['role'] = ROLE_ID - base['payload']['inherited_to_projects'] = False - if project: - base['payload']['project'] = PROJECT_ID - else: - base['payload']['domain'] = DOMAIN_ID - if user: - base['payload']['user'] = USER_ID - else: - base['payload']['group'] = GROUP_ID - return base - - -class TestCRUDNotification(base.BaseTestCase): - - def _verify_common_sample(self, s): - self.assertIsNotNone(s) - self.assertEqual(NOW, s.timestamp) - self.assertEqual(sample.TYPE_DELTA, s.type) - self.assertIsNone(s.project_id) - metadata = s.resource_metadata - self.assertEqual(PUBLISHER_ID, metadata.get('host')) - - def _verify_common_operations(self, data, resource_type, operation, - resource_id): - self.assertEqual(1, len(data)) - self.assertEqual(resource_id, data[0].resource_id) - name = '%s.%s.%s' % (notifications.SERVICE, resource_type, operation) - self.assertEqual(name, data[0].name) - - def _test_operation(self, resource_type, operation, resource_id, - notification_class): - notif = notification_for(resource_type, operation, resource_id) - handler = notification_class(mock.Mock()) - data = list(handler.process_notification(notif)) - self.assertIsNone(data[0].user_id) - self._verify_common_operations(data, resource_type, operation, - resource_id) - self._verify_common_sample(data[0]) - - def _test_audit_operation(self, resource_type, operation, resource_id, - notification_class): - notif = cadf_crud_notification_for(resource_type, operation, - resource_id) - handler = notification_class(mock.Mock()) - data = list(handler.process_notification(notif)) - self.assertEqual(USER_ID, data[0].user_id) - self._verify_common_operations(data, resource_type, operation, - resource_id) - self._verify_common_sample(data[0]) - - def test_create_user(self): - self._test_operation('user', 'created', USER_ID, notifications.User) - self._test_audit_operation('user', 'created', USER_ID, - notifications.User) - - def test_delete_user(self): - self._test_operation('user', 'deleted', USER_ID, notifications.User) - self._test_audit_operation('user', 'deleted', USER_ID, - notifications.User) - - def test_update_user(self): - self._test_operation('user', 'updated', USER_ID, notifications.User) - self._test_audit_operation('user', 'updated', USER_ID, - notifications.User) - - def test_create_group(self): - self._test_operation('group', 'created', GROUP_ID, notifications.Group) - self._test_audit_operation('group', 'created', GROUP_ID, - notifications.Group) - - def test_update_group(self): - self._test_operation('group', 'updated', GROUP_ID, notifications.Group) - self._test_audit_operation('group', 'updated', GROUP_ID, - notifications.Group) - - def test_delete_group(self): - self._test_operation('group', 'deleted', GROUP_ID, notifications.Group) - self._test_audit_operation('group', 'deleted', GROUP_ID, - notifications.Group) - - def test_create_project(self): - self._test_operation('project', 'created', PROJECT_ID, - notifications.Project) - self._test_audit_operation('project', 'created', PROJECT_ID, - notifications.Project) - - def test_update_project(self): - self._test_operation('project', 'updated', PROJECT_ID, - notifications.Project) - self._test_audit_operation('project', 'updated', PROJECT_ID, - notifications.Project) - - def test_delete_project(self): - self._test_operation('project', 'deleted', PROJECT_ID, - 
notifications.Project) - self._test_audit_operation('project', 'deleted', PROJECT_ID, - notifications.Project) - - def test_create_role(self): - self._test_operation('role', 'deleted', ROLE_ID, notifications.Role) - self._test_audit_operation('role', 'deleted', ROLE_ID, - notifications.Role) - - def test_update_role(self): - self._test_operation('role', 'updated', ROLE_ID, notifications.Role) - self._test_audit_operation('role', 'updated', ROLE_ID, - notifications.Role) - - def test_delete_role(self): - self._test_operation('role', 'deleted', ROLE_ID, notifications.Role) - self._test_audit_operation('role', 'deleted', ROLE_ID, - notifications.Role) - - def test_create_trust(self): - self._test_operation('trust', 'created', TRUST_ID, notifications.Trust) - self._test_audit_operation('trust', 'created', TRUST_ID, - notifications.Trust) - - def test_delete_trust(self): - self._test_operation('trust', 'deleted', TRUST_ID, notifications.Trust) - self._test_audit_operation('trust', 'deleted', TRUST_ID, - notifications.Trust) - - -class TestAuthenticationNotification(base.BaseTestCase): - - def _verify_common_sample(self, s): - self.assertIsNotNone(s) - self.assertEqual(NOW, s.timestamp) - self.assertEqual(sample.TYPE_DELTA, s.type) - self.assertIsNone(s.project_id) - self.assertEqual(USER_ID, s.user_id) - self.assertEqual(1, s.volume) - metadata = s.resource_metadata - self.assertEqual(PUBLISHER_ID, metadata.get('host')) - - def _test_authn_operation(self, outcome): - notif = authn_notification_for(outcome) - handler = notifications.Authenticate(mock.Mock()) - data = list(handler.process_notification(notif)) - self.assertEqual(1, len(data)) - name = '%s.%s.%s' % (notifications.SERVICE, 'authenticate', outcome) - self.assertEqual(name, data[0].name) - self.assertEqual(USER_ID, data[0].resource_id) - self.assertEqual('user', data[0].unit) - self._verify_common_sample(data[0]) - - def _test_role_assignment_operation(self, action, project, user): - notif = notification_for_role_change(action, project, user) - handler = notifications.RoleAssignment(mock.Mock()) - data = list(handler.process_notification(notif)) - self.assertEqual(1, len(data)) - name = '%s.%s.%s' % (notifications.SERVICE, ROLE_ASSIGNMENT, action) - self.assertEqual(name, data[0].name) - self.assertEqual(ROLE_ID, data[0].resource_id) - self.assertEqual(ROLE_ASSIGNMENT, data[0].unit) - metadata = data[0].resource_metadata - if project: - self.assertEqual(PROJECT_ID, metadata.get('project')) - else: - self.assertEqual(DOMAIN_ID, metadata.get('domain')) - if user: - self.assertEqual(USER_ID, metadata.get('user')) - else: - self.assertEqual(GROUP_ID, metadata.get('group')) - self._verify_common_sample(data[0]) - - def test_authn_success(self): - self._test_authn_operation('success') - - def test_authn_failure(self): - self._test_authn_operation('failure') - - def test_authn_pending(self): - self._test_authn_operation('pending') - - def test_create_role_assignment_group_domain(self): - self._test_role_assignment_operation('created', False, False) - - def test_delete_role_assignment_group_domain(self): - self._test_role_assignment_operation('deleted', False, False) - - def test_create_role_assignment_user_project(self): - self._test_role_assignment_operation('created', True, True) - - def test_delete_role_assignment_user_project(self): - self._test_role_assignment_operation('deleted', True, True) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/image/test_glance.py ceilometer-5.0.0~b3/ceilometer/tests/image/test_glance.py --- 
ceilometer-5.0.0~b2/ceilometer/tests/image/test_glance.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/image/test_glance.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,229 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_config import fixture as fixture_config -from oslo_context import context -from oslotest import base -from oslotest import mockpatch - -from ceilometer.agent import manager -from ceilometer.image import glance - -IMAGE_LIST = [ - type('Image', (object,), - {u'status': u'queued', - u'name': "some name", - u'deleted': False, - u'container_format': None, - u'created_at': u'2012-09-18T16:29:46', - u'disk_format': None, - u'updated_at': u'2012-09-18T16:29:46', - u'properties': {}, - u'min_disk': 0, - u'protected': False, - u'id': u'1d21a8d0-25f4-4e0a-b4ec-85f40237676b', - u'location': None, - u'checksum': None, - u'owner': u'4c8364fc20184ed7971b76602aa96184', - u'is_public': True, - u'deleted_at': None, - u'min_ram': 0, - u'size': 2048}), - type('Image', (object,), - {u'status': u'active', - u'name': "hello world", - u'deleted': False, - u'container_format': None, - u'created_at': u'2012-09-18T16:27:41', - u'disk_format': None, - u'updated_at': u'2012-09-18T16:27:41', - u'properties': {}, - u'min_disk': 0, - u'protected': False, - u'id': u'22be9f90-864d-494c-aa74-8035fd535989', - u'location': None, - u'checksum': None, - u'owner': u'9e4f98287a0246daa42eaf4025db99d4', - u'is_public': True, - u'deleted_at': None, - u'min_ram': 0, - u'size': 0}), - type('Image', (object,), - {u'status': u'queued', - u'name': None, - u'deleted': False, - u'container_format': None, - u'created_at': u'2012-09-18T16:23:27', - u'disk_format': "raw", - u'updated_at': u'2012-09-18T16:23:27', - u'properties': {}, - u'min_disk': 0, - u'protected': False, - u'id': u'8d133f6c-38a8-403c-b02c-7071b69b432d', - u'location': None, - u'checksum': None, - u'owner': u'5f8806a76aa34ee8b8fc8397bd154319', - u'is_public': True, - u'deleted_at': None, - u'min_ram': 0, - u'size': 1024}), - type('Image', (object,), - {u'status': u'queued', - u'name': "some name", - u'deleted': False, - u'container_format': None, - u'created_at': u'2012-09-18T16:29:46', - u'disk_format': None, - u'updated_at': u'2012-09-18T16:29:46', - u'properties': {}, - u'min_disk': 0, - u'protected': False, - u'id': u'e753b196-49b4-48e8-8ca5-09ebd9805f40', - u'location': None, - u'checksum': None, - u'owner': u'4c8364fc20184ed7971b76602aa96184', - u'is_public': True, - u'deleted_at': None, - u'min_ram': 0, - u'size': 2048}), -] - -ENDPOINT = 'end://point' - - -class _BaseObject(object): - pass - - -class FakeGlanceClient(object): - class images(object): - pass - - -class TestManager(manager.AgentManager): - - def __init__(self): - super(TestManager, self).__init__() - self.keystone = mock.Mock() - self.keystone.service_catalog.get_endpoints = mock.Mock( - return_value={'image': mock.ANY}) - - -class 
TestImagePollsterPageSize(base.BaseTestCase): - - @staticmethod - def fake_get_glance_client(ksclient, endpoint): - glanceclient = FakeGlanceClient() - glanceclient.images.list = mock.MagicMock(return_value=IMAGE_LIST) - return glanceclient - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def setUp(self): - super(TestImagePollsterPageSize, self).setUp() - self.context = context.get_admin_context() - self.manager = TestManager() - self.useFixture(mockpatch.PatchObject( - glance._Base, 'get_glance_client', - side_effect=self.fake_get_glance_client)) - self.CONF = self.useFixture(fixture_config.Config()).conf - - def _do_test_iter_images(self, page_size=0, length=0): - self.CONF.set_override("glance_page_size", page_size) - images = list(glance.ImagePollster(). - _iter_images(self.manager.keystone, {}, ENDPOINT)) - kwargs = {} - if page_size > 0: - kwargs['page_size'] = page_size - FakeGlanceClient.images.list.assert_called_with( - filters={'is_public': None}, **kwargs) - self.assertEqual(length, len(images)) - - def test_page_size(self): - self._do_test_iter_images(100, 4) - - def test_page_size_default(self): - self._do_test_iter_images(length=4) - - def test_page_size_negative_number(self): - self._do_test_iter_images(-1, 4) - - -class TestImagePollster(base.BaseTestCase): - - @staticmethod - def fake_get_glance_client(ksclient, endpoint): - glanceclient = _BaseObject() - setattr(glanceclient, "images", _BaseObject()) - setattr(glanceclient.images, - "list", lambda *args, **kwargs: iter(IMAGE_LIST)) - return glanceclient - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def setUp(self): - super(TestImagePollster, self).setUp() - self.context = context.get_admin_context() - self.manager = TestManager() - self.useFixture(mockpatch.PatchObject( - glance._Base, 'get_glance_client', - side_effect=self.fake_get_glance_client)) - - def test_default_discovery(self): - pollster = glance.ImagePollster() - self.assertEqual('endpoint:image', pollster.default_discovery) - - def test_iter_images(self): - # Tests whether the iter_images method returns a unique image - # list when there is nothing in the cache - images = list(glance.ImagePollster(). - _iter_images(self.manager.keystone, {}, ENDPOINT)) - self.assertEqual(len(set(image.id for image in images)), len(images)) - - def test_iter_images_cached(self): - # Tests whether the iter_images method returns the values from - # the cache - cache = {'%s-images' % ENDPOINT: []} - images = list(glance.ImagePollster(). 
- _iter_images(self.manager.keystone, cache, - ENDPOINT)) - self.assertEqual([], images) - - def test_image(self): - samples = list(glance.ImagePollster().get_samples(self.manager, {}, - [ENDPOINT])) - self.assertEqual(4, len(samples)) - for sample in samples: - self.assertEqual(1, sample.volume) - - def test_image_size(self): - samples = list(glance.ImageSizePollster().get_samples(self.manager, - {}, - [ENDPOINT])) - self.assertEqual(4, len(samples)) - for image in IMAGE_LIST: - self.assertTrue( - any(map(lambda sample: sample.volume == image.size, - samples))) - - def test_image_get_sample_names(self): - samples = list(glance.ImagePollster().get_samples(self.manager, {}, - [ENDPOINT])) - self.assertEqual(set(['image']), set([s.name for s in samples])) - - def test_image_size_get_sample_names(self): - samples = list(glance.ImageSizePollster().get_samples(self.manager, - {}, - [ENDPOINT])) - self.assertEqual(set(['image.size']), set([s.name for s in samples])) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/image/test_notifications.py ceilometer-5.0.0~b3/ceilometer/tests/image/test_notifications.py --- ceilometer-5.0.0~b2/ceilometer/tests/image/test_notifications.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/image/test_notifications.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,197 +0,0 @@ -# -# Copyright 2012 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import datetime - -import mock -from oslotest import base - -from ceilometer.image import notifications -from ceilometer import sample - - -def fake_uuid(x): - return '%s-%s-%s-%s' % (x * 8, x * 4, x * 4, x * 12) - - -NOW = datetime.datetime.isoformat(datetime.datetime.utcnow()) - -NOTIFICATION_SEND = { - u'event_type': u'image.send', - u'timestamp': NOW, - u'message_id': fake_uuid('a'), - u'priority': u'INFO', - u'publisher_id': u'images.example.com', - u'payload': {u'receiver_tenant_id': fake_uuid('b'), - u'destination_ip': u'1.2.3.4', - u'bytes_sent': 42, - u'image_id': fake_uuid('c'), - u'receiver_user_id': fake_uuid('d'), - u'owner_id': fake_uuid('e')} -} - -IMAGE_META = {u'status': u'saving', - u'name': u'fake image #3', - u'deleted': False, - u'container_format': u'ovf', - u'created_at': u'2012-09-18T10:13:44.571370', - u'disk_format': u'vhd', - u'updated_at': u'2012-09-18T10:13:44.623120', - u'properties': {u'key2': u'value2', - u'key1': u'value1'}, - u'min_disk': 0, - u'protected': False, - u'id': fake_uuid('c'), - u'location': None, - u'checksum': u'd990432ef91afef3ad9dbf4a975d3365', - u'owner': "fake", - u'is_public': False, - u'deleted_at': None, - u'min_ram': 0, - u'size': 19} - - -NOTIFICATION_UPDATE = {"message_id": "0c65cb9c-018c-11e2-bc91-5453ed1bbb5f", - "publisher_id": "images.example.com", - "event_type": "image.update", - "priority": "info", - "payload": IMAGE_META, - "timestamp": NOW} - - -NOTIFICATION_UPLOAD = {"message_id": "0c65cb9c-018c-11e2-bc91-5453ed1bbb5f", - "publisher_id": "images.example.com", - "event_type": "image.upload", - "priority": "info", - "payload": IMAGE_META, - "timestamp": NOW} - - -NOTIFICATION_DELETE = {"message_id": "0c65cb9c-018c-11e2-bc91-5453ed1bbb5f", - "publisher_id": "images.example.com", - "event_type": "image.delete", - "priority": "info", - "payload": IMAGE_META, - "timestamp": NOW} - - -class TestNotification(base.BaseTestCase): - - def _verify_common_counter(self, c, name, volume): - self.assertIsNotNone(c) - self.assertEqual(name, c.name) - self.assertEqual(fake_uuid('c'), c.resource_id) - self.assertEqual(NOW, c.timestamp) - self.assertEqual(volume, c.volume) - metadata = c.resource_metadata - self.assertEqual(u'images.example.com', metadata.get('host')) - - def test_image_download(self): - handler = notifications.ImageDownload(mock.Mock()) - counters = list(handler.process_notification(NOTIFICATION_SEND)) - self.assertEqual(1, len(counters)) - download = counters[0] - self._verify_common_counter(download, 'image.download', 42) - self.assertEqual(fake_uuid('d'), download.user_id) - self.assertEqual(fake_uuid('b'), download.project_id) - self.assertEqual(sample.TYPE_DELTA, download.type) - - def test_image_serve(self): - handler = notifications.ImageServe(mock.Mock()) - counters = list(handler.process_notification(NOTIFICATION_SEND)) - self.assertEqual(1, len(counters)) - serve = counters[0] - self._verify_common_counter(serve, 'image.serve', 42) - self.assertEqual(fake_uuid('e'), serve.project_id) - self.assertEqual(fake_uuid('d'), - serve.resource_metadata.get('receiver_user_id')) - self.assertEqual(fake_uuid('b'), - serve.resource_metadata.get('receiver_tenant_id')) - self.assertEqual(sample.TYPE_DELTA, serve.type) - - def test_image_crud_on_update(self): - handler = notifications.ImageCRUD(mock.Mock()) - counters = list(handler.process_notification(NOTIFICATION_UPDATE)) - self.assertEqual(1, len(counters)) - update = counters[0] - self._verify_common_counter(update, 'image.update', 1) - self.assertEqual(sample.TYPE_DELTA, 
update.type)
-
-    def test_image_on_update(self):
-        handler = notifications.Image(mock.Mock())
-        counters = list(handler.process_notification(NOTIFICATION_UPDATE))
-        self.assertEqual(1, len(counters))
-        update = counters[0]
-        self._verify_common_counter(update, 'image', 1)
-        self.assertEqual(sample.TYPE_GAUGE, update.type)
-
-    def test_image_size_on_update(self):
-        handler = notifications.ImageSize(mock.Mock())
-        counters = list(handler.process_notification(NOTIFICATION_UPDATE))
-        self.assertEqual(1, len(counters))
-        update = counters[0]
-        self._verify_common_counter(update, 'image.size',
-                                    IMAGE_META['size'])
-        self.assertEqual(sample.TYPE_GAUGE, update.type)
-
-    def test_image_crud_on_upload(self):
-        handler = notifications.ImageCRUD(mock.Mock())
-        counters = list(handler.process_notification(NOTIFICATION_UPLOAD))
-        self.assertEqual(1, len(counters))
-        upload = counters[0]
-        self._verify_common_counter(upload, 'image.upload', 1)
-        self.assertEqual(sample.TYPE_DELTA, upload.type)
-
-    def test_image_on_upload(self):
-        handler = notifications.Image(mock.Mock())
-        counters = list(handler.process_notification(NOTIFICATION_UPLOAD))
-        self.assertEqual(1, len(counters))
-        upload = counters[0]
-        self._verify_common_counter(upload, 'image', 1)
-        self.assertEqual(sample.TYPE_GAUGE, upload.type)
-
-    def test_image_size_on_upload(self):
-        handler = notifications.ImageSize(mock.Mock())
-        counters = list(handler.process_notification(NOTIFICATION_UPLOAD))
-        self.assertEqual(1, len(counters))
-        upload = counters[0]
-        self._verify_common_counter(upload, 'image.size',
-                                    IMAGE_META['size'])
-        self.assertEqual(sample.TYPE_GAUGE, upload.type)
-
-    def test_image_crud_on_delete(self):
-        handler = notifications.ImageCRUD(mock.Mock())
-        counters = list(handler.process_notification(NOTIFICATION_DELETE))
-        self.assertEqual(1, len(counters))
-        delete = counters[0]
-        self._verify_common_counter(delete, 'image.delete', 1)
-        self.assertEqual(sample.TYPE_DELTA, delete.type)
-
-    def test_image_on_delete(self):
-        handler = notifications.Image(mock.Mock())
-        counters = list(handler.process_notification(NOTIFICATION_DELETE))
-        self.assertEqual(1, len(counters))
-        delete = counters[0]
-        self._verify_common_counter(delete, 'image', 1)
-        self.assertEqual(sample.TYPE_GAUGE, delete.type)
-
-    def test_image_size_on_delete(self):
-        handler = notifications.ImageSize(mock.Mock())
-        counters = list(handler.process_notification(NOTIFICATION_DELETE))
-        self.assertEqual(1, len(counters))
-        delete = counters[0]
-        self._verify_common_counter(delete, 'image.size',
-                                    IMAGE_META['size'])
-        self.assertEqual(sample.TYPE_GAUGE, delete.type)
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/integration/gabbi/gabbits-live/autoscaling.yaml ceilometer-5.0.0~b3/ceilometer/tests/integration/gabbi/gabbits-live/autoscaling.yaml
--- ceilometer-5.0.0~b2/ceilometer/tests/integration/gabbi/gabbits-live/autoscaling.yaml	1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/integration/gabbi/gabbits-live/autoscaling.yaml	2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,175 @@
+defaults:
+    request_headers:
+        x-auth-token: $ENVIRON['ADMIN_TOKEN']
+
+tests:
+    - name: list alarms none
+      desc: Lists alarms, none yet exist
+      url: $ENVIRON['AODH_SERVICE_URL']/v2/alarms
+      method: GET
+      response_strings:
+          - "[]"
+
+    - name: list servers none
+      desc: List servers, none yet exist
+      url: $ENVIRON['NOVA_SERVICE_URL']/servers
+      method: GET
+      response_strings:
+          - "[]"
+
+    - name: create stack
+      desc: Create an autoscaling stack
+      url: $ENVIRON['HEAT_SERVICE_URL']/stacks
+      method: POST
+      request_headers:
+          content-type: application/json
+      data: <@create_stack.json
+      status: 201
+
+    - name: waiting for stack creation
+      desc: Wait for the second event on the stack resource; it can be a success or a failure
+      url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test/events?resource_name=integration_test
+      redirects: true
+      method: GET
+      status: 200
+      poll:
+          count: 300
+          delay: 1
+      response_json_paths:
+          $.events[1].resource_name: integration_test
+
+    - name: control stack status
+      desc: Checks the stack has been created successfully
+      url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test
+      redirects: true
+      method: GET
+      status: 200
+      poll:
+          count: 5
+          delay: 1
+      response_json_paths:
+          $.stack.stack_status: "CREATE_COMPLETE"
+
+    - name: list servers
+      desc: Wait for the autoscaling stack to grow to two servers
+      url: $ENVIRON['NOVA_SERVICE_URL']/servers/detail
+      method: GET
+      poll:
+          count: 600
+          delay: 1
+      response_json_paths:
+          $.servers[0].metadata.'metering.server_group': $RESPONSE['$.stack.id']
+          $.servers[1].metadata.'metering.server_group': $RESPONSE['$.stack.id']
+          $.servers[0].status: ACTIVE
+          $.servers[1].status: ACTIVE
+          $.servers.`len`: 2
+
+    - name: check gnocchi resources
+      desc: Check that the gnocchi resources for these two servers exist
+      url: $ENVIRON['GNOCCHI_SERVICE_URL']/v1/resource/instance
+      method: GET
+      poll:
+          count: 30
+          delay: 1
+      response_strings:
+          - '"id": "$RESPONSE["$.servers[0].id"]"'
+          - '"id": "$RESPONSE["$.servers[1].id"]"'
+
+    - name: check alarm
+      desc: Check the aodh alarm and its state
+      url: $ENVIRON['AODH_SERVICE_URL']/v2/alarms
+      method: GET
+      poll:
+          count: 30
+          delay: 1
+      response_strings:
+          - "integration_test-cpu_alarm_high-"
+      response_json_paths:
+          $[0].state: alarm
+
+    - name: get stack location for update
+      desc: Get the stack location
+      url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test
+      method: GET
+      status: 302
+
+    - name: update stack
+      desc: Update an autoscaling stack
+      url: $LOCATION
+      method: PUT
+      request_headers:
+          content-type: application/json
+      data: <@update_stack.json
+      status: 202
+
+    - name: waiting for stack update
+      desc: Wait for the third event on the stack resource; it can be a success or a failure
+      url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test/events?resource_name=integration_test
+      redirects: true
+      method: GET
+      status: 200
+      poll:
+          count: 300
+          delay: 1
+      response_json_paths:
+          $.events[3].resource_name: integration_test
+
+    - name: control stack status
+      desc: Checks the stack has been updated successfully
+      url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test
+      redirects: true
+      method: GET
+      status: 200
+      poll:
+          count: 5
+          delay: 1
+      response_json_paths:
+          $.stack.stack_status: "UPDATE_COMPLETE"
+
+    - name: list servers
+      desc: Wait for the autoscaling stack to shrink to one server
+      url: $ENVIRON['NOVA_SERVICE_URL']/servers/detail
+      method: GET
+      poll:
+          count: 600
+          delay: 1
+      response_json_paths:
+          $.servers[0].metadata.'metering.server_group': $RESPONSE['$.stack.id']
+          $.servers[0].status: ACTIVE
+          $.servers.`len`: 1
+
+    - name: get stack location
+      desc: Get the stack location
+      url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test
+      method: GET
+      status: 302
+
+    - name: delete stack
+      desc: Delete the stack
+      url: $LOCATION
+      method: DELETE
+      status: 204
+
+    - name: get deleted stack
+      desc: Check the stack has been deleted
+      url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test
+      redirects: true
+      method: GET
+      poll:
+          count: 240
+          delay: 1
+      status: 404
+
+    - name: list alarms deleted
+      desc: List alarms, none remain
+      url: $ENVIRON['AODH_SERVICE_URL']/v2/alarms
+      method: GET
+      response_strings:
+          - "[]"
+
+    - name: list servers deleted
+      desc: List servers, none remain
+      url: $ENVIRON['NOVA_SERVICE_URL']/servers
+      method: GET
+      response_strings:
+          - "[]"
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/integration/gabbi/gabbits-live/create_stack.json ceilometer-5.0.0~b3/ceilometer/tests/integration/gabbi/gabbits-live/create_stack.json
--- ceilometer-5.0.0~b2/ceilometer/tests/integration/gabbi/gabbits-live/create_stack.json	1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/integration/gabbi/gabbits-live/create_stack.json	2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,67 @@
+{
+    "stack_name": "integration_test",
+    "template": {
+        "heat_template_version": "2013-05-23",
+        "description": "Integration Test AutoScaling with heat+ceilometer+gnocchi+aodh",
+        "resources": {
+            "asg": {
+                "type": "OS::Heat::AutoScalingGroup",
+                "properties": {
+                    "min_size": 1,
+                    "max_size": 2,
+                    "resource": {
+                        "type": "OS::Nova::Server",
+                        "properties": {
+                            "networks": [{ "network": "private" }],
+                            "flavor": "m1.tiny",
+                            "image": "$ENVIRON['GLANCE_IMAGE_NAME']",
+                            "metadata": {
+                                "metering.server_group": { "get_param": "OS::stack_id" }
+                            },
+                            "user_data_format": "RAW",
+                            "user_data": {"Fn::Join": ["", [
+                                "#!/bin/sh\n",
+                                "echo 'Loading CPU'\n",
+                                "set -v\n",
+                                "cat /dev/urandom > /dev/null\n"
+                            ]]}
+                        }
+                    }
+                }
+            },
+            "web_server_scaleup_policy": {
+                "type": "OS::Heat::ScalingPolicy",
+                "properties": {
+                    "adjustment_type": "change_in_capacity",
+                    "auto_scaling_group_id": { "get_resource": "asg" },
+                    "cooldown": 2,
+                    "scaling_adjustment": 1
+                }
+            },
+            "cpu_alarm_high": {
+                "type": "OS::Ceilometer::GnocchiAggregationByResourcesAlarm",
+                "properties": {
+                    "description": "Scale-up if the mean CPU > 10% on 1 minute",
+                    "metric": "cpu_util",
+                    "aggregation_method": "mean",
+                    "granularity": 60,
+                    "evaluation_periods": 1,
+                    "threshold": 10,
+                    "comparison_operator": "gt",
+                    "alarm_actions": [
+                        { "get_attr": [ "web_server_scaleup_policy", "alarm_url" ] }
+                    ],
+                    "resource_type": "instance",
+                    "query": {
+                        "str_replace": {
+                            "template": "{\"and\": [{\"=\": {\"server_group\": \"stack_id\"}}, {\"=\": {\"ended_at\": null}}]}",
+                            "params": {
+                                "stack_id": { "get_param": "OS::stack_id" }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/integration/gabbi/gabbits-live/update_stack.json ceilometer-5.0.0~b3/ceilometer/tests/integration/gabbi/gabbits-live/update_stack.json
--- ceilometer-5.0.0~b2/ceilometer/tests/integration/gabbi/gabbits-live/update_stack.json	1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/integration/gabbi/gabbits-live/update_stack.json	2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,66 @@
+{
+    "template": {
+        "heat_template_version": "2013-05-23",
+        "description": "Integration Test AutoScaling with heat+ceilometer+gnocchi+aodh",
+        "resources": {
+            "asg": {
+                "type": "OS::Heat::AutoScalingGroup",
+                "properties": {
+                    "min_size": 1,
+                    "max_size": 2,
+                    "resource": {
+                        "type": "OS::Nova::Server",
+                        "properties": {
+                            "networks": [{ "network": "private" }],
+                            "flavor": "m1.tiny",
+                            "image": "$ENVIRON['GLANCE_IMAGE_NAME']",
+                            "metadata": {
+                                "metering.server_group": { "get_param": "OS::stack_id" }
+                            },
+                            "user_data_format": "RAW",
+                            "user_data": {"Fn::Join": ["", [
+                                "#!/bin/sh\n",
+                                "echo 'Loading CPU'\n",
+                                "set -v\n",
+                                "cat /dev/urandom > /dev/null\n"
+                            ]]}
+                        }
+                    }
+                }
+            },
+            "web_server_scaledown_policy": {
+                "type": "OS::Heat::ScalingPolicy",
+                "properties": {
+                    "adjustment_type": "change_in_capacity",
+                    "auto_scaling_group_id": { "get_resource": "asg" },
+                    "cooldown": 2,
+                    "scaling_adjustment": -1
+                }
+            },
+            "cpu_alarm_high": {
+                "type": "OS::Ceilometer::GnocchiAggregationByResourcesAlarm",
+                "properties": {
+                    "description": "Scale-down if the mean CPU > 10% on 1 minute",
+                    "metric": "cpu_util",
+                    "aggregation_method": "mean",
+                    "granularity": 60,
+                    "evaluation_periods": 1,
+                    "threshold": 10,
+                    "comparison_operator": "gt",
+                    "alarm_actions": [
+                        { "get_attr": [ "web_server_scaledown_policy", "alarm_url" ] }
+                    ],
+                    "resource_type": "instance",
+                    "query": {
+                        "str_replace": {
+                            "template": "{\"and\": [{\"=\": {\"server_group\": \"stack_id\"}}, {\"=\": {\"ended_at\": null}}]}",
+                            "params": {
+                                "stack_id": { "get_param": "OS::stack_id" }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/integration/gabbi/test_gabbi_live.py ceilometer-5.0.0~b3/ceilometer/tests/integration/gabbi/test_gabbi_live.py
--- ceilometer-5.0.0~b2/ceilometer/tests/integration/gabbi/test_gabbi_live.py	1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/integration/gabbi/test_gabbi_live.py	2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,40 @@
+#
+# Copyright 2015 Red Hat. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""A test module to exercise the telemetry integration tests with gabbi."""
+
+import os
+
+from gabbi import driver
+
+
+TESTS_DIR = 'gabbits-live'
+
+
+def load_tests(loader, tests, pattern):
+    """Provide a TestSuite to the discovery process."""
+    NEEDED_ENV = ["AODH_SERVICE_URL", "GNOCCHI_SERVICE_URL",
+                  "HEAT_SERVICE_URL", "NOVA_SERVICE_URL",
+                  "GLANCE_IMAGE_NAME", "ADMIN_TOKEN"]
+
+    for env_variable in NEEDED_ENV:
+        if not os.getenv(env_variable):
+            if os.getenv("GABBI_LIVE_FAIL_IF_NO_TEST"):
+                raise RuntimeError('%s is not set' % env_variable)
+            else:
+                return
+
+    test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR)
+    return driver.build_tests(test_dir, loader, host="localhost", port=8041)
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/integration/hooks/post_test_hook.sh ceilometer-5.0.0~b3/ceilometer/tests/integration/hooks/post_test_hook.sh
--- ceilometer-5.0.0~b2/ceilometer/tests/integration/hooks/post_test_hook.sh	1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/integration/hooks/post_test_hook.sh	2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,61 @@
+#!/bin/bash -xe
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This script is executed inside the post_test_hook function in devstack gate.
+
+function generate_testr_results {
+    if [ -f .testrepository/0 ]; then
+        sudo .tox/functional/bin/testr last --subunit > $WORKSPACE/testrepository.subunit
+        sudo mv $WORKSPACE/testrepository.subunit $BASE/logs/testrepository.subunit
+        sudo /usr/os-testr-env/bin/subunit2html $BASE/logs/testrepository.subunit $BASE/logs/testr_results.html
+        sudo gzip -9 $BASE/logs/testrepository.subunit
+        sudo gzip -9 $BASE/logs/testr_results.html
+        sudo chown jenkins:jenkins $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz
+        sudo chmod a+r $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz
+    fi
+}
+
+# If we're running in the gate, find our keystone endpoint to give to the
+# gabbi tests and do a chown. Otherwise the existing environment
+# should provide URL and TOKEN.
+if [ -d $BASE/new/devstack ]; then
+    export CEILOMETER_DIR="$BASE/new/ceilometer"
+    STACK_USER=stack
+    sudo chown -R $STACK_USER:stack $CEILOMETER_DIR
+    source $BASE/new/devstack/openrc admin admin
+    # Go to the ceilometer dir
+    cd $CEILOMETER_DIR
+fi
+
+openstack catalog list
+export AODH_SERVICE_URL=$(openstack catalog show alarming -c endpoints -f value | awk '/publicURL/{print $2}')
+export GNOCCHI_SERVICE_URL=$(openstack catalog show metric -c endpoints -f value | awk '/publicURL/{print $2}')
+export HEAT_SERVICE_URL=$(openstack catalog show orchestration -c endpoints -f value | awk '/publicURL/{print $2}')
+export NOVA_SERVICE_URL=$(openstack catalog show compute -c endpoints -f value | awk '/publicURL/{print $2}')
+export GLANCE_IMAGE_NAME=$(openstack image list | awk '/ cirros.*uec /{print $4}')
+export ADMIN_TOKEN=$(openstack token issue -c id -f value)
+
+# Run tests
+echo "Running telemetry integration test suite"
+set +e
+
+sudo -E -H -u ${STACK_USER:-${USER}} tox -eintegration
+EXIT_CODE=$?
+set -e
+
+# Collect and parse result
+if [ -n "$CEILOMETER_DIR" ]; then
+    generate_testr_results
+fi
+exit $EXIT_CODE
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/ipmi/notifications/ipmi_test_data.py ceilometer-5.0.0~b3/ceilometer/tests/ipmi/notifications/ipmi_test_data.py
--- ceilometer-5.0.0~b2/ceilometer/tests/ipmi/notifications/ipmi_test_data.py	2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/ipmi/notifications/ipmi_test_data.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,795 +0,0 @@
-#
-# Copyright 2014 Red Hat, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Sample data for test_ipmi.
-
-This data is provided as a sample of the data expected from the ipmitool
-driver in the Ironic project, which is the publisher of the notifications
-being tested.
-""" - - -TEMPERATURE_DATA = { - 'DIMM GH VR Temp (0x3b)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Sensor Reading': '26 (+/- 0.500) degrees C', - 'Entity ID': '20.6 (Power Module)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '95.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '105.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '100.000', - 'Sensor ID': 'DIMM GH VR Temp (0x3b)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, - 'CPU1 VR Temp (0x36)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Sensor Reading': '32 (+/- 0.500) degrees C', - 'Entity ID': '20.1 (Power Module)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '95.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '105.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '100.000', - 'Sensor ID': 'CPU1 VR Temp (0x36)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, - 'DIMM EF VR Temp (0x3a)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Sensor Reading': '26 (+/- 0.500) degrees C', - 'Entity ID': '20.5 (Power Module)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '95.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '105.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '100.000', - 'Sensor ID': 'DIMM EF VR Temp (0x3a)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, - 'CPU2 VR Temp (0x37)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Sensor Reading': '31 (+/- 0.500) degrees C', - 'Entity ID': '20.2 (Power Module)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '95.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '105.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '100.000', - 'Sensor ID': 'CPU2 VR Temp (0x37)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, - 'Ambient Temp (0x32)': { - 'Status': 'ok', - 'Sensor Reading': '25 (+/- 0) degrees C', - 'Entity ID': '12.1 (Front Panel Board)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Event Message Control': 'Per-threshold', - 'Assertion Events': '', - 'Upper non-critical': '43.000', - 'Deassertions Enabled': 'unc+ 
ucr+ unr+', - 'Upper non-recoverable': '50.000', - 'Positive Hysteresis': '4.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '46.000', - 'Sensor ID': 'Ambient Temp (0x32)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '25.000' - }, - 'Mezz Card Temp (0x35)': { - 'Status': 'Disabled', - 'Sensor Reading': 'Disabled', - 'Entity ID': '44.1 (I/O Module)', - 'Event Message Control': 'Per-threshold', - 'Upper non-critical': '70.000', - 'Upper non-recoverable': '85.000', - 'Positive Hysteresis': '4.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '80.000', - 'Sensor ID': 'Mezz Card Temp (0x35)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '25.000' - }, - 'PCH Temp (0x3c)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Sensor Reading': '46 (+/- 0.500) degrees C', - 'Entity ID': '45.1 (Processor/IO Module)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '93.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '103.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '98.000', - 'Sensor ID': 'PCH Temp (0x3c)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, - 'DIMM CD VR Temp (0x39)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Sensor Reading': '27 (+/- 0.500) degrees C', - 'Entity ID': '20.4 (Power Module)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '95.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '105.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '100.000', - 'Sensor ID': 'DIMM CD VR Temp (0x39)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, - 'PCI Riser 2 Temp (0x34)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Sensor Reading': '30 (+/- 0) degrees C', - 'Entity ID': '16.2 (System Internal Expansion Board)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '70.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '85.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '80.000', - 'Sensor ID': 'PCI Riser 2 Temp (0x34)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, - 'DIMM AB VR Temp 
(0x38)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Sensor Reading': '28 (+/- 0.500) degrees C', - 'Entity ID': '20.3 (Power Module)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '95.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '105.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '100.000', - 'Sensor ID': 'DIMM AB VR Temp (0x38)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, - 'PCI Riser 1 Temp (0x33)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Sensor Reading': '38 (+/- 0) degrees C', - 'Entity ID': '16.1 (System Internal Expansion Board)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '70.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '85.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '80.000', - 'Sensor ID': 'PCI Riser 1 Temp (0x33)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, -} - - -CURRENT_DATA = { - 'Avg Power (0x2e)': { - 'Status': 'ok', - 'Sensor Reading': '130 (+/- 0) Watts', - 'Entity ID': '21.0 (Power Management)', - 'Assertions Enabled': '', - 'Event Message Control': 'Per-threshold', - 'Readable Thresholds': 'No Thresholds', - 'Positive Hysteresis': 'Unspecified', - 'Sensor Type (Analog)': 'Current', - 'Negative Hysteresis': 'Unspecified', - 'Maximum sensor range': 'Unspecified', - 'Sensor ID': 'Avg Power (0x2e)', - 'Assertion Events': '', - 'Minimum sensor range': '2550.000', - 'Settable Thresholds': 'No Thresholds' - } -} - - -FAN_DATA = { - 'Fan 4A Tach (0x46)': { - 'Status': 'ok', - 'Sensor Reading': '6900 (+/- 0) RPM', - 'Entity ID': '29.4 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2580.000', - 'Positive Hysteresis': '120.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '15300.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '120.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 4A Tach (0x46)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '4020.000' - }, - 'Fan 5A Tach (0x48)': { - 'Status': 'ok', - 'Sensor Reading': '7140 (+/- 0) RPM', - 'Entity ID': '29.5 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2580.000', - 'Positive Hysteresis': '120.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '15300.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '120.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 5A Tach (0x48)', - 'Settable Thresholds': '', - 'Minimum sensor 
range': 'Unspecified', - 'Nominal Reading': '4020.000' - }, - 'Fan 3A Tach (0x44)': { - 'Status': 'ok', - 'Sensor Reading': '6900 (+/- 0) RPM', - 'Entity ID': '29.3 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2580.000', - 'Positive Hysteresis': '120.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '15300.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '120.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 3A Tach (0x44)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '4020.000' - }, - 'Fan 1A Tach (0x40)': { - 'Status': 'ok', - 'Sensor Reading': '6960 (+/- 0) RPM', - 'Entity ID': '29.1 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2580.000', - 'Positive Hysteresis': '120.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '15300.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '120.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 1A Tach (0x40)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '4020.000' - }, - 'Fan 3B Tach (0x45)': { - 'Status': 'ok', - 'Sensor Reading': '7104 (+/- 0) RPM', - 'Entity ID': '29.3 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2752.000', - 'Positive Hysteresis': '128.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '16320.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '128.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 3B Tach (0x45)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '3968.000' - }, - 'Fan 2A Tach (0x42)': { - 'Status': 'ok', - 'Sensor Reading': '7080 (+/- 0) RPM', - 'Entity ID': '29.2 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2580.000', - 'Positive Hysteresis': '120.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '15300.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '120.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 2A Tach (0x42)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '4020.000' - }, - 'Fan 4B Tach (0x47)': { - 'Status': 'ok', - 'Sensor Reading': '7488 (+/- 0) RPM', - 'Entity ID': '29.4 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2752.000', - 'Positive Hysteresis': '128.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '16320.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '128.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 4B Tach (0x47)', - 'Settable Thresholds': '', - 'Minimum sensor range': 
'Unspecified', - 'Nominal Reading': '3968.000' - }, - 'Fan 2B Tach (0x43)': { - 'Status': 'ok', - 'Sensor Reading': '7168 (+/- 0) RPM', - 'Entity ID': '29.2 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2752.000', - 'Positive Hysteresis': '128.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '16320.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '128.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 2B Tach (0x43)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '3968.000' - }, - 'Fan 5B Tach (0x49)': { - 'Status': 'ok', - 'Sensor Reading': '7296 (+/- 0) RPM', - 'Entity ID': '29.5 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2752.000', - 'Positive Hysteresis': '128.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '16320.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '128.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 5B Tach (0x49)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '3968.000' - }, - 'Fan 1B Tach (0x41)': { - 'Status': 'ok', - 'Sensor Reading': '7296 (+/- 0) RPM', - 'Entity ID': '29.1 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2752.000', - 'Positive Hysteresis': '128.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '16320.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '128.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 1B Tach (0x41)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '3968.000' - }, - 'Fan 6B Tach (0x4b)': { - 'Status': 'ok', - 'Sensor Reading': '7616 (+/- 0) RPM', - 'Entity ID': '29.6 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2752.000', - 'Positive Hysteresis': '128.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '16320.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '128.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 6B Tach (0x4b)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '3968.000' - }, - 'Fan 6A Tach (0x4a)': { - 'Status': 'ok', - 'Sensor Reading': '7080 (+/- 0) RPM', - 'Entity ID': '29.6 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2580.000', - 'Positive Hysteresis': '120.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '15300.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '120.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 6A Tach (0x4a)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 
'Nominal Reading': '4020.000' - } -} - - -VOLTAGE_DATA = { - 'Planar 12V (0x18)': { - 'Status': 'ok', - 'Sensor Reading': '12.312 (+/- 0) Volts', - 'Entity ID': '7.1 (System Board)', - 'Assertions Enabled': 'lcr- ucr+', - 'Event Message Control': 'Per-threshold', - 'Assertion Events': '', - 'Maximum sensor range': 'Unspecified', - 'Positive Hysteresis': '0.108', - 'Deassertions Enabled': 'lcr- ucr+', - 'Sensor Type (Analog)': 'Voltage', - 'Lower critical': '10.692', - 'Negative Hysteresis': '0.108', - 'Threshold Read Mask': 'lcr ucr', - 'Upper critical': '13.446', - 'Readable Thresholds': 'lcr ucr', - 'Sensor ID': 'Planar 12V (0x18)', - 'Settable Thresholds': 'lcr ucr', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '12.042' - }, - 'Planar 3.3V (0x16)': { - 'Status': 'ok', - 'Sensor Reading': '3.309 (+/- 0) Volts', - 'Entity ID': '7.1 (System Board)', - 'Assertions Enabled': 'lcr- ucr+', - 'Event Message Control': 'Per-threshold', - 'Assertion Events': '', - 'Maximum sensor range': 'Unspecified', - 'Positive Hysteresis': '0.028', - 'Deassertions Enabled': 'lcr- ucr+', - 'Sensor Type (Analog)': 'Voltage', - 'Lower critical': '3.039', - 'Negative Hysteresis': '0.028', - 'Threshold Read Mask': 'lcr ucr', - 'Upper critical': '3.564', - 'Readable Thresholds': 'lcr ucr', - 'Sensor ID': 'Planar 3.3V (0x16)', - 'Settable Thresholds': 'lcr ucr', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '3.309' - }, - 'Planar VBAT (0x1c)': { - 'Status': 'ok', - 'Sensor Reading': '3.137 (+/- 0) Volts', - 'Entity ID': '7.1 (System Board)', - 'Assertions Enabled': 'lnc- lcr-', - 'Event Message Control': 'Per-threshold', - 'Assertion Events': '', - 'Readable Thresholds': 'lcr lnc', - 'Positive Hysteresis': '0.025', - 'Deassertions Enabled': 'lnc- lcr-', - 'Sensor Type (Analog)': 'Voltage', - 'Lower critical': '2.095', - 'Negative Hysteresis': '0.025', - 'Lower non-critical': '2.248', - 'Maximum sensor range': 'Unspecified', - 'Sensor ID': 'Planar VBAT (0x1c)', - 'Settable Thresholds': 'lcr lnc', - 'Threshold Read Mask': 'lcr lnc', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '3.010' - }, - 'Planar 5V (0x17)': { - 'Status': 'ok', - 'Sensor Reading': '5.062 (+/- 0) Volts', - 'Entity ID': '7.1 (System Board)', - 'Assertions Enabled': 'lcr- ucr+', - 'Event Message Control': 'Per-threshold', - 'Assertion Events': '', - 'Maximum sensor range': 'Unspecified', - 'Positive Hysteresis': '0.045', - 'Deassertions Enabled': 'lcr- ucr+', - 'Sensor Type (Analog)': 'Voltage', - 'Lower critical': '4.475', - 'Negative Hysteresis': '0.045', - 'Threshold Read Mask': 'lcr ucr', - 'Upper critical': '5.582', - 'Readable Thresholds': 'lcr ucr', - 'Sensor ID': 'Planar 5V (0x17)', - 'Settable Thresholds': 'lcr ucr', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '4.995' - } -} - - -SENSOR_DATA = { - 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', - 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', - 'payload': { - 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', - 'timestamp': '20140223134852', - 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', - 'event_type': 'hardware.ipmi.metrics.update', - 'payload': { - 'Temperature': TEMPERATURE_DATA, - 'Current': CURRENT_DATA, - 'Fan': FAN_DATA, - 'Voltage': VOLTAGE_DATA - } - } -} - - -EMPTY_PAYLOAD = { - 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', - 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', - 'payload': { - 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', - 'timestamp': 
'20140223134852', - 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', - 'event_type': 'hardware.ipmi.metrics.update', - 'payload': { - } - } -} - - -MISSING_SENSOR = { - 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', - 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', - 'payload': { - 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', - 'timestamp': '20140223134852', - 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', - 'event_type': 'hardware.ipmi.metrics.update', - 'payload': { - 'Temperature': { - 'PCI Riser 1 Temp (0x33)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Entity ID': '16.1 (System Internal Expansion Board)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '70.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '85.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '80.000', - 'Sensor ID': 'PCI Riser 1 Temp (0x33)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, - } - } - } -} - - -BAD_SENSOR = { - 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', - 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', - 'payload': { - 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', - 'timestamp': '20140223134852', - 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', - 'event_type': 'hardware.ipmi.metrics.update', - 'payload': { - 'Temperature': { - 'PCI Riser 1 Temp (0x33)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Sensor Reading': 'some bad stuff', - 'Entity ID': '16.1 (System Internal Expansion Board)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '70.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '85.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '80.000', - 'Sensor ID': 'PCI Riser 1 Temp (0x33)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, - } - } - } -} - - -NO_SENSOR_ID = { - 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', - 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', - 'payload': { - 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', - 'timestamp': '20140223134852', - 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', - 'event_type': 'hardware.ipmi.metrics.update', - 'payload': { - 'Temperature': { - 'PCI Riser 1 Temp (0x33)': { - 'Sensor Reading': '26 C', - }, - } - } - } -} - - -NO_NODE_ID = { - 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', - 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', - 'payload': { - 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', - 'timestamp': '20140223134852', - 'event_type': 'hardware.ipmi.metrics.update', - 'payload': { - 'Temperature': { - 'PCI Riser 1 Temp (0x33)': { - 'Sensor Reading': '26 C', - 'Sensor ID': 'PCI Riser 1 Temp (0x33)', - }, - } - } - } -} diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/ipmi/notifications/test_ironic.py 
ceilometer-5.0.0~b3/ceilometer/tests/ipmi/notifications/test_ironic.py --- ceilometer-5.0.0~b2/ceilometer/tests/ipmi/notifications/test_ironic.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/ipmi/notifications/test_ironic.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,214 +0,0 @@ -# -# Copyright 2014 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for producing IPMI sample messages from notification events. -""" - -import mock -from oslotest import base - -from ceilometer.ipmi.notifications import ironic as ipmi -from ceilometer import sample -from ceilometer.tests.ipmi.notifications import ipmi_test_data - - -class TestNotifications(base.BaseTestCase): - - def test_ipmi_temperature_notification(self): - """Test IPMI Temperature sensor data. - - Based on the above ipmi_test_data the expected sample for a single - temperature reading has:: - - * a resource_id composed from the node_uuid and Sensor ID - * a name composed from 'hardware.ipmi.' and 'temperature' - * a volume from the first chunk of the Sensor Reading - * a unit from the last chunk of the Sensor Reading - * some readings are skipped if the value is 'Disabled' - * metadata with the node id - """ - processor = ipmi.TemperatureSensorNotification(None) - counters = dict([(counter.resource_id, counter) for counter in - processor.process_notification( - ipmi_test_data.SENSOR_DATA)]) - - self.assertEqual(10, len(counters), - 'expected 10 temperature readings') - resource_id = ( - 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-dimm_gh_vr_temp_(0x3b)' - ) - test_counter = counters[resource_id] - self.assertEqual(26.0, test_counter.volume) - self.assertEqual('C', test_counter.unit) - self.assertEqual(sample.TYPE_GAUGE, test_counter.type) - self.assertEqual('hardware.ipmi.temperature', test_counter.name) - self.assertEqual('hardware.ipmi.metrics.update', - test_counter.resource_metadata['event_type']) - self.assertEqual('f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', - test_counter.resource_metadata['node']) - - def test_ipmi_current_notification(self): - """Test IPMI Current sensor data. - - A single current reading is effectively the same as temperature, - modulo "current". - """ - processor = ipmi.CurrentSensorNotification(None) - counters = dict([(counter.resource_id, counter) for counter in - processor.process_notification( - ipmi_test_data.SENSOR_DATA)]) - - self.assertEqual(1, len(counters), 'expected 1 current reading') - resource_id = ( - 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-avg_power_(0x2e)' - ) - test_counter = counters[resource_id] - self.assertEqual(130.0, test_counter.volume) - self.assertEqual('W', test_counter.unit) - self.assertEqual(sample.TYPE_GAUGE, test_counter.type) - self.assertEqual('hardware.ipmi.current', test_counter.name) - - def test_ipmi_fan_notification(self): - """Test IPMI Fan sensor data. - - A single fan reading is effectively the same as temperature, - modulo "fan".
- """ - processor = ipmi.FanSensorNotification(None) - counters = dict([(counter.resource_id, counter) for counter in - processor.process_notification( - ipmi_test_data.SENSOR_DATA)]) - - self.assertEqual(12, len(counters), 'expected 12 fan readings') - resource_id = ( - 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-fan_4a_tach_(0x46)' - ) - test_counter = counters[resource_id] - self.assertEqual(6900.0, test_counter.volume) - self.assertEqual('RPM', test_counter.unit) - self.assertEqual(sample.TYPE_GAUGE, test_counter.type) - self.assertEqual('hardware.ipmi.fan', test_counter.name) - - def test_ipmi_voltage_notification(self): - """Test IPMI Voltage sensor data. - - A single voltage reading is effectively the same as temperature, - modulo "voltage". - """ - processor = ipmi.VoltageSensorNotification(None) - counters = dict([(counter.resource_id, counter) for counter in - processor.process_notification( - ipmi_test_data.SENSOR_DATA)]) - - self.assertEqual(4, len(counters), 'expected 4 volate readings') - resource_id = ( - 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-planar_vbat_(0x1c)' - ) - test_counter = counters[resource_id] - self.assertEqual(3.137, test_counter.volume) - self.assertEqual('V', test_counter.unit) - self.assertEqual(sample.TYPE_GAUGE, test_counter.type) - self.assertEqual('hardware.ipmi.voltage', test_counter.name) - - def test_disabed_skips_metric(self): - """Test that a meter which a disabled volume is skipped.""" - processor = ipmi.TemperatureSensorNotification(None) - counters = dict([(counter.resource_id, counter) for counter in - processor.process_notification( - ipmi_test_data.SENSOR_DATA)]) - - self.assertEqual(10, len(counters), - 'expected 10 temperature readings') - - resource_id = ( - 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-mezz_card_temp_(0x35)' - ) - - self.assertNotIn(resource_id, counters) - - def test_empty_payload_no_metrics_success(self): - processor = ipmi.TemperatureSensorNotification(None) - counters = dict([(counter.resource_id, counter) for counter in - processor.process_notification( - ipmi_test_data.EMPTY_PAYLOAD)]) - - self.assertEqual(0, len(counters), 'expected 0 readings') - - @mock.patch('ceilometer.ipmi.notifications.ironic.LOG') - def test_missing_sensor_data(self, mylog): - processor = ipmi.TemperatureSensorNotification(None) - - messages = [] - mylog.warn = lambda *args: messages.extend(args) - - list(processor.process_notification(ipmi_test_data.MISSING_SENSOR)) - - self.assertEqual( - 'invalid sensor data for ' - 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-pci_riser_1_temp_(0x33): ' - "missing 'Sensor Reading' in payload", - messages[0] - ) - - @mock.patch('ceilometer.ipmi.notifications.ironic.LOG') - def test_sensor_data_malformed(self, mylog): - processor = ipmi.TemperatureSensorNotification(None) - - messages = [] - mylog.warn = lambda *args: messages.extend(args) - - list(processor.process_notification(ipmi_test_data.BAD_SENSOR)) - - self.assertEqual( - 'invalid sensor data for ' - 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-pci_riser_1_temp_(0x33): ' - 'unable to parse sensor reading: some bad stuff', - messages[0] - ) - - @mock.patch('ceilometer.ipmi.notifications.ironic.LOG') - def test_missing_node_uuid(self, mylog): - """Test for desired error message when 'node_uuid' missing. - - Presumably this will never happen given the way the data - is created, but better defensive than dead. 
- """ - processor = ipmi.TemperatureSensorNotification(None) - - messages = [] - mylog.warn = lambda *args: messages.extend(args) - - list(processor.process_notification(ipmi_test_data.NO_NODE_ID)) - - self.assertEqual( - 'invalid sensor data for missing id: missing key in payload: ' - "'node_uuid'", - messages[0] - ) - - @mock.patch('ceilometer.ipmi.notifications.ironic.LOG') - def test_missing_sensor_id(self, mylog): - """Test for desired error message when 'Sensor ID' missing.""" - processor = ipmi.TemperatureSensorNotification(None) - - messages = [] - mylog.warn = lambda *args: messages.extend(args) - - list(processor.process_notification(ipmi_test_data.NO_SENSOR_ID)) - - self.assertEqual( - 'invalid sensor data for missing id: missing key in payload: ' - "'Sensor ID'", - messages[0] - ) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/ipmi/platform/fake_utils.py ceilometer-5.0.0~b3/ceilometer/tests/ipmi/platform/fake_utils.py --- ceilometer-5.0.0~b2/ceilometer/tests/ipmi/platform/fake_utils.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/ipmi/platform/fake_utils.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,120 +0,0 @@ -# Copyright 2014 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import binascii - -from ceilometer.ipmi.platform import exception as nmexcept -from ceilometer.ipmi.platform import intel_node_manager as node_manager -from ceilometer.tests.ipmi.platform import ipmitool_test_data as test_data - - -def get_sensor_status_init(parameter=''): - return (' 01\n', '') - - -def get_sensor_status_uninit(parameter=''): - return (' 00\n', '') - - -def init_sensor_agent(parameter=''): - return (' 00\n', '') - - -def get_nm_version_v2(parameter=''): - return test_data.nm_version_v2 - - -def get_nm_version_v3(parameter=''): - return test_data.nm_version_v3 - - -def sdr_dump(data_file=''): - if data_file == '': - raise ValueError("No file specified for ipmitool sdr dump") - fake_slave_address = '2c' - fake_channel = '60' - hexstr = node_manager.INTEL_PREFIX + fake_slave_address + fake_channel - data = binascii.unhexlify(hexstr) - with open(data_file, 'wb') as bin_fp: - bin_fp.write(data) - - return ('', '') - - -def _execute(funcs, *cmd, **kwargs): - - datas = { - test_data.device_id_cmd: test_data.device_id, - test_data.nm_device_id_cmd: test_data.nm_device_id, - test_data.get_power_cmd: test_data.power_data, - test_data.get_inlet_temp_cmd: test_data.inlet_temperature_data, - test_data.get_outlet_temp_cmd: test_data.outlet_temperature_data, - test_data.get_airflow_cmd: test_data.airflow_data, - test_data.get_cups_index_cmd: test_data.cups_index_data, - test_data.get_cups_util_cmd: test_data.cups_util_data, - test_data.sdr_info_cmd: test_data.sdr_info, - test_data.read_sensor_temperature_cmd: test_data.sensor_temperature, - test_data.read_sensor_voltage_cmd: test_data.sensor_voltage, - test_data.read_sensor_current_cmd: test_data.sensor_current, - test_data.read_sensor_fan_cmd: test_data.sensor_fan, - } - - if cmd[1] == 'sdr' and cmd[2] == 
'dump': - # ipmitool sdr dump /tmp/XXXX - cmd_str = "".join(cmd[:3]) - par_str = cmd[3] - else: - cmd_str = "".join(cmd) - par_str = '' - - try: - return datas[cmd_str] - except KeyError: - return funcs[cmd_str](par_str) - - -def execute_with_nm_v3(*cmd, **kwargs): - """test version of execute on Node Manager V3.0 platform.""" - - funcs = {test_data.sensor_status_cmd: get_sensor_status_init, - test_data.init_sensor_cmd: init_sensor_agent, - test_data.sdr_dump_cmd: sdr_dump, - test_data.nm_version_cmd: get_nm_version_v3} - - return _execute(funcs, *cmd, **kwargs) - - -def execute_with_nm_v2(*cmd, **kwargs): - """test version of execute on Node Manager V2.0 platform.""" - - funcs = {test_data.sensor_status_cmd: get_sensor_status_init, - test_data.init_sensor_cmd: init_sensor_agent, - test_data.sdr_dump_cmd: sdr_dump, - test_data.nm_version_cmd: get_nm_version_v2} - - return _execute(funcs, *cmd, **kwargs) - - -def execute_without_nm(*cmd, **kwargs): - """test version of execute on Non-Node Manager platform.""" - - funcs = {test_data.sensor_status_cmd: get_sensor_status_uninit, - test_data.init_sensor_cmd: init_sensor_agent, - test_data.sdr_dump_cmd: sdr_dump} - - return _execute(funcs, *cmd, **kwargs) - - -def execute_without_ipmi(*cmd, **kwargs): - raise nmexcept.IPMIException diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/ipmi/platform/ipmitool_test_data.py ceilometer-5.0.0~b3/ceilometer/tests/ipmi/platform/ipmitool_test_data.py --- ceilometer-5.0.0~b2/ceilometer/tests/ipmi/platform/ipmitool_test_data.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/ipmi/platform/ipmitool_test_data.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,383 +0,0 @@ -# Copyright 2014 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Sample data for test_intel_node_manager and test_ipmi_sensor. 
- -This data is provided as a sample of the data expected from the ipmitool -binary, which produce Node Manager/IPMI raw data -""" - -sensor_temperature_data = """Sensor ID : SSB Therm Trip (0xd) - Entity ID : 7.1 (System Board) - Sensor Type (Discrete): Temperature - Assertions Enabled : Digital State - [State Asserted] - Deassertions Enabled : Digital State - [State Asserted] - -Sensor ID : BB P1 VR Temp (0x20) - Entity ID : 7.1 (System Board) - Sensor Type (Analog) : Temperature - Sensor Reading : 25 (+/- 0) degrees C - Status : ok - Nominal Reading : 58.000 - Normal Minimum : 10.000 - Normal Maximum : 105.000 - Upper critical : 115.000 - Upper non-critical : 110.000 - Lower critical : 0.000 - Lower non-critical : 5.000 - Positive Hysteresis : 2.000 - Negative Hysteresis : 2.000 - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : lcr lnc unc ucr - Settable Thresholds : lcr lnc unc ucr - Threshold Read Mask : lcr lnc unc ucr - Assertion Events : - Assertions Enabled : lnc- lcr- unc+ ucr+ - Deassertions Enabled : lnc- lcr- unc+ ucr+ - -Sensor ID : Front Panel Temp (0x21) - Entity ID : 12.1 (Front Panel Board) - Sensor Type (Analog) : Temperature - Sensor Reading : 23 (+/- 0) degrees C - Status : ok - Nominal Reading : 28.000 - Normal Minimum : 10.000 - Normal Maximum : 45.000 - Upper critical : 55.000 - Upper non-critical : 50.000 - Lower critical : 0.000 - Lower non-critical : 5.000 - Positive Hysteresis : 2.000 - Negative Hysteresis : 2.000 - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : lcr lnc unc ucr - Settable Thresholds : lcr lnc unc ucr - Threshold Read Mask : lcr lnc unc ucr - Assertion Events : - Assertions Enabled : lnc- lcr- unc+ ucr+ - Deassertions Enabled : lnc- lcr- unc+ ucr+ - -Sensor ID : SSB Temp (0x22) - Entity ID : 7.1 (System Board) - Sensor Type (Analog) : Temperature - Sensor Reading : 43 (+/- 0) degrees C - Status : ok - Nominal Reading : 52.000 - Normal Minimum : 10.000 - Normal Maximum : 93.000 - Upper critical : 103.000 - Upper non-critical : 98.000 - Lower critical : 0.000 - Lower non-critical : 5.000 - Positive Hysteresis : 2.000 - Negative Hysteresis : 2.000 - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : lcr lnc unc ucr - Settable Thresholds : lcr lnc unc ucr - Threshold Read Mask : lcr lnc unc ucr - Assertion Events : - Assertions Enabled : lnc- lcr- unc+ ucr+ - Deassertions Enabled : lnc- lcr- unc+ ucr+ - -""" - -sensor_voltage_data = """Sensor ID : VR Watchdog (0xb) - Entity ID : 7.1 (System Board) - Sensor Type (Discrete): Voltage - Assertions Enabled : Digital State - [State Asserted] - Deassertions Enabled : Digital State - [State Asserted] - -Sensor ID : BB +12.0V (0xd0) - Entity ID : 7.1 (System Board) - Sensor Type (Analog) : Voltage - Sensor Reading : 11.831 (+/- 0) Volts - Status : ok - Nominal Reading : 11.935 - Normal Minimum : 11.363 - Normal Maximum : 12.559 - Upper critical : 13.391 - Upper non-critical : 13.027 - Lower critical : 10.635 - Lower non-critical : 10.947 - Positive Hysteresis : 0.052 - Negative Hysteresis : 0.052 - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : lcr lnc unc ucr - Settable Thresholds : lcr lnc unc ucr - Threshold Read Mask : lcr lnc unc ucr - Assertion Events : - 
Assertions Enabled : lnc- lcr- unc+ ucr+ - Deassertions Enabled : lnc- lcr- unc+ ucr+ - -Sensor ID : BB +1.35 P1LV AB (0xe4) - Entity ID : 7.1 (System Board) - Sensor Type (Analog) : Voltage - Sensor Reading : Disabled - Status : Disabled - Nominal Reading : 1.342 - Normal Minimum : 1.275 - Normal Maximum : 1.409 - Upper critical : 1.488 - Upper non-critical : 1.445 - Lower critical : 1.201 - Lower non-critical : 1.244 - Positive Hysteresis : 0.006 - Negative Hysteresis : 0.006 - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : lcr lnc unc ucr - Settable Thresholds : lcr lnc unc ucr - Threshold Read Mask : lcr lnc unc ucr - Event Status : Unavailable - Assertions Enabled : lnc- lcr- unc+ ucr+ - Deassertions Enabled : lnc- lcr- unc+ ucr+ - -Sensor ID : BB +5.0V (0xd1) - Entity ID : 7.1 (System Board) - Sensor Type (Analog) : Voltage - Sensor Reading : 4.959 (+/- 0) Volts - Status : ok - Nominal Reading : 4.981 - Normal Minimum : 4.742 - Normal Maximum : 5.241 - Upper critical : 5.566 - Upper non-critical : 5.415 - Lower critical : 4.416 - Lower non-critical : 4.546 - Positive Hysteresis : 0.022 - Negative Hysteresis : 0.022 - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : lcr lnc unc ucr - Settable Thresholds : lcr lnc unc ucr - Threshold Read Mask : lcr lnc unc ucr - Assertion Events : - Assertions Enabled : lnc- lcr- unc+ ucr+ - Deassertions Enabled : lnc- lcr- unc+ ucr+ - -""" - -sensor_current_data = """Sensor ID : PS1 Curr Out % (0x58) - Entity ID : 10.1 (Power Supply) - Sensor Type (Analog) : Current - Sensor Reading : 11 (+/- 0) unspecified - Status : ok - Nominal Reading : 50.000 - Normal Minimum : 0.000 - Normal Maximum : 100.000 - Upper critical : 118.000 - Upper non-critical : 100.000 - Positive Hysteresis : Unspecified - Negative Hysteresis : Unspecified - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : unc ucr - Settable Thresholds : unc ucr - Threshold Read Mask : unc ucr - Assertion Events : - Assertions Enabled : unc+ ucr+ - Deassertions Enabled : unc+ ucr+ - -Sensor ID : PS2 Curr Out % (0x59) - Entity ID : 10.2 (Power Supply) - Sensor Type (Analog) : Current - Sensor Reading : 0 (+/- 0) unspecified - Status : ok - Nominal Reading : 50.000 - Normal Minimum : 0.000 - Normal Maximum : 100.000 - Upper critical : 118.000 - Upper non-critical : 100.000 - Positive Hysteresis : Unspecified - Negative Hysteresis : Unspecified - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : unc ucr - Settable Thresholds : unc ucr - Threshold Read Mask : unc ucr - Assertion Events : - Assertions Enabled : unc+ ucr+ - Deassertions Enabled : unc+ ucr+ - -""" - -sensor_fan_data = """Sensor ID : System Fan 1 (0x30) - Entity ID : 29.1 (Fan Device) - Sensor Type (Analog) : Fan - Sensor Reading : 4704 (+/- 0) RPM - Status : ok - Nominal Reading : 7497.000 - Normal Minimum : 2499.000 - Normal Maximum : 12495.000 - Lower critical : 1715.000 - Lower non-critical : 1960.000 - Positive Hysteresis : 49.000 - Negative Hysteresis : 49.000 - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : lcr lnc - Settable Thresholds : lcr lnc - Threshold Read Mask : lcr lnc - Assertion Events : - 
Assertions Enabled : lnc- lcr- - Deassertions Enabled : lnc- lcr- - -Sensor ID : System Fan 2 (0x32) - Entity ID : 29.2 (Fan Device) - Sensor Type (Analog) : Fan - Sensor Reading : 4704 (+/- 0) RPM - Status : ok - Nominal Reading : 7497.000 - Normal Minimum : 2499.000 - Normal Maximum : 12495.000 - Lower critical : 1715.000 - Lower non-critical : 1960.000 - Positive Hysteresis : 49.000 - Negative Hysteresis : 49.000 - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : lcr lnc - Settable Thresholds : lcr lnc - Threshold Read Mask : lcr lnc - Assertion Events : - Assertions Enabled : lnc- lcr- - Deassertions Enabled : lnc- lcr- - -Sensor ID : System Fan 3 (0x34) - Entity ID : 29.3 (Fan Device) - Sensor Type (Analog) : Fan - Sensor Reading : 4704 (+/- 0) RPM - Status : ok - Nominal Reading : 7497.000 - Normal Minimum : 2499.000 - Normal Maximum : 12495.000 - Lower critical : 1715.000 - Lower non-critical : 1960.000 - Positive Hysteresis : 49.000 - Negative Hysteresis : 49.000 - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : lcr lnc - Settable Thresholds : lcr lnc - Threshold Read Mask : lcr lnc - Assertion Events : - Assertions Enabled : lnc- lcr- - Deassertions Enabled : lnc- lcr- - -Sensor ID : System Fan 4 (0x36) - Entity ID : 29.4 (Fan Device) - Sensor Type (Analog) : Fan - Sensor Reading : 4606 (+/- 0) RPM - Status : ok - Nominal Reading : 7497.000 - Normal Minimum : 2499.000 - Normal Maximum : 12495.000 - Lower critical : 1715.000 - Lower non-critical : 1960.000 - Positive Hysteresis : 49.000 - Negative Hysteresis : 49.000 - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : lcr lnc - Settable Thresholds : lcr lnc - Threshold Read Mask : lcr lnc - Assertion Events : - Assertions Enabled : lnc- lcr- - Deassertions Enabled : lnc- lcr- - -""" - - -sensor_status_cmd = 'ipmitoolraw0x0a0x2c0x00' -init_sensor_cmd = 'ipmitoolraw0x0a0x2c0x01' -sdr_dump_cmd = 'ipmitoolsdrdump' -sdr_info_cmd = 'ipmitoolsdrinfo' - -read_sensor_all_cmd = 'ipmitoolsdr-v' -read_sensor_temperature_cmd = 'ipmitoolsdr-vtypeTemperature' -read_sensor_voltage_cmd = 'ipmitoolsdr-vtypeVoltage' -read_sensor_current_cmd = 'ipmitoolsdr-vtypeCurrent' -read_sensor_fan_cmd = 'ipmitoolsdr-vtypeFan' - -device_id_cmd = 'ipmitoolraw0x060x01' -nm_device_id_cmd = 'ipmitool-b0x6-t0x2craw0x060x01' -nm_version_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xca0x570x010x00' -get_power_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xc80x570x010x000x010x000x00' -get_inlet_temp_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xc80x570x010x000x020x000x00' -get_outlet_temp_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xc80x570x010x000x050x000x00' -get_airflow_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xc80x570x010x000x040x000x00' -get_cups_index_cmd = 'ipmitool-b0x6-t0x2craw0x2e0x650x570x010x000x01' -get_cups_util_cmd = 'ipmitool-b0x6-t0x2craw0x2e0x650x570x010x000x05' - - -device_id = (' 21 01 01 04 02 bf 57 01 00 49 00 01 07 50 0b', '') -nm_device_id = (' 50 01 02 15 02 21 57 01 00 02 0b 02 09 10 01', '') - -nm_version_v2 = (' 57 01 00 03 02 00 02 15', '') -nm_version_v3 = (' 57 01 00 05 03 00 03 06', '') - -# start from byte 3, get cur- 57 00(87), min- 03 00(3) -# max- 37 02(567), avg- 5c 00(92) -power_data = (' 57 01 00 57 00 03 00 37 02 5c 00 cc 37 f4 53 ce\n' - ' 9b 12 01 50\n', '') - -# start from byte 3, get cur- 17 00(23), min- 16 00(22) -# max- 
18 00(24), avg- 17 00(23) -inlet_temperature_data = (' 57 01 00 17 00 16 00 18 00 17 00 f3 6f fe 53 85\n' - ' b7 02 00 50\n', '') - -# start from byte 3, get cur- 19 00(25), min- 18 00(24) -# max- 1b 00(27), avg- 19 00(25) -outlet_temperature_data = (' 57 01 00 19 00 18 00 1b 00 19 00 f3 6f fe 53 85\n' - ' b7 02 00 50\n', '') - -# start from byte 3, get cur- be 00(190), min- 96 00(150) -# max- 26 02(550), avg- cb 00(203) -airflow_data = (' 57 01 00 be 00 96 00 26 02 cb 00 e1 65 c1 54 db\n' - ' b7 02 00 50\n', '') - -# start from byte 3, cups index 2e 00 (46) -cups_index_data = (' 57 01 00 2e 00\n', '') - -# start from byte 3, get cup_util - 33 00 ...(51), mem_util - 05 00 ...(5) -# io_util - 00 00 ...(0) -cups_util_data = (' 57 01 00 33 00 00 00 00 00 00 00 05 00 00 00 00\n' - ' 00 00 00 00 00 00 00 00 00 00 00\n', '') - -sdr_info = ('', '') - -sensor_temperature = (sensor_temperature_data, '') -sensor_voltage = (sensor_voltage_data, '') -sensor_current = (sensor_current_data, '') -sensor_fan = (sensor_fan_data, '') diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/ipmi/platform/test_intel_node_manager.py ceilometer-5.0.0~b3/ceilometer/tests/ipmi/platform/test_intel_node_manager.py --- ceilometer-5.0.0~b2/ceilometer/tests/ipmi/platform/test_intel_node_manager.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/ipmi/platform/test_intel_node_manager.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,168 +0,0 @@ -# Copyright 2014 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
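The fixture comments above spell out the wire format: each Node Manager counter comes back as a little-endian sequence of hex byte strings, so cur- '57 00' decodes to 0x0057 = 87 and max- '37 02' to 0x0237 = 567. The tests below decode these with node_manager._hex; a minimal stand-in for that helper, assuming it simply reverses the byte list and parses the result as hex (the real implementation may differ), could look like:

    def _hex(byte_list):
        # ['13', '00'] -> '0013' -> 19; ['37', '02'] -> '0237' -> 567
        if not byte_list:
            return 0
        return int(''.join(reversed(byte_list)), 16)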
- -import abc -import mock -import six - -from ceilometer.ipmi.platform import intel_node_manager as node_manager -from ceilometer.tests.ipmi.platform import fake_utils -from ceilometer import utils - -from oslotest import base - - -@six.add_metaclass(abc.ABCMeta) -class _Base(base.BaseTestCase): - - @abc.abstractmethod - def init_test_engine(self): - """Prepare specific ipmitool as engine for different NM version.""" - - def setUp(self): - super(_Base, self).setUp() - self.init_test_engine() - self.nm = node_manager.NodeManager() - - @classmethod - def tearDownClass(cls): - # reset inited to force an initialization of singleton for next test - node_manager.NodeManager()._inited = False - super(_Base, cls).tearDownClass() - - -class TestNodeManagerV3(_Base): - - def init_test_engine(self): - utils.execute = mock.Mock(side_effect=fake_utils.execute_with_nm_v3) - - def test_read_airflow(self): - airflow = self.nm.read_airflow() - avg_val = node_manager._hex(airflow["Average_value"]) - max_val = node_manager._hex(airflow["Maximum_value"]) - min_val = node_manager._hex(airflow["Minimum_value"]) - cur_val = node_manager._hex(airflow["Current_value"]) - - # get NM 3.0 - self.assertEqual(5, self.nm.nm_version) - - # see ipmi_test_data.py for raw data - self.assertEqual(190, cur_val) - self.assertEqual(150, min_val) - self.assertEqual(550, max_val) - self.assertEqual(203, avg_val) - - def test_read_outlet_temperature(self): - temperature = self.nm.read_outlet_temperature() - avg_val = node_manager._hex(temperature["Average_value"]) - max_val = node_manager._hex(temperature["Maximum_value"]) - min_val = node_manager._hex(temperature["Minimum_value"]) - cur_val = node_manager._hex(temperature["Current_value"]) - - # get NM 3.0 - self.assertEqual(5, self.nm.nm_version) - - # see ipmi_test_data.py for raw data - self.assertEqual(25, cur_val) - self.assertEqual(24, min_val) - self.assertEqual(27, max_val) - self.assertEqual(25, avg_val) - - def test_read_cups_utilization(self): - cups_util = self.nm.read_cups_utilization() - cpu_util = node_manager._hex(cups_util["CPU_Utilization"]) - mem_util = node_manager._hex(cups_util["Mem_Utilization"]) - io_util = node_manager._hex(cups_util["IO_Utilization"]) - - # see ipmi_test_data.py for raw data - self.assertEqual(51, cpu_util) - self.assertEqual(5, mem_util) - self.assertEqual(0, io_util) - - def test_read_cups_index(self): - cups_index = self.nm.read_cups_index() - index = node_manager._hex(cups_index["CUPS_Index"]) - self.assertEqual(46, index) - - -class TestNodeManager(_Base): - - def init_test_engine(self): - utils.execute = mock.Mock(side_effect=fake_utils.execute_with_nm_v2) - - def test_read_power_all(self): - power = self.nm.read_power_all() - - avg_val = node_manager._hex(power["Average_value"]) - max_val = node_manager._hex(power["Maximum_value"]) - min_val = node_manager._hex(power["Minimum_value"]) - cur_val = node_manager._hex(power["Current_value"]) - - # get NM 2.0 - self.assertEqual(3, self.nm.nm_version) - # see ipmi_test_data.py for raw data - self.assertEqual(87, cur_val) - self.assertEqual(3, min_val) - self.assertEqual(567, max_val) - self.assertEqual(92, avg_val) - - def test_read_inlet_temperature(self): - temperature = self.nm.read_inlet_temperature() - - avg_val = node_manager._hex(temperature["Average_value"]) - max_val = node_manager._hex(temperature["Maximum_value"]) - min_val = node_manager._hex(temperature["Minimum_value"]) - cur_val = node_manager._hex(temperature["Current_value"]) - - # see ipmi_test_data.py for raw data - 
self.assertEqual(23, cur_val) - self.assertEqual(22, min_val) - self.assertEqual(24, max_val) - self.assertEqual(23, avg_val) - - def test_read_airflow(self): - airflow = self.nm.read_airflow() - self.assertEqual({}, airflow) - - def test_read_outlet_temperature(self): - temperature = self.nm.read_outlet_temperature() - self.assertEqual({}, temperature) - - def test_read_cups_utilization(self): - cups_util = self.nm.read_cups_utilization() - self.assertEqual({}, cups_util) - - def test_read_cups_index(self): - cups_index = self.nm.read_cups_index() - self.assertEqual({}, cups_index) - - -class TestNonNodeManager(_Base): - - def init_test_engine(self): - utils.execute = mock.Mock(side_effect=fake_utils.execute_without_nm) - - def test_read_power_all(self): - # no NM support - self.assertEqual(0, self.nm.nm_version) - power = self.nm.read_power_all() - - # Non-Node Manager platform returns empty data - self.assertEqual({}, power) - - def test_read_inlet_temperature(self): - temperature = self.nm.read_inlet_temperature() - - # Non-Node Manager platform returns empty data - self.assertEqual({}, temperature) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/ipmi/platform/test_ipmi_sensor.py ceilometer-5.0.0~b3/ceilometer/tests/ipmi/platform/test_ipmi_sensor.py --- ceilometer-5.0.0~b2/ceilometer/tests/ipmi/platform/test_ipmi_sensor.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/ipmi/platform/test_ipmi_sensor.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,128 +0,0 @@ -# Copyright 2014 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslotest import base - -from ceilometer.ipmi.platform import ipmi_sensor -from ceilometer.tests.ipmi.platform import fake_utils -from ceilometer import utils - - -class TestIPMISensor(base.BaseTestCase): - - def setUp(self): - super(TestIPMISensor, self).setUp() - - utils.execute = mock.Mock(side_effect=fake_utils.execute_with_nm_v2) - self.ipmi = ipmi_sensor.IPMISensor() - - @classmethod - def tearDownClass(cls): - # reset inited to force an initialization of singleton for next test - ipmi_sensor.IPMISensor()._inited = False - super(TestIPMISensor, cls).tearDownClass() - - def test_read_sensor_temperature(self): - sensors = self.ipmi.read_sensor_any('Temperature') - - self.assertTrue(self.ipmi.ipmi_support) - # only temperature data returned. - self.assertIn('Temperature', sensors) - self.assertEqual(1, len(sensors)) - - # 4 sensor data in total, ignore 1 without 'Sensor Reading'. - # Check ceilometer/tests/ipmi/platform/ipmi_test_data.py - self.assertEqual(3, len(sensors['Temperature'])) - sensor = sensors['Temperature']['BB P1 VR Temp (0x20)'] - self.assertEqual('25 (+/- 0) degrees C', sensor['Sensor Reading']) - - def test_read_sensor_voltage(self): - sensors = self.ipmi.read_sensor_any('Voltage') - - # only voltage data returned. - self.assertIn('Voltage', sensors) - self.assertEqual(1, len(sensors)) - - # 4 sensor data in total, ignore 1 without 'Sensor Reading'.
- # Check ceilometer/tests/ipmi/platform/ipmi_test_data.py - self.assertEqual(3, len(sensors['Voltage'])) - sensor = sensors['Voltage']['BB +5.0V (0xd1)'] - self.assertEqual('4.959 (+/- 0) Volts', sensor['Sensor Reading']) - - def test_read_sensor_current(self): - sensors = self.ipmi.read_sensor_any('Current') - - # only Current data returned. - self.assertIn('Current', sensors) - self.assertEqual(1, len(sensors)) - - # 2 sensor data in total. - # Check ceilometer/tests/ipmi/platform/ipmi_test_data.py - self.assertEqual(2, len(sensors['Current'])) - sensor = sensors['Current']['PS1 Curr Out % (0x58)'] - self.assertEqual('11 (+/- 0) unspecified', sensor['Sensor Reading']) - - def test_read_sensor_fan(self): - sensors = self.ipmi.read_sensor_any('Fan') - - # only Fan data returned. - self.assertIn('Fan', sensors) - self.assertEqual(1, len(sensors)) - - # 4 sensor data in total. - # Check ceilometer/tests/ipmi/platform/ipmi_test_data.py - self.assertEqual(4, len(sensors['Fan'])) - sensor = sensors['Fan']['System Fan 2 (0x32)'] - self.assertEqual('4704 (+/- 0) RPM', sensor['Sensor Reading']) - - -class TestNonIPMISensor(base.BaseTestCase): - - def setUp(self): - super(TestNonIPMISensor, self).setUp() - - utils.execute = mock.Mock(side_effect=fake_utils.execute_without_ipmi) - self.ipmi = ipmi_sensor.IPMISensor() - - @classmethod - def tearDownClass(cls): - # reset inited to force an initialization of singleton for next test - ipmi_sensor.IPMISensor()._inited = False - super(TestNonIPMISensor, cls).tearDownClass() - - def test_read_sensor_temperature(self): - sensors = self.ipmi.read_sensor_any('Temperature') - - self.assertFalse(self.ipmi.ipmi_support) - # Non-IPMI platform returns empty data - self.assertEqual({}, sensors) - - def test_read_sensor_voltage(self): - sensors = self.ipmi.read_sensor_any('Voltage') - - # Non-IPMI platform returns empty data - self.assertEqual({}, sensors) - - def test_read_sensor_current(self): - sensors = self.ipmi.read_sensor_any('Current') - - # Non-IPMI platform returns empty data - self.assertEqual({}, sensors) - - def test_read_sensor_fan(self): - sensors = self.ipmi.read_sensor_any('Fan') - - # Non-IPMI platform returns empty data - self.assertEqual({}, sensors) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/ipmi/pollsters/base.py ceilometer-5.0.0~b3/ceilometer/tests/ipmi/pollsters/base.py --- ceilometer-5.0.0~b2/ceilometer/tests/ipmi/pollsters/base.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/ipmi/pollsters/base.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,74 +0,0 @@ -# Copyright 2014 Intel -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
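The 'Sensor Reading' strings asserted above ('25 (+/- 0) degrees C', '4704 (+/- 0) RPM') are what the notification tests removed earlier turn into a sample: per those docstrings, the volume comes from the first chunk of the reading and the unit from the last. A rough sketch of that split, assuming plain whitespace tokenization rather than the project's actual parser:

    def parse_reading(reading):
        # '4704 (+/- 0) RPM' -> (4704.0, 'RPM'); raises ValueError on
        # unparsable input such as 'some bad stuff'
        chunks = reading.split()
        return float(chunks[0]), chunks[-1]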
- -import abc - -import mock -from oslotest import mockpatch -import six - -from ceilometer.agent import manager -from ceilometer.tests import base - - -@six.add_metaclass(abc.ABCMeta) -class TestPollsterBase(base.BaseTestCase): - - def fake_data(self): - """Fake data used for test.""" - return None - - def fake_sensor_data(self, sensor_type): - """Fake sensor data used for test.""" - return None - - @abc.abstractmethod - def make_pollster(self): - """Produce right pollster for test.""" - - def _test_get_samples(self): - nm = mock.Mock() - nm.read_inlet_temperature.side_effect = self.fake_data - nm.read_outlet_temperature.side_effect = self.fake_data - nm.read_power_all.side_effect = self.fake_data - nm.read_airflow.side_effect = self.fake_data - nm.read_cups_index.side_effect = self.fake_data - nm.read_cups_utilization.side_effect = self.fake_data - nm.read_sensor_any.side_effect = self.fake_sensor_data - # We should mock the pollster first before initialize the Manager - # so that we don't trigger the sudo in pollsters' __init__(). - self.useFixture(mockpatch.Patch( - 'ceilometer.ipmi.platform.intel_node_manager.NodeManager', - return_value=nm)) - - self.useFixture(mockpatch.Patch( - 'ceilometer.ipmi.platform.ipmi_sensor.IPMISensor', - return_value=nm)) - - self.mgr = manager.AgentManager(['ipmi']) - - self.pollster = self.make_pollster() - - def _verify_metering(self, length, expected_vol=None, node=None): - cache = {} - resources = ['local_host'] - - samples = list(self.pollster.get_samples(self.mgr, cache, resources)) - self.assertEqual(length, len(samples)) - - if expected_vol: - self.assertTrue(any(s.volume == expected_vol for s in samples)) - if node: - self.assertTrue(any(s.resource_metadata['node'] == node - for s in samples)) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/ipmi/pollsters/test_node.py ceilometer-5.0.0~b3/ceilometer/tests/ipmi/pollsters/test_node.py --- ceilometer-5.0.0~b2/ceilometer/tests/ipmi/pollsters/test_node.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/ipmi/pollsters/test_node.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,162 +0,0 @@ -# Copyright 2014 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
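The resource IDs asserted in the removed notification tests (for example 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-fan_4a_tach_(0x46)') pair the node UUID with a normalized sensor ID. A plausible reconstruction of that normalization, inferred only from the asserted values and not from the implementation:

    def make_resource_id(node_uuid, sensor_id):
        # 'Fan 4A Tach (0x46)' -> 'fan_4a_tach_(0x46)', prefixed by the node
        return '%s-%s' % (node_uuid, sensor_id.lower().replace(' ', '_'))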
- -import mock -from oslo_config import cfg - -from ceilometer.ipmi.pollsters import node -from ceilometer.tests.ipmi.pollsters import base - - -CONF = cfg.CONF -CONF.import_opt('host', 'ceilometer.service') - - -class TestPowerPollster(base.TestPollsterBase): - - def fake_data(self): - # data after parsing Intel Node Manager output - return {"Current_value": ['13', '00']} - - def make_pollster(self): - return node.PowerPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - # only one sample, and value is 19(0x13 as current_value) - self._verify_metering(1, 19, CONF.host) - - -class TestInletTemperaturePollster(base.TestPollsterBase): - - def fake_data(self): - # data after parsing Intel Node Manager output - return {"Current_value": ['23', '00']} - - def make_pollster(self): - return node.InletTemperaturePollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - # only one sample, and value is 35(0x23 as current_value) - self._verify_metering(1, 35, CONF.host) - - -class TestOutletTemperaturePollster(base.TestPollsterBase): - - def fake_data(self): - # data after parsing Intel Node Manager output - return {"Current_value": ['25', '00']} - - def make_pollster(self): - return node.OutletTemperaturePollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - # only one sample, and value is 37(0x25 as current_value) - self._verify_metering(1, 37, CONF.host) - - -class TestAirflowPollster(base.TestPollsterBase): - - def fake_data(self): - # data after parsing Intel Node Manager output - return {"Current_value": ['be', '00']} - - def make_pollster(self): - return node.AirflowPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - # only one sample, and value is 190(0xbe as current_value) - self._verify_metering(1, 190, CONF.host) - - -class TestCUPSIndexPollster(base.TestPollsterBase): - - def fake_data(self): - # data after parsing Intel Node Manager output - return {"CUPS_Index": ['2e', '00']} - - def make_pollster(self): - return node.CUPSIndexPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - # only one sample, and value is 46(0x2e as cups_index) - self._verify_metering(1, 46, CONF.host) - - -class CPUUtilPollster(base.TestPollsterBase): - - def fake_data(self): - # data after parsing Intel Node Manager output - return {"CPU_Utilization": - ['33', '00', '00', '00', '00', '00', '00', '00']} - - def make_pollster(self): - return node.CPUUtilPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - # only one sample, and value is 51(0x33 as cpu_util) - self._verify_metering(1, 51, CONF.host) - - -class MemUtilPollster(base.TestPollsterBase): - - def fake_data(self): - # data after parsing Intel Node Manager output - return {"Mem_Utilization": - ['05', '00', '00', '00', '00', '00', '00', '00']} - - def make_pollster(self): - return node.MemUtilPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - # only one sample, and value is 5(0x05) - self._verify_metering(1, 5, CONF.host) - - -class 
IOUtilPollster(base.TestPollsterBase): - - def fake_data(self): - # data after parsing Intel Node Manager output - return {"IO_Utilization": - ['00', '00', '00', '00', '00', '00', '00', '00']} - - def make_pollster(self): - return node.IOUtilPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - # only one sample, and value is 0(0x00) - self._verify_metering(1, 0, CONF.host) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/ipmi/pollsters/test_sensor.py ceilometer-5.0.0~b3/ceilometer/tests/ipmi/pollsters/test_sensor.py --- ceilometer-5.0.0~b2/ceilometer/tests/ipmi/pollsters/test_sensor.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/ipmi/pollsters/test_sensor.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,146 +0,0 @@ -# Copyright 2014 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_config import cfg - -from ceilometer.ipmi.pollsters import sensor -from ceilometer.tests.ipmi.notifications import ipmi_test_data -from ceilometer.tests.ipmi.pollsters import base - - -CONF = cfg.CONF -CONF.import_opt('host', 'ceilometer.service') - -TEMPERATURE_SENSOR_DATA = { - 'Temperature': ipmi_test_data.TEMPERATURE_DATA -} - -CURRENT_SENSOR_DATA = { - 'Current': ipmi_test_data.CURRENT_DATA -} - -FAN_SENSOR_DATA = { - 'Fan': ipmi_test_data.FAN_DATA -} - -VOLTAGE_SENSOR_DATA = { - 'Voltage': ipmi_test_data.VOLTAGE_DATA -} - -MISSING_SENSOR_DATA = ipmi_test_data.MISSING_SENSOR['payload']['payload'] -MALFORMED_SENSOR_DATA = ipmi_test_data.BAD_SENSOR['payload']['payload'] -MISSING_ID_SENSOR_DATA = ipmi_test_data.NO_SENSOR_ID['payload']['payload'] - - -class TestTemperatureSensorPollster(base.TestPollsterBase): - - def fake_sensor_data(self, sensor_type): - return TEMPERATURE_SENSOR_DATA - - def make_pollster(self): - return sensor.TemperatureSensorPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - self._verify_metering(10, float(32), CONF.host) - - -class TestMissingSensorData(base.TestPollsterBase): - - def fake_sensor_data(self, sensor_type): - return MISSING_SENSOR_DATA - - def make_pollster(self): - return sensor.TemperatureSensorPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - self._verify_metering(0) - - -class TestMalformedSensorData(base.TestPollsterBase): - - def fake_sensor_data(self, sensor_type): - return MALFORMED_SENSOR_DATA - - def make_pollster(self): - return sensor.TemperatureSensorPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - self._verify_metering(0) - - -class TestMissingSensorId(base.TestPollsterBase): - - def fake_sensor_data(self, sensor_type): - return MISSING_ID_SENSOR_DATA - - def make_pollster(self): - return sensor.TemperatureSensorPollster() - - 
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - self._verify_metering(0) - - -class TestFanSensorPollster(base.TestPollsterBase): - - def fake_sensor_data(self, sensor_type): - return FAN_SENSOR_DATA - - def make_pollster(self): - return sensor.FanSensorPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - self._verify_metering(12, float(7140), CONF.host) - - -class TestCurrentSensorPollster(base.TestPollsterBase): - - def fake_sensor_data(self, sensor_type): - return CURRENT_SENSOR_DATA - - def make_pollster(self): - return sensor.CurrentSensorPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - self._verify_metering(1, float(130), CONF.host) - - -class TestVoltageSensorPollster(base.TestPollsterBase): - - def fake_sensor_data(self, sensor_type): - return VOLTAGE_SENSOR_DATA - - def make_pollster(self): - return sensor.VoltageSensorPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - self._verify_metering(4, float(3.309), CONF.host) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/key_value_storage/test_notifications.py ceilometer-5.0.0~b3/ceilometer/tests/key_value_storage/test_notifications.py --- ceilometer-5.0.0~b2/ceilometer/tests/key_value_storage/test_notifications.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/key_value_storage/test_notifications.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,124 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import datetime - -import mock - -from oslo_config import fixture as fixture_config - -from ceilometer.meter import notifications -from ceilometer import sample -from ceilometer.tests import base as test - - -def fake_uuid(x): - return '%s-%s-%s-%s' % (x * 8, x * 4, x * 4, x * 12) - - -NOW = datetime.datetime.isoformat(datetime.datetime.utcnow()) - - -TABLE_CREATE_PAYLOAD = { - u'table_uuid': fake_uuid('r'), - u'index_count': 2, - u'table_name': u'email_data' - } - -TABLE_DELETE_PAYLOAD = { - u'table_uuid': fake_uuid('r'), - u'table_name': u'email_data' - } - -NOTIFICATION_TABLE_CREATE = { - u'_context_request_id': u'req-d6e9b7ec-976f-443f-ba6e-e2b89b18aa75', - u'_context_tenant': fake_uuid('t'), - u'_context_user': fake_uuid('u'), - u'_context_auth_token': u'', - u'_context_show_deleted': False, - u'_context_is_admin': u'False', - u'_context_read_only': False, - 'payload': TABLE_CREATE_PAYLOAD, - 'publisher_id': u'magnetodb.winterfell.com', - 'message_id': u'3d71fb8a-f1d7-4a4e-b29f-7a711a761ba1', - 'event_type': u'magnetodb.table.create.end', - 'timestamp': NOW, - 'priority': 'info' - } - -NOTIFICATION_TABLE_DELETE = { - u'_context_request_id': u'req-d6e9b7ec-976f-443f-ba6e-e2b89b18aa75', - u'_context_tenant': fake_uuid('t'), - u'_context_user': fake_uuid('u'), - u'_context_auth_token': u'', - u'_context_show_deleted': False, - u'_context_is_admin': u'False', - u'_context_read_only': False, - 'payload': TABLE_DELETE_PAYLOAD, - 'publisher_id': u'magnetodb.winterfell.com', - 'message_id': u'4c8f5940-3c90-41af-ac16-f0e3055a305d', - 'event_type': u'magnetodb.table.delete.end', - 'timestamp': NOW, - 'priority': 'info' - } - - -class TestNotification(test.BaseTestCase): - - def setUp(self): - super(TestNotification, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.CONF.set_override( - 'meter_definitions_cfg_file', - self.path_get('etc/ceilometer/meters.yaml'), group='meter') - self.handler = notifications.ProcessMeterNotifications(mock.Mock()) - - def _verify_common_counter(self, c, name, volume): - self.assertIsNotNone(c) - self.assertEqual(name, c.name) - self.assertEqual(fake_uuid('r'), c.resource_id) - self.assertEqual(NOW, c.timestamp) - self.assertEqual(volume, c.volume) - metadata = c.resource_metadata - self.assertEqual(u'magnetodb.winterfell.com', metadata.get('host')) - - def test_create_table(self): - counters = list(self.handler.process_notification( - NOTIFICATION_TABLE_CREATE)) - self.assertEqual(2, len(counters)) - table = [item for item in counters - if item.name == "magnetodb.table.create"][0] - self._verify_common_counter(table, 'magnetodb.table.create', 1) - self.assertEqual(fake_uuid('u'), table.user_id) - self.assertEqual(fake_uuid('t'), table.project_id) - self.assertEqual(sample.TYPE_GAUGE, table.type) - - def test_delete_table(self): - counters = list(self.handler.process_notification( - NOTIFICATION_TABLE_DELETE)) - self.assertEqual(1, len(counters)) - table = counters[0] - self._verify_common_counter(table, 'magnetodb.table.delete', 1) - self.assertEqual(fake_uuid('u'), table.user_id) - self.assertEqual(fake_uuid('t'), table.project_id) - self.assertEqual(sample.TYPE_GAUGE, table.type) - - def test_index_count(self): - counters = list(self.handler.process_notification( - NOTIFICATION_TABLE_CREATE)) - self.assertEqual(2, len(counters)) - table = [item for item in counters - if item.name == "magnetodb.table.index.count"][0] - self._verify_common_counter(table, 'magnetodb.table.index.count', 2) - self.assertEqual(fake_uuid('u'), 
table.user_id) - self.assertEqual(fake_uuid('t'), table.project_id) - self.assertEqual(sample.TYPE_GAUGE, table.type) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/meter/test_notifications.py ceilometer-5.0.0~b3/ceilometer/tests/meter/test_notifications.py --- ceilometer-5.0.0~b2/ceilometer/tests/meter/test_notifications.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/meter/test_notifications.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,149 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for ceilometer.meter.notifications -""" -import mock -import six -import yaml - -from oslo_config import fixture as fixture_config -from oslo_utils import fileutils - -from ceilometer.meter import notifications -from ceilometer.tests import base as test - -NOTIFICATION = { - 'event_type': u'test.create', - 'timestamp': u'2015-06-1909: 19: 35.786893', - 'payload': {u'user_id': u'e1d870e51c7340cb9d555b15cbfcaec2', - u'resource_id': u'bea70e51c7340cb9d555b15cbfcaec23', - u'timestamp': u'2015-06-19T09: 19: 35.785330', - u'message_signature': u'fake_signature1', - u'resource_metadata': {u'foo': u'bar'}, - u'source': u'30be1fc9a03c4e94ab05c403a8a377f2: openstack', - u'volume': 1.0, - u'project_id': u'30be1fc9a03c4e94ab05c403a8a377f2', - }, - u'_context_tenant': u'30be1fc9a03c4e94ab05c403a8a377f2', - u'_context_request_id': u'req-da91b4bf-d2b5-43ae-8b66-c7752e72726d', - u'_context_user': u'e1d870e51c7340cb9d555b15cbfcaec2', - 'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e', - 'publisher_id': "foo123" -} - - -class TestMeterDefinition(test.BaseTestCase): - - def test_config_definition(self): - cfg = dict(name="test", - event_type="test.create", - type="delta", - unit="B", - volume="payload.volume", - resource_id="payload.resource_id", - project_id="payload.project_id") - handler = notifications.MeterDefinition(cfg) - self.assertTrue(handler.match_type("test.create")) - self.assertEqual(1, handler.parse_fields("volume", NOTIFICATION)) - self.assertEqual("bea70e51c7340cb9d555b15cbfcaec23", - handler.parse_fields("resource_id", NOTIFICATION)) - self.assertEqual("30be1fc9a03c4e94ab05c403a8a377f2", - handler.parse_fields("project_id", NOTIFICATION)) - - def test_config_missing_fields(self): - cfg = dict(name="test", type="delta") - handler = notifications.MeterDefinition(cfg) - try: - handler.match_type("test.create") - except notifications.MeterDefinitionException as e: - self.assertEqual("Required field event_type not specified", - e.message) - - def test_bad_type_cfg_definition(self): - cfg = dict(name="test", type="foo") - try: - notifications.MeterDefinition(cfg) - except notifications.MeterDefinitionException as e: - self.assertEqual("Invalid type foo specified", e.message) - - -class TestMeterProcessing(test.BaseTestCase): - - def setUp(self): - super(TestMeterProcessing, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.CONF.set_override( - 'meter_definitions_cfg_file', - self.path_get('etc/ceilometer/meters.yaml'), 
group='meter') - self.handler = notifications.ProcessMeterNotifications(mock.Mock()) - - def __setup_meter_def_file(self, cfg): - if six.PY3: - cfg = cfg.encode('utf-8') - meter_cfg_file = fileutils.write_to_tempfile(content=cfg, - prefix="meters", - suffix="yaml") - self.CONF.set_override( - 'meter_definitions_cfg_file', - meter_cfg_file, group='meter') - cfg = notifications.setup_meters_config() - return cfg - - def test_multiple_meter(self): - cfg = yaml.dump( - {'metric': [dict(name="test1", - event_type="test.create", - type="delta", - unit="B", - volume="payload.volume", - resource_id="payload.resource_id", - project_id="payload.project_id"), - dict(name="test2", - event_type="test.create", - type="delta", - unit="B", - volume="payload.volume", - resource_id="payload.resource_id", - project_id="payload.project_id")]}) - self.handler.definitions = notifications.load_definitions( - self.__setup_meter_def_file(cfg)) - c = list(self.handler.process_notification(NOTIFICATION)) - self.assertEqual(2, len(c)) - - def test_unmatched_meter(self): - cfg = yaml.dump( - {'metric': [dict(name="test1", - event_type="test.update", - type="delta", - unit="B", - volume="payload.volume", - resource_id="payload.resource_id", - project_id="payload.project_id")]}) - self.handler.definitions = notifications.load_definitions( - self.__setup_meter_def_file(cfg)) - c = list(self.handler.process_notification(NOTIFICATION)) - self.assertEqual(0, len(c)) - - def test_regex_match_meter(self): - cfg = yaml.dump( - {'metric': [dict(name="test1", - event_type="test.*", - type="delta", - unit="B", - volume="payload.volume", - resource_id="payload.resource_id", - project_id="payload.project_id")]}) - self.handler.definitions = notifications.load_definitions( - self.__setup_meter_def_file(cfg)) - c = list(self.handler.process_notification(NOTIFICATION)) - self.assertEqual(1, len(c)) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/network/services/test_fwaas.py ceilometer-5.0.0~b3/ceilometer/tests/network/services/test_fwaas.py --- ceilometer-5.0.0~b2/ceilometer/tests/network/services/test_fwaas.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/network/services/test_fwaas.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,169 +0,0 @@ -# -# Copyright 2014 Cisco Systems,Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
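The removed meter tests just above build their meter definitions with yaml.dump: a top-level metric list whose entries name the meter, the event_type it matches (exact or a pattern such as test.*), its type and unit, and dotted references into the notification (payload.volume, payload.resource_id, payload.project_id). A short sketch of writing such a definition the same way, assuming only PyYAML:

    # Sketch of a meters.yaml-style definition mirroring the removed
    # tests' yaml.dump usage; field values are dotted paths into the
    # notification body.
    import yaml

    definition = {
        'metric': [{
            'name': 'test1',
            'event_type': 'test.create',  # patterns like 'test.*' also match
            'type': 'delta',
            'unit': 'B',
            'volume': 'payload.volume',
            'resource_id': 'payload.resource_id',
            'project_id': 'payload.project_id',
        }]
    }

    print(yaml.dump(definition, default_flow_style=False))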
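The FWaaS pollster tests that follow, like the LBaaS and VPNaaS ones after them, all assert one convention: a service resource's status becomes the sample volume (ACTIVE is 1, INACTIVE is 0, PENDING_CREATE is 2), and resources in any other state, such as UNKNOWN or error, produce no sample at all; error'd resources are also excluded from discovery. A hedged sketch of that convention, with plain tuples standing in for ceilometer's Sample class:

    # Illustrative only: the status-to-volume convention implied by the
    # removed network-service pollster tests in this section.
    STATUS_VOLUME = {'ACTIVE': 1, 'INACTIVE': 0, 'PENDING_CREATE': 2}

    def service_samples(resources):
        for res in resources:
            volume = STATUS_VOLUME.get(res['status'].upper())
            if volume is None:
                continue  # e.g. 'UNKNOWN' or 'error': no sample emitted
            yield (res['id'], volume)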
- -import mock -from oslo_context import context -from oslotest import base -from oslotest import mockpatch - -from ceilometer.agent import manager -from ceilometer.agent import plugin_base -from ceilometer.network.services import discovery -from ceilometer.network.services import fwaas - - -class _BaseTestFWPollster(base.BaseTestCase): - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def setUp(self): - super(_BaseTestFWPollster, self).setUp() - self.addCleanup(mock.patch.stopall) - self.context = context.get_admin_context() - self.manager = manager.AgentManager() - plugin_base._get_keystone = mock.Mock() - plugin_base._get_keystone.service_catalog.get_endpoints = ( - mock.MagicMock(return_value={'network': mock.ANY})) - - -class TestFirewallPollster(_BaseTestFWPollster): - - def setUp(self): - super(TestFirewallPollster, self).setUp() - self.pollster = fwaas.FirewallPollster() - fake_fw = self.fake_get_fw_service() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' - 'firewall_get_all', - return_value=fake_fw)) - - @staticmethod - def fake_get_fw_service(): - return [{'status': 'ACTIVE', - 'name': 'myfw', - 'description': '', - 'admin_state_up': True, - 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', - 'firewall_policy_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa'}, - {'status': 'INACTIVE', - 'name': 'myfw', - 'description': '', - 'admin_state_up': True, - 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', - 'firewall_policy_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa'}, - {'status': 'PENDING_CREATE', - 'name': 'myfw', - 'description': '', - 'admin_state_up': True, - 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', - 'firewall_policy_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa'}, - {'status': 'error', - 'name': 'myfw', - 'description': '', - 'admin_state_up': True, - 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', - 'firewall_policy_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa'}, - ] - - def test_fw_get_samples(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_fw_service())) - self.assertEqual(3, len(samples)) - for field in self.pollster.FIELDS: - self.assertEqual(self.fake_get_fw_service()[0][field], - samples[0].resource_metadata[field]) - - def test_vpn_volume(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_fw_service())) - self.assertEqual(1, samples[0].volume) - self.assertEqual(0, samples[1].volume) - self.assertEqual(2, samples[2].volume) - - def test_get_vpn_meter_names(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_fw_service())) - self.assertEqual(set(['network.services.firewall']), - set([s.name for s in samples])) - - def test_vpn_discovery(self): - discovered_fws = discovery.FirewallDiscovery().discover(self.manager) - self.assertEqual(3, len(discovered_fws)) - - for vpn in self.fake_get_fw_service(): - if vpn['status'] == 'error': - self.assertNotIn(vpn, discovered_fws) - else: - self.assertIn(vpn, discovered_fws) - - -class TestIPSecConnectionsPollster(_BaseTestFWPollster): - - def setUp(self): - super(TestIPSecConnectionsPollster, self).setUp() - self.pollster = fwaas.FirewallPolicyPollster() - fake_fw_policy = self.fake_get_fw_policy() - 
self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' - 'fw_policy_get_all', - return_value=fake_fw_policy)) - - @staticmethod - def fake_get_fw_policy(): - return [{'name': 'my_fw_policy', - 'description': 'fw_policy', - 'admin_state_up': True, - 'tenant_id': 'abe3d818-fdcb-fg4b-de7f-6650dc8a9d7a', - 'firewall_rules': [{'enabled': True, - 'action': 'allow', - 'ip_version': 4, - 'protocol': 'tcp', - 'destination_port': '80', - 'source_ip_address': '10.24.4.2'}, - {'enabled': True, - 'action': 'deny', - 'ip_version': 4, - 'protocol': 'tcp', - 'destination_port': '22'}], - 'shared': True, - 'audited': True, - 'id': 'fdfbcec-fdcb-fg4b-de7f-6650dc8a9d7a'} - ] - - def test_policy_get_samples(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_fw_policy())) - self.assertEqual(1, len(samples)) - for field in self.pollster.FIELDS: - self.assertEqual(self.fake_get_fw_policy()[0][field], - samples[0].resource_metadata[field]) - - def test_get_policy_meter_names(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_fw_policy())) - self.assertEqual(set(['network.services.firewall.policy']), - set([s.name for s in samples])) - - def test_fw_policy_discovery(self): - discovered_policy = discovery.FirewallPolicyDiscovery().discover( - self.manager) - self.assertEqual(1, len(discovered_policy)) - self.assertEqual(self.fake_get_fw_policy(), discovered_policy) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/network/services/test_lbaas.py ceilometer-5.0.0~b3/ceilometer/tests/network/services/test_lbaas.py --- ceilometer-5.0.0~b2/ceilometer/tests/network/services/test_lbaas.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/network/services/test_lbaas.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,502 +0,0 @@ -# -# Copyright 2014 Cisco Systems,Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_context import context -from oslotest import base -from oslotest import mockpatch - -from ceilometer.agent import manager -from ceilometer.agent import plugin_base -from ceilometer.network.services import discovery -from ceilometer.network.services import lbaas - - -class _BaseTestLBPollster(base.BaseTestCase): - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def setUp(self): - super(_BaseTestLBPollster, self).setUp() - self.addCleanup(mock.patch.stopall) - self.context = context.get_admin_context() - self.manager = manager.AgentManager() - plugin_base._get_keystone = mock.Mock() - plugin_base._get_keystone.service_catalog.get_endpoints = ( - mock.MagicMock(return_value={'network': mock.ANY})) - - -class TestLBPoolPollster(_BaseTestLBPollster): - - def setUp(self): - super(TestLBPoolPollster, self).setUp() - self.pollster = lbaas.LBPoolPollster() - fake_pools = self.fake_get_pools() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 
- 'pool_get_all', - return_value=fake_pools)) - - @staticmethod - def fake_get_pools(): - return [{'status': 'ACTIVE', - 'lb_method': 'ROUND_ROBIN', - 'protocol': 'HTTP', - 'description': '', - 'health_monitors': [], - 'members': [], - 'provider': 'haproxy', - 'status_description': None, - 'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'mylb', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'health_monitors_status': []}, - {'status': 'INACTIVE', - 'lb_method': 'ROUND_ROBIN', - 'protocol': 'HTTP', - 'description': '', - 'health_monitors': [], - 'members': [], - 'provider': 'haproxy', - 'status_description': None, - 'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'mylb02', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'health_monitors_status': []}, - {'status': 'PENDING_CREATE', - 'lb_method': 'ROUND_ROBIN', - 'protocol': 'HTTP', - 'description': '', - 'health_monitors': [], - 'members': [], - 'provider': 'haproxy', - 'status_description': None, - 'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd', - 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'mylb03', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'health_monitors_status': []}, - {'status': 'UNKNOWN', - 'lb_method': 'ROUND_ROBIN', - 'protocol': 'HTTP', - 'description': '', - 'health_monitors': [], - 'members': [], - 'provider': 'haproxy', - 'status_description': None, - 'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd', - 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'mylb03', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'health_monitors_status': []}, - {'status': 'error', - 'lb_method': 'ROUND_ROBIN', - 'protocol': 'HTTP', - 'description': '', - 'health_monitors': [], - 'members': [], - 'provider': 'haproxy', - 'status_description': None, - 'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd', - 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'mylb_error', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'health_monitors_status': []}, - ] - - def test_pool_get_samples(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_pools())) - self.assertEqual(3, len(samples)) - for field in self.pollster.FIELDS: - self.assertEqual(self.fake_get_pools()[0][field], - samples[0].resource_metadata[field]) - - def test_pool_volume(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_pools())) - self.assertEqual(1, samples[0].volume) - self.assertEqual(0, samples[1].volume) - self.assertEqual(2, samples[2].volume) - - def test_get_pool_meter_names(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_pools())) - self.assertEqual(set(['network.services.lb.pool']), - set([s.name for s in samples])) - - def test_pool_discovery(self): - discovered_pools = discovery.LBPoolsDiscovery().discover(self.manager) - self.assertEqual(4, len(discovered_pools)) - for pool in self.fake_get_pools(): - if pool['status'] == 'error': - self.assertNotIn(pool, discovered_pools) - 
else: - self.assertIn(pool, discovered_pools) - - -class TestLBVipPollster(_BaseTestLBPollster): - - def setUp(self): - super(TestLBVipPollster, self).setUp() - self.pollster = lbaas.LBVipPollster() - fake_vips = self.fake_get_vips() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' - 'vip_get_all', - return_value=fake_vips)) - - @staticmethod - def fake_get_vips(): - return [{'status': 'ACTIVE', - 'status_description': None, - 'protocol': 'HTTP', - 'description': '', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'connection_limit': -1, - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'session_persistence': None, - 'address': '10.0.0.2', - 'protocol_port': 80, - 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', - 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'myvip'}, - {'status': 'INACTIVE', - 'status_description': None, - 'protocol': 'HTTP', - 'description': '', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'connection_limit': -1, - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'session_persistence': None, - 'address': '10.0.0.3', - 'protocol_port': 80, - 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', - 'id': 'ba6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'myvip02'}, - {'status': 'PENDING_CREATE', - 'status_description': None, - 'protocol': 'HTTP', - 'description': '', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'connection_limit': -1, - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'session_persistence': None, - 'address': '10.0.0.4', - 'protocol_port': 80, - 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', - 'id': 'fg6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'myvip03'}, - {'status': 'UNKNOWN', - 'status_description': None, - 'protocol': 'HTTP', - 'description': '', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'connection_limit': -1, - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'session_persistence': None, - 'address': '10.0.0.8', - 'protocol_port': 80, - 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', - 'id': 'fg6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'myvip03'}, - {'status': 'error', - 'status_description': None, - 'protocol': 'HTTP', - 'description': '', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'connection_limit': -1, - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'session_persistence': None, - 'address': '10.0.0.8', - 'protocol_port': 80, - 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', - 'id': 'fg6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'myvip_error'}, - ] - - def test_vip_get_samples(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_vips())) - self.assertEqual(3, len(samples)) - for field in self.pollster.FIELDS: - self.assertEqual(self.fake_get_vips()[0][field], - samples[0].resource_metadata[field]) - - def test_pool_volume(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_vips())) - self.assertEqual(1, samples[0].volume) - self.assertEqual(0, samples[1].volume) - self.assertEqual(2, samples[2].volume) - - def test_get_vip_meter_names(self): - 
samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_vips())) - self.assertEqual(set(['network.services.lb.vip']), - set([s.name for s in samples])) - - def test_vip_discovery(self): - discovered_vips = discovery.LBVipsDiscovery().discover(self.manager) - self.assertEqual(4, len(discovered_vips)) - for pool in self.fake_get_vips(): - if pool['status'] == 'error': - self.assertNotIn(pool, discovered_vips) - else: - self.assertIn(pool, discovered_vips) - - -class TestLBMemberPollster(_BaseTestLBPollster): - - def setUp(self): - super(TestLBMemberPollster, self).setUp() - self.pollster = lbaas.LBMemberPollster() - fake_members = self.fake_get_members() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' - 'member_get_all', - return_value=fake_members)) - - @staticmethod - def fake_get_members(): - return [{'status': 'ACTIVE', - 'protocol_port': 80, - 'weight': 1, - 'admin_state_up': True, - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'address': '10.0.0.3', - 'status_description': None, - 'id': '290b61eb-07bc-4372-9fbf-36459dd0f96b'}, - {'status': 'INACTIVE', - 'protocol_port': 80, - 'weight': 1, - 'admin_state_up': True, - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'address': '10.0.0.5', - 'status_description': None, - 'id': '2456661eb-07bc-4372-9fbf-36459dd0f96b'}, - {'status': 'PENDING_CREATE', - 'protocol_port': 80, - 'weight': 1, - 'admin_state_up': True, - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'address': '10.0.0.6', - 'status_description': None, - 'id': '45630b61eb-07bc-4372-9fbf-36459dd0f96b'}, - {'status': 'UNKNOWN', - 'protocol_port': 80, - 'weight': 1, - 'admin_state_up': True, - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'address': '10.0.0.6', - 'status_description': None, - 'id': '45630b61eb-07bc-4372-9fbf-36459dd0f96b'}, - {'status': 'error', - 'protocol_port': 80, - 'weight': 1, - 'admin_state_up': True, - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'address': '10.0.0.6', - 'status_description': None, - 'id': '45630b61eb-07bc-4372-9fbf-36459dd0f96b'}, - ] - - def test_get_samples_not_empty(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - self.fake_get_members())) - self.assertEqual(3, len(samples)) - for field in self.pollster.FIELDS: - self.assertEqual(self.fake_get_members()[0][field], - samples[0].resource_metadata[field]) - - def test_pool_volume(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - self.fake_get_members())) - self.assertEqual(1, samples[0].volume) - self.assertEqual(0, samples[1].volume) - self.assertEqual(2, samples[2].volume) - - def test_get_meter_names(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - self.fake_get_members())) - self.assertEqual(set(['network.services.lb.member']), - set([s.name for s in samples])) - - def test_members_discovery(self): - discovered_members = discovery.LBMembersDiscovery().discover( - self.manager) - self.assertEqual(4, len(discovered_members)) - for pool in self.fake_get_members(): - if pool['status'] == 'error': - self.assertNotIn(pool, discovered_members) - else: - self.assertIn(pool, discovered_members) - - -class TestLBHealthProbePollster(_BaseTestLBPollster): - - def setUp(self): - 
super(TestLBHealthProbePollster, self).setUp() - self.pollster = lbaas.LBHealthMonitorPollster() - fake_health_monitor = self.fake_get_health_monitor() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' - 'health_monitor_get_all', - return_value=fake_health_monitor)) - - @staticmethod - def fake_get_health_monitor(): - return [{'id': '34ae33e1-0035-49e2-a2ca-77d5d3fab365', - 'admin_state_up': True, - 'tenant_id': "d5d2817dae6b42159be9b665b64beb0e", - 'delay': 2, - 'max_retries': 5, - 'timeout': 5, - 'pools': [], - 'type': 'PING', - }] - - def test_get_samples_not_empty(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - self.fake_get_health_monitor())) - self.assertEqual(1, len(samples)) - for field in self.pollster.FIELDS: - self.assertEqual(self.fake_get_health_monitor()[0][field], - samples[0].resource_metadata[field]) - - def test_get_meter_names(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - self.fake_get_health_monitor())) - self.assertEqual(set(['network.services.lb.health_monitor']), - set([s.name for s in samples])) - - def test_probes_discovery(self): - discovered_probes = discovery.LBHealthMonitorsDiscovery().discover( - self.manager) - self.assertEqual(discovered_probes, self.fake_get_health_monitor()) - - -class TestLBStatsPollster(_BaseTestLBPollster): - - def setUp(self): - super(TestLBStatsPollster, self).setUp() - fake_pool_stats = self.fake_pool_stats() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' - 'pool_stats', - return_value=fake_pool_stats)) - - fake_pools = self.fake_get_pools() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' - 'pool_get_all', - return_value=fake_pools)) - - @staticmethod - def fake_get_pools(): - return [{'status': 'ACTIVE', - 'lb_method': 'ROUND_ROBIN', - 'protocol': 'HTTP', - 'description': '', - 'health_monitors': [], - 'members': [], - 'provider': 'haproxy', - 'status_description': None, - 'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'mylb', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'health_monitors_status': []}, - ] - - @staticmethod - def fake_pool_stats(): - return {'stats': {'active_connections': 2, - 'bytes_in': 1, - 'bytes_out': 3, - 'total_connections': 4 - } - } - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def _check_get_samples(self, factory, sample_name, expected_volume, - expected_type): - pollster = factory() - - cache = {} - samples = list(pollster.get_samples(self.manager, cache, - self.fake_get_pools())) - self.assertEqual(1, len(samples)) - self.assertIsNotNone(samples) - self.assertIn('lbstats', cache) - self.assertEqual(set([sample_name]), set([s.name for s in samples])) - - match = [s for s in samples if s.name == sample_name] - self.assertEqual(1, len(match), 'missing counter %s' % sample_name) - self.assertEqual(expected_volume, match[0].volume) - self.assertEqual(expected_type, match[0].type) - - def test_lb_total_connections(self): - self._check_get_samples(lbaas.LBTotalConnectionsPollster, - 'network.services.lb.total.connections', - 4, 'cumulative') - - def test_lb_active_connections(self): - self._check_get_samples(lbaas.LBActiveConnectionsPollster, - 'network.services.lb.active.connections', - 2, 'gauge') - - def test_lb_incoming_bytes(self): - self._check_get_samples(lbaas.LBBytesInPollster, - 
'network.services.lb.incoming.bytes', - 1, 'cumulative') - - def test_lb_outgoing_bytes(self): - self._check_get_samples(lbaas.LBBytesOutPollster, - 'network.services.lb.outgoing.bytes', - 3, 'cumulative') diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/network/services/test_vpnaas.py ceilometer-5.0.0~b3/ceilometer/tests/network/services/test_vpnaas.py --- ceilometer-5.0.0~b2/ceilometer/tests/network/services/test_vpnaas.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/network/services/test_vpnaas.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,175 +0,0 @@ -# -# Copyright 2014 Cisco Systems,Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_context import context -from oslotest import base -from oslotest import mockpatch - -from ceilometer.agent import manager -from ceilometer.agent import plugin_base -from ceilometer.network.services import discovery -from ceilometer.network.services import vpnaas - - -class _BaseTestVPNPollster(base.BaseTestCase): - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def setUp(self): - super(_BaseTestVPNPollster, self).setUp() - self.addCleanup(mock.patch.stopall) - self.context = context.get_admin_context() - self.manager = manager.AgentManager() - plugin_base._get_keystone = mock.Mock() - plugin_base._get_keystone.service_catalog.get_endpoints = ( - mock.MagicMock(return_value={'network': mock.ANY})) - - -class TestVPNServicesPollster(_BaseTestVPNPollster): - - def setUp(self): - super(TestVPNServicesPollster, self).setUp() - self.pollster = vpnaas.VPNServicesPollster() - fake_vpn = self.fake_get_vpn_service() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 
- 'vpn_get_all', - return_value=fake_vpn)) - - @staticmethod - def fake_get_vpn_service(): - return [{'status': 'ACTIVE', - 'name': 'myvpn', - 'description': '', - 'admin_state_up': True, - 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'}, - {'status': 'INACTIVE', - 'name': 'myvpn', - 'description': '', - 'admin_state_up': True, - 'id': 'cdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'}, - {'status': 'PENDING_CREATE', - 'name': 'myvpn', - 'description': '', - 'id': 'bdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'}, - {'status': 'error', - 'name': 'myvpn', - 'description': '', - 'id': 'edde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', - 'admin_state_up': False, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'}, - ] - - def test_vpn_get_samples(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_vpn_service())) - self.assertEqual(3, len(samples)) - for field in self.pollster.FIELDS: - self.assertEqual(self.fake_get_vpn_service()[0][field], - samples[0].resource_metadata[field]) - - def test_vpn_volume(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_vpn_service())) - self.assertEqual(1, samples[0].volume) - self.assertEqual(0, samples[1].volume) - self.assertEqual(2, samples[2].volume) - - def test_get_vpn_meter_names(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_vpn_service())) - self.assertEqual(set(['network.services.vpn']), - set([s.name for s in samples])) - - def test_vpn_discovery(self): - discovered_vpns = discovery.VPNServicesDiscovery().discover( - self.manager) - self.assertEqual(3, len(discovered_vpns)) - - for vpn in self.fake_get_vpn_service(): - if vpn['status'] == 'error': - self.assertNotIn(vpn, discovered_vpns) - else: - self.assertIn(vpn, discovered_vpns) - - -class TestIPSecConnectionsPollster(_BaseTestVPNPollster): - - def setUp(self): - super(TestIPSecConnectionsPollster, self).setUp() - self.pollster = vpnaas.IPSecConnectionsPollster() - fake_conns = self.fake_get_ipsec_connections() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 
- 'ipsec_site_connections_get_all', - return_value=fake_conns)) - - @staticmethod - def fake_get_ipsec_connections(): - return [{'name': 'connection1', - 'description': 'Remote-connection1', - 'peer_address': '192.168.1.10', - 'peer_id': '192.168.1.10', - 'peer_cidrs': ['192.168.2.0/24', - '192.168.3.0/24'], - 'mtu': 1500, - 'psk': 'abcd', - 'initiator': 'bi-directional', - 'dpd': { - 'action': 'hold', - 'interval': 30, - 'timeout': 120}, - 'ikepolicy_id': 'ade3d818-fdcb-fg4b-de7f-4550dc8a9d7a', - 'ipsecpolicy_id': 'fce3d818-fdcb-fg4b-de7f-7850dc8a9d7a', - 'vpnservice_id': 'dce3d818-fdcb-fg4b-de7f-5650dc8a9d7a', - 'admin_state_up': True, - 'tenant_id': 'abe3d818-fdcb-fg4b-de7f-6650dc8a9d7a', - 'id': 'fdfbcec-fdcb-fg4b-de7f-6650dc8a9d7a'} - ] - - def test_conns_get_samples(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_ipsec_connections())) - self.assertEqual(1, len(samples)) - for field in self.pollster.FIELDS: - self.assertEqual(self.fake_get_ipsec_connections()[0][field], - samples[0].resource_metadata[field]) - - def test_get_conns_meter_names(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_ipsec_connections())) - self.assertEqual(set(['network.services.vpn.connections']), - set([s.name for s in samples])) - - def test_conns_discovery(self): - discovered_conns = discovery.IPSecConnectionsDiscovery().discover( - self.manager) - self.assertEqual(1, len(discovered_conns)) - self.assertEqual(self.fake_get_ipsec_connections(), discovered_conns) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/network/statistics/__init__.py ceilometer-5.0.0~b3/ceilometer/tests/network/statistics/__init__.py --- ceilometer-5.0.0~b2/ceilometer/tests/network/statistics/__init__.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/network/statistics/__init__.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,28 +0,0 @@ -# -# Copyright 2014 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslotest import base - - -class _PollsterTestBase(base.BaseTestCase): - - def _test_pollster(self, pollster_class, meter_name, - meter_type, meter_unit): - - pollster = pollster_class() - - self.assertEqual(pollster.meter_name, meter_name) - self.assertEqual(pollster.meter_type, meter_type) - self.assertEqual(pollster.meter_unit, meter_unit) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/network/statistics/opencontrail/test_client.py ceilometer-5.0.0~b3/ceilometer/tests/network/statistics/opencontrail/test_client.py --- ceilometer-5.0.0~b2/ceilometer/tests/network/statistics/opencontrail/test_client.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/network/statistics/opencontrail/test_client.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,67 +0,0 @@ -# Copyright (C) 2014 eNovance SAS -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslotest import base - -from ceilometer.network.statistics.opencontrail import client - - -class TestOpencontrailClient(base.BaseTestCase): - - def setUp(self): - super(TestOpencontrailClient, self).setUp() - self.client = client.Client('http://127.0.0.1:8081', {'arg1': 'aaa'}) - - self.get_resp = mock.MagicMock() - self.get = mock.patch('requests.get', - return_value=self.get_resp).start() - self.get_resp.raw.version = 1.1 - self.get_resp.status_code = 200 - self.get_resp.reason = 'OK' - self.get_resp.content = '' - - def test_vm_statistics(self): - self.client.networks.get_vm_statistics('bbb') - - call_args = self.get.call_args_list[0][0] - call_kwargs = self.get.call_args_list[0][1] - - expected_url = ('http://127.0.0.1:8081/analytics/' - 'uves/virtual-machine/bbb') - self.assertEqual(expected_url, call_args[0]) - - data = call_kwargs.get('data') - - expected_data = {'arg1': 'aaa'} - self.assertEqual(expected_data, data) - - def test_vm_statistics_params(self): - self.client.networks.get_vm_statistics('bbb', - {'resource': 'fip_stats_list', - 'virtual_network': 'ccc'}) - - call_args = self.get.call_args_list[0][0] - call_kwargs = self.get.call_args_list[0][1] - - expected_url = ('http://127.0.0.1:8081/analytics/' - 'uves/virtual-machine/bbb') - self.assertEqual(expected_url, call_args[0]) - - data = call_kwargs.get('data') - - expected_data = {'arg1': 'aaa', - 'resource': 'fip_stats_list', - 'virtual_network': 'ccc'} - self.assertEqual(expected_data, data) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/network/statistics/opencontrail/test_driver.py ceilometer-5.0.0~b3/ceilometer/tests/network/statistics/opencontrail/test_driver.py --- ceilometer-5.0.0~b2/ceilometer/tests/network/statistics/opencontrail/test_driver.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/network/statistics/opencontrail/test_driver.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,264 +0,0 @@ -# Copyright (C) 2014 eNovance SAS -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
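The removed opencontrail client tests above fix the shape of the analytics request: the VM UUID is appended to the endpoint's /analytics/uves/virtual-machine/ path, and per-call parameters (resource, virtual_network) are merged over the client's defaults into the request data. A minimal sketch of that contract; get_vm_statistics here is a stand-in, not the real client:

    # Sketch of the request shape verified above; illustrative only.
    import requests

    def get_vm_statistics(endpoint, default_params, vm_uuid, extra=None):
        url = '%s/analytics/uves/virtual-machine/%s' % (endpoint, vm_uuid)
        data = dict(default_params)
        if extra:
            data.update(extra)  # e.g. resource / virtual_network filters
        return requests.get(url, data=data)

    # get_vm_statistics('http://127.0.0.1:8081', {'arg1': 'aaa'}, 'bbb',
    #                   {'resource': 'fip_stats_list',
    #                    'virtual_network': 'ccc'})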
- -import mock -from oslotest import base -from six.moves.urllib import parse as urlparse - -from ceilometer.network.statistics.opencontrail import driver - - -class TestOpencontrailDriver(base.BaseTestCase): - - def setUp(self): - super(TestOpencontrailDriver, self).setUp() - - self.nc_ports = mock.patch('ceilometer.neutron_client' - '.Client.port_get_all', - return_value=self.fake_ports()) - self.nc_ports.start() - - self.driver = driver.OpencontrailDriver() - self.parse_url = urlparse.ParseResult('opencontrail', - '127.0.0.1:8143', - '/', None, None, None) - self.params = {'password': ['admin'], - 'scheme': ['http'], - 'username': ['admin'], - 'verify_ssl': ['false'], - 'resource': ['if_stats_list']} - - @staticmethod - def fake_ports(): - return [{'admin_state_up': True, - 'device_owner': 'compute:None', - 'device_id': '674e553b-8df9-4321-87d9-93ba05b93558', - 'extra_dhcp_opts': [], - 'id': '96d49cc3-4e01-40ce-9cac-c0e32642a442', - 'mac_address': 'fa:16:3e:c5:35:93', - 'name': '', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'status': 'ACTIVE', - 'tenant_id': '89271fa581ab4380bf172f868c3615f9'}] - - @staticmethod - def fake_port_stats(): - return {"value": [{ - "name": "c588ebb7-ae52-485a-9f0c-b2791c5da196", - "value": { - "UveVirtualMachineAgent": { - "if_stats_list": [{ - "out_bytes": 22, - "in_bandwidth_usage": 0, - "in_bytes": 23, - "out_bandwidth_usage": 0, - "out_pkts": 5, - "in_pkts": 6, - "name": ("default-domain:demo:" - "96d49cc3-4e01-40ce-9cac-c0e32642a442") - }], - "fip_stats_list": [{ - "in_bytes": 33, - "iface_name": ("default-domain:demo:" - "96d49cc3-4e01-40ce-9cac-c0e32642a442"), - "out_bytes": 44, - "out_pkts": 10, - "virtual_network": "default-domain:openstack:public", - "in_pkts": 11, - "ip_address": "1.1.1.1" - }] - }}}]} - - @staticmethod - def fake_port_stats_with_node(): - return {"value": [{ - "name": "c588ebb7-ae52-485a-9f0c-b2791c5da196", - "value": { - "UveVirtualMachineAgent": { - "if_stats_list": [ - [[{ - "out_bytes": 22, - "in_bandwidth_usage": 0, - "in_bytes": 23, - "out_bandwidth_usage": 0, - "out_pkts": 5, - "in_pkts": 6, - "name": ("default-domain:demo:" - "96d49cc3-4e01-40ce-9cac-c0e32642a442") - }], 'node1'], - [[{ - "out_bytes": 22, - "in_bandwidth_usage": 0, - "in_bytes": 23, - "out_bandwidth_usage": 0, - "out_pkts": 4, - "in_pkts": 13, - "name": ("default-domain:demo:" - "96d49cc3-4e01-40ce-9cac-c0e32642a442")}], - 'node2'] - ] - }}}]} - - def _test_meter(self, meter_name, expected, fake_port_stats=None): - if not fake_port_stats: - fake_port_stats = self.fake_port_stats() - with mock.patch('ceilometer.network.' - 'statistics.opencontrail.' - 'client.NetworksAPIClient.' 
- 'get_vm_statistics', - return_value=fake_port_stats) as port_stats: - - samples = self.driver.get_sample_data(meter_name, self.parse_url, - self.params, {}) - - self.assertEqual(expected, [s for s in samples]) - - port_stats.assert_called_with('*') - - def test_switch_port_receive_packets_with_node(self): - expected = [(6, - '96d49cc3-4e01-40ce-9cac-c0e32642a442', - {'device_owner_id': - '674e553b-8df9-4321-87d9-93ba05b93558', - 'domain': 'default-domain', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'project': 'demo', - 'project_id': '89271fa581ab4380bf172f868c3615f9', - 'resource': 'if_stats_list'}, - mock.ANY), - (13, - '96d49cc3-4e01-40ce-9cac-c0e32642a442', - {'device_owner_id': - '674e553b-8df9-4321-87d9-93ba05b93558', - 'domain': 'default-domain', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'project': 'demo', - 'project_id': '89271fa581ab4380bf172f868c3615f9', - 'resource': 'if_stats_list'}, - mock.ANY)] - self._test_meter('switch.port.receive.packets', expected, - self.fake_port_stats_with_node()) - - def test_switch_port_receive_packets(self): - expected = [(6, - '96d49cc3-4e01-40ce-9cac-c0e32642a442', - {'device_owner_id': - '674e553b-8df9-4321-87d9-93ba05b93558', - 'domain': 'default-domain', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'project': 'demo', - 'project_id': '89271fa581ab4380bf172f868c3615f9', - 'resource': 'if_stats_list'}, - mock.ANY)] - self._test_meter('switch.port.receive.packets', expected) - - def test_switch_port_transmit_packets(self): - expected = [(5, - '96d49cc3-4e01-40ce-9cac-c0e32642a442', - {'device_owner_id': - '674e553b-8df9-4321-87d9-93ba05b93558', - 'domain': 'default-domain', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'project': 'demo', - 'project_id': '89271fa581ab4380bf172f868c3615f9', - 'resource': 'if_stats_list'}, - mock.ANY)] - self._test_meter('switch.port.transmit.packets', expected) - - def test_switch_port_receive_bytes(self): - expected = [(23, - '96d49cc3-4e01-40ce-9cac-c0e32642a442', - {'device_owner_id': - '674e553b-8df9-4321-87d9-93ba05b93558', - 'domain': 'default-domain', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'project': 'demo', - 'project_id': '89271fa581ab4380bf172f868c3615f9', - 'resource': 'if_stats_list'}, - mock.ANY)] - self._test_meter('switch.port.receive.bytes', expected) - - def test_switch_port_transmit_bytes(self): - expected = [(22, - '96d49cc3-4e01-40ce-9cac-c0e32642a442', - {'device_owner_id': - '674e553b-8df9-4321-87d9-93ba05b93558', - 'domain': 'default-domain', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'project': 'demo', - 'project_id': '89271fa581ab4380bf172f868c3615f9', - 'resource': 'if_stats_list'}, - mock.ANY)] - self._test_meter('switch.port.transmit.bytes', expected) - - def test_switch_port_receive_packets_fip(self): - self.params['resource'] = ['fip_stats_list'] - expected = [(11, - '96d49cc3-4e01-40ce-9cac-c0e32642a442', - {'device_owner_id': - '674e553b-8df9-4321-87d9-93ba05b93558', - 'domain': 'default-domain', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'project': 'demo', - 'project_id': '89271fa581ab4380bf172f868c3615f9', - 'resource': 'fip_stats_list'}, - mock.ANY)] - self._test_meter('switch.port.receive.packets', expected) - - def test_switch_port_transmit_packets_fip(self): - self.params['resource'] = ['fip_stats_list'] - expected = [(10, - '96d49cc3-4e01-40ce-9cac-c0e32642a442', - {'device_owner_id': - '674e553b-8df9-4321-87d9-93ba05b93558', - 'domain': 'default-domain', - 'network_id': 
'298a3088-a446-4d5a-bad8-f92ecacd786b', - 'project': 'demo', - 'project_id': '89271fa581ab4380bf172f868c3615f9', - 'resource': 'fip_stats_list'}, - mock.ANY)] - self._test_meter('switch.port.transmit.packets', expected) - - def test_switch_port_receive_bytes_fip(self): - self.params['resource'] = ['fip_stats_list'] - expected = [(33, - '96d49cc3-4e01-40ce-9cac-c0e32642a442', - {'device_owner_id': - '674e553b-8df9-4321-87d9-93ba05b93558', - 'domain': 'default-domain', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'project': 'demo', - 'project_id': '89271fa581ab4380bf172f868c3615f9', - 'resource': 'fip_stats_list'}, - mock.ANY)] - self._test_meter('switch.port.receive.bytes', expected) - - def test_switch_port_transmit_bytes_fip(self): - self.params['resource'] = ['fip_stats_list'] - expected = [(44, - '96d49cc3-4e01-40ce-9cac-c0e32642a442', - {'device_owner_id': - '674e553b-8df9-4321-87d9-93ba05b93558', - 'domain': 'default-domain', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'project': 'demo', - 'project_id': '89271fa581ab4380bf172f868c3615f9', - 'resource': 'fip_stats_list'}, - mock.ANY)] - self._test_meter('switch.port.transmit.bytes', expected) - - def test_switch_port_transmit_bytes_non_existing_network(self): - self.params['virtual_network'] = ['aaa'] - self.params['resource'] = ['fip_stats_list'] - self._test_meter('switch.port.transmit.bytes', []) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/network/statistics/opendaylight/test_client.py ceilometer-5.0.0~b3/ceilometer/tests/network/statistics/opendaylight/test_client.py --- ceilometer-5.0.0~b2/ceilometer/tests/network/statistics/opendaylight/test_client.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/network/statistics/opendaylight/test_client.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,172 +0,0 @@ -# -# Copyright 2013 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
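The removed OpenDaylight client tests below exercise every northbound endpoint through one request shape: a container-scoped URL under the controller/nb/v2 endpoint, basic or digest authentication selected by the auth query parameter, and an Accept: application/json header. A condensed sketch of that shape for the flow-statistics call; the standalone function is a stand-in for the client's StatisticsAPIClient.get_flow_statistics:

    # Illustrative sketch of the request the tests below assert.
    import requests
    from requests import auth as req_auth

    def get_flow_statistics(endpoint, container, auth_way, user, password):
        if auth_way == 'digest':
            auth = req_auth.HTTPDigestAuth(user, password)
        else:
            auth = req_auth.HTTPBasicAuth(user, password)
        url = '%s/statistics/%s/flow' % (endpoint, container)
        resp = requests.get(url, auth=auth,
                            headers={'Accept': 'application/json'})
        return resp.json()

    # get_flow_statistics('http://127.0.0.1:8080/controller/nb/v2',
    #                     'default', 'basic', 'admin', 'admin_pass')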
-import mock -from oslotest import base -from requests import auth as req_auth -import six -from six.moves.urllib import parse as urlparse - -from ceilometer.i18n import _ -from ceilometer.network.statistics.opendaylight import client - - -class TestClientHTTPBasicAuth(base.BaseTestCase): - - auth_way = 'basic' - scheme = 'http' - - def setUp(self): - super(TestClientHTTPBasicAuth, self).setUp() - self.parsed_url = urlparse.urlparse( - 'http://127.0.0.1:8080/controller/nb/v2?container_name=default&' - 'container_name=egg&auth=%s&user=admin&password=admin_pass&' - 'scheme=%s' % (self.auth_way, self.scheme)) - self.params = urlparse.parse_qs(self.parsed_url.query) - self.endpoint = urlparse.urlunparse( - urlparse.ParseResult(self.scheme, - self.parsed_url.netloc, - self.parsed_url.path, - None, None, None)) - odl_params = {'auth': self.params.get('auth')[0], - 'user': self.params.get('user')[0], - 'password': self.params.get('password')[0]} - self.client = client.Client(self.endpoint, odl_params) - - self.resp = mock.MagicMock() - self.get = mock.patch('requests.get', - return_value=self.resp).start() - - self.resp.raw.version = 1.1 - self.resp.status_code = 200 - self.resp.reason = 'OK' - self.resp.headers = {} - self.resp.content = 'dummy' - - def _test_request(self, method, url): - data = method('default') - - call_args = self.get.call_args_list[0][0] - call_kwargs = self.get.call_args_list[0][1] - - # check url - real_url = url % {'container_name': 'default', - 'scheme': self.scheme} - self.assertEqual(real_url, call_args[0]) - - # check auth parameters - auth = call_kwargs.get('auth') - if self.auth_way == 'digest': - self.assertIsInstance(auth, req_auth.HTTPDigestAuth) - else: - self.assertIsInstance(auth, req_auth.HTTPBasicAuth) - self.assertEqual('admin', auth.username) - self.assertEqual('admin_pass', auth.password) - - # check header - self.assertEqual( - {'Accept': 'application/json'}, - call_kwargs['headers']) - - # check return value - self.assertEqual(self.get().json(), data) - - def test_flow_statistics(self): - self._test_request( - self.client.statistics.get_flow_statistics, - '%(scheme)s://127.0.0.1:8080/controller/nb/v2' - '/statistics/%(container_name)s/flow') - - def test_port_statistics(self): - self._test_request( - self.client.statistics.get_port_statistics, - '%(scheme)s://127.0.0.1:8080/controller/nb/v2' - '/statistics/%(container_name)s/port') - - def test_table_statistics(self): - self._test_request( - self.client.statistics.get_table_statistics, - '%(scheme)s://127.0.0.1:8080/controller/nb/v2' - '/statistics/%(container_name)s/table') - - def test_topology(self): - self._test_request( - self.client.topology.get_topology, - '%(scheme)s://127.0.0.1:8080/controller/nb/v2' - '/topology/%(container_name)s') - - def test_user_links(self): - self._test_request( - self.client.topology.get_user_links, - '%(scheme)s://127.0.0.1:8080/controller/nb/v2' - '/topology/%(container_name)s/userLinks') - - def test_switch(self): - self._test_request( - self.client.switch_manager.get_nodes, - '%(scheme)s://127.0.0.1:8080/controller/nb/v2' - '/switchmanager/%(container_name)s/nodes') - - def test_active_hosts(self): - self._test_request( - self.client.host_tracker.get_active_hosts, - '%(scheme)s://127.0.0.1:8080/controller/nb/v2' - '/hosttracker/%(container_name)s/hosts/active') - - def test_inactive_hosts(self): - self._test_request( - self.client.host_tracker.get_inactive_hosts, - '%(scheme)s://127.0.0.1:8080/controller/nb/v2' - '/hosttracker/%(container_name)s/hosts/inactive') - - 
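test_http_error below pins the failure contract: a non-OK HTTP response must surface as client.OpenDaylightRESTAPIFailed carrying the status code and reason rather than being silently ignored. A minimal sketch of that check, with a hypothetical exception name standing in for the real one:

    # Illustrative only; ceilometer raises OpenDaylightRESTAPIFailed here.
    class RESTAPIFailed(Exception):
        pass

    def check_response(resp):
        if resp.status_code // 100 != 2:
            raise RESTAPIFailed('API returned %s %s'
                                % (resp.status_code, resp.reason))
        return resp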
def test_http_error(self): - self.resp.status_code = 404 - self.resp.reason = 'Not Found' - - try: - self.client.statistics.get_flow_statistics('default') - self.fail('') - except client.OpenDaylightRESTAPIFailed as e: - self.assertEqual( - _('OpenDaylitght API returned %(status)s %(reason)s') % - {'status': self.resp.status_code, - 'reason': self.resp.reason}, - six.text_type(e)) - - def test_other_error(self): - - class _Exception(Exception): - pass - - self.get = mock.patch('requests.get', - side_effect=_Exception).start() - - self.assertRaises(_Exception, - self.client.statistics.get_flow_statistics, - 'default') - - -class TestClientHTTPDigestAuth(TestClientHTTPBasicAuth): - - auth_way = 'digest' - - -class TestClientHTTPSBasicAuth(TestClientHTTPBasicAuth): - - scheme = 'https' - - -class TestClientHTTPSDigestAuth(TestClientHTTPDigestAuth): - - scheme = 'https' diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/network/statistics/opendaylight/test_driver.py ceilometer-5.0.0~b3/ceilometer/tests/network/statistics/opendaylight/test_driver.py --- ceilometer-5.0.0~b2/ceilometer/tests/network/statistics/opendaylight/test_driver.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/network/statistics/opendaylight/test_driver.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,1706 +0,0 @@ -# -# Copyright 2013 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import abc - -import mock -from oslotest import base -import six -from six import moves -from six.moves.urllib import parse as url_parse - -from ceilometer.network.statistics.opendaylight import driver - - -@six.add_metaclass(abc.ABCMeta) -class _Base(base.BaseTestCase): - - @abc.abstractproperty - def flow_data(self): - pass - - @abc.abstractproperty - def port_data(self): - pass - - @abc.abstractproperty - def table_data(self): - pass - - @abc.abstractproperty - def topology_data(self): - pass - - @abc.abstractproperty - def switch_data(self): - pass - - @abc.abstractproperty - def user_links_data(self): - pass - - @abc.abstractproperty - def active_hosts_data(self): - pass - - @abc.abstractproperty - def inactive_hosts_data(self): - pass - - fake_odl_url = url_parse.ParseResult('opendaylight', - 'localhost:8080', - 'controller/nb/v2', - None, - None, - None) - - fake_params = url_parse.parse_qs('user=admin&password=admin&scheme=http&' - 'container_name=default&auth=basic') - - fake_params_multi_container = ( - url_parse.parse_qs('user=admin&password=admin&scheme=http&' - 'container_name=first&container_name=second&' - 'auth=basic')) - - def setUp(self): - super(_Base, self).setUp() - self.addCleanup(mock.patch.stopall) - - self.driver = driver.OpenDayLightDriver() - - self.get_flow_statistics = mock.patch( - 'ceilometer.network.statistics.opendaylight.client.' - 'StatisticsAPIClient.get_flow_statistics', - return_value=self.flow_data).start() - - mock.patch('ceilometer.network.statistics.opendaylight.client.' 
- 'StatisticsAPIClient.get_table_statistics', - return_value=self.table_data).start() - - mock.patch('ceilometer.network.statistics.opendaylight.client.' - 'StatisticsAPIClient.get_port_statistics', - return_value=self.port_data).start() - - mock.patch('ceilometer.network.statistics.opendaylight.client.' - 'TopologyAPIClient.get_topology', - return_value=self.topology_data).start() - - mock.patch('ceilometer.network.statistics.opendaylight.client.' - 'TopologyAPIClient.get_user_links', - return_value=self.user_links_data).start() - - mock.patch('ceilometer.network.statistics.opendaylight.client.' - 'SwitchManagerAPIClient.get_nodes', - return_value=self.switch_data).start() - - mock.patch('ceilometer.network.statistics.opendaylight.client.' - 'HostTrackerAPIClient.get_active_hosts', - return_value=self.active_hosts_data).start() - - mock.patch('ceilometer.network.statistics.opendaylight.client.' - 'HostTrackerAPIClient.get_inactive_hosts', - return_value=self.inactive_hosts_data).start() - - def _test_for_meter(self, meter_name, expected_data): - sample_data = self.driver.get_sample_data(meter_name, - self.fake_odl_url, - self.fake_params, - {}) - - for sample, expected in moves.zip(sample_data, expected_data): - self.assertEqual(expected[0], sample[0]) # check volume - self.assertEqual(expected[1], sample[1]) # check resource id - self.assertEqual(expected[2], sample[2]) # check resource metadata - self.assertIsNotNone(sample[3]) # timestamp - - -class TestOpenDayLightDriverSpecial(_Base): - - flow_data = {"flowStatistics": []} - port_data = {"portStatistics": []} - table_data = {"tableStatistics": []} - topology_data = {"edgeProperties": []} - switch_data = {"nodeProperties": []} - user_links_data = {"userLinks": []} - active_hosts_data = {"hostConfig": []} - inactive_hosts_data = {"hostConfig": []} - - def test_not_implemented_meter(self): - sample_data = self.driver.get_sample_data('egg', - self.fake_odl_url, - self.fake_params, - {}) - self.assertIsNone(sample_data) - - sample_data = self.driver.get_sample_data('switch.table.egg', - self.fake_odl_url, - self.fake_params, - {}) - self.assertIsNone(sample_data) - - def test_cache(self): - cache = {} - self.driver.get_sample_data('switch', - self.fake_odl_url, - self.fake_params, - cache) - self.driver.get_sample_data('switch', - self.fake_odl_url, - self.fake_params, - cache) - self.assertEqual(1, self.get_flow_statistics.call_count) - - cache = {} - self.driver.get_sample_data('switch', - self.fake_odl_url, - self.fake_params, - cache) - self.assertEqual(2, self.get_flow_statistics.call_count) - - def test_multi_container(self): - cache = {} - self.driver.get_sample_data('switch', - self.fake_odl_url, - self.fake_params_multi_container, - cache) - self.assertEqual(2, self.get_flow_statistics.call_count) - - self.assertIn('network.statistics.opendaylight', cache) - - odl_data = cache['network.statistics.opendaylight'] - - self.assertIn('first', odl_data) - self.assertIn('second', odl_data) - - def test_http_error(self): - - mock.patch('ceilometer.network.statistics.opendaylight.client.' - 'StatisticsAPIClient.get_flow_statistics', - side_effect=Exception()).start() - - sample_data = self.driver.get_sample_data('switch', - self.fake_odl_url, - self.fake_params, - {}) - - self.assertEqual(0, len(sample_data)) - - mock.patch('ceilometer.network.statistics.opendaylight.client.' 
- 'StatisticsAPIClient.get_flow_statistics', - side_effect=[Exception(), self.flow_data]).start() - cache = {} - self.driver.get_sample_data('switch', - self.fake_odl_url, - self.fake_params_multi_container, - cache) - - self.assertIn('network.statistics.opendaylight', cache) - - odl_data = cache['network.statistics.opendaylight'] - - self.assertIn('second', odl_data) - - -class TestOpenDayLightDriverSimple(_Base): - - flow_data = { - "flowStatistics": [ - { - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "flowStatistic": [ - { - "flow": { - "match": { - "matchField": [ - { - "type": "DL_TYPE", - "value": "2048" - }, - { - "mask": "255.255.255.255", - "type": "NW_DST", - "value": "1.1.1.1" - } - ] - }, - "actions": { - "@type": "output", - "port": { - "id": "3", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "type": "OF" - } - }, - "hardTimeout": "0", - "id": "0", - "idleTimeout": "0", - "priority": "1" - }, - "byteCount": "0", - "durationNanoseconds": "397000000", - "durationSeconds": "1828", - "packetCount": "0", - "tableId": "0" - }, - ] - } - ] - } - port_data = { - "portStatistics": [ - { - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "portStatistic": [ - { - "nodeConnector": { - "id": "4", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "type": "OF" - }, - "collisionCount": "0", - "receiveBytes": "0", - "receiveCrcError": "0", - "receiveDrops": "0", - "receiveErrors": "0", - "receiveFrameError": "0", - "receiveOverRunError": "0", - "receivePackets": "0", - "transmitBytes": "0", - "transmitDrops": "0", - "transmitErrors": "0", - "transmitPackets": "0" - }, - ] - } - ] - } - table_data = { - "tableStatistics": [ - { - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "tableStatistic": [ - { - "activeCount": "11", - "lookupCount": "816", - "matchedCount": "220", - "nodeTable": { - "id": "0", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - } - } - }, - ] - } - ] - } - topology_data = {"edgeProperties": []} - switch_data = { - "nodeProperties": [ - { - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "properties": { - "actions": { - "value": "4095" - }, - "timeStamp": { - "name": "connectedSince", - "value": "1377291227877" - } - } - }, - ] - } - user_links_data = {"userLinks": []} - active_hosts_data = {"hostConfig": []} - inactive_hosts_data = {"hostConfig": []} - - def test_meter_switch(self): - expected_data = [ - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - "properties_actions": "4095", - "properties_timeStamp_connectedSince": "1377291227877" - }), - ] - - self._test_for_meter('switch', expected_data) - - def test_meter_switch_port(self): - expected_data = [ - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4', - }), - ] - self._test_for_meter('switch.port', expected_data) - - def test_meter_switch_port_receive_packets(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.receive.packets', expected_data) - - def test_meter_switch_port_transmit_packets(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.transmit.packets', expected_data) - - def test_meter_switch_port_receive_bytes(self): - expected_data = [ 
- (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.receive.bytes', expected_data) - - def test_meter_switch_port_transmit_bytes(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.transmit.bytes', expected_data) - - def test_meter_switch_port_receive_drops(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.receive.drops', expected_data) - - def test_meter_switch_port_transmit_drops(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.transmit.drops', expected_data) - - def test_meter_switch_port_receive_errors(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.receive.errors', expected_data) - - def test_meter_switch_port_transmit_errors(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.transmit.errors', expected_data) - - def test_meter_switch_port_receive_frame_error(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.receive.frame_error', expected_data) - - def test_meter_switch_port_receive_overrun_error(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.receive.overrun_error', - expected_data) - - def test_meter_switch_port_receive_crc_error(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.receive.crc_error', expected_data) - - def test_meter_switch_port_collision_count(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.collision.count', expected_data) - - def test_meter_switch_table(self): - expected_data = [ - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0'}), - ] - self._test_for_meter('switch.table', expected_data) - - def test_meter_switch_table_active_entries(self): - expected_data = [ - (11, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0'}), - ] - self._test_for_meter('switch.table.active.entries', expected_data) - - def test_meter_switch_table_lookup_packets(self): - expected_data = [ - (816, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0'}), - ] - self._test_for_meter('switch.table.lookup.packets', expected_data) - - def test_meter_switch_table_matched_packets(self): - expected_data = [ - (220, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0'}), - ] - self._test_for_meter('switch.table.matched.packets', expected_data) - - def 
test_meter_switch_flow(self): - expected_data = [ - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.1", - "flow_actions_@type": "output", - "flow_actions_port_id": "3", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1" - }), - ] - self._test_for_meter('switch.flow', expected_data) - - def test_meter_switch_flow_duration_seconds(self): - expected_data = [ - (1828, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.1", - "flow_actions_@type": "output", - "flow_actions_port_id": "3", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - ] - self._test_for_meter('switch.flow.duration_seconds', expected_data) - - def test_meter_switch_flow_duration_nanoseconds(self): - expected_data = [ - (397000000, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.1", - "flow_actions_@type": "output", - "flow_actions_port_id": "3", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - ] - self._test_for_meter('switch.flow.duration_nanoseconds', expected_data) - - def test_meter_switch_flow_packets(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.1", - "flow_actions_@type": "output", - "flow_actions_port_id": "3", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - ] - self._test_for_meter('switch.flow.packets', expected_data) - - def test_meter_switch_flow_bytes(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": 
"1.1.1.1", - "flow_actions_@type": "output", - "flow_actions_port_id": "3", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - ] - self._test_for_meter('switch.flow.bytes', expected_data) - - -class TestOpenDayLightDriverComplex(_Base): - - flow_data = { - "flowStatistics": [ - { - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "flowStatistic": [ - { - "flow": { - "match": { - "matchField": [ - { - "type": "DL_TYPE", - "value": "2048" - }, - { - "mask": "255.255.255.255", - "type": "NW_DST", - "value": "1.1.1.1" - } - ] - }, - "actions": { - "@type": "output", - "port": { - "id": "3", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "type": "OF" - } - }, - "hardTimeout": "0", - "id": "0", - "idleTimeout": "0", - "priority": "1" - }, - "byteCount": "0", - "durationNanoseconds": "397000000", - "durationSeconds": "1828", - "packetCount": "0", - "tableId": "0" - }, - { - "flow": { - "match": { - "matchField": [ - { - "type": "DL_TYPE", - "value": "2048" - }, - { - "mask": "255.255.255.255", - "type": "NW_DST", - "value": "1.1.1.2" - } - ] - }, - "actions": { - "@type": "output", - "port": { - "id": "4", - "node": { - "id": "00:00:00:00:00:00:00:03", - "type": "OF" - }, - "type": "OF" - } - }, - "hardTimeout": "0", - "id": "0", - "idleTimeout": "0", - "priority": "1" - }, - "byteCount": "89", - "durationNanoseconds": "200000", - "durationSeconds": "5648", - "packetCount": "30", - "tableId": "1" - } - ] - } - ] - } - port_data = { - "portStatistics": [ - { - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "portStatistic": [ - { - "nodeConnector": { - "id": "4", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "type": "OF" - }, - "collisionCount": "0", - "receiveBytes": "0", - "receiveCrcError": "0", - "receiveDrops": "0", - "receiveErrors": "0", - "receiveFrameError": "0", - "receiveOverRunError": "0", - "receivePackets": "0", - "transmitBytes": "0", - "transmitDrops": "0", - "transmitErrors": "0", - "transmitPackets": "0" - }, - { - "nodeConnector": { - "id": "3", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "type": "OF" - }, - "collisionCount": "0", - "receiveBytes": "12740", - "receiveCrcError": "0", - "receiveDrops": "0", - "receiveErrors": "0", - "receiveFrameError": "0", - "receiveOverRunError": "0", - "receivePackets": "182", - "transmitBytes": "12110", - "transmitDrops": "0", - "transmitErrors": "0", - "transmitPackets": "173" - }, - { - "nodeConnector": { - "id": "2", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "type": "OF" - }, - "collisionCount": "0", - "receiveBytes": "12180", - "receiveCrcError": "0", - "receiveDrops": "0", - "receiveErrors": "0", - "receiveFrameError": "0", - "receiveOverRunError": "0", - "receivePackets": "174", - "transmitBytes": "12670", - "transmitDrops": "0", - "transmitErrors": "0", - "transmitPackets": "181" - }, - { - "nodeConnector": { - "id": "1", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "type": "OF" - }, - "collisionCount": "0", - "receiveBytes": "0", - "receiveCrcError": "0", - "receiveDrops": "0", - "receiveErrors": "0", - "receiveFrameError": "0", - "receiveOverRunError": "0", - "receivePackets": "0", - "transmitBytes": "0", - "transmitDrops": "0", - "transmitErrors": "0", - "transmitPackets": "0" - }, - { - "nodeConnector": { - "id": "0", 
- "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "type": "OF" - }, - "collisionCount": "0", - "receiveBytes": "0", - "receiveCrcError": "0", - "receiveDrops": "0", - "receiveErrors": "0", - "receiveFrameError": "0", - "receiveOverRunError": "0", - "receivePackets": "0", - "transmitBytes": "0", - "transmitDrops": "0", - "transmitErrors": "0", - "transmitPackets": "0" - } - ] - } - ] - } - table_data = { - "tableStatistics": [ - { - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "tableStatistic": [ - { - "activeCount": "11", - "lookupCount": "816", - "matchedCount": "220", - "nodeTable": { - "id": "0", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - } - } - }, - { - "activeCount": "20", - "lookupCount": "10", - "matchedCount": "5", - "nodeTable": { - "id": "1", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - } - } - } - ] - } - ] - } - topology_data = { - "edgeProperties": [ - { - "edge": { - "headNodeConnector": { - "id": "2", - "node": { - "id": "00:00:00:00:00:00:00:03", - "type": "OF" - }, - "type": "OF" - }, - "tailNodeConnector": { - "id": "2", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "type": "OF" - } - }, - "properties": { - "bandwidth": { - "value": 10000000000 - }, - "config": { - "value": 1 - }, - "name": { - "value": "s2-eth3" - }, - "state": { - "value": 1 - }, - "timeStamp": { - "name": "creation", - "value": 1379527162648 - } - } - }, - { - "edge": { - "headNodeConnector": { - "id": "5", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "type": "OF" - }, - "tailNodeConnector": { - "id": "2", - "node": { - "id": "00:00:00:00:00:00:00:04", - "type": "OF" - }, - "type": "OF" - } - }, - "properties": { - "timeStamp": { - "name": "creation", - "value": 1379527162648 - } - } - } - ] - } - switch_data = { - "nodeProperties": [ - { - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "properties": { - "actions": { - "value": "4095" - }, - "buffers": { - "value": "256" - }, - "capabilities": { - "value": "199" - }, - "description": { - "value": "None" - }, - "macAddress": { - "value": "00:00:00:00:00:02" - }, - "tables": { - "value": "-1" - }, - "timeStamp": { - "name": "connectedSince", - "value": "1377291227877" - } - } - }, - { - "node": { - "id": "00:00:00:00:00:00:00:03", - "type": "OF" - }, - "properties": { - "actions": { - "value": "1024" - }, - "buffers": { - "value": "512" - }, - "capabilities": { - "value": "1000" - }, - "description": { - "value": "Foo Bar" - }, - "macAddress": { - "value": "00:00:00:00:00:03" - }, - "tables": { - "value": "10" - }, - "timeStamp": { - "name": "connectedSince", - "value": "1377291228000" - } - } - } - ] - } - user_links_data = { - "userLinks": [ - { - "dstNodeConnector": "OF|5@OF|00:00:00:00:00:00:00:05", - "name": "link1", - "srcNodeConnector": "OF|3@OF|00:00:00:00:00:00:00:02", - "status": "Success" - } - ] - } - active_hosts_data = { - "hostConfig": [ - { - "dataLayerAddress": "00:00:00:00:01:01", - "networkAddress": "1.1.1.1", - "nodeConnectorId": "9", - "nodeConnectorType": "OF", - "nodeId": "00:00:00:00:00:00:00:01", - "nodeType": "OF", - "staticHost": "false", - "vlan": "0" - }, - { - "dataLayerAddress": "00:00:00:00:02:02", - "networkAddress": "2.2.2.2", - "nodeConnectorId": "1", - "nodeConnectorType": "OF", - "nodeId": "00:00:00:00:00:00:00:02", - "nodeType": "OF", - "staticHost": "true", - "vlan": "0" - } - ] - } - inactive_hosts_data = { - "hostConfig": [ - { - "dataLayerAddress": 
"00:00:00:01:01:01", - "networkAddress": "1.1.1.3", - "nodeConnectorId": "8", - "nodeConnectorType": "OF", - "nodeId": "00:00:00:00:00:00:00:01", - "nodeType": "OF", - "staticHost": "false", - "vlan": "0" - }, - { - "dataLayerAddress": "00:00:00:01:02:02", - "networkAddress": "2.2.2.4", - "nodeConnectorId": "0", - "nodeConnectorType": "OF", - "nodeId": "00:00:00:00:00:00:00:02", - "nodeType": "OF", - "staticHost": "false", - "vlan": "1" - } - ] - } - - def test_meter_switch(self): - expected_data = [ - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - "properties_actions": "4095", - "properties_buffers": "256", - "properties_capabilities": "199", - "properties_description": "None", - "properties_macAddress": "00:00:00:00:00:02", - "properties_tables": "-1", - "properties_timeStamp_connectedSince": "1377291227877" - }), - (1, "00:00:00:00:00:00:00:03", { - 'controller': 'OpenDaylight', - 'container': 'default', - "properties_actions": "1024", - "properties_buffers": "512", - "properties_capabilities": "1000", - "properties_description": "Foo Bar", - "properties_macAddress": "00:00:00:00:00:03", - "properties_tables": "10", - "properties_timeStamp_connectedSince": "1377291228000" - }), - ] - - self._test_for_meter('switch', expected_data) - - def test_meter_switch_port(self): - expected_data = [ - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4', - }), - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3', - 'user_link_node_id': '00:00:00:00:00:00:00:05', - 'user_link_node_port': '5', - 'user_link_status': 'Success', - 'user_link_name': 'link1', - }), - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2', - 'topology_node_id': '00:00:00:00:00:00:00:03', - 'topology_node_port': '2', - "topology_bandwidth": 10000000000, - "topology_config": 1, - "topology_name": "s2-eth3", - "topology_state": 1, - "topology_timeStamp_creation": 1379527162648 - }), - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1', - 'host_status': 'active', - 'host_dataLayerAddress': '00:00:00:00:02:02', - 'host_networkAddress': '2.2.2.2', - 'host_staticHost': 'true', - 'host_vlan': '0', - }), - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0', - 'host_status': 'inactive', - 'host_dataLayerAddress': '00:00:00:01:02:02', - 'host_networkAddress': '2.2.2.4', - 'host_staticHost': 'false', - 'host_vlan': '1', - }), - ] - self._test_for_meter('switch.port', expected_data) - - def test_meter_switch_port_receive_packets(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (182, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (174, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.receive.packets', expected_data) - - def test_meter_switch_port_transmit_packets(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 
'default', - 'port': '4'}), - (173, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (181, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.transmit.packets', expected_data) - - def test_meter_switch_port_receive_bytes(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (12740, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (12180, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.receive.bytes', expected_data) - - def test_meter_switch_port_transmit_bytes(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (12110, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (12670, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.transmit.bytes', expected_data) - - def test_meter_switch_port_receive_drops(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.receive.drops', expected_data) - - def test_meter_switch_port_transmit_drops(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.transmit.drops', expected_data) - - def test_meter_switch_port_receive_errors(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (0, "00:00:00:00:00:00:00:02", 
{ - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.receive.errors', expected_data) - - def test_meter_switch_port_transmit_errors(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.transmit.errors', expected_data) - - def test_meter_switch_port_receive_frame_error(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.receive.frame_error', expected_data) - - def test_meter_switch_port_receive_overrun_error(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.receive.overrun_error', - expected_data) - - def test_meter_switch_port_receive_crc_error(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.receive.crc_error', expected_data) - - def test_meter_switch_port_collision_count(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 
'container': 'default', - 'port': '3'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.collision.count', expected_data) - - def test_meter_switch_table(self): - expected_data = [ - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0'}), - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '1'}), - ] - self._test_for_meter('switch.table', expected_data) - - def test_meter_switch_table_active_entries(self): - expected_data = [ - (11, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0'}), - (20, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '1'}), - ] - self._test_for_meter('switch.table.active.entries', expected_data) - - def test_meter_switch_table_lookup_packets(self): - expected_data = [ - (816, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0'}), - (10, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '1'}), - ] - self._test_for_meter('switch.table.lookup.packets', expected_data) - - def test_meter_switch_table_matched_packets(self): - expected_data = [ - (220, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0'}), - (5, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '1'}), - ] - self._test_for_meter('switch.table.matched.packets', expected_data) - - def test_meter_switch_flow(self): - expected_data = [ - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.1", - "flow_actions_@type": "output", - "flow_actions_port_id": "3", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1" - }), - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '1', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.2", - "flow_actions_@type": "output", - "flow_actions_port_id": "4", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1" - }), - ] - self._test_for_meter('switch.flow', expected_data) - - def test_meter_switch_flow_duration_seconds(self): - expected_data = [ - (1828, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0', - 'flow_id': '0', - 
"flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.1", - "flow_actions_@type": "output", - "flow_actions_port_id": "3", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - (5648, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '1', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.2", - "flow_actions_@type": "output", - "flow_actions_port_id": "4", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - ] - self._test_for_meter('switch.flow.duration_seconds', expected_data) - - def test_meter_switch_flow_duration_nanoseconds(self): - expected_data = [ - (397000000, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.1", - "flow_actions_@type": "output", - "flow_actions_port_id": "3", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - (200000, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '1', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.2", - "flow_actions_@type": "output", - "flow_actions_port_id": "4", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - ] - self._test_for_meter('switch.flow.duration_nanoseconds', expected_data) - - def test_meter_switch_flow_packets(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.1", - "flow_actions_@type": "output", - "flow_actions_port_id": "3", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - (30, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '1', - 'flow_id': '0', - 
"flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.2", - "flow_actions_@type": "output", - "flow_actions_port_id": "4", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - ] - self._test_for_meter('switch.flow.packets', expected_data) - - def test_meter_switch_flow_bytes(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.1", - "flow_actions_@type": "output", - "flow_actions_port_id": "3", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - (89, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '1', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.2", - "flow_actions_@type": "output", - "flow_actions_port_id": "4", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - ] - self._test_for_meter('switch.flow.bytes', expected_data) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/network/statistics/test_driver.py ceilometer-5.0.0~b3/ceilometer/tests/network/statistics/test_driver.py --- ceilometer-5.0.0~b2/ceilometer/tests/network/statistics/test_driver.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/network/statistics/test_driver.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,37 +0,0 @@ -# -# Copyright 2014 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-from oslotest import base - -from ceilometer.network.statistics import driver - - -class TestDriver(base.BaseTestCase): - - @staticmethod - def test_driver_ok(): - - class OkDriver(driver.Driver): - - def get_sample_data(self, meter_name, resources, cache): - pass - - OkDriver() - - def test_driver_ng(self): - - class NgDriver(driver.Driver): - """get_sample_data method is lost.""" - - self.assertRaises(TypeError, NgDriver) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/network/statistics/test_flow.py ceilometer-5.0.0~b3/ceilometer/tests/network/statistics/test_flow.py --- ceilometer-5.0.0~b2/ceilometer/tests/network/statistics/test_flow.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/network/statistics/test_flow.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,56 +0,0 @@ -# -# Copyright 2014 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from ceilometer.network.statistics import flow -from ceilometer import sample -from ceilometer.tests.network import statistics - - -class TestFlowPollsters(statistics._PollsterTestBase): - - def test_flow_pollster(self): - self._test_pollster( - flow.FlowPollster, - 'switch.flow', - sample.TYPE_GAUGE, - 'flow') - - def test_flow_pollster_duration_seconds(self): - self._test_pollster( - flow.FlowPollsterDurationSeconds, - 'switch.flow.duration_seconds', - sample.TYPE_GAUGE, - 's') - - def test_flow_pollster_duration_nanoseconds(self): - self._test_pollster( - flow.FlowPollsterDurationNanoseconds, - 'switch.flow.duration_nanoseconds', - sample.TYPE_GAUGE, - 'ns') - - def test_flow_pollster_packets(self): - self._test_pollster( - flow.FlowPollsterPackets, - 'switch.flow.packets', - sample.TYPE_CUMULATIVE, - 'packet') - - def test_flow_pollster_bytes(self): - self._test_pollster( - flow.FlowPollsterBytes, - 'switch.flow.bytes', - sample.TYPE_CUMULATIVE, - 'B') diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/network/statistics/test_port.py ceilometer-5.0.0~b3/ceilometer/tests/network/statistics/test_port.py --- ceilometer-5.0.0~b2/ceilometer/tests/network/statistics/test_port.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/network/statistics/test_port.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,112 +0,0 @@ -# -# Copyright 2014 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from ceilometer.network.statistics import port -from ceilometer import sample -from ceilometer.tests.network import statistics - - -class TestPortPollsters(statistics._PollsterTestBase): - - def test_port_pollster(self): - self._test_pollster( - port.PortPollster, - 'switch.port', - sample.TYPE_GAUGE, - 'port') - - def test_port_pollster_receive_packets(self): - self._test_pollster( - port.PortPollsterReceivePackets, - 'switch.port.receive.packets', - sample.TYPE_CUMULATIVE, - 'packet') - - def test_port_pollster_transmit_packets(self): - self._test_pollster( - port.PortPollsterTransmitPackets, - 'switch.port.transmit.packets', - sample.TYPE_CUMULATIVE, - 'packet') - - def test_port_pollster_receive_bytes(self): - self._test_pollster( - port.PortPollsterReceiveBytes, - 'switch.port.receive.bytes', - sample.TYPE_CUMULATIVE, - 'B') - - def test_port_pollster_transmit_bytes(self): - self._test_pollster( - port.PortPollsterTransmitBytes, - 'switch.port.transmit.bytes', - sample.TYPE_CUMULATIVE, - 'B') - - def test_port_pollster_receive_drops(self): - self._test_pollster( - port.PortPollsterReceiveDrops, - 'switch.port.receive.drops', - sample.TYPE_CUMULATIVE, - 'packet') - - def test_port_pollster_transmit_drops(self): - self._test_pollster( - port.PortPollsterTransmitDrops, - 'switch.port.transmit.drops', - sample.TYPE_CUMULATIVE, - 'packet') - - def test_port_pollster_receive_errors(self): - self._test_pollster( - port.PortPollsterReceiveErrors, - 'switch.port.receive.errors', - sample.TYPE_CUMULATIVE, - 'packet') - - def test_port_pollster_transmit_errors(self): - self._test_pollster( - port.PortPollsterTransmitErrors, - 'switch.port.transmit.errors', - sample.TYPE_CUMULATIVE, - 'packet') - - def test_port_pollster_receive_frame_errors(self): - self._test_pollster( - port.PortPollsterReceiveFrameErrors, - 'switch.port.receive.frame_error', - sample.TYPE_CUMULATIVE, - 'packet') - - def test_port_pollster_receive_overrun_errors(self): - self._test_pollster( - port.PortPollsterReceiveOverrunErrors, - 'switch.port.receive.overrun_error', - sample.TYPE_CUMULATIVE, - 'packet') - - def test_port_pollster_receive_crc_errors(self): - self._test_pollster( - port.PortPollsterReceiveCRCErrors, - 'switch.port.receive.crc_error', - sample.TYPE_CUMULATIVE, - 'packet') - - def test_port_pollster_collision_count(self): - self._test_pollster( - port.PortPollsterCollisionCount, - 'switch.port.collision.count', - sample.TYPE_CUMULATIVE, - 'packet') diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/network/statistics/test_statistics.py ceilometer-5.0.0~b3/ceilometer/tests/network/statistics/test_statistics.py --- ceilometer-5.0.0~b2/ceilometer/tests/network/statistics/test_statistics.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/network/statistics/test_statistics.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,195 +0,0 @@ -# -# Copyright 2014 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import datetime - -from oslo_utils import timeutils -from oslotest import base - -from ceilometer.network import statistics -from ceilometer.network.statistics import driver -from ceilometer import sample - - -class TestBase(base.BaseTestCase): - - @staticmethod - def test_subclass_ok(): - - class OkSubclass(statistics._Base): - - meter_name = 'foo' - meter_type = sample.TYPE_GAUGE - meter_unit = 'B' - - OkSubclass() - - def test_subclass_ng(self): - - class NgSubclass1(statistics._Base): - """meter_name is lost.""" - - meter_type = sample.TYPE_GAUGE - meter_unit = 'B' - - class NgSubclass2(statistics._Base): - """meter_type is lost.""" - - meter_name = 'foo' - meter_unit = 'B' - - class NgSubclass3(statistics._Base): - """meter_unit is lost.""" - - meter_name = 'foo' - meter_type = sample.TYPE_GAUGE - - self.assertRaises(TypeError, NgSubclass1) - self.assertRaises(TypeError, NgSubclass2) - self.assertRaises(TypeError, NgSubclass3) - - -class TestBaseGetSamples(base.BaseTestCase): - - def setUp(self): - super(TestBaseGetSamples, self).setUp() - - class FakePollster(statistics._Base): - meter_name = 'foo' - meter_type = sample.TYPE_CUMULATIVE - meter_unit = 'bar' - - self.pollster = FakePollster() - - def tearDown(self): - statistics._Base.drivers = {} - super(TestBaseGetSamples, self).tearDown() - - @staticmethod - def _setup_ext_mgr(**drivers): - statistics._Base.drivers = drivers - - def _make_fake_driver(self, *return_values): - class FakeDriver(driver.Driver): - - def __init__(self): - self.index = 0 - - def get_sample_data(self, meter_name, parse_url, params, cache): - if self.index >= len(return_values): - yield None - retval = return_values[self.index] - self.index += 1 - yield retval - return FakeDriver - - @staticmethod - def _make_timestamps(count): - now = timeutils.utcnow() - return [(now + datetime.timedelta(seconds=i)).isoformat() - for i in range(count)] - - def _get_samples(self, *resources): - - return [v for v in self.pollster.get_samples(self, {}, resources)] - - def _assert_sample(self, s, volume, resource_id, resource_metadata, - timestamp): - self.assertEqual('foo', s.name) - self.assertEqual(sample.TYPE_CUMULATIVE, s.type) - self.assertEqual('bar', s.unit) - self.assertEqual(volume, s.volume) - self.assertIsNone(s.user_id) - self.assertIsNone(s.project_id) - self.assertEqual(resource_id, s.resource_id) - self.assertEqual(timestamp, s.timestamp) - self.assertEqual(resource_metadata, s.resource_metadata) - - def test_get_samples_one_driver_one_resource(self): - times = self._make_timestamps(2) - fake_driver = self._make_fake_driver((1, 'a', {'spam': 'egg'}, - times[0]), - (2, 'b', None, times[1])) - - self._setup_ext_mgr(http=fake_driver()) - - samples = self._get_samples('http://foo') - - self.assertEqual(1, len(samples)) - self._assert_sample(samples[0], 1, 'a', {'spam': 'egg'}, times[0]) - - def test_get_samples_one_driver_two_resource(self): - times = self._make_timestamps(3) - fake_driver = self._make_fake_driver((1, 'a', {'spam': 'egg'}, - times[0]), - (2, 'b', None, times[1]), - (3, 'c', None, times[2])) - - self._setup_ext_mgr(http=fake_driver()) - - samples = self._get_samples('http://foo', 'http://bar') - - self.assertEqual(2, len(samples)) - self._assert_sample(samples[0], 1, 'a', {'spam': 'egg'}, times[0]) - self._assert_sample(samples[1], 2, 'b', None, times[1]) - - def test_get_samples_two_driver_one_resource(self): - times = self._make_timestamps(4) - fake_driver1 = self._make_fake_driver((1, 'a', {'spam': 'egg'}, - times[0]), - (2, 'b', None), 
times[1]) - - fake_driver2 = self._make_fake_driver((11, 'A', None, times[2]), - (12, 'B', None, times[3])) - - self._setup_ext_mgr(http=fake_driver1(), https=fake_driver2()) - - samples = self._get_samples('http://foo') - - self.assertEqual(1, len(samples)) - self._assert_sample(samples[0], 1, 'a', {'spam': 'egg'}, times[0]) - - def test_get_samples_multi_samples(self): - times = self._make_timestamps(2) - fake_driver = self._make_fake_driver([(1, 'a', {'spam': 'egg'}, - times[0]), - (2, 'b', None, times[1])]) - - self._setup_ext_mgr(http=fake_driver()) - - samples = self._get_samples('http://foo') - - self.assertEqual(2, len(samples)) - self._assert_sample(samples[0], 1, 'a', {'spam': 'egg'}, times[0]) - self._assert_sample(samples[1], 2, 'b', None, times[1]) - - def test_get_samples_return_none(self): - fake_driver = self._make_fake_driver(None) - - self._setup_ext_mgr(http=fake_driver()) - - samples = self._get_samples('http://foo') - - self.assertEqual(0, len(samples)) - - def test_get_samples_return_no_generator(self): - class NoneFakeDriver(driver.Driver): - - def get_sample_data(self, meter_name, parse_url, params, cache): - return None - - self._setup_ext_mgr(http=NoneFakeDriver()) - samples = self._get_samples('http://foo') - self.assertFalse(samples) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/network/statistics/test_switch.py ceilometer-5.0.0~b3/ceilometer/tests/network/statistics/test_switch.py --- ceilometer-5.0.0~b2/ceilometer/tests/network/statistics/test_switch.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/network/statistics/test_switch.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,28 +0,0 @@ -# -# Copyright 2014 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from ceilometer.network.statistics import switch -from ceilometer import sample -from ceilometer.tests.network import statistics - - -class TestSwitchPollster(statistics._PollsterTestBase): - - def test_table_pollster(self): - self._test_pollster( - switch.SWPollster, - 'switch', - sample.TYPE_GAUGE, - 'switch') diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/network/statistics/test_table.py ceilometer-5.0.0~b3/ceilometer/tests/network/statistics/test_table.py --- ceilometer-5.0.0~b2/ceilometer/tests/network/statistics/test_table.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/network/statistics/test_table.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,49 +0,0 @@ -# -# Copyright 2014 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from ceilometer.network.statistics import table -from ceilometer import sample -from ceilometer.tests.network import statistics - - -class TestTablePollsters(statistics._PollsterTestBase): - - def test_table_pollster(self): - self._test_pollster( - table.TablePollster, - 'switch.table', - sample.TYPE_GAUGE, - 'table') - - def test_table_pollster_active_entries(self): - self._test_pollster( - table.TablePollsterActiveEntries, - 'switch.table.active.entries', - sample.TYPE_GAUGE, - 'entry') - - def test_table_pollster_lookup_packets(self): - self._test_pollster( - table.TablePollsterLookupPackets, - 'switch.table.lookup.packets', - sample.TYPE_GAUGE, - 'packet') - - def test_table_pollster_matched_packets(self): - self._test_pollster( - table.TablePollsterMatchedPackets, - 'switch.table.matched.packets', - sample.TYPE_GAUGE, - 'packet') diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/network/test_floatingip.py ceilometer-5.0.0~b3/ceilometer/tests/network/test_floatingip.py --- ceilometer-5.0.0~b2/ceilometer/tests/network/test_floatingip.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/network/test_floatingip.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,97 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2012 eNovance -# -# Copyright 2013 IBM Corp -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_context import context -from oslotest import base - -from ceilometer.agent import manager -from ceilometer.network import floatingip - - -class TestFloatingIPPollster(base.BaseTestCase): - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def setUp(self): - super(TestFloatingIPPollster, self).setUp() - self.addCleanup(mock.patch.stopall) - self.context = context.get_admin_context() - self.manager = manager.AgentManager() - self.manager.keystone = mock.Mock() - self.manager.keystone.service_catalog.get_endpoints = mock.Mock( - return_value={'network': mock.ANY}) - self.pollster = floatingip.FloatingIPPollster() - fake_ips = self.fake_get_ips() - patch_virt = mock.patch('ceilometer.nova_client.Client.' - 'floating_ip_get_all', - return_value=fake_ips) - patch_virt.start() - - @staticmethod - def fake_get_ips(): - ips = [] - for i in range(1, 4): - ip = mock.MagicMock() - ip.id = i - ip.ip = '1.1.1.%d' % i - ip.pool = 'public' - ips.append(ip) - return ips - - def test_default_discovery(self): - self.assertEqual('endpoint:compute', self.pollster.default_discovery) - - # FIXME(dhellmann): Is there a useful way to define this - # test without a database? 
- # - # def test_get_samples_none_defined(self): - # try: - # list(self.pollster.get_samples(self.manager, - # self.context) - # ) - # except exception.NoFloatingIpsDefined: - # pass - # else: - # assert False, 'Should have seen an error' - - def test_get_samples_not_empty(self): - samples = list(self.pollster.get_samples(self.manager, {}, ['e'])) - self.assertEqual(3, len(samples)) - # It's necessary to verify all the attributes extracted by Nova - # API /os-floating-ips to make sure they're available and correct. - self.assertEqual(1, samples[0].resource_id) - self.assertEqual("1.1.1.1", samples[0].resource_metadata["address"]) - self.assertEqual("public", samples[0].resource_metadata["pool"]) - - self.assertEqual(2, samples[1].resource_id) - self.assertEqual("1.1.1.2", samples[1].resource_metadata["address"]) - self.assertEqual("public", samples[1].resource_metadata["pool"]) - - self.assertEqual(3, samples[2].resource_id) - self.assertEqual("1.1.1.3", samples[2].resource_metadata["address"]) - self.assertEqual("public", samples[2].resource_metadata["pool"]) - - def test_get_meter_names(self): - samples = list(self.pollster.get_samples(self.manager, {}, ['e'])) - self.assertEqual(set(['ip.floating']), set([s.name for s in samples])) - - def test_get_samples_cached(self): - cache = {'e-floating_ips': self.fake_get_ips()[:2]} - samples = list(self.pollster.get_samples(self.manager, cache, ['e'])) - self.assertEqual(2, len(samples)) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/network/test_notifications.py ceilometer-5.0.0~b3/ceilometer/tests/network/test_notifications.py --- ceilometer-5.0.0~b2/ceilometer/tests/network/test_notifications.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/network/test_notifications.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,1513 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Tests for ceilometer.network.notifications -""" - -import mock - -from ceilometer.network import notifications -from ceilometer.tests import base as test - -NOTIFICATION_NETWORK_CREATE = { - u'_context_roles': [u'anotherrole', - u'Member'], - u'_context_read_deleted': u'no', - u'event_type': u'network.create.end', - u'timestamp': u'2012-09-27 14:11:27.086575', - u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'payload': {u'network': - {u'status': u'ACTIVE', - u'subnets': [], - u'name': u'abcedf', - u'router:external': False, - u'tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'admin_state_up': True, - u'shared': False, - u'id': u'7fd4eb2f-a38e-4c25-8490-71ca8800c9be'}}, - u'priority': u'INFO', - u'_context_is_admin': False, - u'_context_timestamp': u'2012-09-27 14:11:26.924779', - u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', - u'publisher_id': u'network.ubuntu-VirtualBox', - u'message_id': u'9e839576-cc47-4c60-a7d8-5743681213b1'} - -NOTIFICATION_BULK_NETWORK_CREATE = { - '_context_roles': [u'_member_', - u'heat_stack_owner', - u'admin'], - u'_context_request_id': u'req-a2dfdefd-b773-4400-9d52-5e146e119950', - u'_context_read_deleted': u'no', - u'event_type': u'network.create.end', - u'_context_user_name': u'admin', - u'_context_project_name': u'admin', - u'timestamp': u'2014-05-1510: 24: 56.335612', - u'_context_tenant_id': u'980ec4870033453ead65c0470a78b8a8', - u'_context_tenant_name': u'admin', - u'_context_tenant': u'980ec4870033453ead65c0470a78b8a8', - u'message_id': u'914eb601-9390-4a72-8629-f013a4c84467', - u'priority': 'info', - u'_context_is_admin': True, - u'_context_project_id': u'980ec4870033453ead65c0470a78b8a8', - u'_context_timestamp': u'2014-05-1510: 24: 56.285975', - u'_context_user': u'7520940056d54cceb25cbce888300bea', - u'_context_user_id': u'7520940056d54cceb25cbce888300bea', - u'publisher_id': u'network.devstack', - u'payload': { - u'networks': [{u'status': u'ACTIVE', - u'subnets': [], - u'name': u'test2', - u'provider: physical_network': None, - u'admin_state_up': True, - u'tenant_id': u'980ec4870033453ead65c0470a78b8a8', - u'provider: network_type': u'local', - u'shared': False, - u'id': u'7cbc7a66-bbd0-41fc-a186-81c3da5c9843', - u'provider: segmentation_id': None}, - {u'status': u'ACTIVE', - u'subnets': [], - u'name': u'test3', - u'provider: physical_network': None, - u'admin_state_up': True, - u'tenant_id': u'980ec4870033453ead65c0470a78b8a8', - u'provider: network_type': u'local', - u'shared': False, - u'id': u'5a7cb86f-1638-4cc1-8dcc-8bbbc8c7510d', - u'provider: segmentation_id': None}] - } -} - -NOTIFICATION_SUBNET_CREATE = { - u'_context_roles': [u'anotherrole', - u'Member'], - u'_context_read_deleted': u'no', - u'event_type': u'subnet.create.end', - u'timestamp': u'2012-09-27 14:11:27.426620', - u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'payload': { - u'subnet': { - u'name': u'mysubnet', - u'enable_dhcp': True, - u'network_id': u'7fd4eb2f-a38e-4c25-8490-71ca8800c9be', - u'tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'dns_nameservers': [], - u'allocation_pools': [{u'start': u'192.168.42.2', - u'end': u'192.168.42.254'}], - u'host_routes': [], - u'ip_version': 4, - u'gateway_ip': u'192.168.42.1', - u'cidr': u'192.168.42.0/24', - u'id': u'1a3a170d-d7ce-4cc9-b1db-621da15a25f5'}}, - u'priority': u'INFO', - u'_context_is_admin': False, - u'_context_timestamp': u'2012-09-27 14:11:27.214490', - u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', - u'publisher_id': u'network.ubuntu-VirtualBox', - 
u'message_id': u'd86dfc66-d3c3-4aea-b06d-bf37253e6116'} - -NOTIFICATION_BULK_SUBNET_CREATE = { - '_context_roles': [u'_member_', - u'heat_stack_owner', - u'admin'], - u'_context_request_id': u'req-b77e278a-0cce-4987-9f82-15957b234768', - u'_context_read_deleted': u'no', - u'event_type': u'subnet.create.end', - u'_context_user_name': u'admin', - u'_context_project_name': u'admin', - u'timestamp': u'2014-05-1510: 47: 08.133888', - u'_context_tenant_id': u'980ec4870033453ead65c0470a78b8a8', - u'_context_tenant_name': u'admin', - u'_context_tenant': u'980ec4870033453ead65c0470a78b8a8', - u'message_id': u'c7e6f9fd-ead2-415f-8493-b95bedf72e43', - u'priority': u'info', - u'_context_is_admin': True, - u'_context_project_id': u'980ec4870033453ead65c0470a78b8a8', - u'_context_timestamp': u'2014-05-1510: 47: 07.970043', - u'_context_user': u'7520940056d54cceb25cbce888300bea', - u'_context_user_id': u'7520940056d54cceb25cbce888300bea', - u'publisher_id': u'network.devstack', - u'payload': { - u'subnets': [{u'name': u'', - u'enable_dhcp': True, - u'network_id': u'3ddfe60b-34b4-4e9d-9440-43c904b1c58e', - u'tenant_id': u'980ec4870033453ead65c0470a78b8a8', - u'dns_nameservers': [], - u'ipv6_ra_mode': None, - u'allocation_pools': [{u'start': u'10.0.4.2', - u'end': u'10.0.4.254'}], - u'host_routes': [], - u'ipv6_address_mode': None, - u'ip_version': 4, - u'gateway_ip': u'10.0.4.1', - u'cidr': u'10.0.4.0/24', - u'id': u'14020d7b-6dd7-4349-bb8e-8f954c919022'}, - {u'name': u'', - u'enable_dhcp': True, - u'network_id': u'3ddfe60b-34b4-4e9d-9440-43c904b1c58e', - u'tenant_id': u'980ec4870033453ead65c0470a78b8a8', - u'dns_nameservers': [], - u'ipv6_ra_mode': None, - u'allocation_pools': [{u'start': u'10.0.5.2', - u'end': u'10.0.5.254'}], - u'host_routes': [], - u'ipv6_address_mode': None, - u'ip_version': 4, - u'gateway_ip': u'10.0.5.1', - u'cidr': u'10.0.5.0/24', - u'id': u'a080991b-a32a-4bf7-a558-96c4b77d075c'}] - } -} - -NOTIFICATION_PORT_CREATE = { - u'_context_roles': [u'anotherrole', - u'Member'], - u'_context_read_deleted': u'no', - u'event_type': u'port.create.end', - u'timestamp': u'2012-09-27 14:28:31.536370', - u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'payload': { - u'port': { - u'status': u'ACTIVE', - u'name': u'', - u'admin_state_up': True, - u'network_id': u'7fd4eb2f-a38e-4c25-8490-71ca8800c9be', - u'tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'device_owner': u'', - u'mac_address': u'fa:16:3e:75:0c:49', - u'fixed_ips': [{ - u'subnet_id': u'1a3a170d-d7ce-4cc9-b1db-621da15a25f5', - u'ip_address': u'192.168.42.3'}], - u'id': u'9cdfeb92-9391-4da7-95a1-ca214831cfdb', - u'device_id': u''}}, - u'priority': u'INFO', - u'_context_is_admin': False, - u'_context_timestamp': u'2012-09-27 14:28:31.438919', - u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', - u'publisher_id': u'network.ubuntu-VirtualBox', - u'message_id': u'7135b8ab-e13c-4ac8-bc31-75e7f756622a'} - -NOTIFICATION_BULK_PORT_CREATE = { - u'_context_roles': [u'_member_', - u'SwiftOperator'], - u'_context_request_id': u'req-678be9ad-c399-475a-b3e8-8da0c06375aa', - u'_context_read_deleted': u'no', - u'event_type': u'port.create.end', - u'_context_project_name': u'demo', - u'timestamp': u'2014-05-0909: 19: 58.317548', - u'_context_tenant_id': u'133087d90fc149528b501dd8b75ea965', - u'_context_timestamp': u'2014-05-0909: 19: 58.160011', - u'_context_tenant': u'133087d90fc149528b501dd8b75ea965', - u'payload': { - u'ports': [{u'status': u'DOWN', - u'name': u'port--1501135095', - u'allowed_address_pairs': [], - 
u'admin_state_up': True, - u'network_id': u'acf63fdc-b43b-475d-8cca-9429b843d5e8', - u'tenant_id': u'133087d90fc149528b501dd8b75ea965', - u'binding: vnic_type': u'normal', - u'device_owner': u'', - u'mac_address': u'fa: 16: 3e: 37: 10: 39', - u'fixed_ips': [], - u'id': u'296c2c9f-14e9-48da-979d-78b213454c59', - u'security_groups': [ - u'a06f7c9d-9e5a-46b0-9f6c-ce812aa2e5ff'], - u'device_id': u''}, - {u'status': u'DOWN', - u'name': u'', - u'allowed_address_pairs': [], - u'admin_state_up': False, - u'network_id': u'0a8eea59-0146-425c-b470-e9ddfa99ec61', - u'tenant_id': u'133087d90fc149528b501dd8b75ea965', - u'binding: vnic_type': u'normal', - u'device_owner': u'', - u'mac_address': u'fa: 16: 3e: 8e: 6e: 53', - u'fixed_ips': [], - u'id': u'd8bb667f-5cd3-4eca-a984-268e25b1b7a5', - u'security_groups': [ - u'a06f7c9d-9e5a-46b0-9f6c-ce812aa2e5ff'], - u'device_id': u''}] - }, - u'_unique_id': u'60b1650f17fc4fa59492f447321fb26c', - u'_context_is_admin': False, - u'_context_project_id': u'133087d90fc149528b501dd8b75ea965', - u'_context_tenant_name': u'demo', - u'_context_user': u'b1eb48f9c54741f4adc1b4ea512d400c', - u'_context_user_name': u'demo', - u'publisher_id': u'network.os-ci-test12', - u'message_id': u'04aa45e1-3c30-4c69-8638-e7ff8621e9bc', - u'_context_user_id': u'b1eb48f9c54741f4adc1b4ea512d400c', - u'priority': u'INFO' -} - -NOTIFICATION_PORT_UPDATE = { - u'_context_roles': [u'anotherrole', - u'Member'], - u'_context_read_deleted': u'no', - u'event_type': u'port.update.end', - u'timestamp': u'2012-09-27 14:35:09.514052', - u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'payload': { - u'port': { - u'status': u'ACTIVE', - u'name': u'bonjour', - u'admin_state_up': True, - u'network_id': u'7fd4eb2f-a38e-4c25-8490-71ca8800c9be', - u'tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'device_owner': u'', - u'mac_address': u'fa:16:3e:75:0c:49', - u'fixed_ips': [{ - u'subnet_id': u'1a3a170d-d7ce-4cc9-b1db-621da15a25f5', - u'ip_address': u'192.168.42.3'}], - u'id': u'9cdfeb92-9391-4da7-95a1-ca214831cfdb', - u'device_id': u''}}, - u'priority': u'INFO', - u'_context_is_admin': False, - u'_context_timestamp': u'2012-09-27 14:35:09.447682', - u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', - u'publisher_id': u'network.ubuntu-VirtualBox', - u'message_id': u'07b0a3a1-c0b5-40ab-a09c-28dee6bf48f4'} - - -NOTIFICATION_NETWORK_EXISTS = { - u'_context_roles': [u'anotherrole', - u'Member'], - u'_context_read_deleted': u'no', - u'event_type': u'network.exists', - u'timestamp': u'2012-09-27 14:11:27.086575', - u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'payload': {u'network': - {u'status': u'ACTIVE', - u'subnets': [], - u'name': u'abcedf', - u'router:external': False, - u'tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'admin_state_up': True, - u'shared': False, - u'id': u'7fd4eb2f-a38e-4c25-8490-71ca8800c9be'}}, - u'priority': u'INFO', - u'_context_is_admin': False, - u'_context_timestamp': u'2012-09-27 14:11:26.924779', - u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', - u'publisher_id': u'network.ubuntu-VirtualBox', - u'message_id': u'9e839576-cc47-4c60-a7d8-5743681213b1'} - - -NOTIFICATION_ROUTER_EXISTS = { - u'_context_roles': [u'anotherrole', - u'Member'], - u'_context_read_deleted': u'no', - u'event_type': u'router.exists', - u'timestamp': u'2012-09-27 14:11:27.086575', - u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'payload': {u'router': - {'status': u'ACTIVE', - 'external_gateway_info': - {'network_id': 
u'89d55642-4dec-43a4-a617-6cec051393b5'}, - 'name': u'router1', - 'admin_state_up': True, - 'tenant_id': u'bb04a2b769c94917b57ba49df7783cfd', - 'id': u'ab8bb3ed-df23-4ca0-8f03-b887abcd5c23'}}, - u'priority': u'INFO', - u'_context_is_admin': False, - u'_context_timestamp': u'2012-09-27 14:11:26.924779', - u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', - u'publisher_id': u'network.ubuntu-VirtualBox', - u'message_id': u'9e839576-cc47-4c60-a7d8-5743681213b1'} - - -NOTIFICATION_FLOATINGIP_EXISTS = { - u'_context_roles': [u'anotherrole', - u'Member'], - u'_context_read_deleted': u'no', - u'event_type': u'floatingip.exists', - u'timestamp': u'2012-09-27 14:11:27.086575', - u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'payload': {u'floatingip': - {'router_id': None, - 'tenant_id': u'6e5f9df9b3a249ab834f25fe1b1b81fd', - 'floating_network_id': - u'001400f7-1710-4245-98c3-39ba131cc39a', - 'fixed_ip_address': None, - 'floating_ip_address': u'172.24.4.227', - 'port_id': None, - 'id': u'2b7cc28c-6f78-4735-9246-257168405de6'}}, - u'priority': u'INFO', - u'_context_is_admin': False, - u'_context_timestamp': u'2012-09-27 14:11:26.924779', - u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', - u'publisher_id': u'network.ubuntu-VirtualBox', - u'message_id': u'9e839576-cc47-4c60-a7d8-5743681213b1'} - - -NOTIFICATION_FLOATINGIP_UPDATE_START = { - '_context_roles': [u'_member_', - u'admin', - u'heat_stack_owner'], - '_context_request_id': u'req-bd5ed336-242f-4705-836e-8e8f3d0d1ced', - '_context_read_deleted': u'no', - 'event_type': u'floatingip.update.start', - '_context_user_name': u'admin', - '_context_project_name': u'admin', - 'timestamp': u'2014-05-3107: 19: 43.463101', - '_context_tenant_id': u'9fc714821a3747c8bc4e3a9bfbe82732', - '_context_tenant_name': u'admin', - '_context_tenant': u'9fc714821a3747c8bc4e3a9bfbe82732', - 'message_id': u'0ab6d71f-ba0a-4501-86fe-6cc20521ef5a', - 'priority': 'info', - '_context_is_admin': True, - '_context_project_id': u'9fc714821a3747c8bc4e3a9bfbe82732', - '_context_timestamp': u'2014-05-3107: 19: 43.460767', - '_context_user': u'6ca7b13b33e4425cae0b85e2cf93d9a1', - '_context_user_id': u'6ca7b13b33e4425cae0b85e2cf93d9a1', - 'publisher_id': u'network.devstack', - 'payload': { - u'id': u'64262b2a-8f5d-4ade-9405-0cbdd03c1555', - u'floatingip': { - u'fixed_ip_address': u'172.24.4.227', - u'port_id': u'8ab815c8-03cc-4b45-a673-79bdd0c258f2' - } - } -} - - -NOTIFICATION_L3_METER = { - u'_context_roles': [u'admin'], - u'_context_read_deleted': u'no', - u'event_type': u'l3.meter', - u'timestamp': u'2013-08-22 13:14:06.880304', - u'_context_tenant_id': None, - u'payload': {u'first_update': 1377176476, - u'bytes': 0, - u'label_id': u'383244a7-e99b-433a-b4a1-d37cf5b17d15', - u'last_update': 1377177246, - u'host': u'precise64', - u'tenant_id': u'admin', - u'time': 30, - u'pkts': 0}, - u'priority': u'INFO', - u'_context_is_admin': True, - u'_context_timestamp': u'2013-08-22 13:01:06.614635', - u'_context_user_id': None, - u'publisher_id': u'metering.precise64', - u'message_id': u'd7aee6e8-c7eb-4d47-9338-f60920d708e4', - u'_unique_id': u'd5a3bdacdcc24644b84e67a4c10e886a', - u'_context_project_id': None} - - -NOTIFICATION_POOL_CREATE = { - "_context_roles": ["heat_stack_owner", "admin"], - "_context_request_id": "req-10715057-7590-4529-8020-b994295ee6f4", - "event_type": "pool.create.end", - "timestamp": "2014-09-15 17:20:50.687649", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - 
"_unique_id": "ce255443233748ce9cc71b480974df28", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "pool": { - "status": "ACTIVE", - "lb_method": "ROUND_ROBIN", - "protocol": "HTTP", "description": "", - "health_monitors": [], - "members": [], - "status_description": None, - "id": "6d726518-f3aa-4dd4-ac34-e156a35c0aff", - "vip_id": None, - "name": "my_pool", - "admin_state_up": True, - "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "health_monitors_status": [], - "provider": "haproxy"}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:20:49.600299", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "0a5ed7a6-e516-4aed-9968-4ee9f1b65cc2"} - - -NOTIFICATION_VIP_CREATE = { - "_context_roles": ["heat_stack_owner", "admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "vip.create.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "vip": { - "status": "ACTIVE", - "protocol": "HTTP", - "description": "", - "address": "10.0.0.2", - "protocol_port": 80, - "port_id": "2b5dd476-11da-4d46-9f1e-7a75436062f6", - "id": "87a5ce35-f278-47f3-8990-7f695f52f9bf", - "status_description": None, - "name": "my_vip", - "admin_state_up": True, - "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "connection_limit": -1, - "pool_id": "6d726518-f3aa-4dd4-ac34-e156a35c0aff", - "session_persistence": {"type": "SOURCE_IP"}}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "3895ad11-98a3-4031-92af-f76e96736661"} - - -NOTIFICATION_HEALTH_MONITORS_CREATE = { - "_context_roles": ["heat_stack_owner", "admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "health_monitor.create.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "health_monitor": { - "admin_state_up": True, - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "delay": 10, - "max_retries": 10, - "timeout": 10, - "pools": [], - "type": "PING", - "id": "6dea2d01-c3af-4696-9192-6c938f391f01"}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": 
"a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} - - -NOTIFICATION_MEMBERS_CREATE = { - "_context_roles": ["heat_stack_owner", "admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "member.create.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "member": {"admin_state_up": True, - "status": "ACTIVE", - "status_description": None, - "weight": 1, - "address": "10.0.0.3", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "protocol_port": 80, - "id": "5e32f960-63ae-4a93-bfa2-339aa83d82ce", - "pool_id": "6b73b9f8-d807-4553-87df-eb34cdd08070"}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} - - -NOTIFICATION_FIREWALL_CREATE = { - "_context_roles": ["heat_stack_owner", "admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "firewall.create.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "firewall": { - "status": "ACTIVE", - "name": "my_firewall", - "admin_state_up": True, - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "firewall_policy_id": "c46a1c15-0496-41c9-beff-9a309a25653e", - "id": "e2d1155f-6bc4-4292-9cfa-ea91af4b38c8", - "description": ""}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} - - -NOTIFICATION_FIREWALL_RULE_CREATE = { - "_context_roles": ["heat_stack_owner", "admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "firewall_rule.create.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "firewall_rule": { - "protocol": "tcp", - "description": "", - "source_port": 80, - "source_ip_address": '192.168.255.10', - 
"destination_ip_address": '10.10.10.1', - "firewall_policy_id": '', - "position": None, - "destination_port": 80, - "id": "53b7c0d3-cb87-4069-9e29-1e866583cc8c", - "name": "rule_01", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "enabled": True, - "action": "allow", - "ip_version": 4, - "shared": False}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} - - -NOTIFICATION_FIREWALL_POLICY_CREATE = { - "_context_roles": ["heat_stack_owner", "admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "firewall_policy.create.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "firewall_policy": {"name": "my_policy", - "firewall_rules": [], - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "audited": False, - "shared": False, - "id": "c46a1c15-0496-41c9-beff-9a309a25653e", - "description": ""}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} - - -NOTIFICATION_VPNSERVICE_CREATE = { - "_context_roles": ["heat_stack_owner", "admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "vpnservice.create.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "vpnservice": {"router_id": "75871c53-e722-4b21-93ed-20cb40b6b672", - "status": "ACTIVE", - "name": "my_vpn", - "admin_state_up": True, - "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "id": "270c40cc-28d5-4a7e-83da-cc33088ee5d6", - "description": ""}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} - - -NOTIFICATION_IPSEC_POLICY_CREATE = { - "_context_roles": ["heat_stack_owner", "admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "ipsecpolicy.create.end", - 
"timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "ipsecpolicy": {"encapsulation_mode": "tunnel", - "encryption_algorithm": "aes-128", - "pfs": "group5", - "lifetime": { - "units": "seconds", - "value": 3600}, - "name": "my_ipsec_polixy", - "transform_protocol": "esp", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "id": "998d910d-4506-47c9-a160-47ec51ff53fc", - "auth_algorithm": "sha1", - "description": ""}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} - - -NOTIFICATION_IKE_POLICY_CREATE = { - "_context_roles": ["heat_stack_owner", "admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "ikepolicy.create.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "ikepolicy": {"encryption_algorithm": "aes-128", - "pfs": "group5", - "name": "my_ike_policy", - "phase1_negotiation_mode": "main", - "lifetime": {"units": "seconds", - "value": 3600}, - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "ike_version": "v1", - "id": "11cef94e-3f6a-4b65-8058-7deb1838633a", - "auth_algorithm": "sha1", - "description": ""}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} - - -NOTIFICATION_IPSEC_SITE_CONN_CREATE = { - "_context_roles": ["heat_stack_owner", "admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "ipsec_site_connection.create.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "ipsec_site_connection": { - "status": "ACTIVE", - "psk": "test", - "initiator": "bi-directional", - "name": "my_ipsec_connection", - "admin_state_up": True, - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "ipsecpolicy_id": "998d910d-4506-47c9-a160-47ec51ff53fc", - "auth_mode": "psk", "peer_cidrs": ["192.168.255.0/24"], - "mtu": 1500, - "ikepolicy_id": "11cef94e-3f6a-4b65-8058-7deb1838633a", - "dpd": {"action": "hold", - "interval": 30, - "timeout": 120}, - "route_mode": "static", - 
"vpnservice_id": "270c40cc-28d5-4a7e-83da-cc33088ee5d6", - "peer_address": "10.0.0.1", - "peer_id": "10.0.0.254", - "id": "06f3c1ec-2e01-4ad6-9c98-4252751fc60a", - "description": ""}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} - - -NOTIFICATION_POOL_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": "req-10715057-7590-4529-8020-b994295ee6f4", - "event_type": "pool.update.end", - "timestamp": "2014-09-15 17:20:50.687649", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "ce255443233748ce9cc71b480974df28", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "pool": { - "status": "ACTIVE", - "lb_method": "ROUND_ROBIN", - "protocol": "HTTP", "description": "", - "health_monitors": [], - "members": [], - "status_description": None, - "id": "6d726518-f3aa-4dd4-ac34-e156a35c0aff", - "vip_id": None, - "name": "my_pool", - "admin_state_up": True, - "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "health_monitors_status": [], - "provider": "haproxy"}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:20:49.600299", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "0a5ed7a6-e516-4aed-9968-4ee9f1b65cc2"} - - -NOTIFICATION_VIP_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "vip.update.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "vip": { - "status": "ACTIVE", - "protocol": "HTTP", - "description": "", - "address": "10.0.0.2", - "protocol_port": 80, - "port_id": "2b5dd476-11da-4d46-9f1e-7a75436062f6", - "id": "87a5ce35-f278-47f3-8990-7f695f52f9bf", - "status_description": None, - "name": "my_vip", - "admin_state_up": True, - "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "connection_limit": -1, - "pool_id": "6d726518-f3aa-4dd4-ac34-e156a35c0aff", - "session_persistence": {"type": "SOURCE_IP"}}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": 
"3895ad11-98a3-4031-92af-f76e96736661"} - - -NOTIFICATION_HEALTH_MONITORS_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "health_monitor.update.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "health_monitor": { - "admin_state_up": True, - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "delay": 10, - "max_retries": 10, - "timeout": 10, - "pools": [], - "type": "PING", - "id": "6dea2d01-c3af-4696-9192-6c938f391f01"}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} - - -NOTIFICATION_MEMBERS_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "member.update.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "member": {"admin_state_up": True, - "status": "ACTIVE", - "status_description": None, - "weight": 1, - "address": "10.0.0.3", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "protocol_port": 80, - "id": "5e32f960-63ae-4a93-bfa2-339aa83d82ce", - "pool_id": "6b73b9f8-d807-4553-87df-eb34cdd08070"}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} - - -NOTIFICATION_FIREWALL_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "firewall.update.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "firewall": { - "status": "ACTIVE", - "name": "my_firewall", - "admin_state_up": True, - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "firewall_policy_id": "c46a1c15-0496-41c9-beff-9a309a25653e", - "id": "e2d1155f-6bc4-4292-9cfa-ea91af4b38c8", - "description": ""}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - 
"_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} - - -NOTIFICATION_FIREWALL_RULE_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "firewall_rule.update.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "firewall_rule": { - "protocol": "tcp", - "description": "", - "source_port": 80, - "source_ip_address": '192.168.255.10', - "destination_ip_address": '10.10.10.1', - "firewall_policy_id": '', - "position": None, - "destination_port": 80, - "id": "53b7c0d3-cb87-4069-9e29-1e866583cc8c", - "name": "rule_01", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "enabled": True, - "action": "allow", - "ip_version": 4, - "shared": False}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} - - -NOTIFICATION_FIREWALL_POLICY_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "firewall_policy.update.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "firewall_policy": {"name": "my_policy", - "firewall_rules": [], - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "audited": False, - "shared": False, - "id": "c46a1c15-0496-41c9-beff-9a309a25653e", - "description": ""}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} - - -NOTIFICATION_VPNSERVICE_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "vpnservice.update.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "vpnservice": {"router_id": "75871c53-e722-4b21-93ed-20cb40b6b672", - "status": "ACTIVE", - "name": "my_vpn", - "admin_state_up": True, - "subnet_id": 
"afaf251b-2ec3-42ac-9fa9-82a4195724fa", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "id": "270c40cc-28d5-4a7e-83da-cc33088ee5d6", - "description": ""}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} - - -NOTIFICATION_IPSEC_POLICY_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "ipsecpolicy.update.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "ipsecpolicy": {"encapsulation_mode": "tunnel", - "encryption_algorithm": "aes-128", - "pfs": "group5", - "lifetime": { - "units": "seconds", - "value": 3600}, - "name": "my_ipsec_polixy", - "transform_protocol": "esp", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "id": "998d910d-4506-47c9-a160-47ec51ff53fc", - "auth_algorithm": "sha1", - "description": ""}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} - - -NOTIFICATION_IKE_POLICY_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "ikepolicy.update.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "ikepolicy": {"encryption_algorithm": "aes-128", - "pfs": "group5", - "name": "my_ike_policy", - "phase1_negotiation_mode": "main", - "lifetime": {"units": "seconds", - "value": 3600}, - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "ike_version": "v1", - "id": "11cef94e-3f6a-4b65-8058-7deb1838633a", - "auth_algorithm": "sha1", - "description": ""}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} - - -NOTIFICATION_IPSEC_SITE_CONN_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "ipsec_site_connection.update.end", - "timestamp": "2014-09-15 
17:22:11.323644",
-    "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb",
-    "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1",
-    "_unique_id": "f112a185e1d1424eba3a13df9e0f0277",
-    "_context_tenant_name": "demo",
-    "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1",
-    "payload": {
-        "ipsec_site_connection": {
-            "status": "ACTIVE",
-            "psk": "test",
-            "initiator": "bi-directional",
-            "name": "my_ipsec_connection",
-            "admin_state_up": True,
-            "tenant_id": "a820f2d6293b4a7587d1c582767f43fb",
-            "ipsecpolicy_id": "998d910d-4506-47c9-a160-47ec51ff53fc",
-            "auth_mode": "psk", "peer_cidrs": ["192.168.255.0/24"],
-            "mtu": 1500,
-            "ikepolicy_id": "11cef94e-3f6a-4b65-8058-7deb1838633a",
-            "dpd": {"action": "hold",
-                    "interval": 30,
-                    "timeout": 120},
-            "route_mode": "static",
-            "vpnservice_id": "270c40cc-28d5-4a7e-83da-cc33088ee5d6",
-            "peer_address": "10.0.0.1",
-            "peer_id": "10.0.0.254",
-            "id": "06f3c1ec-2e01-4ad6-9c98-4252751fc60a",
-            "description": ""}},
-    "_context_project_name": "demo",
-    "_context_read_deleted": "no",
-    "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f",
-    "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb",
-    "priority": "INFO",
-    "_context_is_admin": True,
-    "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb",
-    "_context_timestamp": "2014-09-15 17:22:11.187163",
-    "_context_user_name": "admin",
-    "publisher_id": "network.ubuntu",
-    "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"}
-
-NOTIFICATION_EMPTY_PAYLOAD = {
-    "_context_roles": ["heat_stack_owner", "admin"],
-    "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e",
-    "event_type": "health_monitor.create.end",
-    "timestamp": "2014-09-15 17:22:11.323644",
-    "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb",
-    "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1",
-    "_unique_id": "f112a185e1d1424eba3a13df9e0f0277",
-    "_context_tenant_name": "demo",
-    "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1",
-    "payload": {
-        "health_monitor": {}},
-    "_context_project_name": "demo",
-    "_context_read_deleted": "no",
-    "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f",
-    "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb",
-    "priority": "INFO",
-    "_context_is_admin": True,
-    "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb",
-    "_context_timestamp": "2014-09-15 17:22:11.187163",
-    "_context_user_name": "admin",
-    "publisher_id": "network.ubuntu",
-    "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"}
-
-
-class TestNotifications(test.BaseTestCase):
-    def test_network_create(self):
-        v = notifications.Network(mock.Mock())
-        samples = list(v.process_notification(NOTIFICATION_NETWORK_CREATE))
-        self.assertEqual(2, len(samples))
-        self.assertEqual("network.create", samples[1].name)
-
-    def test_bulk_network_create(self):
-        v = notifications.Network(mock.Mock())
-        samples = list(v.process_notification(
-            NOTIFICATION_BULK_NETWORK_CREATE))
-        self.assertEqual(4, len(samples))
-        self.assertEqual("network", samples[0].name)
-        self.assertEqual("network.create", samples[1].name)
-        self.assertEqual("network", samples[2].name)
-        self.assertEqual("network.create", samples[3].name)
-
-    def test_subnet_create(self):
-        v = notifications.Subnet(mock.Mock())
-        samples = list(v.process_notification(NOTIFICATION_SUBNET_CREATE))
-        self.assertEqual(2, len(samples))
-        self.assertEqual("subnet.create", samples[1].name)
-
-    def test_bulk_subnet_create(self):
-        v = notifications.Subnet(mock.Mock())
-        samples = list(v.process_notification(NOTIFICATION_BULK_SUBNET_CREATE))
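        # Each subnet in the bulk payload is expected to yield a "subnet"
        # gauge sample plus a "subnet.create" delta sample, so this
        # two-subnet fixture produces four samples in payload order (see
        # the assertions that follow).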
-        self.assertEqual(4, len(samples))
-        self.assertEqual("subnet", samples[0].name)
-        self.assertEqual("subnet.create", samples[1].name)
-        self.assertEqual("subnet", samples[2].name)
-        self.assertEqual("subnet.create", samples[3].name)
-
-    def test_port_create(self):
-        v = notifications.Port(mock.Mock())
-        samples = list(v.process_notification(NOTIFICATION_PORT_CREATE))
-        self.assertEqual(2, len(samples))
-        self.assertEqual("port.create", samples[1].name)
-
-    def test_bulk_port_create(self):
-        v = notifications.Port(mock.Mock())
-        samples = list(v.process_notification(NOTIFICATION_BULK_PORT_CREATE))
-        self.assertEqual(4, len(samples))
-        self.assertEqual("port", samples[0].name)
-        self.assertEqual("port.create", samples[1].name)
-        self.assertEqual("port", samples[2].name)
-        self.assertEqual("port.create", samples[3].name)
-
-    def test_port_update(self):
-        v = notifications.Port(mock.Mock())
-        samples = list(v.process_notification(NOTIFICATION_PORT_UPDATE))
-        self.assertEqual(2, len(samples))
-        self.assertEqual("port.update", samples[1].name)
-
-    def test_network_exists(self):
-        v = notifications.Network(mock.Mock())
-        samples = v.process_notification(NOTIFICATION_NETWORK_EXISTS)
-        self.assertEqual(1, len(list(samples)))
-
-    def test_router_exists(self):
-        v = notifications.Router(mock.Mock())
-        samples = v.process_notification(NOTIFICATION_ROUTER_EXISTS)
-        self.assertEqual(1, len(list(samples)))
-
-    def test_floatingip_exists(self):
-        v = notifications.FloatingIP(mock.Mock())
-        samples = list(v.process_notification(NOTIFICATION_FLOATINGIP_EXISTS))
-        self.assertEqual(1, len(samples))
-        self.assertEqual("ip.floating", samples[0].name)
-
-    def test_floatingip_update(self):
-        v = notifications.FloatingIP(mock.Mock())
-        samples = list(v.process_notification(
-            NOTIFICATION_FLOATINGIP_UPDATE_START))
-        self.assertEqual(len(samples), 2)
-        self.assertEqual("ip.floating", samples[0].name)
-
-    def test_metering_report(self):
-        v = notifications.Bandwidth(mock.Mock())
-        samples = list(v.process_notification(NOTIFICATION_L3_METER))
-        self.assertEqual(1, len(samples))
-        self.assertEqual("bandwidth", samples[0].name)
-
-    def test_pool_create(self):
-        v = notifications.Pool(mock.Mock())
-        samples = list(v.process_notification(NOTIFICATION_POOL_CREATE))
-        self.assertEqual(2, len(samples))
-        self.assertEqual("network.services.lb.pool", samples[0].name)
-
-    def test_vip_create(self):
-        v = notifications.Vip(mock.Mock())
-        samples = list(v.process_notification(NOTIFICATION_VIP_CREATE))
-        self.assertEqual(2, len(samples))
-        self.assertEqual("network.services.lb.vip", samples[0].name)
-
-    def test_member_create(self):
-        v = notifications.Member(mock.Mock())
-        samples = list(v.process_notification(NOTIFICATION_MEMBERS_CREATE))
-        self.assertEqual(2, len(samples))
-        self.assertEqual("network.services.lb.member", samples[0].name)
-
-    def test_health_monitor_create(self):
-        v = notifications.HealthMonitor(mock.Mock())
-        samples = list(v.process_notification(
-            NOTIFICATION_HEALTH_MONITORS_CREATE))
-        self.assertEqual(2, len(samples))
-        self.assertEqual("network.services.lb.health_monitor", samples[0].name)
-
-    def test_firewall_create(self):
-        v = notifications.Firewall(mock.Mock())
-        samples = list(v.process_notification(NOTIFICATION_FIREWALL_CREATE))
-        self.assertEqual(2, len(samples))
-        self.assertEqual("network.services.firewall", samples[0].name)
-
-    def test_vpnservice_create(self):
-        v = notifications.VPNService(mock.Mock())
-        samples = list(v.process_notification(NOTIFICATION_VPNSERVICE_CREATE))
-        self.assertEqual(2, len(samples))
-        self.assertEqual("network.services.vpn", samples[0].name)
-
-    def test_ipsec_connection_create(self):
-        v = notifications.IPSecSiteConnection(mock.Mock())
-        samples = list(v.process_notification(
-            NOTIFICATION_IPSEC_SITE_CONN_CREATE))
-        self.assertEqual(2, len(samples))
-        self.assertEqual("network.services.vpn.connections", samples[0].name)
-
-    def test_firewall_policy_create(self):
-        v = notifications.FirewallPolicy(mock.Mock())
-        samples = list(v.process_notification(
-            NOTIFICATION_FIREWALL_POLICY_CREATE))
-        self.assertEqual(2, len(samples))
-        self.assertEqual("network.services.firewall.policy", samples[0].name)
-
-    def test_firewall_rule_create(self):
-        v = notifications.FirewallRule(mock.Mock())
-        samples = list(v.process_notification(
-            NOTIFICATION_FIREWALL_RULE_CREATE))
-        self.assertEqual(2, len(samples))
-        self.assertEqual("network.services.firewall.rule", samples[0].name)
-
-    def test_ipsec_policy_create(self):
-        v = notifications.IPSecPolicy(mock.Mock())
-        samples = list(v.process_notification(
-            NOTIFICATION_IPSEC_POLICY_CREATE))
-        self.assertEqual(2, len(samples))
-        self.assertEqual("network.services.vpn.ipsecpolicy", samples[0].name)
-
-    def test_ike_policy_create(self):
-        v = notifications.IKEPolicy(mock.Mock())
-        samples = list(v.process_notification(
-            NOTIFICATION_IKE_POLICY_CREATE))
-        self.assertEqual(2, len(samples))
-        self.assertEqual("network.services.vpn.ikepolicy", samples[0].name)
-
-    def test_pool_update(self):
-        v = notifications.Pool(mock.Mock())
-        samples = list(v.process_notification(NOTIFICATION_POOL_UPDATE))
-        self.assertEqual(2, len(samples))
-        self.assertEqual("network.services.lb.pool", samples[0].name)
-
-    def test_vip_update(self):
-        v = notifications.Vip(mock.Mock())
-        samples = list(v.process_notification(NOTIFICATION_VIP_UPDATE))
-        self.assertEqual(2, len(samples))
-        self.assertEqual("network.services.lb.vip", samples[0].name)
-
-    def test_member_update(self):
-        v = notifications.Member(mock.Mock())
-        samples = list(v.process_notification(NOTIFICATION_MEMBERS_UPDATE))
-        self.assertEqual(2, len(samples))
-        self.assertEqual("network.services.lb.member", samples[0].name)
-
-    def test_health_monitor_update(self):
-        v = notifications.HealthMonitor(mock.Mock())
-        samples = list(v.process_notification(
-            NOTIFICATION_HEALTH_MONITORS_UPDATE))
-        self.assertEqual(2, len(samples))
-        self.assertEqual("network.services.lb.health_monitor", samples[0].name)
-
-    def test_firewall_update(self):
-        v = notifications.Firewall(mock.Mock())
-        samples = list(v.process_notification(NOTIFICATION_FIREWALL_UPDATE))
-        self.assertEqual(2, len(samples))
-        self.assertEqual("network.services.firewall", samples[0].name)
-
-    def test_vpnservice_update(self):
-        v = notifications.VPNService(mock.Mock())
-        samples = list(v.process_notification(NOTIFICATION_VPNSERVICE_UPDATE))
-        self.assertEqual(2, len(samples))
-        self.assertEqual("network.services.vpn", samples[0].name)
-
-    def test_ipsec_connection_update(self):
-        v = notifications.IPSecSiteConnection(mock.Mock())
-        samples = list(v.process_notification(
-            NOTIFICATION_IPSEC_SITE_CONN_UPDATE))
-        self.assertEqual(2, len(samples))
-        self.assertEqual("network.services.vpn.connections", samples[0].name)
-
-    def test_firewall_policy_update(self):
-        v = notifications.FirewallPolicy(mock.Mock())
-        samples = list(v.process_notification(
-            NOTIFICATION_FIREWALL_POLICY_UPDATE))
-        self.assertEqual(2, len(samples))
-        self.assertEqual("network.services.firewall.policy", samples[0].name)
-
-    def test_firewall_rule_update(self):
-        v = notifications.FirewallRule(mock.Mock())
-        samples = list(v.process_notification(
-            NOTIFICATION_FIREWALL_RULE_UPDATE))
-        self.assertEqual(2, len(samples))
-        self.assertEqual("network.services.firewall.rule", samples[0].name)
-
-    def test_ipsec_policy_update(self):
-        v = notifications.IPSecPolicy(mock.Mock())
-        samples = list(v.process_notification(
-            NOTIFICATION_IPSEC_POLICY_UPDATE))
-        self.assertEqual(2, len(samples))
-        self.assertEqual("network.services.vpn.ipsecpolicy", samples[0].name)
-
-    def test_ike_policy_update(self):
-        v = notifications.IKEPolicy(mock.Mock())
-        samples = list(v.process_notification(
-            NOTIFICATION_IKE_POLICY_UPDATE))
-        self.assertEqual(2, len(samples))
-        self.assertEqual("network.services.vpn.ikepolicy", samples[0].name)
-
-    def test_empty_event_payload(self):
-        v = notifications.HealthMonitor(mock.Mock())
-        samples = list(v.process_notification(
-            NOTIFICATION_EMPTY_PAYLOAD))
-        self.assertEqual(0, len(samples))
-
-
-class TestEventTypes(test.BaseTestCase):
-
-    def test_network(self):
-        v = notifications.Network(mock.Mock())
-        events = v.event_types
-        self.assertIsNotEmpty(events)
-
-    def test_subnet(self):
-        v = notifications.Subnet(mock.Mock())
-        events = v.event_types
-        self.assertIsNotEmpty(events)
-
-    def test_port(self):
-        v = notifications.Port(mock.Mock())
-        events = v.event_types
-        self.assertIsNotEmpty(events)
-
-    def test_router(self):
-        self.assertTrue(notifications.Router(mock.Mock()).event_types)
-
-    def test_floatingip(self):
-        self.assertTrue(notifications.FloatingIP(mock.Mock()).event_types)
-
-    def test_bandwidth(self):
-        self.assertTrue(notifications.Bandwidth(mock.Mock()).event_types)
-
-    def test_pool(self):
-        self.assertTrue(notifications.Pool(mock.Mock()).event_types)
-
-    def test_vip(self):
-        self.assertTrue(notifications.Vip(mock.Mock()).event_types)
-
-    def test_member(self):
-        self.assertTrue(notifications.Member(mock.Mock()).event_types)
-
-    def test_health_monitor(self):
-        self.assertTrue(notifications.HealthMonitor(mock.Mock()).event_types)
-
-    def test_firewall(self):
-        self.assertTrue(notifications.Firewall(mock.Mock()).event_types)
-
-    def test_vpnservice(self):
-        self.assertTrue(notifications.VPNService(mock.Mock()).event_types)
-
-    def test_ipsec_connection(self):
-        self.assertTrue(notifications.IPSecSiteConnection(
-            mock.Mock()).event_types)
-
-    def test_firewall_policy(self):
-        self.assertTrue(notifications.FirewallPolicy(mock.Mock()).event_types)
-
-    def test_firewall_rule(self):
-        self.assertTrue(notifications.FirewallRule(mock.Mock()).event_types)
-
-    def test_ipsec_policy(self):
-        self.assertTrue(notifications.IPSecPolicy(mock.Mock()).event_types)
-
-    def test_ike_policy(self):
-        self.assertTrue(notifications.IKEPolicy(mock.Mock()).event_types)
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/objectstore/test_notifications.py ceilometer-5.0.0~b3/ceilometer/tests/objectstore/test_notifications.py
--- ceilometer-5.0.0~b2/ceilometer/tests/objectstore/test_notifications.py 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/objectstore/test_notifications.py 1970-01-01 00:00:00.000000000 +0000
@@ -1,122 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Tests for swift notification events."""
-import copy
-import mock
-
-from ceilometer.objectstore import notifications
-from ceilometer.tests import base as test
-
-
-MIDDLEWARE_EVENT = {
-    u'_context_request_id': u'req-a8bfa89b-d28b-4b95-9e4b-7d7875275650',
-    u'_context_quota_class': None,
-    u'event_type': u'objectstore.http.request',
-    u'_context_service_catalog': [],
-    u'_context_auth_token': None,
-    u'_context_user_id': None,
-    u'priority': u'INFO',
-    u'_context_is_admin': True,
-    u'_context_user': None,
-    u'publisher_id': u'ceilometermiddleware',
-    u'message_id': u'6eccedba-120e-4db8-9735-2ad5f061e5ee',
-    u'_context_remote_address': None,
-    u'_context_roles': [],
-    u'timestamp': u'2013-07-29 06:51:34.474815',
-    u'_context_timestamp': u'2013-07-29T06:51:34.348091',
-    u'_unique_id': u'0ee26117077648e18d88ac76e28a72e2',
-    u'_context_project_name': None,
-    u'_context_read_deleted': u'no',
-    u'_context_tenant': None,
-    u'_context_instance_lock_checked': False,
-    u'_context_project_id': None,
-    u'_context_user_name': None,
-    u'payload': {
-        'typeURI': 'http: //schemas.dmtf.org/cloud/audit/1.0/event',
-        'eventTime': '2015-01-30T16: 38: 43.233621',
-        'target': {
-            'action': 'get',
-            'typeURI': 'service/storage/object',
-            'id': 'account',
-            'metadata': {
-                'path': '/1.0/CUSTOM_account/container/obj',
-                'version': '1.0',
-                'container': 'container',
-                'object': 'obj'
-            }
-        },
-        'observer': {
-            'id': 'target'
-        },
-        'eventType': 'activity',
-        'measurements': [
-            {
-                'metric': {
-                    'metricId': 'openstack: uuid',
-                    'name': 'storage.objects.outgoing.bytes',
-                    'unit': 'B'
-                },
-                'result': 28
-            },
-            {
-                'metric': {
-                    'metricId': 'openstack: uuid2',
-                    'name': 'storage.objects.incoming.bytes',
-                    'unit': 'B'
-                },
-                'result': 1
-            }
-        ],
-        'initiator': {
-            'typeURI': 'service/security/account/user',
-            'project_id': None,
-            'id': 'openstack: 288f6260-bf37-4737-a178-5038c84ba244'
-        },
-        'action': 'read',
-        'outcome': 'success',
-        'id': 'openstack: 69972bb6-14dd-46e4-bdaf-3148014363dc'
-    }
-}
-
-
-class TestMiddlewareNotifications(test.BaseTestCase):
-    def test_middleware_event(self):
-        v = notifications.SwiftWsgiMiddleware(mock.Mock())
-        samples = list(v.process_notification(MIDDLEWARE_EVENT))
-        self.assertEqual(1, len(samples))
-        target = MIDDLEWARE_EVENT['payload']['target']
-        initiator = MIDDLEWARE_EVENT['payload']['initiator']
-        self.assertEqual(target['id'], samples[0].resource_id)
-        self.assertEqual(initiator['id'], samples[0].user_id)
-        self.assertEqual(initiator['project_id'], samples[0].project_id)
-
-    def test_middleware_event_meters(self):
-        v = notifications.SwiftWsgiMiddlewareMeters(mock.Mock())
-        samples = list(v.process_notification(MIDDLEWARE_EVENT))
-        self.assertEqual(2, len(samples))
-        target = MIDDLEWARE_EVENT['payload']['target']
-        initiator = MIDDLEWARE_EVENT['payload']['initiator']
-        for i in range(2):
-            measure = MIDDLEWARE_EVENT['payload']['measurements'][i]
-            self.assertEqual(measure['metric']['name'], samples[i].name)
-            self.assertEqual(measure['metric']['unit'], samples[i].unit)
-            self.assertEqual(measure['result'], samples[i].volume)
-            self.assertEqual(target['id'], samples[i].resource_id)
-            self.assertEqual(initiator['id'], samples[i].user_id)
-            self.assertEqual(initiator['project_id'], samples[i].project_id)
-
-    def test_middleware_without_measurements(self):
-        v = notifications.SwiftWsgiMiddlewareMeters(mock.Mock())
-        event = copy.copy(MIDDLEWARE_EVENT)
-        event['payload'].pop('measurements')
-        self.assertEqual([], list(v.process_notification(event)))
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/objectstore/test_rgw_client.py ceilometer-5.0.0~b3/ceilometer/tests/objectstore/test_rgw_client.py
--- ceilometer-5.0.0~b2/ceilometer/tests/objectstore/test_rgw_client.py 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/objectstore/test_rgw_client.py 1970-01-01 00:00:00.000000000 +0000
@@ -1,190 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright (C) 2015 Reliance Jio Infocomm Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import json
-
-import mock
-from oslotest import base
-
-from ceilometer.objectstore.rgw_client import RGWAdminAPIFailed
-from ceilometer.objectstore.rgw_client import RGWAdminClient
-
-
-RGW_ADMIN_BUCKETS = '''
-[
-  {
-    "max_marker": "",
-    "ver": 2001,
-    "usage": {
-      "rgw.main": {
-        "size_kb_actual": 16000,
-        "num_objects": 1000,
-        "size_kb": 1000
-      }
-    },
-    "bucket": "somefoo",
-    "owner": "admin",
-    "master_ver": 0,
-    "mtime": 1420176126,
-    "marker": "default.4126.1",
-    "bucket_quota": {
-      "max_objects": -1,
-      "enabled": false,
-      "max_size_kb": -1
-    },
-    "id": "default.4126.1",
-    "pool": ".rgw.buckets",
-    "index_pool": ".rgw.buckets.index"
-  },
-  {
-    "max_marker": "",
-    "ver": 3,
-    "usage": {
-      "rgw.main": {
-        "size_kb_actual": 43,
-        "num_objects": 1,
-        "size_kb": 42
-      }
-    },
-    "bucket": "somefoo31",
-    "owner": "admin",
-    "master_ver": 0,
-    "mtime": 1420176134,
-    "marker": "default.4126.5",
-    "bucket_quota": {
-      "max_objects": -1,
-      "enabled": false,
-      "max_size_kb": -1
-    },
-    "id": "default.4126.5",
-    "pool": ".rgw.buckets",
-    "index_pool": ".rgw.buckets.index"
-  }
-]'''
-
-RGW_ADMIN_USAGE = '''
-{ "entries": [
-      { "owner": "5f7fe2d5352e466f948f49341e33d107",
-        "buckets": [
-            { "bucket": "",
-              "time": "2015-01-23 09:00:00.000000Z",
-              "epoch": 1422003600,
-              "categories": [
-                  { "category": "list_buckets",
-                    "bytes_sent": 46,
-                    "bytes_received": 0,
-                    "ops": 3,
-                    "successful_ops": 3},
-                  { "category": "stat_account",
-                    "bytes_sent": 0,
-                    "bytes_received": 0,
-                    "ops": 1,
-                    "successful_ops": 1}]},
-            { "bucket": "foodsgh",
-              "time": "2015-01-23 09:00:00.000000Z",
-              "epoch": 1422003600,
-              "categories": [
-                  { "category": "create_bucket",
-                    "bytes_sent": 0,
-                    "bytes_received": 0,
-                    "ops": 1,
-                    "successful_ops": 1},
-                  { "category": "get_obj",
-                    "bytes_sent": 0,
-                    "bytes_received": 0,
-                    "ops": 1,
-                    "successful_ops": 0},
-                  { "category": "put_obj",
-                    "bytes_sent": 0,
-                    "bytes_received": 238,
-                    "ops": 1,
-                    "successful_ops": 1}]}]}],
-  "summary": [
-      { "user": "5f7fe2d5352e466f948f49341e33d107",
-        "categories": [
-            { "category": "create_bucket",
-              "bytes_sent": 0,
-              "bytes_received": 0,
-              "ops": 1,
-              "successful_ops": 1},
-
{ "category": "get_obj", - "bytes_sent": 0, - "bytes_received": 0, - "ops": 1, - "successful_ops": 0}, - { "category": "list_buckets", - "bytes_sent": 46, - "bytes_received": 0, - "ops": 3, - "successful_ops": 3}, - { "category": "put_obj", - "bytes_sent": 0, - "bytes_received": 238, - "ops": 1, - "successful_ops": 1}, - { "category": "stat_account", - "bytes_sent": 0, - "bytes_received": 0, - "ops": 1, - "successful_ops": 1}], - "total": { "bytes_sent": 46, - "bytes_received": 238, - "ops": 7, - "successful_ops": 6}}]} -''' - -buckets_json = json.loads(RGW_ADMIN_BUCKETS) -usage_json = json.loads(RGW_ADMIN_USAGE) - - -class TestRGWAdminClient(base.BaseTestCase): - - def setUp(self): - super(TestRGWAdminClient, self).setUp() - self.client = RGWAdminClient('http://127.0.0.1:8080/admin', - 'abcde', 'secret') - self.get_resp = mock.MagicMock() - self.get = mock.patch('requests.get', - return_value=self.get_resp).start() - - def test_make_request_exception(self): - self.get_resp.status_code = 403 - self.assertRaises(RGWAdminAPIFailed, self.client._make_request, - *('foo', {})) - - def test_make_request(self): - self.get_resp.status_code = 200 - self.get_resp.json.return_value = buckets_json - actual = self.client._make_request('foo', []) - self.assertEqual(buckets_json, actual) - - def test_get_buckets(self): - self.get_resp.status_code = 200 - self.get_resp.json.return_value = buckets_json - actual = self.client.get_bucket('foo') - bucket_list = [RGWAdminClient.Bucket('somefoo', 1000, 1000), - RGWAdminClient.Bucket('somefoo31', 1, 42), - ] - expected = {'num_buckets': 2, 'size': 1042, 'num_objects': 1001, - 'buckets': bucket_list} - self.assertEqual(expected, actual) - - def test_get_usage(self): - self.get_resp.status_code = 200 - self.get_resp.json.return_value = usage_json - actual = self.client.get_usage('foo') - expected = 7 - self.assertEqual(expected, actual) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/objectstore/test_rgw.py ceilometer-5.0.0~b3/ceilometer/tests/objectstore/test_rgw.py --- ceilometer-5.0.0~b2/ceilometer/tests/objectstore/test_rgw.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/objectstore/test_rgw.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,181 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2015 Reliance Jio Infocomm Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import collections - -from keystoneclient import exceptions -import mock -from oslotest import base -from oslotest import mockpatch -import testscenarios.testcase - -from ceilometer.agent import manager -from ceilometer.objectstore import rgw -from ceilometer.objectstore.rgw_client import RGWAdminClient as rgw_client - -bucket_list1 = [rgw_client.Bucket('somefoo1', 10, 7)] -bucket_list2 = [rgw_client.Bucket('somefoo2', 2, 9)] -bucket_list3 = [rgw_client.Bucket('unlisted', 100, 100)] - -GET_BUCKETS = [('tenant-000', {'num_buckets': 2, 'size': 1042, - 'num_objects': 1001, 'buckets': bucket_list1}), - ('tenant-001', {'num_buckets': 2, 'size': 1042, - 'num_objects': 1001, 'buckets': bucket_list2}), - ('tenant-002-ignored', {'num_buckets': 2, 'size': 1042, - 'num_objects': 1001, - 'buckets': bucket_list3})] - -GET_USAGE = [('tenant-000', 10), - ('tenant-001', 11), - ('tenant-002-ignored', 12)] - -Tenant = collections.namedtuple('Tenant', 'id') -ASSIGNED_TENANTS = [Tenant('tenant-000'), Tenant('tenant-001')] - - -class TestManager(manager.AgentManager): - - def __init__(self): - super(TestManager, self).__init__() - self.keystone = mock.MagicMock() - self.keystone.service_catalog.url_for.return_value = '/endpoint' - - -class TestRgwPollster(testscenarios.testcase.WithScenarios, - base.BaseTestCase): - - # Define scenarios to run all of the tests against all of the - # pollsters. - scenarios = [ - ('radosgw.objects', - {'factory': rgw.ObjectsPollster}), - ('radosgw.objects.size', - {'factory': rgw.ObjectsSizePollster}), - ('radosgw.objects.containers', - {'factory': rgw.ObjectsContainersPollster}), - ('radosgw.containers.objects', - {'factory': rgw.ContainersObjectsPollster}), - ('radosgw.containers.objects.size', - {'factory': rgw.ContainersSizePollster}), - ('radosgw.api.request', - {'factory': rgw.UsagePollster}), - ] - - @staticmethod - def fake_ks_service_catalog_url_for(*args, **kwargs): - raise exceptions.EndpointNotFound("Fake keystone exception") - - def fake_iter_accounts(self, ksclient, cache, tenants): - tenant_ids = [t.id for t in tenants] - for i in self.ACCOUNTS: - if i[0] in tenant_ids: - yield i - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def setUp(self): - super(TestRgwPollster, self).setUp() - self.pollster = self.factory() - self.manager = TestManager() - - if self.pollster.CACHE_KEY_METHOD == 'rgw.get_bucket': - self.ACCOUNTS = GET_BUCKETS - else: - self.ACCOUNTS = GET_USAGE - - def tearDown(self): - super(TestRgwPollster, self).tearDown() - rgw._Base._ENDPOINT = None - - def test_iter_accounts_no_cache(self): - cache = {} - with mockpatch.PatchObject(self.factory, '_get_account_info', - return_value=[]): - data = list(self.pollster._iter_accounts(mock.Mock(), cache, - ASSIGNED_TENANTS)) - - self.assertIn(self.pollster.CACHE_KEY_METHOD, cache) - self.assertEqual([], data) - - def test_iter_accounts_cached(self): - # Verify that if a method has already been called, _iter_accounts - # uses the cached version and doesn't call rgw_clinet. 
- mock_method = mock.Mock() - mock_method.side_effect = AssertionError( - 'should not be called', - ) - - api_method = 'get_%s' % self.pollster.METHOD - - with mockpatch.PatchObject(rgw_client, api_method, new=mock_method): - cache = {self.pollster.CACHE_KEY_METHOD: [self.ACCOUNTS[0]]} - data = list(self.pollster._iter_accounts(mock.Mock(), cache, - ASSIGNED_TENANTS)) - self.assertEqual([self.ACCOUNTS[0]], data) - - def test_metering(self): - with mockpatch.PatchObject(self.factory, '_iter_accounts', - side_effect=self.fake_iter_accounts): - samples = list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - - self.assertEqual(2, len(samples), self.pollster.__class__) - - def test_get_meter_names(self): - with mockpatch.PatchObject(self.factory, '_iter_accounts', - side_effect=self.fake_iter_accounts): - samples = list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - - self.assertEqual(set([samples[0].name]), - set([s.name for s in samples])) - - def test_only_poll_assigned(self): - mock_method = mock.MagicMock() - endpoint = 'http://127.0.0.1:8000/admin' - api_method = 'get_%s' % self.pollster.METHOD - with mockpatch.PatchObject(rgw_client, api_method, new=mock_method): - with mockpatch.PatchObject( - self.manager.keystone.service_catalog, 'url_for', - return_value=endpoint): - list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - expected = [mock.call(t.id) - for t in ASSIGNED_TENANTS] - self.assertEqual(expected, mock_method.call_args_list) - - def test_get_endpoint_only_once(self): - mock_url_for = mock.MagicMock() - mock_url_for.return_value = '/endpoint' - api_method = 'get_%s' % self.pollster.METHOD - with mockpatch.PatchObject(rgw_client, api_method, - new=mock.MagicMock()): - with mockpatch.PatchObject( - self.manager.keystone.service_catalog, 'url_for', - new=mock_url_for): - list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - self.assertEqual(1, mock_url_for.call_count) - - def test_endpoint_notfound(self): - with mockpatch.PatchObject( - self.manager.keystone.service_catalog, 'url_for', - side_effect=self.fake_ks_service_catalog_url_for): - samples = list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - - self.assertEqual(0, len(samples)) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/objectstore/test_swift.py ceilometer-5.0.0~b3/ceilometer/tests/objectstore/test_swift.py --- ceilometer-5.0.0~b2/ceilometer/tests/objectstore/test_swift.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/objectstore/test_swift.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,213 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2012 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
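
The deleted rgw pollster tests above (and the swift pollster tests that follow) fix a common contract: only the tenants handed in as resources are polled, the service-catalog endpoint is resolved once and cached on the class (tearDown() resets _Base._ENDPOINT), and an EndpointNotFound simply yields zero samples. A compressed sketch of that shape, assuming illustrative names and service type:

    from keystoneclient import exceptions

    class _Base(object):
        _ENDPOINT = None  # class-level cache; the tests reset it in tearDown()

        @classmethod
        def _get_endpoint(cls, ksclient):
            # Consulted once per process: test_get_endpoint_only_once
            # asserts url_for() has call_count == 1 across two poll runs.
            if cls._ENDPOINT is None:
                cls._ENDPOINT = ksclient.service_catalog.url_for(
                    service_type='object-store')
            return cls._ENDPOINT

        def _poll(self, endpoint, tenant_id):
            raise NotImplementedError  # per-meter logic lives in subclasses

        def get_samples(self, manager, cache, resources):
            try:
                endpoint = self._get_endpoint(manager.keystone)
            except exceptions.EndpointNotFound:
                return  # no endpoint: zero samples rather than an error
            for tenant in resources:  # only assigned tenants are polled
                yield self._poll(endpoint, tenant.id)
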
- -import collections - -from keystoneclient import exceptions -import mock -from oslotest import base -from oslotest import mockpatch -from swiftclient import client as swift_client -import testscenarios.testcase - -from ceilometer.agent import manager -from ceilometer.objectstore import swift - -HEAD_ACCOUNTS = [('tenant-000', {'x-account-object-count': 12, - 'x-account-bytes-used': 321321321, - 'x-account-container-count': 7, - }), - ('tenant-001', {'x-account-object-count': 34, - 'x-account-bytes-used': 9898989898, - 'x-account-container-count': 17, - }), - ('tenant-002-ignored', {'x-account-object-count': 34, - 'x-account-bytes-used': 9898989898, - 'x-account-container-count': 17, - })] - -GET_ACCOUNTS = [('tenant-000', ({'x-account-object-count': 10, - 'x-account-bytes-used': 123123, - 'x-account-container-count': 2, - }, - [{'count': 10, - 'bytes': 123123, - 'name': 'my_container'}, - {'count': 0, - 'bytes': 0, - 'name': 'new_container' - }])), - ('tenant-001', ({'x-account-object-count': 0, - 'x-account-bytes-used': 0, - 'x-account-container-count': 0, - }, [])), - ('tenant-002-ignored', ({'x-account-object-count': 0, - 'x-account-bytes-used': 0, - 'x-account-container-count': 0, - }, []))] - -Tenant = collections.namedtuple('Tenant', 'id') -ASSIGNED_TENANTS = [Tenant('tenant-000'), Tenant('tenant-001')] - - -class TestManager(manager.AgentManager): - - def __init__(self): - super(TestManager, self).__init__() - self.keystone = mock.MagicMock() - - -class TestSwiftPollster(testscenarios.testcase.WithScenarios, - base.BaseTestCase): - - # Define scenarios to run all of the tests against all of the - # pollsters. - scenarios = [ - ('storage.objects', - {'factory': swift.ObjectsPollster}), - ('storage.objects.size', - {'factory': swift.ObjectsSizePollster}), - ('storage.objects.containers', - {'factory': swift.ObjectsContainersPollster}), - ('storage.containers.objects', - {'factory': swift.ContainersObjectsPollster}), - ('storage.containers.objects.size', - {'factory': swift.ContainersSizePollster}), - ] - - @staticmethod - def fake_ks_service_catalog_url_for(*args, **kwargs): - raise exceptions.EndpointNotFound("Fake keystone exception") - - def fake_iter_accounts(self, ksclient, cache, tenants): - tenant_ids = [t.id for t in tenants] - for i in self.ACCOUNTS: - if i[0] in tenant_ids: - yield i - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def setUp(self): - super(TestSwiftPollster, self).setUp() - self.pollster = self.factory() - self.manager = TestManager() - - if self.pollster.CACHE_KEY_METHOD == 'swift.head_account': - self.ACCOUNTS = HEAD_ACCOUNTS - else: - self.ACCOUNTS = GET_ACCOUNTS - - def tearDown(self): - super(TestSwiftPollster, self).tearDown() - swift._Base._ENDPOINT = None - - def test_iter_accounts_no_cache(self): - cache = {} - with mockpatch.PatchObject(self.factory, '_get_account_info', - return_value=[]): - data = list(self.pollster._iter_accounts(mock.Mock(), cache, - ASSIGNED_TENANTS)) - - self.assertIn(self.pollster.CACHE_KEY_METHOD, cache) - self.assertEqual([], data) - - def test_iter_accounts_cached(self): - # Verify that if a method has already been called, _iter_accounts - # uses the cached version and doesn't call swiftclient. 
- mock_method = mock.Mock() - mock_method.side_effect = AssertionError( - 'should not be called', - ) - - api_method = '%s_account' % self.pollster.METHOD - with mockpatch.PatchObject(swift_client, api_method, new=mock_method): - with mockpatch.PatchObject(self.factory, '_neaten_url'): - cache = {self.pollster.CACHE_KEY_METHOD: [self.ACCOUNTS[0]]} - data = list(self.pollster._iter_accounts(mock.Mock(), cache, - ASSIGNED_TENANTS)) - self.assertEqual([self.ACCOUNTS[0]], data) - - def test_neaten_url(self): - test_endpoints = ['http://127.0.0.1:8080', - 'http://127.0.0.1:8080/swift'] - test_tenant_id = 'a7fd1695fa154486a647e44aa99a1b9b' - for test_endpoint in test_endpoints: - standard_url = test_endpoint + '/v1/AUTH_' + test_tenant_id - - url = swift._Base._neaten_url(test_endpoint, test_tenant_id) - self.assertEqual(standard_url, url) - url = swift._Base._neaten_url(test_endpoint + '/', test_tenant_id) - self.assertEqual(standard_url, url) - url = swift._Base._neaten_url(test_endpoint + '/v1', - test_tenant_id) - self.assertEqual(standard_url, url) - url = swift._Base._neaten_url(standard_url, test_tenant_id) - self.assertEqual(standard_url, url) - - def test_metering(self): - with mockpatch.PatchObject(self.factory, '_iter_accounts', - side_effect=self.fake_iter_accounts): - samples = list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - - self.assertEqual(2, len(samples), self.pollster.__class__) - - def test_get_meter_names(self): - with mockpatch.PatchObject(self.factory, '_iter_accounts', - side_effect=self.fake_iter_accounts): - samples = list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - - self.assertEqual(set([samples[0].name]), - set([s.name for s in samples])) - - def test_only_poll_assigned(self): - mock_method = mock.MagicMock() - endpoint = 'end://point/' - api_method = '%s_account' % self.pollster.METHOD - with mockpatch.PatchObject(swift_client, api_method, new=mock_method): - with mockpatch.PatchObject( - self.manager.keystone.service_catalog, 'url_for', - return_value=endpoint): - list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - expected = [mock.call(self.pollster._neaten_url(endpoint, t.id), - self.manager.keystone.auth_token) - for t in ASSIGNED_TENANTS] - self.assertEqual(expected, mock_method.call_args_list) - - def test_get_endpoint_only_once(self): - endpoint = 'end://point/' - mock_url_for = mock.MagicMock(return_value=endpoint) - api_method = '%s_account' % self.pollster.METHOD - with mockpatch.PatchObject(swift_client, api_method, - new=mock.MagicMock()): - with mockpatch.PatchObject( - self.manager.keystone.service_catalog, 'url_for', - new=mock_url_for): - list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - self.assertEqual(1, mock_url_for.call_count) - - def test_endpoint_notfound(self): - with mockpatch.PatchObject( - self.manager.keystone.service_catalog, 'url_for', - side_effect=self.fake_ks_service_catalog_url_for): - samples = list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - - self.assertEqual(0, len(samples)) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/orchestration/test_notifications.py ceilometer-5.0.0~b3/ceilometer/tests/orchestration/test_notifications.py --- ceilometer-5.0.0~b2/ceilometer/tests/orchestration/test_notifications.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/orchestration/test_notifications.py 1970-01-01 00:00:00.000000000 
+0000 @@ -1,138 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - -import mock -from oslo_config import cfg -from oslo_config import fixture as fixture_config -from oslo_log import log - -from ceilometer.meter import notifications -from ceilometer import sample -from ceilometer.tests import base as test - -NOW = datetime.datetime.isoformat(datetime.datetime.utcnow()) - -TENANT_ID = u'4c35985848bf4419b3f3d52c22e5792d' -STACK_NAME = u'AS1-ASGroup-53sqbo7sor7i' -STACK_ID = u'cb4a6fd1-1f5d-4002-ae91-9b91573cfb03' -USER_NAME = u'demo' -USER_ID = u'2e61f25ec63a4f6c954a6245421448a4' -TRUSTOR_ID = u'foo-Trustor-Id' - -STACK_ARN = u'arn:openstack:heat::%s:stacks/%s/%s' % (TENANT_ID, - STACK_NAME, - STACK_ID) - - -CONF = cfg.CONF -log.register_options(CONF) -CONF.set_override('use_stderr', True) - -LOG = log.getLogger(__name__) - - -def stack_notification_for(operation, use_trust=None): - - if use_trust: - trust_id = 'footrust' - trustor_id = TRUSTOR_ID - else: - trust_id = None - trustor_id = None - - return { - u'event_type': 'orchestration.stack.%s.end' % operation, - u'_context_roles': [ - u'Member', - ], - u'_context_request_id': u'req-cf24cf30-af35-4a47-ae29-e74d75ebc6de', - u'_context_auth_url': u'http://0.1.0.1:1010/v2.0', - u'timestamp': NOW, - u'_unique_id': u'1afb4283660f410c802af4d5992a39f2', - u'_context_tenant_id': TENANT_ID, - u'payload': { - u'state_reason': u'Stack create completed successfully', - u'user_id': USER_NAME, - u'stack_identity': STACK_ARN, - u'stack_name': STACK_NAME, - u'tenant_id': TENANT_ID, - u'create_at': u'2014-01-27T13:13:19Z', - u'state': u'CREATE_COMPLETE' - }, - u'_context_username': USER_NAME, - u'_context_auth_token': u'MIISAwYJKoZIhvcNAQcCoII...', - u'_context_password': u'password', - u'_context_user_id': USER_ID, - u'_context_trustor_user_id': trustor_id, - u'_context_aws_creds': None, - u'_context_show_deleted': False, - u'_context_tenant': USER_NAME, - u'_context_trust_id': trust_id, - u'priority': u'INFO', - u'_context_is_admin': False, - u'_context_user': USER_NAME, - u'publisher_id': u'orchestration.node-n5x66lxdy67d', - u'message_id': u'ef921faa-7f7b-4854-8b86-a424ab93c96e', - } - - -class TestNotification(test.BaseTestCase): - - def setUp(self): - super(TestNotification, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.CONF.set_override( - 'meter_definitions_cfg_file', - self.path_get('etc/ceilometer/meters.yaml'), group='meter') - self.handler = notifications.ProcessMeterNotifications(mock.Mock()) - - def _verify_common_sample(self, s, name, volume): - self.assertIsNotNone(s) - self.assertEqual('stack.%s' % name, s.name) - self.assertEqual(NOW, s.timestamp) - self.assertEqual(sample.TYPE_DELTA, s.type) - self.assertEqual(TENANT_ID, s.project_id) - self.assertEqual(STACK_ARN, s.resource_id) - metadata = s.resource_metadata - self.assertEqual(u'orchestration.node-n5x66lxdy67d', - metadata.get('host')) - - def _test_operation(self, operation, trust=None): - notif = stack_notification_for(operation, 
trust) - - data = list(self.handler.process_notification(notif)) - self.assertEqual(1, len(data)) - if trust: - self.assertEqual(TRUSTOR_ID, data[0].user_id) - else: - self.assertEqual(USER_ID, data[0].user_id) - self._verify_common_sample(data[0], operation, 1) - - def test_create(self): - self._test_operation('create') - - def test_create_trust(self): - self._test_operation('create', trust=True) - - def test_update(self): - self._test_operation('update') - - def test_delete(self): - self._test_operation('delete') - - def test_resume(self): - self._test_operation('resume') - - def test_suspend(self): - self._test_operation('suspend') diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/pipeline_base.py ceilometer-5.0.0~b3/ceilometer/tests/pipeline_base.py --- ceilometer-5.0.0~b2/ceilometer/tests/pipeline_base.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/pipeline_base.py 2015-09-03 13:05:55.000000000 +0000 @@ -86,6 +86,7 @@ class TransformerClass(transformer.TransformerBase): samples = [] + grouping_keys = ['counter_name'] def __init__(self, append_name='_update'): self.__class__.samples = [] @@ -111,6 +112,7 @@ class TransformerClassDrop(transformer.TransformerBase): samples = [] + grouping_keys = ['resource_id'] def __init__(self): self.__class__.samples = [] @@ -119,6 +121,8 @@ self.__class__.samples.append(counter) class TransformerClassException(object): + grouping_keys = ['resource_id'] + @staticmethod def handle_sample(ctxt, counter): raise Exception() @@ -303,6 +307,72 @@ self.assertEqual('a_update', getattr(publisher.samples[0], "name")) self.assertEqual('b_update', getattr(publisher.samples[1], "name")) + @mock.patch('ceilometer.pipeline.LOG') + def test_none_volume_counter(self, LOG): + self._set_pipeline_cfg('counters', ['empty_volume']) + pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, + self.transformer_manager) + publisher = pipeline_manager.pipelines[0].publishers[0] + + test_s = sample.Sample( + name='empty_volume', + type=self.test_counter.type, + volume=None, + unit=self.test_counter.unit, + user_id=self.test_counter.user_id, + project_id=self.test_counter.project_id, + resource_id=self.test_counter.resource_id, + timestamp=self.test_counter.timestamp, + resource_metadata=self.test_counter.resource_metadata, + ) + + with pipeline_manager.publisher(None) as p: + p([test_s]) + + LOG.warning.assert_called_once_with( + 'metering data %(counter_name)s for %(resource_id)s ' + '@ %(timestamp)s has no volume (volume: %(counter_volume)s), the ' + 'sample will be dropped' + % {'counter_name': test_s.name, + 'resource_id': test_s.resource_id, + 'timestamp': test_s.timestamp, + 'counter_volume': test_s.volume}) + + self.assertEqual(0, len(publisher.samples)) + + @mock.patch('ceilometer.pipeline.LOG') + def test_fake_volume_counter(self, LOG): + self._set_pipeline_cfg('counters', ['fake_volume']) + pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, + self.transformer_manager) + publisher = pipeline_manager.pipelines[0].publishers[0] + + test_s = sample.Sample( + name='fake_volume', + type=self.test_counter.type, + volume='fake_value', + unit=self.test_counter.unit, + user_id=self.test_counter.user_id, + project_id=self.test_counter.project_id, + resource_id=self.test_counter.resource_id, + timestamp=self.test_counter.timestamp, + resource_metadata=self.test_counter.resource_metadata, + ) + + with pipeline_manager.publisher(None) as p: + p([test_s]) + + LOG.warning.assert_called_once_with( + 'metering data %(counter_name)s for 
%(resource_id)s ' + '@ %(timestamp)s has volume which is not a number ' + '(volume: %(counter_volume)s), the sample will be dropped' + % {'counter_name': test_s.name, + 'resource_id': test_s.resource_id, + 'timestamp': test_s.timestamp, + 'counter_volume': test_s.volume}) + + self.assertEqual(0, len(publisher.samples)) + def test_counter_dont_match(self): counter_cfg = ['nomatch'] self._set_pipeline_cfg('counters', counter_cfg) @@ -749,39 +819,6 @@ self.assertEqual('a_update', getattr(publisher.samples[0], 'name')) - def test_variable_counter(self): - transformer_cfg = [{ - 'name': "update", - 'parameters': {} - }] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._set_pipeline_cfg('counters', ['a:*']) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - - self.test_counter = sample.Sample( - name='a:b', - type=self.test_counter.type, - volume=self.test_counter.volume, - unit=self.test_counter.unit, - user_id=self.test_counter.user_id, - project_id=self.test_counter.project_id, - resource_id=self.test_counter.resource_id, - timestamp=self.test_counter.timestamp, - resource_metadata=self.test_counter.resource_metadata, - ) - - with pipeline_manager.publisher(None) as p: - p([self.test_counter]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, len(publisher.samples)) - self.assertEqual(1, len(self.TransformerClass.samples)) - self.assertEqual('a:b_update', - getattr(publisher.samples[0], "name")) - self.assertEqual('a:b', - getattr(self.TransformerClass.samples[0], "name")) - def test_global_unit_conversion(self): scale = 'volume / ((10**6) * 60)' transformer_cfg = [ @@ -1010,7 +1047,7 @@ offset=0) def test_rate_of_change_no_predecessor(self): - s = "100.0 / (10**9 * resource_metadata.get('cpu_number', 1))" + s = "100.0 / (10**9 * (resource_metadata.cpu_number or 1))" transformer_cfg = [ { 'name': 'rate_of_change', @@ -1050,6 +1087,80 @@ pipe.flush(None) self.assertEqual(0, len(publisher.samples)) + @mock.patch('ceilometer.transformer.conversions.LOG') + def test_rate_of_change_out_of_order(self, the_log): + s = "100.0 / (10**9 * (resource_metadata.cpu_number or 1))" + transformer_cfg = [ + { + 'name': 'rate_of_change', + 'parameters': { + 'source': {}, + 'target': {'name': 'cpu_util', + 'unit': '%', + 'type': sample.TYPE_GAUGE, + 'scale': s} + } + }, + ] + self._set_pipeline_cfg('transformers', transformer_cfg) + self._set_pipeline_cfg('counters', ['cpu']) + pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, + self.transformer_manager) + pipe = pipeline_manager.pipelines[0] + + now = timeutils.utcnow() + earlier = now - datetime.timedelta(seconds=10) + later = now + datetime.timedelta(seconds=10) + + counters = [ + sample.Sample( + name='cpu', + type=sample.TYPE_CUMULATIVE, + volume=125000000000, + unit='ns', + user_id='test_user', + project_id='test_proj', + resource_id='test_resource', + timestamp=now.isoformat(), + resource_metadata={'cpu_number': 4} + ), + sample.Sample( + name='cpu', + type=sample.TYPE_CUMULATIVE, + volume=120000000000, + unit='ns', + user_id='test_user', + project_id='test_proj', + resource_id='test_resource', + timestamp=earlier.isoformat(), + resource_metadata={'cpu_number': 4} + ), + sample.Sample( + name='cpu', + type=sample.TYPE_CUMULATIVE, + volume=130000000000, + unit='ns', + user_id='test_user', + project_id='test_proj', + resource_id='test_resource', + timestamp=later.isoformat(), + resource_metadata={'cpu_number': 4} + ), + ] + + pipe.publish_data(None, 
counters) + publisher = pipe.publishers[0] + self.assertEqual(1, len(publisher.samples)) + pipe.flush(None) + self.assertEqual(1, len(publisher.samples)) + + cpu_util_sample = publisher.samples[0] + self.assertEqual(12.5, cpu_util_sample.volume) + the_log.warn.assert_called_with( + 'dropping out of time order sample: %s', + (counters[1],) + ) + def test_resources(self): resources = ['test1://', 'test2://'] self._set_pipeline_cfg('resources', resources) @@ -1790,3 +1901,48 @@ def test_unique_pipeline_names(self): self._dup_pipeline_name_cfg() self._exception_create_pipelinemanager() + + def test_get_pipeline_grouping_key(self): + transformer_cfg = [ + { + 'name': 'update', + 'parameters': {} + }, + { + 'name': 'unit_conversion', + 'parameters': { + 'source': {}, + 'target': {'name': 'cpu_mins', + 'unit': 'min', + 'scale': 'volume'}, + } + }, + { + 'name': 'update', + 'parameters': {} + }, + ] + self._set_pipeline_cfg('transformers', transformer_cfg) + pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, + self.transformer_manager) + self.assertEqual(set(['resource_id', 'counter_name']), + set(pipeline.get_pipeline_grouping_key( + pipeline_manager.pipelines[0]))) + + def test_get_pipeline_duplicate_grouping_key(self): + transformer_cfg = [ + { + 'name': 'update', + 'parameters': {} + }, + { + 'name': 'update', + 'parameters': {} + }, + ] + self._set_pipeline_cfg('transformers', transformer_cfg) + pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, + self.transformer_manager) + self.assertEqual(['counter_name'], + pipeline.get_pipeline_grouping_key( + pipeline_manager.pipelines[0])) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/profiler/test_notifications.py ceilometer-5.0.0~b3/ceilometer/tests/profiler/test_notifications.py --- ceilometer-5.0.0~b2/ceilometer/tests/profiler/test_notifications.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/profiler/test_notifications.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,61 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
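
The two grouping-key tests closing the pipeline_base.py hunk above define the observable behaviour of pipeline.get_pipeline_grouping_key(): the grouping_keys declared by each transformer (the new class attribute added at the top of the hunk) are unioned and de-duplicated, so update + unit_conversion + update yields {'resource_id', 'counter_name'} while two update transformers collapse to ['counter_name']. A sketch consistent with those assertions; the pipe.sink.transformers layout is assumed, not taken from the module:

    def get_pipeline_grouping_key(pipe):
        # Union of every transformer's declared grouping keys, de-duplicated.
        keys = []
        for transformer in pipe.sink.transformers:
            keys += transformer.grouping_keys
        return list(set(keys))
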
-from oslotest import base - -from ceilometer.profiler import notifications -from ceilometer import sample - - -NOTIFICATION = { - "event_type": "profiler.compute", - "message_id": "dae6f69c-00e0-41c0-b371-41ec3b7f4451", - "publisher_id": "some_host", - - "payload": { - "user_id": "1e3ce043029547f1a61c1996d1a531a2", - "project_id": "663ce04332954555a61c1996d1a53143", - "base_id": "2e3ce043029547f1a61c1996d1a531a2", - "trace_id": "3e3ce043029547f1a61c1996d1a531a2", - "parent_id": "4e3ce043029547f1a61c1996d1a531a2", - "name": "some_name", - "info": { - "foo": "bar" - } - }, - "priority": "INFO", - "timestamp": "2012-05-08 20:23:48.028195" -} - - -class ProfilerNotificationsTestCase(base.BaseTestCase): - - def test_process_notification(self): - prof = notifications.ProfilerNotifications(None) - info = next(prof.process_notification(NOTIFICATION)) - - self.assertEqual(NOTIFICATION["payload"]["name"], info.name) - self.assertEqual(sample.TYPE_GAUGE, info.type) - self.assertEqual("trace", info.unit) - self.assertEqual(NOTIFICATION["payload"]["user_id"], info.user_id) - self.assertEqual(NOTIFICATION["payload"]["project_id"], - info.project_id) - self.assertEqual("profiler-%s" % NOTIFICATION["payload"]["base_id"], - info.resource_id) - self.assertEqual(1, info.volume) - self.assertEqual(NOTIFICATION["timestamp"], info.timestamp) - self.assertEqual(NOTIFICATION["payload"]["info"], - info.resource_metadata["info"]) - self.assertEqual(NOTIFICATION["publisher_id"], - info.resource_metadata["host"]) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/publisher/test_direct.py ceilometer-5.0.0~b3/ceilometer/tests/publisher/test_direct.py --- ceilometer-5.0.0~b2/ceilometer/tests/publisher/test_direct.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/publisher/test_direct.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,103 +0,0 @@ -# -# Copyright 2015 Red Hat -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
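
The deleted profiler test above also documents the notification-to-sample mapping: the payload name becomes the meter name, each trace point is a gauge of volume 1 with unit 'trace', the resource id is derived as 'profiler-<base_id>', and the payload's info dict plus the publisher host are preserved as resource metadata. A sketch of that mapping, assuming the standard ceilometer.sample.Sample constructor:

    from ceilometer import sample

    def profiler_sample(message):
        payload = message['payload']
        return sample.Sample(
            name=payload['name'],
            type=sample.TYPE_GAUGE,
            unit='trace',
            volume=1,  # every trace notification counts once
            user_id=payload.get('user_id'),
            project_id=payload.get('project_id'),
            resource_id='profiler-%s' % payload['base_id'],
            timestamp=message['timestamp'],
            resource_metadata={'info': payload.get('info'),
                               'host': message['publisher_id']},
        )
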
-"""Tests for ceilometer/publisher/direct.py -""" - -import datetime -import uuid - -from oslo_utils import netutils - -from ceilometer.event.storage import models as event -from ceilometer.publisher import direct -from ceilometer import sample -from ceilometer.tests import db as tests_db - - -class TestDirectPublisher(tests_db.TestBase, - tests_db.MixinTestsWithBackendScenarios): - - resource_id = str(uuid.uuid4()) - - test_data = [ - sample.Sample( - name='alpha', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id=resource_id, - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='beta', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id=resource_id, - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='gamma', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id=resource_id, - timestamp=datetime.datetime.now().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - ] - - def test_direct_publisher(self): - """Test samples are saved.""" - self.CONF.set_override('connection', self.db_manager.url, - group='database') - parsed_url = netutils.urlsplit('direct://') - publisher = direct.DirectPublisher(parsed_url) - publisher.publish_samples(None, - self.test_data) - - meters = list(self.conn.get_meters(resource=self.resource_id)) - names = sorted([meter.name for meter in meters]) - - self.assertEqual(3, len(meters), 'There should be 3 samples') - self.assertEqual(['alpha', 'beta', 'gamma'], names) - - -class TestEventDirectPublisher(tests_db.TestBase, - tests_db.MixinTestsWithBackendScenarios): - - test_data = [event.Event(message_id=str(uuid.uuid4()), - event_type='event_%d' % i, - generated=datetime.datetime.utcnow(), - traits=[], raw={}) - for i in range(0, 5)] - - def test_direct_publisher(self): - parsed_url = netutils.urlsplit('direct://') - publisher = direct.DirectPublisher(parsed_url) - publisher.publish_events(None, self.test_data) - - e_types = list(self.event_conn.get_event_types()) - self.assertEqual(5, len(e_types)) - self.assertEqual(['event_%d' % i for i in range(0, 5)], - sorted(e_types)) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/publisher/test_file.py ceilometer-5.0.0~b3/ceilometer/tests/publisher/test_file.py --- ceilometer-5.0.0~b2/ceilometer/tests/publisher/test_file.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/publisher/test_file.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,120 +0,0 @@ -# -# Copyright 2013-2014 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Tests for ceilometer/publisher/file.py -""" - -import datetime -import logging.handlers -import os -import tempfile - -from oslo_utils import netutils -from oslotest import base - -from ceilometer.publisher import file -from ceilometer import sample - - -class TestFilePublisher(base.BaseTestCase): - - test_data = [ - sample.Sample( - name='test', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='test2', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='test2', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - ] - - def test_file_publisher_maxbytes(self): - # Test valid configurations - tempdir = tempfile.mkdtemp() - name = '%s/log_file' % tempdir - parsed_url = netutils.urlsplit('file://%s?max_bytes=50&backup_count=3' - % name) - publisher = file.FilePublisher(parsed_url) - publisher.publish_samples(None, - self.test_data) - - handler = publisher.publisher_logger.handlers[0] - self.assertIsInstance(handler, - logging.handlers.RotatingFileHandler) - self.assertEqual([50, name, 3], [handler.maxBytes, - handler.baseFilename, - handler.backupCount]) - # The rotating file gets created since only allow 50 bytes. - self.assertTrue(os.path.exists('%s.1' % name)) - - def test_file_publisher(self): - # Test missing max bytes, backup count configurations - tempdir = tempfile.mkdtemp() - name = '%s/log_file_plain' % tempdir - parsed_url = netutils.urlsplit('file://%s' % name) - publisher = file.FilePublisher(parsed_url) - publisher.publish_samples(None, - self.test_data) - - handler = publisher.publisher_logger.handlers[0] - self.assertIsInstance(handler, - logging.handlers.RotatingFileHandler) - self.assertEqual([0, name, 0], [handler.maxBytes, - handler.baseFilename, - handler.backupCount]) - # Test the content is corrected saved in the file - self.assertTrue(os.path.exists(name)) - with open(name, 'r') as f: - content = f.read() - for sample_item in self.test_data: - self.assertIn(sample_item.id, content) - self.assertIn(sample_item.timestamp, content) - - def test_file_publisher_invalid(self): - # Test invalid max bytes, backup count configurations - tempdir = tempfile.mkdtemp() - parsed_url = netutils.urlsplit( - 'file://%s/log_file_bad' - '?max_bytes=yus&backup_count=5y' % tempdir) - publisher = file.FilePublisher(parsed_url) - publisher.publish_samples(None, - self.test_data) - - self.assertIsNone(publisher.publisher_logger) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/publisher/test_kafka_broker_publisher.py ceilometer-5.0.0~b3/ceilometer/tests/publisher/test_kafka_broker_publisher.py --- ceilometer-5.0.0~b2/ceilometer/tests/publisher/test_kafka_broker_publisher.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/publisher/test_kafka_broker_publisher.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,209 +0,0 @@ -# -# Copyright 2015 Cisco Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for ceilometer/publisher/kafka_broker.py -""" -import datetime -import uuid - -import mock -from oslo_utils import netutils - -from ceilometer.event.storage import models as event -from ceilometer.publisher import kafka_broker as kafka -from ceilometer import sample -from ceilometer.tests import base as tests_base - - -@mock.patch('ceilometer.publisher.kafka_broker.LOG', mock.Mock()) -@mock.patch.object(kafka.KafkaBrokerPublisher, '_get_client', mock.Mock()) -class TestKafkaPublisher(tests_base.BaseTestCase): - test_event_data = [ - event.Event(message_id=uuid.uuid4(), - event_type='event_%d' % i, - generated=datetime.datetime.utcnow(), - traits=[], raw={}) - for i in range(0, 5) - ] - - test_data = [ - sample.Sample( - name='test', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='test', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='test2', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='test2', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='test3', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - ] - - def setUp(self): - super(TestKafkaPublisher, self).setUp() - - def test_publish(self): - publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( - 'kafka://127.0.0.1:9092?topic=ceilometer')) - - with mock.patch.object(publisher, '_send') as fake_send: - publisher.publish_samples(mock.MagicMock(), self.test_data) - self.assertEqual(1, len(fake_send.mock_calls)) - self.assertEqual(0, len(publisher.local_queue)) - - def test_publish_without_options(self): - publisher = kafka.KafkaBrokerPublisher( - netutils.urlsplit('kafka://127.0.0.1:9092')) - - with mock.patch.object(publisher, '_send') as fake_send: - publisher.publish_samples(mock.MagicMock(), self.test_data) - self.assertEqual(1, len(fake_send.mock_calls)) - self.assertEqual(0, len(publisher.local_queue)) - - def test_publish_to_host_without_policy(self): - publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( - 'kafka://127.0.0.1:9092?topic=ceilometer')) - self.assertEqual('default', publisher.policy) - - publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( - 'kafka://127.0.0.1:9092?topic=ceilometer&policy=test')) - 
self.assertEqual('default', publisher.policy) - - def test_publish_to_host_with_default_policy(self): - publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( - 'kafka://127.0.0.1:9092?topic=ceilometer&policy=default')) - - with mock.patch.object(publisher, '_send') as fake_send: - fake_send.side_effect = TypeError - self.assertRaises(TypeError, publisher.publish_samples, - mock.MagicMock(), self.test_data) - self.assertEqual(100, len(fake_send.mock_calls)) - self.assertEqual(0, len(publisher.local_queue)) - - def test_publish_to_host_with_drop_policy(self): - publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( - 'kafka://127.0.0.1:9092?topic=ceilometer&policy=drop')) - - with mock.patch.object(publisher, '_send') as fake_send: - fake_send.side_effect = Exception("test") - publisher.publish_samples(mock.MagicMock(), self.test_data) - self.assertEqual(1, len(fake_send.mock_calls)) - self.assertEqual(0, len(publisher.local_queue)) - - def test_publish_to_host_with_queue_policy(self): - publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( - 'kafka://127.0.0.1:9092?topic=ceilometer&policy=queue')) - - with mock.patch.object(publisher, '_send') as fake_send: - fake_send.side_effect = Exception("test") - publisher.publish_samples(mock.MagicMock(), self.test_data) - self.assertEqual(1, len(fake_send.mock_calls)) - self.assertEqual(1, len(publisher.local_queue)) - - def test_publish_to_down_host_with_default_queue_size(self): - publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( - 'kafka://127.0.0.1:9092?topic=ceilometer&policy=queue')) - - with mock.patch.object(publisher, '_send') as fake_send: - fake_send.side_effect = Exception('No Connection') - - for i in range(0, 2000): - for s in self.test_data: - s.name = 'test-%d' % i - publisher.publish_samples(mock.MagicMock(), self.test_data) - - self.assertEqual(1024, len(publisher.local_queue)) - self.assertEqual('test-976', - publisher.local_queue[0][0]['counter_name']) - self.assertEqual('test-1999', - publisher.local_queue[1023][0]['counter_name']) - - def test_publish_to_host_from_down_to_up_with_queue(self): - publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( - 'kafka://127.0.0.1:9092?topic=ceilometer&policy=queue')) - - with mock.patch.object(publisher, '_send') as fake_send: - fake_send.side_effect = Exception('No Connection') - for i in range(0, 16): - for s in self.test_data: - s.name = 'test-%d' % i - publisher.publish_samples(mock.MagicMock(), self.test_data) - - self.assertEqual(16, len(publisher.local_queue)) - - fake_send.side_effect = None - for s in self.test_data: - s.name = 'test-%d' % 16 - publisher.publish_samples(mock.MagicMock(), self.test_data) - self.assertEqual(0, len(publisher.local_queue)) - - def test_publish_event_with_default_policy(self): - publisher = kafka.KafkaBrokerPublisher( - netutils.urlsplit('kafka://127.0.0.1:9092?topic=ceilometer')) - - with mock.patch.object(publisher, '_send') as fake_send: - publisher.publish_events(mock.MagicMock(), self.test_event_data) - self.assertEqual(1, len(fake_send.mock_calls)) - - with mock.patch.object(publisher, '_send') as fake_send: - fake_send.side_effect = TypeError - self.assertRaises(TypeError, publisher.publish_events, - mock.MagicMock(), self.test_event_data) - self.assertEqual(100, len(fake_send.mock_calls)) - self.assertEqual(0, len(publisher.local_queue)) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/publisher/test_messaging_publisher.py ceilometer-5.0.0~b3/ceilometer/tests/publisher/test_messaging_publisher.py --- 
ceilometer-5.0.0~b2/ceilometer/tests/publisher/test_messaging_publisher.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/publisher/test_messaging_publisher.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,401 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for ceilometer/publisher/messaging.py -""" -import datetime -import uuid - -import eventlet -import mock -from oslo_config import fixture as fixture_config -from oslo_context import context -import oslo_messaging -from oslo_utils import netutils -import testscenarios.testcase - -from ceilometer.event.storage import models as event -from ceilometer import messaging -from ceilometer.publisher import messaging as msg_publisher -from ceilometer import sample -from ceilometer.tests import base as tests_base - - -class BasePublisherTestCase(tests_base.BaseTestCase): - test_event_data = [ - event.Event(message_id=uuid.uuid4(), - event_type='event_%d' % i, - generated=datetime.datetime.utcnow(), - traits=[], raw={}) - for i in range(0, 5) - ] - - test_sample_data = [ - sample.Sample( - name='test', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='test', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='test2', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='test2', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='test3', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - ] - - def setUp(self): - super(BasePublisherTestCase, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.setup_messaging(self.CONF) - - -class RpcOnlyPublisherTest(BasePublisherTestCase): - def test_published_no_mock(self): - publisher = msg_publisher.RPCPublisher( - netutils.urlsplit('rpc://')) - - endpoint = mock.MagicMock(['record_metering_data']) - collector = messaging.get_rpc_server( - self.transport, self.CONF.publisher_rpc.metering_topic, endpoint) - endpoint.record_metering_data.side_effect = (lambda *args, **kwds: - collector.stop()) - - collector.start() - eventlet.sleep() 
- publisher.publish_samples(context.RequestContext(), - self.test_sample_data) - collector.wait() - - class Matcher(object): - @staticmethod - def __eq__(data): - for i, sample_item in enumerate(data): - if (sample_item['counter_name'] != - self.test_sample_data[i].name): - return False - return True - - endpoint.record_metering_data.assert_called_once_with( - mock.ANY, data=Matcher()) - - def test_publish_target(self): - publisher = msg_publisher.RPCPublisher( - netutils.urlsplit('rpc://?target=custom_procedure_call')) - cast_context = mock.MagicMock() - with mock.patch.object(publisher.rpc_client, 'prepare') as prepare: - prepare.return_value = cast_context - publisher.publish_samples(mock.MagicMock(), - self.test_sample_data) - - prepare.assert_called_once_with( - topic=self.CONF.publisher_rpc.metering_topic) - cast_context.cast.assert_called_once_with( - mock.ANY, 'custom_procedure_call', data=mock.ANY) - - def test_published_with_per_meter_topic(self): - publisher = msg_publisher.RPCPublisher( - netutils.urlsplit('rpc://?per_meter_topic=1')) - with mock.patch.object(publisher.rpc_client, 'prepare') as prepare: - publisher.publish_samples(mock.MagicMock(), - self.test_sample_data) - - class MeterGroupMatcher(object): - def __eq__(self, meters): - return len(set(meter['counter_name'] - for meter in meters)) == 1 - - topic = self.CONF.publisher_rpc.metering_topic - expected = [mock.call(topic=topic), - mock.call().cast(mock.ANY, 'record_metering_data', - data=mock.ANY), - mock.call(topic=topic + '.test'), - mock.call().cast(mock.ANY, 'record_metering_data', - data=MeterGroupMatcher()), - mock.call(topic=topic + '.test2'), - mock.call().cast(mock.ANY, 'record_metering_data', - data=MeterGroupMatcher()), - mock.call(topic=topic + '.test3'), - mock.call().cast(mock.ANY, 'record_metering_data', - data=MeterGroupMatcher())] - self.assertEqual(expected, prepare.mock_calls) - - -class NotifierOnlyPublisherTest(BasePublisherTestCase): - - @mock.patch('oslo_messaging.Notifier') - def test_publish_topic_override(self, notifier): - msg_publisher.SampleNotifierPublisher( - netutils.urlsplit('notifier://?topic=custom_topic')) - notifier.assert_called_with(mock.ANY, topic='custom_topic', - driver=mock.ANY, retry=mock.ANY, - publisher_id=mock.ANY) - - msg_publisher.EventNotifierPublisher( - netutils.urlsplit('notifier://?topic=custom_event_topic')) - notifier.assert_called_with(mock.ANY, topic='custom_event_topic', - driver=mock.ANY, retry=mock.ANY, - publisher_id=mock.ANY) - - -class TestPublisher(testscenarios.testcase.WithScenarios, - BasePublisherTestCase): - scenarios = [ - ('notifier', - dict(protocol="notifier", - publisher_cls=msg_publisher.SampleNotifierPublisher, - test_data=BasePublisherTestCase.test_sample_data, - pub_func='publish_samples', attr='source')), - ('event_notifier', - dict(protocol="notifier", - publisher_cls=msg_publisher.EventNotifierPublisher, - test_data=BasePublisherTestCase.test_event_data, - pub_func='publish_events', attr='event_type')), - ('rpc', dict(protocol="rpc", - publisher_cls=msg_publisher.RPCPublisher, - test_data=BasePublisherTestCase.test_sample_data, - pub_func='publish_samples', attr='source')), - ] - - def setUp(self): - super(TestPublisher, self).setUp() - self.topic = (self.CONF.publisher_notifier.event_topic - if self.pub_func == 'publish_events' else - self.CONF.publisher_rpc.metering_topic) - - -class TestPublisherPolicy(TestPublisher): - def test_published_concurrency(self): - """Test concurrent access to the local queue of the rpc publisher.""" - - 
publisher = self.publisher_cls( - netutils.urlsplit('%s://' % self.protocol)) - - with mock.patch.object(publisher, '_send') as fake_send: - def fake_send_wait(ctxt, topic, meters): - fake_send.side_effect = mock.Mock() - # Sleep to simulate concurrency and allow other threads to work - eventlet.sleep(0) - - fake_send.side_effect = fake_send_wait - - job1 = eventlet.spawn(getattr(publisher, self.pub_func), - mock.MagicMock(), self.test_data) - job2 = eventlet.spawn(getattr(publisher, self.pub_func), - mock.MagicMock(), self.test_data) - - job1.wait() - job2.wait() - - self.assertEqual('default', publisher.policy) - self.assertEqual(2, len(fake_send.mock_calls)) - self.assertEqual(0, len(publisher.local_queue)) - - @mock.patch('ceilometer.publisher.messaging.LOG') - def test_published_with_no_policy(self, mylog): - publisher = self.publisher_cls( - netutils.urlsplit('%s://' % self.protocol)) - side_effect = oslo_messaging.MessageDeliveryFailure() - with mock.patch.object(publisher, '_send') as fake_send: - fake_send.side_effect = side_effect - self.assertRaises( - oslo_messaging.MessageDeliveryFailure, - getattr(publisher, self.pub_func), - mock.MagicMock(), self.test_data) - self.assertTrue(mylog.info.called) - self.assertEqual('default', publisher.policy) - self.assertEqual(0, len(publisher.local_queue)) - fake_send.assert_called_once_with( - mock.ANY, self.topic, mock.ANY) - - @mock.patch('ceilometer.publisher.messaging.LOG') - def test_published_with_policy_block(self, mylog): - publisher = self.publisher_cls( - netutils.urlsplit('%s://?policy=default' % self.protocol)) - side_effect = oslo_messaging.MessageDeliveryFailure() - with mock.patch.object(publisher, '_send') as fake_send: - fake_send.side_effect = side_effect - self.assertRaises( - oslo_messaging.MessageDeliveryFailure, - getattr(publisher, self.pub_func), - mock.MagicMock(), self.test_data) - self.assertTrue(mylog.info.called) - self.assertEqual(0, len(publisher.local_queue)) - fake_send.assert_called_once_with( - mock.ANY, self.topic, mock.ANY) - - @mock.patch('ceilometer.publisher.messaging.LOG') - def test_published_with_policy_incorrect(self, mylog): - publisher = self.publisher_cls( - netutils.urlsplit('%s://?policy=notexist' % self.protocol)) - side_effect = oslo_messaging.MessageDeliveryFailure() - with mock.patch.object(publisher, '_send') as fake_send: - fake_send.side_effect = side_effect - self.assertRaises( - oslo_messaging.MessageDeliveryFailure, - getattr(publisher, self.pub_func), - mock.MagicMock(), self.test_data) - self.assertTrue(mylog.warn.called) - self.assertEqual('default', publisher.policy) - self.assertEqual(0, len(publisher.local_queue)) - fake_send.assert_called_once_with( - mock.ANY, self.topic, mock.ANY) - - -@mock.patch('ceilometer.publisher.messaging.LOG', mock.Mock()) -class TestPublisherPolicyReactions(TestPublisher): - - def test_published_with_policy_drop_and_rpc_down(self): - publisher = self.publisher_cls( - netutils.urlsplit('%s://?policy=drop' % self.protocol)) - side_effect = oslo_messaging.MessageDeliveryFailure() - with mock.patch.object(publisher, '_send') as fake_send: - fake_send.side_effect = side_effect - getattr(publisher, self.pub_func)(mock.MagicMock(), - self.test_data) - self.assertEqual(0, len(publisher.local_queue)) - fake_send.assert_called_once_with( - mock.ANY, self.topic, mock.ANY) - - def test_published_with_policy_queue_and_rpc_down(self): - publisher = self.publisher_cls( - netutils.urlsplit('%s://?policy=queue' % self.protocol)) - side_effect = 
oslo_messaging.MessageDeliveryFailure() - with mock.patch.object(publisher, '_send') as fake_send: - fake_send.side_effect = side_effect - - getattr(publisher, self.pub_func)(mock.MagicMock(), - self.test_data) - self.assertEqual(1, len(publisher.local_queue)) - fake_send.assert_called_once_with( - mock.ANY, self.topic, mock.ANY) - - def test_published_with_policy_queue_and_rpc_down_up(self): - self.rpc_unreachable = True - publisher = self.publisher_cls( - netutils.urlsplit('%s://?policy=queue' % self.protocol)) - - side_effect = oslo_messaging.MessageDeliveryFailure() - with mock.patch.object(publisher, '_send') as fake_send: - fake_send.side_effect = side_effect - getattr(publisher, self.pub_func)(mock.MagicMock(), - self.test_data) - - self.assertEqual(1, len(publisher.local_queue)) - - fake_send.side_effect = mock.MagicMock() - getattr(publisher, self.pub_func)(mock.MagicMock(), - self.test_data) - - self.assertEqual(0, len(publisher.local_queue)) - - topic = self.topic - expected = [mock.call(mock.ANY, topic, mock.ANY), - mock.call(mock.ANY, topic, mock.ANY), - mock.call(mock.ANY, topic, mock.ANY)] - self.assertEqual(expected, fake_send.mock_calls) - - def test_published_with_policy_sized_queue_and_rpc_down(self): - publisher = self.publisher_cls(netutils.urlsplit( - '%s://?policy=queue&max_queue_length=3' % self.protocol)) - - side_effect = oslo_messaging.MessageDeliveryFailure() - with mock.patch.object(publisher, '_send') as fake_send: - fake_send.side_effect = side_effect - for i in range(0, 5): - for s in self.test_data: - setattr(s, self.attr, 'test-%d' % i) - getattr(publisher, self.pub_func)(mock.MagicMock(), - self.test_data) - - self.assertEqual(3, len(publisher.local_queue)) - self.assertEqual( - 'test-2', - publisher.local_queue[0][2][0][self.attr] - ) - self.assertEqual( - 'test-3', - publisher.local_queue[1][2][0][self.attr] - ) - self.assertEqual( - 'test-4', - publisher.local_queue[2][2][0][self.attr] - ) - - def test_published_with_policy_default_sized_queue_and_rpc_down(self): - publisher = self.publisher_cls( - netutils.urlsplit('%s://?policy=queue' % self.protocol)) - - side_effect = oslo_messaging.MessageDeliveryFailure() - with mock.patch.object(publisher, '_send') as fake_send: - fake_send.side_effect = side_effect - for i in range(0, 2000): - for s in self.test_data: - setattr(s, self.attr, 'test-%d' % i) - getattr(publisher, self.pub_func)(mock.MagicMock(), - self.test_data) - - self.assertEqual(1024, len(publisher.local_queue)) - self.assertEqual( - 'test-976', - publisher.local_queue[0][2][0][self.attr] - ) - self.assertEqual( - 'test-1999', - publisher.local_queue[1023][2][0][self.attr] - ) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/publisher/test_udp.py ceilometer-5.0.0~b3/ceilometer/tests/publisher/test_udp.py --- ceilometer-5.0.0~b2/ceilometer/tests/publisher/test_udp.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/publisher/test_udp.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,176 +0,0 @@ -# -# Copyright 2013-2014 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for ceilometer/publisher/udp.py -""" - -import datetime -import socket - -import mock -import msgpack -from oslo_config import fixture as fixture_config -from oslo_utils import netutils -from oslotest import base - -from ceilometer.publisher import udp -from ceilometer.publisher import utils -from ceilometer import sample - - -COUNTER_SOURCE = 'testsource' - - -class TestUDPPublisher(base.BaseTestCase): - test_data = [ - sample.Sample( - name='test', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - source=COUNTER_SOURCE, - ), - sample.Sample( - name='test', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - source=COUNTER_SOURCE, - ), - sample.Sample( - name='test2', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - source=COUNTER_SOURCE, - ), - sample.Sample( - name='test2', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - source=COUNTER_SOURCE, - ), - sample.Sample( - name='test3', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - source=COUNTER_SOURCE, - ), - ] - - @staticmethod - def _make_fake_socket(published): - def _fake_socket_socket(family, type): - def record_data(msg, dest): - published.append((msg, dest)) - - udp_socket = mock.Mock() - udp_socket.sendto = record_data - return udp_socket - - return _fake_socket_socket - - def setUp(self): - super(TestUDPPublisher, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.CONF.publisher.telemetry_secret = 'not-so-secret' - - def _check_udp_socket(self, url, expected_addr_family): - with mock.patch.object(socket, 'socket') as mock_socket: - udp.UDPPublisher(netutils.urlsplit(url)) - mock_socket.assert_called_with(expected_addr_family, - socket.SOCK_DGRAM) - - def test_publisher_udp_socket_ipv4(self): - self._check_udp_socket('udp://127.0.0.1:4952', - socket.AF_INET) - - def test_publisher_udp_socket_ipv6(self): - self._check_udp_socket('udp://[::1]:4952', - socket.AF_INET6) - - def test_published(self): - self.data_sent = [] - with mock.patch('socket.socket', - self._make_fake_socket(self.data_sent)): - publisher = udp.UDPPublisher( - netutils.urlsplit('udp://somehost')) - publisher.publish_samples(None, - self.test_data) - - self.assertEqual(5, len(self.data_sent)) - - sent_counters = [] - - for data, dest in self.data_sent: - counter = msgpack.loads(data, encoding="utf-8") - sent_counters.append(counter) - - # Check destination - self.assertEqual(('somehost', - self.CONF.collector.udp_port), dest) - - # Check that counters are equal - def sort_func(counter): - return counter['counter_name'] - - counters = 
[utils.meter_message_from_counter(d, "not-so-secret") - for d in self.test_data] - counters.sort(key=sort_func) - sent_counters.sort(key=sort_func) - self.assertEqual(counters, sent_counters) - - @staticmethod - def _raise_ioerror(*args): - raise IOError - - def _make_broken_socket(self, family, type): - udp_socket = mock.Mock() - udp_socket.sendto = self._raise_ioerror - return udp_socket - - def test_publish_error(self): - with mock.patch('socket.socket', - self._make_broken_socket): - publisher = udp.UDPPublisher( - netutils.urlsplit('udp://localhost')) - publisher.publish_samples(None, - self.test_data) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/publisher/test_utils.py ceilometer-5.0.0~b3/ceilometer/tests/publisher/test_utils.py --- ceilometer-5.0.0~b2/ceilometer/tests/publisher/test_utils.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/publisher/test_utils.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,135 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for ceilometer/publisher/utils.py -""" -from oslo_serialization import jsonutils -from oslotest import base - -from ceilometer.publisher import utils - - -class TestSignature(base.BaseTestCase): - def test_compute_signature_change_key(self): - sig1 = utils.compute_signature({'a': 'A', 'b': 'B'}, - 'not-so-secret') - sig2 = utils.compute_signature({'A': 'A', 'b': 'B'}, - 'not-so-secret') - self.assertNotEqual(sig1, sig2) - - def test_compute_signature_change_value(self): - sig1 = utils.compute_signature({'a': 'A', 'b': 'B'}, - 'not-so-secret') - sig2 = utils.compute_signature({'a': 'a', 'b': 'B'}, - 'not-so-secret') - self.assertNotEqual(sig1, sig2) - - def test_compute_signature_same(self): - sig1 = utils.compute_signature({'a': 'A', 'b': 'B'}, - 'not-so-secret') - sig2 = utils.compute_signature({'a': 'A', 'b': 'B'}, - 'not-so-secret') - self.assertEqual(sig1, sig2) - - def test_compute_signature_signed(self): - data = {'a': 'A', 'b': 'B'} - sig1 = utils.compute_signature(data, 'not-so-secret') - data['message_signature'] = sig1 - sig2 = utils.compute_signature(data, 'not-so-secret') - self.assertEqual(sig1, sig2) - - def test_compute_signature_use_configured_secret(self): - data = {'a': 'A', 'b': 'B'} - sig1 = utils.compute_signature(data, 'not-so-secret') - sig2 = utils.compute_signature(data, 'different-value') - self.assertNotEqual(sig1, sig2) - - def test_verify_signature_signed(self): - data = {'a': 'A', 'b': 'B'} - sig1 = utils.compute_signature(data, 'not-so-secret') - data['message_signature'] = sig1 - self.assertTrue(utils.verify_signature(data, 'not-so-secret')) - - def test_verify_signature_unsigned(self): - data = {'a': 'A', 'b': 'B'} - self.assertFalse(utils.verify_signature(data, 'not-so-secret')) - - def test_verify_signature_incorrect(self): - data = {'a': 'A', 'b': 'B', - 'message_signature': 'Not the same'} - self.assertFalse(utils.verify_signature(data, 'not-so-secret')) - - def 
test_verify_signature_invalid_encoding(self): - data = {'a': 'A', 'b': 'B', - 'message_signature': ''} - self.assertFalse(utils.verify_signature(data, 'not-so-secret')) - - def test_verify_signature_unicode(self): - data = {'a': 'A', 'b': 'B', - 'message_signature': u''} - self.assertFalse(utils.verify_signature(data, 'not-so-secret')) - - def test_verify_signature_nested(self): - data = {'a': 'A', - 'b': 'B', - 'nested': {'a': 'A', - 'b': 'B', - }, - } - data['message_signature'] = utils.compute_signature( - data, - 'not-so-secret') - self.assertTrue(utils.verify_signature(data, 'not-so-secret')) - - def test_verify_signature_nested_json(self): - data = {'a': 'A', - 'b': 'B', - 'nested': {'a': 'A', - 'b': 'B', - 'c': ('c',), - 'd': ['d'] - }, - } - data['message_signature'] = utils.compute_signature( - data, - 'not-so-secret') - jsondata = jsonutils.loads(jsonutils.dumps(data)) - self.assertTrue(utils.verify_signature(jsondata, 'not-so-secret')) - - def test_verify_unicode_symbols(self): - data = {u'a\xe9\u0437': 'A', - 'b': u'B\xe9\u0437' - } - data['message_signature'] = utils.compute_signature( - data, - 'not-so-secret') - jsondata = jsonutils.loads(jsonutils.dumps(data)) - self.assertTrue(utils.verify_signature(jsondata, 'not-so-secret')) - - def test_besteffort_compare_digest(self): - hash1 = "f5ac3fe42b80b80f979825d177191bc5" - hash2 = "f5ac3fe42b80b80f979825d177191bc5" - hash3 = "1dece7821bf3fd70fe1309eaa37d52a2" - hash4 = b"f5ac3fe42b80b80f979825d177191bc5" - hash5 = b"f5ac3fe42b80b80f979825d177191bc5" - hash6 = b"1dece7821bf3fd70fe1309eaa37d52a2" - - self.assertTrue(utils.besteffort_compare_digest(hash1, hash2)) - self.assertFalse(utils.besteffort_compare_digest(hash1, hash3)) - self.assertTrue(utils.besteffort_compare_digest(hash4, hash5)) - self.assertFalse(utils.besteffort_compare_digest(hash4, hash6)) - - def test_verify_no_secret(self): - data = {'a': 'A', 'b': 'B'} - self.assertTrue(utils.verify_signature(data, '')) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/storage/sqlalchemy/test_models.py ceilometer-5.0.0~b3/ceilometer/tests/storage/sqlalchemy/test_models.py --- ceilometer-5.0.0~b2/ceilometer/tests/storage/sqlalchemy/test_models.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/storage/sqlalchemy/test_models.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,96 +0,0 @@ -# -# Copyright 2013 Rackspace Hosting -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
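Annotator's note on the removed file below: it exercised models.PreciseTimestamp, which the tests pin to a DECIMAL/NUMERIC column with precision 20 and scale 6 on MySQL (sub-second precision) and a plain sqlalchemy.DateTime on PostgreSQL, with process_bind_param delegating to utils.dt_to_decimal on the MySQL path. A rough sketch of that conversion, mirroring ceilometer.utils.dt_to_decimal only in spirit — treat the body as an assumption, not the removed helper:

    import calendar
    from decimal import Decimal

    def dt_to_decimal(utc):
        # Unix timestamp with microsecond precision, storable in a
        # DECIMAL(20, 6) column (sketch; not the exact removed code).
        seconds = calendar.timegm(utc.utctimetuple())
        return Decimal(seconds) + Decimal(utc.microsecond) / Decimal(1000000)
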
- -import datetime - -import mock -from oslotest import base -import sqlalchemy -from sqlalchemy.dialects.mysql import DECIMAL -from sqlalchemy.types import NUMERIC - -from ceilometer.storage.sqlalchemy import models -from ceilometer import utils - - -class PreciseTimestampTest(base.BaseTestCase): - - @staticmethod - def fake_dialect(name): - def _type_descriptor_mock(desc): - if type(desc) == DECIMAL: - return NUMERIC(precision=desc.precision, scale=desc.scale) - dialect = mock.MagicMock() - dialect.name = name - dialect.type_descriptor = _type_descriptor_mock - return dialect - - def setUp(self): - super(PreciseTimestampTest, self).setUp() - self._mysql_dialect = self.fake_dialect('mysql') - self._postgres_dialect = self.fake_dialect('postgres') - self._type = models.PreciseTimestamp() - self._date = datetime.datetime(2012, 7, 2, 10, 44) - - def test_load_dialect_impl_mysql(self): - result = self._type.load_dialect_impl(self._mysql_dialect) - self.assertEqual(NUMERIC, type(result)) - self.assertEqual(20, result.precision) - self.assertEqual(6, result.scale) - self.assertTrue(result.asdecimal) - - def test_load_dialect_impl_postgres(self): - result = self._type.load_dialect_impl(self._postgres_dialect) - self.assertEqual(sqlalchemy.DateTime, type(result)) - - def test_process_bind_param_store_decimal_mysql(self): - expected = utils.dt_to_decimal(self._date) - result = self._type.process_bind_param(self._date, self._mysql_dialect) - self.assertEqual(expected, result) - - def test_process_bind_param_store_datetime_postgres(self): - result = self._type.process_bind_param(self._date, - self._postgres_dialect) - self.assertEqual(self._date, result) - - def test_process_bind_param_store_none_mysql(self): - result = self._type.process_bind_param(None, self._mysql_dialect) - self.assertIsNone(result) - - def test_process_bind_param_store_none_postgres(self): - result = self._type.process_bind_param(None, - self._postgres_dialect) - self.assertIsNone(result) - - def test_process_result_value_datetime_mysql(self): - dec_value = utils.dt_to_decimal(self._date) - result = self._type.process_result_value(dec_value, - self._mysql_dialect) - self.assertEqual(self._date, result) - - def test_process_result_value_datetime_postgres(self): - result = self._type.process_result_value(self._date, - self._postgres_dialect) - self.assertEqual(self._date, result) - - def test_process_result_value_none_mysql(self): - result = self._type.process_result_value(None, - self._mysql_dialect) - self.assertIsNone(result) - - def test_process_result_value_none_postgres(self): - result = self._type.process_result_value(None, - self._postgres_dialect) - self.assertIsNone(result) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/storage/test_base.py ceilometer-5.0.0~b3/ceilometer/tests/storage/test_base.py --- ceilometer-5.0.0~b2/ceilometer/tests/storage/test_base.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/storage/test_base.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,58 +0,0 @@ -# -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the
-# License for the specific language governing permissions and limitations
-# under the License.
-import datetime
-import math
-
-from oslotest import base as testbase
-
-from ceilometer.storage import base
-
-
-class BaseTest(testbase.BaseTestCase):
-
-    def test_iter_period(self):
-        times = list(base.iter_period(
-            datetime.datetime(2013, 1, 1, 12, 0),
-            datetime.datetime(2013, 1, 1, 13, 0),
-            60))
-        self.assertEqual(60, len(times))
-        self.assertEqual((datetime.datetime(2013, 1, 1, 12, 10),
-                          datetime.datetime(2013, 1, 1, 12, 11)), times[10])
-        self.assertEqual((datetime.datetime(2013, 1, 1, 12, 21),
-                          datetime.datetime(2013, 1, 1, 12, 22)), times[21])
-
-    def test_iter_period_bis(self):
-        times = list(base.iter_period(
-            datetime.datetime(2013, 1, 2, 13, 0),
-            datetime.datetime(2013, 1, 2, 14, 0),
-            55))
-        self.assertEqual(math.ceil(3600 / 55.0), len(times))
-        self.assertEqual((datetime.datetime(2013, 1, 2, 13, 9, 10),
-                          datetime.datetime(2013, 1, 2, 13, 10, 5)),
-                         times[10])
-        self.assertEqual((datetime.datetime(2013, 1, 2, 13, 19, 15),
-                          datetime.datetime(2013, 1, 2, 13, 20, 10)),
-                         times[21])
-
-    def test_handle_sort_key(self):
-        sort_keys_alarm = base._handle_sort_key('alarm')
-        self.assertEqual(['name', 'user_id', 'project_id'], sort_keys_alarm)
-
-        sort_keys_meter = base._handle_sort_key('meter', 'foo')
-        self.assertEqual(['foo', 'user_id', 'project_id'], sort_keys_meter)
-
-        sort_keys_resource = base._handle_sort_key('resource', 'project_id')
-        self.assertEqual(['project_id', 'user_id', 'timestamp'],
-                         sort_keys_resource)
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/storage/test_get_connection.py ceilometer-5.0.0~b3/ceilometer/tests/storage/test_get_connection.py
--- ceilometer-5.0.0~b2/ceilometer/tests/storage/test_get_connection.py 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/storage/test_get_connection.py 1970-01-01 00:00:00.000000000 +0000
@@ -1,133 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Tests for ceilometer/storage/ -""" -import unittest - -import mock -from oslo_config import fixture as fixture_config -from oslotest import base -import retrying - -from ceilometer.alarm.storage import impl_log as impl_log_alarm -from ceilometer.alarm.storage import impl_sqlalchemy as impl_sqlalchemy_alarm -try: - from ceilometer.event.storage import impl_hbase as impl_hbase_event -except ImportError: - impl_hbase_event = None -from ceilometer import storage -from ceilometer.storage import impl_log -from ceilometer.storage import impl_sqlalchemy - -import six - - -class EngineTest(base.BaseTestCase): - def test_get_connection(self): - engine = storage.get_connection('log://localhost', - 'ceilometer.metering.storage') - self.assertIsInstance(engine, impl_log.Connection) - - def test_get_connection_no_such_engine(self): - try: - storage.get_connection('no-such-engine://localhost', - 'ceilometer.metering.storage') - except RuntimeError as err: - self.assertIn('no-such-engine', six.text_type(err)) - - -class ConnectionRetryTest(base.BaseTestCase): - def setUp(self): - super(ConnectionRetryTest, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - - def test_retries(self): - with mock.patch.object(retrying.time, 'sleep') as retry_sleep: - try: - self.CONF.set_override("connection", "no-such-engine://", - group="database") - storage.get_connection_from_config(self.CONF) - except RuntimeError as err: - self.assertIn('no-such-engine', six.text_type(err)) - self.assertEqual(9, retry_sleep.call_count) - retry_sleep.assert_called_with(10.0) - - -class ConnectionConfigTest(base.BaseTestCase): - def setUp(self): - super(ConnectionConfigTest, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - - def test_only_default_url(self): - self.CONF.set_override("connection", "log://", group="database") - conn = storage.get_connection_from_config(self.CONF) - self.assertIsInstance(conn, impl_log.Connection) - conn = storage.get_connection_from_config(self.CONF, 'metering') - self.assertIsInstance(conn, impl_log.Connection) - conn = storage.get_connection_from_config(self.CONF, 'alarm') - self.assertIsInstance(conn, impl_log_alarm.Connection) - - def test_two_urls(self): - self.CONF.set_override("connection", "log://", group="database") - self.CONF.set_override("alarm_connection", "sqlite://", - group="database") - conn = storage.get_connection_from_config(self.CONF) - self.assertIsInstance(conn, impl_log.Connection) - conn = storage.get_connection_from_config(self.CONF, 'metering') - self.assertIsInstance(conn, impl_log.Connection) - conn = storage.get_connection_from_config(self.CONF, 'alarm') - self.assertIsInstance(conn, impl_sqlalchemy_alarm.Connection) - - @unittest.skipUnless(impl_hbase_event, 'need hbase implementation') - def test_three_urls(self): - self.CONF.set_override("connection", "log://", group="database") - self.CONF.set_override("alarm_connection", "sqlite://", - group="database") - self.CONF.set_override("event_connection", "hbase://__test__", - group="database") - conn = storage.get_connection_from_config(self.CONF) - self.assertIsInstance(conn, impl_log.Connection) - conn = storage.get_connection_from_config(self.CONF, 'metering') - self.assertIsInstance(conn, impl_log.Connection) - conn = storage.get_connection_from_config(self.CONF, 'alarm') - self.assertIsInstance(conn, impl_sqlalchemy_alarm.Connection) - conn = storage.get_connection_from_config(self.CONF, 'event') - self.assertIsInstance(conn, impl_hbase_event.Connection) - - 
@unittest.skipUnless(impl_hbase_event, 'need hbase implementation') - def test_three_urls_no_default(self): - self.CONF.set_override("connection", None, group="database") - self.CONF.set_override("metering_connection", "log://", - group="database") - self.CONF.set_override("alarm_connection", "sqlite://", - group="database") - self.CONF.set_override("event_connection", "hbase://__test__", - group="database") - conn = storage.get_connection_from_config(self.CONF) - self.assertIsInstance(conn, impl_log.Connection) - conn = storage.get_connection_from_config(self.CONF, 'alarm') - self.assertIsInstance(conn, impl_sqlalchemy_alarm.Connection) - conn = storage.get_connection_from_config(self.CONF, 'event') - self.assertIsInstance(conn, impl_hbase_event.Connection) - - def test_sqlalchemy_driver(self): - self.CONF.set_override("connection", "sqlite+pysqlite://", - group="database") - conn = storage.get_connection_from_config(self.CONF) - self.assertIsInstance(conn, impl_sqlalchemy.Connection) - conn = storage.get_connection_from_config(self.CONF, 'metering') - self.assertIsInstance(conn, impl_sqlalchemy.Connection) - conn = storage.get_connection_from_config(self.CONF, 'alarm') - self.assertIsInstance(conn, impl_sqlalchemy_alarm.Connection) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/storage/test_impl_db2.py ceilometer-5.0.0~b3/ceilometer/tests/storage/test_impl_db2.py --- ceilometer-5.0.0~b2/ceilometer/tests/storage/test_impl_db2.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/storage/test_impl_db2.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,157 +0,0 @@ -# -# Copyright Ericsson AB 2014. All rights reserved -# -# Authors: Ildiko Vancsa -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for ceilometer/storage/impl_db2.py - -.. note:: - In order to run the tests against another MongoDB server set the - environment variable CEILOMETER_TEST_DB2_URL to point to a DB2 - server before running the tests. 
- -""" - -import bson -import mock -from oslo_config import cfg -from oslo_utils import timeutils - -from ceilometer.alarm.storage import impl_db2 as impl_db2_alarm -from ceilometer.event.storage import impl_db2 as impl_db2_event -from ceilometer.storage import impl_db2 -from ceilometer.storage.mongo import utils as pymongo_utils -from ceilometer.tests import base as test_base - - -class CapabilitiesTest(test_base.BaseTestCase): - # Check the returned capabilities list, which is specific to each DB - # driver - - def test_capabilities(self): - expected_capabilities = { - 'meters': {'query': {'simple': True, - 'metadata': True, - 'complex': False}}, - 'resources': {'query': {'simple': True, - 'metadata': True, - 'complex': False}}, - 'samples': {'query': {'simple': True, - 'metadata': True, - 'complex': True}}, - 'statistics': {'groupby': True, - 'query': {'simple': True, - 'metadata': True, - 'complex': False}, - 'aggregation': {'standard': True, - 'selectable': { - 'max': False, - 'min': False, - 'sum': False, - 'avg': False, - 'count': False, - 'stddev': False, - 'cardinality': False}} - }, - } - - actual_capabilities = impl_db2.Connection.get_capabilities() - self.assertEqual(expected_capabilities, actual_capabilities) - - def test_event_capabilities(self): - expected_capabilities = { - 'events': {'query': {'simple': True}}, - } - actual_capabilities = impl_db2_event.Connection.get_capabilities() - self.assertEqual(expected_capabilities, actual_capabilities) - - def test_alarm_capabilities(self): - expected_capabilities = { - 'alarms': {'query': {'simple': True, - 'complex': True}, - 'history': {'query': {'simple': True, - 'complex': True}}}, - } - - actual_capabilities = impl_db2_alarm.Connection.get_capabilities() - self.assertEqual(expected_capabilities, actual_capabilities) - - def test_storage_capabilities(self): - expected_capabilities = { - 'storage': {'production_ready': True}, - } - actual_capabilities = impl_db2.Connection.get_storage_capabilities() - self.assertEqual(expected_capabilities, actual_capabilities) - - -class ConnectionTest(test_base.BaseTestCase): - @mock.patch.object(impl_db2.Connection, '_generate_random_str') - @mock.patch.object(pymongo_utils.ConnectionPool, 'connect') - @mock.patch.object(timeutils, 'utcnow') - @mock.patch.object(bson.objectid, 'ObjectId') - def test_upgrade(self, meter_id, timestamp, mongo_connect, - _generate_random_str): - conn_mock = mock.MagicMock() - conn_mock.server_info.return_value = {} - _generate_random_str.return_value = 'wew' * 247 + 'x' * 3 - conn_mock.ceilodb2.resource.index_information.return_value = {} - mongo_connect.return_value = conn_mock - meter_id.return_value = '54b8860d75bfe43b54e84ce7' - timestamp.return_value = 'timestamp' - cfg.CONF.set_override('db2nosql_resource_id_maxlen', - 256, - group='database') - impl_db2.Connection('db2://user:pwd@localhost:27017/ceilodb2') - resource_id = 'wew' * 247 + 'x' * 3 - conn_mock.ceilodb2.resource.insert_one.assert_called_with( - {'_id': resource_id, - 'no_key': resource_id}) - conn_mock.ceilodb2.meter.insert_one.assert_called_with( - {'_id': '54b8860d75bfe43b54e84ce7', - 'no_key': '54b8860d75bfe43b54e84ce7', - 'timestamp': 'timestamp'}) - - @mock.patch.object(pymongo_utils.ConnectionPool, 'connect') - @mock.patch.object(bson.objectid, 'ObjectId') - def test_generate_random_str_with_less_config_len(self, objectid, - mongo_connect): - fake_str = '54b8860d75bfe43b54e84ce7' - conn_mock = mock.MagicMock() - conn_mock.server_info.return_value = {} - mongo_connect.return_value = 
conn_mock - objectid.return_value = fake_str - cfg.CONF.set_override('db2nosql_resource_id_maxlen', - 20, - group='database') - conn = impl_db2.Connection('db2://user:pwd@localhost:27017/ceilodb2') - rand_str = conn._generate_random_str(20) - self.assertEqual(fake_str, rand_str) - - @mock.patch.object(pymongo_utils.ConnectionPool, 'connect') - @mock.patch.object(bson.objectid, 'ObjectId') - def test_generate_random_str_with_default_config_len(self, objectid, - mongo_connect): - fake_str = '54b8860d75bfe43b54e84ce7' - conn_mock = mock.MagicMock() - conn_mock.server_info.return_value = {} - mongo_connect.return_value = conn_mock - objectid.return_value = fake_str - cfg.CONF.set_override('db2nosql_resource_id_maxlen', - 512, - group='database') - conn = impl_db2.Connection('db2://user:pwd@localhost:27017/ceilodb2') - rand_str = conn._generate_random_str(512) - str_len = len(str(fake_str)) - expect_str = fake_str * int(512 / str_len) + 'x' * (512 % str_len) - self.assertEqual(expect_str, rand_str) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/storage/test_impl_hbase.py ceilometer-5.0.0~b3/ceilometer/tests/storage/test_impl_hbase.py --- ceilometer-5.0.0~b2/ceilometer/tests/storage/test_impl_hbase.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/storage/test_impl_hbase.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,119 +0,0 @@ -# -# Copyright 2012, 2013 Dell Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for ceilometer/storage/impl_hbase.py - -.. note:: - In order to run the tests against real HBase server set the environment - variable CEILOMETER_TEST_HBASE_URL to point to that HBase instance before - running the tests. Make sure the Thrift server is running on that server. 
- -""" -import mock - - -try: - import happybase # noqa -except ImportError: - import testtools.testcase - raise testtools.testcase.TestSkipped("happybase is needed") - -from ceilometer.alarm.storage import impl_hbase as hbase_alarm -from ceilometer.event.storage import impl_hbase as hbase_event -from ceilometer.storage import impl_hbase as hbase -from ceilometer.tests import base as test_base -from ceilometer.tests import db as tests_db - - -class ConnectionTest(tests_db.TestBase, - tests_db.MixinTestsWithBackendScenarios): - - @tests_db.run_with('hbase') - def test_hbase_connection(self): - - class TestConn(object): - def __init__(self, host, port): - self.netloc = '%s:%s' % (host, port) - - def open(self): - pass - - def get_connection_pool(conf): - return TestConn(conf['host'], conf['port']) - - with mock.patch.object(hbase.Connection, '_get_connection_pool', - side_effect=get_connection_pool): - conn = hbase.Connection('hbase://test_hbase:9090') - self.assertIsInstance(conn.conn_pool, TestConn) - - -class CapabilitiesTest(test_base.BaseTestCase): - # Check the returned capabilities list, which is specific to each DB - # driver - - def test_capabilities(self): - expected_capabilities = { - 'meters': {'query': {'simple': True, - 'metadata': True, - 'complex': False}}, - 'resources': {'query': {'simple': True, - 'metadata': True, - 'complex': False}}, - 'samples': {'query': {'simple': True, - 'metadata': True, - 'complex': False}}, - 'statistics': {'groupby': False, - 'query': {'simple': True, - 'metadata': True, - 'complex': False}, - 'aggregation': {'standard': True, - 'selectable': { - 'max': False, - 'min': False, - 'sum': False, - 'avg': False, - 'count': False, - 'stddev': False, - 'cardinality': False}} - }, - } - - actual_capabilities = hbase.Connection.get_capabilities() - self.assertEqual(expected_capabilities, actual_capabilities) - - def test_alarm_capabilities(self): - expected_capabilities = { - 'alarms': {'query': {'simple': True, - 'complex': False}, - 'history': {'query': {'simple': True, - 'complex': False}}}, - } - - actual_capabilities = hbase_alarm.Connection.get_capabilities() - self.assertEqual(expected_capabilities, actual_capabilities) - - def test_event_capabilities(self): - expected_capabilities = { - 'events': {'query': {'simple': True}}, - } - - actual_capabilities = hbase_event.Connection.get_capabilities() - self.assertEqual(expected_capabilities, actual_capabilities) - - def test_storage_capabilities(self): - expected_capabilities = { - 'storage': {'production_ready': True}, - } - actual_capabilities = hbase.Connection.get_storage_capabilities() - self.assertEqual(expected_capabilities, actual_capabilities) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/storage/test_impl_log.py ceilometer-5.0.0~b3/ceilometer/tests/storage/test_impl_log.py --- ceilometer-5.0.0~b2/ceilometer/tests/storage/test_impl_log.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/storage/test_impl_log.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,29 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for ceilometer/storage/impl_log.py -""" -from oslotest import base - -from ceilometer.storage import impl_log - - -class ConnectionTest(base.BaseTestCase): - @staticmethod - def test_get_connection(): - conn = impl_log.Connection(None) - conn.record_metering_data({'counter_name': 'test', - 'resource_id': __name__, - 'counter_volume': 1, - }) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/storage/test_impl_mongodb.py ceilometer-5.0.0~b3/ceilometer/tests/storage/test_impl_mongodb.py --- ceilometer-5.0.0~b2/ceilometer/tests/storage/test_impl_mongodb.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/storage/test_impl_mongodb.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,168 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for ceilometer/storage/impl_mongodb.py - -.. note:: - In order to run the tests against another MongoDB server set the - environment variable CEILOMETER_TEST_MONGODB_URL to point to a MongoDB - server before running the tests. - -""" - -from ceilometer.alarm.storage import impl_mongodb as impl_mongodb_alarm -from ceilometer.event.storage import impl_mongodb as impl_mongodb_event -from ceilometer.storage import impl_mongodb -from ceilometer.tests import base as test_base -from ceilometer.tests import db as tests_db - - -@tests_db.run_with('mongodb') -class MongoDBConnection(tests_db.TestBase, - tests_db.MixinTestsWithBackendScenarios): - def test_connection_pooling(self): - test_conn = impl_mongodb.Connection(self.db_manager.url) - self.assertEqual(self.conn.conn, test_conn.conn) - - def test_replica_set(self): - url = self.db_manager._url + '?replicaSet=foobar' - conn = impl_mongodb.Connection(url) - self.assertTrue(conn.conn) - - def test_recurse_sort_keys(self): - sort_keys = ['k1', 'k2', 'k3'] - marker = {'k1': 'v1', 'k2': 'v2', 'k3': 'v3'} - flag = '$lt' - ret = impl_mongodb.Connection._recurse_sort_keys(sort_keys=sort_keys, - marker=marker, - flag=flag) - expect = {'k3': {'$lt': 'v3'}, 'k2': {'eq': 'v2'}, 'k1': {'eq': 'v1'}} - self.assertEqual(expect, ret) - - -@tests_db.run_with('mongodb') -class IndexTest(tests_db.TestBase, - tests_db.MixinTestsWithBackendScenarios): - - def _test_ttl_index_absent(self, conn, coll_name, ttl_opt): - # create a fake index and check it is deleted - coll = getattr(conn.db, coll_name) - index_name = '%s_ttl' % coll_name - self.CONF.set_override(ttl_opt, -1, group='database') - conn.upgrade() - self.assertNotIn(index_name, coll.index_information()) - - self.CONF.set_override(ttl_opt, 456789, group='database') - conn.upgrade() - self.assertEqual(456789, - coll.index_information() - [index_name]['expireAfterSeconds']) - - def test_meter_ttl_index_absent(self): - self._test_ttl_index_absent(self.conn, 'meter', - 'metering_time_to_live') - - def test_event_ttl_index_absent(self): - self._test_ttl_index_absent(self.event_conn, 'event', - 
'event_time_to_live') - - def test_alarm_history_ttl_index_absent(self): - self._test_ttl_index_absent(self.alarm_conn, 'alarm_history', - 'alarm_history_time_to_live') - - def _test_ttl_index_present(self, conn, coll_name, ttl_opt): - coll = getattr(conn.db, coll_name) - self.CONF.set_override(ttl_opt, 456789, group='database') - conn.upgrade() - index_name = '%s_ttl' % coll_name - self.assertEqual(456789, - coll.index_information() - [index_name]['expireAfterSeconds']) - - self.CONF.set_override(ttl_opt, -1, group='database') - conn.upgrade() - self.assertNotIn(index_name, coll.index_information()) - - def test_meter_ttl_index_present(self): - self._test_ttl_index_present(self.conn, 'meter', - 'metering_time_to_live') - - def test_event_ttl_index_present(self): - self._test_ttl_index_present(self.event_conn, 'event', - 'event_time_to_live') - - def test_alarm_history_ttl_index_present(self): - self._test_ttl_index_present(self.alarm_conn, 'alarm_history', - 'alarm_history_time_to_live') - - -class CapabilitiesTest(test_base.BaseTestCase): - # Check the returned capabilities list, which is specific to each DB - # driver - - def test_capabilities(self): - expected_capabilities = { - 'meters': {'query': {'simple': True, - 'metadata': True, - 'complex': False}}, - 'resources': {'query': {'simple': True, - 'metadata': True, - 'complex': False}}, - 'samples': {'query': {'simple': True, - 'metadata': True, - 'complex': True}}, - 'statistics': {'groupby': True, - 'query': {'simple': True, - 'metadata': True, - 'complex': False}, - 'aggregation': {'standard': True, - 'selectable': { - 'max': True, - 'min': True, - 'sum': True, - 'avg': True, - 'count': True, - 'stddev': True, - 'cardinality': True}} - }, - } - - actual_capabilities = impl_mongodb.Connection.get_capabilities() - self.assertEqual(expected_capabilities, actual_capabilities) - - def test_event_capabilities(self): - expected_capabilities = { - 'events': {'query': {'simple': True}}, - } - actual_capabilities = impl_mongodb_event.Connection.get_capabilities() - self.assertEqual(expected_capabilities, actual_capabilities) - - def test_alarm_capabilities(self): - expected_capabilities = { - 'alarms': {'query': {'simple': True, - 'complex': True}, - 'history': {'query': {'simple': True, - 'complex': True}}}, - } - - actual_capabilities = impl_mongodb_alarm.Connection.get_capabilities() - self.assertEqual(expected_capabilities, actual_capabilities) - - def test_storage_capabilities(self): - expected_capabilities = { - 'storage': {'production_ready': True}, - } - actual_capabilities = (impl_mongodb.Connection. - get_storage_capabilities()) - self.assertEqual(expected_capabilities, actual_capabilities) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/storage/test_impl_sqlalchemy.py ceilometer-5.0.0~b3/ceilometer/tests/storage/test_impl_sqlalchemy.py --- ceilometer-5.0.0~b2/ceilometer/tests/storage/test_impl_sqlalchemy.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/storage/test_impl_sqlalchemy.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,190 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for ceilometer/storage/impl_sqlalchemy.py - -.. note:: - In order to run the tests against real SQL server set the environment - variable CEILOMETER_TEST_SQL_URL to point to a SQL server before running - the tests. - -""" - -import datetime -from six.moves import reprlib - -import mock -from oslo_utils import timeutils - -from ceilometer.alarm.storage import impl_sqlalchemy as impl_sqla_alarm -from ceilometer.event.storage import impl_sqlalchemy as impl_sqla_event -from ceilometer.event.storage import models -from ceilometer.storage import impl_sqlalchemy -from ceilometer.storage.sqlalchemy import models as sql_models -from ceilometer.tests import base as test_base -from ceilometer.tests import db as tests_db -from ceilometer.tests.storage import test_storage_scenarios as scenarios - - -@tests_db.run_with('sqlite', 'mysql', 'pgsql') -class CeilometerBaseTest(tests_db.TestBase): - - def test_ceilometer_base(self): - base = sql_models.CeilometerBase() - base['key'] = 'value' - self.assertEqual('value', base['key']) - - -@tests_db.run_with('sqlite', 'mysql', 'pgsql') -class EventTypeTest(tests_db.TestBase): - # EventType is a construct specific to sqlalchemy - # Not applicable to other drivers. - - def test_event_type_exists(self): - et1 = self.event_conn._get_or_create_event_type("foo") - self.assertTrue(et1.id >= 0) - et2 = self.event_conn._get_or_create_event_type("foo") - self.assertEqual(et2.id, et1.id) - self.assertEqual(et2.desc, et1.desc) - - def test_event_type_unique(self): - et1 = self.event_conn._get_or_create_event_type("foo") - self.assertTrue(et1.id >= 0) - et2 = self.event_conn._get_or_create_event_type("blah") - self.assertNotEqual(et1.id, et2.id) - self.assertNotEqual(et1.desc, et2.desc) - # Test the method __repr__ returns a string - self.assertTrue(reprlib.repr(et2)) - - -@tests_db.run_with('sqlite', 'mysql', 'pgsql') -class EventTest(tests_db.TestBase): - def _verify_data(self, trait, trait_table): - now = datetime.datetime.utcnow() - ev = models.Event('1', 'name', now, [trait], {}) - self.event_conn.record_events([ev]) - session = self.event_conn._engine_facade.get_session() - t_tables = [sql_models.TraitText, sql_models.TraitFloat, - sql_models.TraitInt, sql_models.TraitDatetime] - for table in t_tables: - if table == trait_table: - self.assertEqual(1, session.query(table).count()) - else: - self.assertEqual(0, session.query(table).count()) - - def test_string_traits(self): - model = models.Trait("Foo", models.Trait.TEXT_TYPE, "my_text") - self._verify_data(model, sql_models.TraitText) - - def test_int_traits(self): - model = models.Trait("Foo", models.Trait.INT_TYPE, 100) - self._verify_data(model, sql_models.TraitInt) - - def test_float_traits(self): - model = models.Trait("Foo", models.Trait.FLOAT_TYPE, 123.456) - self._verify_data(model, sql_models.TraitFloat) - - def test_datetime_traits(self): - now = datetime.datetime.utcnow() - model = models.Trait("Foo", models.Trait.DATETIME_TYPE, now) - self._verify_data(model, sql_models.TraitDatetime) - - def test_event_repr(self): - ev = sql_models.Event('msg_id', None, False, {}) - ev.id = 100 - self.assertTrue(reprlib.repr(ev)) - - -@tests_db.run_with('sqlite', 'mysql', 'pgsql') -class RelationshipTest(scenarios.DBTestBase): - # Note: Do not derive from SQLAlchemyEngineTestBase, since we - # don't want to automatically inherit all the Meter setup. 
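Annotator's note on RelationshipTest, whose body follows: it asserts that clear_expired_metering_data leaves no metadata rows referencing deleted resources. The orphan check it performs, as a standalone SQLAlchemy-style sketch (names follow the removed test; session and model arguments are assumed to be as in that test):

    def orphan_count(session, meta_table, resource_model):
        # Rows in one metadata table whose id matches no live resource
        # internal_id; the removed test expects this count to be zero
        # for MetaText, MetaFloat, MetaBigInt and MetaBool.
        live_ids = session.query(resource_model.internal_id)
        return (session.query(meta_table)
                .filter(~meta_table.id.in_(live_ids))
                .count())
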
- - @mock.patch.object(timeutils, 'utcnow') - def test_clear_metering_data_meta_tables(self, mock_utcnow): - mock_utcnow.return_value = datetime.datetime(2012, 7, 2, 10, 45) - self.conn.clear_expired_metering_data(3 * 60) - - session = self.conn._engine_facade.get_session() - self.assertEqual(5, session.query(sql_models.Sample).count()) - - resource_ids = (session.query(sql_models.Resource.internal_id) - .group_by(sql_models.Resource.internal_id)) - meta_tables = [sql_models.MetaText, sql_models.MetaFloat, - sql_models.MetaBigInt, sql_models.MetaBool] - s = set() - for table in meta_tables: - self.assertEqual(0, (session.query(table) - .filter(~table.id.in_(resource_ids)).count() - )) - s.update(session.query(table.id).all()) - self.assertEqual(set(resource_ids.all()), s) - - -class CapabilitiesTest(test_base.BaseTestCase): - # Check the returned capabilities list, which is specific to each DB - # driver - - def test_capabilities(self): - expected_capabilities = { - 'meters': {'query': {'simple': True, - 'metadata': True, - 'complex': False}}, - 'resources': {'query': {'simple': True, - 'metadata': True, - 'complex': False}}, - 'samples': {'query': {'simple': True, - 'metadata': True, - 'complex': True}}, - 'statistics': {'groupby': True, - 'query': {'simple': True, - 'metadata': True, - 'complex': False}, - 'aggregation': {'standard': True, - 'selectable': { - 'max': True, - 'min': True, - 'sum': True, - 'avg': True, - 'count': True, - 'stddev': True, - 'cardinality': True}} - }, - } - - actual_capabilities = impl_sqlalchemy.Connection.get_capabilities() - self.assertEqual(expected_capabilities, actual_capabilities) - - def test_event_capabilities(self): - expected_capabilities = { - 'events': {'query': {'simple': True}}, - } - actual_capabilities = impl_sqla_event.Connection.get_capabilities() - self.assertEqual(expected_capabilities, actual_capabilities) - - def test_alarm_capabilities(self): - expected_capabilities = { - 'alarms': {'query': {'simple': True, - 'complex': True}, - 'history': {'query': {'simple': True, - 'complex': True}}}, - } - - actual_capabilities = impl_sqla_alarm.Connection.get_capabilities() - self.assertEqual(expected_capabilities, actual_capabilities) - - def test_storage_capabilities(self): - expected_capabilities = { - 'storage': {'production_ready': True}, - } - actual_capabilities = (impl_sqlalchemy. - Connection.get_storage_capabilities()) - self.assertEqual(expected_capabilities, actual_capabilities) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/storage/test_models.py ceilometer-5.0.0~b3/ceilometer/tests/storage/test_models.py --- ceilometer-5.0.0~b2/ceilometer/tests/storage/test_models.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/storage/test_models.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,157 +0,0 @@ -# -# Copyright 2013 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
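Annotator's note on the removed test_models.py below: it pins down the recursive as_dict() behaviour of the storage base model — nested Model values, and Models inside lists, are converted to dicts as well. A self-contained sketch of a model with that behaviour, an assumption mirroring what the tests assert rather than the real ceilometer.storage.base.Model:

    class Model(object):
        def __init__(self, **kwds):
            self.fields = list(kwds)
            for k, v in kwds.items():
                setattr(self, k, v)

        def as_dict(self):
            # Recurse into nested models and lists of models, as
            # test_as_dict_recursive and test_as_dict_recursive_list
            # below expect.
            def conv(value):
                if isinstance(value, Model):
                    return value.as_dict()
                if isinstance(value, list):
                    return [conv(item) for item in value]
                return value
            return dict((f, conv(getattr(self, f))) for f in self.fields)
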
- -import datetime - -from oslotest import base as testbase -import six - -from ceilometer.alarm.storage import models as alarm_models -from ceilometer.event.storage import models as event_models -from ceilometer.storage import base -from ceilometer.storage import models - - -class FakeModel(base.Model): - def __init__(self, arg1, arg2): - base.Model.__init__(self, arg1=arg1, arg2=arg2) - - -class ModelTest(testbase.BaseTestCase): - - def test_create_attributes(self): - m = FakeModel(1, 2) - self.assertEqual(1, m.arg1) - self.assertEqual(2, m.arg2) - - def test_as_dict(self): - m = FakeModel(1, 2) - d = m.as_dict() - self.assertEqual({'arg1': 1, 'arg2': 2}, d) - - def test_as_dict_recursive(self): - m = FakeModel(1, FakeModel('a', 'b')) - d = m.as_dict() - self.assertEqual({'arg1': 1, - 'arg2': {'arg1': 'a', - 'arg2': 'b'}}, - d) - - def test_as_dict_recursive_list(self): - m = FakeModel(1, [FakeModel('a', 'b')]) - d = m.as_dict() - self.assertEqual({'arg1': 1, - 'arg2': [{'arg1': 'a', - 'arg2': 'b'}]}, - d) - - def test_event_repr_no_traits(self): - x = event_models.Event("1", "name", "now", None, {}) - self.assertEqual("", repr(x)) - - def test_get_field_names_of_sample(self): - sample_fields = ["source", "counter_name", "counter_type", - "counter_unit", "counter_volume", "user_id", - "project_id", "resource_id", "timestamp", - "resource_metadata", "message_id", - "message_signature", "recorded_at"] - - self.assertEqual(set(sample_fields), - set(models.Sample.get_field_names())) - - def test_get_field_names_of_alarm(self): - alarm_fields = ["alarm_id", "type", "enabled", "name", "description", - "timestamp", "user_id", "project_id", "state", - "state_timestamp", "ok_actions", "alarm_actions", - "insufficient_data_actions", "repeat_actions", "rule", - "severity", "time_constraints"] - - self.assertEqual(set(alarm_fields), - set(alarm_models.Alarm.get_field_names())) - - def test_get_field_names_of_alarmchange(self): - alarmchange_fields = ["event_id", "alarm_id", "type", "detail", - "user_id", "project_id", "severity", - "on_behalf_of", "timestamp"] - - self.assertEqual(set(alarmchange_fields), - set(alarm_models.AlarmChange.get_field_names())) - - -class TestTraitModel(testbase.BaseTestCase): - - def test_convert_value(self): - v = event_models.Trait.convert_value( - event_models.Trait.INT_TYPE, '10') - self.assertEqual(10, v) - self.assertIsInstance(v, int) - v = event_models.Trait.convert_value( - event_models.Trait.FLOAT_TYPE, '10') - self.assertEqual(10.0, v) - self.assertIsInstance(v, float) - - v = event_models.Trait.convert_value( - event_models.Trait.DATETIME_TYPE, '2013-08-08 21:05:37.123456') - self.assertEqual(datetime.datetime(2013, 8, 8, 21, 5, 37, 123456), v) - self.assertIsInstance(v, datetime.datetime) - - v = event_models.Trait.convert_value( - event_models.Trait.TEXT_TYPE, 10) - self.assertEqual("10", v) - self.assertIsInstance(v, six.text_type) - - -class TestClassModel(testbase.BaseTestCase): - - ALARM = { - 'alarm_id': '503490ea-ee9e-40d6-9cad-93a71583f4b2', - 'enabled': True, - 'type': 'threshold', - 'name': 'alarm-test', - 'description': 'alarm-test-description', - 'timestamp': None, - 'user_id': '5c76351f5fef4f6490d1048355094ca3', - 'project_id': 'd83ed14a457141fc8661b4dcb3fd883d', - 'state': "insufficient data", - 'state_timestamp': None, - 'ok_actions': [], - 'alarm_actions': [], - 'insufficient_data_actions': [], - 'repeat_actions': False, - 'time_constraints': [], - 'rule': { - 'comparison_operator': 'lt', - 'threshold': 34, - 'statistic': 'max', - 
'evaluation_periods': 1, - 'period': 60, - 'meter_name': 'cpu_util', - 'query': [] - } - } - - def test_timestamp_cannot_be_none(self): - self.ALARM['timestamp'] = None - self.ALARM['state_timestamp'] = datetime.datetime.utcnow() - self.assertRaises(TypeError, - alarm_models.Alarm.__init__, - **self.ALARM) - - def test_state_timestamp_cannot_be_none(self): - self.ALARM['timestamp'] = datetime.datetime.utcnow() - self.ALARM['state_timestamp'] = None - self.assertRaises(TypeError, - alarm_models.Alarm.__init__, - **self.ALARM) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/storage/test_pymongo_base.py ceilometer-5.0.0~b3/ceilometer/tests/storage/test_pymongo_base.py --- ceilometer-5.0.0~b2/ceilometer/tests/storage/test_pymongo_base.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/storage/test_pymongo_base.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,153 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests the mongodb and db2 common functionality -""" - -import copy -import datetime - -import mock - -from ceilometer.publisher import utils -from ceilometer import sample -from ceilometer.tests import constants -from ceilometer.tests import db as tests_db -from ceilometer.tests.storage import test_storage_scenarios - - -@tests_db.run_with('mongodb', 'db2') -class CompatibilityTest(test_storage_scenarios.DBTestBase, - tests_db.MixinTestsWithBackendScenarios): - - def prepare_data(self): - def old_record_metering_data(self, data): - received_timestamp = datetime.datetime.utcnow() - self.db.resource.update( - {'_id': data['resource_id']}, - {'$set': {'project_id': data['project_id'], - 'user_id': data['user_id'], - # Current metadata being used and when it was - # last updated. 
- 'timestamp': data['timestamp'], - 'received_timestamp': received_timestamp, - 'metadata': data['resource_metadata'], - 'source': data['source'], - }, - '$addToSet': {'meter': {'counter_name': data['counter_name'], - 'counter_type': data['counter_type'], - }, - }, - }, - upsert=True, - ) - - record = copy.copy(data) - self.db.meter.insert(record) - - # Stubout with the old version DB schema, the one w/o 'counter_unit' - with mock.patch.object(self.conn, 'record_metering_data', - side_effect=old_record_metering_data): - self.counters = [] - c = sample.Sample( - 'volume.size', - 'gauge', - 'GiB', - 5, - 'user-id', - 'project1', - 'resource-id', - timestamp=datetime.datetime(2012, 9, 25, 10, 30), - resource_metadata={'display_name': 'test-volume', - 'tag': 'self.counter', - }, - source='test', - ) - self.counters.append(c) - msg = utils.meter_message_from_counter( - c, - secret='not-so-secret') - self.conn.record_metering_data(self.conn, msg) - - # Create the old format alarm with a dict instead of a - # array for matching_metadata - alarm = dict(alarm_id='0ld-4l3rt', - enabled=True, - name='old-alert', - description='old-alert', - timestamp=constants.MIN_DATETIME, - meter_name='cpu', - user_id='me', - project_id='and-da-boys', - comparison_operator='lt', - threshold=36, - statistic='count', - evaluation_periods=1, - period=60, - state="insufficient data", - state_timestamp=constants.MIN_DATETIME, - ok_actions=[], - alarm_actions=['http://nowhere/alarms'], - insufficient_data_actions=[], - repeat_actions=False, - matching_metadata={'key': 'value'}) - - self.alarm_conn.db.alarm.update( - {'alarm_id': alarm['alarm_id']}, - {'$set': alarm}, - upsert=True) - - alarm['alarm_id'] = 'other-kind-of-0ld-4l3rt' - alarm['name'] = 'other-old-alaert' - alarm['matching_metadata'] = [{'key': 'key1', 'value': 'value1'}, - {'key': 'key2', 'value': 'value2'}] - self.alarm_conn.db.alarm.update( - {'alarm_id': alarm['alarm_id']}, - {'$set': alarm}, - upsert=True) - - def test_alarm_get_old_format_matching_metadata_dict(self): - old = list(self.alarm_conn.get_alarms(name='old-alert'))[0] - self.assertEqual('threshold', old.type) - self.assertEqual([{'field': 'key', - 'op': 'eq', - 'value': 'value', - 'type': 'string'}], - old.rule['query']) - self.assertEqual(60, old.rule['period']) - self.assertEqual('cpu', old.rule['meter_name']) - self.assertEqual(1, old.rule['evaluation_periods']) - self.assertEqual('count', old.rule['statistic']) - self.assertEqual('lt', old.rule['comparison_operator']) - self.assertEqual(36, old.rule['threshold']) - - def test_alarm_get_old_format_matching_metadata_array(self): - old = list(self.alarm_conn.get_alarms(name='other-old-alaert'))[0] - self.assertEqual('threshold', old.type) - self.assertEqual(sorted([{'field': 'key1', - 'op': 'eq', - 'value': 'value1', - 'type': 'string'}, - {'field': 'key2', - 'op': 'eq', - 'value': 'value2', - 'type': 'string'}]), - sorted(old.rule['query']),) - self.assertEqual('cpu', old.rule['meter_name']) - self.assertEqual(60, old.rule['period']) - self.assertEqual(1, old.rule['evaluation_periods']) - self.assertEqual('count', old.rule['statistic']) - self.assertEqual('lt', old.rule['comparison_operator']) - self.assertEqual(36, old.rule['threshold']) - - def test_counter_unit(self): - meters = list(self.conn.get_meters()) - self.assertEqual(1, len(meters)) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/storage/test_storage_scenarios.py ceilometer-5.0.0~b3/ceilometer/tests/storage/test_storage_scenarios.py --- 
ceilometer-5.0.0~b2/ceilometer/tests/storage/test_storage_scenarios.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/storage/test_storage_scenarios.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,3698 +0,0 @@ -# -# Copyright 2013 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" Base classes for DB backend implementation test -""" - -import datetime -import operator - -import mock -from oslo_config import cfg -from oslo_db import api -from oslo_db import exception as dbexc -from oslo_utils import timeutils -import pymongo - -import ceilometer -from ceilometer.alarm.storage import models as alarm_models -from ceilometer.event.storage import models as event_models -from ceilometer.publisher import utils -from ceilometer import sample -from ceilometer import storage -from ceilometer.tests import constants -from ceilometer.tests import db as tests_db - - -class DBTestBase(tests_db.TestBase): - @staticmethod - def create_side_effect(method, exception_type, test_exception): - def side_effect(*args, **kwargs): - if test_exception.pop(): - raise exception_type - else: - return method(*args, **kwargs) - return side_effect - - def create_and_store_sample(self, timestamp=datetime.datetime.utcnow(), - metadata=None, - name='instance', - sample_type=sample.TYPE_CUMULATIVE, unit='', - volume=1, user_id='user-id', - project_id='project-id', - resource_id='resource-id', source=None): - metadata = metadata or {'display_name': 'test-server', - 'tag': 'self.counter'} - s = sample.Sample( - name, sample_type, unit=unit, volume=volume, user_id=user_id, - project_id=project_id, resource_id=resource_id, - timestamp=timestamp, - resource_metadata=metadata, source=source - ) - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret - ) - self.conn.record_metering_data(msg) - return msg - - def setUp(self): - super(DBTestBase, self).setUp() - patcher = mock.patch.object(timeutils, 'utcnow') - self.addCleanup(patcher.stop) - self.mock_utcnow = patcher.start() - self.mock_utcnow.return_value = datetime.datetime(2015, 7, 2, 10, 39) - self.prepare_data() - - def prepare_data(self): - original_timestamps = [(2012, 7, 2, 10, 40), (2012, 7, 2, 10, 41), - (2012, 7, 2, 10, 41), (2012, 7, 2, 10, 42), - (2012, 7, 2, 10, 43)] - - timestamps_for_test_samples_default_order = [(2012, 7, 2, 10, 44), - (2011, 5, 30, 18, 3), - (2012, 12, 1, 1, 25), - (2012, 2, 29, 6, 59), - (2013, 5, 31, 23, 7)] - timestamp_list = (original_timestamps + - timestamps_for_test_samples_default_order) - - self.msgs = [] - - self.msgs.append(self.create_and_store_sample( - timestamp=datetime.datetime(2012, 7, 2, 10, 39), - source='test-1') - ) - self.msgs.append(self.create_and_store_sample( - timestamp=datetime.datetime(*timestamp_list[0]), - source='test-1') - ) - self.msgs.append(self.create_and_store_sample( - timestamp=datetime.datetime(*timestamp_list[1]), - resource_id='resource-id-alternate', - metadata={'display_name': 'test-server', 'tag': 'self.counter2'}, - source='test-2') - ) 
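For orientation, the fixture pattern these scenario tests build on is small: construct a sample.Sample, serialize it with the publisher secret, and hand it to the storage driver. A minimal sketch of that pattern, assuming `conn` and the secret come from the test harness exactly as in create_and_store_sample() above:

import datetime

from ceilometer.publisher import utils
from ceilometer import sample

def store_one(conn, secret):
    # Mirrors create_and_store_sample(): build a Sample, serialize it
    # into the meter message format, then record it via the driver.
    s = sample.Sample(
        'instance', sample.TYPE_CUMULATIVE, unit='', volume=1,
        user_id='user-id', project_id='project-id',
        resource_id='resource-id',
        timestamp=datetime.datetime(2012, 7, 2, 10, 39),
        resource_metadata={'display_name': 'test-server',
                           'tag': 'self.counter'},
        source='test-1',
    )
    msg = utils.meter_message_from_counter(s, secret)
    conn.record_metering_data(msg)
    return msg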
- self.msgs.append(self.create_and_store_sample( - timestamp=datetime.datetime(*timestamp_list[2]), - resource_id='resource-id-alternate', - user_id='user-id-alternate', - metadata={'display_name': 'test-server', 'tag': 'self.counter3'}, - source='test-3') - ) - - start_idx = 3 - end_idx = len(timestamp_list) - - for i, ts in zip(range(start_idx - 1, end_idx - 1), - timestamp_list[start_idx:end_idx]): - self.msgs.append( - self.create_and_store_sample( - timestamp=datetime.datetime(*ts), - user_id='user-id-%s' % i, - project_id='project-id-%s' % i, - resource_id='resource-id-%s' % i, - metadata={ - 'display_name': 'test-server', - 'tag': 'counter-%s' % i - }, - source='test') - ) - - -class ResourceTest(DBTestBase, - tests_db.MixinTestsWithBackendScenarios): - def prepare_data(self): - super(ResourceTest, self).prepare_data() - - self.msgs.append(self.create_and_store_sample( - timestamp=datetime.datetime(2012, 7, 2, 10, 39), - user_id='mongodb_test', - resource_id='resource-id-mongo_bad_key', - project_id='project-id-test', - metadata={'display.name': {'name.$1': 'test-server1', - '$name_2': 'test-server2'}, - 'tag': 'self.counter'}, - source='test-4' - )) - - def test_get_resources(self): - expected_first_sample_timestamp = datetime.datetime(2012, 7, 2, 10, 39) - expected_last_sample_timestamp = datetime.datetime(2012, 7, 2, 10, 40) - msgs_sources = [msg['source'] for msg in self.msgs] - resources = list(self.conn.get_resources()) - self.assertEqual(10, len(resources)) - for resource in resources: - if resource.resource_id != 'resource-id': - continue - self.assertEqual(expected_first_sample_timestamp, - resource.first_sample_timestamp) - self.assertEqual(expected_last_sample_timestamp, - resource.last_sample_timestamp) - self.assertEqual('resource-id', resource.resource_id) - self.assertEqual('project-id', resource.project_id) - self.assertIn(resource.source, msgs_sources) - self.assertEqual('user-id', resource.user_id) - self.assertEqual('test-server', resource.metadata['display_name']) - break - else: - self.fail('Never found resource-id') - - def test_get_resources_start_timestamp(self): - timestamp = datetime.datetime(2012, 7, 2, 10, 42) - expected = set(['resource-id-2', 'resource-id-3', 'resource-id-4', - 'resource-id-6', 'resource-id-8']) - - resources = list(self.conn.get_resources(start_timestamp=timestamp)) - resource_ids = [r.resource_id for r in resources] - self.assertEqual(expected, set(resource_ids)) - - resources = list(self.conn.get_resources(start_timestamp=timestamp, - start_timestamp_op='ge')) - resource_ids = [r.resource_id for r in resources] - self.assertEqual(expected, set(resource_ids)) - - resources = list(self.conn.get_resources(start_timestamp=timestamp, - start_timestamp_op='gt')) - resource_ids = [r.resource_id for r in resources] - expected.remove('resource-id-2') - self.assertEqual(expected, set(resource_ids)) - - def test_get_resources_end_timestamp(self): - timestamp = datetime.datetime(2012, 7, 2, 10, 42) - expected = set(['resource-id', 'resource-id-alternate', - 'resource-id-5', 'resource-id-7', - 'resource-id-mongo_bad_key']) - - resources = list(self.conn.get_resources(end_timestamp=timestamp)) - resource_ids = [r.resource_id for r in resources] - self.assertEqual(expected, set(resource_ids)) - - resources = list(self.conn.get_resources(end_timestamp=timestamp, - end_timestamp_op='lt')) - resource_ids = [r.resource_id for r in resources] - self.assertEqual(expected, set(resource_ids)) - - resources = 
list(self.conn.get_resources(end_timestamp=timestamp, - end_timestamp_op='le')) - resource_ids = [r.resource_id for r in resources] - expected.add('resource-id-2') - self.assertEqual(expected, set(resource_ids)) - - def test_get_resources_both_timestamps(self): - start_ts = datetime.datetime(2012, 7, 2, 10, 42) - end_ts = datetime.datetime(2012, 7, 2, 10, 43) - - resources = list(self.conn.get_resources(start_timestamp=start_ts, - end_timestamp=end_ts)) - resource_ids = [r.resource_id for r in resources] - self.assertEqual(set(['resource-id-2']), set(resource_ids)) - - resources = list(self.conn.get_resources(start_timestamp=start_ts, - end_timestamp=end_ts, - start_timestamp_op='ge', - end_timestamp_op='lt')) - resource_ids = [r.resource_id for r in resources] - self.assertEqual(set(['resource-id-2']), set(resource_ids)) - - resources = list(self.conn.get_resources(start_timestamp=start_ts, - end_timestamp=end_ts, - start_timestamp_op='gt', - end_timestamp_op='lt')) - resource_ids = [r.resource_id for r in resources] - self.assertEqual(0, len(resource_ids)) - - resources = list(self.conn.get_resources(start_timestamp=start_ts, - end_timestamp=end_ts, - start_timestamp_op='gt', - end_timestamp_op='le')) - resource_ids = [r.resource_id for r in resources] - self.assertEqual(set(['resource-id-3']), set(resource_ids)) - - resources = list(self.conn.get_resources(start_timestamp=start_ts, - end_timestamp=end_ts, - start_timestamp_op='ge', - end_timestamp_op='le')) - resource_ids = [r.resource_id for r in resources] - self.assertEqual(set(['resource-id-2', 'resource-id-3']), - set(resource_ids)) - - def test_get_resources_by_source(self): - resources = list(self.conn.get_resources(source='test-1')) - self.assertEqual(1, len(resources)) - ids = set(r.resource_id for r in resources) - self.assertEqual(set(['resource-id']), ids) - - def test_get_resources_by_user(self): - resources = list(self.conn.get_resources(user='user-id')) - self.assertTrue(len(resources) == 2 or len(resources) == 1) - ids = set(r.resource_id for r in resources) - # tolerate storage driver only reporting latest owner of resource - resources_ever_owned_by = set(['resource-id', - 'resource-id-alternate']) - resources_now_owned_by = set(['resource-id']) - self.assertTrue(ids == resources_ever_owned_by or - ids == resources_now_owned_by, - 'unexpected resources: %s' % ids) - - def test_get_resources_by_alternate_user(self): - resources = list(self.conn.get_resources(user='user-id-alternate')) - self.assertEqual(1, len(resources)) - # only a single resource owned by this user ever - self.assertEqual('resource-id-alternate', resources[0].resource_id) - - def test_get_resources_by_project(self): - resources = list(self.conn.get_resources(project='project-id')) - self.assertEqual(2, len(resources)) - ids = set(r.resource_id for r in resources) - self.assertEqual(set(['resource-id', 'resource-id-alternate']), ids) - - def test_get_resources_by_metaquery(self): - q = {'metadata.display_name': 'test-server'} - resources = list(self.conn.get_resources(metaquery=q)) - self.assertEqual(9, len(resources)) - - def test_get_resources_by_metaquery_key_with_dot_in_metadata(self): - q = {'metadata.display.name.$name_2': 'test-server2', - 'metadata.display.name.name.$1': 'test-server1'} - resources = list(self.conn.get_resources(metaquery=q)) - self.assertEqual(1, len(resources)) - - def test_get_resources_by_empty_metaquery(self): - resources = list(self.conn.get_resources(metaquery={})) - self.assertEqual(10, len(resources)) - - def 
test_get_resources_most_recent_metadata_all(self): - resources = self.conn.get_resources() - expected_tags = ['self.counter', 'self.counter3', 'counter-2', - 'counter-3', 'counter-4', 'counter-5', 'counter-6', - 'counter-7', 'counter-8'] - - for resource in resources: - self.assertIn(resource.metadata['tag'], expected_tags) - - def test_get_resources_most_recent_metadata_single(self): - resource = list( - self.conn.get_resources(resource='resource-id-alternate') - )[0] - expected_tag = 'self.counter3' - self.assertEqual(expected_tag, resource.metadata['tag']) - - -class ResourceTestOrdering(DBTestBase, - tests_db.MixinTestsWithBackendScenarios): - def prepare_data(self): - sample_timings = [('resource-id-1', [(2013, 8, 10, 10, 43), - (2013, 8, 10, 10, 44), - (2013, 8, 10, 10, 42), - (2013, 8, 10, 10, 49), - (2013, 8, 10, 10, 47)]), - ('resource-id-2', [(2013, 8, 10, 10, 43), - (2013, 8, 10, 10, 48), - (2013, 8, 10, 10, 42), - (2013, 8, 10, 10, 48), - (2013, 8, 10, 10, 47)]), - ('resource-id-3', [(2013, 8, 10, 10, 43), - (2013, 8, 10, 10, 44), - (2013, 8, 10, 10, 50), - (2013, 8, 10, 10, 49), - (2013, 8, 10, 10, 47)])] - - counter = 0 - for resource, timestamps in sample_timings: - for timestamp in timestamps: - self.create_and_store_sample( - timestamp=datetime.datetime(*timestamp), - resource_id=resource, - user_id=str(counter % 2), - project_id=str(counter % 3), - metadata={ - 'display_name': 'test-server', - 'tag': 'sample-%s' % counter - }, - source='test' - ) - counter += 1 - - def test_get_resources_ordering_all(self): - resources = list(self.conn.get_resources()) - expected = set([ - ('resource-id-1', 'sample-3'), - ('resource-id-2', 'sample-8'), - ('resource-id-3', 'sample-12') - ]) - received = set([(r.resource_id, r.metadata['tag']) for r in resources]) - self.assertEqual(expected, received) - - def test_get_resources_ordering_single(self): - resource = list(self.conn.get_resources(resource='resource-id-2'))[0] - self.assertEqual('resource-id-2', resource.resource_id) - self.assertEqual('sample-8', resource.metadata['tag']) - - -class MeterTest(DBTestBase, - tests_db.MixinTestsWithBackendScenarios): - - def test_get_meters(self): - msgs_sources = [msg['source'] for msg in self.msgs] - results = list(self.conn.get_meters()) - self.assertEqual(9, len(results)) - for meter in results: - self.assertIn(meter.source, msgs_sources) - - def test_get_meters_by_user(self): - results = list(self.conn.get_meters(user='user-id')) - self.assertEqual(1, len(results)) - - def test_get_meters_by_project(self): - results = list(self.conn.get_meters(project='project-id')) - self.assertEqual(2, len(results)) - - def test_get_meters_by_metaquery(self): - q = {'metadata.display_name': 'test-server'} - results = list(self.conn.get_meters(metaquery=q)) - self.assertIsNotEmpty(results) - self.assertEqual(9, len(results)) - - def test_get_meters_by_empty_metaquery(self): - results = list(self.conn.get_meters(metaquery={})) - self.assertEqual(9, len(results)) - - -class RawSampleTest(DBTestBase, - tests_db.MixinTestsWithBackendScenarios): - - def prepare_data(self): - super(RawSampleTest, self).prepare_data() - - self.msgs.append(self.create_and_store_sample( - timestamp=datetime.datetime(2012, 7, 2, 10, 39), - user_id='mongodb_test', - resource_id='resource-id-mongo_bad_key', - project_id='project-id-test', - metadata={'display.name': {'name.$1': 'test-server1', - '$name_2': 'test-server2'}, - 'tag': 'self.counter'}, - source='test-4' - )) - - def test_get_samples_limit_zero(self): - f = 
storage.SampleFilter() - results = list(self.conn.get_samples(f, limit=0)) - self.assertEqual(0, len(results)) - - def test_get_samples_limit(self): - f = storage.SampleFilter() - results = list(self.conn.get_samples(f, limit=3)) - self.assertEqual(3, len(results)) - for result in results: - self.assertTimestampEqual(timeutils.utcnow(), result.recorded_at) - - def test_get_samples_in_default_order(self): - f = storage.SampleFilter() - prev_timestamp = None - for sample_item in self.conn.get_samples(f): - if prev_timestamp is not None: - self.assertTrue(prev_timestamp >= sample_item.timestamp) - prev_timestamp = sample_item.timestamp - - def test_get_samples_by_user(self): - f = storage.SampleFilter(user='user-id') - results = list(self.conn.get_samples(f)) - self.assertEqual(3, len(results)) - for meter in results: - d = meter.as_dict() - self.assertTimestampEqual(timeutils.utcnow(), d['recorded_at']) - del d['recorded_at'] - self.assertIn(d, self.msgs[:3]) - - def test_get_samples_by_user_limit(self): - f = storage.SampleFilter(user='user-id') - results = list(self.conn.get_samples(f, limit=1)) - self.assertEqual(1, len(results)) - - def test_get_samples_by_user_limit_bigger(self): - f = storage.SampleFilter(user='user-id') - results = list(self.conn.get_samples(f, limit=42)) - self.assertEqual(3, len(results)) - - def test_get_samples_by_project(self): - f = storage.SampleFilter(project='project-id') - results = list(self.conn.get_samples(f)) - self.assertIsNotNone(results) - for meter in results: - d = meter.as_dict() - self.assertTimestampEqual(timeutils.utcnow(), d['recorded_at']) - del d['recorded_at'] - self.assertIn(d, self.msgs[:4]) - - def test_get_samples_by_resource(self): - f = storage.SampleFilter(user='user-id', resource='resource-id') - results = list(self.conn.get_samples(f)) - self.assertEqual(2, len(results)) - d = results[1].as_dict() - self.assertEqual(timeutils.utcnow(), d['recorded_at']) - del d['recorded_at'] - self.assertEqual(self.msgs[0], d) - - def test_get_samples_by_metaquery(self): - q = {'metadata.display_name': 'test-server'} - f = storage.SampleFilter(metaquery=q) - results = list(self.conn.get_samples(f)) - self.assertIsNotNone(results) - for meter in results: - d = meter.as_dict() - self.assertTimestampEqual(timeutils.utcnow(), d['recorded_at']) - del d['recorded_at'] - self.assertIn(d, self.msgs) - - def test_get_samples_by_metaquery_key_with_dot_in_metadata(self): - q = {'metadata.display.name.name.$1': 'test-server1', - 'metadata.display.name.$name_2': 'test-server2'} - f = storage.SampleFilter(metaquery=q) - results = list(self.conn.get_samples(f)) - self.assertIsNotNone(results) - self.assertEqual(1, len(results)) - - def test_get_samples_by_start_time(self): - timestamp = datetime.datetime(2012, 7, 2, 10, 41) - f = storage.SampleFilter( - user='user-id', - start_timestamp=timestamp, - ) - - results = list(self.conn.get_samples(f)) - self.assertEqual(1, len(results)) - self.assertEqual(timestamp, results[0].timestamp) - - f.start_timestamp_op = 'ge' - results = list(self.conn.get_samples(f)) - self.assertEqual(1, len(results)) - self.assertEqual(timestamp, results[0].timestamp) - - f.start_timestamp_op = 'gt' - results = list(self.conn.get_samples(f)) - self.assertEqual(0, len(results)) - - def test_get_samples_by_end_time(self): - timestamp = datetime.datetime(2012, 7, 2, 10, 40) - f = storage.SampleFilter( - user='user-id', - end_timestamp=timestamp, - ) - - results = list(self.conn.get_samples(f)) - self.assertEqual(1, len(results)) - - 
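As the assertions in these range tests imply, the default comparison is inclusive at the start (it behaves like 'ge') and exclusive at the end (like 'lt'), and both operators can be overridden per filter. A short sketch, assuming a populated `conn` from the harness:

import datetime

from ceilometer import storage

f = storage.SampleFilter(
    user='user-id',
    start_timestamp=datetime.datetime(2012, 7, 2, 10, 41),
    end_timestamp=datetime.datetime(2012, 7, 2, 10, 43),
)
f.start_timestamp_op = 'gt'  # strictly after 10:41 instead of the default 'ge'
f.end_timestamp_op = 'le'    # include samples at exactly 10:43 instead of 'lt'
results = list(conn.get_samples(f))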
f.end_timestamp_op = 'lt' - results = list(self.conn.get_samples(f)) - self.assertEqual(1, len(results)) - - f.end_timestamp_op = 'le' - results = list(self.conn.get_samples(f)) - self.assertEqual(2, len(results)) - self.assertEqual(datetime.datetime(2012, 7, 2, 10, 39), - results[1].timestamp) - - def test_get_samples_by_both_times(self): - start_ts = datetime.datetime(2012, 7, 2, 10, 42) - end_ts = datetime.datetime(2012, 7, 2, 10, 43) - f = storage.SampleFilter( - start_timestamp=start_ts, - end_timestamp=end_ts, - ) - - results = list(self.conn.get_samples(f)) - self.assertEqual(1, len(results)) - self.assertEqual(start_ts, results[0].timestamp) - - f.start_timestamp_op = 'gt' - f.end_timestamp_op = 'lt' - results = list(self.conn.get_samples(f)) - self.assertEqual(0, len(results)) - - f.start_timestamp_op = 'ge' - f.end_timestamp_op = 'lt' - results = list(self.conn.get_samples(f)) - self.assertEqual(1, len(results)) - self.assertEqual(start_ts, results[0].timestamp) - - f.start_timestamp_op = 'gt' - f.end_timestamp_op = 'le' - results = list(self.conn.get_samples(f)) - self.assertEqual(1, len(results)) - self.assertEqual(end_ts, results[0].timestamp) - - f.start_timestamp_op = 'ge' - f.end_timestamp_op = 'le' - results = list(self.conn.get_samples(f)) - self.assertEqual(2, len(results)) - self.assertEqual(end_ts, results[0].timestamp) - self.assertEqual(start_ts, results[1].timestamp) - - def test_get_samples_by_name(self): - f = storage.SampleFilter(user='user-id', meter='no-such-meter') - results = list(self.conn.get_samples(f)) - self.assertIsEmpty(results) - - def test_get_samples_by_name2(self): - f = storage.SampleFilter(user='user-id', meter='instance') - results = list(self.conn.get_samples(f)) - self.assertIsNotEmpty(results) - - def test_get_samples_by_source(self): - f = storage.SampleFilter(source='test-1') - results = list(self.conn.get_samples(f)) - self.assertEqual(2, len(results)) - - @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'hbase', 'db2') - def test_clear_metering_data(self): - # NOTE(jd) Override this test in MongoDB because our code doesn't clear - # the collections, this is handled by MongoDB TTL feature. - - self.mock_utcnow.return_value = datetime.datetime(2012, 7, 2, 10, 45) - self.conn.clear_expired_metering_data(3 * 60) - f = storage.SampleFilter(meter='instance') - results = list(self.conn.get_samples(f)) - self.assertEqual(5, len(results)) - results = list(self.conn.get_resources()) - self.assertEqual(5, len(results)) - - @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'hbase', 'db2') - def test_clear_metering_data_no_data_to_remove(self): - # NOTE(jd) Override this test in MongoDB because our code doesn't clear - # the collections, this is handled by MongoDB TTL feature. 
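The expiry tests hinge on one call: clear_expired_metering_data() takes a time-to-live in seconds and removes samples older than utcnow() minus that TTL (dropping resources left without samples, where the backend supports it). MongoDB and DB2 are excluded because expiry there is delegated to the database's own TTL mechanism. A sketch, with `conn` assumed from the harness:

# With "now" mocked to 2012-07-02 10:45, a TTL of three minutes
# keeps only samples timestamped 10:42 or later.
conn.clear_expired_metering_data(3 * 60)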
- - self.mock_utcnow.return_value = datetime.datetime(2010, 7, 2, 10, 45) - self.conn.clear_expired_metering_data(3 * 60) - f = storage.SampleFilter(meter='instance') - results = list(self.conn.get_samples(f)) - self.assertEqual(12, len(results)) - results = list(self.conn.get_resources()) - self.assertEqual(10, len(results)) - - @tests_db.run_with('sqlite', 'mysql', 'pgsql') - def test_clear_metering_data_expire_samples_only(self): - - cfg.CONF.set_override('sql_expire_samples_only', True) - self.mock_utcnow.return_value = datetime.datetime(2012, 7, 2, 10, 45) - self.conn.clear_expired_metering_data(4 * 60) - f = storage.SampleFilter(meter='instance') - results = list(self.conn.get_samples(f)) - self.assertEqual(7, len(results)) - results = list(self.conn.get_resources()) - self.assertEqual(6, len(results)) - - @tests_db.run_with('sqlite', 'mysql', 'pgsql') - def test_record_metering_data_retry_success_on_deadlock(self): - raise_deadlock = [False, True] - self.CONF.set_override('max_retries', 2, group='database') - - s = sample.Sample('instance', sample.TYPE_CUMULATIVE, unit='', - volume=1, user_id='user_id', - project_id='project_id', - resource_id='resource_id', - timestamp=datetime.datetime.utcnow(), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.counter'}, - source=None) - - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret - ) - - mock_resource_create = mock.patch.object(self.conn, "_create_resource") - - mock_resource_create.side_effect = self.create_side_effect( - self.conn._create_resource, dbexc.DBDeadlock, raise_deadlock) - with mock.patch.object(api.time, 'sleep') as retry_sleep: - self.conn.record_metering_data(msg) - self.assertEqual(1, retry_sleep.call_count) - - f = storage.SampleFilter(meter='instance') - results = list(self.conn.get_samples(f)) - self.assertEqual(13, len(results)) - - @tests_db.run_with('sqlite', 'mysql', 'pgsql') - def test_record_metering_data_retry_failure_on_deadlock(self): - raise_deadlock = [True, True, True] - self.CONF.set_override('max_retries', 3, group='database') - - s = sample.Sample('instance', sample.TYPE_CUMULATIVE, unit='', - volume=1, user_id='user_id', - project_id='project_id', - resource_id='resource_id', - timestamp=datetime.datetime.utcnow(), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.counter'}, - source=None) - - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret - ) - - mock_resource_create = mock.patch.object(self.conn, "_create_resource") - - mock_resource_create.side_effect = self.create_side_effect( - self.conn._create_resource, dbexc.DBDeadlock, raise_deadlock) - with mock.patch.object(api.time, 'sleep') as retry_sleep: - try: - self.conn.record_metering_data(msg) - except dbexc.DBError as err: - self.assertIn('DBDeadlock', str(type(err))) - self.assertEqual(3, retry_sleep.call_count) - - @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'hbase', 'db2') - def test_clear_metering_data_with_alarms(self): - # NOTE(jd) Override this test in MongoDB because our code doesn't clear - # the collections, this is handled by MongoDB TTL feature. 
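The deadlock tests above exercise oslo.db's retry machinery: _create_resource is patched so that scheduled calls raise DBDeadlock, and record_metering_data() must retry up to database.max_retries times, sleeping between attempts. A condensed sketch of the same wiring in the standard mock.patch.object form, assuming `conn` and `msg` as built above:

import mock
from oslo_db import api
from oslo_db import exception as dbexc

flags = [False, True]  # pop() takes from the end: first call deadlocks,
                       # the retry succeeds
real = conn._create_resource

def flaky(*args, **kwargs):
    # Raise DBDeadlock while the popped flag is truthy, then delegate.
    if flags.pop():
        raise dbexc.DBDeadlock
    return real(*args, **kwargs)

with mock.patch.object(conn, '_create_resource', side_effect=flaky):
    with mock.patch.object(api.time, 'sleep') as retry_sleep:
        conn.record_metering_data(msg)
        assert retry_sleep.call_count == 1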
- alarm = alarm_models.Alarm(alarm_id='r3d', - enabled=True, - type='threshold', - name='red-alert', - description='my red-alert', - timestamp=constants.MIN_DATETIME, - user_id='user-id', - project_id='project-id', - state="insufficient data", - state_timestamp=constants.MIN_DATETIME, - ok_actions=[], - alarm_actions=['http://nowhere/alarms'], - insufficient_data_actions=[], - repeat_actions=False, - time_constraints=[], - rule=dict(comparison_operator='eq', - threshold=36, - statistic='count', - evaluation_periods=1, - period=60, - meter_name='test.one', - query=[{'field': 'key', - 'op': 'eq', - 'value': 'value', - 'type': 'string'}]), - ) - - self.alarm_conn.create_alarm(alarm) - self.mock_utcnow.return_value = datetime.datetime(2012, 7, 2, 10, 45) - self.conn.clear_expired_metering_data(5) - f = storage.SampleFilter(meter='instance') - results = list(self.conn.get_samples(f)) - self.assertEqual(2, len(results)) - results = list(self.conn.get_resources()) - self.assertEqual(2, len(results)) - - -class ComplexSampleQueryTest(DBTestBase, - tests_db.MixinTestsWithBackendScenarios): - def setUp(self): - super(ComplexSampleQueryTest, self).setUp() - self.complex_filter = { - "and": - [{"or": - [{"=": {"resource_id": "resource-id-42"}}, - {"=": {"resource_id": "resource-id-44"}}]}, - {"and": - [{"=": {"counter_name": "cpu_util"}}, - {"and": - [{">": {"counter_volume": 0.4}}, - {"not": {">": {"counter_volume": 0.8}}}]}]}]} - or_expression = [{"=": {"resource_id": "resource-id-42"}}, - {"=": {"resource_id": "resource-id-43"}}, - {"=": {"resource_id": "resource-id-44"}}] - and_expression = [{">": {"counter_volume": 0.4}}, - {"not": {">": {"counter_volume": 0.8}}}] - self.complex_filter_list = {"and": - [{"or": or_expression}, - {"and": - [{"=": {"counter_name": "cpu_util"}}, - {"and": and_expression}]}]} - in_expression = {"in": {"resource_id": ["resource-id-42", - "resource-id-43", - "resource-id-44"]}} - self.complex_filter_in = {"and": - [in_expression, - {"and": - [{"=": {"counter_name": "cpu_util"}}, - {"and": and_expression}]}]} - - def _create_samples(self): - for resource in range(42, 45): - for volume in [0.79, 0.41, 0.4, 0.8, 0.39, 0.81]: - metadata = {'a_string_key': "meta-value" + str(volume), - 'a_float_key': volume, - 'an_int_key': resource, - 'a_bool_key': (resource == 43)} - - self.create_and_store_sample(resource_id="resource-id-%s" - % resource, - metadata=metadata, - name="cpu_util", - volume=volume) - - def test_no_filter(self): - results = list(self.conn.query_samples()) - self.assertEqual(len(self.msgs), len(results)) - for sample_item in results: - d = sample_item.as_dict() - del d['recorded_at'] - self.assertIn(d, self.msgs) - - def test_query_complex_filter_with_regexp(self): - self._create_samples() - complex_regex_filter = {"and": [ - {"=~": {"resource_id": "resource-id.*"}}, - {"=": {"counter_volume": 0.4}}]} - results = list( - self.conn.query_samples(filter_expr=complex_regex_filter)) - self.assertEqual(3, len(results)) - for sample_item in results: - self.assertIn(sample_item.resource_id, - set(["resource-id-42", - "resource-id-43", - "resource-id-44"])) - - def test_query_complex_filter_with_regexp_metadata(self): - self._create_samples() - complex_regex_filter = {"and": [ - {"=~": {"resource_metadata.a_string_key": "meta-value.*"}}, - {"=": {"counter_volume": 0.4}}]} - results = list( - self.conn.query_samples(filter_expr=complex_regex_filter)) - self.assertEqual(3, len(results)) - for sample_item in results: - self.assertEqual("meta-value0.4", - 
sample_item.resource_metadata['a_string_key']) - - def test_no_filter_with_zero_limit(self): - limit = 0 - results = list(self.conn.query_samples(limit=limit)) - self.assertEqual(limit, len(results)) - - def test_no_filter_with_limit(self): - limit = 3 - results = list(self.conn.query_samples(limit=limit)) - self.assertEqual(limit, len(results)) - - def test_query_simple_filter(self): - simple_filter = {"=": {"resource_id": "resource-id-8"}} - results = list(self.conn.query_samples(filter_expr=simple_filter)) - self.assertEqual(1, len(results)) - for sample_item in results: - self.assertEqual("resource-id-8", sample_item.resource_id) - - def test_query_simple_filter_with_not_equal_relation(self): - simple_filter = {"!=": {"resource_id": "resource-id-8"}} - results = list(self.conn.query_samples(filter_expr=simple_filter)) - self.assertEqual(len(self.msgs) - 1, len(results)) - for sample_item in results: - self.assertNotEqual("resource-id-8", sample_item.resource_id) - - def test_query_complex_filter(self): - self._create_samples() - results = list(self.conn.query_samples(filter_expr=( - self.complex_filter))) - self.assertEqual(6, len(results)) - for sample_item in results: - self.assertIn(sample_item.resource_id, - set(["resource-id-42", "resource-id-44"])) - self.assertEqual("cpu_util", sample_item.counter_name) - self.assertTrue(sample_item.counter_volume > 0.4) - self.assertTrue(sample_item.counter_volume <= 0.8) - - def test_query_complex_filter_with_limit(self): - self._create_samples() - limit = 3 - results = list(self.conn.query_samples(filter_expr=self.complex_filter, - limit=limit)) - self.assertEqual(limit, len(results)) - - def test_query_complex_filter_with_simple_orderby(self): - self._create_samples() - expected_volume_order = [0.41, 0.41, 0.79, 0.79, 0.8, 0.8] - orderby = [{"counter_volume": "asc"}] - results = list(self.conn.query_samples(filter_expr=self.complex_filter, - orderby=orderby)) - self.assertEqual(expected_volume_order, - [s.counter_volume for s in results]) - - def test_query_complex_filter_with_complex_orderby(self): - self._create_samples() - expected_volume_order = [0.41, 0.41, 0.79, 0.79, 0.8, 0.8] - expected_resource_id_order = ["resource-id-44", "resource-id-42", - "resource-id-44", "resource-id-42", - "resource-id-44", "resource-id-42"] - - orderby = [{"counter_volume": "asc"}, {"resource_id": "desc"}] - - results = list(self.conn.query_samples(filter_expr=self.complex_filter, - orderby=orderby)) - - self.assertEqual(expected_volume_order, - [s.counter_volume for s in results]) - self.assertEqual(expected_resource_id_order, - [s.resource_id for s in results]) - - def test_query_complex_filter_with_list(self): - self._create_samples() - results = list( - self.conn.query_samples(filter_expr=self.complex_filter_list)) - self.assertEqual(9, len(results)) - for sample_item in results: - self.assertIn(sample_item.resource_id, - set(["resource-id-42", - "resource-id-43", - "resource-id-44"])) - self.assertEqual("cpu_util", sample_item.counter_name) - self.assertTrue(sample_item.counter_volume > 0.4) - self.assertTrue(sample_item.counter_volume <= 0.8) - - def test_query_complex_filter_with_list_with_limit(self): - self._create_samples() - limit = 3 - results = list( - self.conn.query_samples(filter_expr=self.complex_filter_list, - limit=limit)) - self.assertEqual(limit, len(results)) - - def test_query_complex_filter_with_list_with_simple_orderby(self): - self._create_samples() - expected_volume_order = [0.41, 0.41, 0.41, 0.79, 0.79, - 0.79, 0.8, 0.8, 0.8] - 
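The complex-query grammar nests boolean operators ("and", "or", "not") around single-field comparisons ("=", "!=", "<", "<=", ">", "in", and "=~" for a regular-expression match), with metadata reached through dotted "resource_metadata." paths; orderby takes a list of {field: "asc"|"desc"} dicts, and any other direction string raises KeyError. A sketch combining these pieces (populated `conn` assumed):

filter_expr = {
    "and": [
        {"in": {"resource_id": ["resource-id-42", "resource-id-44"]}},
        {"=": {"counter_name": "cpu_util"}},
        # counter_volume in the half-open interval (0.4, 0.8]
        {"and": [{">": {"counter_volume": 0.4}},
                 {"not": {">": {"counter_volume": 0.8}}}]},
    ],
}
orderby = [{"counter_volume": "asc"}, {"resource_id": "desc"}]
results = list(conn.query_samples(filter_expr=filter_expr,
                                  orderby=orderby, limit=10))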
orderby = [{"counter_volume": "asc"}] - results = list( - self.conn.query_samples(filter_expr=self.complex_filter_list, - orderby=orderby)) - self.assertEqual(expected_volume_order, - [s.counter_volume for s in results]) - - def test_query_complex_filterwith_list_with_complex_orderby(self): - self._create_samples() - expected_volume_order = [0.41, 0.41, 0.41, 0.79, 0.79, - 0.79, 0.8, 0.8, 0.8] - expected_resource_id_order = ["resource-id-44", "resource-id-43", - "resource-id-42", "resource-id-44", - "resource-id-43", "resource-id-42", - "resource-id-44", "resource-id-43", - "resource-id-42"] - - orderby = [{"counter_volume": "asc"}, {"resource_id": "desc"}] - - results = list( - self.conn.query_samples(filter_expr=self.complex_filter_list, - orderby=orderby)) - - self.assertEqual(expected_volume_order, - [s.counter_volume for s in results]) - self.assertEqual(expected_resource_id_order, - [s.resource_id for s in results]) - - def test_query_complex_filter_with_wrong_order_in_orderby(self): - self._create_samples() - - orderby = [{"counter_volume": "not valid order"}, - {"resource_id": "desc"}] - - query = lambda: list(self.conn.query_samples(filter_expr=( - self.complex_filter), - orderby=orderby)) - self.assertRaises(KeyError, query) - - def test_query_complex_filter_with_in(self): - self._create_samples() - results = list( - self.conn.query_samples(filter_expr=self.complex_filter_in)) - self.assertEqual(9, len(results)) - for sample_item in results: - self.assertIn(sample_item.resource_id, - set(["resource-id-42", - "resource-id-43", - "resource-id-44"])) - self.assertEqual("cpu_util", sample_item.counter_name) - self.assertTrue(sample_item.counter_volume > 0.4) - self.assertTrue(sample_item.counter_volume <= 0.8) - - def test_query_simple_metadata_filter(self): - self._create_samples() - - filter_expr = {"=": {"resource_metadata.a_bool_key": True}} - - results = list(self.conn.query_samples(filter_expr=filter_expr)) - - self.assertEqual(6, len(results)) - for sample_item in results: - self.assertTrue(sample_item.resource_metadata["a_bool_key"]) - - def test_query_simple_metadata_with_in_op(self): - self._create_samples() - - filter_expr = {"in": {"resource_metadata.an_int_key": [42, 43]}} - - results = list(self.conn.query_samples(filter_expr=filter_expr)) - - self.assertEqual(12, len(results)) - for sample_item in results: - self.assertIn(sample_item.resource_metadata["an_int_key"], - [42, 43]) - - def test_query_complex_metadata_filter(self): - self._create_samples() - subfilter = {"or": [{"=": {"resource_metadata.a_string_key": - "meta-value0.81"}}, - {"<=": {"resource_metadata.a_float_key": 0.41}}]} - filter_expr = {"and": [{">": {"resource_metadata.an_int_key": 42}}, - subfilter]} - - results = list(self.conn.query_samples(filter_expr=filter_expr)) - - self.assertEqual(8, len(results)) - for sample_item in results: - self.assertTrue((sample_item.resource_metadata["a_string_key"] == - "meta-value0.81" or - sample_item.resource_metadata["a_float_key"] <= - 0.41)) - self.assertTrue(sample_item.resource_metadata["an_int_key"] > 42) - - def test_query_mixed_data_and_metadata_filter(self): - self._create_samples() - subfilter = {"or": [{"=": {"resource_metadata.a_string_key": - "meta-value0.81"}}, - {"<=": {"resource_metadata.a_float_key": 0.41}}]} - - filter_expr = {"and": [{"=": {"resource_id": "resource-id-42"}}, - subfilter]} - - results = list(self.conn.query_samples(filter_expr=filter_expr)) - - self.assertEqual(4, len(results)) - for sample_item in results: - 
self.assertTrue((sample_item.resource_metadata["a_string_key"] == - "meta-value0.81" or - sample_item.resource_metadata["a_float_key"] <= - 0.41)) - self.assertEqual("resource-id-42", sample_item.resource_id) - - def test_query_non_existing_metadata_with_result(self): - self._create_samples() - - filter_expr = { - "or": [{"=": {"resource_metadata.a_string_key": - "meta-value0.81"}}, - {"<=": {"resource_metadata.key_not_exists": 0.41}}]} - - results = list(self.conn.query_samples(filter_expr=filter_expr)) - - self.assertEqual(3, len(results)) - for sample_item in results: - self.assertEqual("meta-value0.81", - sample_item.resource_metadata["a_string_key"]) - - def test_query_non_existing_metadata_without_result(self): - self._create_samples() - - filter_expr = { - "or": [{"=": {"resource_metadata.key_not_exists": - "meta-value0.81"}}, - {"<=": {"resource_metadata.key_not_exists": 0.41}}]} - - results = list(self.conn.query_samples(filter_expr=filter_expr)) - self.assertEqual(0, len(results)) - - def test_query_negated_metadata(self): - self._create_samples() - - filter_expr = { - "and": [{"=": {"resource_id": "resource-id-42"}}, - {"not": {"or": [{">": {"resource_metadata.an_int_key": - 43}}, - {"<=": {"resource_metadata.a_float_key": - 0.41}}]}}]} - - results = list(self.conn.query_samples(filter_expr=filter_expr)) - - self.assertEqual(3, len(results)) - for sample_item in results: - self.assertEqual("resource-id-42", sample_item.resource_id) - self.assertTrue(sample_item.resource_metadata["an_int_key"] <= 43) - self.assertTrue(sample_item.resource_metadata["a_float_key"] > - 0.41) - - def test_query_negated_complex_expression(self): - self._create_samples() - filter_expr = { - "and": - [{"=": {"counter_name": "cpu_util"}}, - {"not": - {"or": - [{"or": - [{"=": {"resource_id": "resource-id-42"}}, - {"=": {"resource_id": "resource-id-44"}}]}, - {"and": - [{">": {"counter_volume": 0.4}}, - {"<": {"counter_volume": 0.8}}]}]}}]} - - results = list(self.conn.query_samples(filter_expr=filter_expr)) - - self.assertEqual(4, len(results)) - for sample_item in results: - self.assertEqual("resource-id-43", sample_item.resource_id) - self.assertIn(sample_item.counter_volume, [0.39, 0.4, 0.8, 0.81]) - self.assertEqual("cpu_util", sample_item.counter_name) - - def test_query_with_double_negation(self): - self._create_samples() - filter_expr = { - "and": - [{"=": {"counter_name": "cpu_util"}}, - {"not": - {"or": - [{"or": - [{"=": {"resource_id": "resource-id-42"}}, - {"=": {"resource_id": "resource-id-44"}}]}, - {"and": [{"not": {"<=": {"counter_volume": 0.4}}}, - {"<": {"counter_volume": 0.8}}]}]}}]} - - results = list(self.conn.query_samples(filter_expr=filter_expr)) - - self.assertEqual(4, len(results)) - for sample_item in results: - self.assertEqual("resource-id-43", sample_item.resource_id) - self.assertIn(sample_item.counter_volume, [0.39, 0.4, 0.8, 0.81]) - self.assertEqual("cpu_util", sample_item.counter_name) - - def test_query_negate_not_equal(self): - self._create_samples() - filter_expr = {"not": {"!=": {"resource_id": "resource-id-43"}}} - - results = list(self.conn.query_samples(filter_expr=filter_expr)) - - self.assertEqual(6, len(results)) - for sample_item in results: - self.assertEqual("resource-id-43", sample_item.resource_id) - - def test_query_negated_in_op(self): - self._create_samples() - filter_expr = { - "and": [{"not": {"in": {"counter_volume": [0.39, 0.4, 0.79]}}}, - {"=": {"resource_id": "resource-id-42"}}]} - - results = 
list(self.conn.query_samples(filter_expr=filter_expr)) - - self.assertEqual(3, len(results)) - for sample_item in results: - self.assertIn(sample_item.counter_volume, - [0.41, 0.8, 0.81]) - - -class StatisticsTest(DBTestBase, - tests_db.MixinTestsWithBackendScenarios): - - def prepare_data(self): - for i in range(3): - c = sample.Sample( - 'volume.size', - 'gauge', - 'GiB', - 5 + i, - 'user-id', - 'project1', - 'resource-id', - timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), - resource_metadata={'display_name': 'test-volume', - 'tag': 'self.counter', - }, - source='test', - ) - msg = utils.meter_message_from_counter( - c, - secret='not-so-secret', - ) - self.conn.record_metering_data(msg) - for i in range(3): - c = sample.Sample( - 'volume.size', - 'gauge', - 'GiB', - 8 + i, - 'user-5', - 'project2', - 'resource-6', - timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), - resource_metadata={'display_name': 'test-volume', - 'tag': 'self.counter', - }, - source='test', - ) - msg = utils.meter_message_from_counter( - c, - secret='not-so-secret', - ) - self.conn.record_metering_data(msg) - for i in range(3): - c = sample.Sample( - 'memory', - 'gauge', - 'MB', - 8 + i, - 'user-5', - 'project2', - 'resource-6', - timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), - resource_metadata={}, - source='test', - ) - msg = utils.meter_message_from_counter( - c, - secret='not-so-secret', - ) - self.conn.record_metering_data(msg) - - def test_by_meter(self): - f = storage.SampleFilter( - meter='memory' - ) - results = list(self.conn.get_meter_statistics(f))[0] - self.assertEqual((datetime.datetime(2012, 9, 25, 12, 32) - - datetime.datetime(2012, 9, 25, 10, 30)).seconds, - results.duration) - self.assertEqual(3, results.count) - self.assertEqual('MB', results.unit) - self.assertEqual(8, results.min) - self.assertEqual(10, results.max) - self.assertEqual(27, results.sum) - self.assertEqual(9, results.avg) - self.assertEqual(datetime.datetime(2012, 9, 25, 10, 30), - results.period_start) - self.assertEqual(datetime.datetime(2012, 9, 25, 12, 32), - results.period_end) - - def test_by_user(self): - f = storage.SampleFilter( - user='user-5', - meter='volume.size', - ) - results = list(self.conn.get_meter_statistics(f))[0] - self.assertEqual((datetime.datetime(2012, 9, 25, 12, 32) - - datetime.datetime(2012, 9, 25, 10, 30)).seconds, - results.duration) - self.assertEqual(3, results.count) - self.assertEqual('GiB', results.unit) - self.assertEqual(8, results.min) - self.assertEqual(10, results.max) - self.assertEqual(27, results.sum) - self.assertEqual(9, results.avg) - - def test_no_period_in_query(self): - f = storage.SampleFilter( - user='user-5', - meter='volume.size', - ) - results = list(self.conn.get_meter_statistics(f))[0] - self.assertEqual(0, results.period) - - def test_period_is_int(self): - f = storage.SampleFilter( - meter='volume.size', - ) - results = list(self.conn.get_meter_statistics(f))[0] - self.assertIs(int, type(results.period)) - self.assertEqual(6, results.count) - - def test_by_user_period(self): - f = storage.SampleFilter( - user='user-5', - meter='volume.size', - start_timestamp='2012-09-25T10:28:00', - ) - results = list(self.conn.get_meter_statistics(f, period=7200)) - self.assertEqual(2, len(results)) - self.assertEqual(set([datetime.datetime(2012, 9, 25, 10, 28), - datetime.datetime(2012, 9, 25, 12, 28)]), - set(r.period_start for r in results)) - self.assertEqual(set([datetime.datetime(2012, 9, 25, 12, 28), - datetime.datetime(2012, 9, 25, 14, 28)]), - 
set(r.period_end for r in results)) - r = results[0] - self.assertEqual(datetime.datetime(2012, 9, 25, 10, 28), - r.period_start) - self.assertEqual(2, r.count) - self.assertEqual('GiB', r.unit) - self.assertEqual(8.5, r.avg) - self.assertEqual(8, r.min) - self.assertEqual(9, r.max) - self.assertEqual(17, r.sum) - self.assertEqual(7200, r.period) - self.assertIsInstance(r.period, int) - expected_end = r.period_start + datetime.timedelta(seconds=7200) - self.assertEqual(expected_end, r.period_end) - self.assertEqual(3660, r.duration) - self.assertEqual(datetime.datetime(2012, 9, 25, 10, 30), - r.duration_start) - self.assertEqual(datetime.datetime(2012, 9, 25, 11, 31), - r.duration_end) - - def test_by_user_period_with_timezone(self): - dates = [ - '2012-09-25T00:28:00-10:00', - '2012-09-25T01:28:00-09:00', - '2012-09-25T02:28:00-08:00', - '2012-09-25T03:28:00-07:00', - '2012-09-25T04:28:00-06:00', - '2012-09-25T05:28:00-05:00', - '2012-09-25T06:28:00-04:00', - '2012-09-25T07:28:00-03:00', - '2012-09-25T08:28:00-02:00', - '2012-09-25T09:28:00-01:00', - '2012-09-25T10:28:00Z', - '2012-09-25T11:28:00+01:00', - '2012-09-25T12:28:00+02:00', - '2012-09-25T13:28:00+03:00', - '2012-09-25T14:28:00+04:00', - '2012-09-25T15:28:00+05:00', - '2012-09-25T16:28:00+06:00', - '2012-09-25T17:28:00+07:00', - '2012-09-25T18:28:00+08:00', - '2012-09-25T19:28:00+09:00', - '2012-09-25T20:28:00+10:00', - '2012-09-25T21:28:00+11:00', - '2012-09-25T22:28:00+12:00', - ] - for date in dates: - f = storage.SampleFilter( - user='user-5', - meter='volume.size', - start_timestamp=date - ) - results = list(self.conn.get_meter_statistics(f, period=7200)) - self.assertEqual(2, len(results)) - self.assertEqual(set([datetime.datetime(2012, 9, 25, 10, 28), - datetime.datetime(2012, 9, 25, 12, 28)]), - set(r.period_start for r in results)) - self.assertEqual(set([datetime.datetime(2012, 9, 25, 12, 28), - datetime.datetime(2012, 9, 25, 14, 28)]), - set(r.period_end for r in results)) - - def test_by_user_period_start_end(self): - f = storage.SampleFilter( - user='user-5', - meter='volume.size', - start_timestamp='2012-09-25T10:28:00', - end_timestamp='2012-09-25T11:28:00', - ) - results = list(self.conn.get_meter_statistics(f, period=1800)) - self.assertEqual(1, len(results)) - r = results[0] - self.assertEqual(datetime.datetime(2012, 9, 25, 10, 28), - r.period_start) - self.assertEqual(1, r.count) - self.assertEqual('GiB', r.unit) - self.assertEqual(8, r.avg) - self.assertEqual(8, r.min) - self.assertEqual(8, r.max) - self.assertEqual(8, r.sum) - self.assertEqual(1800, r.period) - self.assertEqual(r.period_start + datetime.timedelta(seconds=1800), - r.period_end) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2012, 9, 25, 10, 30), - r.duration_start) - self.assertEqual(datetime.datetime(2012, 9, 25, 10, 30), - r.duration_end) - - def test_by_project(self): - f = storage.SampleFilter( - meter='volume.size', - resource='resource-id', - start_timestamp='2012-09-25T11:30:00', - end_timestamp='2012-09-25T11:32:00', - ) - results = list(self.conn.get_meter_statistics(f))[0] - self.assertEqual(0, results.duration) - self.assertEqual(1, results.count) - self.assertEqual('GiB', results.unit) - self.assertEqual(6, results.min) - self.assertEqual(6, results.max) - self.assertEqual(6, results.sum) - self.assertEqual(6, results.avg) - - def test_one_resource(self): - f = storage.SampleFilter( - user='user-id', - meter='volume.size', - ) - results = list(self.conn.get_meter_statistics(f))[0] - 
self.assertEqual((datetime.datetime(2012, 9, 25, 12, 32) - - datetime.datetime(2012, 9, 25, 10, 30)).seconds, - results.duration) - self.assertEqual(3, results.count) - self.assertEqual('GiB', results.unit) - self.assertEqual(5, results.min) - self.assertEqual(7, results.max) - self.assertEqual(18, results.sum) - self.assertEqual(6, results.avg) - - def test_with_no_sample(self): - f = storage.SampleFilter( - user='user-not-exists', - meter='volume.size', - ) - results = list(self.conn.get_meter_statistics(f, period=1800)) - self.assertEqual([], results) - - -class StatisticsGroupByTest(DBTestBase, - tests_db.MixinTestsWithBackendScenarios): - - def prepare_data(self): - test_sample_data = ( - {'volume': 2, 'user': 'user-1', 'project': 'project-1', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10), - 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1', - 'source': 'source-2', 'metadata_instance_type': '84'}, - {'volume': 2, 'user': 'user-1', 'project': 'project-2', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 15, 37), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-1', - 'source': 'source-2', 'metadata_instance_type': '83'}, - {'volume': 1, 'user': 'user-2', 'project': 'project-1', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 11), - 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', - 'source': 'source-1', 'metadata_instance_type': '82'}, - {'volume': 1, 'user': 'user-2', 'project': 'project-1', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source-1', 'metadata_instance_type': '82'}, - {'volume': 2, 'user': 'user-2', 'project': 'project-1', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 14, 59), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source-1', 'metadata_instance_type': '84'}, - {'volume': 4, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source-1', 'metadata_instance_type': '82'}, - {'volume': 4, 'user': 'user-3', 'project': 'project-1', - 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22), - 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', - 'source': 'source-3', 'metadata_instance_type': '83'}, - ) - - for test_sample in test_sample_data: - c = sample.Sample( - 'instance', - sample.TYPE_CUMULATIVE, - unit='s', - volume=test_sample['volume'], - user_id=test_sample['user'], - project_id=test_sample['project'], - resource_id=test_sample['resource'], - timestamp=datetime.datetime(*test_sample['timestamp']), - resource_metadata={'flavor': test_sample['metadata_flavor'], - 'event': test_sample['metadata_event'], - 'instance_type': - test_sample['metadata_instance_type']}, - source=test_sample['source'], - ) - msg = utils.meter_message_from_counter( - c, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def test_group_by_user(self): - f = storage.SampleFilter( - meter='instance', - ) - results = list(self.conn.get_meter_statistics(f, groupby=['user_id'])) - self.assertEqual(3, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['user_id']), groupby_keys_set) - self.assertEqual(set(['user-1', 'user-2', 'user-3']), 
groupby_vals_set) - - for r in results: - if r.groupby == {'user_id': 'user-1'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'user_id': 'user-2'}: - self.assertEqual(4, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(4, r.max) - self.assertEqual(8, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'user_id': 'user-3'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - - def test_group_by_resource(self): - f = storage.SampleFilter( - meter='instance', - ) - results = list(self.conn.get_meter_statistics(f, - groupby=['resource_id'])) - self.assertEqual(3, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['resource_id']), groupby_keys_set) - self.assertEqual(set(['resource-1', 'resource-2', 'resource-3']), - groupby_vals_set) - for r in results: - if r.groupby == {'resource_id': 'resource-1'}: - self.assertEqual(3, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'resource_id': 'resource-2'}: - self.assertEqual(3, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(4, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'resource_id': 'resource-3'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - - def test_group_by_project(self): - f = storage.SampleFilter( - meter='instance', - ) - results = list(self.conn.get_meter_statistics(f, - groupby=['project_id'])) - self.assertEqual(2, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - - for r in results: - if r.groupby == {'project_id': 'project-1'}: - self.assertEqual(5, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(4, r.max) - self.assertEqual(10, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'project_id': 'project-2'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(4, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(3, r.avg) - - def test_group_by_source(self): - f = storage.SampleFilter( - meter='instance', - ) - results = list(self.conn.get_meter_statistics(f, groupby=['source'])) - self.assertEqual(3, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['source']), groupby_keys_set) - self.assertEqual(set(['source-1', 'source-2', 'source-3']), - groupby_vals_set) - 
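get_meter_statistics() buckets the matched samples by the requested keys and yields one Statistics object per bucket, each carrying a `groupby` dict alongside count/min/max/sum/avg and the unit; keys may be regular sample fields like those above or, as the later cases show, dotted "resource_metadata." paths, while an unknown key raises ceilometer.NotImplementedError. A sketch (populated `conn` assumed):

from ceilometer import storage

f = storage.SampleFilter(meter='instance')
for r in conn.get_meter_statistics(f, groupby=['project_id']):
    # r.groupby identifies the bucket, e.g. {'project_id': 'project-1'}
    print(r.groupby, r.count, r.min, r.max, r.sum, r.avg, r.unit)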
- for r in results: - if r.groupby == {'source': 'source-1'}: - self.assertEqual(4, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(4, r.max) - self.assertEqual(8, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'source': 'source-2'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'source': 'source-3'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - - def test_group_by_unknown_field(self): - f = storage.SampleFilter( - meter='instance', - ) - # NOTE(terriyu): The MongoDB get_meter_statistics() returns a list - # whereas the SQLAlchemy get_meter_statistics() returns a generator. - # You have to apply list() to the SQLAlchemy generator to get it to - # throw an error. The MongoDB get_meter_statistics() will throw an - # error before list() is called. By using lambda, we can cover both - # MongoDB and SQLAlchemy in a single test. - self.assertRaises( - ceilometer.NotImplementedError, - lambda: list(self.conn.get_meter_statistics(f, groupby=['wtf'])) - ) - - def test_group_by_metadata(self): - # This test checks grouping by a single metadata field - # (now only resource_metadata.instance_type is available). - f = storage.SampleFilter( - meter='instance', - ) - results = list( - self.conn.get_meter_statistics( - f, groupby=['resource_metadata.instance_type'])) - self.assertEqual(3, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['resource_metadata.instance_type']), - groupby_keys_set) - self.assertEqual(set(['82', '83', '84']), groupby_vals_set) - - for r in results: - if r.groupby == {'resource_metadata.instance_type': '82'}: - self.assertEqual(3, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(4, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'resource_metadata.instance_type': '83'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(4, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(3, r.avg) - elif r.groupby == {'resource_metadata.instance_type': '84'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(2, r.avg) - - def test_group_by_multiple_regular(self): - f = storage.SampleFilter( - meter='instance', - ) - results = list(self.conn.get_meter_statistics(f, - groupby=['user_id', - 'resource_id'])) - self.assertEqual(4, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['user_id', 'resource_id']), groupby_keys_set) - self.assertEqual(set(['user-1', 'user-2', 'user-3', 'resource-1', - 'resource-2', 'resource-3']), - groupby_vals_set) - - for r in results: - if r.groupby == {'user_id': 'user-1', 'resource_id': 'resource-1'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - 
self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'user_id': 'user-2', - 'resource_id': 'resource-1'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'user_id': 'user-2', - 'resource_id': 'resource-2'}: - self.assertEqual(3, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(4, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'user_id': 'user-3', - 'resource_id': 'resource-3'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - else: - self.assertNotEqual({'user_id': 'user-1', - 'resource_id': 'resource-2'}, - r.groupby) - self.assertNotEqual({'user_id': 'user-1', - 'resource_id': 'resource-3'}, - r.groupby) - self.assertNotEqual({'user_id': 'user-2', - 'resource_id': 'resource-3'}, - r.groupby) - self.assertNotEqual({'user_id': 'user-3', - 'resource_id': 'resource-1'}, - r.groupby) - self.assertNotEqual({'user_id': 'user-3', - 'resource_id': 'resource-2'}, - r.groupby, ) - - def test_group_by_multiple_metadata(self): - # TODO(terriyu): test_group_by_multiple_metadata needs to be - # implemented. - # This test should check grouping by multiple metadata fields. - pass - - def test_group_by_multiple_regular_metadata(self): - # This test checks grouping by a combination of regular and - # metadata fields. - f = storage.SampleFilter( - meter='instance', - ) - results = list( - self.conn.get_meter_statistics( - f, groupby=['user_id', 'resource_metadata.instance_type'])) - self.assertEqual(5, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['user_id', 'resource_metadata.instance_type']), - groupby_keys_set) - self.assertEqual(set(['user-1', 'user-2', 'user-3', '82', - '83', '84']), - groupby_vals_set) - - for r in results: - if r.groupby == {'user_id': 'user-1', - 'resource_metadata.instance_type': '83'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'user_id': 'user-1', - 'resource_metadata.instance_type': '84'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'user_id': 'user-2', - 'resource_metadata.instance_type': '82'}: - self.assertEqual(3, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(4, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'user_id': 'user-2', - 'resource_metadata.instance_type': '84'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'user_id': 'user-3', - 'resource_metadata.instance_type': '83'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, 
r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - else: - self.assertNotEqual({'user_id': 'user-1', - 'resource_metadata.instance_type': '82'}, - r.groupby) - self.assertNotEqual({'user_id': 'user-2', - 'resource_metadata.instance_type': '83'}, - r.groupby) - self.assertNotEqual({'user_id': 'user-3', - 'resource_metadata.instance_type': '82'}, - r.groupby) - self.assertNotEqual({'user_id': 'user-3', - 'resource_metadata.instance_type': '84'}, - r.groupby) - - def test_group_by_with_query_filter(self): - f = storage.SampleFilter( - meter='instance', - project='project-1', - ) - results = list(self.conn.get_meter_statistics( - f, - groupby=['resource_id'])) - self.assertEqual(3, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['resource_id']), groupby_keys_set) - self.assertEqual(set(['resource-1', 'resource-2', 'resource-3']), - groupby_vals_set) - - for r in results: - if r.groupby == {'resource_id': 'resource-1'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'resource_id': 'resource-2'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(1, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(1, r.avg) - elif r.groupby == {'resource_id': 'resource-3'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - - def test_group_by_metadata_with_query_filter(self): - # This test checks grouping by a metadata field in combination - # with a query filter. 
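
The removed tests around this point all drive the storage layer's grouped statistics through the same call pattern. For orientation, a minimal sketch of that pattern (illustrative only; `conn` stands in for an already-configured storage connection such as the tests' self.conn):

    # Sketch, assuming a storage connection `conn` is available.
    from ceilometer import storage

    f = storage.SampleFilter(meter='instance', project='project-1')
    stats = list(conn.get_meter_statistics(
        f, groupby=['resource_metadata.instance_type']))
    for r in stats:
        # Each result carries the group key plus the usual aggregates.
        print(r.groupby, r.count, r.min, r.max, r.sum, r.avg, r.unit)
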
-        f = storage.SampleFilter(
-            meter='instance',
-            project='project-1',
-        )
-        results = list(self.conn.get_meter_statistics(
-            f,
-            groupby=['resource_metadata.instance_type']))
-        self.assertEqual(3, len(results))
-        groupby_list = [r.groupby for r in results]
-        groupby_keys_set = set(x for sub_dict in groupby_list
-                               for x in sub_dict.keys())
-        groupby_vals_set = set(x for sub_dict in groupby_list
-                               for x in sub_dict.values())
-        self.assertEqual(set(['resource_metadata.instance_type']),
-                         groupby_keys_set)
-        self.assertEqual(set(['82', '83', '84']),
-                         groupby_vals_set)
-
-        for r in results:
-            if r.groupby == {'resource_metadata.instance_type': '82'}:
-                self.assertEqual(2, r.count)
-                self.assertEqual('s', r.unit)
-                self.assertEqual(1, r.min)
-                self.assertEqual(1, r.max)
-                self.assertEqual(2, r.sum)
-                self.assertEqual(1, r.avg)
-            elif r.groupby == {'resource_metadata.instance_type': '83'}:
-                self.assertEqual(1, r.count)
-                self.assertEqual('s', r.unit)
-                self.assertEqual(4, r.min)
-                self.assertEqual(4, r.max)
-                self.assertEqual(4, r.sum)
-                self.assertEqual(4, r.avg)
-            elif r.groupby == {'resource_metadata.instance_type': '84'}:
-                self.assertEqual(2, r.count)
-                self.assertEqual('s', r.unit)
-                self.assertEqual(2, r.min)
-                self.assertEqual(2, r.max)
-                self.assertEqual(4, r.sum)
-                self.assertEqual(2, r.avg)
-
-    def test_group_by_with_query_filter_multiple(self):
-        f = storage.SampleFilter(
-            meter='instance',
-            user='user-2',
-            source='source-1',
-        )
-        results = list(self.conn.get_meter_statistics(
-            f,
-            groupby=['project_id', 'resource_id']))
-        self.assertEqual(3, len(results))
-        groupby_list = [r.groupby for r in results]
-        groupby_keys_set = set(x for sub_dict in groupby_list
-                               for x in sub_dict.keys())
-        groupby_vals_set = set(x for sub_dict in groupby_list
-                               for x in sub_dict.values())
-        self.assertEqual(set(['project_id', 'resource_id']), groupby_keys_set)
-        self.assertEqual(set(['project-1', 'project-2',
-                              'resource-1', 'resource-2']),
-                         groupby_vals_set)
-
-        for r in results:
-            if r.groupby == {'project_id': 'project-1',
-                             'resource_id': 'resource-1'}:
-                self.assertEqual(1, r.count)
-                self.assertEqual('s', r.unit)
-                self.assertEqual(2, r.min)
-                self.assertEqual(2, r.max)
-                self.assertEqual(2, r.sum)
-                self.assertEqual(2, r.avg)
-            elif r.groupby == {'project_id': 'project-1',
-                               'resource_id': 'resource-2'}:
-                self.assertEqual(2, r.count)
-                self.assertEqual('s', r.unit)
-                self.assertEqual(1, r.min)
-                self.assertEqual(1, r.max)
-                self.assertEqual(2, r.sum)
-                self.assertEqual(1, r.avg)
-            elif r.groupby == {'project_id': 'project-2',
-                               'resource_id': 'resource-2'}:
-                self.assertEqual(1, r.count)
-                self.assertEqual('s', r.unit)
-                self.assertEqual(4, r.min)
-                self.assertEqual(4, r.max)
-                self.assertEqual(4, r.sum)
-                self.assertEqual(4, r.avg)
-            else:
-                self.assertNotEqual({'project_id': 'project-2',
-                                     'resource_id': 'resource-1'},
-                                    r.groupby)
-
-    def test_group_by_metadata_with_query_filter_multiple(self):
-        # TODO(terriyu): test_group_by_metadata_with_query_filter_multiple
-        # needs to be implemented.
-        # This test should check grouping by multiple metadata fields in
-        # combination with a query filter.
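
The TODO above leaves grouping by multiple metadata fields unimplemented (as does the earlier test_group_by_multiple_metadata stub). A hedged sketch of what such a test might look like, patterned on the neighbouring tests; the second metadata key and the assertions are assumptions, since the fixture data only populates resource_metadata.instance_type:

    # Illustrative shape only; 'resource_metadata.flavor_name' is a
    # hypothetical second key, and real expected values would have to
    # be derived from the fixture data.
    def test_group_by_multiple_metadata(self):
        f = storage.SampleFilter(meter='instance', project='project-1')
        results = list(self.conn.get_meter_statistics(
            f, groupby=['resource_metadata.instance_type',
                        'resource_metadata.flavor_name']))
        for r in results:
            self.assertEqual(set(['resource_metadata.instance_type',
                                  'resource_metadata.flavor_name']),
                             set(r.groupby.keys()))
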
- pass - - def test_group_by_with_period(self): - f = storage.SampleFilter( - meter='instance', - ) - results = list(self.conn.get_meter_statistics(f, - period=7200, - groupby=['project_id'])) - self.assertEqual(4, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - period_start_set = set([r.period_start for r in results]) - period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 11), - datetime.datetime(2013, 8, 1, 14, 11), - datetime.datetime(2013, 8, 1, 16, 11)]) - self.assertEqual(period_start_valid, period_start_set) - - for r in results: - if (r.groupby == {'project_id': 'project-1'} and - r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): - self.assertEqual(3, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(4, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(2, r.avg) - self.assertEqual(4260, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), - r.period_end) - elif (r.groupby == {'project_id': 'project-1'} and - r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(2, r.avg) - self.assertEqual(4260, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), - r.period_end) - elif (r.groupby == {'project_id': 'project-2'} and - r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), - r.period_end) - elif (r.groupby == {'project_id': 'project-2'} and - r.period_start == datetime.datetime(2013, 8, 1, 16, 11)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 18, 11), - r.period_end) - else: - self.assertNotEqual([{'project_id': 'project-1'}, - datetime.datetime(2013, 8, 1, 16, 11)], - [r.groupby, r.period_start]) - self.assertNotEqual([{'project_id': 'project-2'}, - datetime.datetime(2013, 8, 1, 10, 11)], - [r.groupby, r.period_start]) - - def test_group_by_metadata_with_period(self): - # This test checks 
grouping by metadata fields in combination - # with period grouping. - f = storage.SampleFilter( - meter='instance') - - results = list(self.conn.get_meter_statistics(f, period=7200, - groupby=['resource_metadata.instance_type'])) - self.assertEqual(5, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['resource_metadata.instance_type']), - groupby_keys_set) - self.assertEqual(set(['82', '83', '84']), groupby_vals_set) - period_start_set = set([r.period_start for r in results]) - period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 11), - datetime.datetime(2013, 8, 1, 14, 11), - datetime.datetime(2013, 8, 1, 16, 11)]) - self.assertEqual(period_start_valid, period_start_set) - - for r in results: - if (r.groupby == {'resource_metadata.instance_type': '82'} and - r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(1, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(1, r.avg) - self.assertEqual(1740, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 10, 40), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), - r.period_end) - elif (r.groupby == {'resource_metadata.instance_type': '82'} and - r.period_start == datetime.datetime(2013, 8, 1, 16, 11)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 18, 11), - r.period_end) - elif (r.groupby == {'resource_metadata.instance_type': '83'} and - r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), - r.period_end) - elif (r.groupby == {'resource_metadata.instance_type': '83'} and - r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), - r.period_end) - elif (r.groupby == {'resource_metadata.instance_type': '84'} and - r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - 
self.assertEqual(2, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(2, r.avg) - self.assertEqual(4260, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), - r.period_end) - else: - self.assertNotEqual([{'resource_metadata.instance_type': '82'}, - datetime.datetime(2013, 8, 1, 14, 11)], - [r.groupby, r.period_start]) - self.assertNotEqual([{'resource_metadata.instance_type': '83'}, - datetime.datetime(2013, 8, 1, 16, 11)], - [r.groupby, r.period_start]) - self.assertNotEqual([{'resource_metadata.instance_type': '84'}, - datetime.datetime(2013, 8, 1, 10, 11)], - [r.groupby, r.period_start]) - self.assertNotEqual([{'resource_metadata.instance_type': '84'}, - datetime.datetime(2013, 8, 1, 16, 11)], - [r.groupby, r.period_start]) - - def test_group_by_with_query_filter_and_period(self): - f = storage.SampleFilter( - meter='instance', - source='source-1', - ) - results = list(self.conn.get_meter_statistics(f, - period=7200, - groupby=['project_id'])) - self.assertEqual(3, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - period_start_set = set([r.period_start for r in results]) - period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 11), - datetime.datetime(2013, 8, 1, 14, 11), - datetime.datetime(2013, 8, 1, 16, 11)]) - self.assertEqual(period_start_valid, period_start_set) - - for r in results: - if (r.groupby == {'project_id': 'project-1'} and - r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(1, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(1, r.avg) - self.assertEqual(1740, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 10, 40), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), - r.period_end) - elif (r.groupby == {'project_id': 'project-1'} and - r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), - r.period_end) - elif (r.groupby == {'project_id': 'project-2'} and - r.period_start == datetime.datetime(2013, 8, 1, 16, 11)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), - r.duration_end) - self.assertEqual(7200, r.period) - 
self.assertEqual(datetime.datetime(2013, 8, 1, 18, 11), - r.period_end) - else: - self.assertNotEqual([{'project_id': 'project-1'}, - datetime.datetime(2013, 8, 1, 16, 11)], - [r.groupby, r.period_start]) - self.assertNotEqual([{'project_id': 'project-2'}, - datetime.datetime(2013, 8, 1, 10, 11)], - [r.groupby, r.period_start]) - - def test_group_by_metadata_with_query_filter_and_period(self): - # This test checks grouping with metadata fields in combination - # with a query filter and period grouping. - f = storage.SampleFilter( - meter='instance', - project='project-1', - ) - results = list( - self.conn.get_meter_statistics( - f, period=7200, groupby=['resource_metadata.instance_type'])) - self.assertEqual(3, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['resource_metadata.instance_type']), - groupby_keys_set) - self.assertEqual(set(['82', '83', '84']), groupby_vals_set) - period_start_set = set([r.period_start for r in results]) - period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 11), - datetime.datetime(2013, 8, 1, 14, 11)]) - self.assertEqual(period_start_valid, period_start_set) - - for r in results: - if (r.groupby == {'resource_metadata.instance_type': '82'} and - r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(1, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(1, r.avg) - self.assertEqual(1740, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 10, 40), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), - r.period_end) - elif (r.groupby == {'resource_metadata.instance_type': '83'} and - r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), - r.period_end) - elif (r.groupby == {'resource_metadata.instance_type': '84'} and - r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(2, r.avg) - self.assertEqual(4260, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), - r.period_end) - else: - self.assertNotEqual([{'resource_metadata.instance_type': '82'}, - datetime.datetime(2013, 8, 1, 14, 11)], - [r.groupby, r.period_start]) - self.assertNotEqual([{'resource_metadata.instance_type': '83'}, - datetime.datetime(2013, 8, 1, 14, 11)], - [r.groupby, r.period_start]) - self.assertNotEqual([{'resource_metadata.instance_type': '84'}, - datetime.datetime(2013, 8, 1, 10, 11)], - 
[r.groupby, r.period_start]) - - def test_group_by_start_timestamp_after(self): - f = storage.SampleFilter( - meter='instance', - start_timestamp=datetime.datetime(2013, 8, 1, 17, 28, 1), - ) - results = list(self.conn.get_meter_statistics(f, - groupby=['project_id'])) - - self.assertEqual([], results) - - def test_group_by_end_timestamp_before(self): - f = storage.SampleFilter( - meter='instance', - end_timestamp=datetime.datetime(2013, 8, 1, 10, 10, 59), - ) - results = list(self.conn.get_meter_statistics(f, - groupby=['project_id'])) - - self.assertEqual([], results) - - def test_group_by_start_timestamp(self): - f = storage.SampleFilter( - meter='instance', - start_timestamp=datetime.datetime(2013, 8, 1, 14, 58), - ) - results = list(self.conn.get_meter_statistics(f, - groupby=['project_id'])) - self.assertEqual(2, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - - for r in results: - if r.groupby == {'project_id': 'project-1'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'project_id': 'project-2'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(4, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(3, r.avg) - - def test_group_by_end_timestamp(self): - f = storage.SampleFilter( - meter='instance', - end_timestamp=datetime.datetime(2013, 8, 1, 11, 45), - ) - results = list(self.conn.get_meter_statistics(f, - groupby=['project_id'])) - self.assertEqual(1, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1']), groupby_vals_set) - - for r in results: - if r.groupby == {'project_id': 'project-1'}: - self.assertEqual(3, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(4, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(2, r.avg) - - def test_group_by_start_end_timestamp(self): - f = storage.SampleFilter( - meter='instance', - start_timestamp=datetime.datetime(2013, 8, 1, 8, 17, 3), - end_timestamp=datetime.datetime(2013, 8, 1, 23, 59, 59), - ) - results = list(self.conn.get_meter_statistics(f, - groupby=['project_id'])) - self.assertEqual(2, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - - for r in results: - if r.groupby == {'project_id': 'project-1'}: - self.assertEqual(5, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(4, r.max) - self.assertEqual(10, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'project_id': 'project-2'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - 
self.assertEqual(2, r.min) - self.assertEqual(4, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(3, r.avg) - - def test_group_by_start_end_timestamp_with_query_filter(self): - f = storage.SampleFilter( - meter='instance', - project='project-1', - start_timestamp=datetime.datetime(2013, 8, 1, 11, 1), - end_timestamp=datetime.datetime(2013, 8, 1, 20, 0), - ) - results = list(self.conn.get_meter_statistics(f, - groupby=['resource_id'])) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['resource_id']), groupby_keys_set) - self.assertEqual(set(['resource-1', 'resource-3']), groupby_vals_set) - - for r in results: - if r.groupby == {'resource_id': 'resource-1'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'resource_id': 'resource-3'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - - def test_group_by_start_end_timestamp_with_period(self): - f = storage.SampleFilter( - meter='instance', - start_timestamp=datetime.datetime(2013, 8, 1, 14, 0), - end_timestamp=datetime.datetime(2013, 8, 1, 17, 0), - ) - results = list(self.conn.get_meter_statistics(f, - period=3600, - groupby=['project_id'])) - self.assertEqual(3, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - period_start_set = set([r.period_start for r in results]) - period_start_valid = set([datetime.datetime(2013, 8, 1, 14, 0), - datetime.datetime(2013, 8, 1, 15, 0), - datetime.datetime(2013, 8, 1, 16, 0)]) - self.assertEqual(period_start_valid, period_start_set) - - for r in results: - if (r.groupby == {'project_id': 'project-1'} and - r.period_start == datetime.datetime(2013, 8, 1, 14, 0)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), - r.duration_end) - self.assertEqual(3600, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 15, 0), - r.period_end) - elif (r.groupby == {'project_id': 'project-1'} and - r.period_start == datetime.datetime(2013, 8, 1, 16, 0)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10), - r.duration_end) - self.assertEqual(3600, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 17, 0), - r.period_end) - elif (r.groupby == {'project_id': 'project-2'} and - r.period_start == datetime.datetime(2013, 8, 1, 15, 0)): - 
self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37), - r.duration_end) - self.assertEqual(3600, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 0), - r.period_end) - else: - self.assertNotEqual([{'project_id': 'project-1'}, - datetime.datetime(2013, 8, 1, 15, 0)], - [r.groupby, r.period_start]) - self.assertNotEqual([{'project_id': 'project-2'}, - datetime.datetime(2013, 8, 1, 14, 0)], - [r.groupby, r.period_start]) - self.assertNotEqual([{'project_id': 'project-2'}, - datetime.datetime(2013, 8, 1, 16, 0)], - [r.groupby, r.period_start]) - - def test_group_by_start_end_timestamp_with_query_filter_and_period(self): - f = storage.SampleFilter( - meter='instance', - source='source-1', - start_timestamp=datetime.datetime(2013, 8, 1, 10, 0), - end_timestamp=datetime.datetime(2013, 8, 1, 18, 0), - ) - results = list(self.conn.get_meter_statistics(f, - period=7200, - groupby=['project_id'])) - self.assertEqual(3, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - period_start_set = set([r.period_start for r in results]) - period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 0), - datetime.datetime(2013, 8, 1, 14, 0), - datetime.datetime(2013, 8, 1, 16, 0)]) - self.assertEqual(period_start_valid, period_start_set) - - for r in results: - if (r.groupby == {'project_id': 'project-1'} and - r.period_start == datetime.datetime(2013, 8, 1, 10, 0)): - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(1, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(1, r.avg) - self.assertEqual(1740, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 10, 40), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 12, 0), - r.period_end) - elif (r.groupby == {'project_id': 'project-1'} and - r.period_start == datetime.datetime(2013, 8, 1, 14, 0)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 0), - r.period_end) - elif (r.groupby == {'project_id': 'project-2'} and - r.period_start == datetime.datetime(2013, 8, 1, 16, 0)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), - r.duration_end) - self.assertEqual(7200, 
r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 18, 0), - r.period_end) - else: - self.assertNotEqual([{'project_id': 'project-1'}, - datetime.datetime(2013, 8, 1, 16, 0)], - [r.groupby, r.period_start]) - self.assertNotEqual([{'project_id': 'project-2'}, - datetime.datetime(2013, 8, 1, 10, 0)], - [r.groupby, r.period_start]) - self.assertNotEqual([{'project_id': 'project-2'}, - datetime.datetime(2013, 8, 1, 14, 0)], - [r.groupby, r.period_start]) - - -class CounterDataTypeTest(DBTestBase, - tests_db.MixinTestsWithBackendScenarios): - def prepare_data(self): - c = sample.Sample( - 'dummyBigCounter', - sample.TYPE_CUMULATIVE, - unit='', - volume=337203685477580, - user_id='user-id', - project_id='project-id', - resource_id='resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={}, - source='test-1', - ) - msg = utils.meter_message_from_counter( - c, self.CONF.publisher.telemetry_secret, - ) - - self.conn.record_metering_data(msg) - - c = sample.Sample( - 'dummySmallCounter', - sample.TYPE_CUMULATIVE, - unit='', - volume=-337203685477580, - user_id='user-id', - project_id='project-id', - resource_id='resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={}, - source='test-1', - ) - msg = utils.meter_message_from_counter( - c, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - c = sample.Sample( - 'floatCounter', - sample.TYPE_CUMULATIVE, - unit='', - volume=1938495037.53697, - user_id='user-id', - project_id='project-id', - resource_id='resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={}, - source='test-1', - ) - msg = utils.meter_message_from_counter( - c, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def test_storage_can_handle_large_values(self): - f = storage.SampleFilter( - meter='dummyBigCounter', - ) - results = list(self.conn.get_samples(f)) - self.assertEqual(337203685477580, results[0].counter_volume) - f = storage.SampleFilter( - meter='dummySmallCounter', - ) - results = list(self.conn.get_samples(f)) - observed_num = int(results[0].counter_volume) - self.assertEqual(-337203685477580, observed_num) - - def test_storage_can_handle_float_values(self): - f = storage.SampleFilter( - meter='floatCounter', - ) - results = list(self.conn.get_samples(f)) - self.assertEqual(1938495037.53697, results[0].counter_volume) - - -class AlarmTestBase(DBTestBase): - def add_some_alarms(self): - alarms = [alarm_models.Alarm(alarm_id='r3d', - enabled=True, - type='threshold', - name='red-alert', - description='my red-alert', - timestamp=datetime.datetime(2015, 7, - 2, 10, 25), - user_id='me', - project_id='and-da-boys', - state="insufficient data", - state_timestamp=constants.MIN_DATETIME, - ok_actions=[], - alarm_actions=['http://nowhere/alarms'], - insufficient_data_actions=[], - repeat_actions=False, - time_constraints=[dict(name='testcons', - start='0 11 * * *', - duration=300)], - rule=dict(comparison_operator='eq', - threshold=36, - statistic='count', - evaluation_periods=1, - period=60, - meter_name='test.one', - query=[{'field': 'key', - 'op': 'eq', - 'value': 'value', - 'type': 'string'}]), - ), - alarm_models.Alarm(alarm_id='0r4ng3', - enabled=True, - type='threshold', - name='orange-alert', - description='a orange', - timestamp=datetime.datetime(2015, 7, - 2, 10, 40), - user_id='me', - project_id='and-da-boys', - state="insufficient data", - state_timestamp=constants.MIN_DATETIME, - ok_actions=[], - 
alarm_actions=['http://nowhere/alarms'], - insufficient_data_actions=[], - repeat_actions=False, - time_constraints=[], - rule=dict(comparison_operator='gt', - threshold=75, - statistic='avg', - evaluation_periods=1, - period=60, - meter_name='test.forty', - query=[{'field': 'key2', - 'op': 'eq', - 'value': 'value2', - 'type': 'string'}]), - ), - alarm_models.Alarm(alarm_id='y3ll0w', - enabled=False, - type='threshold', - name='yellow-alert', - description='yellow', - timestamp=datetime.datetime(2015, 7, - 2, 10, 10), - user_id='me', - project_id='and-da-boys', - state="insufficient data", - state_timestamp=constants.MIN_DATETIME, - ok_actions=[], - alarm_actions=['http://nowhere/alarms'], - insufficient_data_actions=[], - repeat_actions=False, - time_constraints=[], - rule=dict(comparison_operator='lt', - threshold=10, - statistic='min', - evaluation_periods=1, - period=60, - meter_name='test.five', - query=[{'field': 'key2', - 'op': 'eq', - 'value': 'value2', - 'type': 'string'}, - {'field': - 'user_metadata.key3', - 'op': 'eq', - 'value': 'value3', - 'type': 'string'}]), - )] - - for a in alarms: - self.alarm_conn.create_alarm(a) - - -class AlarmTest(AlarmTestBase, - tests_db.MixinTestsWithBackendScenarios): - - def test_empty(self): - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual([], alarms) - - def test_list(self): - self.add_some_alarms() - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(3, len(alarms)) - - def test_list_ordered_by_timestamp(self): - self.add_some_alarms() - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(len(alarms), 3) - alarm_l = [a.timestamp for a in alarms] - alarm_l_ordered = [datetime.datetime(2015, 7, 2, 10, 40), - datetime.datetime(2015, 7, 2, 10, 25), - datetime.datetime(2015, 7, 2, 10, 10)] - self.assertEqual(alarm_l_ordered, alarm_l) - - def test_list_enabled(self): - self.add_some_alarms() - alarms = list(self.alarm_conn.get_alarms(enabled=True)) - self.assertEqual(2, len(alarms)) - - def test_list_disabled(self): - self.add_some_alarms() - alarms = list(self.alarm_conn.get_alarms(enabled=False)) - self.assertEqual(1, len(alarms)) - - def test_list_by_type(self): - self.add_some_alarms() - alarms = list(self.alarm_conn.get_alarms(alarm_type='threshold')) - self.assertEqual(3, len(alarms)) - alarms = list(self.alarm_conn.get_alarms(alarm_type='combination')) - self.assertEqual(0, len(alarms)) - - def test_add(self): - self.add_some_alarms() - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(3, len(alarms)) - - meter_names = sorted([a.rule['meter_name'] for a in alarms]) - self.assertEqual(['test.five', 'test.forty', 'test.one'], meter_names) - - def test_update(self): - self.add_some_alarms() - orange = list(self.alarm_conn.get_alarms(name='orange-alert'))[0] - orange.enabled = False - orange.state = alarm_models.Alarm.ALARM_INSUFFICIENT_DATA - query = [{'field': 'metadata.group', - 'op': 'eq', - 'value': 'test.updated', - 'type': 'string'}] - orange.rule['query'] = query - orange.rule['meter_name'] = 'new_meter_name' - updated = self.alarm_conn.update_alarm(orange) - self.assertEqual(False, updated.enabled) - self.assertEqual(alarm_models.Alarm.ALARM_INSUFFICIENT_DATA, - updated.state) - self.assertEqual(query, updated.rule['query']) - self.assertEqual('new_meter_name', updated.rule['meter_name']) - - def test_update_llu(self): - llu = alarm_models.Alarm(alarm_id='llu', - enabled=True, - type='threshold', - name='llu', - description='llu', - timestamp=constants.MIN_DATETIME, - user_id='bla', - 
project_id='ffo', - state="insufficient data", - state_timestamp=constants.MIN_DATETIME, - ok_actions=[], - alarm_actions=[], - insufficient_data_actions=[], - repeat_actions=False, - time_constraints=[], - rule=dict(comparison_operator='lt', - threshold=34, - statistic='max', - evaluation_periods=1, - period=60, - meter_name='llt', - query=[]) - ) - updated = self.alarm_conn.update_alarm(llu) - updated.state = alarm_models.Alarm.ALARM_OK - updated.description = ':)' - self.alarm_conn.update_alarm(updated) - - all = list(self.alarm_conn.get_alarms()) - self.assertEqual(1, len(all)) - - def test_delete(self): - self.add_some_alarms() - victim = list(self.alarm_conn.get_alarms(name='orange-alert'))[0] - self.alarm_conn.delete_alarm(victim.alarm_id) - survivors = list(self.alarm_conn.get_alarms()) - self.assertEqual(2, len(survivors)) - for s in survivors: - self.assertNotEqual(victim.name, s.name) - - -@tests_db.run_with('sqlite', 'mysql', 'pgsql', 'hbase', 'db2') -class AlarmHistoryTest(AlarmTestBase, - tests_db.MixinTestsWithBackendScenarios): - - def setUp(self): - super(AlarmTestBase, self).setUp() - self.add_some_alarms() - self.prepare_alarm_history() - - def prepare_alarm_history(self): - alarms = list(self.alarm_conn.get_alarms()) - for alarm in alarms: - i = alarms.index(alarm) - alarm_change = { - "event_id": "3e11800c-a3ca-4991-b34b-d97efb6047d%s" % i, - "alarm_id": alarm.alarm_id, - "type": alarm_models.AlarmChange.CREATION, - "detail": "detail %s" % alarm.name, - "user_id": alarm.user_id, - "project_id": alarm.project_id, - "on_behalf_of": alarm.project_id, - "timestamp": datetime.datetime(2014, 4, 7, 7, 30 + i) - } - self.alarm_conn.record_alarm_change(alarm_change=alarm_change) - - def _clear_alarm_history(self, utcnow, ttl, count): - self.mock_utcnow.return_value = utcnow - self.alarm_conn.clear_expired_alarm_history_data(ttl) - history = list(self.alarm_conn.query_alarm_history()) - self.assertEqual(count, len(history)) - - def test_clear_alarm_history_no_data_to_remove(self): - utcnow = datetime.datetime(2013, 4, 7, 7, 30) - self._clear_alarm_history(utcnow, 1, 3) - - def test_clear_some_alarm_history(self): - utcnow = datetime.datetime(2014, 4, 7, 7, 35) - self._clear_alarm_history(utcnow, 3 * 60, 1) - - def test_clear_all_alarm_history(self): - utcnow = datetime.datetime(2014, 4, 7, 7, 45) - self._clear_alarm_history(utcnow, 3 * 60, 0) - - -class ComplexAlarmQueryTest(AlarmTestBase, - tests_db.MixinTestsWithBackendScenarios): - - def test_no_filter(self): - self.add_some_alarms() - result = list(self.alarm_conn.query_alarms()) - self.assertEqual(3, len(result)) - - def test_no_filter_with_limit(self): - self.add_some_alarms() - result = list(self.alarm_conn.query_alarms(limit=2)) - self.assertEqual(2, len(result)) - - def test_filter(self): - self.add_some_alarms() - filter_expr = {"and": - [{"or": - [{"=": {"name": "yellow-alert"}}, - {"=": {"name": "red-alert"}}]}, - {"=": {"enabled": True}}]} - - result = list(self.alarm_conn.query_alarms(filter_expr=filter_expr)) - - self.assertEqual(1, len(result)) - for a in result: - self.assertIn(a.name, set(["yellow-alert", "red-alert"])) - self.assertTrue(a.enabled) - - def test_filter_with_regexp(self): - self.add_some_alarms() - filter_expr = {"and": - [{"or": [{"=": {"name": "yellow-alert"}}, - {"=": {"name": "red-alert"}}]}, - {"=~": {"description": "yel.*"}}]} - - result = list(self.alarm_conn.query_alarms(filter_expr=filter_expr)) - - self.assertEqual(1, len(result)) - for a in result: - self.assertEqual("yellow", 
a.description) - - def test_filter_for_alarm_id(self): - self.add_some_alarms() - filter_expr = {"=": {"alarm_id": "0r4ng3"}} - - result = list(self.alarm_conn.query_alarms(filter_expr=filter_expr)) - - self.assertEqual(1, len(result)) - for a in result: - self.assertEqual("0r4ng3", a.alarm_id) - - def test_filter_and_orderby(self): - self.add_some_alarms() - result = list(self.alarm_conn.query_alarms(filter_expr=( - {"=": {"enabled": True}}), - orderby=[{"name": "asc"}])) - self.assertEqual(2, len(result)) - self.assertEqual(["orange-alert", "red-alert"], - [a.name for a in result]) - for a in result: - self.assertTrue(a.enabled) - - -class ComplexAlarmHistoryQueryTest(AlarmTestBase, - tests_db.MixinTestsWithBackendScenarios): - def setUp(self): - super(DBTestBase, self).setUp() - self.filter_expr = {"and": - [{"or": - [{"=": {"type": "rule change"}}, - {"=": {"type": "state transition"}}]}, - {"=": {"alarm_id": "0r4ng3"}}]} - self.add_some_alarms() - self.prepare_alarm_history() - - def prepare_alarm_history(self): - alarms = list(self.alarm_conn.get_alarms()) - name_index = { - 'red-alert': 0, - 'orange-alert': 1, - 'yellow-alert': 2 - } - - for alarm in alarms: - i = name_index[alarm.name] - alarm_change = dict(event_id=( - "16fd2706-8baf-433b-82eb-8c7fada847c%s" % i), - alarm_id=alarm.alarm_id, - type=alarm_models.AlarmChange.CREATION, - detail="detail %s" % alarm.name, - user_id=alarm.user_id, - project_id=alarm.project_id, - on_behalf_of=alarm.project_id, - timestamp=datetime.datetime(2012, 9, 24, - 7 + i, - 30 + i)) - self.alarm_conn.record_alarm_change(alarm_change=alarm_change) - - alarm_change2 = dict(event_id=( - "16fd2706-8baf-433b-82eb-8c7fada847d%s" % i), - alarm_id=alarm.alarm_id, - type=alarm_models.AlarmChange.RULE_CHANGE, - detail="detail %s" % i, - user_id=alarm.user_id, - project_id=alarm.project_id, - on_behalf_of=alarm.project_id, - timestamp=datetime.datetime(2012, 9, 25, - 10 + i, - 30 + i)) - self.alarm_conn.record_alarm_change(alarm_change=alarm_change2) - - alarm_change3 = dict( - event_id="16fd2706-8baf-433b-82eb-8c7fada847e%s" % i, - alarm_id=alarm.alarm_id, - type=alarm_models.AlarmChange.STATE_TRANSITION, - detail="detail %s" % (i + 1), - user_id=alarm.user_id, - project_id=alarm.project_id, - on_behalf_of=alarm.project_id, - timestamp=datetime.datetime(2012, 9, 26, 10 + i, 30 + i) - ) - - if alarm.name == "red-alert": - alarm_change3['on_behalf_of'] = 'and-da-girls' - - self.alarm_conn.record_alarm_change(alarm_change=alarm_change3) - - if alarm.name in ["red-alert", "yellow-alert"]: - alarm_change4 = dict(event_id=( - "16fd2706-8baf-433b-82eb-8c7fada847f%s" - % i), - alarm_id=alarm.alarm_id, - type=alarm_models.AlarmChange.DELETION, - detail="detail %s" % (i + 2), - user_id=alarm.user_id, - project_id=alarm.project_id, - on_behalf_of=alarm.project_id, - timestamp=datetime.datetime(2012, 9, 27, - 10 + i, - 30 + i)) - self.alarm_conn.record_alarm_change(alarm_change=alarm_change4) - - def test_alarm_history_with_no_filter(self): - history = list(self.alarm_conn.query_alarm_history()) - self.assertEqual(11, len(history)) - - def test_alarm_history_with_no_filter_and_limit(self): - history = list(self.alarm_conn.query_alarm_history(limit=3)) - self.assertEqual(3, len(history)) - - def test_alarm_history_with_filter(self): - history = list( - self.alarm_conn.query_alarm_history(filter_expr=self.filter_expr)) - self.assertEqual(2, len(history)) - - def test_alarm_history_with_regexp(self): - filter_expr = {"and": - [{"=~": {"type": "(rule)|(state)"}}, - {"=": 
{"alarm_id": "0r4ng3"}}]} - history = list( - self.alarm_conn.query_alarm_history(filter_expr=filter_expr)) - self.assertEqual(2, len(history)) - - def test_alarm_history_with_filter_and_orderby(self): - history = list( - self.alarm_conn.query_alarm_history(filter_expr=self.filter_expr, - orderby=[{"timestamp": - "asc"}])) - self.assertEqual([alarm_models.AlarmChange.RULE_CHANGE, - alarm_models.AlarmChange.STATE_TRANSITION], - [h.type for h in history]) - - def test_alarm_history_with_filter_and_orderby_and_limit(self): - history = list( - self.alarm_conn.query_alarm_history(filter_expr=self.filter_expr, - orderby=[{"timestamp": - "asc"}], - limit=1)) - self.assertEqual(alarm_models.AlarmChange.RULE_CHANGE, history[0].type) - - def test_alarm_history_with_on_behalf_of_filter(self): - filter_expr = {"=": {"on_behalf_of": "and-da-girls"}} - history = list(self.alarm_conn.query_alarm_history( - filter_expr=filter_expr)) - self.assertEqual(1, len(history)) - self.assertEqual("16fd2706-8baf-433b-82eb-8c7fada847e0", - history[0].event_id) - - def test_alarm_history_with_alarm_id_as_filter(self): - filter_expr = {"=": {"alarm_id": "r3d"}} - history = list(self.alarm_conn.query_alarm_history( - filter_expr=filter_expr, orderby=[{"timestamp": "asc"}])) - self.assertEqual(4, len(history)) - self.assertEqual([alarm_models.AlarmChange.CREATION, - alarm_models.AlarmChange.RULE_CHANGE, - alarm_models.AlarmChange.STATE_TRANSITION, - alarm_models.AlarmChange.DELETION], - [h.type for h in history]) - - -class EventTestBase(tests_db.TestBase, - tests_db.MixinTestsWithBackendScenarios): - """Separate test base class. - - We don't want to inherit all the Meter stuff. - """ - - def setUp(self): - super(EventTestBase, self).setUp() - self.prepare_data() - - def prepare_data(self): - self.event_models = [] - base = 0 - self.start = datetime.datetime(2013, 12, 31, 5, 0) - now = self.start - for event_type in ['Foo', 'Bar', 'Zoo', 'Foo', 'Bar', 'Zoo']: - trait_models = [event_models.Trait(name, dtype, value) - for name, dtype, value in [ - ('trait_A', event_models.Trait.TEXT_TYPE, - "my_%s_text" % event_type), - ('trait_B', event_models.Trait.INT_TYPE, - base + 1), - ('trait_C', event_models.Trait.FLOAT_TYPE, - float(base) + 0.123456), - ('trait_D', event_models.Trait.DATETIME_TYPE, - now)]] - self.event_models.append( - event_models.Event("id_%s_%d" % (event_type, base), - event_type, now, trait_models, - {'status': {'nested': 'started'}})) - base += 100 - now = now + datetime.timedelta(hours=1) - self.end = now - - self.event_conn.record_events(self.event_models) - - -@tests_db.run_with('sqlite', 'mysql', 'pgsql') -class EventTTLTest(EventTestBase): - - @mock.patch.object(timeutils, 'utcnow') - def test_clear_expired_event_data(self, mock_utcnow): - mock_utcnow.return_value = datetime.datetime(2013, 12, 31, 10, 0) - self.event_conn.clear_expired_event_data(3600) - - events = list(self.event_conn.get_events(storage.EventFilter())) - self.assertEqual(2, len(events)) - event_types = list(self.event_conn.get_event_types()) - self.assertEqual(['Bar', 'Zoo'], event_types) - for event_type in event_types: - trait_types = list(self.event_conn.get_trait_types(event_type)) - self.assertEqual(4, len(trait_types)) - traits = list(self.event_conn.get_traits(event_type)) - self.assertEqual(4, len(traits)) - - -@tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb', 'db2') -class EventTest(EventTestBase): - def test_duplicate_message_id(self): - now = datetime.datetime.utcnow() - m = [event_models.Event("1", "Foo", now, 
None, {}),
-             event_models.Event("1", "Zoo", now, [], {})]
-        with mock.patch('%s.LOG' %
-                        self.event_conn.record_events.__module__) as log:
-            self.event_conn.record_events(m)
-            self.assertEqual(1, log.info.call_count)
-
-    def test_bad_event(self):
-        now = datetime.datetime.utcnow()
-        broken_event = event_models.Event("1", "Foo", now, None, {})
-        del(broken_event.__dict__['raw'])
-        m = [broken_event, broken_event]
-        with mock.patch('%s.LOG' %
-                        self.event_conn.record_events.__module__) as log:
-            self.assertRaises(AttributeError,
-                              self.event_conn.record_events, m)
-            # ensure that record_events does not break on first error but
-            # delays exception and tries to record each event.
-            self.assertEqual(2, log.exception.call_count)
-
-
-class GetEventTest(EventTestBase):
-
-    def test_generated_is_datetime(self):
-        event_filter = storage.EventFilter(self.start, self.end)
-        events = [event for event in self.event_conn.get_events(event_filter)]
-        self.assertEqual(6, len(events))
-        for i, event in enumerate(events):
-            self.assertIsInstance(event.generated, datetime.datetime)
-            self.assertEqual(event.generated,
-                             self.event_models[i].generated)
-            model_traits = self.event_models[i].traits
-            for j, trait in enumerate(event.traits):
-                if trait.dtype == event_models.Trait.DATETIME_TYPE:
-                    self.assertIsInstance(trait.value, datetime.datetime)
-                    self.assertEqual(trait.value, model_traits[j].value)
-
-    def test_simple_get(self):
-        event_filter = storage.EventFilter(self.start, self.end)
-        events = [event for event in self.event_conn.get_events(event_filter)]
-        self.assertEqual(6, len(events))
-        start_time = None
-        for i, type in enumerate(['Foo', 'Bar', 'Zoo']):
-            self.assertEqual(type, events[i].event_type)
-            self.assertEqual(4, len(events[i].traits))
-            # Ensure sorted results ...
-            if start_time is not None:
-                # Python 2.6 has no assertLess :(
-                self.assertTrue(start_time < events[i].generated)
-            start_time = events[i].generated
-
-    def test_simple_get_event_type(self):
-        expected_trait_values = {
-            'id_Bar_100': {
-                'trait_A': 'my_Bar_text',
-                'trait_B': 101,
-                'trait_C': 100.123456,
-                'trait_D': self.start + datetime.timedelta(hours=1)
-            },
-            'id_Bar_400': {
-                'trait_A': 'my_Bar_text',
-                'trait_B': 401,
-                'trait_C': 400.123456,
-                'trait_D': self.start + datetime.timedelta(hours=4)
-            }
-        }
-
-        event_filter = storage.EventFilter(self.start, self.end, "Bar")
-        events = [event for event in self.event_conn.get_events(event_filter)]
-        self.assertEqual(2, len(events))
-        self.assertEqual("Bar", events[0].event_type)
-        self.assertEqual("Bar", events[1].event_type)
-        self.assertEqual(4, len(events[0].traits))
-        self.assertEqual(4, len(events[1].traits))
-        for event in events:
-            trait_values = expected_trait_values.get(event.message_id,
-                                                     None)
-            if not trait_values:
-                self.fail("Unexpected event ID returned: %s" %
-                          event.message_id)
-
-            for trait in event.traits:
-                expected_val = trait_values.get(trait.name)
-                if not expected_val:
-                    self.fail("Unexpected trait type: %s" % trait.dtype)
-                self.assertEqual(expected_val, trait.value)
-
-    def test_get_event_trait_filter(self):
-        trait_filters = [{'key': 'trait_B', 'integer': 101}]
-        event_filter = storage.EventFilter(self.start, self.end,
-                                           traits_filter=trait_filters)
-        events = [event for event in self.event_conn.get_events(event_filter)]
-        self.assertEqual(1, len(events))
-        self.assertEqual("Bar", events[0].event_type)
-        self.assertEqual(4, len(events[0].traits))
-
-    def test_get_event_trait_filter_op_string(self):
-        trait_filters = [{'key': 'trait_A', 'string': 'my_Foo_text',
-                          'op': 'eq'}]
-        event_filter = storage.EventFilter(self.start, self.end,
-                                           traits_filter=trait_filters)
-        events = [event for event in self.event_conn.get_events(event_filter)]
-        self.assertEqual(2, len(events))
-        self.assertEqual("Foo", events[0].event_type)
-        self.assertEqual(4, len(events[0].traits))
-        trait_filters[0].update({'key': 'trait_A', 'op': 'lt'})
-        event_filter = storage.EventFilter(self.start, self.end,
-                                           traits_filter=trait_filters)
-        events = [event for event in self.event_conn.get_events(event_filter)]
-        self.assertEqual(2, len(events))
-        self.assertEqual("Bar", events[0].event_type)
-        trait_filters[0].update({'key': 'trait_A', 'op': 'le'})
-        event_filter = storage.EventFilter(self.start, self.end,
-                                           traits_filter=trait_filters)
-        events = [event for event in self.event_conn.get_events(event_filter)]
-        self.assertEqual(4, len(events))
-        self.assertEqual("Bar", events[1].event_type)
-        trait_filters[0].update({'key': 'trait_A', 'op': 'ne'})
-        event_filter = storage.EventFilter(self.start, self.end,
-                                           traits_filter=trait_filters)
-        events = [event for event in self.event_conn.get_events(event_filter)]
-        self.assertEqual(4, len(events))
-        self.assertEqual("Zoo", events[3].event_type)
-        trait_filters[0].update({'key': 'trait_A', 'op': 'gt'})
-        event_filter = storage.EventFilter(self.start, self.end,
-                                           traits_filter=trait_filters)
-        events = [event for event in self.event_conn.get_events(event_filter)]
-        self.assertEqual(2, len(events))
-        self.assertEqual("Zoo", events[0].event_type)
-        trait_filters[0].update({'key': 'trait_A', 'op': 'ge'})
-        event_filter = storage.EventFilter(self.start, self.end,
-                                           traits_filter=trait_filters)
-        events = [event for event in self.event_conn.get_events(event_filter)]
-        self.assertEqual(4, len(events))
-
self.assertEqual("Foo", events[2].event_type) - - def test_get_event_trait_filter_op_integer(self): - trait_filters = [{'key': 'trait_B', 'integer': 101, 'op': 'eq'}] - event_filter = storage.EventFilter(self.start, self.end, - traits_filter=trait_filters) - events = [event for event in self.event_conn.get_events(event_filter)] - self.assertEqual(1, len(events)) - self.assertEqual("Bar", events[0].event_type) - self.assertEqual(4, len(events[0].traits)) - trait_filters[0].update({'key': 'trait_B', 'op': 'lt'}) - event_filter = storage.EventFilter(self.start, self.end, - traits_filter=trait_filters) - events = [event for event in self.event_conn.get_events(event_filter)] - self.assertEqual(1, len(events)) - self.assertEqual("Foo", events[0].event_type) - trait_filters[0].update({'key': 'trait_B', 'op': 'le'}) - event_filter = storage.EventFilter(self.start, self.end, - traits_filter=trait_filters) - events = [event for event in self.event_conn.get_events(event_filter)] - self.assertEqual(2, len(events)) - self.assertEqual("Bar", events[1].event_type) - trait_filters[0].update({'key': 'trait_B', 'op': 'ne'}) - event_filter = storage.EventFilter(self.start, self.end, - traits_filter=trait_filters) - events = [event for event in self.event_conn.get_events(event_filter)] - self.assertEqual(5, len(events)) - self.assertEqual("Zoo", events[4].event_type) - trait_filters[0].update({'key': 'trait_B', 'op': 'gt'}) - event_filter = storage.EventFilter(self.start, self.end, - traits_filter=trait_filters) - events = [event for event in self.event_conn.get_events(event_filter)] - self.assertEqual(4, len(events)) - self.assertEqual("Zoo", events[0].event_type) - trait_filters[0].update({'key': 'trait_B', 'op': 'ge'}) - event_filter = storage.EventFilter(self.start, self.end, - traits_filter=trait_filters) - events = [event for event in self.event_conn.get_events(event_filter)] - self.assertEqual(5, len(events)) - self.assertEqual("Foo", events[2].event_type) - - def test_get_event_trait_filter_op_float(self): - trait_filters = [{'key': 'trait_C', 'float': 300.123456, 'op': 'eq'}] - event_filter = storage.EventFilter(self.start, self.end, - traits_filter=trait_filters) - events = [event for event in self.event_conn.get_events(event_filter)] - self.assertEqual(1, len(events)) - self.assertEqual("Foo", events[0].event_type) - self.assertEqual(4, len(events[0].traits)) - trait_filters[0].update({'key': 'trait_C', 'op': 'lt'}) - event_filter = storage.EventFilter(self.start, self.end, - traits_filter=trait_filters) - events = [event for event in self.event_conn.get_events(event_filter)] - self.assertEqual(3, len(events)) - self.assertEqual("Zoo", events[2].event_type) - trait_filters[0].update({'key': 'trait_C', 'op': 'le'}) - event_filter = storage.EventFilter(self.start, self.end, - traits_filter=trait_filters) - events = [event for event in self.event_conn.get_events(event_filter)] - self.assertEqual(4, len(events)) - self.assertEqual("Bar", events[1].event_type) - trait_filters[0].update({'key': 'trait_C', 'op': 'ne'}) - event_filter = storage.EventFilter(self.start, self.end, - traits_filter=trait_filters) - events = [event for event in self.event_conn.get_events(event_filter)] - self.assertEqual(5, len(events)) - self.assertEqual("Zoo", events[2].event_type) - trait_filters[0].update({'key': 'trait_C', 'op': 'gt'}) - event_filter = storage.EventFilter(self.start, self.end, - traits_filter=trait_filters) - events = [event for event in self.event_conn.get_events(event_filter)] - self.assertEqual(2, 
len(events)) - self.assertEqual("Bar", events[0].event_type) - trait_filters[0].update({'key': 'trait_C', 'op': 'ge'}) - event_filter = storage.EventFilter(self.start, self.end, - traits_filter=trait_filters) - events = [event for event in self.event_conn.get_events(event_filter)] - self.assertEqual(3, len(events)) - self.assertEqual("Zoo", events[2].event_type) - - def test_get_event_trait_filter_op_datetime(self): - trait_filters = [{'key': 'trait_D', - 'datetime': self.start + datetime.timedelta(hours=2), - 'op': 'eq'}] - event_filter = storage.EventFilter(self.start, self.end, - traits_filter=trait_filters) - events = [event for event in self.event_conn.get_events(event_filter)] - self.assertEqual(1, len(events)) - self.assertEqual("Zoo", events[0].event_type) - self.assertEqual(4, len(events[0].traits)) - trait_filters[0].update({'key': 'trait_D', 'op': 'lt'}) - event_filter = storage.EventFilter(self.start, self.end, - traits_filter=trait_filters) - events = [event for event in self.event_conn.get_events(event_filter)] - self.assertEqual(2, len(events)) - trait_filters[0].update({'key': 'trait_D', 'op': 'le'}) - self.assertEqual("Bar", events[1].event_type) - event_filter = storage.EventFilter(self.start, self.end, - traits_filter=trait_filters) - events = [event for event in self.event_conn.get_events(event_filter)] - self.assertEqual(3, len(events)) - self.assertEqual("Bar", events[1].event_type) - trait_filters[0].update({'key': 'trait_D', 'op': 'ne'}) - event_filter = storage.EventFilter(self.start, self.end, - traits_filter=trait_filters) - events = [event for event in self.event_conn.get_events(event_filter)] - self.assertEqual(5, len(events)) - self.assertEqual("Foo", events[2].event_type) - trait_filters[0].update({'key': 'trait_D', 'op': 'gt'}) - event_filter = storage.EventFilter(self.start, self.end, - traits_filter=trait_filters) - events = [event for event in self.event_conn.get_events(event_filter)] - self.assertEqual(3, len(events)) - self.assertEqual("Zoo", events[2].event_type) - trait_filters[0].update({'key': 'trait_D', 'op': 'ge'}) - event_filter = storage.EventFilter(self.start, self.end, - traits_filter=trait_filters) - events = [event for event in self.event_conn.get_events(event_filter)] - self.assertEqual(4, len(events)) - self.assertEqual("Bar", events[2].event_type) - - def test_get_event_multiple_trait_filter(self): - trait_filters = [{'key': 'trait_B', 'integer': 1}, - {'key': 'trait_A', 'string': 'my_Foo_text'}] - event_filter = storage.EventFilter(self.start, self.end, - traits_filter=trait_filters) - events = [event for event in self.event_conn.get_events(event_filter)] - self.assertEqual(1, len(events)) - self.assertEqual("Foo", events[0].event_type) - self.assertEqual(4, len(events[0].traits)) - - def test_get_event_multiple_trait_filter_expect_none(self): - trait_filters = [{'key': 'trait_B', 'integer': 1}, - {'key': 'trait_A', 'string': 'my_Zoo_text'}] - event_filter = storage.EventFilter(self.start, self.end, - traits_filter=trait_filters) - events = [event for event in self.event_conn.get_events(event_filter)] - self.assertEqual(0, len(events)) - - def test_get_event_types(self): - event_types = [e for e in - self.event_conn.get_event_types()] - - self.assertEqual(3, len(event_types)) - self.assertIn("Bar", event_types) - self.assertIn("Foo", event_types) - self.assertIn("Zoo", event_types) - - def test_get_trait_types(self): - trait_types = [tt for tt in - self.event_conn.get_trait_types("Foo")] - self.assertEqual(4, len(trait_types)) - 
trait_type_names = map(lambda x: x['name'], trait_types) - self.assertIn("trait_A", trait_type_names) - self.assertIn("trait_B", trait_type_names) - self.assertIn("trait_C", trait_type_names) - self.assertIn("trait_D", trait_type_names) - - def test_get_trait_types_unknown_event(self): - trait_types = [tt for tt in - self.event_conn.get_trait_types("Moo")] - self.assertEqual(0, len(trait_types)) - - def test_get_traits(self): - traits = self.event_conn.get_traits("Bar") - # format results in a way that makes them easier to work with - trait_dict = {} - for trait in traits: - trait_dict[trait.name] = trait.dtype - - self.assertIn("trait_A", trait_dict) - self.assertEqual(event_models.Trait.TEXT_TYPE, trait_dict["trait_A"]) - self.assertIn("trait_B", trait_dict) - self.assertEqual(event_models.Trait.INT_TYPE, trait_dict["trait_B"]) - self.assertIn("trait_C", trait_dict) - self.assertEqual(event_models.Trait.FLOAT_TYPE, trait_dict["trait_C"]) - self.assertIn("trait_D", trait_dict) - self.assertEqual(event_models.Trait.DATETIME_TYPE, - trait_dict["trait_D"]) - - def test_get_all_traits(self): - traits = self.event_conn.get_traits("Foo") - traits = sorted([t for t in traits], key=operator.attrgetter('dtype')) - self.assertEqual(8, len(traits)) - trait = traits[0] - self.assertEqual("trait_A", trait.name) - self.assertEqual(event_models.Trait.TEXT_TYPE, trait.dtype) - - def test_simple_get_event_no_traits(self): - new_events = [event_models.Event("id_notraits", "NoTraits", - self.start, [], {})] - self.event_conn.record_events(new_events) - event_filter = storage.EventFilter(self.start, self.end, "NoTraits") - events = [event for event in self.event_conn.get_events(event_filter)] - self.assertEqual(1, len(events)) - self.assertEqual("id_notraits", events[0].message_id) - self.assertEqual("NoTraits", events[0].event_type) - self.assertEqual(0, len(events[0].traits)) - - def test_simple_get_no_filters(self): - event_filter = storage.EventFilter(None, None, None) - events = [event for event in self.event_conn.get_events(event_filter)] - self.assertEqual(6, len(events)) - - def test_get_by_message_id(self): - new_events = [event_models.Event("id_testid", - "MessageIDTest", - self.start, - [], {})] - - self.event_conn.record_events(new_events) - event_filter = storage.EventFilter(message_id="id_testid") - events = [event for event in self.event_conn.get_events(event_filter)] - self.assertEqual(1, len(events)) - event = events[0] - self.assertEqual("id_testid", event.message_id) - - def test_simple_get_raw(self): - event_filter = storage.EventFilter() - events = [event for event in self.event_conn.get_events(event_filter)] - self.assertTrue(events) - self.assertEqual({'status': {'nested': 'started'}}, events[0].raw) - - def test_trait_type_enforced_on_none(self): - new_events = [event_models.Event( - "id_testid", "MessageIDTest", self.start, - [event_models.Trait('text', event_models.Trait.TEXT_TYPE, ''), - event_models.Trait('int', event_models.Trait.INT_TYPE, 0), - event_models.Trait('float', event_models.Trait.FLOAT_TYPE, 0.0)], - {})] - self.event_conn.record_events(new_events) - event_filter = storage.EventFilter(message_id="id_testid") - events = [event for event in self.event_conn.get_events(event_filter)] - options = [(event_models.Trait.TEXT_TYPE, ''), - (event_models.Trait.INT_TYPE, 0.0), - (event_models.Trait.FLOAT_TYPE, 0.0)] - for trait in events[0].traits: - options.remove((trait.dtype, trait.value)) - - -class BigIntegerTest(tests_db.TestBase, - tests_db.MixinTestsWithBackendScenarios): 
- def test_metadata_bigint(self): - metadata = {'bigint': 99999999999999} - s = sample.Sample(name='name', - type=sample.TYPE_GAUGE, - unit='B', - volume=1, - user_id='user-id', - project_id='project-id', - resource_id='resource-id', - timestamp=datetime.datetime.utcnow(), - resource_metadata=metadata) - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret) - self.conn.record_metering_data(msg) - - -@tests_db.run_with('mongodb') -class MongoAutoReconnectTest(DBTestBase, - tests_db.MixinTestsWithBackendScenarios): - - def setUp(self): - super(MongoAutoReconnectTest, self).setUp() - self.CONF.set_override('retry_interval', 0, group='database') - - def test_mongo_client(self): - if cfg.CONF.database.mongodb_replica_set: - self.assertIsInstance(self.conn.conn.conn, - pymongo.MongoReplicaSetClient) - else: - self.assertIsInstance(self.conn.conn.conn, - pymongo.MongoClient) - - def test_mongo_cursor_next(self): - expected_first_sample_timestamp = datetime.datetime(2012, 7, 2, 10, 39) - raise_exc = [False, True] - method = self.conn.db.resource.find().cursor.next - with mock.patch('pymongo.cursor.Cursor.next', - mock.Mock()) as mock_next: - mock_next.side_effect = self.create_side_effect( - method, pymongo.errors.AutoReconnect, raise_exc) - resource = self.conn.db.resource.find().next() - self.assertEqual(expected_first_sample_timestamp, - resource['first_sample_timestamp']) - - def test_mongo_insert(self): - raise_exc = [False, True] - method = self.conn.db.meter.insert - - with mock.patch('pymongo.collection.Collection.insert', - mock.Mock(return_value=method)) as mock_insert: - mock_insert.side_effect = self.create_side_effect( - method, pymongo.errors.AutoReconnect, raise_exc) - mock_insert.__name__ = 'insert' - self.create_and_store_sample( - timestamp=datetime.datetime(2014, 10, 15, 14, 39), - source='test-proxy') - meters = list(self.conn.db.meter.find()) - self.assertEqual(12, len(meters)) - - def test_mongo_find_and_modify(self): - raise_exc = [False, True] - method = self.conn.db.resource.find_and_modify - - with mock.patch('pymongo.collection.Collection.find_and_modify', - mock.Mock()) as mock_fam: - mock_fam.side_effect = self.create_side_effect( - method, pymongo.errors.AutoReconnect, raise_exc) - mock_fam.__name__ = 'find_and_modify' - self.create_and_store_sample( - timestamp=datetime.datetime(2014, 10, 15, 14, 39), - source='test-proxy') - data = self.conn.db.resource.find( - {'last_sample_timestamp': - datetime.datetime(2014, 10, 15, 14, 39)})[0]['source'] - self.assertEqual('test-proxy', data) - - def test_mongo_update(self): - raise_exc = [False, True] - method = self.conn.db.resource.update - - with mock.patch('pymongo.collection.Collection.update', - mock.Mock()) as mock_update: - mock_update.side_effect = self.create_side_effect( - method, pymongo.errors.AutoReconnect, raise_exc) - mock_update.__name__ = 'update' - self.create_and_store_sample( - timestamp=datetime.datetime(2014, 10, 15, 17, 39), - source='test-proxy-update') - data = self.conn.db.resource.find( - {'last_sample_timestamp': - datetime.datetime(2014, 10, 15, 17, 39)})[0]['source'] - self.assertEqual('test-proxy-update', data) - - -@tests_db.run_with('mongodb') -class MongoTimeToLiveTest(DBTestBase, tests_db.MixinTestsWithBackendScenarios): - - def test_ensure_index(self): - cfg.CONF.set_override('metering_time_to_live', 5, group='database') - self.conn.upgrade() - self.assertEqual(5, self.conn.db.resource.index_information() - ['resource_ttl']['expireAfterSeconds']) - 
self.assertEqual(5, self.conn.db.meter.index_information() - ['meter_ttl']['expireAfterSeconds']) - - def test_modification_of_index(self): - cfg.CONF.set_override('metering_time_to_live', 5, group='database') - self.conn.upgrade() - cfg.CONF.set_override('metering_time_to_live', 15, group='database') - self.conn.upgrade() - self.assertEqual(15, self.conn.db.resource.index_information() - ['resource_ttl']['expireAfterSeconds']) - self.assertEqual(15, self.conn.db.meter.index_information() - ['meter_ttl']['expireAfterSeconds']) - - -class TestRecordUnicodeSamples(DBTestBase, - tests_db.MixinTestsWithBackendScenarios): - def prepare_data(self): - self.msgs = [] - self.msgs.append(self.create_and_store_sample( - name=u'meter.accent\xe9\u0437', - metadata={u"metadata_key\xe9\u0437": "test", - u"metadata_key": u"test\xe9\u0437"}, - )) - - def test_unicode_sample(self): - f = storage.SampleFilter() - results = list(self.conn.get_samples(f)) - self.assertEqual(1, len(results)) - expected = self.msgs[0] - actual = results[0].as_dict() - self.assertEqual(expected['counter_name'], actual['counter_name']) - self.assertEqual(expected['resource_metadata'], - actual['resource_metadata']) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/telemetry/test_notifications.py ceilometer-5.0.0~b3/ceilometer/tests/telemetry/test_notifications.py --- ceilometer-5.0.0~b2/ceilometer/tests/telemetry/test_notifications.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/telemetry/test_notifications.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,84 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
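The deleted MongoTimeToLiveTest exercises MongoDB's TTL-index machinery through the storage driver. As a rough standalone sketch of that mechanism (not part of the patch; it assumes pymongo 3.x, a local mongod, and hypothetical database, collection and field names), expiry is governed by the expireAfterSeconds index option, and changing the TTL in place requires the collMod command rather than re-creating the index:

    import datetime

    import pymongo

    client = pymongo.MongoClient('mongodb://localhost:27017')
    coll = client.test_db.meter

    # Documents expire once 'timestamp' is older than expireAfterSeconds.
    coll.create_index([('timestamp', pymongo.ASCENDING)],
                      expireAfterSeconds=5, name='meter_ttl')

    # Updating the TTL of an existing index is done with collMod, which
    # is what a storage upgrade() step would typically run.
    client.test_db.command('collMod', 'meter',
                           index={'keyPattern': {'timestamp': 1},
                                  'expireAfterSeconds': 15})

    info = coll.index_information()
    assert info['meter_ttl']['expireAfterSeconds'] == 15

    coll.insert_one({'timestamp': datetime.datetime.utcnow()})

The expiry sweep runs in the background (roughly once a minute), so a document is not guaranteed to disappear the instant its TTL elapses.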
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/telemetry/test_notifications.py ceilometer-5.0.0~b3/ceilometer/tests/telemetry/test_notifications.py
--- ceilometer-5.0.0~b2/ceilometer/tests/telemetry/test_notifications.py 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/telemetry/test_notifications.py 1970-01-01 00:00:00.000000000 +0000
@@ -1,84 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslotest import base
-
-from ceilometer.telemetry import notifications
-
-NOTIFICATION = {
-    u'_context_domain': None,
-    u'_context_request_id': u'req-da91b4bf-d2b5-43ae-8b66-c7752e72726d',
-    'event_type': u'sample.create',
-    'timestamp': u'2015-06-19 09:19:35.786893',
-    u'_context_auth_token': None,
-    u'_context_read_only': False,
-    'payload': [{
-        u'counter_name': u'instance100',
-        u'user_id': u'e1d870e51c7340cb9d555b15cbfcaec2',
-        u'resource_id': u'instance',
-        u'timestamp': u'2015-06-19T09:19:35.785330',
-        u'message_signature': u'fake_signature1',
-        u'resource_metadata': {u'foo': u'bar'},
-        u'source': u'30be1fc9a03c4e94ab05c403a8a377f2:openstack',
-        u'counter_unit': u'instance',
-        u'counter_volume': 1.0,
-        u'project_id': u'30be1fc9a03c4e94ab05c403a8a377f2',
-        u'message_id': u'4d865c6e-1664-11e5-9d41-0819a6cff905',
-        u'counter_type': u'gauge'
-    }, {
-        u'counter_name': u'instance100',
-        u'user_id': u'e1d870e51c7340cb9d555b15cbfcaec2',
-        u'resource_id': u'instance',
-        u'timestamp': u'2015-06-19T09:19:35.785330',
-        u'message_signature': u'fake_signature12',
-        u'resource_metadata': {u'foo': u'bar'},
-        u'source': u'30be1fc9a03c4e94ab05c403a8a377f2:openstack',
-        u'counter_unit': u'instance',
-        u'counter_volume': 1.0,
-        u'project_id': u'30be1fc9a03c4e94ab05c403a8a377f2',
-        u'message_id': u'4d866da8-1664-11e5-9d41-0819a6cff905',
-        u'counter_type': u'gauge'
-    }],
-    u'_context_resource_uuid': None,
-    u'_context_user_identity': u'fake_user_identity---',
-    u'_context_show_deleted': False,
-    u'_context_tenant': u'30be1fc9a03c4e94ab05c403a8a377f2',
-    'priority': 'info',
-    u'_context_is_admin': True,
-    u'_context_project_domain': None,
-    u'_context_user': u'e1d870e51c7340cb9d555b15cbfcaec2',
-    u'_context_user_domain': None,
-    'publisher_id': u'ceilometer.api',
-    'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e'
-}
-
-
-class TelemetryApiPostTestCase(base.BaseTestCase):
-
-    def test_process_notification(self):
-        sample_creation = notifications.TelemetryApiPost(None)
-        samples = list(sample_creation.process_notification(NOTIFICATION))
-        self.assertEqual(2, len(samples))
-        payload = NOTIFICATION["payload"]
-        for index, sample in enumerate(samples):
-            self.assertEqual(payload[index]["user_id"], sample.user_id)
-            self.assertEqual(payload[index]["counter_name"], sample.name)
-            self.assertEqual(payload[index]["resource_id"], sample.resource_id)
-            self.assertEqual(payload[index]["timestamp"], sample.timestamp)
-            self.assertEqual(payload[index]["resource_metadata"],
-                             sample.resource_metadata)
-            self.assertEqual(payload[index]["counter_volume"], sample.volume)
-            self.assertEqual(payload[index]["source"], sample.source)
-            self.assertEqual(payload[index]["counter_type"], sample.type)
-            self.assertEqual(payload[index]["message_id"], sample.id)
-            self.assertEqual(payload[index]["counter_unit"], sample.unit)
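The deleted test above feeds a sample.create notification, whose payload is a list of metering dicts, through a processing endpoint. A minimal sketch of that payload-to-sample transformation, using only the payload keys visible in the NOTIFICATION fixture (the Sample namedtuple here is an illustrative stand-in, not the ceilometer class):

    import collections

    Sample = collections.namedtuple(
        'Sample', ['name', 'volume', 'unit', 'type', 'user_id',
                   'project_id', 'resource_id', 'timestamp', 'message_id'])


    def samples_from_notification(notification):
        # One notification carries a batch of samples; yield them one
        # by one, mapping counter_* payload keys to sample fields.
        for payload in notification['payload']:
            yield Sample(name=payload['counter_name'],
                         volume=payload['counter_volume'],
                         unit=payload['counter_unit'],
                         type=payload['counter_type'],
                         user_id=payload['user_id'],
                         project_id=payload['project_id'],
                         resource_id=payload['resource_id'],
                         timestamp=payload['timestamp'],
                         message_id=payload['message_id'])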
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/test_bin.py ceilometer-5.0.0~b3/ceilometer/tests/test_bin.py
--- ceilometer-5.0.0~b2/ceilometer/tests/test_bin.py 2015-07-30 12:14:02.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/test_bin.py 1970-01-01 00:00:00.000000000 +0000
@@ -1,298 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 eNovance
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import json
-import os
-import random
-import socket
-import subprocess
-import time
-
-import httplib2
-from oslo_utils import fileutils
-import six
-
-from ceilometer.tests import base
-
-
-class BinTestCase(base.BaseTestCase):
-    def setUp(self):
-        super(BinTestCase, self).setUp()
-        content = ("[DEFAULT]\n"
-                   "rpc_backend=fake\n"
-                   "[database]\n"
-                   "connection=log://localhost\n")
-        if six.PY3:
-            content = content.encode('utf-8')
-        self.tempfile = fileutils.write_to_tempfile(content=content,
-                                                    prefix='ceilometer',
-                                                    suffix='.conf')
-
-    def tearDown(self):
-        super(BinTestCase, self).tearDown()
-        os.remove(self.tempfile)
-
-    def test_dbsync_run(self):
-        subp = subprocess.Popen(['ceilometer-dbsync',
-                                 "--config-file=%s" % self.tempfile])
-        self.assertEqual(0, subp.wait())
-
-    def test_run_expirer_ttl_disabled(self):
-        subp = subprocess.Popen(['ceilometer-expirer',
-                                 '-d',
-                                 "--config-file=%s" % self.tempfile],
-                                stderr=subprocess.PIPE)
-        __, err = subp.communicate()
-        self.assertEqual(0, subp.poll())
-        self.assertIn(b"Nothing to clean, database metering "
-                      b"time to live is disabled", err)
-        self.assertIn(b"Nothing to clean, database event "
-                      b"time to live is disabled", err)
-        self.assertIn(b"Nothing to clean, database alarm history "
-                      b"time to live is disabled", err)
-
-    def _test_run_expirer_ttl_enabled(self, ttl_name, data_name):
-        content = ("[DEFAULT]\n"
-                   "rpc_backend=fake\n"
-                   "[database]\n"
-                   "%s=1\n"
-                   "connection=log://localhost\n" % ttl_name)
-        if six.PY3:
-            content = content.encode('utf-8')
-        self.tempfile = fileutils.write_to_tempfile(content=content,
-                                                    prefix='ceilometer',
-                                                    suffix='.conf')
-        subp = subprocess.Popen(['ceilometer-expirer',
-                                 '-d',
-                                 "--config-file=%s" % self.tempfile],
-                                stderr=subprocess.PIPE)
-        __, err = subp.communicate()
-        self.assertEqual(0, subp.poll())
-        msg = "Dropping %s data with TTL 1" % data_name
-        if six.PY3:
-            msg = msg.encode('utf-8')
-        self.assertIn(msg, err)
-
-    def test_run_expirer_ttl_enabled(self):
-        self._test_run_expirer_ttl_enabled('metering_time_to_live',
-                                           'metering')
-        self._test_run_expirer_ttl_enabled('time_to_live', 'metering')
-        self._test_run_expirer_ttl_enabled('event_time_to_live', 'event')
-        self._test_run_expirer_ttl_enabled('alarm_history_time_to_live',
-                                           'alarm history')
-
-
-class BinSendSampleTestCase(base.BaseTestCase):
-    def setUp(self):
-        super(BinSendSampleTestCase, self).setUp()
-        pipeline_cfg_file = self.path_get('etc/ceilometer/pipeline.yaml')
-        content = ("[DEFAULT]\n"
-                   "rpc_backend=fake\n"
-                   "pipeline_cfg_file={0}\n".format(pipeline_cfg_file))
-        if six.PY3:
-            content = content.encode('utf-8')
-
-        self.tempfile = fileutils.write_to_tempfile(content=content,
-                                                    prefix='ceilometer',
-                                                    suffix='.conf')
-
-    def tearDown(self):
-        super(BinSendSampleTestCase, self).tearDown()
-        os.remove(self.tempfile)
-
-    def test_send_counter_run(self):
-        subp = subprocess.Popen(['ceilometer-send-sample',
-                                 "--config-file=%s" % self.tempfile,
-                                 "--sample-resource=someuuid",
-                                 "--sample-name=mycounter"])
-        self.assertEqual(0, subp.wait())
-
-
-class BinApiTestCase(base.BaseTestCase):
-
-    def setUp(self):
-        super(BinApiTestCase, self).setUp()
-        # create api_paste.ini file without authentication
-        content = ("[pipeline:main]\n"
-                   "pipeline = api-server\n"
-                   "[app:api-server]\n"
-                   "paste.app_factory = ceilometer.api.app:app_factory\n")
-        if six.PY3:
-            content = content.encode('utf-8')
-        self.paste = fileutils.write_to_tempfile(content=content,
-                                                 prefix='api_paste',
-                                                 suffix='.ini')
-
-        # create ceilometer.conf file
-        self.api_port = random.randint(10000, 11000)
-        self.http = httplib2.Http(proxy_info=None)
-        self.pipeline_cfg_file = self.path_get('etc/ceilometer/pipeline.yaml')
-        self.policy_file = self.path_get('etc/ceilometer/policy.json')
-
-    def tearDown(self):
-        super(BinApiTestCase, self).tearDown()
-        try:
-            self.subp.kill()
-            self.subp.wait()
-        except OSError:
-            pass
-        os.remove(self.tempfile)
-
-    def get_response(self, path):
-        url = 'http://%s:%d/%s' % ('127.0.0.1', self.api_port, path)
-
-        for x in range(10):
-            try:
-                r, c = self.http.request(url, 'GET')
-            except socket.error:
-                time.sleep(.5)
-                self.assertIsNone(self.subp.poll())
-            else:
-                return r, c
-        return None, None
-
-    def run_api(self, content, err_pipe=None):
-        if six.PY3:
-            content = content.encode('utf-8')
-
-        self.tempfile = fileutils.write_to_tempfile(content=content,
-                                                    prefix='ceilometer',
-                                                    suffix='.conf')
-        if err_pipe:
-            return subprocess.Popen(['ceilometer-api',
-                                     "--config-file=%s" % self.tempfile],
-                                    stderr=subprocess.PIPE)
-        else:
-            return subprocess.Popen(['ceilometer-api',
-                                     "--config-file=%s" % self.tempfile])
-
-    def test_v2(self):
-
-        content = ("[DEFAULT]\n"
-                   "rpc_backend=fake\n"
-                   "auth_strategy=noauth\n"
-                   "debug=true\n"
-                   "pipeline_cfg_file={0}\n"
-                   "api_paste_config={2}\n"
-                   "[api]\n"
-                   "port={3}\n"
-                   "[oslo_policy]\n"
-                   "policy_file={1}\n"
-                   "[database]\n"
-                   "connection=log://localhost\n".
-                   format(self.pipeline_cfg_file,
-                          self.policy_file,
-                          self.paste,
-                          self.api_port))
-
-        self.subp = self.run_api(content)
-
-        response, content = self.get_response('v2/meters')
-        self.assertEqual(200, response.status)
-        if six.PY3:
-            content = content.decode('utf-8')
-        self.assertEqual([], json.loads(content))
-
-    def test_v2_with_bad_storage_conn(self):
-
-        content = ("[DEFAULT]\n"
-                   "rpc_backend=fake\n"
-                   "auth_strategy=noauth\n"
-                   "debug=true\n"
-                   "pipeline_cfg_file={0}\n"
-                   "policy_file={1}\n"
-                   "api_paste_config={2}\n"
-                   "[api]\n"
-                   "port={3}\n"
-                   "[database]\n"
-                   "max_retries=1\n"
-                   "alarm_connection=log://localhost\n"
-                   "connection=dummy://localhost\n".
-                   format(self.pipeline_cfg_file,
-                          self.policy_file,
-                          self.paste,
-                          self.api_port))
-
-        self.subp = self.run_api(content, err_pipe=True)
-
-        response, content = self.get_response('v2/alarms')
-        self.assertEqual(200, response.status)
-        if six.PY3:
-            content = content.decode('utf-8')
-        self.assertEqual([], json.loads(content))
-
-        response, content = self.get_response('v2/meters')
-        self.assertEqual(500, response.status)
-
-    def test_v2_with_all_bad_conns(self):
-
-        content = ("[DEFAULT]\n"
-                   "rpc_backend=fake\n"
-                   "auth_strategy=noauth\n"
-                   "debug=true\n"
-                   "pipeline_cfg_file={0}\n"
-                   "policy_file={1}\n"
-                   "api_paste_config={2}\n"
-                   "[api]\n"
-                   "port={3}\n"
-                   "[database]\n"
-                   "max_retries=1\n"
-                   "alarm_connection=dummy://localhost\n"
-                   "connection=dummy://localhost\n"
-                   "event_connection=dummy://localhost\n".
-                   format(self.pipeline_cfg_file,
-                          self.policy_file,
-                          self.paste,
-                          self.api_port))
-
-        self.subp = self.run_api(content, err_pipe=True)
-
-        __, err = self.subp.communicate()
-
-        self.assertIn(b"Api failed to start. Failed to connect to"
-                      b" databases, purpose: metering, event, alarm", err)
-
-
-class BinCeilometerPollingServiceTestCase(base.BaseTestCase):
-    def setUp(self):
-        super(BinCeilometerPollingServiceTestCase, self).setUp()
-        content = ("[DEFAULT]\n"
-                   "rpc_backend=fake\n"
-                   "[database]\n"
-                   "connection=log://localhost\n")
-        if six.PY3:
-            content = content.encode('utf-8')
-        self.tempfile = fileutils.write_to_tempfile(content=content,
-                                                    prefix='ceilometer',
-                                                    suffix='.conf')
-        self.subp = None
-
-    def tearDown(self):
-        super(BinCeilometerPollingServiceTestCase, self).tearDown()
-        if self.subp:
-            self.subp.kill()
-        os.remove(self.tempfile)
-
-    def test_starting_with_duplication_namespaces(self):
-        self.subp = subprocess.Popen(['ceilometer-polling',
-                                      "--config-file=%s" % self.tempfile,
-                                      "--polling-namespaces",
-                                      "compute",
-                                      "compute"],
-                                     stderr=subprocess.PIPE)
-        out = self.subp.stderr.read(1024)
-        self.assertIn(b'Duplicated values: [\'compute\', \'compute\'] '
-                      b'found in CLI options, auto de-duplidated', out)
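The pattern used throughout the deleted test_bin.py is worth noting: write a throwaway config file, launch a console script against it, and check the exit status or stderr. A condensed sketch of that pattern, using the ceilometer-dbsync binary and the config content shown in the deleted tests (the temp-file handling here uses the stdlib instead of oslo_utils.fileutils):

    import os
    import subprocess
    import tempfile

    content = ("[DEFAULT]\n"
               "rpc_backend=fake\n"
               "[database]\n"
               "connection=log://localhost\n")

    # Write the config to a temporary file the subprocess can read.
    fd, conf_path = tempfile.mkstemp(prefix='ceilometer', suffix='.conf')
    with os.fdopen(fd, 'w') as f:
        f.write(content)

    try:
        proc = subprocess.Popen(['ceilometer-dbsync',
                                 '--config-file=%s' % conf_path])
        # A zero exit status means the migration ran cleanly.
        assert proc.wait() == 0
    finally:
        os.remove(conf_path)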
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/test_collector.py ceilometer-5.0.0~b3/ceilometer/tests/test_collector.py
--- ceilometer-5.0.0~b2/ceilometer/tests/test_collector.py 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/test_collector.py 1970-01-01 00:00:00.000000000 +0000
@@ -1,302 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import socket
-
-import mock
-import msgpack
-from oslo_config import fixture as fixture_config
-from oslo_context import context
-import oslo_messaging
-from oslo_utils import timeutils
-from oslotest import mockpatch
-from stevedore import extension
-
-from ceilometer import collector
-from ceilometer import dispatcher
-from ceilometer import messaging
-from ceilometer.publisher import utils
-from ceilometer import sample
-from ceilometer.tests import base as tests_base
-
-
-class FakeException(Exception):
-    pass
-
-
-class FakeConnection(object):
-    def create_worker(self, topic, proxy, pool_name):
-        pass
-
-
-class TestCollector(tests_base.BaseTestCase):
-    def setUp(self):
-        super(TestCollector, self).setUp()
-        self.CONF = self.useFixture(fixture_config.Config()).conf
-        self.CONF.import_opt("connection", "oslo_db.options", group="database")
-        self.CONF.set_override("connection", "log://", group='database')
-        self.CONF.set_override('telemetry_secret', 'not-so-secret',
-                               group='publisher')
-        self._setup_messaging()
-
-        self.counter = sample.Sample(
-            name='foobar',
-            type='bad',
-            unit='F',
-            volume=1,
-            user_id='jd',
-            project_id='ceilometer',
-            resource_id='cat',
-            timestamp=timeutils.utcnow().isoformat(),
-            resource_metadata={},
-        ).as_dict()
-
-        self.utf8_msg = utils.meter_message_from_counter(
-            sample.Sample(
-                name=u'test',
-                type=sample.TYPE_CUMULATIVE,
-                unit=u'',
-                volume=1,
-                user_id=u'test',
-                project_id=u'test',
-                resource_id=u'test_run_tasks',
-                timestamp=timeutils.utcnow().isoformat(),
-                resource_metadata={u'name': [([u'TestPublish'])]},
-                source=u'testsource',
-            ),
-            'not-so-secret')
-
-        self.srv = collector.CollectorService()
-
-        self.useFixture(mockpatch.PatchObject(
-            self.srv.tg, 'add_thread',
-            side_effect=self._dummy_thread_group_add_thread))
-
-    @staticmethod
-    def _dummy_thread_group_add_thread(method):
-        method()
-
-    def _setup_messaging(self, enabled=True):
-        if enabled:
-            self.setup_messaging(self.CONF)
-        else:
-            self.useFixture(mockpatch.Patch(
-                'ceilometer.messaging.get_transport',
-                return_value=None))
-
-    def _setup_fake_dispatcher(self):
-        plugin = mock.MagicMock()
-        fake_dispatcher = extension.ExtensionManager.make_test_instance([
-            extension.Extension('test', None, None, plugin,),
-        ], propagate_map_exceptions=True)
-        self.useFixture(mockpatch.Patch(
-            'ceilometer.dispatcher.load_dispatcher_manager',
-            return_value=fake_dispatcher))
-        return plugin
-
-    def _make_fake_socket(self, sample):
-        def recvfrom(size):
-            # Make the loop stop
-            self.srv.stop()
-            return msgpack.dumps(sample), ('127.0.0.1', 12345)
-
-        sock = mock.Mock()
-        sock.recvfrom = recvfrom
-        return sock
-
-    def _verify_udp_socket(self, udp_socket):
-        conf = self.CONF.collector
-        udp_socket.setsockopt.assert_called_once_with(socket.SOL_SOCKET,
-                                                      socket.SO_REUSEADDR, 1)
-        udp_socket.bind.assert_called_once_with((conf.udp_address,
-                                                 conf.udp_port))
-
-    def test_record_metering_data(self):
-        mock_dispatcher = self._setup_fake_dispatcher()
-        self.srv.dispatcher_manager = dispatcher.load_dispatcher_manager()
-        self.srv.record_metering_data(None, self.counter)
-        mock_dispatcher.record_metering_data.assert_called_once_with(
-            data=self.counter)
-
-    def test_udp_receive_base(self):
-        self._setup_messaging(False)
-        mock_dispatcher = self._setup_fake_dispatcher()
-        self.counter['source'] = 'mysource'
-        self.counter['counter_name'] = self.counter['name']
-        self.counter['counter_volume'] = self.counter['volume']
-        self.counter['counter_type'] = self.counter['type']
-        self.counter['counter_unit'] = self.counter['unit']
-
-        udp_socket = self._make_fake_socket(self.counter)
-
-        with mock.patch('socket.socket') as mock_socket:
-            mock_socket.return_value = udp_socket
-            self.srv.start()
-            mock_socket.assert_called_with(socket.AF_INET, socket.SOCK_DGRAM)
-
-        self._verify_udp_socket(udp_socket)
-
-        mock_dispatcher.record_metering_data.assert_called_once_with(
-            self.counter)
-
-    def test_udp_socket_ipv6(self):
-        self._setup_messaging(False)
-        self.CONF.set_override('udp_address', '::1', group='collector')
-        self._setup_fake_dispatcher()
-        sock = self._make_fake_socket('data')
-
-        with mock.patch.object(socket, 'socket') as mock_socket:
-            mock_socket.return_value = sock
-            self.srv.start()
-            mock_socket.assert_called_with(socket.AF_INET6, socket.SOCK_DGRAM)
-
-    def test_udp_receive_storage_error(self):
-        self._setup_messaging(False)
-        mock_dispatcher = self._setup_fake_dispatcher()
-        mock_dispatcher.record_metering_data.side_effect = self._raise_error
-
-        self.counter['source'] = 'mysource'
-        self.counter['counter_name'] = self.counter['name']
-        self.counter['counter_volume'] = self.counter['volume']
-        self.counter['counter_type'] = self.counter['type']
-        self.counter['counter_unit'] = self.counter['unit']
-
-        udp_socket = self._make_fake_socket(self.counter)
-        with mock.patch('socket.socket', return_value=udp_socket):
-            self.srv.start()
-
-        self._verify_udp_socket(udp_socket)
-
-        mock_dispatcher.record_metering_data.assert_called_once_with(
-            self.counter)
-
-    @staticmethod
-    def _raise_error(*args, **kwargs):
-        raise Exception
-
-    def test_udp_receive_bad_decoding(self):
-        self._setup_messaging(False)
-        udp_socket = self._make_fake_socket(self.counter)
-        with mock.patch('socket.socket', return_value=udp_socket):
-            with mock.patch('msgpack.loads', self._raise_error):
-                self.srv.start()
-
-        self._verify_udp_socket(udp_socket)
-
-    @mock.patch.object(oslo_messaging.MessageHandlingServer, 'start')
-    @mock.patch.object(collector.CollectorService, 'start_udp')
-    def test_only_udp(self, udp_start, rpc_start):
-        """Check that only UDP is started if messaging transport is unset."""
-        self._setup_messaging(False)
-        udp_socket = self._make_fake_socket(self.counter)
-        with mock.patch('socket.socket', return_value=udp_socket):
-            self.srv.start()
-            self.assertEqual(0, rpc_start.call_count)
-            self.assertEqual(1, udp_start.call_count)
-
-    @mock.patch.object(oslo_messaging.MessageHandlingServer, 'start')
-    @mock.patch.object(collector.CollectorService, 'start_udp')
-    def test_only_rpc(self, udp_start, rpc_start):
-        """Check that only RPC is started if udp_address is empty."""
-        self.CONF.set_override('udp_address', '', group='collector')
-        self.srv.start()
-        # two calls because two servers (notification and rpc)
-        self.assertEqual(2, rpc_start.call_count)
-        self.assertEqual(0, udp_start.call_count)
-
-    def test_udp_receive_valid_encoding(self):
-        self._setup_messaging(False)
-        mock_dispatcher = self._setup_fake_dispatcher()
-        self.data_sent = []
-        with mock.patch('socket.socket',
-                        return_value=self._make_fake_socket(self.utf8_msg)):
-            self.srv.start()
-            self.assertTrue(utils.verify_signature(
-                mock_dispatcher.method_calls[0][1][0],
-                "not-so-secret"))
-
-    @mock.patch('ceilometer.storage.impl_log.LOG')
-    def test_collector_no_mock(self, mylog):
-        self.CONF.set_override('udp_address', '', group='collector')
-        self.srv.start()
-        mylog.info.side_effect = lambda *args: self.srv.stop()
-
-        client = messaging.get_rpc_client(self.transport, version='1.0')
-        cclient = client.prepare(topic='metering')
-        cclient.cast(context.RequestContext(),
-                     'record_metering_data', data=[self.utf8_msg])
-
-        self.srv.rpc_server.wait()
-        mylog.info.assert_called_once_with(
-            'metering data test for test_run_tasks: 1')
-
-    def _test_collector_requeue(self, listener):
-
-        mock_dispatcher = self._setup_fake_dispatcher()
-        self.srv.dispatcher_manager = dispatcher.load_dispatcher_manager()
-        mock_dispatcher.record_metering_data.side_effect = Exception('boom')
-        mock_dispatcher.record_events.side_effect = Exception('boom')
-
-        self.srv.start()
-        endp = getattr(self.srv, listener).dispatcher.endpoints[0]
-        ret = endp.sample({}, 'pub_id', 'event', {}, {})
-        self.assertEqual(oslo_messaging.NotificationResult.REQUEUE,
-                         ret)
-
-    @mock.patch.object(oslo_messaging.MessageHandlingServer, 'start',
-                       mock.Mock())
-    @mock.patch.object(collector.CollectorService, 'start_udp', mock.Mock())
-    def test_collector_sample_requeue(self):
-        self.CONF.set_override('requeue_sample_on_dispatcher_error', True,
-                               group='collector')
-        self._test_collector_requeue('sample_listener')
-
-    @mock.patch.object(oslo_messaging.MessageHandlingServer, 'start',
-                       mock.Mock())
-    @mock.patch.object(collector.CollectorService, 'start_udp', mock.Mock())
-    def test_collector_event_requeue(self):
-        self.CONF.set_override('requeue_event_on_dispatcher_error', True,
-                               group='collector')
-        self.CONF.set_override('store_events', True, group='notification')
-        self._test_collector_requeue('event_listener')
-
-    def _test_collector_no_requeue(self, listener):
-        mock_dispatcher = self._setup_fake_dispatcher()
-        self.srv.dispatcher_manager = dispatcher.load_dispatcher_manager()
-        mock_dispatcher.record_metering_data.side_effect = (FakeException
-                                                            ('boom'))
-        mock_dispatcher.record_events.side_effect = (FakeException
-                                                     ('boom'))
-
-        self.srv.start()
-        endp = getattr(self.srv, listener).dispatcher.endpoints[0]
-        self.assertRaises(FakeException, endp.sample, {}, 'pub_id',
-                          'event', {}, {})
-
-    @mock.patch.object(oslo_messaging.MessageHandlingServer, 'start',
-                       mock.Mock())
-    @mock.patch.object(collector.CollectorService, 'start_udp', mock.Mock())
-    def test_collector_sample_no_requeue(self):
-        self.CONF.set_override('requeue_sample_on_dispatcher_error', False,
-                               group='collector')
-        self._test_collector_no_requeue('sample_listener')
-
-    @mock.patch.object(oslo_messaging.MessageHandlingServer, 'start',
                       mock.Mock())
-    @mock.patch.object(collector.CollectorService, 'start_udp', mock.Mock())
-    def test_collector_event_no_requeue(self):
-        self.CONF.set_override('requeue_event_on_dispatcher_error', False,
-                               group='collector')
-        self.CONF.set_override('store_events', True, group='notification')
-        self._test_collector_no_requeue('event_listener')
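The UDP tests above fake out the socket with a mock; the loop they exercise is simple enough to sketch standalone. This is an illustrative reading loop, not the collector's actual code: bind a datagram socket, read one msgpack-encoded metering message per datagram, and skip anything that fails to decode (which is what test_udp_receive_bad_decoding checks). The address and port are placeholders:

    import socket

    import msgpack

    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind(('127.0.0.1', 4952))

    while True:
        data, _addr = sock.recvfrom(64 * 1024)
        try:
            # Each datagram is one msgpack-encoded metering message.
            msg = msgpack.loads(data)
        except Exception:
            # A malformed datagram is skipped; the real service logs it
            # and keeps reading rather than dying.
            continue
        print(msg)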
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/test_coordination.py ceilometer-5.0.0~b3/ceilometer/tests/test_coordination.py
--- ceilometer-5.0.0~b2/ceilometer/tests/test_coordination.py 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/test_coordination.py 1970-01-01 00:00:00.000000000 +0000
@@ -1,249 +0,0 @@
-#
-# Copyright 2014 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import logging
-
-import mock
-from oslo_config import fixture as fixture_config
-import tooz.coordination
-
-from ceilometer import coordination
-from ceilometer.tests import base
-from ceilometer import utils
-
-
-class MockToozCoordinator(object):
-    def __init__(self, member_id, shared_storage):
-        self._member_id = member_id
-        self._groups = shared_storage
-
-    def start(self):
-        pass
-
-    def stop(self):
-        pass
-
-    def heartbeat(self):
-        pass
-
-    def create_group(self, group_id):
-        if group_id in self._groups:
-            return MockAsyncError(
-                tooz.coordination.GroupAlreadyExist(group_id))
-        self._groups[group_id] = {}
-        return MockAsyncResult(None)
-
-    def join_group(self, group_id, capabilities=b''):
-        if group_id not in self._groups:
-            return MockAsyncError(
-                tooz.coordination.GroupNotCreated(group_id))
-        if self._member_id in self._groups[group_id]:
-            return MockAsyncError(
-                tooz.coordination.MemberAlreadyExist(group_id,
-                                                     self._member_id))
-        self._groups[group_id][self._member_id] = {
-            "capabilities": capabilities,
-        }
-        return MockAsyncResult(None)
-
-    def leave_group(self, group_id):
-        return MockAsyncResult(None)
-
-    def get_members(self, group_id):
-        if group_id not in self._groups:
-            return MockAsyncError(
-                tooz.coordination.GroupNotCreated(group_id))
-        return MockAsyncResult(self._groups[group_id])
-
-
-class MockToozCoordExceptionRaiser(MockToozCoordinator):
-    def start(self):
-        raise tooz.coordination.ToozError('error')
-
-    def heartbeat(self):
-        raise tooz.coordination.ToozError('error')
-
-    def join_group(self, group_id, capabilities=b''):
-        raise tooz.coordination.ToozError('error')
-
-    def get_members(self, group_id):
-        raise tooz.coordination.ToozError('error')
-
-
-class MockAsyncResult(tooz.coordination.CoordAsyncResult):
-    def __init__(self, result):
-        self.result = result
-
-    def get(self, timeout=0):
-        return self.result
-
-    @staticmethod
-    def done():
-        return True
-
-
-class MockAsyncError(tooz.coordination.CoordAsyncResult):
-    def __init__(self, error):
-        self.error = error
-
-    def get(self, timeout=0):
-        raise self.error
-
-    @staticmethod
-    def done():
-        return True
-
-
-class MockLoggingHandler(logging.Handler):
-    """Mock logging handler to check for expected logs."""
-
-    def __init__(self, *args, **kwargs):
-        self.reset()
-        logging.Handler.__init__(self, *args, **kwargs)
-
-    def emit(self, record):
-        self.messages[record.levelname.lower()].append(record.getMessage())
-
-    def reset(self):
-        self.messages = {'debug': [],
-                         'info': [],
-                         'warning': [],
-                         'error': [],
-                         'critical': []}
-
-
-class TestPartitioning(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestPartitioning, self).setUp()
-        self.CONF = self.useFixture(fixture_config.Config()).conf
-        self.str_handler = MockLoggingHandler()
-        coordination.LOG.logger.addHandler(self.str_handler)
-        self.shared_storage = {}
-
-    def _get_new_started_coordinator(self, shared_storage, agent_id=None,
-                                     coordinator_cls=None):
-        coordinator_cls = coordinator_cls or MockToozCoordinator
-        self.CONF.set_override('backend_url', 'xxx://yyy',
-                               group='coordination')
-        with mock.patch('tooz.coordination.get_coordinator',
-                        lambda _, member_id:
-                        coordinator_cls(member_id, shared_storage)):
-            pc = coordination.PartitionCoordinator(agent_id)
-            pc.start()
-            return pc
-
-    def _usage_simulation(self, *agents_kwargs):
-        partition_coordinators = []
-        for kwargs in agents_kwargs:
-            partition_coordinator = self._get_new_started_coordinator(
-                self.shared_storage, kwargs['agent_id'], kwargs.get(
-                    'coordinator_cls'))
-            partition_coordinator.join_group(kwargs['group_id'])
-            partition_coordinators.append(partition_coordinator)
-
-        for i, kwargs in enumerate(agents_kwargs):
-            all_resources = kwargs.get('all_resources', [])
-            expected_resources = kwargs.get('expected_resources', [])
-            actual_resources = partition_coordinators[i].extract_my_subset(
-                kwargs['group_id'], all_resources)
-            self.assertEqual(expected_resources, actual_resources)
-
-    def test_single_group(self):
-        agents = [dict(agent_id='agent1', group_id='group'),
-                  dict(agent_id='agent2', group_id='group')]
-        self._usage_simulation(*agents)
-
-        self.assertEqual(['group'], sorted(self.shared_storage.keys()))
-        self.assertEqual(['agent1', 'agent2'],
-                         sorted(self.shared_storage['group'].keys()))
-
-    def test_multiple_groups(self):
-        agents = [dict(agent_id='agent1', group_id='group1'),
-                  dict(agent_id='agent2', group_id='group2')]
-        self._usage_simulation(*agents)
-
-        self.assertEqual(['group1', 'group2'],
-                         sorted(self.shared_storage.keys()))
-
-    def test_partitioning(self):
-        all_resources = ['resource_%s' % i for i in range(1000)]
-        agents = ['agent_%s' % i for i in range(10)]
-
-        expected_resources = [list() for _ in range(len(agents))]
-        hr = utils.HashRing(agents)
-        for r in all_resources:
-            key = agents.index(hr.get_node(r))
-            expected_resources[key].append(r)
-
-        agents_kwargs = []
-        for i, agent in enumerate(agents):
-            agents_kwargs.append(dict(agent_id=agent,
-                                      group_id='group',
-                                      all_resources=all_resources,
-                                      expected_resources=expected_resources[i]))
-        self._usage_simulation(*agents_kwargs)
-
-    def test_coordination_backend_offline(self):
-        agents = [dict(agent_id='agent1',
-                       group_id='group',
-                       all_resources=['res1', 'res2'],
-                       expected_resources=[],
-                       coordinator_cls=MockToozCoordExceptionRaiser)]
-        self._usage_simulation(*agents)
-        expected_errors = ['Error getting group membership info from '
-                           'coordination backend.',
-                           'Error connecting to coordination backend.']
-        for e in expected_errors:
-            self.assertIn(e, self.str_handler.messages['error'])
-
-    def test_reconnect(self):
-        coord = self._get_new_started_coordinator({}, 'a',
-                                                  MockToozCoordExceptionRaiser)
-        with mock.patch('tooz.coordination.get_coordinator',
-                        return_value=MockToozCoordExceptionRaiser('a', {})):
-            coord.heartbeat()
-        expected_errors = ['Error connecting to coordination backend.',
-                           'Error sending a heartbeat to coordination '
-                           'backend.']
-        for e in expected_errors:
-            self.assertIn(e, self.str_handler.messages['error'])
-
-        self.str_handler.messages['error'] = []
-        with mock.patch('tooz.coordination.get_coordinator',
-                        return_value=MockToozCoordinator('a', {})):
-            coord.heartbeat()
-        for e in expected_errors:
-            self.assertNotIn(e, self.str_handler.messages['error'])
-
-    def test_group_id_none(self):
-        coord = self._get_new_started_coordinator({}, 'a')
-        self.assertTrue(coord._started)
-
-        with mock.patch.object(coord._coordinator, 'join_group') as mocked:
-            coord.join_group(None)
-            self.assertEqual(0, mocked.call_count)
-        with mock.patch.object(coord._coordinator, 'leave_group') as mocked:
-            coord.leave_group(None)
-            self.assertEqual(0, mocked.call_count)
-
-    def test_stop(self):
-        coord = self._get_new_started_coordinator({}, 'a')
-        self.assertTrue(coord._started)
-        coord.join_group("123")
-        coord.stop()
-        self.assertIsEmpty(coord._groups)
-        self.assertFalse(coord._started)
-        self.assertIsNone(coord._coordinator)
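The test_partitioning case above checks the core property of work partitioning: every resource lands on exactly one agent, as decided by a consistent hash ring. A compact illustration of that property (this simple md5-based ring is a stand-in for ceilometer.utils.HashRing, not its implementation):

    import bisect
    import hashlib


    class HashRing(object):
        def __init__(self, nodes, replicas=100):
            # Place each node on the ring many times to even out the load.
            self._ring = sorted(
                (self._hash('%s-%s' % (node, r)), node)
                for node in nodes for r in range(replicas))
            self._keys = [h for h, _ in self._ring]

        @staticmethod
        def _hash(key):
            return int(hashlib.md5(str(key).encode()).hexdigest(), 16)

        def get_node(self, key):
            # The owner is the first node clockwise from the key's hash.
            pos = bisect.bisect(self._keys, self._hash(key)) % len(self._ring)
            return self._ring[pos][1]


    agents = ['agent_%s' % i for i in range(10)]
    ring = HashRing(agents)
    owners = {'resource_%s' % i: ring.get_node('resource_%s' % i)
              for i in range(1000)}
    # Every resource has exactly one owner, drawn from the agent set.
    assert set(owners.values()) <= set(agents)

Because every agent builds the same ring from the shared membership list, each can compute its own subset locally without further coordination traffic.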
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/test_decoupled_pipeline.py ceilometer-5.0.0~b3/ceilometer/tests/test_decoupled_pipeline.py
--- ceilometer-5.0.0~b2/ceilometer/tests/test_decoupled_pipeline.py 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/test_decoupled_pipeline.py 1970-01-01 00:00:00.000000000 +0000
@@ -1,292 +0,0 @@
-#
-# Copyright 2014 Red Hat, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import yaml
-
-from ceilometer import pipeline
-from ceilometer import sample
-from ceilometer.tests import pipeline_base
-
-
-class TestDecoupledPipeline(pipeline_base.BasePipelineTestCase):
-    def _setup_pipeline_cfg(self):
-        source = {'name': 'test_source',
-                  'interval': 5,
-                  'counters': ['a'],
-                  'resources': [],
-                  'sinks': ['test_sink']}
-        sink = {'name': 'test_sink',
-                'transformers': [{'name': 'update', 'parameters': {}}],
-                'publishers': ['test://']}
-        self.pipeline_cfg = {'sources': [source], 'sinks': [sink]}
-
-    def _augment_pipeline_cfg(self):
-        self.pipeline_cfg['sources'].append({
-            'name': 'second_source',
-            'interval': 5,
-            'counters': ['b'],
-            'resources': [],
-            'sinks': ['second_sink']
-        })
-        self.pipeline_cfg['sinks'].append({
-            'name': 'second_sink',
-            'transformers': [{
-                'name': 'update',
-                'parameters':
-                {
-                    'append_name': '_new',
-                }
-            }],
-            'publishers': ['new'],
-        })
-
-    def _break_pipeline_cfg(self):
-        self.pipeline_cfg['sources'].append({
-            'name': 'second_source',
-            'interval': 5,
-            'counters': ['b'],
-            'resources': [],
-            'sinks': ['second_sink']
-        })
-        self.pipeline_cfg['sinks'].append({
-            'name': 'second_sink',
-            'transformers': [{
-                'name': 'update',
-                'parameters':
-                {
-                    'append_name': '_new',
-                }
-            }],
-            'publishers': ['except'],
-        })
-
-    def _dup_pipeline_name_cfg(self):
-        self.pipeline_cfg['sources'].append({
-            'name': 'test_source',
-            'interval': 5,
-            'counters': ['b'],
-            'resources': [],
-            'sinks': ['test_sink']
-        })
-
-    def _set_pipeline_cfg(self, field, value):
-        if field in self.pipeline_cfg['sources'][0]:
-            self.pipeline_cfg['sources'][0][field] = value
-        else:
-            self.pipeline_cfg['sinks'][0][field] = value
-
-    def _extend_pipeline_cfg(self, field, value):
-        if field in self.pipeline_cfg['sources'][0]:
-            self.pipeline_cfg['sources'][0][field].extend(value)
-        else:
-            self.pipeline_cfg['sinks'][0][field].extend(value)
-
-    def _unset_pipeline_cfg(self, field):
-        if field in self.pipeline_cfg['sources'][0]:
-            del self.pipeline_cfg['sources'][0][field]
-        else:
-            del self.pipeline_cfg['sinks'][0][field]
-
-    def test_source_no_sink(self):
-        del self.pipeline_cfg['sinks']
-        self._exception_create_pipelinemanager()
-
-    def test_source_dangling_sink(self):
-        self.pipeline_cfg['sources'].append({
-            'name': 'second_source',
-            'interval': 5,
-            'counters': ['b'],
-            'resources': [],
-            'sinks': ['second_sink']
-        })
-        self._exception_create_pipelinemanager()
-
-    def test_sink_no_source(self):
-        del self.pipeline_cfg['sources']
-        self._exception_create_pipelinemanager()
-
-    def test_source_with_multiple_sinks(self):
-        counter_cfg = ['a', 'b']
-        self._set_pipeline_cfg('counters', counter_cfg)
-        self.pipeline_cfg['sinks'].append({
-            'name': 'second_sink',
-            'transformers': [{
-                'name': 'update',
-                'parameters':
-                {
-                    'append_name': '_new',
-                }
-            }],
-            'publishers': ['new'],
-        })
-        self.pipeline_cfg['sources'][0]['sinks'].append('second_sink')
-
-        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
-                                                    self.transformer_manager)
-        with pipeline_manager.publisher(None) as p:
-            p([self.test_counter])
-
-        self.test_counter = sample.Sample(
-            name='b',
-            type=self.test_counter.type,
-            volume=self.test_counter.volume,
-            unit=self.test_counter.unit,
-            user_id=self.test_counter.user_id,
-            project_id=self.test_counter.project_id,
-            resource_id=self.test_counter.resource_id,
-            timestamp=self.test_counter.timestamp,
-            resource_metadata=self.test_counter.resource_metadata,
-        )
-
-        with pipeline_manager.publisher(None) as p:
-            p([self.test_counter])
-
-        self.assertEqual(2, len(pipeline_manager.pipelines))
-        self.assertEqual('test_source:test_sink',
-                         str(pipeline_manager.pipelines[0]))
-        self.assertEqual('test_source:second_sink',
-                         str(pipeline_manager.pipelines[1]))
-        test_publisher = pipeline_manager.pipelines[0].publishers[0]
-        new_publisher = pipeline_manager.pipelines[1].publishers[0]
-        for publisher, sfx in [(test_publisher, '_update'),
-                               (new_publisher, '_new')]:
-            self.assertEqual(2, len(publisher.samples))
-            self.assertEqual(2, publisher.calls)
-            self.assertEqual('a' + sfx, getattr(publisher.samples[0], "name"))
-            self.assertEqual('b' + sfx, getattr(publisher.samples[1], "name"))
-
-    def test_multiple_sources_with_single_sink(self):
-        self.pipeline_cfg['sources'].append({
-            'name': 'second_source',
-            'interval': 5,
-            'counters': ['b'],
-            'resources': [],
-            'sinks': ['test_sink']
-        })
-
-        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
-                                                    self.transformer_manager)
-        with pipeline_manager.publisher(None) as p:
-            p([self.test_counter])
-
-        self.test_counter = sample.Sample(
-            name='b',
-            type=self.test_counter.type,
-            volume=self.test_counter.volume,
-            unit=self.test_counter.unit,
-            user_id=self.test_counter.user_id,
-            project_id=self.test_counter.project_id,
-            resource_id=self.test_counter.resource_id,
-            timestamp=self.test_counter.timestamp,
-            resource_metadata=self.test_counter.resource_metadata,
-        )
-
-        with pipeline_manager.publisher(None) as p:
-            p([self.test_counter])
-
-        self.assertEqual(2, len(pipeline_manager.pipelines))
-        self.assertEqual('test_source:test_sink',
-                         str(pipeline_manager.pipelines[0]))
-        self.assertEqual('second_source:test_sink',
-                         str(pipeline_manager.pipelines[1]))
-        test_publisher = pipeline_manager.pipelines[0].publishers[0]
-        another_publisher = pipeline_manager.pipelines[1].publishers[0]
-        for publisher in [test_publisher, another_publisher]:
-            self.assertEqual(2, len(publisher.samples))
-            self.assertEqual(2, publisher.calls)
-            self.assertEqual('a_update', getattr(publisher.samples[0], "name"))
-            self.assertEqual('b_update', getattr(publisher.samples[1], "name"))
-
-        transformed_samples = self.TransformerClass.samples
-        self.assertEqual(2, len(transformed_samples))
-        self.assertEqual(['a', 'b'],
-                         [getattr(s, 'name') for s in transformed_samples])
-
-    def _do_test_rate_of_change_in_boilerplate_pipeline_cfg(self, index,
-                                                            meters, units):
-        with open('etc/ceilometer/pipeline.yaml') as fap:
-            data = fap.read()
-        pipeline_cfg = yaml.safe_load(data)
-        for s in pipeline_cfg['sinks']:
-            s['publishers'] = ['test://']
-        pipeline_manager = pipeline.PipelineManager(pipeline_cfg,
-                                                    self.transformer_manager)
-        pipe = pipeline_manager.pipelines[index]
-        self._do_test_rate_of_change_mapping(pipe, meters, units)
-
-    def test_rate_of_change_boilerplate_disk_read_cfg(self):
-        meters = ('disk.read.bytes', 'disk.read.requests')
-        units = ('B', 'request')
-        self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(2,
-                                                                 meters,
-                                                                 units)
-
-    def test_rate_of_change_boilerplate_disk_write_cfg(self):
-        meters = ('disk.write.bytes', 'disk.write.requests')
-        units = ('B', 'request')
-        self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(2,
-                                                                 meters,
-                                                                 units)
-
-    def test_rate_of_change_boilerplate_network_incoming_cfg(self):
-        meters = ('network.incoming.bytes', 'network.incoming.packets')
-        units = ('B', 'packet')
-        self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(3,
-                                                                 meters,
-                                                                 units)
-
-    def test_rate_of_change_boilerplate_per_disk_device_read_cfg(self):
-        meters = ('disk.device.read.bytes', 'disk.device.read.requests')
-        units = ('B', 'request')
-        self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(2,
-                                                                 meters,
-                                                                 units)
-
-    def test_rate_of_change_boilerplate_per_disk_device_write_cfg(self):
-        meters = ('disk.device.write.bytes', 'disk.device.write.requests')
-        units = ('B', 'request')
-        self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(2,
-                                                                 meters,
-                                                                 units)
-
-    def test_rate_of_change_boilerplate_network_outgoing_cfg(self):
-        meters = ('network.outgoing.bytes', 'network.outgoing.packets')
-        units = ('B', 'packet')
-        self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(3,
-                                                                 meters,
-                                                                 units)
-
-    def test_duplicated_sinks_names(self):
-        self.pipeline_cfg['sinks'].append({
-            'name': 'test_sink',
-            'publishers': ['except'],
-        })
-        self.assertRaises(pipeline.PipelineException,
-                          pipeline.PipelineManager,
-                          self.pipeline_cfg,
-                          self.transformer_manager)
-
-    def test_duplicated_source_names(self):
-        self.pipeline_cfg['sources'].append({
-            'name': 'test_source',
-            'interval': 5,
-            'counters': ['a'],
-            'resources': [],
-            'sinks': ['test_sink']
-        })
-        self.assertRaises(pipeline.PipelineException,
-                          pipeline.PipelineManager,
-                          self.pipeline_cfg,
-                          self.transformer_manager)
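The decoupled layout these tests build by hand separates what to collect (sources) from what to do with it (sinks): a source names the meters, interval, and target sinks, while a sink names its transformers and publishers. A minimal sketch of that shape and of the cross-reference check the tests expect PipelineManager to enforce (the validation loop here is illustrative, not the manager's code):

    pipeline_cfg = {
        'sources': [{'name': 'test_source',
                     'interval': 5,
                     'counters': ['a'],
                     'resources': [],
                     'sinks': ['test_sink']}],
        'sinks': [{'name': 'test_sink',
                   'transformers': [{'name': 'update', 'parameters': {}}],
                   'publishers': ['test://']}],
    }

    # Every sink a source references must exist; a dangling reference
    # (see test_source_dangling_sink) is a configuration error.
    sink_names = {s['name'] for s in pipeline_cfg['sinks']}
    for source in pipeline_cfg['sources']:
        assert set(source['sinks']) <= sink_names, 'dangling sink reference'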
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/test_event_pipeline.py ceilometer-5.0.0~b3/ceilometer/tests/test_event_pipeline.py
--- ceilometer-5.0.0~b2/ceilometer/tests/test_event_pipeline.py 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/test_event_pipeline.py 1970-01-01 00:00:00.000000000 +0000
@@ -1,366 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-import traceback
-import uuid
-
-from oslotest import base
-from oslotest import mockpatch
-
-from ceilometer.event.storage import models
-from ceilometer import pipeline
-from ceilometer import publisher
-from ceilometer.publisher import test as test_publisher
-
-
-class EventPipelineTestCase(base.BaseTestCase):
-
-    def get_publisher(self, url, namespace=''):
-        fake_drivers = {'test://': test_publisher.TestPublisher,
-                        'new://': test_publisher.TestPublisher,
-                        'except://': self.PublisherClassException}
-        return fake_drivers[url](url)
-
-    class PublisherClassException(publisher.PublisherBase):
-        def publish_samples(self, ctxt, samples):
-            pass
-
-        def publish_events(self, ctxt, events):
-            raise Exception()
-
-    def setUp(self):
-        super(EventPipelineTestCase, self).setUp()
-        self.p_type = pipeline.EVENT_TYPE
-        self.transformer_manager = None
-
-        self.test_event = models.Event(
-            message_id=uuid.uuid4(),
-            event_type='a',
-            generated=datetime.datetime.utcnow(),
-            traits=[
-                models.Trait('t_text', 1, 'text_trait'),
-                models.Trait('t_int', 2, 'int_trait'),
-                models.Trait('t_float', 3, 'float_trait'),
-                models.Trait('t_datetime', 4, 'datetime_trait')
-            ],
-            raw={'status': 'started'}
-        )
-
-        self.test_event2 = models.Event(
-            message_id=uuid.uuid4(),
-            event_type='b',
-            generated=datetime.datetime.utcnow(),
-            traits=[
-                models.Trait('t_text', 1, 'text_trait'),
-                models.Trait('t_int', 2, 'int_trait'),
-                models.Trait('t_float', 3, 'float_trait'),
-                models.Trait('t_datetime', 4, 'datetime_trait')
-            ],
-            raw={'status': 'stopped'}
-        )
-
-        self.useFixture(mockpatch.PatchObject(
-            publisher, 'get_publisher', side_effect=self.get_publisher))
-
-        self._setup_pipeline_cfg()
-
-        self._reraise_exception = True
-        self.useFixture(mockpatch.Patch(
-            'ceilometer.pipeline.LOG.exception',
-            side_effect=self._handle_reraise_exception))
-
-    def _handle_reraise_exception(self, msg):
-        if self._reraise_exception:
-            raise Exception(traceback.format_exc())
-
-    def _setup_pipeline_cfg(self):
-        """Setup the appropriate form of pipeline config."""
-        source = {'name': 'test_source',
-                  'events': ['a'],
-                  'sinks': ['test_sink']}
-        sink = {'name': 'test_sink',
-                'publishers': ['test://']}
-        self.pipeline_cfg = {'sources': [source], 'sinks': [sink]}
-
-    def _augment_pipeline_cfg(self):
-        """Augment the pipeline config with an additional element."""
-        self.pipeline_cfg['sources'].append({
-            'name': 'second_source',
-            'events': ['b'],
-            'sinks': ['second_sink']
-        })
-        self.pipeline_cfg['sinks'].append({
-            'name': 'second_sink',
-            'publishers': ['new://'],
-        })
-
-    def _break_pipeline_cfg(self):
-        """Break the pipeline config with a malformed element."""
-        self.pipeline_cfg['sources'].append({
-            'name': 'second_source',
-            'events': ['b'],
-            'sinks': ['second_sink']
-        })
-        self.pipeline_cfg['sinks'].append({
-            'name': 'second_sink',
-            'publishers': ['except'],
-        })
-
-    def _dup_pipeline_name_cfg(self):
-        """Break the pipeline config with duplicate pipeline name."""
-        self.pipeline_cfg['sources'].append({
-            'name': 'test_source',
-            'events': ['a'],
-            'sinks': ['test_sink']
-        })
-
-    def _set_pipeline_cfg(self, field, value):
-        if field in self.pipeline_cfg['sources'][0]:
-            self.pipeline_cfg['sources'][0][field] = value
-        else:
-            self.pipeline_cfg['sinks'][0][field] = value
-
-    def _extend_pipeline_cfg(self, field, value):
-        if field in self.pipeline_cfg['sources'][0]:
-            self.pipeline_cfg['sources'][0][field].extend(value)
-        else:
-            self.pipeline_cfg['sinks'][0][field].extend(value)
-
-    def _unset_pipeline_cfg(self, field):
-        if field in self.pipeline_cfg['sources'][0]:
-            del self.pipeline_cfg['sources'][0][field]
-        else:
-            del self.pipeline_cfg['sinks'][0][field]
-
-    def _exception_create_pipelinemanager(self):
-        self.assertRaises(pipeline.PipelineException,
-                          pipeline.PipelineManager,
-                          self.pipeline_cfg,
-                          self.transformer_manager,
-                          self.p_type)
-
-    def test_no_events(self):
-        self._unset_pipeline_cfg('events')
-        self._exception_create_pipelinemanager()
-
-    def test_no_name(self):
-        self._unset_pipeline_cfg('name')
-        self._exception_create_pipelinemanager()
-
-    def test_name(self):
-        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
-                                                    self.transformer_manager,
-                                                    self.p_type)
-        for pipe in pipeline_manager.pipelines:
-            self.assertTrue(pipe.name.startswith('event:'))
-
-    def test_no_publishers(self):
-        self._unset_pipeline_cfg('publishers')
-        self._exception_create_pipelinemanager()
-
-    def test_check_events_include_exclude_same(self):
-        event_cfg = ['a', '!a']
-        self._set_pipeline_cfg('events', event_cfg)
-        self._exception_create_pipelinemanager()
-
-    def test_check_events_include_exclude(self):
-        event_cfg = ['a', '!b']
-        self._set_pipeline_cfg('events', event_cfg)
-        self._exception_create_pipelinemanager()
-
-    def test_check_events_wildcard_included(self):
-        event_cfg = ['a', '*']
-        self._set_pipeline_cfg('events', event_cfg)
-        self._exception_create_pipelinemanager()
-
-    def test_check_publishers_invalid_publisher(self):
-        publisher_cfg = ['test_invalid']
-        self._set_pipeline_cfg('publishers', publisher_cfg)
-
-    def test_multiple_included_events(self):
-        event_cfg = ['a', 'b']
-        self._set_pipeline_cfg('events', event_cfg)
-        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
-                                                    self.transformer_manager,
-                                                    self.p_type)
-
-        with pipeline_manager.publisher(None) as p:
-            p([self.test_event])
-
-        publisher = pipeline_manager.pipelines[0].publishers[0]
-        self.assertEqual(1, len(publisher.events))
-
-        with pipeline_manager.publisher(None) as p:
-            p([self.test_event2])
-
-        self.assertEqual(2, len(publisher.events))
-        self.assertEqual('a', getattr(publisher.events[0], 'event_type'))
-        self.assertEqual('b', getattr(publisher.events[1], 'event_type'))
-
-    def test_event_non_match(self):
-        event_cfg = ['nomatch']
-        self._set_pipeline_cfg('events', event_cfg)
-        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
-                                                    self.transformer_manager,
-                                                    self.p_type)
-        with pipeline_manager.publisher(None) as p:
-            p([self.test_event])
-
-        publisher = pipeline_manager.pipelines[0].publishers[0]
-        self.assertEqual(0, len(publisher.events))
-        self.assertEqual(0, publisher.calls)
-
-    def test_wildcard_event(self):
-        event_cfg = ['*']
-        self._set_pipeline_cfg('events', event_cfg)
-        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
-                                                    self.transformer_manager,
-                                                    self.p_type)
-        with pipeline_manager.publisher(None) as p:
-            p([self.test_event])
-
-        publisher = pipeline_manager.pipelines[0].publishers[0]
-        self.assertEqual(1, len(publisher.events))
-        self.assertEqual('a', getattr(publisher.events[0], 'event_type'))
-
-    def test_wildcard_excluded_events(self):
-        event_cfg = ['*', '!a']
-        self._set_pipeline_cfg('events', event_cfg)
-        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
-                                                    self.transformer_manager,
-                                                    self.p_type)
-        self.assertFalse(pipeline_manager.pipelines[0].support_event('a'))
-
-    def test_wildcard_excluded_events_not_excluded(self):
-        event_cfg = ['*', '!b']
-        self._set_pipeline_cfg('events', event_cfg)
-        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
-                                                    self.transformer_manager,
-                                                    self.p_type)
-        with pipeline_manager.publisher(None) as p:
-            p([self.test_event])
-        publisher = pipeline_manager.pipelines[0].publishers[0]
-        self.assertEqual(1, len(publisher.events))
-        self.assertEqual('a', getattr(publisher.events[0], 'event_type'))
-
-    def test_all_excluded_events_not_excluded(self):
-        event_cfg = ['!b', '!c']
-        self._set_pipeline_cfg('events', event_cfg)
-        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
-                                                    self.transformer_manager,
-                                                    self.p_type)
-        with pipeline_manager.publisher(None) as p:
-            p([self.test_event])
-
-        publisher = pipeline_manager.pipelines[0].publishers[0]
-        self.assertEqual(1, len(publisher.events))
-        self.assertEqual('a', getattr(publisher.events[0], 'event_type'))
-
-    def test_all_excluded_events_excluded(self):
-        event_cfg = ['!a', '!c']
-        self._set_pipeline_cfg('events', event_cfg)
-        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
-                                                    self.transformer_manager,
-                                                    self.p_type)
-        self.assertFalse(pipeline_manager.pipelines[0].support_event('a'))
-        self.assertTrue(pipeline_manager.pipelines[0].support_event('b'))
-        self.assertFalse(pipeline_manager.pipelines[0].support_event('c'))
-
-    def test_wildcard_and_excluded_wildcard_events(self):
-        event_cfg = ['*', '!compute.*']
-        self._set_pipeline_cfg('events', event_cfg)
-        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
-                                                    self.transformer_manager,
-                                                    self.p_type)
-        self.assertFalse(pipeline_manager.pipelines[0].
-                         support_event('compute.instance.create.start'))
-        self.assertTrue(pipeline_manager.pipelines[0].
-                        support_event('identity.user.create'))
-
-    def test_included_event_and_wildcard_events(self):
-        event_cfg = ['compute.instance.create.start', 'identity.*']
-        self._set_pipeline_cfg('events', event_cfg)
-        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
-                                                    self.transformer_manager,
-                                                    self.p_type)
-        self.assertTrue(pipeline_manager.pipelines[0].
-                        support_event('identity.user.create'))
-        self.assertTrue(pipeline_manager.pipelines[0].
-                        support_event('compute.instance.create.start'))
-        self.assertFalse(pipeline_manager.pipelines[0].
-                         support_event('compute.instance.create.stop'))
-
-    def test_excluded_event_and_excluded_wildcard_events(self):
-        event_cfg = ['!compute.instance.create.start', '!identity.*']
-        self._set_pipeline_cfg('events', event_cfg)
-        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
-                                                    self.transformer_manager,
-                                                    self.p_type)
-        self.assertFalse(pipeline_manager.pipelines[0].
-                         support_event('identity.user.create'))
-        self.assertFalse(pipeline_manager.pipelines[0].
-                         support_event('compute.instance.create.start'))
-        self.assertTrue(pipeline_manager.pipelines[0].
-                        support_event('compute.instance.create.stop'))
-
-    def test_multiple_pipeline(self):
-        self._augment_pipeline_cfg()
-
-        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
-                                                    self.transformer_manager,
-                                                    self.p_type)
-        with pipeline_manager.publisher(None) as p:
-            p([self.test_event, self.test_event2])
-
-        publisher = pipeline_manager.pipelines[0].publishers[0]
-        self.assertEqual(1, len(publisher.events))
-        self.assertEqual(1, publisher.calls)
-        self.assertEqual('a', getattr(publisher.events[0], 'event_type'))
-        new_publisher = pipeline_manager.pipelines[1].publishers[0]
-        self.assertEqual(1, len(new_publisher.events))
-        self.assertEqual(1, new_publisher.calls)
-        self.assertEqual('b', getattr(new_publisher.events[0], 'event_type'))
-
-    def test_multiple_publisher(self):
-        self._set_pipeline_cfg('publishers', ['test://', 'new://'])
-        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
-                                                    self.transformer_manager,
-                                                    self.p_type)
-
-        with pipeline_manager.publisher(None) as p:
-            p([self.test_event])
-
-        publisher = pipeline_manager.pipelines[0].publishers[0]
-        new_publisher = pipeline_manager.pipelines[0].publishers[1]
-        self.assertEqual(1, len(publisher.events))
-        self.assertEqual(1, len(new_publisher.events))
-        self.assertEqual('a', getattr(new_publisher.events[0], 'event_type'))
-        self.assertEqual('a', getattr(publisher.events[0], 'event_type'))
-
-    def test_multiple_publisher_isolation(self):
-        self._reraise_exception = False
-        self._set_pipeline_cfg('publishers', ['except://', 'new://'])
-        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
-                                                    self.transformer_manager,
-                                                    self.p_type)
-        with pipeline_manager.publisher(None) as p:
-            p([self.test_event])
-
-        publisher = pipeline_manager.pipelines[0].publishers[1]
-        self.assertEqual(1, len(publisher.events))
-        self.assertEqual('a', getattr(publisher.events[0], 'event_type'))
-
-    def test_unique_pipeline_names(self):
-        self._dup_pipeline_name_cfg()
-        self._exception_create_pipelinemanager()
The parameter types are:: - - logical_line: A processed line with the following modifications: - - Multi-line statements converted to a single line. - - Stripped left and right. - - Contents of strings replaced with "xxx" of same length. - - Comments removed. - physical_line: Raw line of text from the input file. - lines: a list of the raw lines from the input file - tokens: the tokens that contribute to this logical line - line_number: line number in the input file - total_lines: number of lines in the input file - blank_lines: blank lines before this one - indent_char: indentation character in this file (" " or "\t") - indent_level: indentation (with tabs expanded to multiples of 8) - previous_indent_level: indentation on previous line - previous_logical: previous logical line - filename: Path of the file being run through pep8 - - When running a test on a check method the return will be False/None if - there is no violation in the sample input. If there is an error a tuple is - returned with a position in the line, and a message. So to check the result - just assertTrue if the check is expected to fail and assertFalse if it - should pass. - """ - - # We are patching pep8 so that only the check under test is actually - # installed. - @mock.patch('pep8._checks', - {'physical_line': {}, 'logical_line': {}, 'tree': {}}) - def _run_check(self, code, checker, filename=None): - pep8.register_check(checker) - - lines = textwrap.dedent(code).strip().splitlines(True) - - checker = pep8.Checker(filename=filename, lines=lines) - checker.check_all() - checker.report._deferred_print.sort() - return checker.report._deferred_print - - def _assert_has_errors(self, code, checker, expected_errors=None, - filename=None): - actual_errors = [e[:3] for e in - self._run_check(code, checker, filename)] - self.assertEqual(expected_errors or [], actual_errors) - - def test_oslo_namespace_imports_check(self): - codes = [ - "from oslo.concurrency import processutils", - "from oslo.config import cfg", - "import oslo.i18n", - "from oslo.utils import timeutils", - "from oslo.serialization import jsonutils", - ] - for code in codes: - self._assert_has_errors(code, checks.check_oslo_namespace_imports, - expected_errors=[(1, 0, "C300")]) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/test_messaging.py ceilometer-5.0.0~b3/ceilometer/tests/test_messaging.py --- ceilometer-5.0.0~b2/ceilometer/tests/test_messaging.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/test_messaging.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,65 +0,0 @@ -# Copyright (C) 2014 eNovance SAS -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
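[Editor's note on the HackingTestCase removed above: it fed source strings through pep8 and expected the C300 violation for old-style `oslo.` namespace imports. As a rough sketch of the kind of logical-line check it exercised (the project's real rule lives in ceilometer/hacking/checks.py and may differ in regex and message wording):]

import re

# Sketch of a pep8/flake8-style logical-line check; yielding a
# (column, message) tuple reports a violation, yielding nothing passes.
_OSLO_NS = re.compile(r'^(from|import)\s+oslo\.')

def check_oslo_namespace_imports_sketch(logical_line):
    if _OSLO_NS.match(logical_line):
        yield (0, "C300 use the oslo_* form instead of the oslo. namespace")

[This matches the removed test's expectation of a (line 1, column 0, "C300") error for inputs such as "from oslo.config import cfg".]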
- -from oslo_config import fixture as fixture_config -import oslo_messaging.conffixture -from oslotest import base - -from ceilometer import messaging - - -class MessagingTests(base.BaseTestCase): - def setUp(self): - super(MessagingTests, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.useFixture(oslo_messaging.conffixture.ConfFixture(self.CONF)) - - def test_get_transport_invalid_url(self): - self.assertRaises(oslo_messaging.InvalidTransportURL, - messaging.get_transport, "notvalid!") - - def test_get_transport_url_caching(self): - t1 = messaging.get_transport('fake://') - t2 = messaging.get_transport('fake://') - self.assertEqual(t1, t2) - - def test_get_transport_default_url_caching(self): - t1 = messaging.get_transport() - t2 = messaging.get_transport() - self.assertEqual(t1, t2) - - def test_get_transport_default_url_no_caching(self): - t1 = messaging.get_transport(cache=False) - t2 = messaging.get_transport(cache=False) - self.assertNotEqual(t1, t2) - - def test_get_transport_url_no_caching(self): - t1 = messaging.get_transport('fake://', cache=False) - t2 = messaging.get_transport('fake://', cache=False) - self.assertNotEqual(t1, t2) - - def test_get_transport_default_url_caching_mix(self): - t1 = messaging.get_transport() - t2 = messaging.get_transport(cache=False) - self.assertNotEqual(t1, t2) - - def test_get_transport_url_caching_mix(self): - t1 = messaging.get_transport('fake://') - t2 = messaging.get_transport('fake://', cache=False) - self.assertNotEqual(t1, t2) - - def test_get_transport_optional(self): - self.CONF.set_override('rpc_backend', '') - self.assertIsNone(messaging.get_transport(optional=True, - cache=False)) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/test_middleware.py ceilometer-5.0.0~b3/ceilometer/tests/test_middleware.py --- ceilometer-5.0.0~b2/ceilometer/tests/test_middleware.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/test_middleware.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,100 +0,0 @@ -# -# Copyright 2013-2014 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
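[Editor's note on the MessagingTests removed above: they pin down a small contract for ceilometer.messaging.get_transport: with caching on, repeated calls for the same URL return the same transport object; with cache=False every call builds a fresh one; and optional=True turns an unusable transport configuration into None rather than an exception. A minimal sketch of that contract, assuming a module-level _TRANSPORTS cache (the real module wraps oslo_messaging.get_transport and may handle more cases):]

import oslo_messaging
from oslo_config import cfg

_TRANSPORTS = {}  # assumed cache, keyed by transport URL

def get_transport_sketch(url=None, optional=False, cache=True):
    key = url or 'default'
    if cache and key in _TRANSPORTS:
        return _TRANSPORTS[key]  # same object back, as the caching tests expect
    try:
        transport = oslo_messaging.get_transport(cfg.CONF, url)
    except oslo_messaging.InvalidTransportURL:
        if not optional:
            raise  # invalid URLs surface, as test_get_transport_invalid_url expects
        return None  # optional=True degrades quietly
    if cache:
        _TRANSPORTS[key] = transport
    return transport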
-import mock -from oslo_config import fixture as fixture_config - -from ceilometer import middleware -from ceilometer.tests import base - - -HTTP_REQUEST = { - u'_context_auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', - u'_context_is_admin': True, - u'_context_project_id': u'7c150a59fe714e6f9263774af9688f0e', - u'_context_quota_class': None, - u'_context_read_deleted': u'no', - u'_context_remote_address': u'10.0.2.15', - u'_context_request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', - u'_context_roles': [u'admin'], - u'_context_timestamp': u'2012-05-08T20:23:41.425105', - u'_context_user_id': u'1e3ce043029547f1a61c1996d1a531a2', - u'event_type': u'http.request', - u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', - u'payload': {u'request': {'HTTP_X_FOOBAR': 'foobaz', - 'HTTP_X_USER_ID': 'jd-x32', - 'HTTP_X_PROJECT_ID': 'project-id', - 'HTTP_X_SERVICE_NAME': 'nova'}}, - u'priority': u'INFO', - u'publisher_id': u'compute.vagrant-precise', - u'timestamp': u'2012-05-08 20:23:48.028195', -} - -HTTP_RESPONSE = { - u'_context_auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', - u'_context_is_admin': True, - u'_context_project_id': u'7c150a59fe714e6f9263774af9688f0e', - u'_context_quota_class': None, - u'_context_read_deleted': u'no', - u'_context_remote_address': u'10.0.2.15', - u'_context_request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', - u'_context_roles': [u'admin'], - u'_context_timestamp': u'2012-05-08T20:23:41.425105', - u'_context_user_id': u'1e3ce043029547f1a61c1996d1a531a2', - u'event_type': u'http.response', - u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', - u'payload': {u'request': {'HTTP_X_FOOBAR': 'foobaz', - 'HTTP_X_USER_ID': 'jd-x32', - 'HTTP_X_PROJECT_ID': 'project-id', - 'HTTP_X_SERVICE_NAME': 'nova'}, - u'response': {'status': '200 OK'}}, - u'priority': u'INFO', - u'publisher_id': u'compute.vagrant-precise', - u'timestamp': u'2012-05-08 20:23:48.028195', -} - - -class TestNotifications(base.BaseTestCase): - - def setUp(self): - super(TestNotifications, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.setup_messaging(self.CONF) - - def test_process_request_notification(self): - sample = list(middleware.HTTPRequest(mock.Mock()).process_notification( - HTTP_REQUEST - ))[0] - self.assertEqual(HTTP_REQUEST['payload']['request']['HTTP_X_USER_ID'], - sample.user_id) - self.assertEqual(HTTP_REQUEST['payload']['request'] - ['HTTP_X_PROJECT_ID'], sample.project_id) - self.assertEqual(HTTP_REQUEST['payload']['request'] - ['HTTP_X_SERVICE_NAME'], sample.resource_id) - self.assertEqual(1, sample.volume) - - def test_process_response_notification(self): - sample = list(middleware.HTTPResponse( - mock.Mock()).process_notification(HTTP_RESPONSE))[0] - self.assertEqual(HTTP_RESPONSE['payload']['request']['HTTP_X_USER_ID'], - sample.user_id) - self.assertEqual(HTTP_RESPONSE['payload']['request'] - ['HTTP_X_PROJECT_ID'], sample.project_id) - self.assertEqual(HTTP_RESPONSE['payload']['request'] - ['HTTP_X_SERVICE_NAME'], sample.resource_id) - self.assertEqual(1, sample.volume) - - def test_targets(self): - targets = middleware.HTTPRequest(mock.Mock()).get_targets(self.CONF) - self.assertEqual(4, len(targets)) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/test_neutronclient.py ceilometer-5.0.0~b3/ceilometer/tests/test_neutronclient.py --- ceilometer-5.0.0~b2/ceilometer/tests/test_neutronclient.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/test_neutronclient.py 1970-01-01 00:00:00.000000000 +0000 @@ 
-1,193 +0,0 @@ -# Copyright (C) 2014 eNovance SAS -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslotest import base - -from ceilometer import neutron_client - - -class TestNeutronClient(base.BaseTestCase): - - def setUp(self): - super(TestNeutronClient, self).setUp() - self.nc = neutron_client.Client() - - @staticmethod - def fake_ports_list(): - return {'ports': - [{'admin_state_up': True, - 'device_id': '674e553b-8df9-4321-87d9-93ba05b93558', - 'device_owner': 'network:router_gateway', - 'extra_dhcp_opts': [], - 'id': '96d49cc3-4e01-40ce-9cac-c0e32642a442', - 'mac_address': 'fa:16:3e:c5:35:93', - 'name': '', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'status': 'ACTIVE', - 'tenant_id': '89271fa581ab4380bf172f868c3615f9'}, - ]} - - def test_port_get_all(self): - with mock.patch.object(self.nc.client, 'list_ports', - side_effect=self.fake_ports_list): - ports = self.nc.port_get_all() - - self.assertEqual(1, len(ports)) - self.assertEqual('96d49cc3-4e01-40ce-9cac-c0e32642a442', - ports[0]['id']) - - @staticmethod - def fake_networks_list(): - return {'networks': - [{'admin_state_up': True, - 'id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'name': 'public', - 'provider:network_type': 'gre', - 'provider:physical_network': None, - 'provider:segmentation_id': 2, - 'router:external': True, - 'shared': False, - 'status': 'ACTIVE', - 'subnets': [u'c4b6f5b8-3508-4896-b238-a441f25fb492'], - 'tenant_id': '62d6f08bbd3a44f6ad6f00ca15cce4e5'}, - ]} - - def test_network_get_all(self): - with mock.patch.object(self.nc.client, 'list_networks', - side_effect=self.fake_networks_list): - networks = self.nc.network_get_all() - - self.assertEqual(1, len(networks)) - self.assertEqual('298a3088-a446-4d5a-bad8-f92ecacd786b', - networks[0]['id']) - - @staticmethod - def fake_pool_list(): - return {'pools': [{'status': 'ACTIVE', - 'lb_method': 'ROUND_ROBIN', - 'protocol': 'HTTP', - 'description': '', - 'health_monitors': [], - 'members': [], - 'status_description': None, - 'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'mylb', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'health_monitors_status': []}, - ]} - - def test_pool_list(self): - with mock.patch.object(self.nc.client, 'list_pools', - side_effect=self.fake_pool_list): - pools = self.nc.pool_get_all() - - self.assertEqual(1, len(pools)) - self.assertEqual('ce73ad36-437d-4c84-aee1-186027d3da9a', - pools[0]['id']) - - @staticmethod - def fake_vip_list(): - return {'vips': [{'status': 'ACTIVE', - 'status_description': None, - 'protocol': 'HTTP', - 'description': '', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'connection_limit': -1, - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'session_persistence': None, - 'address': '10.0.0.2', - 'protocol_port': 80, - 'port_id': 
'3df3c4de-b32e-4ca1-a7f4-84323ba5f291', - 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'myvip'}, - ]} - - def test_vip_list(self): - with mock.patch.object(self.nc.client, 'list_vips', - side_effect=self.fake_vip_list): - vips = self.nc.vip_get_all() - - self.assertEqual(1, len(vips)) - self.assertEqual('cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - vips[0]['id']) - - @staticmethod - def fake_member_list(): - return {'members': [{'status': 'ACTIVE', - 'protocol_port': 80, - 'weight': 1, - 'admin_state_up': True, - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'address': '10.0.0.3', - 'status_description': None, - 'id': '290b61eb-07bc-4372-9fbf-36459dd0f96b'}, - ]} - - def test_member_list(self): - with mock.patch.object(self.nc.client, 'list_members', - side_effect=self.fake_member_list): - members = self.nc.member_get_all() - - self.assertEqual(1, len(members)) - self.assertEqual('290b61eb-07bc-4372-9fbf-36459dd0f96b', - members[0]['id']) - - @staticmethod - def fake_monitors_list(): - return {'health_monitors': - [{'id': '34ae33e1-0035-49e2-a2ca-77d5d3fab365', - 'admin_state_up': True, - 'tenant_id': "d5d2817dae6b42159be9b665b64beb0e", - 'delay': 2, - 'max_retries': 5, - 'timeout': 5, - 'pools': [], - 'type': 'PING', - }]} - - def test_monitor_list(self): - with mock.patch.object(self.nc.client, 'list_health_monitors', - side_effect=self.fake_monitors_list): - monitors = self.nc.health_monitor_get_all() - - self.assertEqual(1, len(monitors)) - self.assertEqual('34ae33e1-0035-49e2-a2ca-77d5d3fab365', - monitors[0]['id']) - - @staticmethod - def fake_pool_stats(fake_pool): - return {'stats': - [{'active_connections': 1, - 'total_connections': 2, - 'bytes_in': 3, - 'bytes_out': 4 - }]} - - def test_pool_stats(self): - with mock.patch.object(self.nc.client, 'retrieve_pool_stats', - side_effect=self.fake_pool_stats): - stats = self.nc.pool_stats('fake_pool')['stats'] - - self.assertEqual(1, len(stats)) - self.assertEqual(1, stats[0]['active_connections']) - self.assertEqual(2, stats[0]['total_connections']) - self.assertEqual(3, stats[0]['bytes_in']) - self.assertEqual(4, stats[0]['bytes_out']) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/test_notification.py ceilometer-5.0.0~b3/ceilometer/tests/test_notification.py --- ceilometer-5.0.0~b2/ceilometer/tests/test_notification.py 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/test_notification.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,451 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
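[Editor's note on the TestNeutronClient file removed above: every case follows one pattern: patch a single python-neutronclient list call, then assert that the ceilometer wrapper unwraps the response envelope ({'ports': [...]}, {'networks': [...]}, and so on) into a plain list. A self-contained sketch of both sides, with assumed names (the real wrapper is ceilometer.neutron_client.Client):]

import mock

class NeutronWrapperSketch(object):
    def __init__(self, client):
        self.client = client  # stands in for neutronclient's Client

    def port_get_all(self):
        # neutronclient returns {'ports': [...]}; callers want the bare list.
        return self.client.list_ports().get('ports', [])

nc = NeutronWrapperSketch(mock.Mock())
nc.client.list_ports.return_value = {'ports': [{'id': 'p1'}]}
assert nc.port_get_all() == [{'id': 'p1'}]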
-"""Tests for Ceilometer notify daemon.""" - -import shutil - -import eventlet -import mock -from oslo_config import fixture as fixture_config -from oslo_context import context -import oslo_messaging -import oslo_messaging.conffixture -import oslo_service.service -from oslo_utils import fileutils -from oslo_utils import timeutils -import six -from stevedore import extension -import yaml - -from ceilometer.compute.notifications import instance -from ceilometer import messaging -from ceilometer import notification -from ceilometer.publisher import test as test_publisher -from ceilometer.tests import base as tests_base - -TEST_NOTICE_CTXT = { - u'auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', - u'is_admin': True, - u'project_id': u'7c150a59fe714e6f9263774af9688f0e', - u'quota_class': None, - u'read_deleted': u'no', - u'remote_address': u'10.0.2.15', - u'request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', - u'roles': [u'admin'], - u'timestamp': u'2012-05-08T20:23:41.425105', - u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', -} - -TEST_NOTICE_METADATA = { - u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', - u'timestamp': u'2012-05-08 20:23:48.028195', -} - -TEST_NOTICE_PAYLOAD = { - u'created_at': u'2012-05-08 20:23:41', - u'deleted_at': u'', - u'disk_gb': 0, - u'display_name': u'testme', - u'fixed_ips': [{u'address': u'10.0.0.2', - u'floating_ips': [], - u'meta': {}, - u'type': u'fixed', - u'version': 4}], - u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', - u'instance_id': u'9f9d01b9-4a58-4271-9e27-398b21ab20d1', - u'instance_type': u'm1.tiny', - u'instance_type_id': 2, - u'launched_at': u'2012-05-08 20:23:47.985999', - u'memory_mb': 512, - u'state': u'active', - u'state_description': u'', - u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', - u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', - u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', - u'vcpus': 1, - u'root_gb': 0, - u'ephemeral_gb': 0, - u'host': u'compute-host-name', - u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', - u'os_type': u'linux?', - u'architecture': u'x86', - u'image_ref': u'UUID', - u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', - u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', -} - - -class TestNotification(tests_base.BaseTestCase): - - def setUp(self): - super(TestNotification, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.CONF.set_override("connection", "log://", group='database') - self.CONF.set_override("store_events", False, group="notification") - self.CONF.set_override("disable_non_metric_meters", False, - group="notification") - self.setup_messaging(self.CONF) - self.srv = notification.NotificationService() - - def fake_get_notifications_manager(self, pm): - self.plugin = instance.Instance(pm) - return extension.ExtensionManager.make_test_instance( - [ - extension.Extension('test', - None, - None, - self.plugin) - ] - ) - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - @mock.patch.object(oslo_messaging.MessageHandlingServer, 'start', - mock.MagicMock()) - @mock.patch('ceilometer.event.endpoint.EventsNotificationEndpoint') - def _do_process_notification_manager_start(self, - fake_event_endpoint_class): - with mock.patch.object(self.srv, - '_get_notifications_manager') as get_nm: - get_nm.side_effect = self.fake_get_notifications_manager - self.srv.start() - self.fake_event_endpoint = fake_event_endpoint_class.return_value - - def test_start_multiple_listeners(self): - urls = ["fake://vhost1", 
"fake://vhost2"] - self.CONF.set_override("messaging_urls", urls, group="notification") - self._do_process_notification_manager_start() - self.assertEqual(2, len(self.srv.listeners)) - - def test_process_notification(self): - self._do_process_notification_manager_start() - self.srv.pipeline_manager.pipelines[0] = mock.MagicMock() - - self.plugin.info(TEST_NOTICE_CTXT, 'compute.vagrant-precise', - 'compute.instance.create.end', - TEST_NOTICE_PAYLOAD, TEST_NOTICE_METADATA) - - self.assertEqual(1, len(self.srv.listeners[0].dispatcher.endpoints)) - self.assertTrue(self.srv.pipeline_manager.publisher.called) - - def test_process_notification_no_events(self): - self._do_process_notification_manager_start() - self.assertEqual(1, len(self.srv.listeners[0].dispatcher.endpoints)) - self.assertNotEqual(self.fake_event_endpoint, - self.srv.listeners[0].dispatcher.endpoints[0]) - - @mock.patch('ceilometer.pipeline.setup_event_pipeline', mock.MagicMock()) - def test_process_notification_with_events(self): - self.CONF.set_override("store_events", True, group="notification") - self._do_process_notification_manager_start() - self.assertEqual(2, len(self.srv.listeners[0].dispatcher.endpoints)) - self.assertEqual(self.fake_event_endpoint, - self.srv.listeners[0].dispatcher.endpoints[0]) - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - @mock.patch.object(oslo_messaging.MessageHandlingServer, 'start', - mock.MagicMock()) - @mock.patch('ceilometer.event.endpoint.EventsNotificationEndpoint') - def test_unique_consumers(self, fake_event_endpoint_class): - - def fake_get_notifications_manager_dup_targets(pm): - plugin = instance.Instance(pm) - return extension.ExtensionManager.make_test_instance( - [extension.Extension('test', None, None, plugin), - extension.Extension('test', None, None, plugin)]) - - with mock.patch.object(self.srv, - '_get_notifications_manager') as get_nm: - get_nm.side_effect = fake_get_notifications_manager_dup_targets - self.srv.start() - self.assertEqual(1, len(self.srv.listeners[0].dispatcher.targets)) - - -class BaseRealNotification(tests_base.BaseTestCase): - def setup_pipeline(self, counter_names): - pipeline = yaml.dump({ - 'sources': [{ - 'name': 'test_pipeline', - 'interval': 5, - 'meters': counter_names, - 'sinks': ['test_sink'] - }], - 'sinks': [{ - 'name': 'test_sink', - 'transformers': [], - 'publishers': ['test://'] - }] - }) - if six.PY3: - pipeline = pipeline.encode('utf-8') - - pipeline_cfg_file = fileutils.write_to_tempfile(content=pipeline, - prefix="pipeline", - suffix="yaml") - return pipeline_cfg_file - - def setUp(self): - super(BaseRealNotification, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.setup_messaging(self.CONF, 'nova') - - pipeline_cfg_file = self.setup_pipeline(['instance', 'memory']) - self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) - - self.expected_samples = 2 - - self.CONF.set_override("store_events", True, group="notification") - self.CONF.set_override("disable_non_metric_meters", False, - group="notification") - ev_pipeline = yaml.dump({ - 'sources': [{ - 'name': 'test_event', - 'events': ['compute.instance.*'], - 'sinks': ['test_sink'] - }], - 'sinks': [{ - 'name': 'test_sink', - 'publishers': ['test://'] - }] - }) - if six.PY3: - ev_pipeline = ev_pipeline.encode('utf-8') - self.expected_events = 1 - ev_pipeline_cfg_file = fileutils.write_to_tempfile( - content=ev_pipeline, prefix="event_pipeline", suffix="yaml") - self.CONF.set_override("event_pipeline_cfg_file", 
ev_pipeline_cfg_file) - self.CONF.set_override( - "definitions_cfg_file", - self.path_get('etc/ceilometer/event_definitions.yaml'), - group='event') - self.publisher = test_publisher.TestPublisher("") - - def _check_notification_service(self): - self.srv.start() - - notifier = messaging.get_notifier(self.transport, - "compute.vagrant-precise") - notifier.info(context.RequestContext(), 'compute.instance.create.end', - TEST_NOTICE_PAYLOAD) - start = timeutils.utcnow() - while timeutils.delta_seconds(start, timeutils.utcnow()) < 600: - if (len(self.publisher.samples) >= self.expected_samples and - len(self.publisher.events) >= self.expected_events): - break - eventlet.sleep(0) - self.assertNotEqual(self.srv.listeners, self.srv.pipeline_listeners) - self.srv.stop() - - resources = list(set(s.resource_id for s in self.publisher.samples)) - self.assertEqual(self.expected_samples, len(self.publisher.samples)) - self.assertEqual(self.expected_events, len(self.publisher.events)) - self.assertEqual(["9f9d01b9-4a58-4271-9e27-398b21ab20d1"], resources) - - -class TestRealNotificationReloadablePipeline(BaseRealNotification): - - def setUp(self): - super(TestRealNotificationReloadablePipeline, self).setUp() - self.CONF.set_override('refresh_pipeline_cfg', True) - self.CONF.set_override('pipeline_polling_interval', 1) - self.srv = notification.NotificationService() - - @mock.patch('ceilometer.publisher.test.TestPublisher') - def test_notification_pipeline_poller(self, fake_publisher_cls): - fake_publisher_cls.return_value = self.publisher - self.srv.tg = mock.MagicMock() - self.srv.start() - - pipeline_poller_call = mock.call(1, self.srv.refresh_pipeline) - self.assertIn(pipeline_poller_call, - self.srv.tg.add_timer.call_args_list) - - @mock.patch('ceilometer.publisher.test.TestPublisher') - def test_notification_reloaded_pipeline(self, fake_publisher_cls): - fake_publisher_cls.return_value = self.publisher - - pipeline_cfg_file = self.setup_pipeline(['instance']) - self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) - - self.expected_samples = 1 - self.srv.start() - - notifier = messaging.get_notifier(self.transport, - "compute.vagrant-precise") - notifier.info(context.RequestContext(), 'compute.instance.create.end', - TEST_NOTICE_PAYLOAD) - - start = timeutils.utcnow() - while timeutils.delta_seconds(start, timeutils.utcnow()) < 600: - if (len(self.publisher.samples) >= self.expected_samples and - len(self.publisher.events) >= self.expected_events): - break - eventlet.sleep(0) - - self.assertEqual(self.expected_samples, len(self.publisher.samples)) - - # Flush publisher samples to test reloading - self.publisher.samples = [] - # Modify the collection targets - updated_pipeline_cfg_file = self.setup_pipeline(['vcpus', - 'disk.root.size']) - # Move/re-name the updated pipeline file to the original pipeline - # file path as recorded in oslo config - shutil.move(updated_pipeline_cfg_file, pipeline_cfg_file) - - self.expected_samples = 2 - # Random sleep to let the pipeline poller complete the reloading - eventlet.sleep(3) - # Send message again to verify the reload works - notifier = messaging.get_notifier(self.transport, - "compute.vagrant-precise") - notifier.info(context.RequestContext(), 'compute.instance.create.end', - TEST_NOTICE_PAYLOAD) - - start = timeutils.utcnow() - while timeutils.delta_seconds(start, timeutils.utcnow()) < 600: - if (len(self.publisher.samples) >= self.expected_samples and - len(self.publisher.events) >= self.expected_events): - break - eventlet.sleep(0) - - 
self.assertEqual(self.expected_samples, len(self.publisher.samples)) - - (self.assertIn(sample.name, ['disk.root.size', 'vcpus']) - for sample in self.publisher.samples) - - -class TestRealNotification(BaseRealNotification): - - def setUp(self): - super(TestRealNotification, self).setUp() - self.srv = notification.NotificationService() - - @mock.patch('ceilometer.publisher.test.TestPublisher') - def test_notification_service(self, fake_publisher_cls): - fake_publisher_cls.return_value = self.publisher - self._check_notification_service() - - @mock.patch('ceilometer.publisher.test.TestPublisher') - def test_notification_service_error_topic(self, fake_publisher_cls): - fake_publisher_cls.return_value = self.publisher - self.srv.start() - notifier = messaging.get_notifier(self.transport, - 'compute.vagrant-precise') - notifier.error(context.RequestContext(), 'compute.instance.error', - TEST_NOTICE_PAYLOAD) - start = timeutils.utcnow() - while timeutils.delta_seconds(start, timeutils.utcnow()) < 600: - if len(self.publisher.events) >= self.expected_events: - break - eventlet.sleep(0) - self.srv.stop() - self.assertEqual(self.expected_events, len(self.publisher.events)) - - @mock.patch('ceilometer.publisher.test.TestPublisher') - def test_notification_disable_non_metrics(self, fake_publisher_cls): - self.CONF.set_override("disable_non_metric_meters", True, - group="notification") - # instance is a not a metric. we should only get back memory - self.expected_samples = 1 - fake_publisher_cls.return_value = self.publisher - self._check_notification_service() - self.assertEqual('memory', self.publisher.samples[0].name) - - @mock.patch('ceilometer.coordination.PartitionCoordinator') - @mock.patch('ceilometer.publisher.test.TestPublisher') - def test_ha_configured_agent_coord_disabled(self, fake_publisher_cls, - fake_coord): - fake_publisher_cls.return_value = self.publisher - fake_coord1 = mock.MagicMock() - fake_coord1.extract_my_subset.side_effect = lambda x, y: y - fake_coord.return_value = fake_coord1 - self._check_notification_service() - - @mock.patch.object(oslo_service.service.Service, 'stop') - def test_notification_service_start_abnormal(self, mocked): - try: - self.srv.stop() - except Exception: - pass - self.assertEqual(1, mocked.call_count) - - -class TestRealNotificationHA(BaseRealNotification): - - def setUp(self): - super(TestRealNotificationHA, self).setUp() - self.CONF.set_override('workload_partitioning', True, - group='notification') - self.srv = notification.NotificationService() - - @mock.patch('ceilometer.coordination.PartitionCoordinator') - @mock.patch('ceilometer.publisher.test.TestPublisher') - def test_notification_service(self, fake_publisher_cls, fake_coord): - fake_publisher_cls.return_value = self.publisher - fake_coord1 = mock.MagicMock() - fake_coord1.extract_my_subset.side_effect = lambda x, y: y - fake_coord.return_value = fake_coord1 - self._check_notification_service() - - @mock.patch('hmac.new') - @mock.patch('ceilometer.coordination.PartitionCoordinator') - @mock.patch('ceilometer.publisher.test.TestPublisher') - def test_notification_service_no_secret(self, fake_publisher_cls, - fake_coord, fake_hmac): - self.CONF.set_override('telemetry_secret', None, group='publisher') - fake_publisher_cls.return_value = self.publisher - fake_coord1 = mock.MagicMock() - fake_coord1.extract_my_subset.side_effect = lambda x, y: y - fake_coord.return_value = fake_coord1 - self._check_notification_service() - self.assertFalse(fake_hmac.called) - - def 
test_reset_listeners_on_refresh(self): - self.srv.start() - self.assertEqual(2, len(self.srv.pipeline_listeners)) - self.srv._refresh_agent(None) - self.assertEqual(2, len(self.srv.pipeline_listeners)) - self.srv.stop() - - @mock.patch('oslo_messaging.Notifier.sample') - def test_broadcast_to_relevant_pipes_only(self, mock_notifier): - self.srv.start() - for endpoint in self.srv.listeners[0].dispatcher.endpoints: - if (hasattr(endpoint, 'filter_rule') and - not endpoint.filter_rule.match(None, None, 'nonmatching.end', - None, None)): - continue - endpoint.info(TEST_NOTICE_CTXT, 'compute.vagrant-precise', - 'nonmatching.end', - TEST_NOTICE_PAYLOAD, TEST_NOTICE_METADATA) - self.assertFalse(mock_notifier.called) - for endpoint in self.srv.listeners[0].dispatcher.endpoints: - if (hasattr(endpoint, 'filter_rule') and - not endpoint.filter_rule.match(None, None, - 'compute.instance.create.end', - None, None)): - continue - endpoint.info(TEST_NOTICE_CTXT, 'compute.vagrant-precise', - 'compute.instance.create.end', - TEST_NOTICE_PAYLOAD, TEST_NOTICE_METADATA) - self.assertTrue(mock_notifier.called) - self.assertEqual(3, mock_notifier.call_count) - self.assertEqual('pipeline.event', - mock_notifier.call_args_list[0][1]['event_type']) - self.assertEqual('ceilometer.pipeline', - mock_notifier.call_args_list[1][1]['event_type']) - self.assertEqual('ceilometer.pipeline', - mock_notifier.call_args_list[2][1]['event_type']) - self.srv.stop() diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/test_novaclient.py ceilometer-5.0.0~b3/ceilometer/tests/test_novaclient.py --- ceilometer-5.0.0~b2/ceilometer/tests/test_novaclient.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/test_novaclient.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,250 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2013-2014 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
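[Editor's note on test_broadcast_to_relevant_pipes_only above: it drives each listener endpoint only when the endpoint's filter_rule matches the notification's event type, which is how a workload-partitioned notification agent avoids waking pipelines that cannot consume the message. A small sketch of that guard, using the same positional match() arguments as the test (context, publisher_id, event_type, metadata, payload); the regex here is an assumption, since real pipelines derive it from their configured event types:]

import oslo_messaging

rule = oslo_messaging.NotificationFilter(
    event_type=r'compute\.instance\.create\.end')

assert rule.match(None, None, 'compute.instance.create.end', None, None)
assert not rule.match(None, None, 'nonmatching.end', None, None)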
- -import mock -import novaclient -from oslo_config import fixture as fixture_config -from oslotest import base -from oslotest import mockpatch - -from ceilometer import nova_client - - -class TestNovaClient(base.BaseTestCase): - - def setUp(self): - super(TestNovaClient, self).setUp() - self._flavors_count = 0 - self._images_count = 0 - self.nv = nova_client.Client() - self.useFixture(mockpatch.PatchObject( - self.nv.nova_client.flavors, 'get', - side_effect=self.fake_flavors_get)) - self.useFixture(mockpatch.PatchObject( - self.nv.nova_client.images, 'get', - side_effect=self.fake_images_get)) - self.CONF = self.useFixture(fixture_config.Config()).conf - - def fake_flavors_get(self, *args, **kwargs): - self._flavors_count += 1 - a = mock.MagicMock() - a.id = args[0] - if a.id == 1: - a.name = 'm1.tiny' - elif a.id == 2: - a.name = 'm1.large' - else: - raise novaclient.exceptions.NotFound('foobar') - return a - - def fake_images_get(self, *args, **kwargs): - self._images_count += 1 - a = mock.MagicMock() - a.id = args[0] - image_details = { - 1: ('ubuntu-12.04-x86', dict(kernel_id=11, ramdisk_id=21)), - 2: ('centos-5.4-x64', dict(kernel_id=12, ramdisk_id=22)), - 3: ('rhel-6-x64', None), - 4: ('rhel-6-x64', dict()), - 5: ('rhel-6-x64', dict(kernel_id=11)), - 6: ('rhel-6-x64', dict(ramdisk_id=21)) - } - - if a.id in image_details: - a.name = image_details[a.id][0] - a.metadata = image_details[a.id][1] - else: - raise novaclient.exceptions.NotFound('foobar') - - return a - - @staticmethod - def fake_servers_list(*args, **kwargs): - a = mock.MagicMock() - a.id = 42 - a.flavor = {'id': 1} - a.image = {'id': 1} - b = mock.MagicMock() - b.id = 43 - b.flavor = {'id': 2} - b.image = {'id': 2} - return [a, b] - - def test_instance_get_all_by_host(self): - with mock.patch.object(self.nv.nova_client.servers, 'list', - side_effect=self.fake_servers_list): - instances = self.nv.instance_get_all_by_host('foobar') - - self.assertEqual(2, len(instances)) - self.assertEqual('m1.tiny', instances[0].flavor['name']) - self.assertEqual('ubuntu-12.04-x86', instances[0].image['name']) - self.assertEqual(11, instances[0].kernel_id) - self.assertEqual(21, instances[0].ramdisk_id) - - def test_instance_get_all(self): - with mock.patch.object(self.nv.nova_client.servers, 'list', - side_effect=self.fake_servers_list): - instances = self.nv.instance_get_all() - - self.assertEqual(2, len(instances)) - self.assertEqual(42, instances[0].id) - self.assertEqual(1, instances[0].flavor['id']) - self.assertEqual(1, instances[0].image['id']) - - @staticmethod - def fake_servers_list_unknown_flavor(*args, **kwargs): - a = mock.MagicMock() - a.id = 42 - a.flavor = {'id': 666} - a.image = {'id': 1} - return [a] - - def test_instance_get_all_by_host_unknown_flavor(self): - with mock.patch.object( - self.nv.nova_client.servers, 'list', - side_effect=self.fake_servers_list_unknown_flavor): - instances = self.nv.instance_get_all_by_host('foobar') - - self.assertEqual(1, len(instances)) - self.assertEqual('unknown-id-666', instances[0].flavor['name']) - - @staticmethod - def fake_servers_list_unknown_image(*args, **kwargs): - a = mock.MagicMock() - a.id = 42 - a.flavor = {'id': 1} - a.image = {'id': 666} - return [a] - - @staticmethod - def fake_servers_list_image_missing_metadata(*args, **kwargs): - a = mock.MagicMock() - a.id = 42 - a.flavor = {'id': 1} - a.image = {'id': args[0]} - return [a] - - @staticmethod - def fake_instance_image_missing(*args, **kwargs): - a = mock.MagicMock() - a.id = 42 - a.flavor = {'id': 666} - a.image = 
None - return [a] - - def test_instance_get_all_by_host_unknown_image(self): - with mock.patch.object( - self.nv.nova_client.servers, 'list', - side_effect=self.fake_servers_list_unknown_image): - instances = self.nv.instance_get_all_by_host('foobar') - - self.assertEqual(1, len(instances)) - self.assertEqual('unknown-id-666', instances[0].image['name']) - - def test_with_flavor_and_image(self): - results = self.nv._with_flavor_and_image(self.fake_servers_list()) - instance = results[0] - self.assertEqual(2, len(results)) - self.assertEqual('ubuntu-12.04-x86', instance.image['name']) - self.assertEqual('m1.tiny', instance.flavor['name']) - self.assertEqual(11, instance.kernel_id) - self.assertEqual(21, instance.ramdisk_id) - - def test_with_flavor_and_image_unknown_image(self): - instances = self.fake_servers_list_unknown_image() - results = self.nv._with_flavor_and_image(instances) - instance = results[0] - self.assertEqual('unknown-id-666', instance.image['name']) - self.assertNotEqual(instance.flavor['name'], 'unknown-id-666') - self.assertIsNone(instance.kernel_id) - self.assertIsNone(instance.ramdisk_id) - - def test_with_flavor_and_image_unknown_flavor(self): - instances = self.fake_servers_list_unknown_flavor() - results = self.nv._with_flavor_and_image(instances) - instance = results[0] - self.assertEqual('unknown-id-666', instance.flavor['name']) - self.assertEqual(0, instance.flavor['vcpus']) - self.assertEqual(0, instance.flavor['ram']) - self.assertEqual(0, instance.flavor['disk']) - self.assertNotEqual(instance.image['name'], 'unknown-id-666') - self.assertEqual(11, instance.kernel_id) - self.assertEqual(21, instance.ramdisk_id) - - def test_with_flavor_and_image_none_metadata(self): - instances = self.fake_servers_list_image_missing_metadata(3) - results = self.nv._with_flavor_and_image(instances) - instance = results[0] - self.assertIsNone(instance.kernel_id) - self.assertIsNone(instance.ramdisk_id) - - def test_with_flavor_and_image_missing_metadata(self): - instances = self.fake_servers_list_image_missing_metadata(4) - results = self.nv._with_flavor_and_image(instances) - instance = results[0] - self.assertIsNone(instance.kernel_id) - self.assertIsNone(instance.ramdisk_id) - - def test_with_flavor_and_image_missing_ramdisk(self): - instances = self.fake_servers_list_image_missing_metadata(5) - results = self.nv._with_flavor_and_image(instances) - instance = results[0] - self.assertEqual(11, instance.kernel_id) - self.assertIsNone(instance.ramdisk_id) - - def test_with_flavor_and_image_missing_kernel(self): - instances = self.fake_servers_list_image_missing_metadata(6) - results = self.nv._with_flavor_and_image(instances) - instance = results[0] - self.assertIsNone(instance.kernel_id) - self.assertEqual(21, instance.ramdisk_id) - - def test_with_flavor_and_image_no_cache(self): - results = self.nv._with_flavor_and_image(self.fake_servers_list()) - self.assertEqual(2, len(results)) - self.assertEqual(2, self._flavors_count) - self.assertEqual(2, self._images_count) - - def test_with_flavor_and_image_cache(self): - results = self.nv._with_flavor_and_image(self.fake_servers_list() * 2) - self.assertEqual(4, len(results)) - self.assertEqual(2, self._flavors_count) - self.assertEqual(2, self._images_count) - - def test_with_flavor_and_image_unknown_image_cache(self): - instances = self.fake_servers_list_unknown_image() - results = self.nv._with_flavor_and_image(instances * 2) - self.assertEqual(2, len(results)) - self.assertEqual(1, self._flavors_count) - self.assertEqual(1, 
self._images_count) - for instance in results: - self.assertEqual('unknown-id-666', instance.image['name']) - self.assertNotEqual(instance.flavor['name'], 'unknown-id-666') - self.assertIsNone(instance.kernel_id) - self.assertIsNone(instance.ramdisk_id) - - def test_with_missing_image_instance(self): - instances = self.fake_instance_image_missing() - results = self.nv._with_flavor_and_image(instances) - instance = results[0] - self.assertIsNone(instance.kernel_id) - self.assertIsNone(instance.image) - self.assertIsNone(instance.ramdisk_id) - - def test_with_nova_http_log_debug(self): - self.CONF.set_override("nova_http_log_debug", True) - self.nv = nova_client.Client() - self.assertTrue(self.nv.nova_client.client.http_log_debug) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/test_sample.py ceilometer-5.0.0~b3/ceilometer/tests/test_sample.py --- ceilometer-5.0.0~b2/ceilometer/tests/test_sample.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/test_sample.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,38 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Tests for ceilometer/sample.py""" - -import datetime - -from ceilometer import sample -from ceilometer.tests import base - - -class TestSample(base.BaseTestCase): - SAMPLE = sample.Sample( - name='cpu', - type=sample.TYPE_CUMULATIVE, - unit='ns', - volume='1234567', - user_id='56c5692032f34041900342503fecab30', - project_id='ac9494df2d9d4e709bac378cceabaf23', - resource_id='1ca738a1-c49c-4401-8346-5c60ebdb03f4', - timestamp=datetime.datetime(2014, 10, 29, 14, 12, 15, 485877), - resource_metadata={} - ) - - def test_sample_string_format(self): - expected = ('') - self.assertEqual(expected, str(self.SAMPLE)) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/test_utils.py ceilometer-5.0.0~b3/ceilometer/tests/test_utils.py --- ceilometer-5.0.0~b2/ceilometer/tests/test_utils.py 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/test_utils.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,180 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright (c) 2013 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
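[Editor's note on the cache-oriented novaclient tests above (two flavor and two image fetches for four instances): they fix the behaviour of _with_flavor_and_image: resolve each distinct flavor and image id at most once per call, and tolerate instances whose image is None. A sketch of that per-call memoisation under assumed names (the real logic, including unknown-id fallbacks, is in ceilometer.nova_client):]

def with_flavor_and_image_sketch(instances, get_flavor, get_image):
    flavor_cache, image_cache = {}, {}
    for inst in instances:
        fid = inst.flavor['id']
        if fid not in flavor_cache:
            flavor_cache[fid] = get_flavor(fid)  # one fetch per unique id
        inst.flavor['name'] = flavor_cache[fid].name
        if inst.image:  # image can legitimately be None
            iid = inst.image['id']
            if iid not in image_cache:
                image_cache[iid] = get_image(iid)
            inst.image['name'] = image_cache[iid].name
    return instances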
-"""Tests for ceilometer/utils.py -""" -import datetime -import decimal - -from oslotest import base - -from ceilometer import utils - - -class TestUtils(base.BaseTestCase): - - def test_datetime_to_decimal(self): - expected = 1356093296.12 - utc_datetime = datetime.datetime.utcfromtimestamp(expected) - actual = utils.dt_to_decimal(utc_datetime) - self.assertAlmostEqual(expected, float(actual), places=5) - - def test_decimal_to_datetime(self): - expected = 1356093296.12 - dexpected = decimal.Decimal(str(expected)) # Python 2.6 wants str() - expected_datetime = datetime.datetime.utcfromtimestamp(expected) - actual_datetime = utils.decimal_to_dt(dexpected) - # Python 3 have rounding issue on this, so use float - self.assertAlmostEqual(utils.dt_to_decimal(expected_datetime), - utils.dt_to_decimal(actual_datetime), - places=5) - - def test_recursive_keypairs(self): - data = {'a': 'A', 'b': 'B', - 'nested': {'a': 'A', 'b': 'B'}} - pairs = list(utils.recursive_keypairs(data)) - self.assertEqual([('a', 'A'), ('b', 'B'), - ('nested:a', 'A'), ('nested:b', 'B')], - pairs) - - def test_recursive_keypairs_with_separator(self): - data = {'a': 'A', - 'b': 'B', - 'nested': {'a': 'A', - 'b': 'B', - }, - } - separator = '.' - pairs = list(utils.recursive_keypairs(data, separator)) - self.assertEqual([('a', 'A'), - ('b', 'B'), - ('nested.a', 'A'), - ('nested.b', 'B')], - pairs) - - def test_recursive_keypairs_with_list_of_dict(self): - small = 1 - big = 1 << 64 - expected = [('a', 'A'), - ('b', 'B'), - ('nested:list', [{small: 99, big: 42}])] - data = {'a': 'A', - 'b': 'B', - 'nested': {'list': [{small: 99, big: 42}]}} - pairs = list(utils.recursive_keypairs(data)) - self.assertEqual(len(expected), len(pairs)) - for k, v in pairs: - # the keys 1 and 1<<64 cause a hash collision on 64bit platforms - if k == 'nested:list': - self.assertIn(v, - [[{small: 99, big: 42}], - [{big: 42, small: 99}]]) - else: - self.assertIn((k, v), expected) - - def test_restore_nesting_unested(self): - metadata = {'a': 'A', 'b': 'B'} - unwound = utils.restore_nesting(metadata) - self.assertIs(metadata, unwound) - - def test_restore_nesting(self): - metadata = {'a': 'A', 'b': 'B', - 'nested:a': 'A', - 'nested:b': 'B', - 'nested:twice:c': 'C', - 'nested:twice:d': 'D', - 'embedded:e': 'E'} - unwound = utils.restore_nesting(metadata) - expected = {'a': 'A', 'b': 'B', - 'nested': {'a': 'A', 'b': 'B', - 'twice': {'c': 'C', 'd': 'D'}}, - 'embedded': {'e': 'E'}} - self.assertEqual(expected, unwound) - self.assertIsNot(metadata, unwound) - - def test_restore_nesting_with_separator(self): - metadata = {'a': 'A', 'b': 'B', - 'nested.a': 'A', - 'nested.b': 'B', - 'nested.twice.c': 'C', - 'nested.twice.d': 'D', - 'embedded.e': 'E'} - unwound = utils.restore_nesting(metadata, separator='.') - expected = {'a': 'A', 'b': 'B', - 'nested': {'a': 'A', 'b': 'B', - 'twice': {'c': 'C', 'd': 'D'}}, - 'embedded': {'e': 'E'}} - self.assertEqual(expected, unwound) - self.assertIsNot(metadata, unwound) - - def test_decimal_to_dt_with_none_parameter(self): - self.assertIsNone(utils.decimal_to_dt(None)) - - def test_dict_to_kv(self): - data = {'a': 'A', - 'b': 'B', - 'nested': {'a': 'A', - 'b': 'B', - }, - 'nested2': [{'c': 'A'}, {'c': 'B'}] - } - pairs = list(utils.dict_to_keyval(data)) - self.assertEqual([('a', 'A'), - ('b', 'B'), - ('nested.a', 'A'), - ('nested.b', 'B'), - ('nested2[0].c', 'A'), - ('nested2[1].c', 'B')], - sorted(pairs, key=lambda x: x[0])) - - def test_hash_of_set(self): - x = ['a', 'b'] - y = ['a', 'b', 'a'] - z = ['a', 'c'] - 
self.assertEqual(utils.hash_of_set(x), utils.hash_of_set(y)) - self.assertNotEqual(utils.hash_of_set(x), utils.hash_of_set(z)) - self.assertNotEqual(utils.hash_of_set(y), utils.hash_of_set(z)) - - def test_hash_ring(self): - num_nodes = 10 - num_keys = 1000 - - nodes = [str(x) for x in range(num_nodes)] - hr = utils.HashRing(nodes) - - buckets = [0] * num_nodes - assignments = [-1] * num_keys - for k in range(num_keys): - n = int(hr.get_node(str(k))) - self.assertTrue(0 <= n <= num_nodes) - buckets[n] += 1 - assignments[k] = n - - # at least something in each bucket - self.assertTrue(all((c > 0 for c in buckets))) - - # approximately even distribution - diff = max(buckets) - min(buckets) - self.assertTrue(diff < 0.3 * (num_keys / num_nodes)) - - # consistency - num_nodes += 1 - nodes.append(str(num_nodes + 1)) - hr = utils.HashRing(nodes) - for k in range(num_keys): - n = int(hr.get_node(str(k))) - assignments[k] -= n - reassigned = len([c for c in assignments if c != 0]) - self.assertTrue(reassigned < num_keys / num_nodes) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/agent/agentbase.py ceilometer-5.0.0~b3/ceilometer/tests/unit/agent/agentbase.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/agent/agentbase.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/agent/agentbase.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,716 @@ +# +# Copyright 2012 New Dream Network, LLC (DreamHost) +# Copyright 2013 Intel corp. +# Copyright 2013 eNovance +# Copyright 2014 Red Hat, Inc +# +# Authors: Yunhong Jiang +# Julien Danjou +# Eoghan Glynn +# Nejc Saje +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
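[Editor's note on test_hash_ring above: it encodes the two properties the partition coordinator needs from utils.HashRing: keys land on nodes roughly uniformly, and growing the ring by one node moves only on the order of 1/n of the keys. A minimal consistent-hashing sketch with those properties (the replica count and md5 choice are assumptions; the real implementation may differ):]

import bisect
import hashlib

class HashRingSketch(object):
    def __init__(self, nodes, replicas=100):
        # Place `replicas` virtual points per node on a hash circle.
        self._ring = sorted(
            (self._hash('%s-%d' % (node, i)), node)
            for node in nodes for i in range(replicas))
        self._hashes = [h for h, _ in self._ring]

    @staticmethod
    def _hash(data):
        return int(hashlib.md5(data.encode('utf-8')).hexdigest(), 16)

    def get_node(self, key):
        # The first virtual point clockwise of the key's hash owns the key;
        # adding a node only claims keys adjacent to its new points.
        pos = bisect.bisect(self._hashes, self._hash(key)) % len(self._ring)
        return self._ring[pos][1]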
+ +import abc +import copy +import datetime + +import mock +from oslo_config import fixture as fixture_config +from oslotest import mockpatch +import six +from stevedore import extension + +from ceilometer.agent import plugin_base +from ceilometer import pipeline +from ceilometer import publisher +from ceilometer.publisher import test as test_publisher +from ceilometer import sample +from ceilometer.tests import base +from ceilometer import utils + + +class TestSample(sample.Sample): + def __init__(self, name, type, unit, volume, user_id, project_id, + resource_id, timestamp, resource_metadata, source=None): + super(TestSample, self).__init__(name, type, unit, volume, user_id, + project_id, resource_id, timestamp, + resource_metadata, source) + + def __eq__(self, other): + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + return False + + def __ne__(self, other): + return not self.__eq__(other) + + +default_test_data = TestSample( + name='test', + type=sample.TYPE_CUMULATIVE, + unit='', + volume=1, + user_id='test', + project_id='test', + resource_id='test_run_tasks', + timestamp=datetime.datetime.utcnow().isoformat(), + resource_metadata={'name': 'Pollster'}, +) + + +class TestPollster(plugin_base.PollsterBase): + test_data = default_test_data + discovery = None + + @property + def default_discovery(self): + return self.discovery + + def get_samples(self, manager, cache, resources): + resources = resources or [] + self.samples.append((manager, resources)) + self.resources.extend(resources) + c = copy.deepcopy(self.test_data) + c.resource_metadata['resources'] = resources + return [c] + + +class BatchTestPollster(TestPollster): + test_data = default_test_data + discovery = None + + @property + def default_discovery(self): + return self.discovery + + def get_samples(self, manager, cache, resources): + resources = resources or [] + self.samples.append((manager, resources)) + self.resources.extend(resources) + for resource in resources: + c = copy.deepcopy(self.test_data) + c.timestamp = datetime.datetime.utcnow().isoformat() + c.resource_id = resource + c.resource_metadata['resource'] = resource + yield c + + +class TestPollsterException(TestPollster): + def get_samples(self, manager, cache, resources): + resources = resources or [] + self.samples.append((manager, resources)) + self.resources.extend(resources) + raise Exception() + + +class TestDiscovery(plugin_base.DiscoveryBase): + def discover(self, manager, param=None): + self.params.append(param) + return self.resources + + +class TestDiscoveryException(plugin_base.DiscoveryBase): + def discover(self, manager, param=None): + self.params.append(param) + raise Exception() + + +@six.add_metaclass(abc.ABCMeta) +class BaseAgentManagerTestCase(base.BaseTestCase): + + class Pollster(TestPollster): + samples = [] + resources = [] + test_data = default_test_data + + class BatchPollster(BatchTestPollster): + samples = [] + resources = [] + test_data = default_test_data + + class PollsterAnother(TestPollster): + samples = [] + resources = [] + test_data = TestSample( + name='testanother', + type=default_test_data.type, + unit=default_test_data.unit, + volume=default_test_data.volume, + user_id=default_test_data.user_id, + project_id=default_test_data.project_id, + resource_id=default_test_data.resource_id, + timestamp=default_test_data.timestamp, + resource_metadata=default_test_data.resource_metadata) + + class PollsterException(TestPollsterException): + samples = [] + resources = [] + test_data = TestSample( + 
name='testexception', + type=default_test_data.type, + unit=default_test_data.unit, + volume=default_test_data.volume, + user_id=default_test_data.user_id, + project_id=default_test_data.project_id, + resource_id=default_test_data.resource_id, + timestamp=default_test_data.timestamp, + resource_metadata=default_test_data.resource_metadata) + + class PollsterExceptionAnother(TestPollsterException): + samples = [] + resources = [] + test_data = TestSample( + name='testexceptionanother', + type=default_test_data.type, + unit=default_test_data.unit, + volume=default_test_data.volume, + user_id=default_test_data.user_id, + project_id=default_test_data.project_id, + resource_id=default_test_data.resource_id, + timestamp=default_test_data.timestamp, + resource_metadata=default_test_data.resource_metadata) + + class Discovery(TestDiscovery): + params = [] + resources = [] + + class DiscoveryAnother(TestDiscovery): + params = [] + resources = [] + + @property + def group_id(self): + return 'another_group' + + class DiscoveryException(TestDiscoveryException): + params = [] + + def setup_polling(self): + self.mgr.polling_manager = pipeline.PollingManager(self.pipeline_cfg) + + def create_extension_list(self): + return [extension.Extension('test', + None, + None, + self.Pollster(), ), + extension.Extension('testbatch', + None, + None, + self.BatchPollster(), ), + extension.Extension('testanother', + None, + None, + self.PollsterAnother(), ), + extension.Extension('testexception', + None, + None, + self.PollsterException(), ), + extension.Extension('testexceptionanother', + None, + None, + self.PollsterExceptionAnother(), )] + + def create_discovery_manager(self): + return extension.ExtensionManager.make_test_instance( + [ + extension.Extension( + 'testdiscovery', + None, + None, + self.Discovery(), ), + extension.Extension( + 'testdiscoveryanother', + None, + None, + self.DiscoveryAnother(), ), + extension.Extension( + 'testdiscoveryexception', + None, + None, + self.DiscoveryException(), ), + ], + ) + + @abc.abstractmethod + def create_manager(self): + """Return subclass specific manager.""" + + @mock.patch('ceilometer.pipeline.setup_polling', mock.MagicMock()) + def setUp(self): + super(BaseAgentManagerTestCase, self).setUp() + self.mgr = self.create_manager() + self.mgr.extensions = self.create_extension_list() + self.mgr.partition_coordinator = mock.MagicMock() + fake_subset = lambda _, x: x + p_coord = self.mgr.partition_coordinator + p_coord.extract_my_subset.side_effect = fake_subset + self.mgr.tg = mock.MagicMock() + self.pipeline_cfg = { + 'sources': [{ + 'name': 'test_pipeline', + 'interval': 60, + 'meters': ['test'], + 'resources': ['test://'] if self.source_resources else [], + 'sinks': ['test_sink']}], + 'sinks': [{ + 'name': 'test_sink', + 'transformers': [], + 'publishers': ["test"]}] + } + self.setup_polling() + self.CONF = self.useFixture(fixture_config.Config()).conf + self.CONF.set_override( + 'pipeline_cfg_file', + self.path_get('etc/ceilometer/pipeline.yaml') + ) + self.useFixture(mockpatch.PatchObject( + publisher, 'get_publisher', side_effect=self.get_publisher)) + + @staticmethod + def get_publisher(url, namespace=''): + fake_drivers = {'test://': test_publisher.TestPublisher, + 'new://': test_publisher.TestPublisher, + 'rpc://': test_publisher.TestPublisher} + return fake_drivers[url](url) + + def tearDown(self): + self.Pollster.samples = [] + self.Pollster.discovery = [] + self.PollsterAnother.samples = [] + self.PollsterAnother.discovery = [] + self.PollsterException.samples 
= [] + self.PollsterException.discovery = [] + self.PollsterExceptionAnother.samples = [] + self.PollsterExceptionAnother.discovery = [] + self.Pollster.resources = [] + self.PollsterAnother.resources = [] + self.PollsterException.resources = [] + self.PollsterExceptionAnother.resources = [] + self.Discovery.params = [] + self.DiscoveryAnother.params = [] + self.DiscoveryException.params = [] + self.Discovery.resources = [] + self.DiscoveryAnother.resources = [] + super(BaseAgentManagerTestCase, self).tearDown() + + @mock.patch('ceilometer.pipeline.setup_polling') + def test_start(self, setup_polling): + self.mgr.join_partitioning_groups = mock.MagicMock() + self.mgr.setup_polling_tasks = mock.MagicMock() + self.CONF.set_override('heartbeat', 1.0, group='coordination') + self.mgr.start() + setup_polling.assert_called_once_with() + self.mgr.partition_coordinator.start.assert_called_once_with() + self.mgr.join_partitioning_groups.assert_called_once_with() + self.mgr.setup_polling_tasks.assert_called_once_with() + timer_call = mock.call(1.0, self.mgr.partition_coordinator.heartbeat) + self.assertEqual([timer_call], self.mgr.tg.add_timer.call_args_list) + self.mgr.stop() + self.mgr.partition_coordinator.stop.assert_called_once_with() + + @mock.patch('ceilometer.pipeline.setup_polling') + def test_start_with_pipeline_poller(self, setup_polling): + self.mgr.join_partitioning_groups = mock.MagicMock() + self.mgr.setup_polling_tasks = mock.MagicMock() + + self.CONF.set_override('heartbeat', 1.0, group='coordination') + self.CONF.set_override('refresh_pipeline_cfg', True) + self.CONF.set_override('pipeline_polling_interval', 5) + self.mgr.start() + setup_polling.assert_called_once_with() + self.mgr.partition_coordinator.start.assert_called_once_with() + self.mgr.join_partitioning_groups.assert_called_once_with() + self.mgr.setup_polling_tasks.assert_called_once_with() + timer_call = mock.call(1.0, self.mgr.partition_coordinator.heartbeat) + pipeline_poller_call = mock.call(5, self.mgr.refresh_pipeline) + self.assertEqual([timer_call, pipeline_poller_call], + self.mgr.tg.add_timer.call_args_list) + + def test_join_partitioning_groups(self): + self.mgr.discovery_manager = self.create_discovery_manager() + self.mgr.join_partitioning_groups() + p_coord = self.mgr.partition_coordinator + static_group_ids = [utils.hash_of_set(p['resources']) + for p in self.pipeline_cfg['sources'] + if p['resources']] + expected = [mock.call(self.mgr.construct_group_id(g)) + for g in ['another_group', 'global'] + static_group_ids] + self.assertEqual(len(expected), len(p_coord.join_group.call_args_list)) + for c in expected: + self.assertIn(c, p_coord.join_group.call_args_list) + + def test_setup_polling_tasks(self): + polling_tasks = self.mgr.setup_polling_tasks() + self.assertEqual(1, len(polling_tasks)) + self.assertTrue(60 in polling_tasks.keys()) + per_task_resources = polling_tasks[60].resources + self.assertEqual(1, len(per_task_resources)) + self.assertEqual(set(self.pipeline_cfg['sources'][0]['resources']), + set(per_task_resources['test_pipeline-test'].get({}))) + + def test_setup_polling_tasks_multiple_interval(self): + self.pipeline_cfg['sources'].append({ + 'name': 'test_pipeline_1', + 'interval': 10, + 'meters': ['test'], + 'resources': ['test://'] if self.source_resources else [], + 'sinks': ['test_sink'] + }) + self.setup_polling() + polling_tasks = self.mgr.setup_polling_tasks() + self.assertEqual(2, len(polling_tasks)) + self.assertTrue(60 in polling_tasks.keys()) + self.assertTrue(10 in 
polling_tasks.keys()) + + def test_setup_polling_tasks_mismatch_counter(self): + self.pipeline_cfg['sources'].append({ + 'name': 'test_pipeline_1', + 'interval': 10, + 'meters': ['test_invalid'], + 'resources': ['invalid://'], + 'sinks': ['test_sink'] + }) + polling_tasks = self.mgr.setup_polling_tasks() + self.assertEqual(1, len(polling_tasks)) + self.assertTrue(60 in polling_tasks.keys()) + self.assertFalse(10 in polling_tasks.keys()) + + def test_setup_polling_task_same_interval(self): + self.pipeline_cfg['sources'].append({ + 'name': 'test_pipeline_1', + 'interval': 60, + 'meters': ['testanother'], + 'resources': ['testanother://'] if self.source_resources else [], + 'sinks': ['test_sink'] + }) + self.setup_polling() + polling_tasks = self.mgr.setup_polling_tasks() + self.assertEqual(1, len(polling_tasks)) + pollsters = polling_tasks.get(60).pollster_matches + self.assertEqual(2, len(pollsters)) + per_task_resources = polling_tasks[60].resources + self.assertEqual(2, len(per_task_resources)) + key = 'test_pipeline-test' + self.assertEqual(set(self.pipeline_cfg['sources'][0]['resources']), + set(per_task_resources[key].get({}))) + key = 'test_pipeline_1-testanother' + self.assertEqual(set(self.pipeline_cfg['sources'][1]['resources']), + set(per_task_resources[key].get({}))) + + def test_agent_manager_start(self): + mgr = self.create_manager() + mgr.extensions = self.mgr.extensions + mgr.create_polling_task = mock.MagicMock() + mgr.tg = mock.MagicMock() + mgr.start() + self.assertTrue(mgr.tg.add_timer.called) + + def test_manager_exception_persistency(self): + self.pipeline_cfg['sources'].append({ + 'name': 'test_pipeline_1', + 'interval': 60, + 'meters': ['testanother'], + 'sinks': ['test_sink'] + }) + self.setup_polling() + + def _verify_discovery_params(self, expected): + self.assertEqual(expected, self.Discovery.params) + self.assertEqual(expected, self.DiscoveryAnother.params) + self.assertEqual(expected, self.DiscoveryException.params) + + def _do_test_per_pollster_discovery(self, discovered_resources, + static_resources): + self.Pollster.discovery = 'testdiscovery' + self.mgr.discovery_manager = self.create_discovery_manager() + self.Discovery.resources = discovered_resources + self.DiscoveryAnother.resources = [d[::-1] + for d in discovered_resources] + if static_resources: + # just so we can test that static + pre_pipeline amalgamated + # override per_pollster + self.pipeline_cfg['sources'][0]['discovery'] = [ + 'testdiscoveryanother', + 'testdiscoverynonexistent', + 'testdiscoveryexception'] + self.pipeline_cfg['sources'][0]['resources'] = static_resources + self.setup_polling() + polling_tasks = self.mgr.setup_polling_tasks() + self.mgr.interval_task(polling_tasks.get(60)) + if static_resources: + self.assertEqual(set(static_resources + + self.DiscoveryAnother.resources), + set(self.Pollster.resources)) + else: + self.assertEqual(set(self.Discovery.resources), + set(self.Pollster.resources)) + + # Make sure no duplicated resource from discovery + for x in self.Pollster.resources: + self.assertEqual(1, self.Pollster.resources.count(x)) + + def test_per_pollster_discovery(self): + self._do_test_per_pollster_discovery(['discovered_1', 'discovered_2'], + []) + + def test_per_pollster_discovery_overridden_by_per_pipeline_discovery(self): + # ensure static+per_source_discovery overrides per_pollster_discovery + self._do_test_per_pollster_discovery(['discovered_1', 'discovered_2'], + ['static_1', 'static_2']) + + def test_per_pollster_discovery_duplicated(self): + 
self._do_test_per_pollster_discovery(['dup', 'discovered_1', 'dup'], + []) + + def test_per_pollster_discovery_overridden_by_duplicated_static(self): + self._do_test_per_pollster_discovery(['discovered_1', 'discovered_2'], + ['static_1', 'dup', 'dup']) + + def test_per_pollster_discovery_caching(self): + # ensure single discovery associated with multiple pollsters + # only called once per polling cycle + discovered_resources = ['discovered_1', 'discovered_2'] + self.Pollster.discovery = 'testdiscovery' + self.PollsterAnother.discovery = 'testdiscovery' + self.mgr.discovery_manager = self.create_discovery_manager() + self.Discovery.resources = discovered_resources + self.pipeline_cfg['sources'][0]['meters'].append('testanother') + self.pipeline_cfg['sources'][0]['resources'] = [] + self.setup_polling() + polling_tasks = self.mgr.setup_polling_tasks() + self.mgr.interval_task(polling_tasks.get(60)) + self.assertEqual(1, len(self.Discovery.params)) + self.assertEqual(discovered_resources, self.Pollster.resources) + self.assertEqual(discovered_resources, self.PollsterAnother.resources) + + def _do_test_per_pipeline_discovery(self, + discovered_resources, + static_resources): + self.mgr.discovery_manager = self.create_discovery_manager() + self.Discovery.resources = discovered_resources + self.DiscoveryAnother.resources = [d[::-1] + for d in discovered_resources] + self.pipeline_cfg['sources'][0]['discovery'] = [ + 'testdiscovery', 'testdiscoveryanother', + 'testdiscoverynonexistent', 'testdiscoveryexception'] + self.pipeline_cfg['sources'][0]['resources'] = static_resources + self.setup_polling() + polling_tasks = self.mgr.setup_polling_tasks() + self.mgr.interval_task(polling_tasks.get(60)) + discovery = self.Discovery.resources + self.DiscoveryAnother.resources + # compare resource lists modulo ordering + self.assertEqual(set(static_resources + discovery), + set(self.Pollster.resources)) + + # Make sure no duplicated resource from discovery + for x in self.Pollster.resources: + self.assertEqual(1, self.Pollster.resources.count(x)) + + def test_per_pipeline_discovery_discovered_only(self): + self._do_test_per_pipeline_discovery(['discovered_1', 'discovered_2'], + []) + + def test_per_pipeline_discovery_static_only(self): + self._do_test_per_pipeline_discovery([], + ['static_1', 'static_2']) + + def test_per_pipeline_discovery_discovered_augmented_by_static(self): + self._do_test_per_pipeline_discovery(['discovered_1', 'discovered_2'], + ['static_1', 'static_2']) + + def test_per_pipeline_discovery_discovered_duplicated_static(self): + self._do_test_per_pipeline_discovery(['discovered_1', 'pud'], + ['dup', 'static_1', 'dup']) + + def test_multiple_pipelines_different_static_resources(self): + # assert that the individual lists of static and discovered resources + # for each pipeline with a common interval are passed to individual + # pollsters matching each pipeline + self.pipeline_cfg['sources'][0]['resources'] = ['test://'] + self.pipeline_cfg['sources'][0]['discovery'] = ['testdiscovery'] + self.pipeline_cfg['sources'].append({ + 'name': 'another_pipeline', + 'interval': 60, + 'meters': ['test'], + 'resources': ['another://'], + 'discovery': ['testdiscoveryanother'], + 'sinks': ['test_sink_new'] + }) + self.mgr.discovery_manager = self.create_discovery_manager() + self.Discovery.resources = ['discovered_1', 'discovered_2'] + self.DiscoveryAnother.resources = ['discovered_3', 'discovered_4'] + self.setup_polling() + polling_tasks = self.mgr.setup_polling_tasks() + self.assertEqual(1, 
len(polling_tasks)) + self.assertTrue(60 in polling_tasks.keys()) + self.mgr.interval_task(polling_tasks.get(60)) + self.assertEqual([None], self.Discovery.params) + self.assertEqual([None], self.DiscoveryAnother.params) + self.assertEqual(2, len(self.Pollster.samples)) + samples = self.Pollster.samples + test_resources = ['test://', 'discovered_1', 'discovered_2'] + another_resources = ['another://', 'discovered_3', 'discovered_4'] + if samples[0][1] == test_resources: + self.assertEqual(another_resources, samples[1][1]) + elif samples[0][1] == another_resources: + self.assertEqual(test_resources, samples[1][1]) + else: + self.fail('unexpected sample resources %s' % samples) + + def test_multiple_sources_different_discoverers(self): + self.Discovery.resources = ['discovered_1', 'discovered_2'] + self.DiscoveryAnother.resources = ['discovered_3', 'discovered_4'] + sources = [{'name': 'test_source_1', + 'interval': 60, + 'meters': ['test'], + 'discovery': ['testdiscovery'], + 'sinks': ['test_sink_1']}, + {'name': 'test_source_2', + 'interval': 60, + 'meters': ['testanother'], + 'discovery': ['testdiscoveryanother'], + 'sinks': ['test_sink_2']}] + sinks = [{'name': 'test_sink_1', + 'transformers': [], + 'publishers': ['test://']}, + {'name': 'test_sink_2', + 'transformers': [], + 'publishers': ['test://']}] + self.pipeline_cfg = {'sources': sources, 'sinks': sinks} + self.mgr.discovery_manager = self.create_discovery_manager() + self.setup_polling() + polling_tasks = self.mgr.setup_polling_tasks() + self.assertEqual(1, len(polling_tasks)) + self.assertTrue(60 in polling_tasks.keys()) + self.mgr.interval_task(polling_tasks.get(60)) + self.assertEqual(1, len(self.Pollster.samples)) + self.assertEqual(['discovered_1', 'discovered_2'], + self.Pollster.resources) + self.assertEqual(1, len(self.PollsterAnother.samples)) + self.assertEqual(['discovered_3', 'discovered_4'], + self.PollsterAnother.resources) + + def test_multiple_sinks_same_discoverer(self): + self.Discovery.resources = ['discovered_1', 'discovered_2'] + sources = [{'name': 'test_source_1', + 'interval': 60, + 'meters': ['test'], + 'discovery': ['testdiscovery'], + 'sinks': ['test_sink_1', 'test_sink_2']}] + sinks = [{'name': 'test_sink_1', + 'transformers': [], + 'publishers': ['test://']}, + {'name': 'test_sink_2', + 'transformers': [], + 'publishers': ['test://']}] + self.pipeline_cfg = {'sources': sources, 'sinks': sinks} + self.mgr.discovery_manager = self.create_discovery_manager() + self.setup_polling() + polling_tasks = self.mgr.setup_polling_tasks() + self.assertEqual(1, len(polling_tasks)) + self.assertTrue(60 in polling_tasks.keys()) + self.mgr.interval_task(polling_tasks.get(60)) + self.assertEqual(1, len(self.Pollster.samples)) + self.assertEqual(['discovered_1', 'discovered_2'], + self.Pollster.resources) + + def test_discovery_partitioning(self): + self.mgr.discovery_manager = self.create_discovery_manager() + p_coord = self.mgr.partition_coordinator + self.pipeline_cfg['sources'][0]['discovery'] = [ + 'testdiscovery', 'testdiscoveryanother', + 'testdiscoverynonexistent', 'testdiscoveryexception'] + self.pipeline_cfg['sources'][0]['resources'] = [] + self.setup_polling() + polling_tasks = self.mgr.setup_polling_tasks() + self.mgr.interval_task(polling_tasks.get(60)) + expected = [mock.call(self.mgr.construct_group_id(d.obj.group_id), + d.obj.resources) + for d in self.mgr.discovery_manager + if hasattr(d.obj, 'resources')] + self.assertEqual(len(expected), + len(p_coord.extract_my_subset.call_args_list)) + for c in 
expected: + self.assertIn(c, p_coord.extract_my_subset.call_args_list) + + def test_static_resources_partitioning(self): + p_coord = self.mgr.partition_coordinator + static_resources = ['static_1', 'static_2'] + static_resources2 = ['static_3', 'static_4'] + self.pipeline_cfg['sources'][0]['resources'] = static_resources + self.pipeline_cfg['sources'].append({ + 'name': 'test_pipeline2', + 'interval': 60, + 'meters': ['test', 'test2'], + 'resources': static_resources2, + 'sinks': ['test_sink'] + }) + # have one pipeline without static resources defined + self.pipeline_cfg['sources'].append({ + 'name': 'test_pipeline3', + 'interval': 60, + 'meters': ['test', 'test2'], + 'resources': [], + 'sinks': ['test_sink'] + }) + self.setup_polling() + polling_tasks = self.mgr.setup_polling_tasks() + self.mgr.interval_task(polling_tasks.get(60)) + # Only two groups need to be created, one for each pipeline, + # even though counter test is used twice + expected = [mock.call(self.mgr.construct_group_id( + utils.hash_of_set(resources)), + resources) + for resources in [static_resources, + static_resources2]] + self.assertEqual(len(expected), + len(p_coord.extract_my_subset.call_args_list)) + for c in expected: + self.assertIn(c, p_coord.extract_my_subset.call_args_list) + + @mock.patch('ceilometer.agent.base.LOG') + def test_polling_and_notify_with_resources(self, LOG): + self.setup_polling() + polling_task = list(self.mgr.setup_polling_tasks().values())[0] + polling_task.poll_and_notify() + LOG.info.assert_called_with( + 'Polling pollster %(poll)s in the context of %(src)s', + {'poll': 'test', 'src': 'test_pipeline'}) + + @mock.patch('ceilometer.agent.base.LOG') + def test_skip_polling_and_notify_with_no_resources(self, LOG): + self.pipeline_cfg['sources'][0]['resources'] = [] + self.setup_polling() + polling_task = list(self.mgr.setup_polling_tasks().values())[0] + pollster = list(polling_task.pollster_matches['test_pipeline'])[0] + polling_task.poll_and_notify() + LOG.info.assert_called_with( + 'Skip pollster %(name)s, no %(p_context)sresources found this ' + 'cycle', {'name': pollster.name, 'p_context': ''}) + + @mock.patch('ceilometer.agent.base.LOG') + def test_skip_polling_polled_resources(self, LOG): + self.pipeline_cfg['sources'].append({ + 'name': 'test_pipeline_1', + 'interval': 60, + 'meters': ['test'], + 'resources': ['test://'], + 'sinks': ['test_sink'] + }) + self.setup_polling() + polling_task = list(self.mgr.setup_polling_tasks().values())[0] + polling_task.poll_and_notify() + LOG.info.assert_called_with( + 'Skip pollster %(name)s, no %(p_context)sresources found this ' + 'cycle', {'name': 'test', 'p_context': 'new '}) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/agent/test_discovery.py ceilometer-5.0.0~b3/ceilometer/tests/unit/agent/test_discovery.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/agent/test_discovery.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/agent/test_discovery.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,108 @@ +# +# Copyright 2014 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Tests for ceilometer/agent/discovery/
+"""
+
+import mock
+from oslo_config import fixture as fixture_config
+from oslotest import base
+
+from ceilometer.agent.discovery import endpoint
+from ceilometer.agent.discovery import localnode
+from ceilometer.hardware import discovery as hardware
+
+
+class TestEndpointDiscovery(base.BaseTestCase):
+
+    def setUp(self):
+        super(TestEndpointDiscovery, self).setUp()
+        self.discovery = endpoint.EndpointDiscovery()
+        self.manager = mock.MagicMock()
+        self.CONF = self.useFixture(fixture_config.Config()).conf
+        self.CONF.set_override('os_endpoint_type', 'test-endpoint-type',
+                               group='service_credentials')
+        self.CONF.set_override('os_region_name', 'test-region-name',
+                               group='service_credentials')
+
+    def test_keystone_called(self):
+        self.discovery.discover(self.manager, param='test-service-type')
+        expected = [mock.call(service_type='test-service-type',
+                              endpoint_type='test-endpoint-type',
+                              region_name='test-region-name')]
+        self.assertEqual(expected,
+                         self.manager.keystone.service_catalog.get_urls
+                         .call_args_list)
+
+    def test_keystone_called_no_service_type(self):
+        self.discovery.discover(self.manager)
+        expected = [mock.call(service_type=None,
+                              endpoint_type='test-endpoint-type',
+                              region_name='test-region-name')]
+        self.assertEqual(expected,
+                         self.manager.keystone.service_catalog.get_urls
+                         .call_args_list)
+
+    def test_keystone_called_no_endpoints(self):
+        self.manager.keystone.service_catalog.get_urls.return_value = []
+        self.assertEqual([], self.discovery.discover(self.manager))
+
+
+class TestLocalnodeDiscovery(base.BaseTestCase):
+    def setUp(self):
+        super(TestLocalnodeDiscovery, self).setUp()
+        self.discovery = localnode.LocalNodeDiscovery()
+        self.manager = mock.MagicMock()
+
+    def test_localnode_discovery(self):
+        self.assertEqual(['local_host'],
+                         self.discovery.discover(self.manager))
+
+
+class TestHardwareDiscovery(base.BaseTestCase):
+    class MockInstance(object):
+        addresses = {'ctlplane': [
+            {'addr': '0.0.0.0',
+             'OS-EXT-IPS-MAC:mac_addr': '01-23-45-67-89-ab'}
+        ]}
+        id = 'resource_id'
+        image = {'id': 'image_id'}
+        flavor = {'id': 'flavor_id'}
+
+    expected = {
+        'resource_id': 'resource_id',
+        'resource_url': 'snmp://ro_snmp_user:password@0.0.0.0',
+        'mac_addr': '01-23-45-67-89-ab',
+        'image_id': 'image_id',
+        'flavor_id': 'flavor_id',
+    }
+
+    def setUp(self):
+        super(TestHardwareDiscovery, self).setUp()
+        self.discovery = hardware.NodesDiscoveryTripleO()
+        self.discovery.nova_cli = mock.MagicMock()
+        self.manager = mock.MagicMock()
+
+    def test_hardware_discovery(self):
+        self.discovery.nova_cli.instance_get_all.return_value = [
+            self.MockInstance()]
+        resources = self.discovery.discover(self.manager)
+        self.assertEqual(1, len(resources))
+        self.assertEqual(self.expected, resources[0])
+
+    def test_hardware_discovery_without_flavor(self):
+        instance = self.MockInstance()
+        instance.flavor = {}
+        self.discovery.nova_cli.instance_get_all.return_value = [instance]
+        resources = self.discovery.discover(self.manager)
+        self.assertEqual(0, len(resources))
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/agent/test_manager.py ceilometer-5.0.0~b3/ceilometer/tests/unit/agent/test_manager.py
--- ceilometer-5.0.0~b2/ceilometer/tests/unit/agent/test_manager.py 1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/unit/agent/test_manager.py 2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,412 @@
+#
+# Copyright 2013 Intel Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Tests for ceilometer/agent/manager.py
+"""
+
+import shutil
+
+import eventlet
+import mock
+from oslo_service import service as os_service
+from oslo_utils import fileutils
+from oslo_utils import timeutils
+from oslotest import base
+from oslotest import mockpatch
+import six
+from stevedore import extension
+import yaml
+
+from ceilometer.agent import base as agent_base
+from ceilometer.agent import manager
+from ceilometer.agent import plugin_base
+from ceilometer import pipeline
+from ceilometer.tests.unit.agent import agentbase
+
+
+class PollingException(Exception):
+    pass
+
+
+@mock.patch('ceilometer.compute.pollsters.'
+            'BaseComputePollster.setup_environment',
+            mock.Mock(return_value=None))
+class TestManager(base.BaseTestCase):
+
+    @mock.patch('ceilometer.pipeline.setup_polling', mock.MagicMock())
+    def test_load_plugins(self):
+        mgr = manager.AgentManager()
+        self.assertIsNotNone(list(mgr.extensions))
+
+    def test_load_plugins_pollster_list(self):
+        mgr = manager.AgentManager(pollster_list=['disk.*'])
+        # there are currently 26 disk-related pollsters
+        self.assertEqual(26, len(list(mgr.extensions)))
+
+    def test_load_plugins_no_intersection(self):
+        # Nothing should be polled when the namespace and the pollster
+        # list have no intersection.
+        mgr = manager.AgentManager(namespaces=['compute'],
+                                   pollster_list=['storage.*'])
+        self.assertEqual(0, len(list(mgr.extensions)))
+
+    # Test plugin load behavior based on Node Manager pollsters.
+    # pollster_list is just a filter, so sensor pollsters under the 'ipmi'
+    # namespace are instantiated as well; their __init__ must still be
+    # mocked.
+    @mock.patch('ceilometer.ipmi.pollsters.node._Base.__init__',
+                mock.Mock(return_value=None))
+    @mock.patch('ceilometer.ipmi.pollsters.sensor.SensorPollster.__init__',
+                mock.Mock(return_value=None))
+    def test_load_normal_plugins(self):
+        mgr = manager.AgentManager(namespaces=['ipmi'],
+                                   pollster_list=['hardware.ipmi.node.*'])
+        # 8 pollsters for Node Manager
+        self.assertEqual(8, len(mgr.extensions))
+
+    # Skip loading pollster upon ExtensionLoadError
+    @mock.patch('ceilometer.ipmi.pollsters.node._Base.__init__',
+                mock.Mock(side_effect=plugin_base.ExtensionLoadError))
+    @mock.patch('ceilometer.ipmi.pollsters.sensor.SensorPollster.__init__',
+                mock.Mock(return_value=None))
+    @mock.patch('ceilometer.agent.base.LOG')
+    def test_load_failed_plugins(self, LOG):
+        # Additionally check that a namespaces parameter that is not a
+        # list is converted to one.
+ mgr = manager.AgentManager(namespaces='ipmi', + pollster_list=['hardware.ipmi.node.*']) + # 0 pollsters + self.assertEqual(0, len(mgr.extensions)) + + err_msg = 'Skip loading extension for hardware.ipmi.node.%s' + pollster_names = [ + 'power', 'temperature', 'outlet_temperature', + 'airflow', 'cups', 'cpu_util', 'mem_util', 'io_util'] + calls = [mock.call(err_msg % n) for n in pollster_names] + LOG.error.assert_has_calls(calls=calls, + any_order=True) + + # Skip loading pollster upon ImportError + @mock.patch('ceilometer.ipmi.pollsters.node._Base.__init__', + mock.Mock(side_effect=ImportError)) + @mock.patch('ceilometer.ipmi.pollsters.sensor.SensorPollster.__init__', + mock.Mock(return_value=None)) + def test_import_error_in_plugin(self): + mgr = manager.AgentManager(namespaces=['ipmi'], + pollster_list=['hardware.ipmi.node.*']) + # 0 pollsters + self.assertEqual(0, len(mgr.extensions)) + + # Exceptions other than ExtensionLoadError are propagated + @mock.patch('ceilometer.ipmi.pollsters.node._Base.__init__', + mock.Mock(side_effect=PollingException)) + @mock.patch('ceilometer.ipmi.pollsters.sensor.SensorPollster.__init__', + mock.Mock(return_value=None)) + def test_load_exceptional_plugins(self): + self.assertRaises(PollingException, + manager.AgentManager, + ['ipmi'], + ['hardware.ipmi.node.*']) + + def test_load_plugins_pollster_list_forbidden(self): + manager.cfg.CONF.set_override('backend_url', 'http://', + group='coordination') + self.assertRaises(agent_base.PollsterListForbidden, + manager.AgentManager, + pollster_list=['disk.*']) + manager.cfg.CONF.reset() + + +class TestPollsterKeystone(agentbase.TestPollster): + @plugin_base.check_keystone + def get_samples(self, manager, cache, resources): + func = super(TestPollsterKeystone, self).get_samples + return func(manager=manager, + cache=cache, + resources=resources) + + +class TestPollsterPollingException(agentbase.TestPollster): + polling_failures = 0 + + def get_samples(self, manager, cache, resources): + func = super(TestPollsterPollingException, self).get_samples + sample = func(manager=manager, + cache=cache, + resources=resources) + + # Raise polling exception after 2 times + self.polling_failures += 1 + if self.polling_failures > 2: + raise plugin_base.PollsterPermanentError(resources) + + return sample + + +class TestRunTasks(agentbase.BaseAgentManagerTestCase): + + class PollsterKeystone(TestPollsterKeystone): + samples = [] + resources = [] + test_data = agentbase.TestSample( + name='testkeystone', + type=agentbase.default_test_data.type, + unit=agentbase.default_test_data.unit, + volume=agentbase.default_test_data.volume, + user_id=agentbase.default_test_data.user_id, + project_id=agentbase.default_test_data.project_id, + resource_id=agentbase.default_test_data.resource_id, + timestamp=agentbase.default_test_data.timestamp, + resource_metadata=agentbase.default_test_data.resource_metadata) + + class PollsterPollingException(TestPollsterPollingException): + samples = [] + resources = [] + test_data = agentbase.TestSample( + name='testpollingexception', + type=agentbase.default_test_data.type, + unit=agentbase.default_test_data.unit, + volume=agentbase.default_test_data.volume, + user_id=agentbase.default_test_data.user_id, + project_id=agentbase.default_test_data.project_id, + resource_id=agentbase.default_test_data.resource_id, + timestamp=agentbase.default_test_data.timestamp, + resource_metadata=agentbase.default_test_data.resource_metadata) + + @staticmethod + @mock.patch('ceilometer.compute.pollsters.' 
+ 'BaseComputePollster.setup_environment', + mock.Mock(return_value=None)) + def create_manager(): + return manager.AgentManager() + + @staticmethod + def setup_pipeline_file(pipeline): + if six.PY3: + pipeline = pipeline.encode('utf-8') + + pipeline_cfg_file = fileutils.write_to_tempfile(content=pipeline, + prefix="pipeline", + suffix="yaml") + return pipeline_cfg_file + + def fake_notifier_sample(self, ctxt, event_type, payload): + for m in payload['samples']: + del m['message_signature'] + self.notified_samples.append(m) + + def setUp(self): + self.notified_samples = [] + self.notifier = mock.Mock() + self.notifier.info.side_effect = self.fake_notifier_sample + self.useFixture(mockpatch.Patch('oslo_messaging.Notifier', + return_value=self.notifier)) + self.source_resources = True + super(TestRunTasks, self).setUp() + self.useFixture(mockpatch.Patch( + 'keystoneclient.v2_0.client.Client', + return_value=mock.Mock())) + + def tearDown(self): + self.PollsterKeystone.samples = [] + self.PollsterKeystone.resources = [] + self.PollsterPollingException.samples = [] + self.PollsterPollingException.resources = [] + super(TestRunTasks, self).tearDown() + + def create_extension_list(self): + exts = super(TestRunTasks, self).create_extension_list() + exts.extend([extension.Extension('testkeystone', + None, + None, + self.PollsterKeystone(), ), + extension.Extension('testpollingexception', + None, + None, + self.PollsterPollingException(), )]) + return exts + + def test_get_sample_resources(self): + polling_tasks = self.mgr.setup_polling_tasks() + self.mgr.interval_task(list(polling_tasks.values())[0]) + self.assertTrue(self.Pollster.resources) + + def test_when_keystone_fail(self): + """Test for bug 1316532.""" + self.useFixture(mockpatch.Patch( + 'keystoneclient.v2_0.client.Client', + side_effect=Exception)) + self.pipeline_cfg = { + 'sources': [{ + 'name': "test_keystone", + 'interval': 10, + 'meters': ['testkeystone'], + 'resources': ['test://'] if self.source_resources else [], + 'sinks': ['test_sink']}], + 'sinks': [{ + 'name': 'test_sink', + 'transformers': [], + 'publishers': ["test"]}] + } + self.mgr.polling_manager = pipeline.PollingManager(self.pipeline_cfg) + polling_tasks = self.mgr.setup_polling_tasks() + self.mgr.interval_task(list(polling_tasks.values())[0]) + self.assertFalse(self.PollsterKeystone.samples) + self.assertFalse(self.notified_samples) + + @mock.patch('ceilometer.agent.base.LOG') + def test_polling_exception(self, LOG): + source_name = 'test_pollingexception' + self.pipeline_cfg = { + 'sources': [{ + 'name': source_name, + 'interval': 10, + 'meters': ['testpollingexception'], + 'resources': ['test://'] if self.source_resources else [], + 'sinks': ['test_sink']}], + 'sinks': [{ + 'name': 'test_sink', + 'transformers': [], + 'publishers': ["test"]}] + } + self.mgr.polling_manager = pipeline.PollingManager(self.pipeline_cfg) + polling_task = list(self.mgr.setup_polling_tasks().values())[0] + pollster = list(polling_task.pollster_matches[source_name])[0] + + # 2 samples after 4 pollings, as pollster got disabled upon exception + for x in range(0, 4): + self.mgr.interval_task(polling_task) + samples = self.notified_samples + self.assertEqual(2, len(samples)) + LOG.error.assert_called_once_with(( + 'Prevent pollster %(name)s for ' + 'polling source %(source)s anymore!') + % ({'name': pollster.name, 'source': source_name})) + + def test_batching_polled_samples_false(self): + self.CONF.set_override('batch_polled_samples', False) + self._batching_samples(4, 4) + + def 
test_batching_polled_samples_true(self): + self.CONF.set_override('batch_polled_samples', True) + self._batching_samples(4, 1) + + def test_batching_polled_samples_default(self): + self._batching_samples(4, 1) + + def _batching_samples(self, expected_samples, call_count): + pipeline = yaml.dump({ + 'sources': [{ + 'name': 'test_pipeline', + 'interval': 1, + 'meters': ['testbatch'], + 'resources': ['alpha', 'beta', 'gamma', 'delta'], + 'sinks': ['test_sink']}], + 'sinks': [{ + 'name': 'test_sink', + 'transformers': [], + 'publishers': ["test"]}] + }) + + pipeline_cfg_file = self.setup_pipeline_file(pipeline) + + self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) + + self.mgr.tg = os_service.threadgroup.ThreadGroup(1000) + self.mgr.start() + start = timeutils.utcnow() + while timeutils.delta_seconds(start, timeutils.utcnow()) < 600: + if len(self.notified_samples) >= expected_samples: + break + eventlet.sleep(0) + + samples = self.notified_samples + self.assertEqual(expected_samples, len(samples)) + self.assertEqual(call_count, self.notifier.info.call_count) + + def test_start_with_reloadable_pipeline(self): + + self.CONF.set_override('heartbeat', 1.0, group='coordination') + self.CONF.set_override('refresh_pipeline_cfg', True) + self.CONF.set_override('pipeline_polling_interval', 2) + + pipeline = yaml.dump({ + 'sources': [{ + 'name': 'test_pipeline', + 'interval': 1, + 'meters': ['test'], + 'resources': ['test://'] if self.source_resources else [], + 'sinks': ['test_sink']}], + 'sinks': [{ + 'name': 'test_sink', + 'transformers': [], + 'publishers': ["test"]}] + }) + + pipeline_cfg_file = self.setup_pipeline_file(pipeline) + + self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) + self.mgr.tg = os_service.threadgroup.ThreadGroup(1000) + self.mgr.start() + expected_samples = 1 + start = timeutils.utcnow() + while timeutils.delta_seconds(start, timeutils.utcnow()) < 600: + if len(self.notified_samples) >= expected_samples: + break + eventlet.sleep(0) + + # we only got the old name of meters + for sample in self.notified_samples: + self.assertEqual('test', sample['counter_name']) + self.assertEqual(1, sample['counter_volume']) + self.assertEqual('test_run_tasks', sample['resource_id']) + + # Modify the collection targets + pipeline = yaml.dump({ + 'sources': [{ + 'name': 'test_pipeline', + 'interval': 1, + 'meters': ['testanother'], + 'resources': ['test://'] if self.source_resources else [], + 'sinks': ['test_sink']}], + 'sinks': [{ + 'name': 'test_sink', + 'transformers': [], + 'publishers': ["test"]}] + }) + + updated_pipeline_cfg_file = self.setup_pipeline_file(pipeline) + + # Move/re-name the updated pipeline file to the original pipeline + # file path as recorded in oslo config + shutil.move(updated_pipeline_cfg_file, pipeline_cfg_file) + + # Random sleep to let the pipeline poller complete the reloading + eventlet.sleep(3) + + # Flush notified samples to test only new, nothing latent on + # fake message bus. 
+ self.notified_samples = [] + + expected_samples = 1 + start = timeutils.utcnow() + while timeutils.delta_seconds(start, timeutils.utcnow()) < 600: + if len(self.notified_samples) >= expected_samples: + break + eventlet.sleep(0) + + # we only got the new name of meters + for sample in self.notified_samples: + self.assertEqual('testanother', sample['counter_name']) + self.assertEqual(1, sample['counter_volume']) + self.assertEqual('test_run_tasks', sample['resource_id']) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/agent/test_plugin.py ceilometer-5.0.0~b3/ceilometer/tests/unit/agent/test_plugin.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/agent/test_plugin.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/agent/test_plugin.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,58 @@ +# +# Copyright 2013 eNovance +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from oslo_config import fixture as fixture_config +from oslotest import base + +from ceilometer.agent import plugin_base + + +class NotificationBaseTestCase(base.BaseTestCase): + def setUp(self): + super(NotificationBaseTestCase, self).setUp() + self.CONF = self.useFixture(fixture_config.Config()).conf + + class FakePlugin(plugin_base.NotificationBase): + event_types = ['compute.*'] + + def process_notification(self, message): + pass + + def get_targets(self, conf): + pass + + def test_plugin_info(self): + plugin = self.FakePlugin(mock.Mock()) + plugin.to_samples_and_publish = mock.Mock() + ctxt = {'user_id': 'fake_user_id', 'project_id': 'fake_project_id'} + publisher_id = 'fake.publisher_id' + event_type = 'fake.event' + payload = {'foo': 'bar'} + metadata = {'message_id': '3577a84f-29ec-4904-9566-12c52289c2e8', + 'timestamp': '2015-06-1909:19:35.786893'} + plugin.info(ctxt, publisher_id, event_type, payload, metadata) + notification = { + 'priority': 'info', + 'event_type': 'fake.event', + 'timestamp': '2015-06-1909:19:35.786893', + '_context_user_id': 'fake_user_id', + '_context_project_id': 'fake_project_id', + 'publisher_id': 'fake.publisher_id', + 'payload': {'foo': 'bar'}, + 'message_id': '3577a84f-29ec-4904-9566-12c52289c2e8' + } + plugin.to_samples_and_publish.assert_called_with(mock.ANY, + notification) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/alarm/evaluator/base.py ceilometer-5.0.0~b3/ceilometer/tests/unit/alarm/evaluator/base.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/alarm/evaluator/base.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/alarm/evaluator/base.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,43 @@ +# +# Copyright 2013 eNovance +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Base class for tests in ceilometer/alarm/evaluator/
+"""
+import mock
+from oslotest import base
+
+
+class TestEvaluatorBase(base.BaseTestCase):
+    def setUp(self):
+        super(TestEvaluatorBase, self).setUp()
+        self.api_client = mock.Mock()
+        self.notifier = mock.MagicMock()
+        self.evaluator = self.EVALUATOR(self.notifier)
+        self.prepare_alarms()
+
+    def prepare_alarms(self):
+        # Subclasses override this to build their own alarm fixtures.
+        self.alarms = []
+
+    def _evaluate_all_alarms(self):
+        for alarm in self.alarms:
+            self.evaluator.evaluate(alarm)
+
+    def _set_all_alarms(self, state):
+        for alarm in self.alarms:
+            alarm.state = state
+
+    def _assert_all_alarms(self, state):
+        for alarm in self.alarms:
+            self.assertEqual(state, alarm.state)
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/alarm/evaluator/test_base.py ceilometer-5.0.0~b3/ceilometer/tests/unit/alarm/evaluator/test_base.py
--- ceilometer-5.0.0~b2/ceilometer/tests/unit/alarm/evaluator/test_base.py 1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/unit/alarm/evaluator/test_base.py 2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,156 @@
+#
+# Copyright 2013 IBM Corp
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
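The tests that follow exercise Evaluator.within_time_constraint, which takes per-constraint cron-style 'start' expressions, a 'duration' in seconds, and an optional timezone name. As a reading aid, here is a minimal sketch of such a check; it assumes the croniter and pytz libraries and is illustrative rather than ceilometer's exact implementation:

    import datetime

    import croniter
    import pytz


    def within_time_constraint(alarm, now):
        # `now` is assumed to be a timezone-aware UTC datetime.
        if not alarm.time_constraints:
            return True
        for tc in alarm.time_constraints:
            # Evaluate in the constraint's own timezone when one is given.
            if tc['timezone']:
                now_tz = now.astimezone(pytz.timezone(tc['timezone']))
            else:
                now_tz = now
            # Most recent time at which the cron 'start' expression fired.
            start = croniter.croniter(tc['start'], now_tz).get_prev(
                datetime.datetime)
            # The constraint then holds for 'duration' seconds.
            if start <= now_tz <= start + datetime.timedelta(
                    seconds=tc['duration']):
                return True
        return False

For example, with start '0 11 * * *' and duration 10800, any time between 11:00 and 14:00 local time satisfies the constraint, which is what the 12:00 case below asserts; the 01:00 case passes via the second, 23:00-start constraint.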
+"""class for tests in ceilometer/alarm/evaluator/__init__.py +""" +import datetime + +import mock +from oslo_utils import timeutils +from oslotest import base + +from ceilometer.alarm import evaluator + + +class TestEvaluatorBaseClass(base.BaseTestCase): + def setUp(self): + super(TestEvaluatorBaseClass, self).setUp() + self.called = False + + def _notify(self, alarm, previous, reason, details): + self.called = True + raise Exception('Boom!') + + def test_base_refresh(self): + notifier = mock.MagicMock() + notifier.notify = self._notify + + class EvaluatorSub(evaluator.Evaluator): + def evaluate(self, alarm): + pass + + ev = EvaluatorSub(notifier) + ev.api_client = mock.MagicMock() + ev._refresh(mock.MagicMock(), mock.MagicMock(), + mock.MagicMock(), mock.MagicMock()) + self.assertTrue(self.called) + + @mock.patch.object(timeutils, 'utcnow') + def test_base_time_constraints(self, mock_utcnow): + alarm = mock.MagicMock() + alarm.time_constraints = [ + {'name': 'test', + 'description': 'test', + 'start': '0 11 * * *', # daily at 11:00 + 'duration': 10800, # 3 hours + 'timezone': ''}, + {'name': 'test2', + 'description': 'test', + 'start': '0 23 * * *', # daily at 23:00 + 'duration': 10800, # 3 hours + 'timezone': ''}, + ] + cls = evaluator.Evaluator + mock_utcnow.return_value = datetime.datetime(2014, 1, 1, 12, 0, 0) + self.assertTrue(cls.within_time_constraint(alarm)) + + mock_utcnow.return_value = datetime.datetime(2014, 1, 2, 1, 0, 0) + self.assertTrue(cls.within_time_constraint(alarm)) + + mock_utcnow.return_value = datetime.datetime(2014, 1, 2, 5, 0, 0) + self.assertFalse(cls.within_time_constraint(alarm)) + + @mock.patch.object(timeutils, 'utcnow') + def test_base_time_constraints_by_month(self, mock_utcnow): + alarm = mock.MagicMock() + alarm.time_constraints = [ + {'name': 'test', + 'description': 'test', + 'start': '0 11 31 1,3,5,7,8,10,12 *', # every 31st at 11:00 + 'duration': 10800, # 3 hours + 'timezone': ''}, + ] + cls = evaluator.Evaluator + mock_utcnow.return_value = datetime.datetime(2015, 3, 31, 11, 30, 0) + self.assertTrue(cls.within_time_constraint(alarm)) + + @mock.patch.object(timeutils, 'utcnow') + def test_base_time_constraints_complex(self, mock_utcnow): + alarm = mock.MagicMock() + alarm.time_constraints = [ + {'name': 'test', + 'description': 'test', + # Every consecutive 2 minutes (from the 3rd to the 57th) past + # every consecutive 2 hours (between 3:00 and 12:59) on every day. 
+ 'start': '3-57/2 3-12/2 * * *', + 'duration': 30, + 'timezone': ''} + ] + cls = evaluator.Evaluator + + # test minutes inside + mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 3, 0) + self.assertTrue(cls.within_time_constraint(alarm)) + mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 31, 0) + self.assertTrue(cls.within_time_constraint(alarm)) + mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 57, 0) + self.assertTrue(cls.within_time_constraint(alarm)) + + # test minutes outside + mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 2, 0) + self.assertFalse(cls.within_time_constraint(alarm)) + mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 4, 0) + self.assertFalse(cls.within_time_constraint(alarm)) + mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 58, 0) + self.assertFalse(cls.within_time_constraint(alarm)) + + # test hours inside + mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 31, 0) + self.assertTrue(cls.within_time_constraint(alarm)) + mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 5, 31, 0) + self.assertTrue(cls.within_time_constraint(alarm)) + mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 11, 31, 0) + self.assertTrue(cls.within_time_constraint(alarm)) + + # test hours outside + mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 1, 31, 0) + self.assertFalse(cls.within_time_constraint(alarm)) + mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 4, 31, 0) + self.assertFalse(cls.within_time_constraint(alarm)) + mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 12, 31, 0) + self.assertFalse(cls.within_time_constraint(alarm)) + + @mock.patch.object(timeutils, 'utcnow') + def test_base_time_constraints_timezone(self, mock_utcnow): + alarm = mock.MagicMock() + cls = evaluator.Evaluator + mock_utcnow.return_value = datetime.datetime(2014, 1, 1, 11, 0, 0) + + alarm.time_constraints = [ + {'name': 'test', + 'description': 'test', + 'start': '0 11 * * *', # daily at 11:00 + 'duration': 10800, # 3 hours + 'timezone': 'Europe/Ljubljana'} + ] + self.assertTrue(cls.within_time_constraint(alarm)) + + alarm.time_constraints = [ + {'name': 'test2', + 'description': 'test2', + 'start': '0 11 * * *', # daily at 11:00 + 'duration': 10800, # 3 hours + 'timezone': 'US/Eastern'} + ] + self.assertFalse(cls.within_time_constraint(alarm)) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/alarm/evaluator/test_combination.py ceilometer-5.0.0~b3/ceilometer/tests/unit/alarm/evaluator/test_combination.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/alarm/evaluator/test_combination.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/alarm/evaluator/test_combination.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,408 @@ +# +# Copyright 2013 eNovance +# +# Authors: Mehdi Abaakouk +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
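The combination-evaluator tests below build one 'or' alarm and one 'and' alarm over two member alarms each, then step the mocked ceilometerclient through different member states. A rough sketch of the state derivation those tests imply; the helper name and structure are illustrative, not ceilometer's exact code:

    def combined_state(operator, member_states):
        # A member counts as unknown when its state could not be fetched
        # (None here) or it reports 'insufficient data'.
        unknown = any(s in (None, 'insufficient data') for s in member_states)
        if operator == 'or':
            # One alarming member is enough, even if others are unknown.
            if 'alarm' in member_states:
                return 'alarm'
            return 'insufficient data' if unknown else 'ok'
        # 'and': an unknown member blocks any conclusion; otherwise every
        # member must be alarming to trip.
        if unknown:
            return 'insufficient data'
        return 'alarm' if all(s == 'alarm' for s in member_states) else 'ok'

This matches, for instance, test_to_alarm_with_one_insufficient_data, where an 'or' alarm still trips despite one unknown member, and test_to_unknown, where an unknown member otherwise forces 'insufficient data'.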
+ +"""Tests for ceilometer/alarm/evaluator/combination.py +""" + +import datetime +import uuid + +from ceilometerclient import exc +from ceilometerclient.v2 import alarms +import mock +from oslo_utils import timeutils +import pytz + +from ceilometer.alarm.evaluator import combination +from ceilometer.alarm.storage import models +from ceilometer.tests import constants +from ceilometer.tests.unit.alarm.evaluator import base + + +class TestEvaluate(base.TestEvaluatorBase): + EVALUATOR = combination.CombinationEvaluator + + def prepare_alarms(self): + self.alarms = [ + models.Alarm(name='or-alarm', + description='the or alarm', + type='combination', + enabled=True, + user_id='foobar', + project_id='snafu', + alarm_id=str(uuid.uuid4()), + state='insufficient data', + state_timestamp=constants.MIN_DATETIME, + timestamp=constants.MIN_DATETIME, + insufficient_data_actions=[], + ok_actions=[], + alarm_actions=[], + repeat_actions=False, + time_constraints=[], + rule=dict( + alarm_ids=[ + '9cfc3e51-2ff1-4b1d-ac01-c1bd4c6d0d1e', + '1d441595-d069-4e05-95ab-8693ba6a8302'], + operator='or', + ), + severity='critical'), + models.Alarm(name='and-alarm', + description='the and alarm', + type='combination', + enabled=True, + user_id='foobar', + project_id='snafu', + alarm_id=str(uuid.uuid4()), + state='insufficient data', + state_timestamp=constants.MIN_DATETIME, + timestamp=constants.MIN_DATETIME, + insufficient_data_actions=[], + ok_actions=[], + alarm_actions=[], + repeat_actions=False, + time_constraints=[], + rule=dict( + alarm_ids=[ + 'b82734f4-9d06-48f3-8a86-fa59a0c99dc8', + '15a700e5-2fe8-4b3d-8c55-9e92831f6a2b'], + operator='and', + ), + severity='critical') + ] + + @staticmethod + def _get_alarm(state): + return alarms.Alarm(None, {'state': state}) + + @staticmethod + def _reason_data(alarm_ids): + return {'type': 'combination', 'alarm_ids': alarm_ids} + + def _combination_transition_reason(self, state, alarm_ids1, alarm_ids2): + return ([('Transition to %(state)s due to alarms %(alarm_ids)s' + ' in state %(state)s') + % {'state': state, 'alarm_ids': ",".join(alarm_ids1)}, + ('Transition to %(state)s due to alarms %(alarm_ids)s' + ' in state %(state)s') + % {'state': state, 'alarm_ids': ",".join(alarm_ids2)}], + [self._reason_data(alarm_ids1), self._reason_data(alarm_ids2)]) + + def _combination_remaining_reason(self, state, alarm_ids1, alarm_ids2): + return ([('Remaining as %(state)s due to alarms %(alarm_ids)s' + ' in state %(state)s') + % {'state': state, 'alarm_ids': ",".join(alarm_ids1)}, + ('Remaining as %(state)s due to alarms %(alarm_ids)s' + ' in state %(state)s') + % {'state': state, 'alarm_ids': ",".join(alarm_ids2)}], + [self._reason_data(alarm_ids1), self._reason_data(alarm_ids2)]) + + def test_retry_transient_api_failure(self): + with mock.patch('ceilometerclient.client.get_client', + return_value=self.api_client): + broken = exc.CommunicationError(message='broken') + self.api_client.alarms.get.side_effect = [ + broken, + broken, + broken, + broken, + self._get_alarm('ok'), + self._get_alarm('ok'), + self._get_alarm('ok'), + self._get_alarm('ok'), + ] + self._evaluate_all_alarms() + self._assert_all_alarms('insufficient data') + self._evaluate_all_alarms() + self._assert_all_alarms('ok') + + def test_simple_insufficient(self): + self._set_all_alarms('ok') + with mock.patch('ceilometerclient.client.get_client', + return_value=self.api_client): + broken = exc.CommunicationError(message='broken') + self.api_client.alarms.get.side_effect = broken + self._evaluate_all_alarms() + 
self._assert_all_alarms('insufficient data') + expected = [mock.call(alarm.alarm_id, state='insufficient data') + for alarm in self.alarms] + update_calls = self.api_client.alarms.set_state.call_args_list + self.assertEqual(expected, update_calls) + expected = [mock.call( + alarm, + 'ok', + ('Alarms %s are in unknown state' % + (",".join(alarm.rule['alarm_ids']))), + self._reason_data(alarm.rule['alarm_ids'])) + for alarm in self.alarms] + self.assertEqual(expected, self.notifier.notify.call_args_list) + + def test_to_ok_with_all_ok(self): + self._set_all_alarms('insufficient data') + with mock.patch('ceilometerclient.client.get_client', + return_value=self.api_client): + self.api_client.alarms.get.side_effect = [ + self._get_alarm('ok'), + self._get_alarm('ok'), + self._get_alarm('ok'), + self._get_alarm('ok'), + ] + self._evaluate_all_alarms() + expected = [mock.call(alarm.alarm_id, state='ok') + for alarm in self.alarms] + update_calls = self.api_client.alarms.set_state.call_args_list + self.assertEqual(expected, update_calls) + reasons, reason_datas = self._combination_transition_reason( + 'ok', + self.alarms[0].rule['alarm_ids'], + self.alarms[1].rule['alarm_ids']) + expected = [mock.call(alarm, 'insufficient data', + reason, reason_data) + for alarm, reason, reason_data + in zip(self.alarms, reasons, reason_datas)] + self.assertEqual(expected, self.notifier.notify.call_args_list) + + def test_to_ok_with_one_alarm(self): + self._set_all_alarms('alarm') + with mock.patch('ceilometerclient.client.get_client', + return_value=self.api_client): + self.api_client.alarms.get.side_effect = [ + self._get_alarm('ok'), + self._get_alarm('ok'), + self._get_alarm('alarm'), + self._get_alarm('ok'), + ] + self._evaluate_all_alarms() + expected = [mock.call(alarm.alarm_id, state='ok') + for alarm in self.alarms] + update_calls = self.api_client.alarms.set_state.call_args_list + self.assertEqual(expected, update_calls) + reasons, reason_datas = self._combination_transition_reason( + 'ok', + self.alarms[0].rule['alarm_ids'], + [self.alarms[1].rule['alarm_ids'][1]]) + expected = [mock.call(alarm, 'alarm', reason, reason_data) + for alarm, reason, reason_data + in zip(self.alarms, reasons, reason_datas)] + self.assertEqual(expected, self.notifier.notify.call_args_list) + + def test_to_alarm_with_all_alarm(self): + self._set_all_alarms('ok') + with mock.patch('ceilometerclient.client.get_client', + return_value=self.api_client): + self.api_client.alarms.get.side_effect = [ + self._get_alarm('alarm'), + self._get_alarm('alarm'), + self._get_alarm('alarm'), + self._get_alarm('alarm'), + ] + self._evaluate_all_alarms() + expected = [mock.call(alarm.alarm_id, state='alarm') + for alarm in self.alarms] + update_calls = self.api_client.alarms.set_state.call_args_list + self.assertEqual(expected, update_calls) + reasons, reason_datas = self._combination_transition_reason( + 'alarm', + self.alarms[0].rule['alarm_ids'], + self.alarms[1].rule['alarm_ids']) + expected = [mock.call(alarm, 'ok', reason, reason_data) + for alarm, reason, reason_data + in zip(self.alarms, reasons, reason_datas)] + self.assertEqual(expected, self.notifier.notify.call_args_list) + + def test_to_alarm_with_one_insufficient_data(self): + self._set_all_alarms('ok') + with mock.patch('ceilometerclient.client.get_client', + return_value=self.api_client): + self.api_client.alarms.get.side_effect = [ + self._get_alarm('insufficient data'), + self._get_alarm('alarm'), + self._get_alarm('alarm'), + self._get_alarm('alarm'), + ] + 
self._evaluate_all_alarms() + expected = [mock.call(alarm.alarm_id, state='alarm') + for alarm in self.alarms] + update_calls = self.api_client.alarms.set_state.call_args_list + self.assertEqual(expected, update_calls) + reasons, reason_datas = self._combination_transition_reason( + 'alarm', + [self.alarms[0].rule['alarm_ids'][1]], + self.alarms[1].rule['alarm_ids']) + expected = [mock.call(alarm, 'ok', reason, reason_data) + for alarm, reason, reason_data + in zip(self.alarms, reasons, reason_datas)] + self.assertEqual(expected, self.notifier.notify.call_args_list) + + def test_to_alarm_with_one_ok(self): + self._set_all_alarms('ok') + with mock.patch('ceilometerclient.client.get_client', + return_value=self.api_client): + self.api_client.alarms.get.side_effect = [ + self._get_alarm('ok'), + self._get_alarm('alarm'), + self._get_alarm('alarm'), + self._get_alarm('alarm'), + ] + self._evaluate_all_alarms() + expected = [mock.call(alarm.alarm_id, state='alarm') + for alarm in self.alarms] + update_calls = self.api_client.alarms.set_state.call_args_list + self.assertEqual(expected, update_calls) + reasons, reason_datas = self._combination_transition_reason( + 'alarm', + [self.alarms[0].rule['alarm_ids'][1]], + self.alarms[1].rule['alarm_ids']) + expected = [mock.call(alarm, 'ok', reason, reason_data) + for alarm, reason, reason_data + in zip(self.alarms, reasons, reason_datas)] + self.assertEqual(expected, self.notifier.notify.call_args_list) + + def test_to_unknown(self): + self._set_all_alarms('ok') + with mock.patch('ceilometerclient.client.get_client', + return_value=self.api_client): + broken = exc.CommunicationError(message='broken') + self.api_client.alarms.get.side_effect = [ + broken, + self._get_alarm('ok'), + self._get_alarm('insufficient data'), + self._get_alarm('ok'), + ] + self._evaluate_all_alarms() + expected = [mock.call(alarm.alarm_id, state='insufficient data') + for alarm in self.alarms] + update_calls = self.api_client.alarms.set_state.call_args_list + self.assertEqual(expected, update_calls) + reasons = ['Alarms %s are in unknown state' + % self.alarms[0].rule['alarm_ids'][0], + 'Alarms %s are in unknown state' + % self.alarms[1].rule['alarm_ids'][0]] + reason_datas = [ + self._reason_data([self.alarms[0].rule['alarm_ids'][0]]), + self._reason_data([self.alarms[1].rule['alarm_ids'][0]])] + expected = [mock.call(alarm, 'ok', reason, reason_data) + for alarm, reason, reason_data + in zip(self.alarms, reasons, reason_datas)] + self.assertEqual(expected, self.notifier.notify.call_args_list) + + def test_no_state_change(self): + self._set_all_alarms('ok') + with mock.patch('ceilometerclient.client.get_client', + return_value=self.api_client): + self.api_client.alarms.get.side_effect = [ + self._get_alarm('ok'), + self._get_alarm('ok'), + self._get_alarm('ok'), + self._get_alarm('ok'), + ] + self._evaluate_all_alarms() + update_calls = self.api_client.alarms.set_state.call_args_list + self.assertEqual([], update_calls) + self.assertEqual([], self.notifier.notify.call_args_list) + + def test_no_state_change_and_repeat_actions(self): + self.alarms[0].repeat_actions = True + self.alarms[1].repeat_actions = True + self._set_all_alarms('ok') + with mock.patch('ceilometerclient.client.get_client', + return_value=self.api_client): + self.api_client.alarms.get.side_effect = [ + self._get_alarm('ok'), + self._get_alarm('ok'), + self._get_alarm('ok'), + self._get_alarm('ok'), + ] + self._evaluate_all_alarms() + update_calls = self.api_client.alarms.set_state.call_args_list + 
self.assertEqual([], update_calls)
+            reasons, reason_datas = self._combination_remaining_reason(
+                'ok',
+                self.alarms[0].rule['alarm_ids'],
+                self.alarms[1].rule['alarm_ids'])
+            expected = [mock.call(alarm, 'ok', reason, reason_data)
+                        for alarm, reason, reason_data
+                        in zip(self.alarms, reasons, reason_datas)]
+
+            self.assertEqual(expected, self.notifier.notify.call_args_list)
+
+    @mock.patch.object(timeutils, 'utcnow')
+    def test_state_change_inside_time_constraint(self, mock_utcnow):
+        self._set_all_alarms('insufficient data')
+        self.alarms[0].time_constraints = [
+            {'name': 'test',
+             'description': 'test',
+             'start': '0 11 * * *',  # daily at 11:00
+             'duration': 10800,  # 3 hours
+             'timezone': 'Europe/Ljubljana'}
+        ]
+        self.alarms[1].time_constraints = self.alarms[0].time_constraints
+        dt = datetime.datetime(2014, 1, 1, 12, 0, 0,
+                               tzinfo=pytz.timezone('Europe/Ljubljana'))
+        mock_utcnow.return_value = dt.astimezone(pytz.UTC)
+        with mock.patch('ceilometerclient.client.get_client',
+                        return_value=self.api_client):
+            self.api_client.alarms.get.side_effect = [
+                self._get_alarm('ok'),
+                self._get_alarm('ok'),
+                self._get_alarm('ok'),
+                self._get_alarm('ok'),
+            ]
+            self._evaluate_all_alarms()
+            expected = [mock.call(alarm.alarm_id, state='ok')
+                        for alarm in self.alarms]
+            update_calls = self.api_client.alarms.set_state.call_args_list
+            self.assertEqual(expected, update_calls,
+                             "Alarm should change state if the current "
+                             "time is inside its time constraint.")
+            reasons, reason_datas = self._combination_transition_reason(
+                'ok',
+                self.alarms[0].rule['alarm_ids'],
+                self.alarms[1].rule['alarm_ids'])
+            expected = [mock.call(alarm, 'insufficient data',
+                                  reason, reason_data)
+                        for alarm, reason, reason_data
+                        in zip(self.alarms, reasons, reason_datas)]
+            self.assertEqual(expected, self.notifier.notify.call_args_list)
+
+    @mock.patch.object(timeutils, 'utcnow')
+    def test_no_state_change_outside_time_constraint(self, mock_utcnow):
+        self._set_all_alarms('insufficient data')
+        self.alarms[0].time_constraints = [
+            {'name': 'test',
+             'description': 'test',
+             'start': '0 11 * * *',  # daily at 11:00
+             'duration': 10800,  # 3 hours
+             'timezone': 'Europe/Ljubljana'}
+        ]
+        self.alarms[1].time_constraints = self.alarms[0].time_constraints
+        dt = datetime.datetime(2014, 1, 1, 15, 0, 0,
+                               tzinfo=pytz.timezone('Europe/Ljubljana'))
+        mock_utcnow.return_value = dt.astimezone(pytz.UTC)
+        with mock.patch('ceilometerclient.client.get_client',
+                        return_value=self.api_client):
+            self.api_client.alarms.get.side_effect = [
+                self._get_alarm('ok'),
+                self._get_alarm('ok'),
+                self._get_alarm('ok'),
+                self._get_alarm('ok'),
+            ]
+            self._evaluate_all_alarms()
+            update_calls = self.api_client.alarms.set_state.call_args_list
+            self.assertEqual([], update_calls,
+                             "Alarm should not change state if the current "
+                             "time is outside its time constraint.")
+            self.assertEqual([], self.notifier.notify.call_args_list)
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/alarm/evaluator/test_gnocchi.py ceilometer-5.0.0~b3/ceilometer/tests/unit/alarm/evaluator/test_gnocchi.py
--- ceilometer-5.0.0~b2/ceilometer/tests/unit/alarm/evaluator/test_gnocchi.py 1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/unit/alarm/evaluator/test_gnocchi.py 2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,438 @@
+#
+# Copyright 2015 eNovance
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime +import unittest +import uuid + +import mock +from oslo_serialization import jsonutils +from oslo_utils import timeutils +from oslotest import mockpatch +import pytz +import six +from six import moves + +from ceilometer.alarm.evaluator import gnocchi +from ceilometer.alarm.storage import models +from ceilometer.tests import constants +from ceilometer.tests.unit.alarm.evaluator import base + + +class FakeResponse(object): + def __init__(self, code, data): + if code == 200: + self.values = [d[2] for d in data] + else: + self.values = [] + self.text = jsonutils.dumps(data) + self.status_code = code + + +class TestGnocchiThresholdEvaluate(base.TestEvaluatorBase): + EVALUATOR = gnocchi.GnocchiThresholdEvaluator + + def setUp(self): + ks_client = mock.Mock(auth_token='fake_token') + ks_client.users.find.return_value = 'gnocchi' + self.useFixture(mockpatch.Patch( + 'keystoneclient.v2_0.client.Client', + return_value=ks_client)) + + super(TestGnocchiThresholdEvaluate, self).setUp() + + self.useFixture(mockpatch.Patch('ceilometerclient.client.get_client', + return_value=self.api_client)) + self.requests = self.useFixture(mockpatch.Patch( + 'ceilometer.alarm.evaluator.gnocchi.requests')).mock + + def prepare_alarms(self): + self.alarms = [ + models.Alarm(name='instance_running_hot', + description='instance_running_hot', + type='gnocchi_resources_threshold', + enabled=True, + user_id='foobar', + project_id='snafu', + alarm_id=str(uuid.uuid4()), + state='insufficient data', + state_timestamp=constants.MIN_DATETIME, + timestamp=constants.MIN_DATETIME, + insufficient_data_actions=[], + ok_actions=[], + alarm_actions=[], + repeat_actions=False, + time_constraints=[], + rule=dict( + comparison_operator='gt', + threshold=80.0, + evaluation_periods=5, + aggregation_method='mean', + granularity=60, + metric='cpu_util', + resource_type='instance', + resource_id='my_instance') + ), + models.Alarm(name='group_running_idle', + description='group_running_idle', + type='gnocchi_aggregation_by_metrics_threshold', + enabled=True, + user_id='foobar', + project_id='snafu', + state='insufficient data', + state_timestamp=constants.MIN_DATETIME, + timestamp=constants.MIN_DATETIME, + insufficient_data_actions=[], + ok_actions=[], + alarm_actions=[], + repeat_actions=False, + alarm_id=str(uuid.uuid4()), + time_constraints=[], + rule=dict( + comparison_operator='le', + threshold=10.0, + evaluation_periods=4, + aggregation_method='max', + granularity=300, + metrics=['0bb1604d-1193-4c0a-b4b8-74b170e35e83', + '9ddc209f-42f8-41e1-b8f1-8804f59c4053']), + ), + models.Alarm(name='instance_not_running', + description='instance_running_hot', + type='gnocchi_aggregation_by_resources_threshold', + enabled=True, + user_id='foobar', + project_id='snafu', + alarm_id=str(uuid.uuid4()), + state='insufficient data', + state_timestamp=constants.MIN_DATETIME, + timestamp=constants.MIN_DATETIME, + insufficient_data_actions=[], + ok_actions=[], + alarm_actions=[], + repeat_actions=False, + time_constraints=[], + rule=dict( + comparison_operator='gt', + threshold=80.0, + evaluation_periods=6, + 
aggregation_method='mean', + granularity=50, + metric='cpu_util', + resource_type='instance', + query='{"=": {"server_group": ' + '"my_autoscaling_group"}}') + ), + + ] + + @staticmethod + def _get_stats(granularity, values): + now = timeutils.utcnow_ts() + return FakeResponse( + 200, [[six.text_type(now - len(values) * granularity), + granularity, value] for value in values]) + + @staticmethod + def _reason_data(disposition, count, most_recent): + return {'type': 'threshold', 'disposition': disposition, + 'count': count, 'most_recent': most_recent} + + def _set_all_rules(self, field, value): + for alarm in self.alarms: + alarm.rule[field] = value + + def test_retry_transient_api_failure(self): + means = self._get_stats(60, [self.alarms[0].rule['threshold'] - v + for v in moves.xrange(5)]) + maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] + v + for v in moves.xrange(1, 4)]) + avgs2 = self._get_stats(50, [self.alarms[2].rule['threshold'] - v + for v in moves.xrange(6)]) + self.requests.get.side_effect = [Exception('boom'), + FakeResponse(500, "error"), + means, + maxs] + self.requests.post.side_effect = [FakeResponse(500, "error"), avgs2] + self._evaluate_all_alarms() + self._assert_all_alarms('insufficient data') + self._evaluate_all_alarms() + self._assert_all_alarms('ok') + + def test_simple_insufficient(self): + self._set_all_alarms('ok') + self.requests.get.return_value = FakeResponse(200, []) + self.requests.post.return_value = FakeResponse(200, []) + self._evaluate_all_alarms() + self._assert_all_alarms('insufficient data') + expected = [mock.call(alarm.alarm_id, state='insufficient data') + for alarm in self.alarms] + update_calls = self.api_client.alarms.set_state.call_args_list + self.assertEqual(expected, update_calls) + expected = [mock.call( + alarm, + 'ok', + ('%d datapoints are unknown' + % alarm.rule['evaluation_periods']), + self._reason_data('unknown', + alarm.rule['evaluation_periods'], + None)) + for alarm in self.alarms] + self.assertEqual(expected, self.notifier.notify.call_args_list) + + @mock.patch.object(timeutils, 'utcnow') + def test_simple_alarm_trip(self, utcnow): + utcnow.return_value = datetime.datetime(2015, 1, 26, 12, 57, 0, 0) + self._set_all_alarms('ok') + avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v + for v in moves.xrange(1, 6)]) + maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] - v + for v in moves.xrange(4)]) + avgs2 = self._get_stats(50, [self.alarms[2].rule['threshold'] + v + for v in moves.xrange(1, 7)]) + + self.requests.get.side_effect = [avgs, maxs] + self.requests.post.side_effect = [avgs2] + self._evaluate_all_alarms() + + expected_headers = {'X-Auth-Token': 'fake_token', + 'Content-Type': 'application/json'} + + start_alarm1 = "2015-01-26T12:51:00" + start_alarm2 = "2015-01-26T12:32:00" + start_alarm3 = "2015-01-26T12:51:10" + end = "2015-01-26T12:57:00" + + self.assertEqual([ + mock.call(url='http://localhost:8041/v1/resource/instance/' + 'my_instance/metric/cpu_util/measures', + params={'aggregation': 'mean', + 'start': start_alarm1, 'end': end}, + headers=expected_headers), + mock.call(url='http://localhost:8041/v1/aggregation/metric', + params={'aggregation': 'max', + 'start': start_alarm2, 'end': end, + 'metric[]': [ + '0bb1604d-1193-4c0a-b4b8-74b170e35e83', + '9ddc209f-42f8-41e1-b8f1-8804f59c4053']}, + headers=expected_headers)], + + self.requests.get.mock_calls) + self.assertEqual([ + mock.call(url='http://localhost:8041/v1/aggregation/resource/' + 'instance/metric/cpu_util', + 
params={'aggregation': 'mean', + 'start': start_alarm3, 'end': end}, + data='{"=": {"server_group": "my_autoscaling_group"}}', + headers=expected_headers), + ], + self.requests.post.mock_calls) + + self._assert_all_alarms('alarm') + expected = [mock.call(alarm.alarm_id, state='alarm') + for alarm in self.alarms] + update_calls = self.api_client.alarms.set_state.call_args_list + self.assertEqual(expected, update_calls) + reasons = ['Transition to alarm due to 5 samples outside' + ' threshold, most recent: %s' % avgs.values[-1], + 'Transition to alarm due to 4 samples outside' + ' threshold, most recent: %s' % maxs.values[-1], + 'Transition to alarm due to 6 samples outside' + ' threshold, most recent: %s' % avgs2.values[-1], + ] + reason_datas = [self._reason_data('outside', 5, avgs.values[-1]), + self._reason_data('outside', 4, maxs.values[-1]), + self._reason_data('outside', 6, avgs2.values[-1])] + expected = [mock.call(alarm, 'ok', reason, reason_data) + for alarm, reason, reason_data + in zip(self.alarms, reasons, reason_datas)] + self.assertEqual(expected, self.notifier.notify.call_args_list) + + def test_simple_alarm_clear(self): + self._set_all_alarms('alarm') + avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] - v + for v in moves.xrange(5)]) + maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] + v + for v in moves.xrange(1, 5)]) + avgs2 = self._get_stats(50, [self.alarms[2].rule['threshold'] - v + for v in moves.xrange(6)]) + self.requests.post.side_effect = [avgs2] + self.requests.get.side_effect = [avgs, maxs] + self._evaluate_all_alarms() + self._assert_all_alarms('ok') + expected = [mock.call(alarm.alarm_id, state='ok') + for alarm in self.alarms] + update_calls = self.api_client.alarms.set_state.call_args_list + self.assertEqual(expected, update_calls) + reasons = ['Transition to ok due to 5 samples inside' + ' threshold, most recent: %s' % avgs.values[-1], + 'Transition to ok due to 4 samples inside' + ' threshold, most recent: %s' % maxs.values[-1], + 'Transition to ok due to 6 samples inside' + ' threshold, most recent: %s' % avgs2.values[-1]] + reason_datas = [self._reason_data('inside', 5, avgs.values[-1]), + self._reason_data('inside', 4, maxs.values[-1]), + self._reason_data('inside', 6, avgs2.values[-1])] + expected = [mock.call(alarm, 'alarm', reason, reason_data) + for alarm, reason, reason_data + in zip(self.alarms, reasons, reason_datas)] + self.assertEqual(expected, self.notifier.notify.call_args_list) + + def test_equivocal_from_known_state(self): + self._set_all_alarms('ok') + avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v + for v in moves.xrange(5)]) + maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] - v + for v in moves.xrange(-1, 3)]) + avgs2 = self._get_stats(50, [self.alarms[2].rule['threshold'] + v + for v in moves.xrange(6)]) + self.requests.post.side_effect = [avgs2] + self.requests.get.side_effect = [avgs, maxs] + self._evaluate_all_alarms() + self._assert_all_alarms('ok') + self.assertEqual( + [], + self.api_client.alarms.set_state.call_args_list) + self.assertEqual([], self.notifier.notify.call_args_list) + + def test_equivocal_from_known_state_and_repeat_actions(self): + self._set_all_alarms('ok') + self.alarms[1].repeat_actions = True + avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v + for v in moves.xrange(5)]) + maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] - v + for v in moves.xrange(-1, 3)]) + avgs2 = self._get_stats(50, [self.alarms[2].rule['threshold'] + v + 
for v in moves.xrange(6)]) + self.requests.post.side_effect = [avgs2] + self.requests.get.side_effect = [avgs, maxs] + self._evaluate_all_alarms() + self._assert_all_alarms('ok') + self.assertEqual([], self.api_client.alarms.set_state.call_args_list) + reason = ('Remaining as ok due to 4 samples inside' + ' threshold, most recent: 8.0') + reason_datas = self._reason_data('inside', 4, 8.0) + expected = [mock.call(self.alarms[1], 'ok', reason, reason_datas)] + self.assertEqual(expected, self.notifier.notify.call_args_list) + + def test_unequivocal_from_known_state_and_repeat_actions(self): + self._set_all_alarms('alarm') + self.alarms[1].repeat_actions = True + avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v + for v in moves.xrange(1, 6)]) + maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] - v + for v in moves.xrange(4)]) + avgs2 = self._get_stats(50, [self.alarms[2].rule['threshold'] + v + for v in moves.xrange(6)]) + self.requests.post.side_effect = [avgs2] + self.requests.get.side_effect = [avgs, maxs] + self._evaluate_all_alarms() + self._assert_all_alarms('alarm') + self.assertEqual([], self.api_client.alarms.set_state.call_args_list) + reason = ('Remaining as alarm due to 4 samples outside' + ' threshold, most recent: 7.0') + reason_datas = self._reason_data('outside', 4, 7.0) + expected = [mock.call(self.alarms[1], 'alarm', + reason, reason_datas)] + self.assertEqual(expected, self.notifier.notify.call_args_list) + + def test_state_change_and_repeat_actions(self): + self._set_all_alarms('ok') + self.alarms[0].repeat_actions = True + self.alarms[1].repeat_actions = True + avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v + for v in moves.xrange(1, 6)]) + maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] - v + for v in moves.xrange(4)]) + avgs2 = self._get_stats(50, [self.alarms[2].rule['threshold'] + v + for v in moves.xrange(1, 7)]) + self.requests.post.side_effect = [avgs2] + self.requests.get.side_effect = [avgs, maxs] + self._evaluate_all_alarms() + self._assert_all_alarms('alarm') + expected = [mock.call(alarm.alarm_id, state='alarm') + for alarm in self.alarms] + update_calls = self.api_client.alarms.set_state.call_args_list + self.assertEqual(expected, update_calls) + reasons = ['Transition to alarm due to 5 samples outside' + ' threshold, most recent: %s' % avgs.values[-1], + 'Transition to alarm due to 4 samples outside' + ' threshold, most recent: %s' % maxs.values[-1], + 'Transition to alarm due to 6 samples outside' + ' threshold, most recent: %s' % avgs2.values[-1]] + reason_datas = [self._reason_data('outside', 5, avgs.values[-1]), + self._reason_data('outside', 4, maxs.values[-1]), + self._reason_data('outside', 6, avgs2.values[-1])] + expected = [mock.call(alarm, 'ok', reason, reason_data) + for alarm, reason, reason_data + in zip(self.alarms, reasons, reason_datas)] + self.assertEqual(expected, self.notifier.notify.call_args_list) + + def test_equivocal_from_unknown(self): + self._set_all_alarms('insufficient data') + avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v + for v in moves.xrange(1, 6)]) + maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] - v + for v in moves.xrange(4)]) + avgs2 = self._get_stats(50, [self.alarms[2].rule['threshold'] + v + for v in moves.xrange(1, 7)]) + self.requests.post.side_effect = [avgs2] + self.requests.get.side_effect = [avgs, maxs] + self._evaluate_all_alarms() + self._assert_all_alarms('alarm') + expected = [mock.call(alarm.alarm_id, state='alarm') 
+ for alarm in self.alarms] + update_calls = self.api_client.alarms.set_state.call_args_list + self.assertEqual(expected, update_calls) + reasons = ['Transition to alarm due to 5 samples outside' + ' threshold, most recent: %s' % avgs.values[-1], + 'Transition to alarm due to 4 samples outside' + ' threshold, most recent: %s' % maxs.values[-1], + 'Transition to alarm due to 6 samples outside' + ' threshold, most recent: %s' % avgs2.values[-1]] + reason_datas = [self._reason_data('outside', 5, avgs.values[-1]), + self._reason_data('outside', 4, maxs.values[-1]), + self._reason_data('outside', 6, avgs2.values[-1])] + expected = [mock.call(alarm, 'insufficient data', + reason, reason_data) + for alarm, reason, reason_data + in zip(self.alarms, reasons, reason_datas)] + self.assertEqual(expected, self.notifier.notify.call_args_list) + + @unittest.skipIf(six.PY3, + "the ceilometer base class is not python 3 ready") + @mock.patch.object(timeutils, 'utcnow') + def test_no_state_change_outside_time_constraint(self, mock_utcnow): + self._set_all_alarms('ok') + self.alarms[0].time_constraints = [ + {'name': 'test', + 'description': 'test', + 'start': '0 11 * * *', # daily at 11:00 + 'duration': 10800, # 3 hours + 'timezone': 'Europe/Ljubljana'} + ] + self.alarms[1].time_constraints = self.alarms[0].time_constraints + self.alarms[2].time_constraints = self.alarms[0].time_constraints + dt = datetime.datetime(2014, 1, 1, 15, 0, 0, + tzinfo=pytz.timezone('Europe/Ljubljana')) + mock_utcnow.return_value = dt.astimezone(pytz.UTC) + self.requests.get.return_value = [] + self._evaluate_all_alarms() + self._assert_all_alarms('ok') + update_calls = self.api_client.alarms.set_state.call_args_list + self.assertEqual([], update_calls, + "Alarm should not change state if the current " + " time is outside its time constraint.") + self.assertEqual([], self.notifier.notify.call_args_list) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/alarm/evaluator/test_threshold.py ceilometer-5.0.0~b3/ceilometer/tests/unit/alarm/evaluator/test_threshold.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/alarm/evaluator/test_threshold.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/alarm/evaluator/test_threshold.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,540 @@ +# +# Copyright 2013 Red Hat, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
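One fixture idea recurs through the gnocchi tests above: Gnocchi's measures endpoints return a JSON list of [timestamp, granularity, value] triples, and the evaluator only ever consumes the value column, which is exactly what FakeResponse models with `self.values = [d[2] for d in data]`. A minimal, self-contained sketch of that shape (the fake_measures helper and the per-point timestamps are illustrative, not part of the patch):

    from oslo_utils import timeutils

    def fake_measures(granularity, values):
        # Gnocchi-style measures: one [timestamp, granularity, value]
        # triple per datapoint, ending at "now".
        now = timeutils.utcnow_ts()
        return [[str(now - (len(values) - i) * granularity), granularity, v]
                for i, v in enumerate(values)]

    # Five cpu_util averages at 60s granularity, all above an 80.0 threshold:
    # the kind of series that trips the first alarm in the tests above.
    points = fake_measures(60, [81.0, 82.0, 83.0, 84.0, 85.0])
    assert [p[2] for p in points] == [81.0, 82.0, 83.0, 84.0, 85.0]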
+"""Tests for ceilometer/alarm/evaluator/threshold.py +""" +import datetime +import uuid + +from ceilometerclient import exc +from ceilometerclient.v2 import statistics +import mock +from oslo_config import cfg +from oslo_utils import timeutils +import pytz +from six import moves + +from ceilometer.alarm.evaluator import threshold +from ceilometer.alarm.storage import models +from ceilometer.tests import constants +from ceilometer.tests.unit.alarm.evaluator import base + + +class TestEvaluate(base.TestEvaluatorBase): + EVALUATOR = threshold.ThresholdEvaluator + + def prepare_alarms(self): + self.alarms = [ + models.Alarm(name='instance_running_hot', + description='instance_running_hot', + type='threshold', + enabled=True, + user_id='foobar', + project_id='snafu', + alarm_id=str(uuid.uuid4()), + state='insufficient data', + state_timestamp=constants.MIN_DATETIME, + timestamp=constants.MIN_DATETIME, + insufficient_data_actions=[], + ok_actions=[], + alarm_actions=[], + repeat_actions=False, + time_constraints=[], + rule=dict( + comparison_operator='gt', + threshold=80.0, + evaluation_periods=5, + statistic='avg', + period=60, + meter_name='cpu_util', + query=[{'field': 'meter', + 'op': 'eq', + 'value': 'cpu_util'}, + {'field': 'resource_id', + 'op': 'eq', + 'value': 'my_instance'}]), + severity='critical' + ), + models.Alarm(name='group_running_idle', + description='group_running_idle', + type='threshold', + enabled=True, + user_id='foobar', + project_id='snafu', + state='insufficient data', + state_timestamp=constants.MIN_DATETIME, + timestamp=constants.MIN_DATETIME, + insufficient_data_actions=[], + ok_actions=[], + alarm_actions=[], + repeat_actions=False, + alarm_id=str(uuid.uuid4()), + time_constraints=[], + rule=dict( + comparison_operator='le', + threshold=10.0, + evaluation_periods=4, + statistic='max', + period=300, + meter_name='cpu_util', + query=[{'field': 'meter', + 'op': 'eq', + 'value': 'cpu_util'}, + {'field': 'metadata.user_metadata.AS', + 'op': 'eq', + 'value': 'my_group'}]), + severity='critical' + ), + ] + + @staticmethod + def _get_stat(attr, value, count=1): + return statistics.Statistics(None, {attr: value, 'count': count}) + + @staticmethod + def _reason_data(disposition, count, most_recent): + return {'type': 'threshold', 'disposition': disposition, + 'count': count, 'most_recent': most_recent} + + def _set_all_rules(self, field, value): + for alarm in self.alarms: + alarm.rule[field] = value + + def test_retry_transient_api_failure(self): + with mock.patch('ceilometerclient.client.get_client', + return_value=self.api_client): + broken = exc.CommunicationError(message='broken') + avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] - v) + for v in moves.xrange(5)] + maxs = [self._get_stat('max', self.alarms[1].rule['threshold'] + v) + for v in moves.xrange(1, 5)] + self.api_client.statistics.list.side_effect = [broken, + broken, + avgs, + maxs] + self._evaluate_all_alarms() + self._assert_all_alarms('insufficient data') + self._evaluate_all_alarms() + self._assert_all_alarms('ok') + + def test_simple_insufficient(self): + self._set_all_alarms('ok') + with mock.patch('ceilometerclient.client.get_client', + return_value=self.api_client): + self.api_client.statistics.list.return_value = [] + self._evaluate_all_alarms() + self._assert_all_alarms('insufficient data') + expected = [mock.call(alarm.alarm_id, state='insufficient data') + for alarm in self.alarms] + update_calls = self.api_client.alarms.set_state.call_args_list + self.assertEqual(expected, 
update_calls) + expected = [mock.call( + alarm, + 'ok', + ('%d datapoints are unknown' + % alarm.rule['evaluation_periods']), + self._reason_data('unknown', + alarm.rule['evaluation_periods'], + None)) + for alarm in self.alarms] + self.assertEqual(expected, self.notifier.notify.call_args_list) + + def test_less_insufficient_data(self): + self._set_all_alarms('ok') + with mock.patch('ceilometerclient.client.get_client', + return_value=self.api_client): + avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] - v) + for v in moves.xrange(4)] + maxs = [self._get_stat('max', self.alarms[1].rule['threshold'] - v) + for v in moves.xrange(1, 4)] + self.api_client.statistics.list.side_effect = [avgs, maxs] + self._evaluate_all_alarms() + self._assert_all_alarms('insufficient data') + expected = [mock.call(alarm.alarm_id, state='insufficient data') + for alarm in self.alarms] + update_calls = self.api_client.alarms.set_state.call_args_list + self.assertEqual(update_calls, expected) + expected = [mock.call( + alarm, + 'ok', + ('%d datapoints are unknown' + % alarm.rule['evaluation_periods']), + self._reason_data('unknown', + alarm.rule['evaluation_periods'], + alarm.rule['threshold'] - 3)) + for alarm in self.alarms] + self.assertEqual(expected, self.notifier.notify.call_args_list) + + def test_simple_alarm_trip(self): + self._set_all_alarms('ok') + with mock.patch('ceilometerclient.client.get_client', + return_value=self.api_client): + avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] + v) + for v in moves.xrange(1, 6)] + maxs = [self._get_stat('max', self.alarms[1].rule['threshold'] - v) + for v in moves.xrange(4)] + self.api_client.statistics.list.side_effect = [avgs, maxs] + self._evaluate_all_alarms() + self._assert_all_alarms('alarm') + expected = [mock.call(alarm.alarm_id, state='alarm') + for alarm in self.alarms] + update_calls = self.api_client.alarms.set_state.call_args_list + self.assertEqual(expected, update_calls) + reasons = ['Transition to alarm due to 5 samples outside' + ' threshold, most recent: %s' % avgs[-1].avg, + 'Transition to alarm due to 4 samples outside' + ' threshold, most recent: %s' % maxs[-1].max] + reason_datas = [self._reason_data('outside', 5, avgs[-1].avg), + self._reason_data('outside', 4, maxs[-1].max)] + expected = [mock.call(alarm, 'ok', reason, reason_data) + for alarm, reason, reason_data + in zip(self.alarms, reasons, reason_datas)] + self.assertEqual(expected, self.notifier.notify.call_args_list) + + def test_simple_alarm_clear(self): + self._set_all_alarms('alarm') + with mock.patch('ceilometerclient.client.get_client', + return_value=self.api_client): + avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] - v) + for v in moves.xrange(5)] + maxs = [self._get_stat('max', self.alarms[1].rule['threshold'] + v) + for v in moves.xrange(1, 5)] + self.api_client.statistics.list.side_effect = [avgs, maxs] + self._evaluate_all_alarms() + self._assert_all_alarms('ok') + expected = [mock.call(alarm.alarm_id, state='ok') + for alarm in self.alarms] + update_calls = self.api_client.alarms.set_state.call_args_list + self.assertEqual(expected, update_calls) + reasons = ['Transition to ok due to 5 samples inside' + ' threshold, most recent: %s' % avgs[-1].avg, + 'Transition to ok due to 4 samples inside' + ' threshold, most recent: %s' % maxs[-1].max] + reason_datas = [self._reason_data('inside', 5, avgs[-1].avg), + self._reason_data('inside', 4, maxs[-1].max)] + expected = [mock.call(alarm, 'alarm', reason, reason_data) + for alarm, 
reason, reason_data + in zip(self.alarms, reasons, reason_datas)] + self.assertEqual(expected, self.notifier.notify.call_args_list) + + def test_equivocal_from_known_state(self): + self._set_all_alarms('ok') + with mock.patch('ceilometerclient.client.get_client', + return_value=self.api_client): + avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] + v) + for v in moves.xrange(5)] + maxs = [self._get_stat('max', self.alarms[1].rule['threshold'] - v) + for v in moves.xrange(-1, 3)] + self.api_client.statistics.list.side_effect = [avgs, maxs] + self._evaluate_all_alarms() + self._assert_all_alarms('ok') + self.assertEqual( + [], + self.api_client.alarms.set_state.call_args_list) + self.assertEqual([], self.notifier.notify.call_args_list) + + def test_equivocal_from_known_state_and_repeat_actions(self): + self._set_all_alarms('ok') + self.alarms[1].repeat_actions = True + with mock.patch('ceilometerclient.client.get_client', + return_value=self.api_client): + avgs = [self._get_stat('avg', + self.alarms[0].rule['threshold'] + v) + for v in moves.xrange(5)] + maxs = [self._get_stat('max', + self.alarms[1].rule['threshold'] - v) + for v in moves.xrange(-1, 3)] + self.api_client.statistics.list.side_effect = [avgs, maxs] + self._evaluate_all_alarms() + self._assert_all_alarms('ok') + self.assertEqual([], + self.api_client.alarms.set_state.call_args_list) + reason = ('Remaining as ok due to 4 samples inside' + ' threshold, most recent: 8.0') + reason_datas = self._reason_data('inside', 4, 8.0) + expected = [mock.call(self.alarms[1], 'ok', reason, reason_datas)] + self.assertEqual(expected, self.notifier.notify.call_args_list) + + def test_unequivocal_from_known_state_and_repeat_actions(self): + self._set_all_alarms('alarm') + self.alarms[1].repeat_actions = True + with mock.patch('ceilometerclient.client.get_client', + return_value=self.api_client): + avgs = [self._get_stat('avg', + self.alarms[0].rule['threshold'] + v) + for v in moves.xrange(1, 6)] + maxs = [self._get_stat('max', + self.alarms[1].rule['threshold'] - v) + for v in moves.xrange(4)] + self.api_client.statistics.list.side_effect = [avgs, maxs] + self._evaluate_all_alarms() + self._assert_all_alarms('alarm') + self.assertEqual([], + self.api_client.alarms.set_state.call_args_list) + reason = ('Remaining as alarm due to 4 samples outside' + ' threshold, most recent: 7.0') + reason_datas = self._reason_data('outside', 4, 7.0) + expected = [mock.call(self.alarms[1], 'alarm', + reason, reason_datas)] + self.assertEqual(expected, self.notifier.notify.call_args_list) + + def test_state_change_and_repeat_actions(self): + self._set_all_alarms('ok') + self.alarms[0].repeat_actions = True + self.alarms[1].repeat_actions = True + with mock.patch('ceilometerclient.client.get_client', + return_value=self.api_client): + avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] + v) + for v in moves.xrange(1, 6)] + maxs = [self._get_stat('max', self.alarms[1].rule['threshold'] - v) + for v in moves.xrange(4)] + self.api_client.statistics.list.side_effect = [avgs, maxs] + self._evaluate_all_alarms() + self._assert_all_alarms('alarm') + expected = [mock.call(alarm.alarm_id, state='alarm') + for alarm in self.alarms] + update_calls = self.api_client.alarms.set_state.call_args_list + self.assertEqual(expected, update_calls) + reasons = ['Transition to alarm due to 5 samples outside' + ' threshold, most recent: %s' % avgs[-1].avg, + 'Transition to alarm due to 4 samples outside' + ' threshold, most recent: %s' % maxs[-1].max] + 
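+            # The reason/reason_data pair built here is the evaluator's
+            # transition payload: a human-readable reason string plus its
+            # structured twin, e.g. {'type': 'threshold',
+            # 'disposition': 'outside', 'count': 5, 'most_recent': 85.0}
+            # for the first alarm (threshold 80.0, averages 81.0..85.0).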
reason_datas = [self._reason_data('outside', 5, avgs[-1].avg), + self._reason_data('outside', 4, maxs[-1].max)] + expected = [mock.call(alarm, 'ok', reason, reason_data) + for alarm, reason, reason_data + in zip(self.alarms, reasons, reason_datas)] + self.assertEqual(expected, self.notifier.notify.call_args_list) + + def test_equivocal_from_unknown(self): + self._set_all_alarms('insufficient data') + with mock.patch('ceilometerclient.client.get_client', + return_value=self.api_client): + avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] + v) + for v in moves.xrange(1, 6)] + maxs = [self._get_stat('max', self.alarms[1].rule['threshold'] - v) + for v in moves.xrange(4)] + self.api_client.statistics.list.side_effect = [avgs, maxs] + self._evaluate_all_alarms() + self._assert_all_alarms('alarm') + expected = [mock.call(alarm.alarm_id, state='alarm') + for alarm in self.alarms] + update_calls = self.api_client.alarms.set_state.call_args_list + self.assertEqual(expected, update_calls) + reasons = ['Transition to alarm due to 5 samples outside' + ' threshold, most recent: %s' % avgs[-1].avg, + 'Transition to alarm due to 4 samples outside' + ' threshold, most recent: %s' % maxs[-1].max] + reason_datas = [self._reason_data('outside', 5, avgs[-1].avg), + self._reason_data('outside', 4, maxs[-1].max)] + expected = [mock.call(alarm, 'insufficient data', + reason, reason_data) + for alarm, reason, reason_data + in zip(self.alarms, reasons, reason_datas)] + self.assertEqual(expected, self.notifier.notify.call_args_list) + + def _do_test_bound_duration(self, start, exclude_outliers=None): + alarm = self.alarms[0] + if exclude_outliers is not None: + alarm.rule['exclude_outliers'] = exclude_outliers + with mock.patch.object(timeutils, 'utcnow') as mock_utcnow: + mock_utcnow.return_value = datetime.datetime(2012, 7, 2, 10, 45) + constraint = self.evaluator._bound_duration(alarm, []) + self.assertEqual([ + {'field': 'timestamp', + 'op': 'le', + 'value': timeutils.utcnow().isoformat()}, + {'field': 'timestamp', + 'op': 'ge', + 'value': start}, + ], constraint) + + def test_bound_duration_outlier_exclusion_defaulted(self): + self._do_test_bound_duration('2012-07-02T10:39:00') + + def test_bound_duration_outlier_exclusion_clear(self): + self._do_test_bound_duration('2012-07-02T10:39:00', False) + + def test_bound_duration_outlier_exclusion_set(self): + self._do_test_bound_duration('2012-07-02T10:35:00', True) + + def test_threshold_endpoint_types(self): + endpoint_types = ["internalURL", "publicURL"] + for endpoint_type in endpoint_types: + cfg.CONF.set_override('os_endpoint_type', + endpoint_type, + group='service_credentials') + with mock.patch('ceilometerclient.client.get_client') as client: + self.evaluator.api_client = None + self._evaluate_all_alarms() + conf = cfg.CONF.service_credentials + expected = [mock.call(2, + os_auth_url=conf.os_auth_url, + os_region_name=conf.os_region_name, + os_tenant_name=conf.os_tenant_name, + os_password=conf.os_password, + os_username=conf.os_username, + os_cacert=conf.os_cacert, + os_endpoint_type=conf.os_endpoint_type, + timeout=cfg.CONF.http_timeout, + insecure=conf.insecure)] + actual = client.call_args_list + self.assertEqual(expected, actual) + + def _do_test_simple_alarm_trip_outlier_exclusion(self, exclude_outliers): + self._set_all_rules('exclude_outliers', exclude_outliers) + self._set_all_alarms('ok') + with mock.patch('ceilometerclient.client.get_client', + return_value=self.api_client): + # most recent datapoints inside threshold but with + # 
anomalously low sample count
+            threshold = self.alarms[0].rule['threshold']
+            avgs = [self._get_stat('avg',
+                                   threshold + (v if v < 10 else -v),
+                                   count=20 if v < 10 else 1)
+                    for v in moves.xrange(1, 11)]
+            threshold = self.alarms[1].rule['threshold']
+            maxs = [self._get_stat('max',
+                                   threshold - (v if v < 7 else -v),
+                                   count=20 if v < 7 else 1)
+                    for v in moves.xrange(8)]
+            self.api_client.statistics.list.side_effect = [avgs, maxs]
+            self._evaluate_all_alarms()
+            self._assert_all_alarms('alarm' if exclude_outliers else 'ok')
+            if exclude_outliers:
+                expected = [mock.call(alarm.alarm_id, state='alarm')
+                            for alarm in self.alarms]
+                update_calls = self.api_client.alarms.set_state.call_args_list
+                self.assertEqual(expected, update_calls)
+                reasons = ['Transition to alarm due to 5 samples outside'
+                           ' threshold, most recent: %s' % avgs[-2].avg,
+                           'Transition to alarm due to 4 samples outside'
+                           ' threshold, most recent: %s' % maxs[-2].max]
+                reason_datas = [self._reason_data('outside', 5, avgs[-2].avg),
+                                self._reason_data('outside', 4, maxs[-2].max)]
+                expected = [mock.call(alarm, 'ok', reason, reason_data)
+                            for alarm, reason, reason_data
+                            in zip(self.alarms, reasons, reason_datas)]
+                self.assertEqual(expected, self.notifier.notify.call_args_list)
+
+    def test_simple_alarm_trip_with_outlier_exclusion(self):
+        self._do_test_simple_alarm_trip_outlier_exclusion(True)
+
+    def test_simple_alarm_no_trip_without_outlier_exclusion(self):
+        self._do_test_simple_alarm_trip_outlier_exclusion(False)
+
+    def _do_test_simple_alarm_clear_outlier_exclusion(self, exclude_outliers):
+        self._set_all_rules('exclude_outliers', exclude_outliers)
+        self._set_all_alarms('alarm')
+        with mock.patch('ceilometerclient.client.get_client',
+                        return_value=self.api_client):
+            # most recent datapoints outside threshold but with
+            # anomalously low sample count
+            threshold = self.alarms[0].rule['threshold']
+            avgs = [self._get_stat('avg',
+                                   threshold - (v if v < 9 else -v),
+                                   count=20 if v < 9 else 1)
+                    for v in moves.xrange(10)]
+            threshold = self.alarms[1].rule['threshold']
+            maxs = [self._get_stat('max',
+                                   threshold + (v if v < 8 else -v),
+                                   count=20 if v < 8 else 1)
+                    for v in moves.xrange(1, 9)]
+            self.api_client.statistics.list.side_effect = [avgs, maxs]
+            self._evaluate_all_alarms()
+            self._assert_all_alarms('ok' if exclude_outliers else 'alarm')
+            if exclude_outliers:
+                expected = [mock.call(alarm.alarm_id, state='ok')
+                            for alarm in self.alarms]
+                update_calls = self.api_client.alarms.set_state.call_args_list
+                self.assertEqual(expected, update_calls)
+                reasons = ['Transition to ok due to 5 samples inside'
+                           ' threshold, most recent: %s' % avgs[-2].avg,
+                           'Transition to ok due to 4 samples inside'
+                           ' threshold, most recent: %s' % maxs[-2].max]
+                reason_datas = [self._reason_data('inside', 5, avgs[-2].avg),
+                                self._reason_data('inside', 4, maxs[-2].max)]
+                expected = [mock.call(alarm, 'alarm', reason, reason_data)
+                            for alarm, reason, reason_data
+                            in zip(self.alarms, reasons, reason_datas)]
+                self.assertEqual(expected, self.notifier.notify.call_args_list)
+
+    def test_simple_alarm_clear_with_outlier_exclusion(self):
+        self._do_test_simple_alarm_clear_outlier_exclusion(True)
+
+    def test_simple_alarm_no_clear_without_outlier_exclusion(self):
+        self._do_test_simple_alarm_clear_outlier_exclusion(False)
+
+    @mock.patch.object(timeutils, 'utcnow')
+    def test_state_change_inside_time_constraint(self, mock_utcnow):
+        self._set_all_alarms('ok')
+        self.alarms[0].time_constraints = [
+            {'name': 'test',
+             'description': 'test',
+             'start': '0 11 * * *',  # daily at 11:00
+             'duration': 10800,  # 3 hours
+             'timezone': 'Europe/Ljubljana'}
+        ]
+        self.alarms[1].time_constraints = self.alarms[0].time_constraints
+        dt = datetime.datetime(2014, 1, 1, 12, 0, 0,
+                               tzinfo=pytz.timezone('Europe/Ljubljana'))
+        mock_utcnow.return_value = dt.astimezone(pytz.UTC)
+        with mock.patch('ceilometerclient.client.get_client',
+                        return_value=self.api_client):
+            # the following part based on test_simple_insufficient
+            self.api_client.statistics.list.return_value = []
+            self._evaluate_all_alarms()
+            self._assert_all_alarms('insufficient data')
+            expected = [mock.call(alarm.alarm_id,
+                                  state='insufficient data')
+                        for alarm in self.alarms]
+            update_calls = self.api_client.alarms.set_state.call_args_list
+            self.assertEqual(expected, update_calls,
+                             "Alarm should change state if the current "
+                             "time is inside its time constraint.")
+            expected = [mock.call(
+                alarm,
+                'ok',
+                ('%d datapoints are unknown'
+                 % alarm.rule['evaluation_periods']),
+                self._reason_data('unknown',
+                                  alarm.rule['evaluation_periods'],
+                                  None))
+                for alarm in self.alarms]
+            self.assertEqual(expected, self.notifier.notify.call_args_list)
+
+    @mock.patch.object(timeutils, 'utcnow')
+    def test_no_state_change_outside_time_constraint(self, mock_utcnow):
+        self._set_all_alarms('ok')
+        self.alarms[0].time_constraints = [
+            {'name': 'test',
+             'description': 'test',
+             'start': '0 11 * * *',  # daily at 11:00
+             'duration': 10800,  # 3 hours
+             'timezone': 'Europe/Ljubljana'}
+        ]
+        self.alarms[1].time_constraints = self.alarms[0].time_constraints
+        dt = datetime.datetime(2014, 1, 1, 15, 0, 0,
+                               tzinfo=pytz.timezone('Europe/Ljubljana'))
+        mock_utcnow.return_value = dt.astimezone(pytz.UTC)
+        with mock.patch('ceilometerclient.client.get_client',
+                        return_value=self.api_client):
+            self.api_client.statistics.list.return_value = []
+            self._evaluate_all_alarms()
+            self._assert_all_alarms('ok')
+            update_calls = self.api_client.alarms.set_state.call_args_list
+            self.assertEqual([], update_calls,
+                             "Alarm should not change state if the current "
+                             "time is outside its time constraint.")
+            self.assertEqual([], self.notifier.notify.call_args_list)
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/alarm/test_alarm_svc.py ceilometer-5.0.0~b3/ceilometer/tests/unit/alarm/test_alarm_svc.py
--- ceilometer-5.0.0~b2/ceilometer/tests/unit/alarm/test_alarm_svc.py 1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/unit/alarm/test_alarm_svc.py 2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,158 @@
+#
+# Copyright 2013 Red Hat, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Tests for ceilometer.alarm.service.SingletonAlarmService.
+""" +import mock +from oslo_config import fixture as fixture_config +from stevedore import extension + +from ceilometer.alarm import service +from ceilometer.tests import base as tests_base + + +class TestAlarmEvaluationService(tests_base.BaseTestCase): + def setUp(self): + super(TestAlarmEvaluationService, self).setUp() + self.CONF = self.useFixture(fixture_config.Config()).conf + self.setup_messaging(self.CONF) + + self.threshold_eval = mock.Mock() + self.evaluators = extension.ExtensionManager.make_test_instance( + [ + extension.Extension( + 'threshold', + None, + None, + self.threshold_eval), + ] + ) + self.api_client = mock.MagicMock() + self.svc = service.AlarmEvaluationService() + self.svc.tg = mock.Mock() + self.svc.partition_coordinator = mock.MagicMock() + p_coord = self.svc.partition_coordinator + p_coord.extract_my_subset.side_effect = lambda _, x: x + self.svc.evaluators = self.evaluators + self.svc.supported_evaluators = ['threshold'] + + def _do_test_start(self, test_interval=120, + coordination_heartbeat=1.0, + coordination_active=False): + self.CONF.set_override('evaluation_interval', + test_interval, + group='alarm') + self.CONF.set_override('heartbeat', + coordination_heartbeat, + group='coordination') + with mock.patch('ceilometerclient.client.get_client', + return_value=self.api_client): + p_coord_mock = self.svc.partition_coordinator + p_coord_mock.is_active.return_value = coordination_active + + self.svc.start() + self.svc.partition_coordinator.start.assert_called_once_with() + self.svc.partition_coordinator.join_group.assert_called_once_with( + self.svc.PARTITIONING_GROUP_NAME) + + initial_delay = test_interval if coordination_active else None + expected = [ + mock.call(test_interval, + self.svc._evaluate_assigned_alarms, + initial_delay=initial_delay), + mock.call(604800, mock.ANY), + ] + if coordination_active: + hb_interval = min(coordination_heartbeat, test_interval / 4) + hb_call = mock.call(hb_interval, + self.svc.partition_coordinator.heartbeat) + expected.insert(1, hb_call) + actual = self.svc.tg.add_timer.call_args_list + self.assertEqual(expected, actual) + + def test_start_singleton(self): + self._do_test_start(coordination_active=False) + + def test_start_coordinated(self): + self._do_test_start(coordination_active=True) + + def test_start_coordinated_high_hb_interval(self): + self._do_test_start(coordination_active=True, test_interval=10, + coordination_heartbeat=5) + + def test_evaluation_cycle(self): + alarm = mock.Mock(type='threshold') + self.api_client.alarms.list.return_value = [alarm] + with mock.patch('ceilometerclient.client.get_client', + return_value=self.api_client): + p_coord_mock = self.svc.partition_coordinator + p_coord_mock.extract_my_subset.return_value = [alarm] + + self.svc._evaluate_assigned_alarms() + + p_coord_mock.extract_my_subset.assert_called_once_with( + self.svc.PARTITIONING_GROUP_NAME, [alarm]) + self.threshold_eval.evaluate.assert_called_once_with(alarm) + + def test_evaluation_cycle_with_bad_alarm(self): + alarms = [ + mock.Mock(type='threshold', name='bad'), + mock.Mock(type='threshold', name='good'), + ] + self.api_client.alarms.list.return_value = alarms + self.threshold_eval.evaluate.side_effect = [Exception('Boom!'), None] + with mock.patch('ceilometerclient.client.get_client', + return_value=self.api_client): + p_coord_mock = self.svc.partition_coordinator + p_coord_mock.extract_my_subset.return_value = alarms + + self.svc._evaluate_assigned_alarms() + self.assertEqual([mock.call(alarms[0]), mock.call(alarms[1])], 
+ self.threshold_eval.evaluate.call_args_list) + + def test_unknown_extension_skipped(self): + alarms = [ + mock.Mock(type='not_existing_type'), + mock.Mock(type='threshold') + ] + + self.api_client.alarms.list.return_value = alarms + with mock.patch('ceilometerclient.client.get_client', + return_value=self.api_client): + self.svc.start() + self.svc._evaluate_assigned_alarms() + self.threshold_eval.evaluate.assert_called_once_with(alarms[1]) + + def test_singleton_endpoint_types(self): + endpoint_types = ["internalURL", "publicURL"] + for endpoint_type in endpoint_types: + self.CONF.set_override('os_endpoint_type', + endpoint_type, + group='service_credentials') + with mock.patch('ceilometerclient.client.get_client') as client: + self.svc.api_client = None + self.svc._evaluate_assigned_alarms() + conf = self.CONF.service_credentials + expected = [mock.call(2, + os_auth_url=conf.os_auth_url, + os_region_name=conf.os_region_name, + os_tenant_name=conf.os_tenant_name, + os_password=conf.os_password, + os_username=conf.os_username, + os_cacert=conf.os_cacert, + os_endpoint_type=conf.os_endpoint_type, + timeout=self.CONF.http_timeout, + insecure=conf.insecure)] + actual = client.call_args_list + self.assertEqual(expected, actual) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/alarm/test_notifier.py ceilometer-5.0.0~b3/ceilometer/tests/unit/alarm/test_notifier.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/alarm/test_notifier.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/alarm/test_notifier.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,266 @@ +# +# Copyright 2013-2014 eNovance +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from oslo_config import fixture as fixture_config +from oslo_context import context +from oslo_serialization import jsonutils +from oslotest import mockpatch +import requests +import six.moves.urllib.parse as urlparse + +from ceilometer import alarm as ceilometer_alarm +from ceilometer.alarm import service +from ceilometer.tests import base as tests_base + + +DATA_JSON = jsonutils.loads( + '{"current": "ALARM", "alarm_id": "foobar", "alarm_name": "testalarm",' + ' "severity": "critical", "reason": "what ?",' + ' "reason_data": {"test": "test"}, "previous": "OK"}' +) +NOTIFICATION = dict(alarm_id='foobar', + alarm_name='testalarm', + severity='critical', + condition=dict(threshold=42), + reason='what ?', + reason_data={'test': 'test'}, + previous='OK', + current='ALARM') + + +class TestAlarmNotifier(tests_base.BaseTestCase): + + def setUp(self): + super(TestAlarmNotifier, self).setUp() + self.CONF = self.useFixture(fixture_config.Config()).conf + self.setup_messaging(self.CONF) + self.service = service.AlarmNotifierService() + self.useFixture(mockpatch.Patch( + 'oslo_context.context.generate_request_id', + self._fake_generate_request_id)) + + @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) + def test_init_host(self): + # If we try to create a real RPC connection, init_host() never + # returns. 
Mock it out so we can establish the service + # configuration. + with mock.patch.object(self.service.rpc_server, 'start'): + self.service.start() + + def test_notify_alarm(self): + data = { + 'actions': ['test://'], + 'alarm_id': 'foobar', + 'alarm_name': 'testalarm', + 'severity': 'critical', + 'previous': 'OK', + 'current': 'ALARM', + 'reason': 'Everything is on fire', + 'reason_data': {'fire': 'everywhere'} + } + self.service.notify_alarm(context.get_admin_context(), data) + notifications = ceilometer_alarm.NOTIFIERS['test'].obj.notifications + self.assertEqual(1, len(notifications)) + self.assertEqual((urlparse.urlsplit(data['actions'][0]), + data['alarm_id'], + data['alarm_name'], + data['severity'], + data['previous'], + data['current'], + data['reason'], + data['reason_data']), + notifications[0]) + + def test_notify_alarm_no_action(self): + self.service.notify_alarm(context.get_admin_context(), {}) + + def test_notify_alarm_log_action(self): + self.service.notify_alarm(context.get_admin_context(), + { + 'actions': ['log://'], + 'alarm_id': 'foobar', + 'condition': {'threshold': 42}}) + + @staticmethod + def _fake_spawn_n(func, *args, **kwargs): + func(*args, **kwargs) + + @staticmethod + def _notification(action): + notification = {} + notification.update(NOTIFICATION) + notification['actions'] = [action] + return notification + + HTTP_HEADERS = {'x-openstack-request-id': 'fake_request_id', + 'content-type': 'application/json'} + + def _fake_generate_request_id(self): + return self.HTTP_HEADERS['x-openstack-request-id'] + + def test_notify_alarm_rest_action_ok(self): + action = 'http://host/action' + + with mock.patch('eventlet.spawn_n', self._fake_spawn_n): + with mock.patch.object(requests.Session, 'post') as poster: + self.service.notify_alarm(context.get_admin_context(), + self._notification(action)) + poster.assert_called_with(action, data=mock.ANY, + headers=mock.ANY) + args, kwargs = poster.call_args + self.assertEqual(self.HTTP_HEADERS, kwargs['headers']) + self.assertEqual(DATA_JSON, jsonutils.loads(kwargs['data'])) + + def test_notify_alarm_rest_action_with_ssl_client_cert(self): + action = 'https://host/action' + certificate = "/etc/ssl/cert/whatever.pem" + + self.CONF.set_override("rest_notifier_certificate_file", certificate, + group='alarm') + + with mock.patch('eventlet.spawn_n', self._fake_spawn_n): + with mock.patch.object(requests.Session, 'post') as poster: + self.service.notify_alarm(context.get_admin_context(), + self._notification(action)) + poster.assert_called_with(action, data=mock.ANY, + headers=mock.ANY, + cert=certificate, verify=True) + args, kwargs = poster.call_args + self.assertEqual(self.HTTP_HEADERS, kwargs['headers']) + self.assertEqual(DATA_JSON, jsonutils.loads(kwargs['data'])) + + def test_notify_alarm_rest_action_with_ssl_client_cert_and_key(self): + action = 'https://host/action' + certificate = "/etc/ssl/cert/whatever.pem" + key = "/etc/ssl/cert/whatever.key" + + self.CONF.set_override("rest_notifier_certificate_file", certificate, + group='alarm') + self.CONF.set_override("rest_notifier_certificate_key", key, + group='alarm') + + with mock.patch('eventlet.spawn_n', self._fake_spawn_n): + with mock.patch.object(requests.Session, 'post') as poster: + self.service.notify_alarm(context.get_admin_context(), + self._notification(action)) + poster.assert_called_with(action, data=mock.ANY, + headers=mock.ANY, + cert=(certificate, key), verify=True) + args, kwargs = poster.call_args + self.assertEqual(self.HTTP_HEADERS, kwargs['headers']) + 
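+                # The POSTed body is compared as parsed JSON rather than as
+                # a raw string, since jsonutils.dumps makes no guarantee
+                # about dict key ordering.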
self.assertEqual(DATA_JSON, jsonutils.loads(kwargs['data'])) + + def test_notify_alarm_rest_action_with_ssl_verify_disable_by_cfg(self): + action = 'https://host/action' + + self.CONF.set_override("rest_notifier_ssl_verify", False, + group='alarm') + + with mock.patch('eventlet.spawn_n', self._fake_spawn_n): + with mock.patch.object(requests.Session, 'post') as poster: + self.service.notify_alarm(context.get_admin_context(), + self._notification(action)) + poster.assert_called_with(action, data=mock.ANY, + headers=mock.ANY, + verify=False) + args, kwargs = poster.call_args + self.assertEqual(self.HTTP_HEADERS, kwargs['headers']) + self.assertEqual(DATA_JSON, jsonutils.loads(kwargs['data'])) + + def test_notify_alarm_rest_action_with_ssl_verify_disable(self): + action = 'https://host/action?ceilometer-alarm-ssl-verify=0' + + with mock.patch('eventlet.spawn_n', self._fake_spawn_n): + with mock.patch.object(requests.Session, 'post') as poster: + self.service.notify_alarm(context.get_admin_context(), + self._notification(action)) + poster.assert_called_with(action, data=mock.ANY, + headers=mock.ANY, + verify=False) + args, kwargs = poster.call_args + self.assertEqual(self.HTTP_HEADERS, kwargs['headers']) + self.assertEqual(DATA_JSON, jsonutils.loads(kwargs['data'])) + + def test_notify_alarm_rest_action_with_ssl_verify_enable_by_user(self): + action = 'https://host/action?ceilometer-alarm-ssl-verify=1' + + self.CONF.set_override("rest_notifier_ssl_verify", False, + group='alarm') + + with mock.patch('eventlet.spawn_n', self._fake_spawn_n): + with mock.patch.object(requests.Session, 'post') as poster: + self.service.notify_alarm(context.get_admin_context(), + self._notification(action)) + poster.assert_called_with(action, data=mock.ANY, + headers=mock.ANY, + verify=True) + args, kwargs = poster.call_args + self.assertEqual(self.HTTP_HEADERS, kwargs['headers']) + self.assertEqual(DATA_JSON, jsonutils.loads(kwargs['data'])) + + @staticmethod + def _fake_urlsplit(*args, **kwargs): + raise Exception("Evil urlsplit!") + + def test_notify_alarm_invalid_url(self): + with mock.patch('oslo_utils.netutils.urlsplit', + self._fake_urlsplit): + LOG = mock.MagicMock() + with mock.patch('ceilometer.alarm.service.LOG', LOG): + self.service.notify_alarm( + context.get_admin_context(), + { + 'actions': ['no-such-action-i-am-sure'], + 'alarm_id': 'foobar', + 'condition': {'threshold': 42}, + }) + self.assertTrue(LOG.error.called) + + def test_notify_alarm_invalid_action(self): + LOG = mock.MagicMock() + with mock.patch('ceilometer.alarm.service.LOG', LOG): + self.service.notify_alarm( + context.get_admin_context(), + { + 'actions': ['no-such-action-i-am-sure://'], + 'alarm_id': 'foobar', + 'condition': {'threshold': 42}, + }) + self.assertTrue(LOG.error.called) + + def test_notify_alarm_trust_action(self): + action = 'trust+http://trust-1234@host/action' + url = 'http://host/action' + + client = mock.MagicMock() + client.auth_token = 'token_1234' + headers = {'X-Auth-Token': 'token_1234'} + headers.update(self.HTTP_HEADERS) + + self.useFixture(mockpatch.Patch('keystoneclient.v3.client.Client', + lambda **kwargs: client)) + + with mock.patch('eventlet.spawn_n', self._fake_spawn_n): + with mock.patch.object(requests.Session, 'post') as poster: + self.service.notify_alarm(context.get_admin_context(), + self._notification(action)) + headers = {'X-Auth-Token': 'token_1234'} + headers.update(self.HTTP_HEADERS) + poster.assert_called_with( + url, data=mock.ANY, headers=mock.ANY) + args, kwargs = poster.call_args + 
self.assertEqual(headers, kwargs['headers']) + self.assertEqual(DATA_JSON, jsonutils.loads(kwargs['data'])) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/alarm/test_rpc.py ceilometer-5.0.0~b3/ceilometer/tests/unit/alarm/test_rpc.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/alarm/test_rpc.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/alarm/test_rpc.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,170 @@ +# +# Copyright 2013-2014 eNovance +# +# Authors: Mehdi Abaakouk +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import uuid + +from ceilometerclient.v2 import alarms +from oslo_config import fixture as fixture_config +import six + +from ceilometer.alarm import rpc as rpc_alarm +from ceilometer.alarm.storage import models +from ceilometer import messaging +from ceilometer.tests import base as tests_base + + +class FakeNotifier(object): + def __init__(self, transport): + self.rpc = messaging.get_rpc_server( + transport, "alarm_notifier", self) + self.notified = [] + + def start(self, expected_length): + self.expected_length = expected_length + self.rpc.start() + + def notify_alarm(self, context, data): + self.notified.append(data) + if len(self.notified) == self.expected_length: + self.rpc.stop() + + +class TestRPCAlarmNotifier(tests_base.BaseTestCase): + def setUp(self): + super(TestRPCAlarmNotifier, self).setUp() + self.CONF = self.useFixture(fixture_config.Config()).conf + self.setup_messaging(self.CONF) + + self.notifier_server = FakeNotifier(self.transport) + self.notifier = rpc_alarm.RPCAlarmNotifier() + self.alarms = [ + alarms.Alarm(None, info={ + 'name': 'instance_running_hot', + 'meter_name': 'cpu_util', + 'comparison_operator': 'gt', + 'threshold': 80.0, + 'evaluation_periods': 5, + 'statistic': 'avg', + 'state': 'ok', + 'ok_actions': ['http://host:8080/path'], + 'user_id': 'foobar', + 'project_id': 'snafu', + 'period': 60, + 'alarm_id': str(uuid.uuid4()), + 'severity': 'critical', + 'matching_metadata':{'resource_id': + 'my_instance'} + }), + alarms.Alarm(None, info={ + 'name': 'group_running_idle', + 'meter_name': 'cpu_util', + 'comparison_operator': 'le', + 'threshold': 10.0, + 'statistic': 'max', + 'evaluation_periods': 4, + 'state': 'insufficient data', + 'insufficient_data_actions': ['http://other_host/path'], + 'user_id': 'foobar', + 'project_id': 'snafu', + 'period': 300, + 'alarm_id': str(uuid.uuid4()), + 'severity': 'critical', + 'matching_metadata':{'metadata.user_metadata.AS': + 'my_group'} + }), + ] + + def test_rpc_target(self): + topic = self.notifier.client.target.topic + self.assertEqual('alarm_notifier', topic) + + def test_notify_alarm(self): + self.notifier_server.start(2) + + previous = ['alarm', 'ok'] + for i, a in enumerate(self.alarms): + self.notifier.notify(a, previous[i], "what? 
%d" % i, + {'fire': '%d' % i}) + + self.notifier_server.rpc.wait() + + self.assertEqual(2, len(self.notifier_server.notified)) + for i, a in enumerate(self.alarms): + actions = getattr(a, models.Alarm.ALARM_ACTIONS_MAP[a.state]) + self.assertEqual(self.alarms[i].alarm_id, + self.notifier_server.notified[i]["alarm_id"]) + self.assertEqual(self.alarms[i].name, + self.notifier_server.notified[i]["alarm_name"]) + self.assertEqual(self.alarms[i].severity, + self.notifier_server.notified[i]["severity"]) + self.assertEqual(actions, + self.notifier_server.notified[i]["actions"]) + self.assertEqual(previous[i], + self.notifier_server.notified[i]["previous"]) + self.assertEqual(self.alarms[i].state, + self.notifier_server.notified[i]["current"]) + self.assertEqual("what? %d" % i, + self.notifier_server.notified[i]["reason"]) + self.assertEqual({'fire': '%d' % i}, + self.notifier_server.notified[i]["reason_data"]) + + def test_notify_non_string_reason(self): + self.notifier_server.start(1) + self.notifier.notify(self.alarms[0], 'ok', 42, {}) + self.notifier_server.rpc.wait() + reason = self.notifier_server.notified[0]['reason'] + self.assertIsInstance(reason, six.string_types) + + def test_notify_no_actions(self): + alarm = alarms.Alarm(None, info={ + 'name': 'instance_running_hot', + 'meter_name': 'cpu_util', + 'comparison_operator': 'gt', + 'threshold': 80.0, + 'evaluation_periods': 5, + 'statistic': 'avg', + 'state': 'ok', + 'user_id': 'foobar', + 'project_id': 'snafu', + 'period': 60, + 'ok_actions': [], + 'alarm_id': str(uuid.uuid4()), + 'matching_metadata': {'resource_id': + 'my_instance'} + }) + self.notifier.notify(alarm, 'alarm', "what?", {}) + self.assertEqual(0, len(self.notifier_server.notified)) + + +class FakeCoordinator(object): + def __init__(self, transport): + self.rpc = messaging.get_rpc_server( + transport, "alarm_partition_coordination", self) + self.notified = [] + + def presence(self, context, data): + self._record('presence', data) + + def allocate(self, context, data): + self._record('allocate', data) + + def assign(self, context, data): + self._record('assign', data) + + def _record(self, method, data): + self.notified.append((method, data)) + self.rpc.stop() diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/api/test_app.py ceilometer-5.0.0~b3/ceilometer/tests/unit/api/test_app.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/api/test_app.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/api/test_app.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,60 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2014 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock +from oslo_config import cfg +from oslo_config import fixture as fixture_config +from oslo_log import log + +from ceilometer.api import app +from ceilometer.tests import base + + +class TestApp(base.BaseTestCase): + + def setUp(self): + super(TestApp, self).setUp() + self.CONF = self.useFixture(fixture_config.Config()).conf + log.register_options(cfg.CONF) + + def test_api_paste_file_not_exist(self): + self.CONF.set_override('api_paste_config', 'non-existent-file') + with mock.patch.object(self.CONF, 'find_file') as ff: + ff.return_value = None + self.assertRaises(cfg.ConfigFilesNotFoundError, app.load_app) + + @mock.patch('ceilometer.storage.get_connection_from_config', + mock.MagicMock()) + @mock.patch('pecan.make_app') + def test_pecan_debug(self, mocked): + def _check_pecan_debug(g_debug, p_debug, expected, workers=1): + self.CONF.set_override('debug', g_debug) + if p_debug is not None: + self.CONF.set_override('pecan_debug', p_debug, group='api') + self.CONF.set_override('workers', workers, group='api') + app.setup_app() + args, kwargs = mocked.call_args + self.assertEqual(expected, kwargs.get('debug')) + + _check_pecan_debug(g_debug=False, p_debug=None, expected=False) + _check_pecan_debug(g_debug=True, p_debug=None, expected=False) + _check_pecan_debug(g_debug=True, p_debug=False, expected=False) + _check_pecan_debug(g_debug=False, p_debug=True, expected=True) + _check_pecan_debug(g_debug=True, p_debug=None, expected=False, + workers=5) + _check_pecan_debug(g_debug=False, p_debug=True, expected=False, + workers=5) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/api/test_hooks.py ceilometer-5.0.0~b3/ceilometer/tests/unit/api/test_hooks.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/api/test_hooks.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/api/test_hooks.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,35 @@ +# Copyright 2015 Huawei Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
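[editor's note] The pecan_debug matrix above encodes a simple rule: Pecan's debug middleware follows the api.pecan_debug option rather than the global debug flag, and is forced off whenever the API runs with more than one worker. A sketch of the overrides a caller would use, assuming the option names exercised by the test:

    # Sketch, assuming the oslo.config options from the test above.
    CONF.set_override('pecan_debug', True, group='api')
    CONF.set_override('workers', 1, group='api')   # pecan_debug honoured
    CONF.set_override('workers', 5, group='api')   # pecan_debug forced off by setup_app()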
+ +from oslo_config import fixture as fixture_config +import oslo_messaging + +from ceilometer.api import hooks +from ceilometer.tests import base + + +class TestTestNotifierHook(base.BaseTestCase): + + def setUp(self): + super(TestTestNotifierHook, self).setUp() + self.CONF = self.useFixture(fixture_config.Config()).conf + + def test_init_notifier_with_drivers(self): + self.CONF.set_override('telemetry_driver', 'messagingv2', + group='publisher_notifier') + hook = hooks.NotifierHook() + notifier = hook.notifier + self.assertIsInstance(notifier, oslo_messaging.Notifier) + self.assertEqual(['messagingv2'], notifier._driver_names) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/api/test_versions.py ceilometer-5.0.0~b3/ceilometer/tests/unit/api/test_versions.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/api/test_versions.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/api/test_versions.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,65 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ceilometer.tests.functional import api + +V2_MEDIA_TYPES = [ + { + 'base': 'application/json', + 'type': 'application/vnd.openstack.telemetry-v2+json' + }, { + 'base': 'application/xml', + 'type': 'application/vnd.openstack.telemetry-v2+xml' + } +] + +V2_HTML_DESCRIPTION = { + 'href': 'http://docs.openstack.org/', + 'rel': 'describedby', + 'type': 'text/html', +} + +V2_EXPECTED_RESPONSE = { + 'id': 'v2', + 'links': [ + { + 'rel': 'self', + 'href': 'http://localhost/v2', + }, + V2_HTML_DESCRIPTION + ], + 'media-types': V2_MEDIA_TYPES, + 'status': 'stable', + 'updated': '2013-02-13T00:00:00Z', +} + +V2_VERSION_RESPONSE = { + "version": V2_EXPECTED_RESPONSE +} + +VERSIONS_RESPONSE = { + "versions": { + "values": [ + V2_EXPECTED_RESPONSE + ] + } +} + + +class TestVersions(api.FunctionalTest): + + def test_versions(self): + data = self.get_json('/') + self.assertEqual(VERSIONS_RESPONSE, data) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/api/v2/test_complex_query.py ceilometer-5.0.0~b3/ceilometer/tests/unit/api/v2/test_complex_query.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/api/v2/test_complex_query.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/api/v2/test_complex_query.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,402 @@ +# +# Copyright Ericsson AB 2013. All rights reserved +# +# Authors: Ildiko Vancsa +# Balazs Gibizer +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
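[editor's note] The hooks test above shows the single knob that selects the notification transport: NotifierHook builds an oslo_messaging.Notifier from the publisher_notifier/telemetry_driver option. A minimal usage sketch, names as in the test:

    # Sketch: exercise NotifierHook with an explicit driver override.
    CONF.set_override('telemetry_driver', 'messagingv2',
                      group='publisher_notifier')
    hook = hooks.NotifierHook()
    assert isinstance(hook.notifier, oslo_messaging.Notifier)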
+"""Test the methods related to complex query.""" +import datetime + +import fixtures +import jsonschema +import mock +from oslotest import base +import wsme + +from ceilometer.alarm.storage import models as alarm_models +from ceilometer.api.controllers.v2 import query +from ceilometer.storage import models + + +class FakeComplexQuery(query.ValidatedComplexQuery): + def __init__(self, db_model, additional_name_mapping=None, metadata=False): + super(FakeComplexQuery, self).__init__(query=None, + db_model=db_model, + additional_name_mapping=( + additional_name_mapping or + {}), + metadata_allowed=metadata) + + +sample_name_mapping = {"resource": "resource_id", + "meter": "counter_name", + "type": "counter_type", + "unit": "counter_unit", + "volume": "counter_volume"} + + +class TestComplexQuery(base.BaseTestCase): + def setUp(self): + super(TestComplexQuery, self).setUp() + self.useFixture(fixtures.MonkeyPatch( + 'pecan.response', mock.MagicMock())) + self.query = FakeComplexQuery(models.Sample, + sample_name_mapping, + True) + self.query_alarm = FakeComplexQuery(alarm_models.Alarm) + self.query_alarmchange = FakeComplexQuery( + alarm_models.AlarmChange) + + def test_replace_isotime_utc(self): + filter_expr = {"=": {"timestamp": "2013-12-05T19:38:29Z"}} + self.query._replace_isotime_with_datetime(filter_expr) + self.assertEqual(datetime.datetime(2013, 12, 5, 19, 38, 29), + filter_expr["="]["timestamp"]) + + def test_replace_isotime_timezone_removed(self): + filter_expr = {"=": {"timestamp": "2013-12-05T20:38:29+01:00"}} + self.query._replace_isotime_with_datetime(filter_expr) + self.assertEqual(datetime.datetime(2013, 12, 5, 20, 38, 29), + filter_expr["="]["timestamp"]) + + def test_replace_isotime_wrong_syntax(self): + filter_expr = {"=": {"timestamp": "not a valid isotime string"}} + self.assertRaises(wsme.exc.ClientSideError, + self.query._replace_isotime_with_datetime, + filter_expr) + + def test_replace_isotime_in_complex_filter(self): + filter_expr = {"and": [{"=": {"timestamp": "2013-12-05T19:38:29Z"}}, + {"=": {"timestamp": "2013-12-06T19:38:29Z"}}]} + self.query._replace_isotime_with_datetime(filter_expr) + self.assertEqual(datetime.datetime(2013, 12, 5, 19, 38, 29), + filter_expr["and"][0]["="]["timestamp"]) + self.assertEqual(datetime.datetime(2013, 12, 6, 19, 38, 29), + filter_expr["and"][1]["="]["timestamp"]) + + def test_replace_isotime_in_complex_filter_with_unbalanced_tree(self): + subfilter = {"and": [{"=": {"project_id": 42}}, + {"=": {"timestamp": "2013-12-06T19:38:29Z"}}]} + + filter_expr = {"or": [{"=": {"timestamp": "2013-12-05T19:38:29Z"}}, + subfilter]} + + self.query._replace_isotime_with_datetime(filter_expr) + self.assertEqual(datetime.datetime(2013, 12, 5, 19, 38, 29), + filter_expr["or"][0]["="]["timestamp"]) + self.assertEqual(datetime.datetime(2013, 12, 6, 19, 38, 29), + filter_expr["or"][1]["and"][1]["="]["timestamp"]) + + def test_convert_operator_to_lower_case(self): + filter_expr = {"AND": [{"=": {"project_id": 42}}, + {"=": {"project_id": 44}}]} + self.query._convert_operator_to_lower_case(filter_expr) + self.assertEqual("and", list(filter_expr.keys())[0]) + + filter_expr = {"Or": [{"=": {"project_id": 43}}, + {"anD": [{"=": {"project_id": 44}}, + {"=": {"project_id": 42}}]}]} + self.query._convert_operator_to_lower_case(filter_expr) + self.assertEqual("or", list(filter_expr.keys())[0]) + self.assertEqual("and", list(filter_expr["or"][1].keys())[0]) + + def test_invalid_filter_misstyped_field_name_samples(self): + filter = {"=": {"project_id11": 42}} + 
self.assertRaises(jsonschema.ValidationError, + self.query._validate_filter, + filter) + + def test_invalid_filter_misstyped_field_name_alarms(self): + filter = {"=": {"enabbled": True}} + self.assertRaises(jsonschema.ValidationError, + self.query_alarm._validate_filter, + filter) + + def test_invalid_filter_misstyped_field_name_alarmchange(self): + filter = {"=": {"tpe": "rule change"}} + self.assertRaises(jsonschema.ValidationError, + self.query_alarmchange._validate_filter, + filter) + + def test_invalid_complex_filter_wrong_field_names(self): + filter = {"and": + [{"=": {"non_existing_field": 42}}, + {"=": {"project_id": 42}}]} + self.assertRaises(jsonschema.ValidationError, + self.query._validate_filter, + filter) + + filter = {"and": + [{"=": {"project_id": 42}}, + {"=": {"non_existing_field": 42}}]} + self.assertRaises(jsonschema.ValidationError, + self.query_alarm._validate_filter, + filter) + + filter = {"and": + [{"=": {"project_id11": 42}}, + {"=": {"project_id": 42}}]} + self.assertRaises(jsonschema.ValidationError, + self.query_alarmchange._validate_filter, + filter) + + filter = {"or": + [{"=": {"non_existing_field": 42}}, + {"and": + [{"=": {"project_id": 44}}, + {"=": {"project_id": 42}}]}]} + self.assertRaises(jsonschema.ValidationError, + self.query._validate_filter, + filter) + + filter = {"or": + [{"=": {"project_id": 43}}, + {"and": + [{"=": {"project_id": 44}}, + {"=": {"non_existing_field": 42}}]}]} + self.assertRaises(jsonschema.ValidationError, + self.query_alarm._validate_filter, + filter) + + def test_convert_orderby(self): + orderby = [] + self.query._convert_orderby_to_lower_case(orderby) + self.assertEqual([], orderby) + + orderby = [{"project_id": "DESC"}] + self.query._convert_orderby_to_lower_case(orderby) + self.assertEqual([{"project_id": "desc"}], orderby) + + orderby = [{"project_id": "ASC"}, {"resource_id": "DESC"}] + self.query._convert_orderby_to_lower_case(orderby) + self.assertEqual([{"project_id": "asc"}, {"resource_id": "desc"}], + orderby) + + def test_validate_orderby_empty_direction(self): + orderby = [{"project_id": ""}] + self.assertRaises(jsonschema.ValidationError, + self.query._validate_orderby, + orderby) + orderby = [{"project_id": "asc"}, {"resource_id": ""}] + self.assertRaises(jsonschema.ValidationError, + self.query._validate_orderby, + orderby) + + def test_validate_orderby_wrong_order_string(self): + orderby = [{"project_id": "not a valid order"}] + self.assertRaises(jsonschema.ValidationError, + self.query._validate_orderby, + orderby) + + def test_validate_orderby_wrong_multiple_item_order_string(self): + orderby = [{"project_id": "not a valid order"}, {"resource_id": "ASC"}] + self.assertRaises(jsonschema.ValidationError, + self.query._validate_orderby, + orderby) + + def test_validate_orderby_empty_field_name(self): + orderby = [{"": "ASC"}] + self.assertRaises(jsonschema.ValidationError, + self.query._validate_orderby, + orderby) + orderby = [{"project_id": "asc"}, {"": "desc"}] + self.assertRaises(jsonschema.ValidationError, + self.query._validate_orderby, + orderby) + + def test_validate_orderby_wrong_field_name(self): + orderby = [{"project_id11": "ASC"}] + self.assertRaises(jsonschema.ValidationError, + self.query._validate_orderby, + orderby) + + def test_validate_orderby_wrong_field_name_multiple_item_orderby(self): + orderby = [{"project_id": "asc"}, {"resource_id11": "ASC"}] + self.assertRaises(jsonschema.ValidationError, + self.query._validate_orderby, + orderby) + + def 
test_validate_orderby_metadata_is_not_allowed(self): + orderby = [{"metadata.display_name": "asc"}] + self.assertRaises(jsonschema.ValidationError, + self.query._validate_orderby, + orderby) + + +class TestFilterSyntaxValidation(base.BaseTestCase): + def setUp(self): + super(TestFilterSyntaxValidation, self).setUp() + self.query = FakeComplexQuery(models.Sample, + sample_name_mapping, + True) + + def test_simple_operator(self): + filter = {"=": {"project_id": "string_value"}} + self.query._validate_filter(filter) + + filter = {"=>": {"project_id": "string_value"}} + self.query._validate_filter(filter) + + def test_valid_value_types(self): + filter = {"=": {"project_id": "string_value"}} + self.query._validate_filter(filter) + + filter = {"=": {"project_id": 42}} + self.query._validate_filter(filter) + + filter = {"=": {"project_id": 3.14}} + self.query._validate_filter(filter) + + filter = {"=": {"project_id": True}} + self.query._validate_filter(filter) + + filter = {"=": {"project_id": False}} + self.query._validate_filter(filter) + + def test_invalid_simple_operator(self): + filter = {"==": {"project_id": "string_value"}} + self.assertRaises(jsonschema.ValidationError, + self.query._validate_filter, + filter) + + filter = {"": {"project_id": "string_value"}} + self.assertRaises(jsonschema.ValidationError, + self.query._validate_filter, + filter) + + def test_more_than_one_operator_is_invalid(self): + filter = {"=": {"project_id": "string_value"}, + "<": {"": ""}} + self.assertRaises(jsonschema.ValidationError, + self.query._validate_filter, + filter) + + def test_empty_expression_is_invalid(self): + filter = {} + self.assertRaises(jsonschema.ValidationError, + self.query._validate_filter, + filter) + + def test_invalid_field_name(self): + filter = {"=": {"": "value"}} + self.assertRaises(jsonschema.ValidationError, + self.query._validate_filter, + filter) + + filter = {"=": {" ": "value"}} + self.assertRaises(jsonschema.ValidationError, + self.query._validate_filter, + filter) + + filter = {"=": {"\t": "value"}} + self.assertRaises(jsonschema.ValidationError, + self.query._validate_filter, + filter) + + def test_more_than_one_field_is_invalid(self): + filter = {"=": {"project_id": "value", "resource_id": "value"}} + self.assertRaises(jsonschema.ValidationError, + self.query._validate_filter, + filter) + + def test_missing_field_after_simple_op_is_invalid(self): + filter = {"=": {}} + self.assertRaises(jsonschema.ValidationError, + self.query._validate_filter, + filter) + + def test_and_or(self): + filter = {"and": [{"=": {"project_id": "string_value"}}, + {"=": {"resource_id": "value"}}]} + self.query._validate_filter(filter) + + filter = {"or": [{"and": [{"=": {"project_id": "string_value"}}, + {"=": {"resource_id": "value"}}]}, + {"=": {"counter_name": "value"}}]} + self.query._validate_filter(filter) + + filter = {"or": [{"and": [{"=": {"project_id": "string_value"}}, + {"=": {"resource_id": "value"}}, + {"<": {"counter_name": 42}}]}, + {"=": {"counter_name": "value"}}]} + self.query._validate_filter(filter) + + def test_complex_operator_with_in(self): + filter = {"and": [{"<": {"counter_volume": 42}}, + {">=": {"counter_volume": 36}}, + {"in": {"project_id": ["project_id1", + "project_id2", + "project_id3"]}}]} + self.query._validate_filter(filter) + + def test_invalid_complex_operator(self): + filter = {"xor": [{"=": {"project_id": "string_value"}}, + {"=": {"resource_id": "value"}}]} + self.assertRaises(jsonschema.ValidationError, + self.query._validate_filter, + filter) + + def 
test_and_or_with_one_child_is_invalid(self): + filter = {"or": [{"=": {"project_id": "string_value"}}]} + self.assertRaises(jsonschema.ValidationError, + self.query._validate_filter, + filter) + + def test_complex_operator_with_zero_child_is_invalid(self): + filter = {"or": []} + self.assertRaises(jsonschema.ValidationError, + self.query._validate_filter, + filter) + + def test_more_than_one_complex_operator_is_invalid(self): + filter = {"and": [{"=": {"project_id": "string_value"}}, + {"=": {"resource_id": "value"}}], + "or": [{"=": {"project_id": "string_value"}}, + {"=": {"resource_id": "value"}}]} + self.assertRaises(jsonschema.ValidationError, + self.query._validate_filter, + filter) + + def test_not(self): + filter = {"not": {"=": {"project_id": "value"}}} + self.query._validate_filter(filter) + + filter = { + "not": + {"or": + [{"and": + [{"=": {"project_id": "string_value"}}, + {"=": {"resource_id": "value"}}, + {"<": {"counter_name": 42}}]}, + {"=": {"counter_name": "value"}}]}} + self.query._validate_filter(filter) + + def test_not_with_zero_child_is_invalid(self): + filter = {"not": {}} + self.assertRaises(jsonschema.ValidationError, + self.query._validate_filter, + filter) + + def test_not_with_more_than_one_child_is_invalid(self): + filter = {"not": {"=": {"project_id": "value"}, + "!=": {"resource_id": "value"}}} + self.assertRaises(jsonschema.ValidationError, + self.query._validate_filter, + filter) + + def test_empty_in_query_not_passing(self): + filter = {"in": {"resource_id": []}} + self.assertRaises(jsonschema.ValidationError, + self.query._validate_filter, + filter) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/api/v2/test_query.py ceilometer-5.0.0~b3/ceilometer/tests/unit/api/v2/test_query.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/api/v2/test_query.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/api/v2/test_query.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,417 @@ +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
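[editor's note] Taken together, the syntax-validation tests above delimit the accepted filter grammar: exactly one operator per node, simple operators take a single field/value pair, "and"/"or" need at least two children, "not" takes exactly one, and "in" requires a non-empty list. A few shapes for reference, with illustrative values:

    # Accepted by _validate_filter:
    valid = {"and": [{"=": {"project_id": "p1"}},
                     {"in": {"resource_id": ["r1", "r2"]}}]}
    also_valid = {"not": {"=": {"project_id": "p1"}}}

    # Rejected with jsonschema.ValidationError:
    invalid = {"or": [{"=": {"project_id": "p1"}}]}   # single child
    also_invalid = {"in": {"resource_id": []}}        # empty value list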
+"""Test the methods related to query.""" +import datetime + +import fixtures +import mock +from oslo_utils import timeutils +from oslotest import base +from oslotest import mockpatch +import wsme + +from ceilometer.alarm.storage import base as alarm_storage_base +from ceilometer.api.controllers.v2 import base as v2_base +from ceilometer.api.controllers.v2 import meters +from ceilometer.api.controllers.v2 import utils +from ceilometer import storage +from ceilometer.storage import base as storage_base +from ceilometer.tests import base as tests_base + + +class TestQuery(base.BaseTestCase): + def setUp(self): + super(TestQuery, self).setUp() + self.useFixture(fixtures.MonkeyPatch( + 'pecan.response', mock.MagicMock())) + + def test_get_value_as_type_with_integer(self): + query = v2_base.Query(field='metadata.size', + op='eq', + value='123', + type='integer') + expected = 123 + self.assertEqual(expected, query._get_value_as_type()) + + def test_get_value_as_type_with_float(self): + query = v2_base.Query(field='metadata.size', + op='eq', + value='123.456', + type='float') + expected = 123.456 + self.assertEqual(expected, query._get_value_as_type()) + + def test_get_value_as_type_with_boolean(self): + query = v2_base.Query(field='metadata.is_public', + op='eq', + value='True', + type='boolean') + expected = True + self.assertEqual(expected, query._get_value_as_type()) + + def test_get_value_as_type_with_string(self): + query = v2_base.Query(field='metadata.name', + op='eq', + value='linux', + type='string') + expected = 'linux' + self.assertEqual(expected, query._get_value_as_type()) + + def test_get_value_as_type_with_datetime(self): + query = v2_base.Query(field='metadata.date', + op='eq', + value='2014-01-01T05:00:00', + type='datetime') + self.assertIsInstance(query._get_value_as_type(), datetime.datetime) + self.assertIsNone(query._get_value_as_type().tzinfo) + + def test_get_value_as_type_with_integer_without_type(self): + query = v2_base.Query(field='metadata.size', + op='eq', + value='123') + expected = 123 + self.assertEqual(expected, query._get_value_as_type()) + + def test_get_value_as_type_with_float_without_type(self): + query = v2_base.Query(field='metadata.size', + op='eq', + value='123.456') + expected = 123.456 + self.assertEqual(expected, query._get_value_as_type()) + + def test_get_value_as_type_with_boolean_without_type(self): + query = v2_base.Query(field='metadata.is_public', + op='eq', + value='True') + expected = True + self.assertEqual(expected, query._get_value_as_type()) + + def test_get_value_as_type_with_string_without_type(self): + query = v2_base.Query(field='metadata.name', + op='eq', + value='linux') + expected = 'linux' + self.assertEqual(expected, query._get_value_as_type()) + + def test_get_value_as_type_with_bad_type(self): + query = v2_base.Query(field='metadata.size', + op='eq', + value='123.456', + type='blob') + self.assertRaises(wsme.exc.ClientSideError, query._get_value_as_type) + + def test_get_value_as_type_with_bad_value(self): + query = v2_base.Query(field='metadata.size', + op='eq', + value='fake', + type='integer') + self.assertRaises(wsme.exc.ClientSideError, query._get_value_as_type) + + def test_get_value_as_type_integer_expression_without_type(self): + # bug 1221736 + query = v2_base.Query(field='should_be_a_string', + op='eq', + value='WWW-Layer-4a80714f') + expected = 'WWW-Layer-4a80714f' + self.assertEqual(expected, query._get_value_as_type()) + + def test_get_value_as_type_boolean_expression_without_type(self): + # bug 1221736 + query = 
v2_base.Query(field='should_be_a_string', + op='eq', + value='True or False') + expected = 'True or False' + self.assertEqual(expected, query._get_value_as_type()) + + def test_get_value_as_type_with_syntax_error(self): + # bug 1221736 + value = 'WWW-Layer-4a80714f-0232-4580-aa5e-81494d1a4147-uolhh25p5xxm' + query = v2_base.Query(field='group_id', + op='eq', + value=value) + expected = value + self.assertEqual(expected, query._get_value_as_type()) + + def test_get_value_as_type_with_syntax_error_colons(self): + # bug 1221736 + value = 'Ref::StackId' + query = v2_base.Query(field='field_name', + op='eq', + value=value) + expected = value + self.assertEqual(expected, query._get_value_as_type()) + + +class TestValidateGroupByFields(base.BaseTestCase): + + def test_valid_field(self): + result = meters._validate_groupby_fields(['user_id']) + self.assertEqual(['user_id'], result) + + def test_valid_fields_multiple(self): + result = set(meters._validate_groupby_fields( + ['user_id', 'project_id', 'source'])) + self.assertEqual(set(['user_id', 'project_id', 'source']), result) + + def test_invalid_field(self): + self.assertRaises(wsme.exc.UnknownArgument, + meters._validate_groupby_fields, + ['wtf']) + + def test_invalid_field_multiple(self): + self.assertRaises(wsme.exc.UnknownArgument, + meters._validate_groupby_fields, + ['user_id', 'wtf', 'project_id', 'source']) + + def test_duplicate_fields(self): + result = set( + meters._validate_groupby_fields(['user_id', 'source', 'user_id']) + ) + self.assertEqual(set(['user_id', 'source']), result) + + +class TestQueryToKwArgs(tests_base.BaseTestCase): + def setUp(self): + super(TestQueryToKwArgs, self).setUp() + self.useFixture(mockpatch.PatchObject( + utils, 'sanitize_query', side_effect=lambda x, y, **z: x)) + self.useFixture(mockpatch.PatchObject( + utils, '_verify_query_segregation', side_effect=lambda x, **z: x)) + + def test_sample_filter_single(self): + q = [v2_base.Query(field='user_id', + op='eq', + value='uid')] + kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) + self.assertIn('user', kwargs) + self.assertEqual(1, len(kwargs)) + self.assertEqual('uid', kwargs['user']) + + def test_sample_filter_multi(self): + q = [v2_base.Query(field='user_id', + op='eq', + value='uid'), + v2_base.Query(field='project_id', + op='eq', + value='pid'), + v2_base.Query(field='resource_id', + op='eq', + value='rid'), + v2_base.Query(field='source', + op='eq', + value='source_name'), + v2_base.Query(field='meter', + op='eq', + value='meter_name')] + kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) + self.assertEqual(5, len(kwargs)) + self.assertEqual('uid', kwargs['user']) + self.assertEqual('pid', kwargs['project']) + self.assertEqual('rid', kwargs['resource']) + self.assertEqual('source_name', kwargs['source']) + self.assertEqual('meter_name', kwargs['meter']) + + def test_sample_filter_timestamp(self): + ts_start = timeutils.utcnow() + ts_end = ts_start + datetime.timedelta(minutes=5) + q = [v2_base.Query(field='timestamp', + op='lt', + value=str(ts_end)), + v2_base.Query(field='timestamp', + op='gt', + value=str(ts_start))] + kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) + self.assertEqual(4, len(kwargs)) + self.assertTimestampEqual(kwargs['start_timestamp'], ts_start) + self.assertTimestampEqual(kwargs['end_timestamp'], ts_end) + self.assertEqual('gt', kwargs['start_timestamp_op']) + self.assertEqual('lt', kwargs['end_timestamp_op']) + + def test_sample_filter_meta(self): + q = 
[v2_base.Query(field='metadata.size', + op='eq', + value='20'), + v2_base.Query(field='resource_metadata.id', + op='eq', + value='meta_id')] + kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) + self.assertEqual(1, len(kwargs)) + self.assertEqual(2, len(kwargs['metaquery'])) + self.assertEqual(20, kwargs['metaquery']['metadata.size']) + self.assertEqual('meta_id', kwargs['metaquery']['metadata.id']) + + def test_sample_filter_non_equality_on_metadata(self): + queries = [v2_base.Query(field='resource_metadata.image_id', + op='gt', + value='image', + type='string'), + v2_base.Query(field='metadata.ramdisk_id', + op='le', + value='ramdisk', + type='string')] + with mock.patch('pecan.request') as request: + request.headers.return_value = {'X-ProjectId': 'foobar'} + self.assertRaises( + wsme.exc.InvalidInput, + utils.query_to_kwargs, + queries, + storage.SampleFilter.__init__) + + def test_sample_filter_invalid_field(self): + q = [v2_base.Query(field='invalid', + op='eq', + value='20')] + self.assertRaises( + wsme.exc.UnknownArgument, + utils.query_to_kwargs, q, storage.SampleFilter.__init__) + + def test_sample_filter_invalid_op(self): + q = [v2_base.Query(field='user_id', + op='lt', + value='20')] + self.assertRaises( + wsme.exc.InvalidInput, + utils.query_to_kwargs, q, storage.SampleFilter.__init__) + + def test_sample_filter_timestamp_invalid_op(self): + ts_start = timeutils.utcnow() + q = [v2_base.Query(field='timestamp', + op='eq', + value=str(ts_start))] + self.assertRaises( + wsme.exc.InvalidInput, + utils.query_to_kwargs, q, storage.SampleFilter.__init__) + + def test_sample_filter_exclude_internal(self): + queries = [v2_base.Query(field=f, + op='eq', + value='fake', + type='string') + for f in ['y', 'on_behalf_of', 'x']] + with mock.patch('pecan.request') as request: + request.headers.return_value = {'X-ProjectId': 'foobar'} + self.assertRaises(wsme.exc.ClientSideError, + utils.query_to_kwargs, + queries, + storage.SampleFilter.__init__, + internal_keys=['on_behalf_of']) + + def test_sample_filter_self_always_excluded(self): + queries = [v2_base.Query(field='user_id', + op='eq', + value='20')] + with mock.patch('pecan.request') as request: + request.headers.return_value = {'X-ProjectId': 'foobar'} + kwargs = utils.query_to_kwargs(queries, + storage.SampleFilter.__init__) + self.assertFalse('self' in kwargs) + + def test_sample_filter_translation(self): + queries = [v2_base.Query(field=f, + op='eq', + value='fake_%s' % f, + type='string') for f in ['user_id', + 'project_id', + 'resource_id']] + with mock.patch('pecan.request') as request: + request.headers.return_value = {'X-ProjectId': 'foobar'} + kwargs = utils.query_to_kwargs(queries, + storage.SampleFilter.__init__) + for o in ['user', 'project', 'resource']: + self.assertEqual('fake_%s_id' % o, kwargs.get(o)) + + def test_timestamp_validation(self): + q = [v2_base.Query(field='timestamp', + op='le', + value='123')] + + exc = self.assertRaises( + wsme.exc.InvalidInput, + utils.query_to_kwargs, q, storage.SampleFilter.__init__) + expected_exc = wsme.exc.InvalidInput('timestamp', '123', + 'invalid timestamp format') + self.assertEqual(str(expected_exc), str(exc)) + + def test_get_alarm_changes_filter_valid_fields(self): + q = [v2_base.Query(field='abc', + op='eq', + value='abc')] + exc = self.assertRaises( + wsme.exc.UnknownArgument, + utils.query_to_kwargs, q, + alarm_storage_base.Connection.get_alarm_changes) + valid_keys = ['alarm_id', 'on_behalf_of', 'project', 'search_offset', + 'severity', 'timestamp', 'type', 
'user'] + msg = ("unrecognized field in query: %s, " + "valid keys: %s") % (q, valid_keys) + expected_exc = wsme.exc.UnknownArgument('abc', msg) + self.assertEqual(str(expected_exc), str(exc)) + + def test_sample_filter_valid_fields(self): + q = [v2_base.Query(field='abc', + op='eq', + value='abc')] + exc = self.assertRaises( + wsme.exc.UnknownArgument, + utils.query_to_kwargs, q, storage.SampleFilter.__init__) + valid_keys = ['message_id', 'meter', 'project', 'resource', + 'search_offset', 'source', 'timestamp', 'user'] + msg = ("unrecognized field in query: %s, " + "valid keys: %s") % (q, valid_keys) + expected_exc = wsme.exc.UnknownArgument('abc', msg) + self.assertEqual(str(expected_exc), str(exc)) + + def test_get_meters_filter_valid_fields(self): + q = [v2_base.Query(field='abc', + op='eq', + value='abc')] + exc = self.assertRaises( + wsme.exc.UnknownArgument, + utils.query_to_kwargs, q, storage_base.Connection.get_meters) + valid_keys = ['project', 'resource', 'source', 'user'] + msg = ("unrecognized field in query: %s, " + "valid keys: %s") % (q, valid_keys) + expected_exc = wsme.exc.UnknownArgument('abc', msg) + self.assertEqual(str(expected_exc), str(exc)) + + def test_get_resources_filter_valid_fields(self): + q = [v2_base.Query(field='abc', + op='eq', + value='abc')] + exc = self.assertRaises( + wsme.exc.UnknownArgument, + utils.query_to_kwargs, q, storage_base.Connection.get_resources) + valid_keys = ['project', 'resource', + 'search_offset', 'source', 'timestamp', 'user'] + msg = ("unrecognized field in query: %s, " + "valid keys: %s") % (q, valid_keys) + expected_exc = wsme.exc.UnknownArgument('abc', msg) + self.assertEqual(str(expected_exc), str(exc)) + + def test_get_alarms_filter_valid_fields(self): + q = [v2_base.Query(field='abc', + op='eq', + value='abc')] + exc = self.assertRaises( + wsme.exc.UnknownArgument, + utils.query_to_kwargs, q, + alarm_storage_base.Connection.get_alarms) + valid_keys = ['alarm_id', 'enabled', 'meter', 'name', + 'project', 'severity', 'state', 'type', 'user'] + msg = ("unrecognized field in query: %s, " + "valid keys: %s") % (q, valid_keys) + expected_exc = wsme.exc.UnknownArgument('abc', msg) + self.assertEqual(str(expected_exc), str(exc)) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/api/v2/test_statistics.py ceilometer-5.0.0~b3/ceilometer/tests/unit/api/v2/test_statistics.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/api/v2/test_statistics.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/api/v2/test_statistics.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,105 @@ +# +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
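[editor's note] The *_valid_fields tests above also document the field translation performed by query_to_kwargs: API-level names such as user_id/project_id/resource_id become the storage-filter arguments user/project/resource, and any field outside the target callable's signature raises UnknownArgument carrying the sorted list of valid keys. Minimal usage sketch, per the tests:

    # Sketch of the translation the tests assert.
    q = [v2_base.Query(field='user_id', op='eq', value='uid')]
    kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__)
    # kwargs == {'user': 'uid'}; field='abc' instead would raise
    # wsme.exc.UnknownArgument listing the valid keys.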
+"""Test statistics objects.""" + +import datetime + +from oslotest import base + +from ceilometer.api.controllers.v2 import meters + + +class TestStatisticsDuration(base.BaseTestCase): + + def setUp(self): + super(TestStatisticsDuration, self).setUp() + + # Create events relative to the range and pretend + # that the intervening events exist. + + self.early1 = datetime.datetime(2012, 8, 27, 7, 0) + self.early2 = datetime.datetime(2012, 8, 27, 17, 0) + + self.start = datetime.datetime(2012, 8, 28, 0, 0) + + self.middle1 = datetime.datetime(2012, 8, 28, 8, 0) + self.middle2 = datetime.datetime(2012, 8, 28, 18, 0) + + self.end = datetime.datetime(2012, 8, 28, 23, 59) + + self.late1 = datetime.datetime(2012, 8, 29, 9, 0) + self.late2 = datetime.datetime(2012, 8, 29, 19, 0) + + def test_nulls(self): + s = meters.Statistics(duration_start=None, + duration_end=None, + start_timestamp=None, + end_timestamp=None) + self.assertIsNone(s.duration_start) + self.assertIsNone(s.duration_end) + self.assertIsNone(s.duration) + + def test_overlap_range_start(self): + s = meters.Statistics(duration_start=self.early1, + duration_end=self.middle1, + start_timestamp=self.start, + end_timestamp=self.end) + self.assertEqual(self.start, s.duration_start) + self.assertEqual(self.middle1, s.duration_end) + self.assertEqual(8 * 60 * 60, s.duration) + + def test_within_range(self): + s = meters.Statistics(duration_start=self.middle1, + duration_end=self.middle2, + start_timestamp=self.start, + end_timestamp=self.end) + self.assertEqual(self.middle1, s.duration_start) + self.assertEqual(self.middle2, s.duration_end) + self.assertEqual(10 * 60 * 60, s.duration) + + def test_within_range_zero_duration(self): + s = meters.Statistics(duration_start=self.middle1, + duration_end=self.middle1, + start_timestamp=self.start, + end_timestamp=self.end) + self.assertEqual(self.middle1, s.duration_start) + self.assertEqual(self.middle1, s.duration_end) + self.assertEqual(0, s.duration) + + def test_overlap_range_end(self): + s = meters.Statistics(duration_start=self.middle2, + duration_end=self.late1, + start_timestamp=self.start, + end_timestamp=self.end) + self.assertEqual(self.middle2, s.duration_start) + self.assertEqual(self.end, s.duration_end) + self.assertEqual(((6 * 60) - 1) * 60, s.duration) + + def test_after_range(self): + s = meters.Statistics(duration_start=self.late1, + duration_end=self.late2, + start_timestamp=self.start, + end_timestamp=self.end) + self.assertIsNone(s.duration_start) + self.assertIsNone(s.duration_end) + self.assertIsNone(s.duration) + + def test_without_timestamp(self): + s = meters.Statistics(duration_start=self.late1, + duration_end=self.late2, + start_timestamp=None, + end_timestamp=None) + self.assertEqual(self.late1, s.duration_start) + self.assertEqual(self.late2, s.duration_end) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/api/v2/test_wsme_custom_type.py ceilometer-5.0.0~b3/ceilometer/tests/unit/api/v2/test_wsme_custom_type.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/api/v2/test_wsme_custom_type.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/api/v2/test_wsme_custom_type.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,33 @@ +# +# Copyright 2013 eNovance +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from oslotest import base +import wsme + +from ceilometer.api.controllers.v2 import base as v2_base + + +class TestWsmeCustomType(base.BaseTestCase): + + def test_advenum_default(self): + class dummybase(wsme.types.Base): + ae = v2_base.AdvEnum("name", str, "one", "other", default="other") + + obj = dummybase() + self.assertEqual("other", obj.ae) + + obj = dummybase(ae="one") + self.assertEqual("one", obj.ae) + + self.assertRaises(wsme.exc.InvalidInput, dummybase, ae="not exists") diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/compute/notifications/test_instance.py ceilometer-5.0.0~b3/ceilometer/tests/unit/compute/notifications/test_instance.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/compute/notifications/test_instance.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/compute/notifications/test_instance.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,685 @@ +# +# Copyright 2012 New Dream Network, LLC (DreamHost) +# Copyright 2013 eNovance +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Tests for converters for producing compute counter messages from +notification events. 
+""" +from oslotest import base + +from ceilometer.compute.notifications import instance +from ceilometer import sample + + +INSTANCE_CREATE_END = { + u'_context_auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', + u'_context_is_admin': True, + u'_context_project_id': u'7c150a59fe714e6f9263774af9688f0e', + u'_context_quota_class': None, + u'_context_read_deleted': u'no', + u'_context_remote_address': u'10.0.2.15', + u'_context_request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', + u'_context_roles': [u'admin'], + u'_context_timestamp': u'2012-05-08T20:23:41.425105', + u'_context_user_id': u'1e3ce043029547f1a61c1996d1a531a2', + u'event_type': u'compute.instance.create.end', + u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', + u'payload': {u'created_at': u'2012-05-08 20:23:41', + u'deleted_at': u'', + u'disk_gb': 0, + u'display_name': u'testme', + u'fixed_ips': [{u'address': u'10.0.0.2', + u'floating_ips': [], + u'meta': {}, + u'type': u'fixed', + u'version': 4}], + u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', + u'instance_id': u'9f9d01b9-4a58-4271-9e27-398b21ab20d1', + u'instance_type': u'm1.tiny', + u'instance_type_id': 2, + u'launched_at': u'2012-05-08 20:23:47.985999', + u'memory_mb': 512, + u'state': u'active', + u'state_description': u'', + u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', + u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', + u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', + u'vcpus': 1, + u'root_gb': 0, + u'ephemeral_gb': 0, + u'host': u'compute-host-name', + u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', + u'os_type': u'linux?', + u'architecture': u'x86', + u'image_ref': u'UUID', + u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', + u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', + }, + u'priority': u'INFO', + u'publisher_id': u'compute.vagrant-precise', + u'timestamp': u'2012-05-08 20:23:48.028195', +} + +INSTANCE_DELETE_START = { + u'_context_auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', + u'_context_is_admin': True, + u'_context_project_id': u'7c150a59fe714e6f9263774af9688f0e', + u'_context_quota_class': None, + u'_context_read_deleted': u'no', + u'_context_remote_address': u'10.0.2.15', + u'_context_request_id': u'req-fb3c4546-a2e5-49b7-9fd2-a63bd658bc39', + u'_context_roles': [u'admin'], + u'_context_timestamp': u'2012-05-08T20:24:14.547374', + u'_context_user_id': u'1e3ce043029547f1a61c1996d1a531a2', + u'event_type': u'compute.instance.delete.start', + u'message_id': u'a15b94ee-cb8e-4c71-9abe-14aa80055fb4', + u'payload': {u'created_at': u'2012-05-08 20:23:41', + u'deleted_at': u'', + u'disk_gb': 0, + u'display_name': u'testme', + u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', + u'instance_id': u'9f9d01b9-4a58-4271-9e27-398b21ab20d1', + u'instance_type': u'm1.tiny', + u'instance_type_id': 2, + u'launched_at': u'2012-05-08 20:23:47', + u'memory_mb': 512, + u'state': u'active', + u'state_description': u'deleting', + u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', + u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', + u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', + u'vcpus': 1, + u'root_gb': 0, + u'ephemeral_gb': 0, + u'host': u'compute-host-name', + u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', + u'os_type': u'linux?', + u'architecture': u'x86', + u'image_ref': u'UUID', + u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', + u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', + }, + u'priority': u'INFO', + u'publisher_id': u'compute.vagrant-precise', + u'timestamp': u'2012-05-08 
20:24:14.824743', +} + +INSTANCE_EXISTS = { + u'_context_auth_token': None, + u'_context_is_admin': True, + u'_context_project_id': None, + u'_context_quota_class': None, + u'_context_read_deleted': u'no', + u'_context_remote_address': None, + u'_context_request_id': u'req-659a8eb2-4372-4c01-9028-ad6e40b0ed22', + u'_context_roles': [u'admin'], + u'_context_timestamp': u'2012-05-08T16:03:43.760204', + u'_context_user_id': None, + u'event_type': u'compute.instance.exists', + u'message_id': u'4b884c03-756d-4c06-8b42-80b6def9d302', + u'payload': {u'audit_period_beginning': u'2012-05-08 15:00:00', + u'audit_period_ending': u'2012-05-08 16:00:00', + u'bandwidth': {}, + u'created_at': u'2012-05-07 22:16:18', + u'deleted_at': u'', + u'disk_gb': 0, + u'display_name': u'testme', + u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', + u'instance_id': u'3a513875-95c9-4012-a3e7-f90c678854e5', + u'instance_type': u'm1.tiny', + u'instance_type_id': 2, + u'launched_at': u'2012-05-07 23:01:27', + u'memory_mb': 512, + u'state': u'active', + u'state_description': u'', + u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', + u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', + u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', + u'vcpus': 1, + u'root_gb': 0, + u'ephemeral_gb': 0, + u'host': u'compute-host-name', + u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', + u'os_type': u'linux?', + u'architecture': u'x86', + u'image_ref': u'UUID', + u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', + u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', + }, + u'priority': u'INFO', + u'publisher_id': u'compute.vagrant-precise', + u'timestamp': u'2012-05-08 16:03:44.122481', +} + +INSTANCE_EXISTS_METADATA_LIST = { + u'_context_auth_token': None, + u'_context_is_admin': True, + u'_context_project_id': None, + u'_context_quota_class': None, + u'_context_read_deleted': u'no', + u'_context_remote_address': None, + u'_context_request_id': u'req-659a8eb2-4372-4c01-9028-ad6e40b0ed22', + u'_context_roles': [u'admin'], + u'_context_timestamp': u'2012-05-08T16:03:43.760204', + u'_context_user_id': None, + u'event_type': u'compute.instance.exists', + u'message_id': u'4b884c03-756d-4c06-8b42-80b6def9d302', + u'payload': {u'audit_period_beginning': u'2012-05-08 15:00:00', + u'audit_period_ending': u'2012-05-08 16:00:00', + u'bandwidth': {}, + u'created_at': u'2012-05-07 22:16:18', + u'deleted_at': u'', + u'disk_gb': 0, + u'display_name': u'testme', + u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', + u'instance_id': u'3a513875-95c9-4012-a3e7-f90c678854e5', + u'instance_type': u'm1.tiny', + u'instance_type_id': 2, + u'launched_at': u'2012-05-07 23:01:27', + u'memory_mb': 512, + u'state': u'active', + u'state_description': u'', + u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', + u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', + u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', + u'vcpus': 1, + u'root_gb': 0, + u'metadata': [], + u'ephemeral_gb': 0, + u'host': u'compute-host-name', + u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', + u'os_type': u'linux?', + u'architecture': u'x86', + u'image_ref': u'UUID', + u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', + u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', + }, + u'priority': u'INFO', + u'publisher_id': u'compute.vagrant-precise', + u'timestamp': u'2012-05-08 16:03:44.122481', +} + + +INSTANCE_FINISH_RESIZE_END = { + u'_context_roles': [u'admin'], + u'_context_request_id': u'req-e3f71bb9-e9b9-418b-a9db-a5950c851b25', + 
u'_context_quota_class': None, + u'event_type': u'compute.instance.finish_resize.end', + u'_context_user_name': u'admin', + u'_context_project_name': u'admin', + u'timestamp': u'2013-01-04 15:10:17.436974', + u'_context_is_admin': True, + u'message_id': u'a2f7770d-b85d-4797-ab10-41407a44368e', + u'_context_auth_token': None, + u'_context_instance_lock_checked': False, + u'_context_project_id': u'cea4b25edb484e5392727181b7721d29', + u'_context_timestamp': u'2013-01-04T15:08:39.162612', + u'_context_read_deleted': u'no', + u'_context_user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed', + u'_context_remote_address': u'10.147.132.184', + u'publisher_id': u'compute.ip-10-147-132-184.ec2.internal', + u'payload': {u'state_description': u'', + u'availability_zone': None, + u'ephemeral_gb': 0, + u'instance_type_id': 5, + u'deleted_at': u'', + u'fixed_ips': [{u'floating_ips': [], + u'label': u'private', + u'version': 4, + u'meta': {}, + u'address': u'10.0.0.3', + u'type': u'fixed'}], + u'memory_mb': 2048, + u'user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed', + u'reservation_id': u'r-u3fvim06', + u'hostname': u's1', + u'state': u'resized', + u'launched_at': u'2013-01-04T15:10:14.923939', + u'metadata': {u'metering.server_group': u'Group_A', + u'AutoScalingGroupName': u'tyky-Group_Awste7', + u'metering.foo.bar': u'true'}, + u'ramdisk_id': u'5f23128e-5525-46d8-bc66-9c30cd87141a', + u'access_ip_v6': None, + u'disk_gb': 20, + u'access_ip_v4': None, + u'kernel_id': u'571478e0-d5e7-4c2e-95a5-2bc79443c28a', + u'host': u'ip-10-147-132-184.ec2.internal', + u'display_name': u's1', + u'image_ref_url': u'http://10.147.132.184:9292/images/' + 'a130b9d9-e00e-436e-9782-836ccef06e8a', + u'root_gb': 20, + u'tenant_id': u'cea4b25edb484e5392727181b7721d29', + u'created_at': u'2013-01-04T11:21:48.000000', + u'instance_id': u'648e8963-6886-4c3c-98f9-4511c292f86b', + u'instance_type': u'm1.small', + u'vcpus': 1, + u'image_meta': {u'kernel_id': + u'571478e0-d5e7-4c2e-95a5-2bc79443c28a', + u'ramdisk_id': + u'5f23128e-5525-46d8-bc66-9c30cd87141a', + u'base_image_ref': + u'a130b9d9-e00e-436e-9782-836ccef06e8a'}, + u'architecture': None, + u'os_type': None + }, + u'priority': u'INFO' +} + +INSTANCE_RESIZE_REVERT_END = { + u'_context_roles': [u'admin'], + u'_context_request_id': u'req-9da1d714-dabe-42fd-8baa-583e57cd4f1a', + u'_context_quota_class': None, + u'event_type': u'compute.instance.resize.revert.end', + u'_context_user_name': u'admin', + u'_context_project_name': u'admin', + u'timestamp': u'2013-01-04 15:20:32.009532', + u'_context_is_admin': True, + u'message_id': u'c48deeba-d0c3-4154-b3db-47480b52267a', + u'_context_auth_token': None, + u'_context_instance_lock_checked': False, + u'_context_project_id': u'cea4b25edb484e5392727181b7721d29', + u'_context_timestamp': u'2013-01-04T15:19:51.018218', + u'_context_read_deleted': u'no', + u'_context_user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed', + u'_context_remote_address': u'10.147.132.184', + u'publisher_id': u'compute.ip-10-147-132-184.ec2.internal', + u'payload': {u'state_description': u'resize_reverting', + u'availability_zone': None, + u'ephemeral_gb': 0, + u'instance_type_id': 2, + u'deleted_at': u'', + u'reservation_id': u'r-u3fvim06', + u'memory_mb': 512, + u'user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed', + u'hostname': u's1', + u'state': u'resized', + u'launched_at': u'2013-01-04T15:10:14.000000', + u'metadata': {u'metering.server_group': u'Group_A', + u'AutoScalingGroupName': u'tyky-Group_A-wste7', + u'metering.foo.bar': u'true'}, + u'ramdisk_id': 
u'5f23128e-5525-46d8-bc66-9c30cd87141a', + u'access_ip_v6': None, + u'disk_gb': 0, + u'access_ip_v4': None, + u'kernel_id': u'571478e0-d5e7-4c2e-95a5-2bc79443c28a', + u'host': u'ip-10-147-132-184.ec2.internal', + u'display_name': u's1', + u'image_ref_url': u'http://10.147.132.184:9292/images/' + 'a130b9d9-e00e-436e-9782-836ccef06e8a', + u'root_gb': 0, + u'tenant_id': u'cea4b25edb484e5392727181b7721d29', + u'created_at': u'2013-01-04T11:21:48.000000', + u'instance_id': u'648e8963-6886-4c3c-98f9-4511c292f86b', + u'instance_type': u'm1.tiny', + u'vcpus': 1, + u'image_meta': {u'kernel_id': + u'571478e0-d5e7-4c2e-95a5-2bc79443c28a', + u'ramdisk_id': + u'5f23128e-5525-46d8-bc66-9c30cd87141a', + u'base_image_ref': + u'a130b9d9-e00e-436e-9782-836ccef06e8a'}, + u'architecture': None, + u'os_type': None + }, + u'priority': u'INFO' +} + +INSTANCE_DELETE_SAMPLES = { + u'_context_roles': [u'admin'], + u'_context_request_id': u'req-9da1d714-dabe-42fd-8baa-583e57cd4f1a', + u'_context_quota_class': None, + u'event_type': u'compute.instance.delete.samples', + u'_context_user_name': u'admin', + u'_context_project_name': u'admin', + u'timestamp': u'2013-01-04 15:20:32.009532', + u'_context_is_admin': True, + u'message_id': u'c48deeba-d0c3-4154-b3db-47480b52267a', + u'_context_auth_token': None, + u'_context_instance_lock_checked': False, + u'_context_project_id': u'cea4b25edb484e5392727181b7721d29', + u'_context_timestamp': u'2013-01-04T15:19:51.018218', + u'_context_read_deleted': u'no', + u'_context_user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed', + u'_context_remote_address': u'10.147.132.184', + u'publisher_id': u'compute.ip-10-147-132-184.ec2.internal', + u'payload': {u'state_description': u'resize_reverting', + u'availability_zone': None, + u'ephemeral_gb': 0, + u'instance_type_id': 2, + u'deleted_at': u'', + u'reservation_id': u'r-u3fvim06', + u'memory_mb': 512, + u'user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed', + u'hostname': u's1', + u'state': u'resized', + u'launched_at': u'2013-01-04T15:10:14.000000', + u'metadata': {u'metering.server_group': u'Group_A', + u'AutoScalingGroupName': u'tyky-Group_A-wste7', + u'metering.foo.bar': u'true'}, + u'ramdisk_id': u'5f23128e-5525-46d8-bc66-9c30cd87141a', + u'access_ip_v6': None, + u'disk_gb': 0, + u'access_ip_v4': None, + u'kernel_id': u'571478e0-d5e7-4c2e-95a5-2bc79443c28a', + u'host': u'ip-10-147-132-184.ec2.internal', + u'display_name': u's1', + u'image_ref_url': u'http://10.147.132.184:9292/images/' + 'a130b9d9-e00e-436e-9782-836ccef06e8a', + u'root_gb': 0, + u'tenant_id': u'cea4b25edb484e5392727181b7721d29', + u'created_at': u'2013-01-04T11:21:48.000000', + u'instance_id': u'648e8963-6886-4c3c-98f9-4511c292f86b', + u'instance_type': u'm1.tiny', + u'vcpus': 1, + u'image_meta': {u'kernel_id': + u'571478e0-d5e7-4c2e-95a5-2bc79443c28a', + u'ramdisk_id': + u'5f23128e-5525-46d8-bc66-9c30cd87141a', + u'base_image_ref': + u'a130b9d9-e00e-436e-9782-836ccef06e8a'}, + u'architecture': None, + u'os_type': None, + u'samples': [{u'name': u'sample-name1', + u'type': u'sample-type1', + u'unit': u'sample-units1', + u'volume': 1}, + {u'name': u'sample-name2', + u'type': u'sample-type2', + u'unit': u'sample-units2', + u'volume': 2}, + ], + }, + u'priority': u'INFO' +} + +INSTANCE_SCHEDULED = { + u'_context_request_id': u'req-f28a836a-32bf-4cc3-940a-3515878c181f', + u'_context_quota_class': None, + u'event_type': u'scheduler.run_instance.scheduled', + u'_context_service_catalog': [{ + u'endpoints': [{ + u'adminURL': + 
u'http://172.16.12.21:8776/v1/2bd766a095b44486bf07cf7f666997eb', + u'region': u'RegionOne', + u'internalURL': + u'http://172.16.12.21:8776/v1/2bd766a095b44486bf07cf7f666997eb', + u'id': u'30cb904fdc294eea9b225e06b2d0d4eb', + u'publicURL': + u'http://172.16.12.21:8776/v1/2bd766a095b44486bf07cf7f666997eb'}], + u'endpoints_links': [], + u'type': u'volume', + u'name': u'cinder'}], + u'_context_auth_token': u'TOK', + u'_context_user_id': u'0a757cd896b64b65ba3784afef564116', + u'payload': { + 'instance_id': 'fake-uuid1-1', + u'weighted_host': {u'host': u'eglynn-f19-devstack3', u'weight': 1.0}, + u'request_spec': { + u'num_instances': 1, + u'block_device_mapping': [{ + u'instance_uuid': u'9206baae-c3b6-41bc-96f2-2c0726ff51c8', + u'guest_format': None, + u'boot_index': 0, + u'no_device': None, + u'connection_info': None, + u'volume_id': None, + u'volume_size': None, + u'device_name': None, + u'disk_bus': None, + u'image_id': u'0560ac3f-3bcd-434d-b012-8dd7a212b73b', + u'source_type': u'image', + u'device_type': u'disk', + u'snapshot_id': None, + u'destination_type': u'local', + u'delete_on_termination': True}], + u'image': { + u'status': u'active', + u'name': u'cirros-0.3.1-x86_64-uec', + u'deleted': False, + u'container_format': u'ami', + u'created_at': u'2014-02-18T13:16:26.000000', + u'disk_format': u'ami', + u'updated_at': u'2014-02-18T13:16:27.000000', + u'properties': { + u'kernel_id': u'c8794c1a-4158-42cc-9f97-d0d250c9c6a4', + u'ramdisk_id': u'4999726c-545c-4a9e-bfc0-917459784275'}, + u'min_disk': 0, + u'min_ram': 0, + u'checksum': u'f8a2eeee2dc65b3d9b6e63678955bd83', + u'owner': u'2bd766a095b44486bf07cf7f666997eb', + u'is_public': True, + u'deleted_at': None, + u'id': u'0560ac3f-3bcd-434d-b012-8dd7a212b73b', + u'size': 25165824}, + u'instance_type': { + u'root_gb': 1, + u'name': u'm1.tiny', + u'ephemeral_gb': 0, + u'memory_mb': 512, + u'vcpus': 1, + u'extra_specs': {}, + u'swap': 0, + u'rxtx_factor': 1.0, + u'flavorid': u'1', + u'vcpu_weight': None, + u'id': 2}, + u'instance_properties': { + u'vm_state': u'building', + u'availability_zone': None, + u'terminated_at': None, + u'ephemeral_gb': 0, + u'instance_type_id': 2, + u'user_data': None, + u'cleaned': False, + u'vm_mode': None, + u'deleted_at': None, + u'reservation_id': u'r-ven5q6om', + u'id': 15, + u'security_groups': [{ + u'deleted_at': None, + u'user_id': u'0a757cd896b64b65ba3784afef564116', + u'description': u'default', + u'deleted': False, + u'created_at': u'2014-02-19T11:02:31.000000', + u'updated_at': None, + u'project_id': u'2bd766a095b44486bf07cf7f666997eb', + u'id': 1, + u'name': u'default'}], + u'disable_terminate': False, + u'root_device_name': None, + u'display_name': u'new', + u'uuid': u'9206baae-c3b6-41bc-96f2-2c0726ff51c8', + u'default_swap_device': None, + u'info_cache': { + u'instance_uuid': u'9206baae-c3b6-41bc-96f2-2c0726ff51c8', + u'deleted': False, + u'created_at': u'2014-03-05T12:44:00.000000', + u'updated_at': None, + u'network_info': [], + u'deleted_at': None}, + u'hostname': u'new', + u'launched_on': None, + u'display_description': u'new', + u'key_data': None, + u'deleted': False, + u'config_drive': u'', + u'power_state': 0, + u'default_ephemeral_device': None, + u'progress': 0, + u'project_id': u'2bd766a095b44486bf07cf7f666997eb', + u'launched_at': None, + u'scheduled_at': None, + u'node': None, + u'ramdisk_id': u'4999726c-545c-4a9e-bfc0-917459784275', + u'access_ip_v6': None, + u'access_ip_v4': None, + u'kernel_id': u'c8794c1a-4158-42cc-9f97-d0d250c9c6a4', + u'key_name': None, + u'updated_at': None, + 
u'host': None, + u'root_gb': 1, + u'user_id': u'0a757cd896b64b65ba3784afef564116', + u'system_metadata': { + u'image_kernel_id': + u'c8794c1a-4158-42cc-9f97-d0d250c9c6a4', + u'image_min_disk': u'1', + u'instance_type_memory_mb': u'512', + u'instance_type_swap': u'0', + u'instance_type_vcpu_weight': None, + u'instance_type_root_gb': u'1', + u'instance_type_name': u'm1.tiny', + u'image_ramdisk_id': + u'4999726c-545c-4a9e-bfc0-917459784275', + u'instance_type_id': u'2', + u'instance_type_ephemeral_gb': u'0', + u'instance_type_rxtx_factor': u'1.0', + u'instance_type_flavorid': u'1', + u'instance_type_vcpus': u'1', + u'image_container_format': u'ami', + u'image_min_ram': u'0', + u'image_disk_format': u'ami', + u'image_base_image_ref': + u'0560ac3f-3bcd-434d-b012-8dd7a212b73b'}, + u'task_state': u'scheduling', + u'shutdown_terminate': False, + u'cell_name': None, + u'ephemeral_key_uuid': None, + u'locked': False, + u'name': u'instance-0000000f', + u'created_at': u'2014-03-05T12:44:00.000000', + u'locked_by': None, + u'launch_index': 0, + u'memory_mb': 512, + u'vcpus': 1, + u'image_ref': u'0560ac3f-3bcd-434d-b012-8dd7a212b73b', + u'architecture': None, + u'auto_disk_config': False, + u'os_type': None, + u'metadata': {u'metering.server_group': u'Group_A', + u'AutoScalingGroupName': u'tyky-Group_Awste7', + u'metering.foo.bar': u'true'}}, + u'security_group': [u'default'], + u'instance_uuids': [u'9206baae-c3b6-41bc-96f2-2c0726ff51c8']}}, + u'priority': u'INFO', + u'_context_is_admin': True, + u'_context_timestamp': u'2014-03-05T12:44:00.135674', + u'publisher_id': u'scheduler.eglynn-f19-devstack3', + u'message_id': u'd6c1ae63-a26b-47c7-8397-8794216e09dd', + u'_context_remote_address': u'172.16.12.21', + u'_context_roles': [u'_member_', u'admin'], + u'timestamp': u'2014-03-05 12:44:00.733758', + u'_context_user': u'0a757cd896b64b65ba3784afef564116', + u'_unique_id': u'2af47cbdde604ff794bb046f3f9db1e2', + u'_context_project_name': u'admin', + u'_context_read_deleted': u'no', + u'_context_tenant': u'2bd766a095b44486bf07cf7f666997eb', + u'_context_instance_lock_checked': False, + u'_context_project_id': u'2bd766a095b44486bf07cf7f666997eb', + u'_context_user_name': u'admin' +} + + +class TestNotifications(base.BaseTestCase): + + def test_process_notification(self): + info = list(instance.Instance(None).process_notification( + INSTANCE_CREATE_END + ))[0] + for name, actual, expected in [ + ('counter_name', info.name, 'instance'), + ('counter_type', info.type, sample.TYPE_GAUGE), + ('counter_volume', info.volume, 1), + ('timestamp', info.timestamp, + INSTANCE_CREATE_END['timestamp']), + ('resource_id', info.resource_id, + INSTANCE_CREATE_END['payload']['instance_id']), + ('instance_type_id', + info.resource_metadata['instance_type_id'], + INSTANCE_CREATE_END['payload']['instance_type_id']), + ('host', info.resource_metadata['host'], + INSTANCE_CREATE_END['publisher_id']), + ]: + self.assertEqual(expected, actual, name) + + @staticmethod + def _find_counter(counters, name): + return filter(lambda counter: counter.name == name, counters)[0] + + def _verify_user_metadata(self, metadata): + self.assertIn('user_metadata', metadata) + user_meta = metadata['user_metadata'] + self.assertEqual('Group_A', user_meta.get('server_group')) + self.assertNotIn('AutoScalingGroupName', user_meta) + self.assertIn('foo_bar', user_meta) + self.assertNotIn('foo.bar', user_meta) + + def test_instance_create_instance(self): + ic = instance.Instance(None) + counters = list(ic.process_notification(INSTANCE_CREATE_END)) + 
+        self.assertEqual(1, len(counters))
+        c = counters[0]
+        self.assertEqual(1, c.volume)
+
+    def test_instance_exists_instance(self):
+        ic = instance.Instance(None)
+        counters = list(ic.process_notification(INSTANCE_EXISTS))
+        self.assertEqual(1, len(counters))
+
+    def test_instance_exists_metadata_list(self):
+        ic = instance.Instance(None)
+        counters = list(ic.process_notification(INSTANCE_EXISTS_METADATA_LIST))
+        self.assertEqual(1, len(counters))
+
+    def test_instance_delete_instance(self):
+        ic = instance.Instance(None)
+        counters = list(ic.process_notification(INSTANCE_DELETE_START))
+        self.assertEqual(1, len(counters))
+
+    def test_instance_finish_resize_instance(self):
+        ic = instance.Instance(None)
+        counters = list(ic.process_notification(INSTANCE_FINISH_RESIZE_END))
+        self.assertEqual(1, len(counters))
+        c = counters[0]
+        self.assertEqual(1, c.volume)
+        self._verify_user_metadata(c.resource_metadata)
+
+    def test_instance_resize_finish_instance(self):
+        ic = instance.Instance(None)
+        counters = list(ic.process_notification(INSTANCE_FINISH_RESIZE_END))
+        self.assertEqual(1, len(counters))
+        c = counters[0]
+        self.assertEqual(1, c.volume)
+        self._verify_user_metadata(c.resource_metadata)
+
+    def test_instance_delete_samples(self):
+        ic = instance.InstanceDelete(None)
+        counters = list(ic.process_notification(INSTANCE_DELETE_SAMPLES))
+        self.assertEqual(2, len(counters))
+        names = [c.name for c in counters]
+        self.assertEqual(['sample-name1', 'sample-name2'], names)
+        c = counters[0]
+        self._verify_user_metadata(c.resource_metadata)
+
+    def test_instance_scheduled(self):
+        ic = instance.InstanceScheduled(None)
+
+        self.assertIn(INSTANCE_SCHEDULED['event_type'],
+                      ic.event_types)
+
+        counters = list(ic.process_notification(INSTANCE_SCHEDULED))
+        self.assertEqual(1, len(counters))
+        names = [c.name for c in counters]
+        self.assertEqual(['instance.scheduled'], names)
+        rid = [c.resource_id for c in counters]
+        self.assertEqual(['fake-uuid1-1'], rid)
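
The _verify_user_metadata assertions above pin down the user-metadata sanitization contract: keys prefixed with 'metering.' are kept with the prefix stripped and dots replaced by underscores, everything else (e.g. AutoScalingGroupName) is dropped, and values are capped at 256 characters (per the location-metadata test later in this patch). A minimal sketch of that transformation, as a hypothetical helper rather than the function under test:

    def sanitize_user_metadata(metadata):
        # keep only 'metering.'-prefixed keys, as the tests assert
        user_metadata = {}
        for key, value in metadata.items():
            if key.startswith('metering.'):
                # 'metering.foo.bar' -> 'foo_bar'; cap values at 256 chars
                clean = key[len('metering.'):].replace('.', '_')
                user_metadata[clean] = str(value)[:256]
        return user_metadata
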
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/compute/pollsters/base.py ceilometer-5.0.0~b3/ceilometer/tests/unit/compute/pollsters/base.py
--- ceilometer-5.0.0~b2/ceilometer/tests/unit/compute/pollsters/base.py 1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/unit/compute/pollsters/base.py 2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,54 @@
+#
+# Copyright 2012 eNovance
+# Copyright 2012 Red Hat, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslotest import mockpatch
+
+import ceilometer.tests.base as base
+
+
+class TestPollsterBase(base.BaseTestCase):
+
+    def setUp(self):
+        super(TestPollsterBase, self).setUp()
+
+        self.inspector = mock.Mock()
+        self.instance = mock.MagicMock()
+        self.instance.name = 'instance-00000001'
+        setattr(self.instance, 'OS-EXT-SRV-ATTR:instance_name',
+                self.instance.name)
+        self.instance.id = 1
+        self.instance.flavor = {'name': 'm1.small', 'id': 2, 'vcpus': 1,
+                                'ram': 512, 'disk': 20, 'ephemeral': 0}
+        self.instance.status = 'active'
+        self.instance.metadata = {
+            'fqdn': 'vm_fqdn',
+            'metering.stack': '2cadc4b4-8789-123c-b4eg-edd2f0a9c128',
+            'project_cos': 'dev'}
+
+        patch_virt = mockpatch.Patch(
+            'ceilometer.compute.virt.inspector.get_hypervisor_inspector',
+            new=mock.Mock(return_value=self.inspector))
+        self.useFixture(patch_virt)
+
+        # the base compute pollster class lazily creates a hypervisor
+        # inspector singleton, so the class property has to be patched
+        # as well to avoid sharing inspector state between tests
+        patch_inspector = mockpatch.Patch(
+            'ceilometer.compute.pollsters.BaseComputePollster.inspector',
+            self.inspector)
+        self.useFixture(patch_inspector)
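
The comment above is load-bearing: BaseComputePollster exposes its inspector through a lazily initialized class-level attribute, so once any test touches it, later pollster instances would see the same object. A sketch of the pattern being worked around (assumed shape, not the real class):

    from ceilometer.compute.virt.inspector import get_hypervisor_inspector

    class LazyInspectorMixin(object):
        _inspector = None  # class-level cache shared by every pollster

        @property
        def inspector(self):
            # first access creates the singleton; every later instance
            # reuses it, which is why the tests patch the property itself
            if LazyInspectorMixin._inspector is None:
                LazyInspectorMixin._inspector = get_hypervisor_inspector()
            return LazyInspectorMixin._inspector
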
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/compute/pollsters/test_cpu.py ceilometer-5.0.0~b3/ceilometer/tests/unit/compute/pollsters/test_cpu.py
--- ceilometer-5.0.0~b2/ceilometer/tests/unit/compute/pollsters/test_cpu.py 1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/unit/compute/pollsters/test_cpu.py 2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,108 @@
+#
+# Copyright 2012 eNovance
+# Copyright 2012 Red Hat, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import time
+
+import mock
+
+from ceilometer.agent import manager
+from ceilometer.compute.pollsters import cpu
+from ceilometer.compute.virt import inspector as virt_inspector
+from ceilometer.tests.unit.compute.pollsters import base
+
+
+class TestCPUPollster(base.TestPollsterBase):
+
+    def setUp(self):
+        super(TestCPUPollster, self).setUp()
+
+    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
+    def test_get_samples(self):
+        next_value = iter((
+            virt_inspector.CPUStats(time=1 * (10 ** 6), number=2),
+            virt_inspector.CPUStats(time=3 * (10 ** 6), number=2),
+            # cpu_time resets on instance restart
+            virt_inspector.CPUStats(time=2 * (10 ** 6), number=2),
+        ))
+
+        def inspect_cpus(name):
+            return next(next_value)
+
+        self.inspector.inspect_cpus = mock.Mock(side_effect=inspect_cpus)
+
+        mgr = manager.AgentManager()
+        pollster = cpu.CPUPollster()
+
+        def _verify_cpu_metering(expected_time):
+            cache = {}
+            samples = list(pollster.get_samples(mgr, cache, [self.instance]))
+            self.assertEqual(1, len(samples))
+            self.assertEqual(set(['cpu']), set([s.name for s in samples]))
+            self.assertEqual(expected_time, samples[0].volume)
+            self.assertEqual(2, samples[0].resource_metadata.get('cpu_number'))
+            # ensure elapsed time between polling cycles is non-zero
+            time.sleep(0.001)
+
+        _verify_cpu_metering(1 * (10 ** 6))
+        _verify_cpu_metering(3 * (10 ** 6))
+        _verify_cpu_metering(2 * (10 ** 6))
+
+    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
+    def test_get_samples_no_caching(self):
+        cpu_stats = virt_inspector.CPUStats(time=1 * (10 ** 6), number=2)
+        self.inspector.inspect_cpus = mock.Mock(return_value=cpu_stats)
+
+        mgr = manager.AgentManager()
+        pollster = cpu.CPUPollster()
+
+        cache = {}
+        samples = list(pollster.get_samples(mgr, cache, [self.instance]))
+        self.assertEqual(1, len(samples))
+        self.assertEqual(10 ** 6, samples[0].volume)
+        self.assertEqual(0, len(cache))
+
+
+class TestCPUUtilPollster(base.TestPollsterBase):
+
+    def setUp(self):
+        super(TestCPUUtilPollster, self).setUp()
+
+    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
+    def test_get_samples(self):
+        next_value = iter((
+            virt_inspector.CPUUtilStats(util=40),
+            virt_inspector.CPUUtilStats(util=60),
+        ))
+
+        def inspect_cpu_util(name, duration):
+            return next(next_value)
+
+        self.inspector.inspect_cpu_util = mock.Mock(
+            side_effect=inspect_cpu_util)
+
+        mgr = manager.AgentManager()
+        pollster = cpu.CPUUtilPollster()
+
+        def _verify_cpu_util_metering(expected_util):
+            cache = {}
+            samples = list(pollster.get_samples(mgr, cache, [self.instance]))
+            self.assertEqual(1, len(samples))
+            self.assertEqual(set(['cpu_util']),
+                             set([s.name for s in samples]))
+            self.assertEqual(expected_util, samples[0].volume)
+
+        _verify_cpu_util_metering(40)
+        _verify_cpu_util_metering(60)
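
test_get_samples above feeds cumulative CPU times of 1e6, 3e6 and then 2e6 nanoseconds: the pollster republishes the raw counter even when it goes backwards, because cpu_time restarts from zero when an instance reboots. Any consumer deriving a rate therefore has to treat a decrease as a reset. A hedged sketch of that consumer-side logic (not ceilometer code):

    def cpu_delta(prev, curr):
        """Nanoseconds of CPU consumed between two cumulative samples."""
        if curr >= prev:
            return curr - prev
        # counter went backwards: the instance restarted, so the new
        # value is the total consumed since boot
        return curr

    assert cpu_delta(1 * 10**6, 3 * 10**6) == 2 * 10**6
    assert cpu_delta(3 * 10**6, 2 * 10**6) == 2 * 10**6  # reset case
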
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/compute/pollsters/test_diskio.py ceilometer-5.0.0~b3/ceilometer/tests/unit/compute/pollsters/test_diskio.py
--- ceilometer-5.0.0~b2/ceilometer/tests/unit/compute/pollsters/test_diskio.py 1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/unit/compute/pollsters/test_diskio.py 2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,361 @@
+#
+# Copyright 2012 eNovance
+# Copyright 2012 Red Hat, Inc
+# Copyright 2014 Cisco Systems, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import mock
+from oslotest import mockpatch
+
+from ceilometer.agent import manager
+from ceilometer.compute.pollsters import disk
+from ceilometer.compute.virt import inspector as virt_inspector
+import ceilometer.tests.base as base
+
+
+class TestBaseDiskIO(base.BaseTestCase):
+
+    TYPE = 'cumulative'
+
+    def setUp(self):
+        super(TestBaseDiskIO, self).setUp()
+
+        self.inspector = mock.Mock()
+        self.instance = self._get_fake_instances()
+        patch_virt = mockpatch.Patch(
+            'ceilometer.compute.virt.inspector.get_hypervisor_inspector',
+            new=mock.Mock(return_value=self.inspector))
+        self.useFixture(patch_virt)
+
+        # the base compute pollster class lazily creates a hypervisor
+        # inspector singleton, so the class property has to be patched
+        # as well to avoid sharing inspector state between tests
+        patch_inspector = mockpatch.Patch(
+            'ceilometer.compute.pollsters.BaseComputePollster.inspector',
+            self.inspector)
+        self.useFixture(patch_inspector)
+
+    @staticmethod
+    def _get_fake_instances():
+        instances = []
+        for i in [1, 2]:
+            instance = mock.MagicMock()
+            instance.name = 'instance-%s' % i
+            setattr(instance, 'OS-EXT-SRV-ATTR:instance_name',
+                    instance.name)
+            instance.id = i
+            instance.flavor = {'name': 'm1.small', 'id': 2, 'vcpus': 1,
+                               'ram': 512, 'disk': 20, 'ephemeral': 0}
+            instance.status = 'active'
+            instances.append(instance)
+        return instances
+
+    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
+    def _check_get_samples(self, factory, name, expected_count=2):
+        pollster = factory()
+
+        mgr = manager.AgentManager()
+        cache = {}
+        samples = list(pollster.get_samples(mgr, cache, self.instance))
+        self.assertIsNotEmpty(samples)
+        cache_key = getattr(pollster, self.CACHE_KEY)
+        self.assertIn(cache_key, cache)
+        for instance in self.instance:
+            self.assertIn(instance.id, cache[cache_key])
+        self.assertEqual(set([name]), set([s.name for s in samples]))
+
+        match = [s for s in samples if s.name == name]
+        self.assertEqual(expected_count, len(match),
+                         'missing counter %s' % name)
+        return match
+
+    def _check_aggregate_samples(self, factory, name,
+                                 expected_volume,
+                                 expected_device=None):
+        match = self._check_get_samples(factory, name)
+        self.assertEqual(expected_volume, match[0].volume)
+        self.assertEqual(self.TYPE, match[0].type)
+        if expected_device is not None:
+            self.assertEqual(set(expected_device),
+                             set(match[0].resource_metadata.get('device')))
+        instances = [i.id for i in self.instance]
+        for m in match:
+            self.assertIn(m.resource_id, instances)
+
+    def _check_per_device_samples(self, factory, name,
+                                  expected_volume,
+                                  expected_device=None):
+        match = self._check_get_samples(factory, name, expected_count=4)
+        match_dict = {}
+        for m in match:
+            match_dict[m.resource_id] = m
+        for instance in self.instance:
+            key = "%s-%s" % (instance.id, expected_device)
+            self.assertEqual(expected_volume,
+                             match_dict[key].volume)
+            self.assertEqual(self.TYPE, match_dict[key].type)
+
+            self.assertEqual(key, match_dict[key].resource_id)
+
+
+class TestDiskPollsters(TestBaseDiskIO):
+
+    DISKS = [
+        (virt_inspector.Disk(device='vda1'),
+         virt_inspector.DiskStats(read_bytes=1, read_requests=2,
+                                  write_bytes=3, write_requests=4,
+                                  errors=-1)),
+        (virt_inspector.Disk(device='vda2'),
+         virt_inspector.DiskStats(read_bytes=2, read_requests=3,
+                                  write_bytes=5, write_requests=7,
+                                  errors=-1)),
+    ]
+    CACHE_KEY = "CACHE_KEY_DISK"
+
+    def setUp(self):
+        super(TestDiskPollsters, self).setUp()
+        self.inspector.inspect_disks = mock.Mock(return_value=self.DISKS)
+
+    def test_disk_read_requests(self):
+        self._check_aggregate_samples(disk.ReadRequestsPollster,
+                                      'disk.read.requests', 5,
+                                      expected_device=['vda1', 'vda2'])
+
+    def test_disk_read_bytes(self):
+        self._check_aggregate_samples(disk.ReadBytesPollster,
+                                      'disk.read.bytes', 3,
+                                      expected_device=['vda1', 'vda2'])
+
+    def test_disk_write_requests(self):
+        self._check_aggregate_samples(disk.WriteRequestsPollster,
+                                      'disk.write.requests', 11,
+                                      expected_device=['vda1', 'vda2'])
+
+    def test_disk_write_bytes(self):
+        self._check_aggregate_samples(disk.WriteBytesPollster,
+                                      'disk.write.bytes', 8,
+                                      expected_device=['vda1', 'vda2'])
+
+    def test_per_disk_read_requests(self):
+        self._check_per_device_samples(disk.PerDeviceReadRequestsPollster,
+                                       'disk.device.read.requests', 2,
+                                       'vda1')
+        self._check_per_device_samples(disk.PerDeviceReadRequestsPollster,
+                                       'disk.device.read.requests', 3,
+                                       'vda2')
+
+    def test_per_disk_write_requests(self):
+        self._check_per_device_samples(disk.PerDeviceWriteRequestsPollster,
+                                       'disk.device.write.requests', 4,
+                                       'vda1')
+        self._check_per_device_samples(disk.PerDeviceWriteRequestsPollster,
+                                       'disk.device.write.requests', 7,
+                                       'vda2')
+
+    def test_per_disk_read_bytes(self):
+        self._check_per_device_samples(disk.PerDeviceReadBytesPollster,
+                                       'disk.device.read.bytes', 1,
+                                       'vda1')
+        self._check_per_device_samples(disk.PerDeviceReadBytesPollster,
+                                       'disk.device.read.bytes', 2,
+                                       'vda2')
+
+    def test_per_disk_write_bytes(self):
+        self._check_per_device_samples(disk.PerDeviceWriteBytesPollster,
+                                       'disk.device.write.bytes', 3,
+                                       'vda1')
+        self._check_per_device_samples(disk.PerDeviceWriteBytesPollster,
+                                       'disk.device.write.bytes', 5,
+                                       'vda2')
+
+
+class TestDiskRatePollsters(TestBaseDiskIO):
+
+    DISKS = [
+        (virt_inspector.Disk(device='disk1'),
+         virt_inspector.DiskRateStats(1024, 300, 5120, 700)),
+
+        (virt_inspector.Disk(device='disk2'),
+         virt_inspector.DiskRateStats(2048, 400, 6144, 800))
+    ]
+    TYPE = 'gauge'
+    CACHE_KEY = "CACHE_KEY_DISK_RATE"
+
+    def setUp(self):
+        super(TestDiskRatePollsters, self).setUp()
+        self.inspector.inspect_disk_rates = mock.Mock(return_value=self.DISKS)
+
+    def test_disk_read_bytes_rate(self):
+        self._check_aggregate_samples(disk.ReadBytesRatePollster,
+                                      'disk.read.bytes.rate', 3072,
+                                      expected_device=['disk1', 'disk2'])
+
+    def test_disk_read_requests_rate(self):
+        self._check_aggregate_samples(disk.ReadRequestsRatePollster,
+                                      'disk.read.requests.rate', 700,
+                                      expected_device=['disk1', 'disk2'])
+
+    def test_disk_write_bytes_rate(self):
+        self._check_aggregate_samples(disk.WriteBytesRatePollster,
+                                      'disk.write.bytes.rate', 11264,
+                                      expected_device=['disk1', 'disk2'])
+
+    def test_disk_write_requests_rate(self):
+        self._check_aggregate_samples(disk.WriteRequestsRatePollster,
+                                      'disk.write.requests.rate', 1500,
+                                      expected_device=['disk1', 'disk2'])
+
+    def test_per_disk_read_bytes_rate(self):
+        self._check_per_device_samples(disk.PerDeviceReadBytesRatePollster,
+                                       'disk.device.read.bytes.rate',
+                                       1024, 'disk1')
+        self._check_per_device_samples(disk.PerDeviceReadBytesRatePollster,
+                                       'disk.device.read.bytes.rate',
+                                       2048, 'disk2')
+
+    def test_per_disk_read_requests_rate(self):
+        self._check_per_device_samples(disk.PerDeviceReadRequestsRatePollster,
+                                       'disk.device.read.requests.rate',
+                                       300, 'disk1')
+        self._check_per_device_samples(disk.PerDeviceReadRequestsRatePollster,
+                                       'disk.device.read.requests.rate',
+                                       400, 'disk2')
+
+    def test_per_disk_write_bytes_rate(self):
+        self._check_per_device_samples(disk.PerDeviceWriteBytesRatePollster,
+                                       'disk.device.write.bytes.rate',
+                                       5120, 'disk1')
+        self._check_per_device_samples(disk.PerDeviceWriteBytesRatePollster,
+                                       'disk.device.write.bytes.rate', 6144,
+                                       'disk2')
+
+    def test_per_disk_write_requests_rate(self):
+        self._check_per_device_samples(disk.PerDeviceWriteRequestsRatePollster,
+                                       'disk.device.write.requests.rate', 700,
+                                       'disk1')
+        self._check_per_device_samples(disk.PerDeviceWriteRequestsRatePollster,
+                                       'disk.device.write.requests.rate', 800,
+                                       'disk2')
+
+
+class TestDiskLatencyPollsters(TestBaseDiskIO):
+
+    DISKS = [
+        (virt_inspector.Disk(device='disk1'),
+         virt_inspector.DiskLatencyStats(1000)),
+
+        (virt_inspector.Disk(device='disk2'),
+         virt_inspector.DiskLatencyStats(2000))
+    ]
+    TYPE = 'gauge'
+    CACHE_KEY = "CACHE_KEY_DISK_LATENCY"
+
+    def setUp(self):
+        super(TestDiskLatencyPollsters, self).setUp()
+        self.inspector.inspect_disk_latency = mock.Mock(
+            return_value=self.DISKS)
+
+    def test_disk_latency(self):
+        self._check_aggregate_samples(disk.DiskLatencyPollster,
+                                      'disk.latency', 3)
+
+    def test_per_device_latency(self):
+        self._check_per_device_samples(disk.PerDeviceDiskLatencyPollster,
+                                       'disk.device.latency', 1, 'disk1')
+
+        self._check_per_device_samples(disk.PerDeviceDiskLatencyPollster,
+                                       'disk.device.latency', 2, 'disk2')
+
+
+class TestDiskIOPSPollsters(TestBaseDiskIO):
+
+    DISKS = [
+        (virt_inspector.Disk(device='disk1'),
+         virt_inspector.DiskIOPSStats(10)),
+
+        (virt_inspector.Disk(device='disk2'),
+         virt_inspector.DiskIOPSStats(20)),
+    ]
+    TYPE = 'gauge'
+    CACHE_KEY = "CACHE_KEY_DISK_IOPS"
+
+    def setUp(self):
+        super(TestDiskIOPSPollsters, self).setUp()
+        self.inspector.inspect_disk_iops = mock.Mock(return_value=self.DISKS)
+
+    def test_disk_iops(self):
+        self._check_aggregate_samples(disk.DiskIOPSPollster,
+                                      'disk.iops', 30)
+
+    def test_per_device_iops(self):
+        self._check_per_device_samples(disk.PerDeviceDiskIOPSPollster,
+                                       'disk.device.iops', 10, 'disk1')
+
+        self._check_per_device_samples(disk.PerDeviceDiskIOPSPollster,
+                                       'disk.device.iops', 20, 'disk2')
+
+
+class TestDiskInfoPollsters(TestBaseDiskIO):
+
+    DISKS = [
+        (virt_inspector.Disk(device='vda1'),
+         virt_inspector.DiskInfo(capacity=3, allocation=2, physical=1)),
+        (virt_inspector.Disk(device='vda2'),
+         virt_inspector.DiskInfo(capacity=4, allocation=3, physical=2)),
+    ]
+    TYPE = 'gauge'
+    CACHE_KEY = "CACHE_KEY_DISK_INFO"
+
+    def setUp(self):
+        super(TestDiskInfoPollsters, self).setUp()
+        self.inspector.inspect_disk_info = mock.Mock(return_value=self.DISKS)
+
+    def test_disk_capacity(self):
+        self._check_aggregate_samples(disk.CapacityPollster,
+                                      'disk.capacity', 7,
+                                      expected_device=['vda1', 'vda2'])
+
+    def test_disk_allocation(self):
+        self._check_aggregate_samples(disk.AllocationPollster,
+                                      'disk.allocation', 5,
+                                      expected_device=['vda1', 'vda2'])
+
+    def test_disk_physical(self):
+        self._check_aggregate_samples(disk.PhysicalPollster,
+                                      'disk.usage', 3,
+                                      expected_device=['vda1', 'vda2'])
+
+    def test_per_disk_capacity(self):
+        self._check_per_device_samples(disk.PerDeviceCapacityPollster,
+                                       'disk.device.capacity', 3,
+                                       'vda1')
+        self._check_per_device_samples(disk.PerDeviceCapacityPollster,
+                                       'disk.device.capacity', 4,
+                                       'vda2')
+
+    def test_per_disk_allocation(self):
+        self._check_per_device_samples(disk.PerDeviceAllocationPollster,
+                                       'disk.device.allocation', 2,
+                                       'vda1')
+        self._check_per_device_samples(disk.PerDeviceAllocationPollster,
+                                       'disk.device.allocation', 3,
+                                       'vda2')
+
+    def test_per_disk_physical(self):
+        self._check_per_device_samples(disk.PerDevicePhysicalPollster,
+                                       'disk.device.usage', 1,
+                                       'vda1')
+        self._check_per_device_samples(disk.PerDevicePhysicalPollster,
+                                       'disk.device.usage', 2,
+                                       'vda2')
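
Two conventions are visible throughout these disk tests: aggregate pollsters sum a counter across all of an instance's devices (e.g. read requests 2 + 3 = 5) and use the instance id as resource_id, while per-device pollsters emit one sample per disk under a composite resource id. A small sketch of the id scheme, illustrative only:

    def aggregate_resource_id(instance_id):
        return instance_id

    def per_device_resource_id(instance_id, device):
        # matches the "%s-%s" % (instance.id, device) keys asserted above
        return "%s-%s" % (instance_id, device)

    assert per_device_resource_id(1, 'vda1') == '1-vda1'
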
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/compute/pollsters/test_instance.py ceilometer-5.0.0~b3/ceilometer/tests/unit/compute/pollsters/test_instance.py
--- ceilometer-5.0.0~b2/ceilometer/tests/unit/compute/pollsters/test_instance.py 1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/unit/compute/pollsters/test_instance.py 2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,78 @@
+#
+# Copyright 2012 eNovance
+# Copyright 2012 Red Hat, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo_config import fixture as fixture_config
+
+from ceilometer.agent import manager
+from ceilometer.compute.pollsters import instance as pollsters_instance
+from ceilometer.tests.unit.compute.pollsters import base
+
+
+class TestInstancePollster(base.TestPollsterBase):
+
+    def setUp(self):
+        super(TestInstancePollster, self).setUp()
+
+    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
+    def test_get_samples_instance(self):
+        mgr = manager.AgentManager()
+        pollster = pollsters_instance.InstancePollster()
+        samples = list(pollster.get_samples(mgr, {}, [self.instance]))
+        self.assertEqual(1, len(samples))
+        self.assertEqual('instance', samples[0].name)
+        self.assertEqual(1, samples[0].resource_metadata['vcpus'])
+        self.assertEqual(512, samples[0].resource_metadata['memory_mb'])
+        self.assertEqual(20, samples[0].resource_metadata['disk_gb'])
+        self.assertEqual(20, samples[0].resource_metadata['root_gb'])
+        self.assertEqual(0, samples[0].resource_metadata['ephemeral_gb'])
+        self.assertEqual('active', samples[0].resource_metadata['status'])
+
+    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
+    def test_get_reserved_metadata_with_keys(self):
+        self.CONF = self.useFixture(fixture_config.Config()).conf
+        self.CONF.set_override('reserved_metadata_keys', ['fqdn'])
+
+        mgr = manager.AgentManager()
+        pollster = pollsters_instance.InstancePollster()
+        samples = list(pollster.get_samples(mgr, {}, [self.instance]))
+        self.assertEqual({'fqdn': 'vm_fqdn',
+                          'stack': '2cadc4b4-8789-123c-b4eg-edd2f0a9c128'},
+                         samples[0].resource_metadata['user_metadata'])
+
+    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
+    def test_get_reserved_metadata_with_namespace(self):
+        mgr = manager.AgentManager()
+        pollster = pollsters_instance.InstancePollster()
+        samples = list(pollster.get_samples(mgr, {}, [self.instance]))
+        self.assertEqual({'stack': '2cadc4b4-8789-123c-b4eg-edd2f0a9c128'},
+                         samples[0].resource_metadata['user_metadata'])
+
+        self.CONF = self.useFixture(fixture_config.Config()).conf
+        self.CONF.set_override('reserved_metadata_namespace', [])
+        mgr = manager.AgentManager()
+        pollster = pollsters_instance.InstancePollster()
+        samples = list(pollster.get_samples(mgr, {}, [self.instance]))
+        self.assertNotIn('user_metadata', samples[0].resource_metadata)
+
+    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
+    def test_get_flavor_name_as_metadata_instance_type(self):
+        mgr = manager.AgentManager()
+        pollster = pollsters_instance.InstancePollster()
+        samples = list(pollster.get_samples(mgr, {}, [self.instance]))
+        self.assertEqual(1, len(samples))
+        self.assertEqual('m1.small',
+                         samples[0].resource_metadata['instance_type'])
+""" + +import mock +from oslotest import base +import six + +from ceilometer.agent import manager +from ceilometer.compute.pollsters import util + + +class FauxInstance(object): + + def __init__(self, **kwds): + for name, value in kwds.items(): + setattr(self, name, value) + + def __getitem__(self, key): + return getattr(self, key) + + def get(self, key, default): + try: + return getattr(self, key) + except AttributeError: + return default + + +class TestLocationMetadata(base.BaseTestCase): + + @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) + def setUp(self): + self.manager = manager.AgentManager() + super(TestLocationMetadata, self).setUp() + + # Mimics an instance returned from nova api call + self.INSTANCE_PROPERTIES = {'name': 'display name', + 'OS-EXT-SRV-ATTR:instance_name': + 'instance-000001', + 'OS-EXT-AZ:availability_zone': + 'foo-zone', + 'reservation_id': 'reservation id', + 'architecture': 'x86_64', + 'kernel_id': 'kernel id', + 'os_type': 'linux', + 'ramdisk_id': 'ramdisk id', + 'status': 'active', + 'ephemeral_gb': 0, + 'root_gb': 20, + 'disk_gb': 20, + 'image': {'id': 1, + 'links': [{"rel": "bookmark", + 'href': 2}]}, + 'hostId': '1234-5678', + 'flavor': {'name': 'm1.tiny', + 'id': 1, + 'disk': 20, + 'ram': 512, + 'vcpus': 2, + 'ephemeral': 0}, + 'metadata': {'metering.autoscale.group': + 'X' * 512, + 'metering.ephemeral_gb': 42}} + + self.instance = FauxInstance(**self.INSTANCE_PROPERTIES) + + def test_metadata(self): + md = util._get_metadata_from_object(self.instance) + for prop, value in six.iteritems(self.INSTANCE_PROPERTIES): + if prop not in ("metadata"): + # Special cases + if prop == 'name': + prop = 'display_name' + elif prop == 'hostId': + prop = "host" + elif prop == 'OS-EXT-SRV-ATTR:instance_name': + prop = 'name' + self.assertEqual(value, md[prop]) + user_metadata = md['user_metadata'] + expected = self.INSTANCE_PROPERTIES[ + 'metadata']['metering.autoscale.group'][:256] + self.assertEqual(expected, user_metadata['autoscale_group']) + self.assertEqual(1, len(user_metadata)) + + def test_metadata_empty_image(self): + self.INSTANCE_PROPERTIES['image'] = None + self.instance = FauxInstance(**self.INSTANCE_PROPERTIES) + md = util._get_metadata_from_object(self.instance) + self.assertIsNone(md['image']) + self.assertIsNone(md['image_ref']) + self.assertIsNone(md['image_ref_url']) + + def test_metadata_image_through_conductor(self): + # There should be no links here, should default to None + self.INSTANCE_PROPERTIES['image'] = {'id': 1} + self.instance = FauxInstance(**self.INSTANCE_PROPERTIES) + md = util._get_metadata_from_object(self.instance) + self.assertEqual(1, md['image_ref']) + self.assertIsNone(md['image_ref_url']) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/compute/pollsters/test_memory.py ceilometer-5.0.0~b3/ceilometer/tests/unit/compute/pollsters/test_memory.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/compute/pollsters/test_memory.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/compute/pollsters/test_memory.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,114 @@ +# Copyright (c) 2014 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/compute/pollsters/test_memory.py ceilometer-5.0.0~b3/ceilometer/tests/unit/compute/pollsters/test_memory.py
--- ceilometer-5.0.0~b2/ceilometer/tests/unit/compute/pollsters/test_memory.py 1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/unit/compute/pollsters/test_memory.py 2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,114 @@
+# Copyright (c) 2014 VMware, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from ceilometer.agent import manager
+from ceilometer.compute.pollsters import memory
+from ceilometer.compute.virt import inspector as virt_inspector
+from ceilometer.tests.unit.compute.pollsters import base
+
+
+class TestMemoryPollster(base.TestPollsterBase):
+
+    def setUp(self):
+        super(TestMemoryPollster, self).setUp()
+
+    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
+    def test_get_samples(self):
+        next_value = iter((
+            virt_inspector.MemoryUsageStats(usage=1.0),
+            virt_inspector.MemoryUsageStats(usage=2.0),
+            virt_inspector.NoDataException(),
+            virt_inspector.InstanceShutOffException(),
+        ))
+
+        def inspect_memory_usage(instance, duration):
+            value = next(next_value)
+            if isinstance(value, virt_inspector.MemoryUsageStats):
+                return value
+            else:
+                raise value
+
+        self.inspector.inspect_memory_usage = mock.Mock(
+            side_effect=inspect_memory_usage)
+
+        mgr = manager.AgentManager()
+        pollster = memory.MemoryUsagePollster()
+
+        @mock.patch('ceilometer.compute.pollsters.memory.LOG')
+        def _verify_memory_metering(expected_count, expected_memory_mb,
+                                    mylog):
+            samples = list(pollster.get_samples(mgr, {}, [self.instance]))
+            self.assertEqual(expected_count, len(samples))
+            if expected_count > 0:
+                self.assertEqual(set(['memory.usage']),
+                                 set([s.name for s in samples]))
+                self.assertEqual(expected_memory_mb, samples[0].volume)
+            else:
+                self.assertEqual(1, mylog.warn.call_count)
+                self.assertEqual(0, mylog.exception.call_count)
+
+        _verify_memory_metering(1, 1.0)
+        _verify_memory_metering(1, 2.0)
+        _verify_memory_metering(0, 0)
+        _verify_memory_metering(0, 0)
+
+
+class TestResidentMemoryPollster(base.TestPollsterBase):
+
+    def setUp(self):
+        super(TestResidentMemoryPollster, self).setUp()
+
+    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
+    def test_get_samples(self):
+        next_value = iter((
+            virt_inspector.MemoryResidentStats(resident=1.0),
+            virt_inspector.MemoryResidentStats(resident=2.0),
+            virt_inspector.NoDataException(),
+            virt_inspector.InstanceShutOffException(),
+        ))
+
+        def inspect_memory_resident(instance, duration):
+            value = next(next_value)
+            if isinstance(value, virt_inspector.MemoryResidentStats):
+                return value
+            else:
+                raise value
+
+        self.inspector.inspect_memory_resident = mock.Mock(
+            side_effect=inspect_memory_resident)
+
+        mgr = manager.AgentManager()
+        pollster = memory.MemoryResidentPollster()
+
+        @mock.patch('ceilometer.compute.pollsters.memory.LOG')
+        def _verify_resident_memory_metering(expected_count,
+                                             expected_resident_memory_mb,
+                                             mylog):
+            samples = list(pollster.get_samples(mgr, {}, [self.instance]))
+            self.assertEqual(expected_count, len(samples))
+            if expected_count > 0:
+                self.assertEqual(set(['memory.resident']),
+                                 set([s.name for s in samples]))
+                self.assertEqual(expected_resident_memory_mb,
+                                 samples[0].volume)
+            else:
+                self.assertEqual(1, mylog.warn.call_count)
+                self.assertEqual(0, mylog.exception.call_count)
+
+        _verify_resident_memory_metering(1, 1.0)
+        _verify_resident_memory_metering(1, 2.0)
+        _verify_resident_memory_metering(0, 0)
+        _verify_resident_memory_metering(0, 0)
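
Both memory pollster tests encode the same failure contract: when the inspector raises NoDataException or InstanceShutOffException, get_samples yields nothing, logs exactly one warning, and never lets the exception escape. A minimal sketch of a loop with that behavior (assumed structure, simplified from whatever the pollster really does; virt_inspector as imported in the file above):

    def get_memory_samples(inspector, instances, log, duration=None):
        for instance in instances:
            try:
                stats = inspector.inspect_memory_usage(instance, duration)
            except virt_inspector.NoDataException:
                log.warn('no memory usage data for %s', instance)
                continue  # zero samples, exactly one warning
            except virt_inspector.InstanceShutOffException:
                log.warn('instance %s is shut off', instance)
                continue
            yield stats.usage
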
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/compute/pollsters/test_net.py ceilometer-5.0.0~b3/ceilometer/tests/unit/compute/pollsters/test_net.py
--- ceilometer-5.0.0~b2/ceilometer/tests/unit/compute/pollsters/test_net.py 1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/unit/compute/pollsters/test_net.py 2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,317 @@
+#
+# Copyright 2012 eNovance
+# Copyright 2012 Red Hat, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from ceilometer.agent import manager
+from ceilometer.compute.pollsters import net
+from ceilometer.compute.virt import inspector as virt_inspector
+from ceilometer import sample
+from ceilometer.tests.unit.compute.pollsters import base
+
+
+class FauxInstance(object):
+
+    def __init__(self, **kwargs):
+        for name, value in kwargs.items():
+            setattr(self, name, value)
+
+    def __getitem__(self, key):
+        return getattr(self, key)
+
+    def get(self, key, default):
+        return getattr(self, key, default)
+
+
+class TestNetPollster(base.TestPollsterBase):
+
+    def setUp(self):
+        super(TestNetPollster, self).setUp()
+        self.vnic0 = virt_inspector.Interface(
+            name='vnet0',
+            fref='fa163e71ec6e',
+            mac='fa:16:3e:71:ec:6d',
+            parameters=dict(ip='10.0.0.2',
+                            projmask='255.255.255.0',
+                            projnet='proj1',
+                            dhcp_server='10.0.0.1'))
+        stats0 = virt_inspector.InterfaceStats(rx_bytes=1, rx_packets=2,
+                                               tx_bytes=3, tx_packets=4)
+        self.vnic1 = virt_inspector.Interface(
+            name='vnet1',
+            fref='fa163e71ec6f',
+            mac='fa:16:3e:71:ec:6e',
+            parameters=dict(ip='192.168.0.3',
+                            projmask='255.255.255.0',
+                            projnet='proj2',
+                            dhcp_server='10.0.0.2'))
+        stats1 = virt_inspector.InterfaceStats(rx_bytes=5, rx_packets=6,
+                                               tx_bytes=7, tx_packets=8)
+        self.vnic2 = virt_inspector.Interface(
+            name='vnet2',
+            fref=None,
+            mac='fa:18:4e:72:fc:7e',
+            parameters=dict(ip='192.168.0.4',
+                            projmask='255.255.255.0',
+                            projnet='proj3',
+                            dhcp_server='10.0.0.3'))
+        stats2 = virt_inspector.InterfaceStats(rx_bytes=9, rx_packets=10,
+                                               tx_bytes=11, tx_packets=12)
+
+        vnics = [
+            (self.vnic0, stats0),
+            (self.vnic1, stats1),
+            (self.vnic2, stats2),
+        ]
+        self.inspector.inspect_vnics = mock.Mock(return_value=vnics)
+
+        self.INSTANCE_PROPERTIES = {'name': 'display name',
+                                    'OS-EXT-SRV-ATTR:instance_name':
+                                    'instance-000001',
+                                    'OS-EXT-AZ:availability_zone': 'foo-zone',
+                                    'reservation_id': 'reservation id',
+                                    'id': 'instance id',
+                                    'user_id': 'user id',
+                                    'tenant_id': 'tenant id',
+                                    'architecture': 'x86_64',
+                                    'kernel_id': 'kernel id',
+                                    'os_type': 'linux',
+                                    'ramdisk_id': 'ramdisk id',
+                                    'status': 'active',
+                                    'ephemeral_gb': 0,
+                                    'root_gb': 20,
+                                    'disk_gb': 20,
+                                    'image': {'id': 1,
+                                              'links': [{"rel": "bookmark",
+                                                         'href': 2}]},
+                                    'hostId': '1234-5678',
+                                    'flavor': {'id': 1,
+                                               'disk': 20,
+                                               'ram': 512,
+                                               'vcpus': 2,
+                                               'ephemeral': 0},
+                                    'metadata': {'metering.autoscale.group':
+                                                 'X' * 512,
+                                                 'metering.ephemeral_gb': 42}}
+
+        self.faux_instance = FauxInstance(**self.INSTANCE_PROPERTIES)
+    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
+    def _check_get_samples(self, factory, expected):
+        mgr = manager.AgentManager()
+        pollster = factory()
+        samples = list(pollster.get_samples(mgr, {}, [self.instance]))
+        self.assertEqual(3, len(samples))  # one for each nic
+        self.assertEqual(set([samples[0].name]),
+                         set([s.name for s in samples]))
+
+        def _verify_vnic_metering(ip, expected_volume, expected_rid):
+            match = [s for s in samples
+                     if s.resource_metadata['parameters']['ip'] == ip
+                     ]
+            self.assertEqual(1, len(match), 'missing ip %s' % ip)
+            self.assertEqual(expected_volume, match[0].volume)
+            self.assertEqual('cumulative', match[0].type)
+            self.assertEqual(expected_rid, match[0].resource_id)
+
+        for ip, volume, rid in expected:
+            _verify_vnic_metering(ip, volume, rid)
+
+    def test_incoming_bytes(self):
+        instance_name_id = "%s-%s" % (self.instance.name, self.instance.id)
+        self._check_get_samples(
+            net.IncomingBytesPollster,
+            [('10.0.0.2', 1, self.vnic0.fref),
+             ('192.168.0.3', 5, self.vnic1.fref),
+             ('192.168.0.4', 9,
+              "%s-%s" % (instance_name_id, self.vnic2.name)),
+             ],
+        )
+
+    def test_outgoing_bytes(self):
+        instance_name_id = "%s-%s" % (self.instance.name, self.instance.id)
+        self._check_get_samples(
+            net.OutgoingBytesPollster,
+            [('10.0.0.2', 3, self.vnic0.fref),
+             ('192.168.0.3', 7, self.vnic1.fref),
+             ('192.168.0.4', 11,
+              "%s-%s" % (instance_name_id, self.vnic2.name)),
+             ],
+        )
+
+    def test_incoming_packets(self):
+        instance_name_id = "%s-%s" % (self.instance.name, self.instance.id)
+        self._check_get_samples(
+            net.IncomingPacketsPollster,
+            [('10.0.0.2', 2, self.vnic0.fref),
+             ('192.168.0.3', 6, self.vnic1.fref),
+             ('192.168.0.4', 10,
+              "%s-%s" % (instance_name_id, self.vnic2.name)),
+             ],
+        )
+
+    def test_outgoing_packets(self):
+        instance_name_id = "%s-%s" % (self.instance.name, self.instance.id)
+        self._check_get_samples(
+            net.OutgoingPacketsPollster,
+            [('10.0.0.2', 4, self.vnic0.fref),
+             ('192.168.0.3', 8, self.vnic1.fref),
+             ('192.168.0.4', 12,
+              "%s-%s" % (instance_name_id, self.vnic2.name)),
+             ],
+        )
+
+    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
+    def test_metadata(self):
+        factory = net.OutgoingBytesPollster
+        pollster = factory()
+        sm = pollster.make_vnic_sample(self.faux_instance,
+                                       name='network.outgoing.bytes',
+                                       type=sample.TYPE_CUMULATIVE,
+                                       unit='B',
+                                       volume=100,
+                                       vnic_data=self.vnic0)
+
+        user_metadata = sm.resource_metadata['user_metadata']
+        expected = self.INSTANCE_PROPERTIES[
+            'metadata']['metering.autoscale.group'][:256]
+        self.assertEqual(expected, user_metadata['autoscale_group'])
+        self.assertEqual(2, len(user_metadata))
+
+
+class TestNetPollsterCache(base.TestPollsterBase):
+
+    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
+    def _check_get_samples_cache(self, factory):
+        vnic0 = virt_inspector.Interface(
+            name='vnet0',
+            fref='fa163e71ec6e',
+            mac='fa:16:3e:71:ec:6d',
+            parameters=dict(ip='10.0.0.2',
+                            projmask='255.255.255.0',
+                            projnet='proj1',
+                            dhcp_server='10.0.0.1'))
+        stats0 = virt_inspector.InterfaceStats(rx_bytes=1, rx_packets=2,
+                                               tx_bytes=3, tx_packets=4)
+        vnics = [(vnic0, stats0)]
+
+        mgr = manager.AgentManager()
+        pollster = factory()
+        cache = {
+            pollster.CACHE_KEY_VNIC: {
+                self.instance.id: vnics,
+            },
+        }
+        samples = list(pollster.get_samples(mgr, cache, [self.instance]))
+        self.assertEqual(1, len(samples))
+
+    def test_incoming_bytes(self):
+        self._check_get_samples_cache(net.IncomingBytesPollster)
+    def test_outgoing_bytes(self):
+        self._check_get_samples_cache(net.OutgoingBytesPollster)
+
+    def test_incoming_packets(self):
+        self._check_get_samples_cache(net.IncomingPacketsPollster)
+
+    def test_outgoing_packets(self):
+        self._check_get_samples_cache(net.OutgoingPacketsPollster)
+
+
+class TestNetRatesPollster(base.TestPollsterBase):
+
+    def setUp(self):
+        super(TestNetRatesPollster, self).setUp()
+        self.vnic0 = virt_inspector.Interface(
+            name='vnet0',
+            fref='fa163e71ec6e',
+            mac='fa:16:3e:71:ec:6d',
+            parameters=dict(ip='10.0.0.2',
+                            projmask='255.255.255.0',
+                            projnet='proj1',
+                            dhcp_server='10.0.0.1'))
+        stats0 = virt_inspector.InterfaceRateStats(rx_bytes_rate=1,
+                                                   tx_bytes_rate=2)
+        self.vnic1 = virt_inspector.Interface(
+            name='vnet1',
+            fref='fa163e71ec6f',
+            mac='fa:16:3e:71:ec:6e',
+            parameters=dict(ip='192.168.0.3',
+                            projmask='255.255.255.0',
+                            projnet='proj2',
+                            dhcp_server='10.0.0.2'))
+        stats1 = virt_inspector.InterfaceRateStats(rx_bytes_rate=3,
+                                                   tx_bytes_rate=4)
+        self.vnic2 = virt_inspector.Interface(
+            name='vnet2',
+            fref=None,
+            mac='fa:18:4e:72:fc:7e',
+            parameters=dict(ip='192.168.0.4',
+                            projmask='255.255.255.0',
+                            projnet='proj3',
+                            dhcp_server='10.0.0.3'))
+        stats2 = virt_inspector.InterfaceRateStats(rx_bytes_rate=5,
+                                                   tx_bytes_rate=6)
+
+        vnics = [
+            (self.vnic0, stats0),
+            (self.vnic1, stats1),
+            (self.vnic2, stats2),
+        ]
+        self.inspector.inspect_vnic_rates = mock.Mock(return_value=vnics)
+
+    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
+    def _check_get_samples(self, factory, expected):
+        mgr = manager.AgentManager()
+        pollster = factory()
+        samples = list(pollster.get_samples(mgr, {}, [self.instance]))
+        self.assertEqual(3, len(samples))  # one for each nic
+        self.assertEqual(set([samples[0].name]),
+                         set([s.name for s in samples]))
+
+        def _verify_vnic_metering(ip, expected_volume, expected_rid):
+            match = [s for s in samples
+                     if s.resource_metadata['parameters']['ip'] == ip
+                     ]
+            self.assertEqual(1, len(match), 'missing ip %s' % ip)
+            self.assertEqual(expected_volume, match[0].volume)
+            self.assertEqual('gauge', match[0].type)
+            self.assertEqual(expected_rid, match[0].resource_id)
+
+        for ip, volume, rid in expected:
+            _verify_vnic_metering(ip, volume, rid)
+
+    def test_incoming_bytes_rate(self):
+        instance_name_id = "%s-%s" % (self.instance.name, self.instance.id)
+        self._check_get_samples(
+            net.IncomingBytesRatePollster,
+            [('10.0.0.2', 1, self.vnic0.fref),
+             ('192.168.0.3', 3, self.vnic1.fref),
+             ('192.168.0.4', 5,
+              "%s-%s" % (instance_name_id, self.vnic2.name)),
+             ],
+        )
+
+    def test_outgoing_bytes_rate(self):
+        instance_name_id = "%s-%s" % (self.instance.name, self.instance.id)
+        self._check_get_samples(
+            net.OutgoingBytesRatePollster,
+            [('10.0.0.2', 2, self.vnic0.fref),
+             ('192.168.0.3', 4, self.vnic1.fref),
+             ('192.168.0.4', 6,
+              "%s-%s" % (instance_name_id, self.vnic2.name)),
+             ],
+        )
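
The expected resource ids in all of these network tests follow one rule: if a vnic carries a filter reference (fref), that is the resource id; otherwise the id is synthesized from the instance and vnic names. A sketch of the rule, illustrative and mirroring the assertions rather than quoting pollster code:

    def vnic_resource_id(instance, vnic):
        if vnic.fref is not None:
            return vnic.fref
        # same shape as the "%s-%s-%s" ids asserted for vnic2 above
        return "%s-%s-%s" % (instance.name, instance.id, vnic.name)
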
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/compute/virt/hyperv/test_inspector.py ceilometer-5.0.0~b3/ceilometer/tests/unit/compute/virt/hyperv/test_inspector.py
--- ceilometer-5.0.0~b2/ceilometer/tests/unit/compute/virt/hyperv/test_inspector.py 1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/unit/compute/virt/hyperv/test_inspector.py 2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,156 @@
+# Copyright 2013 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests for Hyper-V inspector.
+"""
+
+import mock
+from oslo_utils import units
+from oslotest import base
+
+from ceilometer.compute.virt.hyperv import inspector as hyperv_inspector
+
+
+class TestHyperVInspection(base.BaseTestCase):
+
+    def setUp(self):
+        self._inspector = hyperv_inspector.HyperVInspector()
+        self._inspector._utils = mock.MagicMock()
+
+        super(TestHyperVInspection, self).setUp()
+
+    def test_inspect_cpus(self):
+        fake_instance_name = 'fake_instance_name'
+        fake_host_cpu_clock = 1000
+        fake_host_cpu_count = 2
+        fake_cpu_clock_used = 2000
+        fake_cpu_count = 3000
+        fake_uptime = 4000
+
+        fake_cpu_percent_used = (fake_cpu_clock_used /
+                                 float(fake_host_cpu_clock * fake_cpu_count))
+        fake_cpu_time = (int(fake_uptime * fake_cpu_percent_used) *
+                         1000)
+
+        self._inspector._utils.get_host_cpu_info.return_value = (
+            fake_host_cpu_clock, fake_host_cpu_count)
+
+        self._inspector._utils.get_cpu_metrics.return_value = (
+            fake_cpu_clock_used, fake_cpu_count, fake_uptime)
+
+        cpu_stats = self._inspector.inspect_cpus(fake_instance_name)
+
+        self.assertEqual(fake_cpu_count, cpu_stats.number)
+        self.assertEqual(fake_cpu_time, cpu_stats.time)
+
+    @mock.patch('ceilometer.compute.virt.hyperv.utilsv2.UtilsV2.'
+                'get_memory_metrics')
+    def test_inspect_memory_usage(self, mock_get_memory_metrics):
+        fake_usage = self._inspector._utils.get_memory_metrics.return_value
+        usage = self._inspector.inspect_memory_usage(
+            mock.sentinel.FAKE_INSTANCE, mock.sentinel.FAKE_DURATION)
+        self.assertEqual(fake_usage, usage.usage)
+
+    def test_inspect_vnics(self):
+        fake_instance_name = 'fake_instance_name'
+        fake_rx_mb = 1000
+        fake_tx_mb = 2000
+        fake_element_name = 'fake_element_name'
+        fake_address = 'fake_address'
+
+        self._inspector._utils.get_vnic_metrics.return_value = [{
+            'rx_mb': fake_rx_mb,
+            'tx_mb': fake_tx_mb,
+            'element_name': fake_element_name,
+            'address': fake_address}]
+
+        inspected_vnics = list(self._inspector.inspect_vnics(
+            fake_instance_name))
+
+        self.assertEqual(1, len(inspected_vnics))
+        self.assertEqual(2, len(inspected_vnics[0]))
+
+        inspected_vnic, inspected_stats = inspected_vnics[0]
+
+        self.assertEqual(fake_element_name, inspected_vnic.name)
+        self.assertEqual(fake_address, inspected_vnic.mac)
+
+        self.assertEqual(fake_rx_mb * units.Mi, inspected_stats.rx_bytes)
+        self.assertEqual(fake_tx_mb * units.Mi, inspected_stats.tx_bytes)
+
+    def test_inspect_disks(self):
+        fake_instance_name = 'fake_instance_name'
+        fake_read_mb = 1000
+        fake_write_mb = 2000
+        fake_instance_id = "fake_fake_instance_id"
+        fake_host_resource = "fake_host_resource"
+
+        self._inspector._utils.get_disk_metrics.return_value = [{
+            'read_mb': fake_read_mb,
+            'write_mb': fake_write_mb,
+            'instance_id': fake_instance_id,
+            'host_resource': fake_host_resource}]
+
+        inspected_disks = list(self._inspector.inspect_disks(
+            fake_instance_name))
+
+        self.assertEqual(1, len(inspected_disks))
+        self.assertEqual(2, len(inspected_disks[0]))
+
+        inspected_disk, inspected_stats = inspected_disks[0]
+
+        self.assertEqual(fake_instance_id, inspected_disk.device)
+
+        self.assertEqual(fake_read_mb * units.Mi, inspected_stats.read_bytes)
+        self.assertEqual(fake_write_mb * units.Mi, inspected_stats.write_bytes)
+
+    def test_inspect_disk_latency(self):
+        fake_instance_name = mock.sentinel.INSTANCE_NAME
+        fake_disk_latency = mock.sentinel.DISK_LATENCY
+        fake_instance_id = mock.sentinel.INSTANCE_ID
+
+        self._inspector._utils.get_disk_latency_metrics.return_value = [{
+            'disk_latency': fake_disk_latency,
+            'instance_id': fake_instance_id}]
+
+        inspected_disks = list(self._inspector.inspect_disk_latency(
+            fake_instance_name))
+
+        self.assertEqual(1, len(inspected_disks))
+        self.assertEqual(2, len(inspected_disks[0]))
+
+        inspected_disk, inspected_stats = inspected_disks[0]
+
+        self.assertEqual(fake_instance_id, inspected_disk.device)
+        self.assertEqual(fake_disk_latency, inspected_stats.disk_latency)
+
+    def test_inspect_disk_iops_count(self):
+        fake_instance_name = mock.sentinel.INSTANCE_NAME
+        fake_disk_iops_count = mock.sentinel.DISK_IOPS_COUNT
+        fake_instance_id = mock.sentinel.INSTANCE_ID
+
+        self._inspector._utils.get_disk_iops_count.return_value = [{
+            'iops_count': fake_disk_iops_count,
+            'instance_id': fake_instance_id}]
+
+        inspected_disks = list(self._inspector.inspect_disk_iops(
+            fake_instance_name))
+
+        self.assertEqual(1, len(inspected_disks))
+        self.assertEqual(2, len(inspected_disks[0]))
+
+        inspected_disk, inspected_stats = inspected_disks[0]
+
+        self.assertEqual(fake_instance_id, inspected_disk.device)
+        self.assertEqual(fake_disk_iops_count, inspected_stats.iops_count)
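
test_inspect_cpus above bakes in the Hyper-V CPU accounting arithmetic: percent used = clock_used / (host_clock * vcpu_count), and cpu time = int(uptime * percent) * 1000. With the fake values, 2000 / (1000 * 3000) is roughly 0.000667, times an uptime of 4000 gives int(2.67) = 2, so 2000 after the final scaling. The vnic and disk tests likewise convert the WMI megabyte counters to bytes via oslo_utils.units.Mi; a tiny sketch of that conversion (hypothetical helper name):

    from oslo_utils import units

    def mb_to_bytes(mb):
        # units.Mi == 1024 * 1024, the factor used in the assertions above
        return mb * units.Mi

    assert mb_to_bytes(1000) == 1000 * 1024 * 1024
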
+""" + +import mock +from oslotest import base + +from ceilometer.compute.virt.hyperv import utilsv2 as utilsv2 +from ceilometer.compute.virt import inspector + + +class TestUtilsV2(base.BaseTestCase): + + _FAKE_RETURN_CLASS = 'fake_return_class' + + def setUp(self): + self._utils = utilsv2.UtilsV2() + self._utils._conn = mock.MagicMock() + self._utils._conn_cimv2 = mock.MagicMock() + + super(TestUtilsV2, self).setUp() + + @mock.patch.object(utilsv2.UtilsV2, '_get_metrics') + @mock.patch.object(utilsv2.UtilsV2, '_get_metric_def') + @mock.patch.object(utilsv2.UtilsV2, '_lookup_vm') + def test_get_memory_metrics(self, mock_lookup_vm, mock_get_metric_def, + mock_get_metrics): + mock_vm = mock_lookup_vm.return_value + + mock_metric_def = mock_get_metric_def.return_value + + metric_memory = mock.MagicMock() + metric_memory.MetricValue = 3 + mock_get_metrics.return_value = [metric_memory] + + response = self._utils.get_memory_metrics(mock.sentinel._FAKE_INSTANCE) + + mock_lookup_vm.assert_called_once_with(mock.sentinel._FAKE_INSTANCE) + mock_get_metric_def.assert_called_once_with( + self._utils._MEMORY_METRIC_NAME) + mock_get_metrics.assert_called_once_with(mock_vm, mock_metric_def) + + self.assertEqual(3, response) + + def test_get_host_cpu_info(self): + _fake_clock_speed = 1000 + _fake_cpu_count = 2 + + mock_cpu = mock.MagicMock() + mock_cpu.MaxClockSpeed = _fake_clock_speed + + self._utils._conn_cimv2.Win32_Processor.return_value = [mock_cpu, + mock_cpu] + cpu_info = self._utils.get_host_cpu_info() + + self.assertEqual(_fake_clock_speed, cpu_info[0]) + self.assertEqual(_fake_cpu_count, cpu_info[1]) + + def test_get_all_vms(self): + fake_vm_element_name = "fake_vm_element_name" + fake_vm_name = "fake_vm_name" + + mock_vm = mock.MagicMock() + mock_vm.ElementName = fake_vm_element_name + mock_vm.Name = fake_vm_name + self._utils._conn.Msvm_ComputerSystem.return_value = [mock_vm] + + vms = self._utils.get_all_vms() + + self.assertEqual((fake_vm_element_name, fake_vm_name), vms[0]) + + def test_get_cpu_metrics(self): + fake_vm_element_name = "fake_vm_element_name" + fake_cpu_count = 2 + fake_uptime = 1000 + fake_cpu_metric_val = 2000 + + self._utils._lookup_vm = mock.MagicMock() + self._utils._lookup_vm().OnTimeInMilliseconds = fake_uptime + + self._utils._get_vm_resources = mock.MagicMock() + mock_res = self._utils._get_vm_resources()[0] + mock_res.VirtualQuantity = fake_cpu_count + + self._utils._get_metrics = mock.MagicMock() + self._utils._get_metrics()[0].MetricValue = fake_cpu_metric_val + + cpu_metrics = self._utils.get_cpu_metrics(fake_vm_element_name) + + self.assertEqual(3, len(cpu_metrics)) + self.assertEqual(fake_cpu_metric_val, cpu_metrics[0]) + self.assertEqual(fake_cpu_count, cpu_metrics[1]) + self.assertEqual(fake_uptime, cpu_metrics[2]) + + @mock.patch('ceilometer.compute.virt.hyperv.utilsv2.UtilsV2' + '._sum_metric_values_by_defs') + @mock.patch('ceilometer.compute.virt.hyperv.utilsv2.UtilsV2' + '._get_metric_value_instances') + def test_get_vnic_metrics(self, mock_get_instances, mock_get_by_defs): + fake_vm_element_name = "fake_vm_element_name" + fake_vnic_element_name = "fake_vnic_name" + fake_vnic_address = "fake_vnic_address" + fake_vnic_path = "fake_vnic_path" + fake_rx_mb = 1000 + fake_tx_mb = 2000 + + self._utils._lookup_vm = mock.MagicMock() + self._utils._get_vm_resources = mock.MagicMock() + + mock_port = mock.MagicMock() + mock_port.Parent = fake_vnic_path + + mock_vnic = mock.MagicMock() + mock_vnic.path_.return_value = fake_vnic_path + mock_vnic.ElementName = 
+        mock_vnic.ElementName = fake_vnic_element_name
+        mock_vnic.Address = fake_vnic_address
+
+        self._utils._get_vm_resources.side_effect = [[mock_port], [mock_vnic]]
+
+        self._utils._get_metric_def = mock.MagicMock()
+
+        mock_get_by_defs.return_value = [fake_rx_mb, fake_tx_mb]
+
+        vnic_metrics = list(self._utils.get_vnic_metrics(fake_vm_element_name))
+
+        self.assertEqual(1, len(vnic_metrics))
+        self.assertEqual(fake_rx_mb, vnic_metrics[0]['rx_mb'])
+        self.assertEqual(fake_tx_mb, vnic_metrics[0]['tx_mb'])
+        self.assertEqual(fake_vnic_element_name,
+                         vnic_metrics[0]['element_name'])
+        self.assertEqual(fake_vnic_address, vnic_metrics[0]['address'])
+
+    def test_get_disk_metrics(self):
+        fake_vm_element_name = "fake_vm_element_name"
+        fake_host_resource = "fake_host_resource"
+        fake_instance_id = "fake_instance_id"
+        fake_read_mb = 1000
+        fake_write_mb = 2000
+
+        self._utils._lookup_vm = mock.MagicMock()
+
+        mock_disk = mock.MagicMock()
+        mock_disk.HostResource = [fake_host_resource]
+        mock_disk.InstanceID = fake_instance_id
+        self._utils._get_vm_resources = mock.MagicMock(
+            return_value=[mock_disk])
+
+        self._utils._get_metric_def = mock.MagicMock()
+
+        self._utils._get_metric_values = mock.MagicMock()
+        self._utils._get_metric_values.return_value = [fake_read_mb,
+                                                       fake_write_mb]
+
+        disk_metrics = list(self._utils.get_disk_metrics(fake_vm_element_name))
+
+        self.assertEqual(1, len(disk_metrics))
+        self.assertEqual(fake_read_mb, disk_metrics[0]['read_mb'])
+        self.assertEqual(fake_write_mb, disk_metrics[0]['write_mb'])
+        self.assertEqual(fake_instance_id, disk_metrics[0]['instance_id'])
+        self.assertEqual(fake_host_resource, disk_metrics[0]['host_resource'])
+
+    def test_get_disk_latency(self):
+        fake_vm_name = mock.sentinel.VM_NAME
+        fake_instance_id = mock.sentinel.FAKE_INSTANCE_ID
+        fake_latency = mock.sentinel.FAKE_LATENCY
+
+        self._utils._lookup_vm = mock.MagicMock()
+
+        mock_disk = mock.MagicMock()
+        mock_disk.InstanceID = fake_instance_id
+        self._utils._get_vm_resources = mock.MagicMock(
+            return_value=[mock_disk])
+
+        self._utils._get_metric_values = mock.MagicMock(
+            return_value=[fake_latency])
+
+        disk_metrics = list(self._utils.get_disk_latency_metrics(fake_vm_name))
+
+        self.assertEqual(1, len(disk_metrics))
+        self.assertEqual(fake_latency, disk_metrics[0]['disk_latency'])
+        self.assertEqual(fake_instance_id, disk_metrics[0]['instance_id'])
+
+    def test_get_disk_iops_metrics(self):
+        fake_vm_name = mock.sentinel.VM_NAME
+        fake_instance_id = mock.sentinel.FAKE_INSTANCE_ID
+        fake_iops_count = mock.sentinel.FAKE_IOPS_COUNT
+
+        self._utils._lookup_vm = mock.MagicMock()
+
+        mock_disk = mock.MagicMock()
+        mock_disk.InstanceID = fake_instance_id
+        self._utils._get_vm_resources = mock.MagicMock(
+            return_value=[mock_disk])
+
+        self._utils._get_metric_values = mock.MagicMock(
+            return_value=[fake_iops_count])
+
+        disk_metrics = list(self._utils.get_disk_iops_count(fake_vm_name))
+
+        self.assertEqual(1, len(disk_metrics))
+        self.assertEqual(fake_iops_count, disk_metrics[0]['iops_count'])
+        self.assertEqual(fake_instance_id, disk_metrics[0]['instance_id'])
+
+    def test_get_metric_value_instances(self):
+        mock_el1 = mock.MagicMock()
+        mock_associator = mock.MagicMock()
+        mock_el1.associators.return_value = [mock_associator]
+
+        mock_el2 = mock.MagicMock()
+        mock_el2.associators.return_value = []
+
+        returned = self._utils._get_metric_value_instances(
+            [mock_el1, mock_el2], self._FAKE_RETURN_CLASS)
+
+        self.assertEqual([mock_associator], returned)
+
+    def test_lookup_vm(self):
"fake_vm_element_name" + fake_vm = "fake_vm" + self._utils._conn.Msvm_ComputerSystem.return_value = [fake_vm] + + vm = self._utils._lookup_vm(fake_vm_element_name) + + self.assertEqual(fake_vm, vm) + + def test_lookup_vm_not_found(self): + fake_vm_element_name = "fake_vm_element_name" + self._utils._conn.Msvm_ComputerSystem.return_value = [] + + self.assertRaises(inspector.InstanceNotFoundException, + self._utils._lookup_vm, fake_vm_element_name) + + def test_lookup_vm_duplicate_found(self): + fake_vm_element_name = "fake_vm_element_name" + fake_vm = "fake_vm" + self._utils._conn.Msvm_ComputerSystem.return_value = [fake_vm, fake_vm] + + self.assertRaises(utilsv2.HyperVException, + self._utils._lookup_vm, fake_vm_element_name) + + def test_get_metric_values(self): + fake_metric_def_id = "fake_metric_def_id" + fake_metric_value = "1000" + + mock_metric = mock.MagicMock() + mock_metric.MetricDefinitionId = fake_metric_def_id + mock_metric.MetricValue = fake_metric_value + + mock_element = mock.MagicMock() + mock_element.associators.return_value = [mock_metric] + + mock_metric_def = mock.MagicMock() + mock_metric_def.Id = fake_metric_def_id + + metric_values = self._utils._get_metric_values(mock_element, + [mock_metric_def]) + + self.assertEqual(1, len(metric_values)) + self.assertEqual(int(fake_metric_value), metric_values[0]) + + def test_get_vm_setting_data(self): + mock_vm_s = mock.MagicMock() + mock_vm_s.VirtualSystemType = self._utils._VIRTUAL_SYSTEM_TYPE_REALIZED + + mock_vm = mock.MagicMock() + mock_vm.associators.return_value = [mock_vm_s] + + vm_setting_data = self._utils._get_vm_setting_data(mock_vm) + + self.assertEqual(mock_vm_s, vm_setting_data) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/compute/virt/libvirt/test_inspector.py ceilometer-5.0.0~b3/ceilometer/tests/unit/compute/virt/libvirt/test_inspector.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/compute/virt/libvirt/test_inspector.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/compute/virt/libvirt/test_inspector.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,365 @@ +#!/usr/bin/env python +# +# Copyright 2012 Red Hat, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Tests for libvirt inspector. 
+""" + +try: + import contextlib2 as contextlib # for Python < 3.3 +except ImportError: + import contextlib + +import fixtures +import mock +from oslo_utils import units +from oslotest import base + +from ceilometer.compute.virt import inspector as virt_inspector +from ceilometer.compute.virt.libvirt import inspector as libvirt_inspector + + +class TestLibvirtInspection(base.BaseTestCase): + + class fakeLibvirtError(Exception): + pass + + def setUp(self): + super(TestLibvirtInspection, self).setUp() + + class VMInstance(object): + id = 'ff58e738-12f4-4c58-acde-77617b68da56' + name = 'instance-00000001' + self.instance = VMInstance + self.inspector = libvirt_inspector.LibvirtInspector() + self.inspector.connection = mock.Mock() + libvirt_inspector.libvirt = mock.Mock() + libvirt_inspector.libvirt.VIR_DOMAIN_SHUTOFF = 5 + libvirt_inspector.libvirt.libvirtError = self.fakeLibvirtError + self.domain = mock.Mock() + self.addCleanup(mock.patch.stopall) + + def test_inspect_cpus(self): + with contextlib.ExitStack() as stack: + stack.enter_context(mock.patch.object(self.inspector.connection, + 'lookupByUUIDString', + return_value=self.domain)) + stack.enter_context(mock.patch.object(self.domain, 'info', + return_value=(0, 0, 0, + 2, 999999))) + cpu_info = self.inspector.inspect_cpus(self.instance) + self.assertEqual(2, cpu_info.number) + self.assertEqual(999999, cpu_info.time) + + def test_inspect_vnics(self): + dom_xml = """ + + + + + + + +
+    def test_inspect_vnics(self):
+        dom_xml = """
+             <domain type='kvm'>
+                 <devices>
+                    <!-- interface with no target: not reported -->
+                    <interface type='bridge'>
+                       <mac address='fa:16:3e:93:31:5a'/>
+                       <source bridge='br100'/>
+                       <model type='virtio'/>
+                    </interface>
+                    <!-- interface with no mac: not reported -->
+                    <interface type='bridge'>
+                       <source bridge='br100'/>
+                       <target dev='foo'/>
+                       <model type='virtio'/>
+                    </interface>
+                    <interface type='bridge'>
+                       <mac address='fa:16:3e:71:ec:6d'/>
+                       <source bridge='br100'/>
+                       <target dev='vnet0'/>
+                       <filterref filter=
+                        'nova-instance-00000001-fa163e71ec6d'>
+                         <parameter name='DHCPSERVER' value='10.0.0.1'/>
+                         <parameter name='IP' value='10.0.0.2'/>
+                         <parameter name='PROJMASK' value='255.255.255.0'/>
+                         <parameter name='PROJNET' value='10.0.0.0'/>
+                       </filterref>
+                    </interface>
+                    <interface type='bridge'>
+                       <mac address='fa:16:3e:71:ec:6e'/>
+                       <source bridge='br100'/>
+                       <target dev='vnet1'/>
+                       <filterref filter=
+                        'nova-instance-00000001-fa163e71ec6e'>
+                         <parameter name='DHCPSERVER' value='192.168.0.1'/>
+                         <parameter name='IP' value='192.168.0.2'/>
+                         <parameter name='PROJMASK' value='255.255.255.0'/>
+                         <parameter name='PROJNET' value='192.168.0.0'/>
+                       </filterref>
+                    </interface>
+                    <interface type='bridge'>
+                       <mac address='fa:16:3e:96:33:f0'/>
+                       <source bridge='br100'/>
+                       <target dev='vnet2'/>
+                    </interface>
+                 </devices>
+             </domain>
+        """
+
+        interface_stats = {
+            'vnet0': (1, 2, 0, 0, 3, 4, 0, 0),
+            'vnet1': (5, 6, 0, 0, 7, 8, 0, 0),
+            'vnet2': (9, 10, 0, 0, 11, 12, 0, 0),
+        }
+        interfaceStats = interface_stats.__getitem__
+
+        connection = self.inspector.connection
+        with contextlib.ExitStack() as stack:
+            stack.enter_context(mock.patch.object(connection,
+                                                  'lookupByUUIDString',
+                                                  return_value=self.domain))
+            stack.enter_context(mock.patch.object(self.domain, 'XMLDesc',
+                                                  return_value=dom_xml))
+            stack.enter_context(mock.patch.object(self.domain,
+                                                  'interfaceStats',
+                                                  side_effect=interfaceStats))
+            stack.enter_context(mock.patch.object(self.domain, 'info',
+                                                  return_value=(0, 0, 0,
+                                                                2, 999999)))
+            interfaces = list(self.inspector.inspect_vnics(self.instance))
+
+            self.assertEqual(3, len(interfaces))
+            vnic0, info0 = interfaces[0]
+            self.assertEqual('vnet0', vnic0.name)
+            self.assertEqual('fa:16:3e:71:ec:6d', vnic0.mac)
+            self.assertEqual('nova-instance-00000001-fa163e71ec6d',
+                             vnic0.fref)
+            self.assertEqual('255.255.255.0',
+                             vnic0.parameters.get('projmask'))
+            self.assertEqual('10.0.0.2', vnic0.parameters.get('ip'))
+            self.assertEqual('10.0.0.0', vnic0.parameters.get('projnet'))
+            self.assertEqual('10.0.0.1', vnic0.parameters.get('dhcpserver'))
+            self.assertEqual(1, info0.rx_bytes)
+            self.assertEqual(2, info0.rx_packets)
+            self.assertEqual(3, info0.tx_bytes)
+            self.assertEqual(4, info0.tx_packets)
+
+            vnic1, info1 = interfaces[1]
+            self.assertEqual('vnet1', vnic1.name)
+            self.assertEqual('fa:16:3e:71:ec:6e', vnic1.mac)
+            self.assertEqual('nova-instance-00000001-fa163e71ec6e',
+                             vnic1.fref)
+            self.assertEqual('255.255.255.0',
+                             vnic1.parameters.get('projmask'))
+            self.assertEqual('192.168.0.2', vnic1.parameters.get('ip'))
+            self.assertEqual('192.168.0.0', vnic1.parameters.get('projnet'))
+            self.assertEqual('192.168.0.1',
+                             vnic1.parameters.get('dhcpserver'))
+            self.assertEqual(5, info1.rx_bytes)
+            self.assertEqual(6, info1.rx_packets)
+            self.assertEqual(7, info1.tx_bytes)
+            self.assertEqual(8, info1.tx_packets)
+
+            vnic2, info2 = interfaces[2]
+            self.assertEqual('vnet2', vnic2.name)
+            self.assertEqual('fa:16:3e:96:33:f0', vnic2.mac)
+            self.assertIsNone(vnic2.fref)
+            self.assertEqual(dict(), vnic2.parameters)
+            self.assertEqual(9, info2.rx_bytes)
+            self.assertEqual(10, info2.rx_packets)
+            self.assertEqual(11, info2.tx_bytes)
+            self.assertEqual(12, info2.tx_packets)
+
+    def test_inspect_vnics_with_domain_shutoff(self):
+        connection = self.inspector.connection
+        with contextlib.ExitStack() as stack:
+            stack.enter_context(mock.patch.object(connection,
+                                                  'lookupByUUIDString',
+                                                  return_value=self.domain))
+            stack.enter_context(mock.patch.object(self.domain, 'info',
+                                                  return_value=(5, 0, 0,
+                                                                2, 999999)))
+            inspect = self.inspector.inspect_vnics
+            self.assertRaises(virt_inspector.InstanceShutOffException,
+                              list, inspect(self.instance))
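
The domain XML above was mangled in extraction; it has been reconstructed here from the assertions, so attribute details not asserted (bridge names, for instance) are best-effort. The parsing the test exercises amounts to walking devices/interface elements, skipping those without a target or a mac, and lower-casing the filterref parameter names. An illustrative, self-contained sketch of that walk (not the actual inspector code):

    import xml.etree.ElementTree as etree

    def iter_vnics_sketch(dom_xml):
        tree = etree.fromstring(dom_xml)
        for iface in tree.findall('devices/interface'):
            target = iface.find('target')
            mac = iface.find('mac')
            if target is None or mac is None:
                continue  # such interfaces are not reported by the test
            fref = iface.find('filterref')
            params = dict((p.get('name').lower(), p.get('value'))
                          for p in iface.findall('filterref/parameter'))
            yield (target.get('dev'),                   # 'vnet0', ...
                   mac.get('address'),                  # 'fa:16:3e:...'
                   fref.get('filter') if fref is not None else None,
                   params)
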
+    def test_inspect_disks(self):
+        dom_xml = """
+             <domain type='kvm'>
+                 <devices>
+                     <disk type='file' device='disk'>
+                         <driver name='qemu' type='qcow2' cache='none'/>
+                         <source file='/path/instance-00000001/disk'/>
+                         <target dev='vda' bus='virtio'/>
+                         <alias name='virtio-disk0'/>
+                     </disk>
+                 </devices>
+             </domain>
+        """
+
+        with contextlib.ExitStack() as stack:
+            stack.enter_context(mock.patch.object(self.inspector.connection,
+                                                  'lookupByUUIDString',
+                                                  return_value=self.domain))
+            stack.enter_context(mock.patch.object(self.domain, 'XMLDesc',
+                                                  return_value=dom_xml))
+            stack.enter_context(mock.patch.object(self.domain, 'blockStats',
+                                                  return_value=(1, 2, 3,
+                                                                4, -1)))
+            stack.enter_context(mock.patch.object(self.domain, 'info',
+                                                  return_value=(0, 0, 0,
+                                                                2, 999999)))
+            disks = list(self.inspector.inspect_disks(self.instance))
+
+            self.assertEqual(1, len(disks))
+            disk0, info0 = disks[0]
+            self.assertEqual('vda', disk0.device)
+            self.assertEqual(1, info0.read_requests)
+            self.assertEqual(2, info0.read_bytes)
+            self.assertEqual(3, info0.write_requests)
+            self.assertEqual(4, info0.write_bytes)
+
+    def test_inspect_disks_with_domain_shutoff(self):
+        connection = self.inspector.connection
+        with contextlib.ExitStack() as stack:
+            stack.enter_context(mock.patch.object(connection,
+                                                  'lookupByUUIDString',
+                                                  return_value=self.domain))
+            stack.enter_context(mock.patch.object(self.domain, 'info',
+                                                  return_value=(5, 0, 0,
+                                                                2, 999999)))
+            inspect = self.inspector.inspect_disks
+            self.assertRaises(virt_inspector.InstanceShutOffException,
+                              list, inspect(self.instance))
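
The (1, 2, 3, 4, -1) tuple mocked for blockStats() follows libvirt's (rd_req, rd_bytes, wr_req, wr_bytes, errs) ordering, which is exactly how the assertions unpack it. As a sketch of the mapping the test implies (the errors field is an assumption; only the four asserted fields are confirmed):

    rd_req, rd_bytes, wr_req, wr_bytes, _errs = domain.blockStats('vda')
    stats = virt_inspector.DiskStats(read_requests=rd_req,
                                     read_bytes=rd_bytes,
                                     write_requests=wr_req,
                                     write_bytes=wr_bytes,
                                     errors=_errs)
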
+    def test_inspect_memory_usage(self):
+        fake_memory_stats = {'available': 51200, 'unused': 25600}
+        connection = self.inspector.connection
+        with mock.patch.object(connection, 'lookupByUUIDString',
+                               return_value=self.domain):
+            with mock.patch.object(self.domain, 'info',
+                                   return_value=(0, 0, 51200,
+                                                 2, 999999)):
+                with mock.patch.object(self.domain, 'memoryStats',
+                                       return_value=fake_memory_stats):
+                    memory = self.inspector.inspect_memory_usage(
+                        self.instance)
+                    self.assertEqual(25600 / units.Ki, memory.usage)
+
+    def test_inspect_disk_info(self):
+        dom_xml = """
+             <domain type='kvm'>
+                 <devices>
+                     <disk type='file' device='disk'>
+                         <driver name='qemu' type='qcow2' cache='none'/>
+                         <source file='/path/instance-00000001/disk'/>
+                         <target dev='vda' bus='virtio'/>
+                         <alias name='virtio-disk0'/>
+                     </disk>
+                 </devices>
+             </domain>
+        """
+
+        with contextlib.ExitStack() as stack:
+            stack.enter_context(mock.patch.object(self.inspector.connection,
+                                                  'lookupByUUIDString',
+                                                  return_value=self.domain))
+            stack.enter_context(mock.patch.object(self.domain, 'XMLDesc',
+                                                  return_value=dom_xml))
+            stack.enter_context(mock.patch.object(self.domain, 'blockInfo',
+                                                  return_value=(1, 2, 3,
+                                                                -1)))
+            stack.enter_context(mock.patch.object(self.domain, 'info',
+                                                  return_value=(0, 0, 0,
+                                                                2, 999999)))
+            disks = list(self.inspector.inspect_disk_info(self.instance))
+
+            self.assertEqual(1, len(disks))
+            disk0, info0 = disks[0]
+            self.assertEqual('vda', disk0.device)
+            self.assertEqual(1, info0.capacity)
+            self.assertEqual(2, info0.allocation)
+            self.assertEqual(3, info0.physical)
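
test_inspect_memory_usage encodes the arithmetic the assertion relies on: memoryStats() reports KiB, and the asserted usage is consistent with "claimed" memory, available - unused, converted to MiB. A worked check with the test's numbers (the formula itself is inferred from the data, not quoted from the inspector):

    from oslo_utils import units

    stats = {'available': 51200, 'unused': 25600}   # KiB, per memoryStats()
    usage_mb = (stats['available'] - stats['unused']) / units.Ki
    assert usage_mb == 25600 / units.Ki             # 25 MB, as asserted
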
+    def test_inspect_memory_usage_with_domain_shutoff(self):
+        connection = self.inspector.connection
+        with mock.patch.object(connection, 'lookupByUUIDString',
+                               return_value=self.domain):
+            with mock.patch.object(self.domain, 'info',
+                                   return_value=(5, 0, 0,
+                                                 2, 999999)):
+                self.assertRaises(virt_inspector.InstanceShutOffException,
+                                  self.inspector.inspect_memory_usage,
+                                  self.instance)
+
+    def test_inspect_memory_usage_with_empty_stats(self):
+        connection = self.inspector.connection
+        with mock.patch.object(connection, 'lookupByUUIDString',
+                               return_value=self.domain):
+            with mock.patch.object(self.domain, 'info',
+                                   return_value=(0, 0, 51200,
+                                                 2, 999999)):
+                with mock.patch.object(self.domain, 'memoryStats',
+                                       return_value={}):
+                    self.assertRaises(virt_inspector.NoDataException,
+                                      self.inspector.inspect_memory_usage,
+                                      self.instance)
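
The two failure paths above imply the following guard order, sketched here under the assumption that both exceptions accept a message (state 5 is VIR_DOMAIN_SHUTOFF in this suite):

    def check_memory_sketch(domain):
        if domain.info()[0] == 5:           # VIR_DOMAIN_SHUTOFF
            raise virt_inspector.InstanceShutOffException('instance is off')
        stats = domain.memoryStats()
        if 'available' not in stats or 'unused' not in stats:
            raise virt_inspector.NoDataException('no memory stats reported')
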
+
+
+class TestLibvirtInspectionWithError(base.BaseTestCase):
+
+    class fakeLibvirtError(Exception):
+        pass
+
+    def setUp(self):
+        super(TestLibvirtInspectionWithError, self).setUp()
+        self.inspector = libvirt_inspector.LibvirtInspector()
+        self.useFixture(fixtures.MonkeyPatch(
+            'ceilometer.compute.virt.libvirt.inspector.'
+            'LibvirtInspector._get_connection',
+            self._dummy_get_connection))
+        libvirt_inspector.libvirt = mock.Mock()
+        libvirt_inspector.libvirt.libvirtError = self.fakeLibvirtError
+
+    @staticmethod
+    def _dummy_get_connection(*args, **kwargs):
+        raise Exception('dummy')
+
+    def test_inspect_unknown_error(self):
+        self.assertRaises(virt_inspector.InspectorException,
+                          self.inspector.inspect_cpus, 'foo')
+
+
+class TestLibvirtInitWithError(base.BaseTestCase):
+
+    def setUp(self):
+        super(TestLibvirtInitWithError, self).setUp()
+        self.inspector = libvirt_inspector.LibvirtInspector()
+        libvirt_inspector.libvirt = mock.Mock()
+
+    @mock.patch('ceilometer.compute.virt.libvirt.inspector.'
+                'LibvirtInspector._get_connection',
+                mock.Mock(return_value=None))
+    def test_init_error(self):
+        self.assertRaises(virt_inspector.NoSanityException,
+                          self.inspector.check_sanity)
+
+    @mock.patch('ceilometer.compute.virt.libvirt.inspector.'
+                'LibvirtInspector._get_connection',
+                mock.Mock(side_effect=virt_inspector.NoDataException))
+    def test_init_exception(self):
+        self.assertRaises(virt_inspector.NoDataException,
+                          self.inspector.check_sanity)
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/compute/virt/vmware/test_inspector.py ceilometer-5.0.0~b3/ceilometer/tests/unit/compute/virt/vmware/test_inspector.py
--- ceilometer-5.0.0~b2/ceilometer/tests/unit/compute/virt/vmware/test_inspector.py 1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/unit/compute/virt/vmware/test_inspector.py 2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,165 @@
+# Copyright (c) 2014 VMware, Inc.
+# All Rights Reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests for VMware vSphere inspector. +""" + +import mock +from oslo_vmware import api +from oslotest import base + +from ceilometer.compute.virt import inspector as virt_inspector +from ceilometer.compute.virt.vmware import inspector as vsphere_inspector + + +class TestVsphereInspection(base.BaseTestCase): + + def setUp(self): + api_session = api.VMwareAPISession("test_server", "test_user", + "test_password", 0, None, + create_session=False, port=7443) + vsphere_inspector.get_api_session = mock.Mock( + return_value=api_session) + self._inspector = vsphere_inspector.VsphereInspector() + self._inspector._ops = mock.MagicMock() + + super(TestVsphereInspection, self).setUp() + + def test_inspect_memory_usage(self): + fake_instance_moid = 'fake_instance_moid' + fake_instance_id = 'fake_instance_id' + fake_perf_counter_id = 'fake_perf_counter_id' + fake_memory_value = 1024.0 + fake_stat = virt_inspector.MemoryUsageStats(usage=1.0) + + def construct_mock_instance_object(fake_instance_id): + instance_object = mock.MagicMock() + instance_object.id = fake_instance_id + return instance_object + + fake_instance = construct_mock_instance_object(fake_instance_id) + self._inspector._ops.get_vm_moid.return_value = fake_instance_moid + (self._inspector._ops. + get_perf_counter_id.return_value) = fake_perf_counter_id + (self._inspector._ops.query_vm_aggregate_stats. + return_value) = fake_memory_value + memory_stat = self._inspector.inspect_memory_usage(fake_instance) + self.assertEqual(fake_stat, memory_stat) + + def test_inspect_cpu_util(self): + fake_instance_moid = 'fake_instance_moid' + fake_instance_id = 'fake_instance_id' + fake_perf_counter_id = 'fake_perf_counter_id' + fake_cpu_util_value = 60 + fake_stat = virt_inspector.CPUUtilStats(util=60) + + def construct_mock_instance_object(fake_instance_id): + instance_object = mock.MagicMock() + instance_object.id = fake_instance_id + return instance_object + + fake_instance = construct_mock_instance_object(fake_instance_id) + self._inspector._ops.get_vm_moid.return_value = fake_instance_moid + (self._inspector._ops.get_perf_counter_id. + return_value) = fake_perf_counter_id + (self._inspector._ops.query_vm_aggregate_stats. 
+ return_value) = fake_cpu_util_value * 100 + cpu_util_stat = self._inspector.inspect_cpu_util(fake_instance) + self.assertEqual(fake_stat, cpu_util_stat) + + def test_inspect_vnic_rates(self): + + # construct test data + test_vm_moid = "vm-21" + vnic1 = "vnic-1" + vnic2 = "vnic-2" + counter_name_to_id_map = { + vsphere_inspector.VC_NETWORK_RX_COUNTER: 1, + vsphere_inspector.VC_NETWORK_TX_COUNTER: 2 + } + counter_id_to_stats_map = { + 1: {vnic1: 1, vnic2: 3}, + 2: {vnic1: 2, vnic2: 4}, + } + + def get_counter_id_side_effect(counter_full_name): + return counter_name_to_id_map[counter_full_name] + + def query_stat_side_effect(vm_moid, counter_id, duration): + # assert inputs + self.assertEqual(test_vm_moid, vm_moid) + self.assertIn(counter_id, counter_id_to_stats_map) + return counter_id_to_stats_map[counter_id] + + # configure vsphere operations mock with the test data + ops_mock = self._inspector._ops + ops_mock.get_vm_moid.return_value = test_vm_moid + ops_mock.get_perf_counter_id.side_effect = get_counter_id_side_effect + ops_mock.query_vm_device_stats.side_effect = query_stat_side_effect + result = self._inspector.inspect_vnic_rates(mock.MagicMock()) + + # validate result + expected_stats = { + vnic1: virt_inspector.InterfaceRateStats(1024, 2048), + vnic2: virt_inspector.InterfaceRateStats(3072, 4096) + } + + for vnic, rates_info in result: + self.assertEqual(expected_stats[vnic.name], rates_info) + + def test_inspect_disk_rates(self): + + # construct test data + test_vm_moid = "vm-21" + disk1 = "disk-1" + disk2 = "disk-2" + counter_name_to_id_map = { + vsphere_inspector.VC_DISK_READ_RATE_CNTR: 1, + vsphere_inspector.VC_DISK_READ_REQUESTS_RATE_CNTR: 2, + vsphere_inspector.VC_DISK_WRITE_RATE_CNTR: 3, + vsphere_inspector.VC_DISK_WRITE_REQUESTS_RATE_CNTR: 4 + } + counter_id_to_stats_map = { + 1: {disk1: 1, disk2: 2}, + 2: {disk1: 300, disk2: 400}, + 3: {disk1: 5, disk2: 6}, + 4: {disk1: 700}, + } + + def get_counter_id_side_effect(counter_full_name): + return counter_name_to_id_map[counter_full_name] + + def query_stat_side_effect(vm_moid, counter_id, duration): + # assert inputs + self.assertEqual(test_vm_moid, vm_moid) + self.assertIn(counter_id, counter_id_to_stats_map) + return counter_id_to_stats_map[counter_id] + + # configure vsphere operations mock with the test data + ops_mock = self._inspector._ops + ops_mock.get_vm_moid.return_value = test_vm_moid + ops_mock.get_perf_counter_id.side_effect = get_counter_id_side_effect + ops_mock.query_vm_device_stats.side_effect = query_stat_side_effect + + result = self._inspector.inspect_disk_rates(mock.MagicMock()) + + # validate result + expected_stats = { + disk1: virt_inspector.DiskRateStats(1024, 300, 5120, 700), + disk2: virt_inspector.DiskRateStats(2048, 400, 6144, 0) + } + + actual_stats = dict((disk.device, rates) for (disk, rates) in result) + self.assertEqual(expected_stats, actual_stats) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/compute/virt/vmware/test_vsphere_operations.py ceilometer-5.0.0~b3/ceilometer/tests/unit/compute/virt/vmware/test_vsphere_operations.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/compute/virt/vmware/test_vsphere_operations.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/compute/virt/vmware/test_vsphere_operations.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,174 @@ +# Copyright (c) 2014 VMware, Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from oslo_vmware import api +from oslotest import base + +from ceilometer.compute.virt.vmware import vsphere_operations + + +class VsphereOperationsTest(base.BaseTestCase): + + def setUp(self): + api_session = api.VMwareAPISession("test_server", "test_user", + "test_password", 0, None, + create_session=False) + api_session._vim = mock.MagicMock() + self._vsphere_ops = vsphere_operations.VsphereOperations(api_session, + 1000) + super(VsphereOperationsTest, self).setUp() + + def test_get_vm_moid(self): + + vm1_moid = "vm-1" + vm2_moid = "vm-2" + vm1_instance = "0a651a71-142c-4813-aaa6-42e5d5c80d85" + vm2_instance = "db1d2533-6bef-4cb2-aef3-920e109f5693" + + def construct_mock_vm_object(vm_moid, vm_instance): + vm_object = mock.MagicMock() + vm_object.obj.value = vm_moid + vm_object.propSet[0].val = vm_instance + return vm_object + + def retrieve_props_side_effect(pc, specSet, options): + # assert inputs + self.assertEqual(self._vsphere_ops._max_objects, + options.maxObjects) + self.assertEqual(vsphere_operations.VM_INSTANCE_ID_PROPERTY, + specSet[0].pathSet[0]) + + # mock return result + vm1 = construct_mock_vm_object(vm1_moid, vm1_instance) + vm2 = construct_mock_vm_object(vm2_moid, vm2_instance) + result = mock.MagicMock() + result.objects.__iter__.return_value = [vm1, vm2] + return result + + vim_mock = self._vsphere_ops._api_session._vim + vim_mock.RetrievePropertiesEx.side_effect = retrieve_props_side_effect + vim_mock.ContinueRetrievePropertiesEx.return_value = None + + vm_moid = self._vsphere_ops.get_vm_moid(vm1_instance) + self.assertEqual(vm1_moid, vm_moid) + + vm_moid = self._vsphere_ops.get_vm_moid(vm2_instance) + self.assertEqual(vm2_moid, vm_moid) + + def test_query_vm_property(self): + + vm_moid = "vm-21" + vm_property_name = "runtime.powerState" + vm_property_val = "poweredON" + + def retrieve_props_side_effect(pc, specSet, options): + # assert inputs + self.assertEqual(vm_moid, specSet[0].obj.value) + self.assertEqual(vm_property_name, specSet[0].pathSet[0]) + + # mock return result + result = mock.MagicMock() + result.objects[0].propSet[0].val = vm_property_val + return result + + vim_mock = self._vsphere_ops._api_session._vim + vim_mock.RetrievePropertiesEx.side_effect = retrieve_props_side_effect + + actual_val = self._vsphere_ops.query_vm_property(vm_moid, + vm_property_name) + self.assertEqual(vm_property_val, actual_val) + + def test_get_perf_counter_id(self): + + def construct_mock_counter_info(group_name, counter_name, rollup_type, + counter_id): + counter_info = mock.MagicMock() + counter_info.groupInfo.key = group_name + counter_info.nameInfo.key = counter_name + counter_info.rollupType = rollup_type + counter_info.key = counter_id + return counter_info + + def retrieve_props_side_effect(pc, specSet, options): + # assert inputs + self.assertEqual(vsphere_operations.PERF_COUNTER_PROPERTY, + specSet[0].pathSet[0]) + + # mock return result + counter_info1 = construct_mock_counter_info("a", "b", "c", 1) + 
counter_info2 = construct_mock_counter_info("x", "y", "z", 2) + result = mock.MagicMock() + (result.objects[0].propSet[0].val.PerfCounterInfo.__iter__. + return_value) = [counter_info1, counter_info2] + return result + + vim_mock = self._vsphere_ops._api_session._vim + vim_mock.RetrievePropertiesEx.side_effect = retrieve_props_side_effect + + counter_id = self._vsphere_ops.get_perf_counter_id("a:b:c") + self.assertEqual(1, counter_id) + + counter_id = self._vsphere_ops.get_perf_counter_id("x:y:z") + self.assertEqual(2, counter_id) + + def test_query_vm_stats(self): + + vm_moid = "vm-21" + device1 = "device-1" + device2 = "device-2" + device3 = "device-3" + counter_id = 5 + + def construct_mock_metric_series(device_name, stat_values): + metric_series = mock.MagicMock() + metric_series.value = stat_values + metric_series.id.instance = device_name + return metric_series + + def vim_query_perf_side_effect(perf_manager, querySpec): + # assert inputs + self.assertEqual(vm_moid, querySpec[0].entity.value) + self.assertEqual(counter_id, querySpec[0].metricId[0].counterId) + self.assertEqual(vsphere_operations.VC_REAL_TIME_SAMPLING_INTERVAL, + querySpec[0].intervalId) + + # mock return result + perf_stats = mock.MagicMock() + perf_stats[0].sampleInfo = ["s1", "s2", "s3"] + perf_stats[0].value.__iter__.return_value = [ + construct_mock_metric_series(None, [111, 222, 333]), + construct_mock_metric_series(device1, [100, 200, 300]), + construct_mock_metric_series(device2, [10, 20, 30]), + construct_mock_metric_series(device3, [1, 2, 3]) + ] + return perf_stats + + vim_mock = self._vsphere_ops._api_session._vim + vim_mock.QueryPerf.side_effect = vim_query_perf_side_effect + ops = self._vsphere_ops + + # test aggregate stat + stat_val = ops.query_vm_aggregate_stats(vm_moid, counter_id, 60) + self.assertEqual(222, stat_val) + + # test per-device(non-aggregate) stats + expected_device_stats = { + device1: 200, + device2: 20, + device3: 2 + } + stats = ops.query_vm_device_stats(vm_moid, counter_id, 60) + self.assertEqual(expected_device_stats, stats) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/compute/virt/xenapi/test_inspector.py ceilometer-5.0.0~b3/ceilometer/tests/unit/compute/virt/xenapi/test_inspector.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/compute/virt/xenapi/test_inspector.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/compute/virt/xenapi/test_inspector.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,160 @@ +# Copyright 2014 Intel +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Tests for xenapi inspector. 
+""" + +import mock +from oslotest import base + +from ceilometer.compute.virt import inspector as virt_inspector +from ceilometer.compute.virt.xenapi import inspector as xenapi_inspector + + +class TestXenapiInspection(base.BaseTestCase): + + def setUp(self): + api_session = mock.Mock() + xenapi_inspector.get_api_session = mock.Mock(return_value=api_session) + self.inspector = xenapi_inspector.XenapiInspector() + + super(TestXenapiInspection, self).setUp() + + def test_inspect_cpu_util(self): + fake_instance = {'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name', + 'id': 'fake_instance_id'} + fake_stat = virt_inspector.CPUUtilStats(util=40) + + def fake_xenapi_request(method, args): + metrics_rec = { + 'memory_actual': '536870912', + 'VCPUs_number': '1', + 'VCPUs_utilisation': {'0': 0.4, } + } + + if method == 'VM.get_by_name_label': + return ['vm_ref'] + elif method == 'VM.get_metrics': + return 'metrics_ref' + elif method == 'VM_metrics.get_record': + return metrics_rec + else: + return None + + session = self.inspector.session + with mock.patch.object(session, 'xenapi_request', + side_effect=fake_xenapi_request): + cpu_util_stat = self.inspector.inspect_cpu_util(fake_instance) + self.assertEqual(fake_stat, cpu_util_stat) + + def test_inspect_memory_usage(self): + fake_instance = {'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name', + 'id': 'fake_instance_id'} + fake_stat = virt_inspector.MemoryUsageStats(usage=128) + + def fake_xenapi_request(method, args): + metrics_rec = { + 'memory_actual': '134217728', + } + + if method == 'VM.get_by_name_label': + return ['vm_ref'] + elif method == 'VM.get_metrics': + return 'metrics_ref' + elif method == 'VM_metrics.get_record': + return metrics_rec + else: + return None + + session = self.inspector.session + with mock.patch.object(session, 'xenapi_request', + side_effect=fake_xenapi_request): + memory_stat = self.inspector.inspect_memory_usage(fake_instance) + self.assertEqual(fake_stat, memory_stat) + + def test_inspect_vnic_rates(self): + fake_instance = {'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name', + 'id': 'fake_instance_id'} + + def fake_xenapi_request(method, args): + vif_rec = { + 'metrics': 'vif_metrics_ref', + 'uuid': 'vif_uuid', + 'MAC': 'vif_mac', + } + + vif_metrics_rec = { + 'io_read_kbs': '1', + 'io_write_kbs': '2', + } + if method == 'VM.get_by_name_label': + return ['vm_ref'] + elif method == 'VM.get_VIFs': + return ['vif_ref'] + elif method == 'VIF.get_record': + return vif_rec + elif method == 'VIF.get_metrics': + return 'vif_metrics_ref' + elif method == 'VIF_metrics.get_record': + return vif_metrics_rec + else: + return None + + session = self.inspector.session + with mock.patch.object(session, 'xenapi_request', + side_effect=fake_xenapi_request): + interfaces = list(self.inspector.inspect_vnic_rates(fake_instance)) + + self.assertEqual(1, len(interfaces)) + vnic0, info0 = interfaces[0] + self.assertEqual('vif_uuid', vnic0.name) + self.assertEqual('vif_mac', vnic0.mac) + self.assertEqual(1024, info0.rx_bytes_rate) + self.assertEqual(2048, info0.tx_bytes_rate) + + def test_inspect_disk_rates(self): + fake_instance = {'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name', + 'id': 'fake_instance_id'} + + def fake_xenapi_request(method, args): + vbd_rec = { + 'device': 'xvdd' + } + + vbd_metrics_rec = { + 'io_read_kbs': '1', + 'io_write_kbs': '2' + } + if method == 'VM.get_by_name_label': + return ['vm_ref'] + elif method == 'VM.get_VBDs': + return ['vbd_ref'] + elif method == 'VBD.get_record': + return vbd_rec + 
elif method == 'VBD.get_metrics': + return 'vbd_metrics_ref' + elif method == 'VBD_metrics.get_record': + return vbd_metrics_rec + else: + return None + + session = self.inspector.session + with mock.patch.object(session, 'xenapi_request', + side_effect=fake_xenapi_request): + disks = list(self.inspector.inspect_disk_rates(fake_instance)) + + self.assertEqual(1, len(disks)) + disk0, info0 = disks[0] + self.assertEqual('xvdd', disk0.device) + self.assertEqual(1024, info0.read_bytes_rate) + self.assertEqual(2048, info0.write_bytes_rate) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/database/test_notifications.py ceilometer-5.0.0~b3/ceilometer/tests/unit/database/test_notifications.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/database/test_notifications.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/database/test_notifications.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,90 @@ +# +# Copyright 2015 Hewlett Packard +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import mock +from oslo_utils import timeutils +from oslotest import base + +from ceilometer.database import notifications +from ceilometer import sample + +NOW = timeutils.utcnow().isoformat() + +TENANT_ID = u'76538754af6548f5b53cf9af2d35d582' +USER_ID = u'b70ece400e4e45c187168c40fa42ff7a' +INSTANCE_STATE = u'active' +INSTANCE_TYPE = u'm1.rd-tiny' +RESOURCE_ID = u'a8b55824-e731-40a3-a32d-de81474d74b2' +SERVICE_ID = u'2f3ff068-2bfb-4f70-9a9d-a6bb65bc084b' +NOVA_INSTANCE_ID = u'1cf6ce1b-708b-4e6a-8ecf-2b60c8ccd435' +PUBLISHER_ID = u'trove' + + +def _trove_notification_for(operation): + return { + u'event_type': '%s.instance.%s' % (notifications.SERVICE, + operation), + u'priority': u'INFO', + u'timestamp': NOW, + u'publisher_id': PUBLISHER_ID, + u'message_id': u'67ba0a2a-32bd-4cdf-9bfb-ef9cefcd0f63', + u'payload': { + u'state_description': INSTANCE_STATE, + u'user_id': USER_ID, + u'audit_period_beginning': u'2015-07-10T20:05:29.870091Z', + u'tenant_id': TENANT_ID, + u'created_at': u'2015-06-29T20:52:12.000000', + u'instance_type_id': u'7', + u'launched_at': u'2015-06-29T20:52:12.000000', + u'instance_id': RESOURCE_ID, + u'instance_type': INSTANCE_TYPE, + u'state': INSTANCE_STATE, + u'service_id': SERVICE_ID, + u'nova_instance_id': NOVA_INSTANCE_ID, + u'display_name': u'test', + u'instance_name': u'test', + u'region': u'LOCAL_DEV', + u'audit_period_ending': u'2015-07-10T21:05:29.870091Z' + }, + + } + + +class TestNotification(base.BaseTestCase): + def _verify_common_sample(self, actual, operation): + self.assertIsNotNone(actual) + self.assertEqual('%s.instance.%s' % (notifications.SERVICE, operation), + actual.name) + self.assertEqual(NOW, actual.timestamp) + self.assertEqual(sample.TYPE_CUMULATIVE, actual.type) + self.assertEqual(TENANT_ID, actual.project_id) + self.assertEqual(RESOURCE_ID, actual.resource_id) + self.assertEqual(USER_ID, actual.user_id) + self.assertEqual(3600, actual.volume) + self.assertEqual('s', actual.unit) + + metadata = actual.resource_metadata + 
self.assertEqual(PUBLISHER_ID, metadata.get('host')) + + def _test_operation(self, operation): + notif = _trove_notification_for(operation) + handler = notifications.InstanceExists(mock.Mock()) + data = list(handler.process_notification(notif)) + self.assertEqual(1, len(data)) + self._verify_common_sample(data[0], operation) + + def test_exists(self): + self._test_operation('exists') diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/dispatcher/test_db.py ceilometer-5.0.0~b3/ceilometer/tests/unit/dispatcher/test_db.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/dispatcher/test_db.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/dispatcher/test_db.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,117 @@ +# +# Copyright 2013 IBM Corp +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import datetime +import uuid + +import mock +from oslo_config import fixture as fixture_config +from oslotest import base + +from ceilometer.dispatcher import database +from ceilometer.event.storage import models as event_models +from ceilometer.publisher import utils + + +class TestDispatcherDB(base.BaseTestCase): + + def setUp(self): + super(TestDispatcherDB, self).setUp() + self.CONF = self.useFixture(fixture_config.Config()).conf + self.CONF.set_override('connection', 'sqlite://', group='database') + self.dispatcher = database.DatabaseDispatcher(self.CONF) + self.ctx = None + + def test_event_conn(self): + event = event_models.Event(uuid.uuid4(), 'test', + datetime.datetime(2012, 7, 2, 13, 53, 40), + [], {}).serialize() + with mock.patch.object(self.dispatcher.event_conn, + 'record_events') as record_events: + self.dispatcher.record_events(event) + self.assertTrue(record_events.called) + + def test_valid_message(self): + msg = {'counter_name': 'test', + 'resource_id': self.id(), + 'counter_volume': 1, + } + msg['message_signature'] = utils.compute_signature( + msg, self.CONF.publisher.telemetry_secret, + ) + + with mock.patch.object(self.dispatcher.meter_conn, + 'record_metering_data') as record_metering_data: + self.dispatcher.record_metering_data(msg) + + record_metering_data.assert_called_once_with(msg) + + def test_invalid_message(self): + msg = {'counter_name': 'test', + 'resource_id': self.id(), + 'counter_volume': 1, + 'message_signature': 'invalid-signature'} + + class ErrorConnection(object): + + called = False + + def record_metering_data(self, data): + self.called = True + + self.dispatcher._meter_conn = ErrorConnection() + + self.dispatcher.record_metering_data(msg) + + if self.dispatcher.meter_conn.called: + self.fail('Should not have called the storage connection') + + def test_timestamp_conversion(self): + msg = {'counter_name': 'test', + 'resource_id': self.id(), + 'counter_volume': 1, + 'timestamp': '2012-07-02T13:53:40Z', + } + msg['message_signature'] = utils.compute_signature( + msg, self.CONF.publisher.telemetry_secret, + ) + + expected = msg.copy() + expected['timestamp'] = datetime.datetime(2012, 7, 2, 13, 53, 40) + + with 
mock.patch.object(self.dispatcher.meter_conn, + 'record_metering_data') as record_metering_data: + self.dispatcher.record_metering_data(msg) + + record_metering_data.assert_called_once_with(expected) + + def test_timestamp_tzinfo_conversion(self): + msg = {'counter_name': 'test', + 'resource_id': self.id(), + 'counter_volume': 1, + 'timestamp': '2012-09-30T15:31:50.262-08:00', + } + msg['message_signature'] = utils.compute_signature( + msg, self.CONF.publisher.telemetry_secret, + ) + + expected = msg.copy() + expected['timestamp'] = datetime.datetime(2012, 9, 30, 23, + 31, 50, 262000) + + with mock.patch.object(self.dispatcher.meter_conn, + 'record_metering_data') as record_metering_data: + self.dispatcher.record_metering_data(msg) + + record_metering_data.assert_called_once_with(expected) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/dispatcher/test_file.py ceilometer-5.0.0~b3/ceilometer/tests/unit/dispatcher/test_file.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/dispatcher/test_file.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/dispatcher/test_file.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,99 @@ +# +# Copyright 2013 IBM Corp +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import logging.handlers +import os +import tempfile + +from oslo_config import fixture as fixture_config +from oslotest import base + +from ceilometer.dispatcher import file +from ceilometer.publisher import utils + + +class TestDispatcherFile(base.BaseTestCase): + + def setUp(self): + super(TestDispatcherFile, self).setUp() + self.CONF = self.useFixture(fixture_config.Config()).conf + + def test_file_dispatcher_with_all_config(self): + # Create a temporaryFile to get a file name + tf = tempfile.NamedTemporaryFile('r') + filename = tf.name + tf.close() + + self.CONF.dispatcher_file.file_path = filename + self.CONF.dispatcher_file.max_bytes = 50 + self.CONF.dispatcher_file.backup_count = 5 + dispatcher = file.FileDispatcher(self.CONF) + + # The number of the handlers should be 1 + self.assertEqual(1, len(dispatcher.log.handlers)) + # The handler should be RotatingFileHandler + handler = dispatcher.log.handlers[0] + self.assertIsInstance(handler, + logging.handlers.RotatingFileHandler) + + msg = {'counter_name': 'test', + 'resource_id': self.id(), + 'counter_volume': 1, + } + msg['message_signature'] = utils.compute_signature( + msg, self.CONF.publisher.telemetry_secret, + ) + + # The record_metering_data method should exist and not produce errors. + dispatcher.record_metering_data(msg) + # After the method call above, the file should have been created. 
+ self.assertTrue(os.path.exists(handler.baseFilename)) + + def test_file_dispatcher_with_path_only(self): + # Create a temporaryFile to get a file name + tf = tempfile.NamedTemporaryFile('r') + filename = tf.name + tf.close() + + self.CONF.dispatcher_file.file_path = filename + self.CONF.dispatcher_file.max_bytes = 0 + self.CONF.dispatcher_file.backup_count = 0 + dispatcher = file.FileDispatcher(self.CONF) + + # The number of the handlers should be 1 + self.assertEqual(1, len(dispatcher.log.handlers)) + # The handler should be RotatingFileHandler + handler = dispatcher.log.handlers[0] + self.assertIsInstance(handler, + logging.FileHandler) + + msg = {'counter_name': 'test', + 'resource_id': self.id(), + 'counter_volume': 1, + } + msg['message_signature'] = utils.compute_signature( + msg, self.CONF.publisher.telemetry_secret, + ) + + # The record_metering_data method should exist and not produce errors. + dispatcher.record_metering_data(msg) + # After the method call above, the file should have been created. + self.assertTrue(os.path.exists(handler.baseFilename)) + + def test_file_dispatcher_with_no_path(self): + self.CONF.dispatcher_file.file_path = None + dispatcher = file.FileDispatcher(self.CONF) + + # The log should be None + self.assertIsNone(dispatcher.log) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/dispatcher/test_gnocchi.py ceilometer-5.0.0~b3/ceilometer/tests/unit/dispatcher/test_gnocchi.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/dispatcher/test_gnocchi.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/dispatcher/test_gnocchi.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,479 @@ +# +# Copyright 2014 eNovance +# +# Authors: Mehdi Abaakouk +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
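
The DispatcherWorkflowTest scenarios further down encode a create-on-demand protocol against the Gnocchi REST API: post the measures first, and only on 404 create the resource (409 meaning the resource already exists and only the metric is missing), then retry the measures once and finally patch mutable resource attributes. Roughly, with every name in this sketch hypothetical rather than the dispatcher's real API:

    def push_sketch(api, resource, metric, measures):
        status = api.post_measures(resource, metric, measures)
        if status == 404:                      # resource or metric unknown
            if api.post_resource(resource) == 409:
                api.post_metric(resource, metric)   # resource existed
            status = api.post_measures(resource, metric, measures)  # retry
        if status == 204:
            api.patch_resource(resource)       # sync mutable attributes
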
+
+import json
+import os
+import uuid
+
+import mock
+from oslo_config import fixture as config_fixture
+from oslo_utils import fileutils
+from oslotest import mockpatch
+import requests
+import six
+import six.moves.urllib.parse as urlparse
+import tempfile
+import testscenarios
+import yaml
+
+from ceilometer.dispatcher import gnocchi
+from ceilometer import service as ceilometer_service
+from ceilometer.tests import base
+
+load_tests = testscenarios.load_tests_apply_scenarios
+
+
+class json_matcher(object):
+    def __init__(self, ref):
+        self.ref = ref
+
+    def __eq__(self, obj):
+        return self.ref == json.loads(obj)
+
+    def __repr__(self):
+        return "<json_matcher: %s>" % self.ref
+
+
+class DispatcherTest(base.BaseTestCase):
+
+    def setUp(self):
+        super(DispatcherTest, self).setUp()
+        self.conf = self.useFixture(config_fixture.Config())
+        ceilometer_service.prepare_service(argv=[], config_files=[])
+        self.conf.config(
+            resources_definition_file=self.path_get(
+                'etc/ceilometer/gnocchi_resources.yaml'),
+            group="dispatcher_gnocchi"
+        )
+        self.resource_id = str(uuid.uuid4())
+        self.samples = [{
+            'counter_name': 'disk.root.size',
+            'counter_type': 'gauge',
+            'counter_volume': '2',
+            'user_id': 'test_user',
+            'project_id': 'test_project',
+            'source': 'openstack',
+            'timestamp': '2012-05-08 20:23:48.028195',
+            'resource_id': self.resource_id,
+            'resource_metadata': {
+                'host': 'foo',
+                'image_ref_url': 'imageref!',
+                'instance_flavor_id': 1234,
+                'display_name': 'myinstance',
+            }},
+            {
+                'counter_name': 'disk.root.size',
+                'counter_type': 'gauge',
+                'counter_volume': '2',
+                'user_id': 'test_user',
+                'project_id': 'test_project',
+                'source': 'openstack',
+                'timestamp': '2014-05-08 20:23:48.028195',
+                'resource_id': self.resource_id,
+                'resource_metadata': {
+                    'host': 'foo',
+                    'image_ref_url': 'imageref!',
+                    'instance_flavor_id': 1234,
+                    'display_name': 'myinstance',
+                }
+            }]
+
+        ks_client = mock.Mock(auth_token='fake_token')
+        ks_client.tenants.find.return_value = mock.Mock(
+            name='gnocchi', id='a2d42c23-d518-46b6-96ab-3fba2e146859')
+        self.useFixture(mockpatch.Patch(
+            'ceilometer.keystone_client.get_client',
+            return_value=ks_client))
+        self.conf.conf.dispatcher_gnocchi.filter_service_activity = True
+
+    def test_config_load(self):
+        self.conf.config(filter_service_activity=False,
+                         group='dispatcher_gnocchi')
+        d = gnocchi.GnocchiDispatcher(self.conf.conf)
+        names = [rd.cfg['resource_type'] for rd in d.resources_definition]
+        self.assertIn('instance', names)
+        self.assertIn('volume', names)
+
+    def test_broken_config_load(self):
+        contents = [("---\n"
+                     "resources:\n"
+                     "  - resource_type: foobar\n"),
+                    ("---\n"
+                     "resources:\n"
+                     "  - resource_type: 0\n"),
+                    ("---\n"
+                     "resources:\n"
+                     "  - sample_types: ['foo', 'bar']\n"),
+                    ("---\n"
+                     "resources:\n"
+                     "  - sample_types: foobar\n"
+                     "  - resource_type: foobar\n"),
+                    ]
+
+        for content in contents:
+            if six.PY3:
+                content = content.encode('utf-8')
+
+            temp = fileutils.write_to_tempfile(content=content,
+                                               prefix='gnocchi_resources',
+                                               suffix='.yaml')
+            self.addCleanup(os.remove, temp)
+            self.conf.config(filter_service_activity=False,
+                             resources_definition_file=temp,
+                             group='dispatcher_gnocchi')
+            self.assertRaises(gnocchi.ResourcesDefinitionException,
+                              gnocchi.GnocchiDispatcher, self.conf.conf)
+
+    @mock.patch('ceilometer.dispatcher.gnocchi.GnocchiDispatcher'
+                '._process_resource')
+    def _do_test_activity_filter(self, expected_samples,
+                                 fake_process_resource):
+
+        def assert_samples(resource_id, metric_grouped_samples):
+            samples = []
+            for metric_name, s
in metric_grouped_samples: + samples.extend(list(s)) + self.assertEqual(expected_samples, samples) + + fake_process_resource.side_effect = assert_samples + + d = gnocchi.GnocchiDispatcher(self.conf.conf) + d.record_metering_data(self.samples) + + fake_process_resource.assert_called_with(self.resource_id, + mock.ANY) + + def test_archive_policy_map_config(self): + archive_policy_map = yaml.dump({ + 'foo.*': 'low' + }) + archive_policy_cfg_file = tempfile.NamedTemporaryFile( + mode='w+b', prefix="foo", suffix=".yaml") + archive_policy_cfg_file.write(archive_policy_map.encode()) + archive_policy_cfg_file.seek(0) + self.conf.conf.dispatcher_gnocchi.archive_policy_file = ( + archive_policy_cfg_file.name) + d = gnocchi.GnocchiDispatcher(self.conf.conf) + legacy = d._load_archive_policy(self.conf.conf) + self.assertEqual(legacy.get('foo.disk.rate'), "low") + archive_policy_cfg_file.close() + + def test_activity_filter_match_project_id(self): + self.samples[0]['project_id'] = ( + 'a2d42c23-d518-46b6-96ab-3fba2e146859') + self._do_test_activity_filter([self.samples[1]]) + + def test_activity_filter_match_swift_event(self): + self.samples[0]['counter_name'] = 'storage.api.request' + self.samples[0]['resource_id'] = 'a2d42c23-d518-46b6-96ab-3fba2e146859' + self._do_test_activity_filter([self.samples[1]]) + + def test_activity_filter_nomatch(self): + self._do_test_activity_filter(self.samples) + + +class MockResponse(mock.NonCallableMock): + def __init__(self, code): + text = {500: 'Internal Server Error', + 404: 'Not Found', + 204: 'Created', + 409: 'Conflict', + }.get(code) + super(MockResponse, self).__init__(spec=requests.Response, + status_code=code, + text=text) + + +class DispatcherWorkflowTest(base.BaseTestCase, + testscenarios.TestWithScenarios): + + sample_scenarios = [ + ('disk.root.size', dict( + sample={ + 'counter_name': 'disk.root.size', + 'counter_type': 'gauge', + 'counter_volume': '2', + 'user_id': 'test_user', + 'project_id': 'test_project', + 'source': 'openstack', + 'timestamp': '2012-05-08 20:23:48.028195', + 'resource_metadata': { + 'host': 'foo', + 'image_ref_url': 'imageref!', + 'instance_flavor_id': 1234, + 'display_name': 'myinstance', + } + }, + measures_attributes=[{ + 'timestamp': '2012-05-08 20:23:48.028195', + 'value': '2' + }], + postable_attributes={ + 'user_id': 'test_user', + 'project_id': 'test_project', + }, + patchable_attributes={ + 'host': 'foo', + 'image_ref': 'imageref!', + 'flavor_id': 1234, + 'display_name': 'myinstance', + }, + metric_names=[ + 'instance', 'disk.root.size', 'disk.ephemeral.size', + 'memory', 'vcpus', 'memory.usage', 'memory.resident', + 'cpu', 'cpu_util', 'vcpus', 'disk.read.requests', + 'disk.read.requests.rate', 'disk.write.requests', + 'disk.write.requests.rate', 'disk.read.bytes', + 'disk.read.bytes.rate', 'disk.write.bytes', + 'disk.write.bytes.rate', 'disk.device.read.requests', + 'disk.device.read.requests.rate', 'disk.device.write.requests', + 'disk.device.write.requests.rate', 'disk.device.read.bytes', + 'disk.device.read.bytes.rate', 'disk.device.write.bytes', + 'disk.device.write.bytes.rate', 'disk.latency', 'disk.iops', + 'disk.device.latency', 'disk.device.iops', 'disk.capacity', + 'disk.allocation', 'disk.usage', 'disk.device.capacity', + 'disk.device.allocation', 'disk.device.usage', + 'network.outgoing.packets.rate', + 'network.incoming.packets.rate', 'network.outgoing.packets', + 'network.incoming.packets', 'network.outgoing.bytes.rate', + 'network.incoming.bytes.rate', 'network.outgoing.bytes', + 
'network.incoming.bytes'], + resource_type='instance')), + ('hardware.ipmi.node.power', dict( + sample={ + 'counter_name': 'hardware.ipmi.node.power', + 'counter_type': 'gauge', + 'counter_volume': '2', + 'user_id': 'test_user', + 'project_id': 'test_project', + 'source': 'openstack', + 'timestamp': '2012-05-08 20:23:48.028195', + 'resource_metadata': { + 'useless': 'not_used', + } + }, + measures_attributes=[{ + 'timestamp': '2012-05-08 20:23:48.028195', + 'value': '2' + }], + postable_attributes={ + 'user_id': 'test_user', + 'project_id': 'test_project', + }, + patchable_attributes={ + }, + metric_names=[ + 'hardware.ipmi.node.power', 'hardware.ipmi.node.temperature', + 'hardware.ipmi.node.inlet_temperature', + 'hardware.ipmi.node.outlet_temperature', + 'hardware.ipmi.node.fan', 'hardware.ipmi.node.current', + 'hardware.ipmi.node.voltage', 'hardware.ipmi.node.airflow', + 'hardware.ipmi.node.cups', 'hardware.ipmi.node.cpu_util', + 'hardware.ipmi.node.mem_util', 'hardware.ipmi.node.io_util' + ], + resource_type='ipmi')), + ] + + worflow_scenarios = [ + ('normal_workflow', dict(measure=204, post_resource=None, metric=None, + measure_retry=None, patch_resource=204)), + ('new_resource', dict(measure=404, post_resource=204, metric=None, + measure_retry=204, patch_resource=204)), + ('new_resource_fail', dict(measure=404, post_resource=500, metric=None, + measure_retry=None, patch_resource=None)), + ('resource_update_fail', dict(measure=204, post_resource=None, + metric=None, measure_retry=None, + patch_resource=500)), + ('new_metric', dict(measure=404, post_resource=409, metric=204, + measure_retry=204, patch_resource=204)), + ('new_metric_fail', dict(measure=404, post_resource=409, metric=500, + measure_retry=None, patch_resource=None)), + ('retry_fail', dict(measure=404, post_resource=409, metric=409, + measure_retry=500, patch_resource=None)), + ('measure_fail', dict(measure=500, post_resource=None, metric=None, + measure_retry=None, patch_resource=None)), + ('measure_auth', dict(measure=401, post_resource=None, metric=None, + measure_retry=None, patch_resource=204)), + ] + + @classmethod + def generate_scenarios(cls): + cls.scenarios = testscenarios.multiply_scenarios(cls.sample_scenarios, + cls.worflow_scenarios) + + def setUp(self): + super(DispatcherWorkflowTest, self).setUp() + self.conf = self.useFixture(config_fixture.Config()) + # Set this explicitly to avoid conflicts with any existing + # configuration. 
+ self.conf.config(url='http://localhost:8041', + group='dispatcher_gnocchi') + ks_client = mock.Mock(auth_token='fake_token') + ks_client.tenants.find.return_value = mock.Mock( + name='gnocchi', id='a2d42c23-d518-46b6-96ab-3fba2e146859') + self.useFixture(mockpatch.Patch( + 'ceilometer.keystone_client.get_client', + return_value=ks_client)) + self.ks_client = ks_client + + ceilometer_service.prepare_service(argv=[], config_files=[]) + self.conf.config( + resources_definition_file=self.path_get( + 'etc/ceilometer/gnocchi_resources.yaml'), + group="dispatcher_gnocchi" + ) + + self.sample['resource_id'] = str(uuid.uuid4()) + "/foobar" + + @mock.patch('ceilometer.dispatcher.gnocchi.LOG') + @mock.patch('ceilometer.dispatcher.gnocchi_client.LOG') + @mock.patch('ceilometer.dispatcher.gnocchi_client.requests') + def test_workflow(self, fake_requests, client_logger, logger): + self.dispatcher = gnocchi.GnocchiDispatcher(self.conf.conf) + + base_url = self.dispatcher.conf.dispatcher_gnocchi.url + url_params = { + 'url': urlparse.urljoin(base_url, '/v1/resource'), + # NOTE(sileht): we don't use urlparse.quote here + # to ensure / is converted in %2F + 'resource_id': self.sample['resource_id'].replace("/", "%2F"), + 'resource_type': self.resource_type, + 'metric_name': self.sample['counter_name'] + } + headers = {'Content-Type': 'application/json', + 'X-Auth-Token': 'fake_token'} + + patch_responses = [] + post_responses = [] + + # This is needed to mock Exception in py3 + fake_requests.ConnectionError = requests.ConnectionError + + expected_calls = [ + mock.call.session(), + mock.call.adapters.HTTPAdapter(pool_block=True), + mock.call.session().mount('http://', mock.ANY), + mock.call.session().mount('https://', mock.ANY), + mock.call.session().post( + "%(url)s/%(resource_type)s/%(resource_id)s/" + "metric/%(metric_name)s/measures" % url_params, + headers=headers, + data=json_matcher(self.measures_attributes)) + + ] + post_responses.append(MockResponse(self.measure)) + + if self.measure == 401: + type(self.ks_client).auth_token = mock.PropertyMock( + side_effect=['fake_token', 'new_token', 'new_token', + 'new_token', 'new_token']) + headers = {'Content-Type': 'application/json', + 'X-Auth-Token': 'new_token'} + + expected_calls.append( + mock.call.session().post( + "%(url)s/%(resource_type)s/%(resource_id)s/" + "metric/%(metric_name)s/measures" % url_params, + headers=headers, + data=json_matcher(self.measures_attributes))) + + post_responses.append(MockResponse(200)) + + if self.post_resource: + attributes = self.postable_attributes.copy() + attributes.update(self.patchable_attributes) + attributes['id'] = self.sample['resource_id'] + attributes['metrics'] = dict((metric_name, + {'archive_policy_name': 'low'}) + for metric_name in self.metric_names) + expected_calls.append(mock.call.session().post( + "%(url)s/%(resource_type)s" % url_params, + headers=headers, + data=json_matcher(attributes)), + ) + post_responses.append(MockResponse(self.post_resource)) + + if self.metric: + expected_calls.append(mock.call.session().post( + "%(url)s/%(resource_type)s/%(resource_id)s/metric" + % url_params, + headers=headers, + data=json_matcher({self.sample['counter_name']: + {'archive_policy_name': 'low'}}) + )) + post_responses.append(MockResponse(self.metric)) + + if self.measure_retry: + expected_calls.append(mock.call.session().post( + "%(url)s/%(resource_type)s/%(resource_id)s/" + "metric/%(metric_name)s/measures" % url_params, + headers=headers, + data=json_matcher(self.measures_attributes)) + ) + 
post_responses.append(MockResponse(self.measure_retry)) + + if self.patch_resource and self.patchable_attributes: + expected_calls.append(mock.call.session().patch( + "%(url)s/%(resource_type)s/%(resource_id)s" % url_params, + headers=headers, + data=json_matcher(self.patchable_attributes)), + ) + patch_responses.append(MockResponse(self.patch_resource)) + + s = fake_requests.session.return_value + s.patch.side_effect = patch_responses + s.post.side_effect = post_responses + + self.dispatcher.record_metering_data([self.sample]) + + # Check that the last log message is the expected one + if self.measure == 500 or self.measure_retry == 500: + logger.error.assert_called_with( + "Fail to post measure on metric %s of resource %s " + "with status: %d: Internal Server Error" % + (self.sample['counter_name'], + self.sample['resource_id'], + 500)) + + elif self.post_resource == 500 or (self.patch_resource == 500 and + self.patchable_attributes): + logger.error.assert_called_with( + "Resource %s %s failed with status: " + "%d: Internal Server Error" % + (self.sample['resource_id'], + 'update' if self.patch_resource else 'creation', + 500)) + elif self.metric == 500: + logger.error.assert_called_with( + "Fail to create metric %s of resource %s " + "with status: %d: Internal Server Error" % + (self.sample['counter_name'], + self.sample['resource_id'], + 500)) + elif self.patch_resource == 204 and self.patchable_attributes: + client_logger.debug.assert_called_with( + 'Resource %s updated', self.sample['resource_id']) + elif self.measure == 200: + client_logger.debug.assert_called_with( + "Measure posted on metric %s of resource %s", + self.sample['counter_name'], + self.sample['resource_id']) + + self.assertEqual(expected_calls, fake_requests.mock_calls) + + +DispatcherWorkflowTest.generate_scenarios() diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/dispatcher/test_http.py ceilometer-5.0.0~b3/ceilometer/tests/unit/dispatcher/test_http.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/dispatcher/test_http.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/dispatcher/test_http.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,170 @@ +# +# Copyright 2013 IBM Corp +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
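
The cadf_only tests in this file pin down a simple gating rule: with cadf_only enabled, a meter is forwarded only when its resource_metadata carries request.CADF_EVENT; with it disabled, any non-empty metadata is enough. A hedged sketch of that decision (helper name and exact key handling assumed from the tests, not from the dispatcher source):

    def should_post_sketch(msg, cadf_only):
        metadata = msg.get('resource_metadata') or {}
        if cadf_only:
            # only meters carrying an embedded CADF event are forwarded
            return 'CADF_EVENT' in metadata.get('request', {})
        return bool(metadata)
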
+ +import datetime +import uuid + +import mock +from oslo_config import fixture as fixture_config +from oslotest import base +import requests + +from ceilometer.dispatcher import http +from ceilometer.event.storage import models as event_models +from ceilometer.publisher import utils + + +class TestDispatcherHttp(base.BaseTestCase): + + def setUp(self): + super(TestDispatcherHttp, self).setUp() + self.CONF = self.useFixture(fixture_config.Config()).conf + self.msg = {'counter_name': 'test', + 'resource_id': self.id(), + 'counter_volume': 1, + } + self.msg['message_signature'] = utils.compute_signature( + self.msg, self.CONF.publisher.telemetry_secret, + ) + + def test_http_dispatcher_config_options(self): + self.CONF.dispatcher_http.target = 'fake' + self.CONF.dispatcher_http.timeout = 2 + self.CONF.dispatcher_http.cadf_only = True + dispatcher = http.HttpDispatcher(self.CONF) + + self.assertEqual('fake', dispatcher.target) + self.assertEqual(2, dispatcher.timeout) + self.assertEqual(True, dispatcher.cadf_only) + + def test_http_dispatcher_with_no_target(self): + self.CONF.dispatcher_http.target = '' + dispatcher = http.HttpDispatcher(self.CONF) + + # The target should be None + self.assertEqual('', dispatcher.target) + + with mock.patch.object(requests, 'post') as post: + dispatcher.record_metering_data(self.msg) + + # Since the target is not set, no http post should occur, thus the + # call_count should be zero. + self.assertEqual(0, post.call_count) + + def test_http_dispatcher_with_no_metadata(self): + self.CONF.dispatcher_http.target = 'fake' + self.CONF.dispatcher_http.cadf_only = True + dispatcher = http.HttpDispatcher(self.CONF) + + with mock.patch.object(requests, 'post') as post: + dispatcher.record_metering_data(self.msg) + + self.assertEqual(0, post.call_count) + + def test_http_dispatcher_without_cadf_event(self): + self.CONF.dispatcher_http.target = 'fake' + self.CONF.dispatcher_http.cadf_only = True + dispatcher = http.HttpDispatcher(self.CONF) + + self.msg['resource_metadata'] = {'request': {'NONE_CADF_EVENT': { + 'q1': 'v1', 'q2': 'v2'}, }, } + self.msg['message_signature'] = utils.compute_signature( + self.msg, self.CONF.publisher.telemetry_secret, + ) + + with mock.patch.object(requests, 'post') as post: + dispatcher.record_metering_data(self.msg) + + # Since the meter does not have metadata or CADF_EVENT, the method + # call count should be zero + self.assertEqual(0, post.call_count) + + def test_http_dispatcher_with_cadf_event(self): + self.CONF.dispatcher_http.target = 'fake' + self.CONF.dispatcher_http.cadf_only = True + dispatcher = http.HttpDispatcher(self.CONF) + + self.msg['resource_metadata'] = {'request': {'CADF_EVENT': { + 'q1': 'v1', 'q2': 'v2'}, }, } + self.msg['message_signature'] = utils.compute_signature( + self.msg, self.CONF.publisher.telemetry_secret, + ) + + with mock.patch.object(requests, 'post') as post: + dispatcher.record_metering_data(self.msg) + + self.assertEqual(1, post.call_count) + + def test_http_dispatcher_with_none_cadf_event(self): + self.CONF.dispatcher_http.target = 'fake' + self.CONF.dispatcher_http.cadf_only = False + dispatcher = http.HttpDispatcher(self.CONF) + + self.msg['resource_metadata'] = {'any': {'thing1': 'v1', + 'thing2': 'v2', }, } + self.msg['message_signature'] = utils.compute_signature( + self.msg, self.CONF.publisher.telemetry_secret, + ) + + with mock.patch.object(requests, 'post') as post: + dispatcher.record_metering_data(self.msg) + + self.assertEqual(1, post.call_count) + + +class 
TestEventDispatcherHttp(base.BaseTestCase): + + def setUp(self): + super(TestEventDispatcherHttp, self).setUp() + self.CONF = self.useFixture(fixture_config.Config()).conf + + def test_http_dispatcher(self): + self.CONF.dispatcher_http.event_target = 'fake' + dispatcher = http.HttpDispatcher(self.CONF) + + event = event_models.Event(uuid.uuid4(), 'test', + datetime.datetime(2012, 7, 2, 13, 53, 40), + [], {}).serialize() + + with mock.patch.object(requests, 'post') as post: + dispatcher.record_events(event) + + self.assertEqual(1, post.call_count) + + def test_http_dispatcher_bad(self): + self.CONF.dispatcher_http.event_target = '' + dispatcher = http.HttpDispatcher(self.CONF) + + event = event_models.Event(uuid.uuid4(), 'test', + datetime.datetime(2012, 7, 2, 13, 53, 40), + [], {}).serialize() + + with mock.patch('ceilometer.dispatcher.http.LOG', + mock.MagicMock()) as LOG: + dispatcher.record_events(event) + self.assertTrue(LOG.exception.called) + + def test_http_dispatcher_share_target(self): + self.CONF.dispatcher_http.target = 'fake' + dispatcher = http.HttpDispatcher(self.CONF) + + event = event_models.Event(uuid.uuid4(), 'test', + datetime.datetime(2012, 7, 2, 13, 53, 40), + [], {}).serialize() + + with mock.patch.object(requests, 'post') as post: + dispatcher.record_events(event) + + self.assertEqual('fake', post.call_args[0][0]) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/dns/test_notifications.py ceilometer-5.0.0~b3/ceilometer/tests/unit/dns/test_notifications.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/dns/test_notifications.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/dns/test_notifications.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,102 @@ +# +# Copyright (c) 2015 Hewlett Packard Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
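+# NOTE: editorial restatement of what _verify_common_sample() below asserts,
+# for orientation; it is not code from this change. For a designate
+# '<SERVICE>.domain.exists' audit notification, the DomainExists handler is
+# expected to yield exactly one sample shaped roughly like:
+#
+#     sample.Sample(name='%s.domain.exists' % notifications.SERVICE,
+#                   type=sample.TYPE_CUMULATIVE, unit='s', volume=3600,
+#                   user_id=USER_ID, project_id=TENANT_ID,
+#                   resource_id=payload['id'],
+#                   resource_metadata={'host': publisher_id,
+#                                      'action': payload['action'],
+#                                      'status': payload['status']})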
+
+import mock
+from oslo_utils import timeutils
+from oslotest import base
+
+from ceilometer.dns import notifications
+from ceilometer import sample
+
+NOW = timeutils.utcnow().isoformat()
+
+TENANT_ID = u'76538754af6548f5b53cf9af2d35d582'
+USER_ID = u'b70ece400e4e45c187168c40fa42ff7a'
+DOMAIN_STATUS = u'ACTIVE'
+RESOURCE_ID = u'a8b55824-e731-40a3-a32d-de81474d74b2'
+PUBLISHER_ID = u'central.ubuntu'
+POOL_ID = u'794ccc2c-d751-44fe-b57f-8894c9f5c842'
+
+
+def _dns_notification_for(operation):
+
+    return {
+        u'event_type': '%s.domain.%s' % (notifications.SERVICE,
+                                         operation),
+        u'_context_roles': [u'admin'],
+        u'timestamp': NOW,
+        u'_context_tenant': TENANT_ID,
+        u'payload': {
+            u'status': DOMAIN_STATUS,
+            u'retry': 600,
+            u'description': None,
+            u'expire': 86400,
+            u'deleted': u'0',
+            u'tenant_id': TENANT_ID,
+            u'created_at': u'2015-07-10T20:05:29.870091Z',
+            u'updated_at': None,
+            u'refresh': 3600,
+            u'pool_id': POOL_ID,
+            u'email': u'admin@hpcloud.com',
+            u'minimum': 3600,
+            u'parent_domain_id': None,
+            u'version': 1,
+            u'ttl': 3600,
+            u'action': operation.upper(),
+            u'serial': 1426295326,
+            u'deleted_at': None,
+            u'id': RESOURCE_ID,
+            u'name': u'paas.hpcloud.com.',
+            u'audit_period_beginning': u'2015-07-10T20:05:29.870091Z',
+            u'audit_period_ending': u'2015-07-10T21:05:29.870091Z'
+        },
+        u'_context_user': USER_ID,
+        u'_context_auth_token': u'b95d4fc3bb2e4a5487cad06af65ffcfc',
+        u'priority': u'INFO',
+        u'_context_is_admin': False,
+        u'publisher_id': PUBLISHER_ID,
+        u'message_id': u'67ba0a2a-32bd-4cdf-9bfb-ef9cefcd0f63',
+    }
+
+
+class TestNotification(base.BaseTestCase):
+    def _verify_common_sample(self, actual, operation):
+        self.assertIsNotNone(actual)
+        self.assertEqual('%s.domain.%s' % (notifications.SERVICE, operation),
+                         actual.name)
+        self.assertEqual(NOW, actual.timestamp)
+        self.assertEqual(sample.TYPE_CUMULATIVE, actual.type)
+        self.assertEqual(TENANT_ID, actual.project_id)
+        self.assertEqual(RESOURCE_ID, actual.resource_id)
+        self.assertEqual(USER_ID, actual.user_id)
+        metadata = actual.resource_metadata
+        self.assertEqual(PUBLISHER_ID, metadata.get('host'))
+        self.assertEqual(operation.upper(), metadata.get('action'))
+        self.assertEqual(DOMAIN_STATUS, metadata.get('status'))
+
+        self.assertEqual(3600, actual.volume)
+        self.assertEqual('s', actual.unit)
+
+    def _test_operation(self, operation):
+        notif = _dns_notification_for(operation)
+        handler = notifications.DomainExists(mock.Mock())
+        data = list(handler.process_notification(notif))
+        self.assertEqual(1, len(data))
+        self._verify_common_sample(data[0], operation)
+
+    def test_exists(self):
+        self._test_operation('exists')
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/energy/test_kwapi.py ceilometer-5.0.0~b3/ceilometer/tests/unit/energy/test_kwapi.py
--- ceilometer-5.0.0~b2/ceilometer/tests/unit/energy/test_kwapi.py 1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/unit/energy/test_kwapi.py 2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,144 @@
+# -*- coding: utf-8 -*-
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime + +from keystoneclient import exceptions +import mock +from oslo_context import context +from oslotest import base +from oslotest import mockpatch +import six + +from ceilometer.agent import manager +from ceilometer.energy import kwapi + + +PROBE_DICT = { + "probes": { + "A": { + "timestamp": 1357730232.68754, + "w": 107.3, + "kwh": 0.001058255421506034 + }, + "B": { + "timestamp": 1357730232.048158, + "w": 15.0, + "kwh": 0.029019045026169896 + }, + "C": { + "timestamp": 1357730232.223375, + "w": 95.0, + "kwh": 0.17361822634312918 + } + } +} + +ENDPOINT = 'end://point' + + +class TestManager(manager.AgentManager): + + @mock.patch('keystoneclient.v2_0.client', mock.MagicMock()) + def __init__(self): + super(TestManager, self).__init__() + self.keystone = mock.Mock() + + +class _BaseTestCase(base.BaseTestCase): + + @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) + def setUp(self): + super(_BaseTestCase, self).setUp() + self.context = context.get_admin_context() + self.manager = TestManager() + + +class TestKwapi(_BaseTestCase): + + @staticmethod + def fake_get_kwapi_client(ksclient, endpoint): + raise exceptions.EndpointNotFound("fake keystone exception") + + def test_endpoint_not_exist(self): + with mockpatch.PatchObject(kwapi._Base, 'get_kwapi_client', + side_effect=self.fake_get_kwapi_client): + pollster = kwapi.EnergyPollster() + samples = list(pollster.get_samples(self.manager, {}, + [ENDPOINT])) + + self.assertEqual(0, len(samples)) + + +class TestEnergyPollster(_BaseTestCase): + pollster_cls = kwapi.EnergyPollster + unit = 'kwh' + + def setUp(self): + super(TestEnergyPollster, self).setUp() + self.useFixture(mockpatch.PatchObject( + kwapi._Base, '_iter_probes', side_effect=self.fake_iter_probes)) + + @staticmethod + def fake_iter_probes(ksclient, cache, endpoint): + probes = PROBE_DICT['probes'] + for key, value in six.iteritems(probes): + probe_dict = value + probe_dict['id'] = key + yield probe_dict + + def test_default_discovery(self): + pollster = kwapi.EnergyPollster() + self.assertEqual('endpoint:energy', pollster.default_discovery) + + def test_sample(self): + cache = {} + samples = list(self.pollster_cls().get_samples(self.manager, cache, + [ENDPOINT])) + self.assertEqual(len(PROBE_DICT['probes']), len(samples)) + samples_by_name = dict((s.resource_id, s) for s in samples) + for name, probe in PROBE_DICT['probes'].items(): + sample = samples_by_name[name] + expected = datetime.datetime.fromtimestamp( + probe['timestamp'] + ).isoformat() + self.assertEqual(expected, sample.timestamp) + self.assertEqual(probe[self.unit], sample.volume) + + +class TestPowerPollster(TestEnergyPollster): + pollster_cls = kwapi.PowerPollster + unit = 'w' + + +class TestEnergyPollsterCache(_BaseTestCase): + pollster_cls = kwapi.EnergyPollster + + def test_get_samples_cached(self): + probe = {'id': 'A'} + probe.update(PROBE_DICT['probes']['A']) + cache = { + '%s-%s' % (ENDPOINT, self.pollster_cls.CACHE_KEY_PROBE): [probe], + } + self.manager.keystone = mock.Mock() + pollster = self.pollster_cls() + with mock.patch.object(pollster, '_get_probes') as do_not_call: + do_not_call.side_effect = AssertionError('should not be called') + samples = list(pollster.get_samples(self.manager, cache, + [ENDPOINT])) + self.assertEqual(1, len(samples)) + + +class TestPowerPollsterCache(TestEnergyPollsterCache): + pollster_cls = kwapi.PowerPollster diff -Nru 
ceilometer-5.0.0~b2/ceilometer/tests/unit/event/test_converter.py ceilometer-5.0.0~b3/ceilometer/tests/unit/event/test_converter.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/event/test_converter.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/event/test_converter.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,777 @@ +# +# Copyright 2013 Rackspace Hosting. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime + +import jsonpath_rw_ext +import mock +from oslo_config import fixture as fixture_config +import six + +from ceilometer.event import converter +from ceilometer.event.storage import models +from ceilometer.tests import base + + +class ConverterBase(base.BaseTestCase): + @staticmethod + def _create_test_notification(event_type, message_id, **kw): + return dict(event_type=event_type, + message_id=message_id, + priority="INFO", + publisher_id="compute.host-1-2-3", + timestamp="2013-08-08 21:06:37.803826", + payload=kw, + ) + + def assertIsValidEvent(self, event, notification): + self.assertIsNot( + None, event, + "Notification dropped unexpectedly:" + " %s" % str(notification)) + self.assertIsInstance(event, models.Event) + + def assertIsNotValidEvent(self, event, notification): + self.assertIs( + None, event, + "Notification NOT dropped when expected to be dropped:" + " %s" % str(notification)) + + def assertHasTrait(self, event, name, value=None, dtype=None): + traits = [trait for trait in event.traits if trait.name == name] + self.assertTrue( + len(traits) > 0, + "Trait %s not found in event %s" % (name, event)) + trait = traits[0] + if value is not None: + self.assertEqual(value, trait.value) + if dtype is not None: + self.assertEqual(dtype, trait.dtype) + if dtype == models.Trait.INT_TYPE: + self.assertIsInstance(trait.value, int) + elif dtype == models.Trait.FLOAT_TYPE: + self.assertIsInstance(trait.value, float) + elif dtype == models.Trait.DATETIME_TYPE: + self.assertIsInstance(trait.value, datetime.datetime) + elif dtype == models.Trait.TEXT_TYPE: + self.assertIsInstance(trait.value, six.string_types) + + def assertDoesNotHaveTrait(self, event, name): + traits = [trait for trait in event.traits if trait.name == name] + self.assertEqual( + len(traits), 0, + "Extra Trait %s found in event %s" % (name, event)) + + def assertHasDefaultTraits(self, event): + text = models.Trait.TEXT_TYPE + self.assertHasTrait(event, 'service', dtype=text) + + def _cmp_tree(self, this, other): + if hasattr(this, 'right') and hasattr(other, 'right'): + return (self._cmp_tree(this.right, other.right) and + self._cmp_tree(this.left, other.left)) + if not hasattr(this, 'right') and not hasattr(other, 'right'): + return this == other + return False + + def assertPathsEqual(self, path1, path2): + self.assertTrue(self._cmp_tree(path1, path2), + 'JSONPaths not equivalent %s %s' % (path1, path2)) + + +class TestTraitDefinition(ConverterBase): + + def setUp(self): + super(TestTraitDefinition, self).setUp() + self.n1 = self._create_test_notification( + 
"test.thing", + "uuid-for-notif-0001", + instance_uuid="uuid-for-instance-0001", + instance_id="id-for-instance-0001", + instance_uuid2=None, + instance_id2=None, + host='host-1-2-3', + bogus_date='', + image_meta=dict( + disk_gb='20', + thing='whatzit'), + foobar=50) + + self.ext1 = mock.MagicMock(name='mock_test_plugin') + self.test_plugin_class = self.ext1.plugin + self.test_plugin = self.test_plugin_class() + self.test_plugin.trait_value.return_value = 'foobar' + self.ext1.reset_mock() + + self.ext2 = mock.MagicMock(name='mock_nothing_plugin') + self.nothing_plugin_class = self.ext2.plugin + self.nothing_plugin = self.nothing_plugin_class() + self.nothing_plugin.trait_value.return_value = None + self.ext2.reset_mock() + + self.fake_plugin_mgr = dict(test=self.ext1, nothing=self.ext2) + + def test_to_trait_with_plugin(self): + cfg = dict(type='text', + fields=['payload.instance_id', 'payload.instance_uuid'], + plugin=dict(name='test')) + + tdef = converter.TraitDefinition('test_trait', cfg, + self.fake_plugin_mgr) + t = tdef.to_trait(self.n1) + self.assertIsInstance(t, models.Trait) + self.assertEqual('test_trait', t.name) + self.assertEqual(models.Trait.TEXT_TYPE, t.dtype) + self.assertEqual('foobar', t.value) + self.test_plugin_class.assert_called_once_with() + self.test_plugin.trait_value.assert_called_once_with([ + ('payload.instance_id', 'id-for-instance-0001'), + ('payload.instance_uuid', 'uuid-for-instance-0001')]) + + def test_to_trait_null_match_with_plugin(self): + cfg = dict(type='text', + fields=['payload.nothere', 'payload.bogus'], + plugin=dict(name='test')) + + tdef = converter.TraitDefinition('test_trait', cfg, + self.fake_plugin_mgr) + t = tdef.to_trait(self.n1) + self.assertIsInstance(t, models.Trait) + self.assertEqual('test_trait', t.name) + self.assertEqual(models.Trait.TEXT_TYPE, t.dtype) + self.assertEqual('foobar', t.value) + self.test_plugin_class.assert_called_once_with() + self.test_plugin.trait_value.assert_called_once_with([]) + + def test_to_trait_with_plugin_null(self): + cfg = dict(type='text', + fields=['payload.instance_id', 'payload.instance_uuid'], + plugin=dict(name='nothing')) + + tdef = converter.TraitDefinition('test_trait', cfg, + self.fake_plugin_mgr) + t = tdef.to_trait(self.n1) + self.assertIs(None, t) + self.nothing_plugin_class.assert_called_once_with() + self.nothing_plugin.trait_value.assert_called_once_with([ + ('payload.instance_id', 'id-for-instance-0001'), + ('payload.instance_uuid', 'uuid-for-instance-0001')]) + + def test_to_trait_with_plugin_with_parameters(self): + cfg = dict(type='text', + fields=['payload.instance_id', 'payload.instance_uuid'], + plugin=dict(name='test', parameters=dict(a=1, b='foo'))) + + tdef = converter.TraitDefinition('test_trait', cfg, + self.fake_plugin_mgr) + t = tdef.to_trait(self.n1) + self.assertIsInstance(t, models.Trait) + self.assertEqual('test_trait', t.name) + self.assertEqual(models.Trait.TEXT_TYPE, t.dtype) + self.assertEqual('foobar', t.value) + self.test_plugin_class.assert_called_once_with(a=1, b='foo') + self.test_plugin.trait_value.assert_called_once_with([ + ('payload.instance_id', 'id-for-instance-0001'), + ('payload.instance_uuid', 'uuid-for-instance-0001')]) + + def test_to_trait(self): + cfg = dict(type='text', fields='payload.instance_id') + tdef = converter.TraitDefinition('test_trait', cfg, + self.fake_plugin_mgr) + t = tdef.to_trait(self.n1) + self.assertIsInstance(t, models.Trait) + self.assertEqual('test_trait', t.name) + self.assertEqual(models.Trait.TEXT_TYPE, t.dtype) + 
self.assertEqual('id-for-instance-0001', t.value) + + cfg = dict(type='int', fields='payload.image_meta.disk_gb') + tdef = converter.TraitDefinition('test_trait', cfg, + self.fake_plugin_mgr) + t = tdef.to_trait(self.n1) + self.assertIsInstance(t, models.Trait) + self.assertEqual('test_trait', t.name) + self.assertEqual(models.Trait.INT_TYPE, t.dtype) + self.assertEqual(20, t.value) + + def test_to_trait_multiple(self): + cfg = dict(type='text', fields=['payload.instance_id', + 'payload.instance_uuid']) + tdef = converter.TraitDefinition('test_trait', cfg, + self.fake_plugin_mgr) + t = tdef.to_trait(self.n1) + self.assertIsInstance(t, models.Trait) + self.assertEqual('id-for-instance-0001', t.value) + + cfg = dict(type='text', fields=['payload.instance_uuid', + 'payload.instance_id']) + tdef = converter.TraitDefinition('test_trait', cfg, + self.fake_plugin_mgr) + t = tdef.to_trait(self.n1) + self.assertIsInstance(t, models.Trait) + self.assertEqual('uuid-for-instance-0001', t.value) + + def test_to_trait_multiple_different_nesting(self): + cfg = dict(type='int', fields=['payload.foobar', + 'payload.image_meta.disk_gb']) + tdef = converter.TraitDefinition('test_trait', cfg, + self.fake_plugin_mgr) + t = tdef.to_trait(self.n1) + self.assertIsInstance(t, models.Trait) + self.assertEqual(50, t.value) + + cfg = dict(type='int', fields=['payload.image_meta.disk_gb', + 'payload.foobar']) + tdef = converter.TraitDefinition('test_trait', cfg, + self.fake_plugin_mgr) + t = tdef.to_trait(self.n1) + self.assertIsInstance(t, models.Trait) + self.assertEqual(20, t.value) + + def test_to_trait_some_null_multiple(self): + cfg = dict(type='text', fields=['payload.instance_id2', + 'payload.instance_uuid']) + tdef = converter.TraitDefinition('test_trait', cfg, + self.fake_plugin_mgr) + t = tdef.to_trait(self.n1) + self.assertIsInstance(t, models.Trait) + self.assertEqual('uuid-for-instance-0001', t.value) + + def test_to_trait_some_missing_multiple(self): + cfg = dict(type='text', fields=['payload.not_here_boss', + 'payload.instance_uuid']) + tdef = converter.TraitDefinition('test_trait', cfg, + self.fake_plugin_mgr) + t = tdef.to_trait(self.n1) + self.assertIsInstance(t, models.Trait) + self.assertEqual('uuid-for-instance-0001', t.value) + + def test_to_trait_missing(self): + cfg = dict(type='text', fields='payload.not_here_boss') + tdef = converter.TraitDefinition('test_trait', cfg, + self.fake_plugin_mgr) + t = tdef.to_trait(self.n1) + self.assertIs(None, t) + + def test_to_trait_null(self): + cfg = dict(type='text', fields='payload.instance_id2') + tdef = converter.TraitDefinition('test_trait', cfg, + self.fake_plugin_mgr) + t = tdef.to_trait(self.n1) + self.assertIs(None, t) + + def test_to_trait_empty_nontext(self): + cfg = dict(type='datetime', fields='payload.bogus_date') + tdef = converter.TraitDefinition('test_trait', cfg, + self.fake_plugin_mgr) + t = tdef.to_trait(self.n1) + self.assertIs(None, t) + + def test_to_trait_multiple_null_missing(self): + cfg = dict(type='text', fields=['payload.not_here_boss', + 'payload.instance_id2']) + tdef = converter.TraitDefinition('test_trait', cfg, + self.fake_plugin_mgr) + t = tdef.to_trait(self.n1) + self.assertIs(None, t) + + def test_missing_fields_config(self): + self.assertRaises(converter.EventDefinitionException, + converter.TraitDefinition, + 'bogus_trait', + dict(), + self.fake_plugin_mgr) + + def test_string_fields_config(self): + cfg = dict(fields='payload.test') + t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) + 
self.assertPathsEqual(t.fields, jsonpath_rw_ext.parse('payload.test'))
+
+    def test_list_fields_config(self):
+        cfg = dict(fields=['payload.test', 'payload.other'])
+        t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr)
+        self.assertPathsEqual(
+            t.fields,
+            jsonpath_rw_ext.parse('(payload.test)|(payload.other)'))
+
+    def test_invalid_path_config(self):
+        # test invalid jsonpath...
+        cfg = dict(fields='payload.bogus(')
+        self.assertRaises(converter.EventDefinitionException,
+                          converter.TraitDefinition,
+                          'bogus_trait',
+                          cfg,
+                          self.fake_plugin_mgr)
+
+    def test_invalid_plugin_config(self):
+        # a plugin section without a 'name' key is invalid...
+        cfg = dict(fields='payload.test', plugin=dict(bogus="true"))
+        self.assertRaises(converter.EventDefinitionException,
+                          converter.TraitDefinition,
+                          'test_trait',
+                          cfg,
+                          self.fake_plugin_mgr)
+
+    def test_unknown_plugin(self):
+        # a plugin name that is not registered is invalid...
+        cfg = dict(fields='payload.test', plugin=dict(name='bogus'))
+        self.assertRaises(converter.EventDefinitionException,
+                          converter.TraitDefinition,
+                          'test_trait',
+                          cfg,
+                          self.fake_plugin_mgr)
+
+    def test_type_config(self):
+        cfg = dict(type='text', fields='payload.test')
+        t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr)
+        self.assertEqual(models.Trait.TEXT_TYPE, t.trait_type)
+
+        cfg = dict(type='int', fields='payload.test')
+        t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr)
+        self.assertEqual(models.Trait.INT_TYPE, t.trait_type)
+
+        cfg = dict(type='float', fields='payload.test')
+        t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr)
+        self.assertEqual(models.Trait.FLOAT_TYPE, t.trait_type)
+
+        cfg = dict(type='datetime', fields='payload.test')
+        t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr)
+        self.assertEqual(models.Trait.DATETIME_TYPE, t.trait_type)
+
+    def test_invalid_type_config(self):
+        # an unrecognized trait type is invalid...
+ cfg = dict(type='bogus', fields='payload.test') + self.assertRaises(converter.EventDefinitionException, + converter.TraitDefinition, + 'bogus_trait', + cfg, + self.fake_plugin_mgr) + + +class TestEventDefinition(ConverterBase): + + def setUp(self): + super(TestEventDefinition, self).setUp() + + self.traits_cfg = { + 'instance_id': { + 'type': 'text', + 'fields': ['payload.instance_uuid', + 'payload.instance_id'], + }, + 'host': { + 'type': 'text', + 'fields': 'payload.host', + }, + } + + self.test_notification1 = self._create_test_notification( + "test.thing", + "uuid-for-notif-0001", + instance_id="uuid-for-instance-0001", + host='host-1-2-3') + + self.test_notification2 = self._create_test_notification( + "test.thing", + "uuid-for-notif-0002", + instance_id="uuid-for-instance-0002") + + self.test_notification3 = self._create_test_notification( + "test.thing", + "uuid-for-notif-0003", + instance_id="uuid-for-instance-0003", + host=None) + self.fake_plugin_mgr = {} + + def test_to_event(self): + dtype = models.Trait.TEXT_TYPE + cfg = dict(event_type='test.thing', traits=self.traits_cfg) + edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) + + e = edef.to_event(self.test_notification1) + self.assertEqual('test.thing', e.event_type) + self.assertEqual(datetime.datetime(2013, 8, 8, 21, 6, 37, 803826), + e.generated) + + self.assertHasDefaultTraits(e) + self.assertHasTrait(e, 'host', value='host-1-2-3', dtype=dtype) + self.assertHasTrait(e, 'instance_id', + value='uuid-for-instance-0001', + dtype=dtype) + + def test_to_event_missing_trait(self): + dtype = models.Trait.TEXT_TYPE + cfg = dict(event_type='test.thing', traits=self.traits_cfg) + edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) + + e = edef.to_event(self.test_notification2) + + self.assertHasDefaultTraits(e) + self.assertHasTrait(e, 'instance_id', + value='uuid-for-instance-0002', + dtype=dtype) + self.assertDoesNotHaveTrait(e, 'host') + + def test_to_event_null_trait(self): + dtype = models.Trait.TEXT_TYPE + cfg = dict(event_type='test.thing', traits=self.traits_cfg) + edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) + + e = edef.to_event(self.test_notification3) + + self.assertHasDefaultTraits(e) + self.assertHasTrait(e, 'instance_id', + value='uuid-for-instance-0003', + dtype=dtype) + self.assertDoesNotHaveTrait(e, 'host') + + def test_bogus_cfg_no_traits(self): + bogus = dict(event_type='test.foo') + self.assertRaises(converter.EventDefinitionException, + converter.EventDefinition, + bogus, + self.fake_plugin_mgr) + + def test_bogus_cfg_no_type(self): + bogus = dict(traits=self.traits_cfg) + self.assertRaises(converter.EventDefinitionException, + converter.EventDefinition, + bogus, + self.fake_plugin_mgr) + + def test_included_type_string(self): + cfg = dict(event_type='test.thing', traits=self.traits_cfg) + edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) + self.assertEqual(1, len(edef._included_types)) + self.assertEqual('test.thing', edef._included_types[0]) + self.assertEqual(0, len(edef._excluded_types)) + self.assertTrue(edef.included_type('test.thing')) + self.assertFalse(edef.excluded_type('test.thing')) + self.assertTrue(edef.match_type('test.thing')) + self.assertFalse(edef.match_type('random.thing')) + + def test_included_type_list(self): + cfg = dict(event_type=['test.thing', 'other.thing'], + traits=self.traits_cfg) + edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) + self.assertEqual(2, len(edef._included_types)) + self.assertEqual(0, len(edef._excluded_types)) 
+        self.assertTrue(edef.included_type('test.thing'))
+        self.assertTrue(edef.included_type('other.thing'))
+        self.assertFalse(edef.excluded_type('test.thing'))
+        self.assertTrue(edef.match_type('test.thing'))
+        self.assertTrue(edef.match_type('other.thing'))
+        self.assertFalse(edef.match_type('random.thing'))
+
+    def test_excluded_type_string(self):
+        cfg = dict(event_type='!test.thing', traits=self.traits_cfg)
+        edef = converter.EventDefinition(cfg, self.fake_plugin_mgr)
+        self.assertEqual(1, len(edef._included_types))
+        self.assertEqual('*', edef._included_types[0])
+        self.assertEqual(1, len(edef._excluded_types))
+        self.assertEqual('test.thing', edef._excluded_types[0])
+        self.assertTrue(edef.excluded_type('test.thing'))
+        self.assertTrue(edef.included_type('random.thing'))
+        self.assertFalse(edef.match_type('test.thing'))
+        self.assertTrue(edef.match_type('random.thing'))
+
+    def test_excluded_type_list(self):
+        cfg = dict(event_type=['!test.thing', '!other.thing'],
+                   traits=self.traits_cfg)
+        edef = converter.EventDefinition(cfg, self.fake_plugin_mgr)
+        self.assertEqual(1, len(edef._included_types))
+        self.assertEqual(2, len(edef._excluded_types))
+        self.assertTrue(edef.excluded_type('test.thing'))
+        self.assertTrue(edef.excluded_type('other.thing'))
+        self.assertFalse(edef.excluded_type('random.thing'))
+        self.assertFalse(edef.match_type('test.thing'))
+        self.assertFalse(edef.match_type('other.thing'))
+        self.assertTrue(edef.match_type('random.thing'))
+
+    def test_mixed_type_list(self):
+        cfg = dict(event_type=['*.thing', '!test.thing', '!other.thing'],
+                   traits=self.traits_cfg)
+        edef = converter.EventDefinition(cfg, self.fake_plugin_mgr)
+        self.assertEqual(1, len(edef._included_types))
+        self.assertEqual(2, len(edef._excluded_types))
+        self.assertTrue(edef.excluded_type('test.thing'))
+        self.assertTrue(edef.excluded_type('other.thing'))
+        self.assertFalse(edef.excluded_type('random.thing'))
+        self.assertFalse(edef.match_type('test.thing'))
+        self.assertFalse(edef.match_type('other.thing'))
+        self.assertFalse(edef.match_type('random.whatzit'))
+        self.assertTrue(edef.match_type('random.thing'))
+
+    def test_catchall(self):
+        cfg = dict(event_type=['*.thing', '!test.thing', '!other.thing'],
+                   traits=self.traits_cfg)
+        edef = converter.EventDefinition(cfg, self.fake_plugin_mgr)
+        self.assertFalse(edef.is_catchall)
+
+        cfg = dict(event_type=['!other.thing'],
+                   traits=self.traits_cfg)
+        edef = converter.EventDefinition(cfg, self.fake_plugin_mgr)
+        self.assertFalse(edef.is_catchall)
+
+        cfg = dict(event_type=['other.thing'],
+                   traits=self.traits_cfg)
+        edef = converter.EventDefinition(cfg, self.fake_plugin_mgr)
+        self.assertFalse(edef.is_catchall)
+
+        cfg = dict(event_type=['*', '!other.thing'],
+                   traits=self.traits_cfg)
+        edef = converter.EventDefinition(cfg, self.fake_plugin_mgr)
+        self.assertFalse(edef.is_catchall)
+
+        cfg = dict(event_type=['*'],
+                   traits=self.traits_cfg)
+        edef = converter.EventDefinition(cfg, self.fake_plugin_mgr)
+        self.assertTrue(edef.is_catchall)
+
+        cfg = dict(event_type=['*', 'foo'],
+                   traits=self.traits_cfg)
+        edef = converter.EventDefinition(cfg, self.fake_plugin_mgr)
+        self.assertTrue(edef.is_catchall)
+
+    @mock.patch('oslo_utils.timeutils.utcnow')
+    def test_extract_when(self, mock_utcnow):
+        now = datetime.datetime.utcnow()
+        modified = now + datetime.timedelta(minutes=1)
+        mock_utcnow.return_value = now
+
+        body = {"timestamp": str(modified)}
+        when = converter.EventDefinition._extract_when(body)
+
self.assertTimestampEqual(modified, when) + + body = {"_context_timestamp": str(modified)} + when = converter.EventDefinition._extract_when(body) + self.assertTimestampEqual(modified, when) + + then = now + datetime.timedelta(hours=1) + body = {"timestamp": str(modified), "_context_timestamp": str(then)} + when = converter.EventDefinition._extract_when(body) + self.assertTimestampEqual(modified, when) + + when = converter.EventDefinition._extract_when({}) + self.assertTimestampEqual(now, when) + + def test_default_traits(self): + cfg = dict(event_type='test.thing', traits={}) + edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) + default_traits = converter.EventDefinition.DEFAULT_TRAITS.keys() + traits = set(edef.traits.keys()) + for dt in default_traits: + self.assertIn(dt, traits) + self.assertEqual(len(converter.EventDefinition.DEFAULT_TRAITS), + len(edef.traits)) + + def test_traits(self): + cfg = dict(event_type='test.thing', traits=self.traits_cfg) + edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) + default_traits = converter.EventDefinition.DEFAULT_TRAITS.keys() + traits = set(edef.traits.keys()) + for dt in default_traits: + self.assertIn(dt, traits) + self.assertIn('host', traits) + self.assertIn('instance_id', traits) + self.assertEqual(len(converter.EventDefinition.DEFAULT_TRAITS) + 2, + len(edef.traits)) + + +class TestNotificationConverter(ConverterBase): + + def setUp(self): + super(TestNotificationConverter, self).setUp() + self.CONF = self.useFixture(fixture_config.Config()).conf + self.valid_event_def1 = [{ + 'event_type': 'compute.instance.create.*', + 'traits': { + 'instance_id': { + 'type': 'text', + 'fields': ['payload.instance_uuid', + 'payload.instance_id'], + }, + 'host': { + 'type': 'text', + 'fields': 'payload.host', + }, + }, + }] + + self.test_notification1 = self._create_test_notification( + "compute.instance.create.start", + "uuid-for-notif-0001", + instance_id="uuid-for-instance-0001", + host='host-1-2-3') + self.test_notification2 = self._create_test_notification( + "bogus.notification.from.mars", + "uuid-for-notif-0002", + weird='true', + host='cydonia') + self.fake_plugin_mgr = {} + + @mock.patch('oslo_utils.timeutils.utcnow') + def test_converter_missing_keys(self, mock_utcnow): + # test a malformed notification + now = datetime.datetime.utcnow() + mock_utcnow.return_value = now + c = converter.NotificationEventsConverter( + [], + self.fake_plugin_mgr, + add_catchall=True) + message = {'event_type': "foo", + 'message_id': "abc", + 'publisher_id': "1"} + e = c.to_event(message) + self.assertIsValidEvent(e, message) + self.assertEqual(1, len(e.traits)) + self.assertEqual("foo", e.event_type) + self.assertEqual(now, e.generated) + + def test_converter_with_catchall(self): + c = converter.NotificationEventsConverter( + self.valid_event_def1, + self.fake_plugin_mgr, + add_catchall=True) + self.assertEqual(2, len(c.definitions)) + e = c.to_event(self.test_notification1) + self.assertIsValidEvent(e, self.test_notification1) + self.assertEqual(3, len(e.traits)) + self.assertHasDefaultTraits(e) + self.assertHasTrait(e, 'instance_id') + self.assertHasTrait(e, 'host') + + e = c.to_event(self.test_notification2) + self.assertIsValidEvent(e, self.test_notification2) + self.assertEqual(1, len(e.traits)) + self.assertHasDefaultTraits(e) + self.assertDoesNotHaveTrait(e, 'instance_id') + self.assertDoesNotHaveTrait(e, 'host') + + def test_converter_without_catchall(self): + c = converter.NotificationEventsConverter( + self.valid_event_def1, + 
self.fake_plugin_mgr, + add_catchall=False) + self.assertEqual(1, len(c.definitions)) + e = c.to_event(self.test_notification1) + self.assertIsValidEvent(e, self.test_notification1) + self.assertEqual(3, len(e.traits)) + self.assertHasDefaultTraits(e) + self.assertHasTrait(e, 'instance_id') + self.assertHasTrait(e, 'host') + + e = c.to_event(self.test_notification2) + self.assertIsNotValidEvent(e, self.test_notification2) + + def test_converter_empty_cfg_with_catchall(self): + c = converter.NotificationEventsConverter( + [], + self.fake_plugin_mgr, + add_catchall=True) + self.assertEqual(1, len(c.definitions)) + e = c.to_event(self.test_notification1) + self.assertIsValidEvent(e, self.test_notification1) + self.assertEqual(1, len(e.traits)) + self.assertHasDefaultTraits(e) + + e = c.to_event(self.test_notification2) + self.assertIsValidEvent(e, self.test_notification2) + self.assertEqual(1, len(e.traits)) + self.assertHasDefaultTraits(e) + + def test_converter_empty_cfg_without_catchall(self): + c = converter.NotificationEventsConverter( + [], + self.fake_plugin_mgr, + add_catchall=False) + self.assertEqual(0, len(c.definitions)) + e = c.to_event(self.test_notification1) + self.assertIsNotValidEvent(e, self.test_notification1) + + e = c.to_event(self.test_notification2) + self.assertIsNotValidEvent(e, self.test_notification2) + + @staticmethod + def _convert_message(convert, level): + message = {'priority': level, 'event_type': "foo", + 'message_id': "abc", 'publisher_id': "1"} + return convert.to_event(message) + + def test_store_raw_all(self): + self.CONF.event.store_raw = ['info', 'error'] + c = converter.NotificationEventsConverter( + [], self.fake_plugin_mgr) + self.assertTrue(self._convert_message(c, 'info').raw) + self.assertTrue(self._convert_message(c, 'error').raw) + + def test_store_raw_info_only(self): + self.CONF.event.store_raw = ['info'] + c = converter.NotificationEventsConverter( + [], self.fake_plugin_mgr) + self.assertTrue(self._convert_message(c, 'info').raw) + self.assertFalse(self._convert_message(c, 'error').raw) + + def test_store_raw_error_only(self): + self.CONF.event.store_raw = ['error'] + c = converter.NotificationEventsConverter( + [], self.fake_plugin_mgr) + self.assertFalse(self._convert_message(c, 'info').raw) + self.assertTrue(self._convert_message(c, 'error').raw) + + def test_store_raw_skip_all(self): + c = converter.NotificationEventsConverter( + [], self.fake_plugin_mgr) + self.assertFalse(self._convert_message(c, 'info').raw) + self.assertFalse(self._convert_message(c, 'error').raw) + + def test_store_raw_info_only_no_case(self): + self.CONF.event.store_raw = ['INFO'] + c = converter.NotificationEventsConverter( + [], self.fake_plugin_mgr) + self.assertTrue(self._convert_message(c, 'info').raw) + self.assertFalse(self._convert_message(c, 'error').raw) + + def test_store_raw_bad_skip_all(self): + self.CONF.event.store_raw = ['unknown'] + c = converter.NotificationEventsConverter( + [], self.fake_plugin_mgr) + self.assertFalse(self._convert_message(c, 'info').raw) + self.assertFalse(self._convert_message(c, 'error').raw) + + def test_store_raw_bad_and_good(self): + self.CONF.event.store_raw = ['info', 'unknown'] + c = converter.NotificationEventsConverter( + [], self.fake_plugin_mgr) + self.assertTrue(self._convert_message(c, 'info').raw) + self.assertFalse(self._convert_message(c, 'error').raw) + + @mock.patch('ceilometer.event.converter.get_config_file', + mock.Mock(return_value=None)) + def test_setup_events_default_config(self): + 
self.CONF.set_override('drop_unmatched_notifications', + False, group='event') + + c = converter.setup_events(self.fake_plugin_mgr) + self.assertIsInstance(c, converter.NotificationEventsConverter) + self.assertEqual(1, len(c.definitions)) + self.assertTrue(c.definitions[0].is_catchall) + + self.CONF.set_override('drop_unmatched_notifications', + True, group='event') + + c = converter.setup_events(self.fake_plugin_mgr) + self.assertIsInstance(c, converter.NotificationEventsConverter) + self.assertEqual(0, len(c.definitions)) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/event/test_endpoint.py ceilometer-5.0.0~b3/ceilometer/tests/unit/event/test_endpoint.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/event/test_endpoint.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/event/test_endpoint.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,188 @@ +# +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Tests for Ceilometer notify daemon.""" + +import mock +from oslo_config import cfg +from oslo_config import fixture as fixture_config +import oslo_messaging +from oslo_utils import fileutils +from oslotest import mockpatch +import six +import yaml + +from ceilometer.event import endpoint as event_endpoint +from ceilometer import pipeline +from ceilometer import publisher +from ceilometer.publisher import test +from ceilometer.tests import base as tests_base + + +TEST_NOTICE_CTXT = { + u'auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', + u'is_admin': True, + u'project_id': u'7c150a59fe714e6f9263774af9688f0e', + u'quota_class': None, + u'read_deleted': u'no', + u'remote_address': u'10.0.2.15', + u'request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', + u'roles': [u'admin'], + u'timestamp': u'2012-05-08T20:23:41.425105', + u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', +} + +TEST_NOTICE_METADATA = { + u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', + u'timestamp': u'2012-05-08 20:23:48.028195', +} + +TEST_NOTICE_PAYLOAD = { + u'created_at': u'2012-05-08 20:23:41', + u'deleted_at': u'', + u'disk_gb': 0, + u'display_name': u'testme', + u'fixed_ips': [{u'address': u'10.0.0.2', + u'floating_ips': [], + u'meta': {}, + u'type': u'fixed', + u'version': 4}], + u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', + u'instance_id': u'9f9d01b9-4a58-4271-9e27-398b21ab20d1', + u'instance_type': u'm1.tiny', + u'instance_type_id': 2, + u'launched_at': u'2012-05-08 20:23:47.985999', + u'memory_mb': 512, + u'state': u'active', + u'state_description': u'', + u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', + u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', + u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', + u'vcpus': 1, + u'root_gb': 0, + u'ephemeral_gb': 0, + u'host': u'compute-host-name', + u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', + u'os_type': u'linux?', + u'architecture': u'x86', + u'image_ref': u'UUID', + u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', + u'ramdisk_id': 
u'1e3ce043029547f1a61c1996d1a531a6', +} + + +cfg.CONF.import_opt('store_events', 'ceilometer.notification', + group='notification') + + +class TestEventEndpoint(tests_base.BaseTestCase): + + def get_publisher(self, url, namespace=''): + fake_drivers = {'test://': test.TestPublisher, + 'except://': test.TestPublisher} + return fake_drivers[url](url) + + def _setup_pipeline(self, publishers): + ev_pipeline = yaml.dump({ + 'sources': [{ + 'name': 'test_event', + 'events': ['test.test'], + 'sinks': ['test_sink'] + }], + 'sinks': [{ + 'name': 'test_sink', + 'publishers': publishers + }] + }) + + if six.PY3: + ev_pipeline = ev_pipeline.encode('utf-8') + ev_pipeline_cfg_file = fileutils.write_to_tempfile( + content=ev_pipeline, prefix="event_pipeline", suffix="yaml") + self.CONF.set_override('event_pipeline_cfg_file', + ev_pipeline_cfg_file) + + ev_pipeline_mgr = pipeline.setup_event_pipeline() + return ev_pipeline_mgr + + def _setup_endpoint(self, publishers): + ev_pipeline_mgr = self._setup_pipeline(publishers) + self.endpoint = event_endpoint.EventsNotificationEndpoint( + ev_pipeline_mgr) + + self.endpoint.event_converter = mock.MagicMock() + self.endpoint.event_converter.to_event.return_value = mock.MagicMock( + event_type='test.test') + + def setUp(self): + super(TestEventEndpoint, self).setUp() + self.CONF = self.useFixture(fixture_config.Config()).conf + self.CONF([]) + self.CONF.set_override("connection", "log://", group='database') + self.CONF.set_override("store_events", True, group="notification") + self.setup_messaging(self.CONF) + + self.useFixture(mockpatch.PatchObject(publisher, 'get_publisher', + side_effect=self.get_publisher)) + self.fake_publisher = mock.Mock() + self.useFixture(mockpatch.Patch( + 'ceilometer.publisher.test.TestPublisher', + return_value=self.fake_publisher)) + + def test_message_to_event(self): + self._setup_endpoint(['test://']) + self.endpoint.info(TEST_NOTICE_CTXT, 'compute.vagrant-precise', + 'compute.instance.create.end', + TEST_NOTICE_PAYLOAD, TEST_NOTICE_METADATA) + + def test_bad_event_non_ack_and_requeue(self): + self._setup_endpoint(['test://']) + self.fake_publisher.publish_events.side_effect = Exception + self.CONF.set_override("ack_on_event_error", False, + group="notification") + ret = self.endpoint.info(TEST_NOTICE_CTXT, 'compute.vagrant-precise', + 'compute.instance.create.end', + TEST_NOTICE_PAYLOAD, TEST_NOTICE_METADATA) + self.assertEqual(oslo_messaging.NotificationResult.REQUEUE, ret) + + def test_message_to_event_bad_event(self): + self._setup_endpoint(['test://']) + self.fake_publisher.publish_events.side_effect = Exception + self.CONF.set_override("ack_on_event_error", False, + group="notification") + + message = {'event_type': "foo", 'message_id': "abc"} + with mock.patch("ceilometer.pipeline.LOG") as mock_logger: + ret = self.endpoint.process_notification(message) + self.assertEqual(oslo_messaging.NotificationResult.REQUEUE, ret) + exception_mock = mock_logger.exception + self.assertIn('Exit after error from publisher', + exception_mock.call_args_list[0][0][0]) + + def test_message_to_event_bad_event_multi_publish(self): + + self._setup_endpoint(['test://', 'except://']) + + self.fake_publisher.publish_events.side_effect = Exception + self.CONF.set_override("ack_on_event_error", False, + group="notification") + + message = {'event_type': "foo", 'message_id': "abc"} + + with mock.patch("ceilometer.pipeline.LOG") as mock_logger: + ret = self.endpoint.process_notification(message) + 
self.assertEqual(oslo_messaging.NotificationResult.HANDLED, ret) + exception_mock = mock_logger.exception + self.assertIn('Continue after error from publisher', + exception_mock.call_args_list[0][0][0]) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/event/test_trait_plugins.py ceilometer-5.0.0~b3/ceilometer/tests/unit/event/test_trait_plugins.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/event/test_trait_plugins.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/event/test_trait_plugins.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,115 @@ +# +# Copyright 2013 Rackspace Hosting. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from oslotest import base + +from ceilometer.event import trait_plugins + + +class TestSplitterPlugin(base.BaseTestCase): + + def setUp(self): + super(TestSplitterPlugin, self).setUp() + self.pclass = trait_plugins.SplitterTraitPlugin + + def test_split(self): + param = dict(separator='-', segment=0) + plugin = self.pclass(**param) + match_list = [('test.thing', 'test-foobar-baz')] + value = plugin.trait_value(match_list) + self.assertEqual('test', value) + + param = dict(separator='-', segment=1) + plugin = self.pclass(**param) + match_list = [('test.thing', 'test-foobar-baz')] + value = plugin.trait_value(match_list) + self.assertEqual('foobar', value) + + param = dict(separator='-', segment=1, max_split=1) + plugin = self.pclass(**param) + match_list = [('test.thing', 'test-foobar-baz')] + value = plugin.trait_value(match_list) + self.assertEqual('foobar-baz', value) + + def test_no_sep(self): + param = dict(separator='-', segment=0) + plugin = self.pclass(**param) + match_list = [('test.thing', 'test.foobar.baz')] + value = plugin.trait_value(match_list) + self.assertEqual('test.foobar.baz', value) + + def test_no_segment(self): + param = dict(separator='-', segment=5) + plugin = self.pclass(**param) + match_list = [('test.thing', 'test-foobar-baz')] + value = plugin.trait_value(match_list) + self.assertIs(None, value) + + def test_no_match(self): + param = dict(separator='-', segment=0) + plugin = self.pclass(**param) + match_list = [] + value = plugin.trait_value(match_list) + self.assertIs(None, value) + + +class TestBitfieldPlugin(base.BaseTestCase): + + def setUp(self): + super(TestBitfieldPlugin, self).setUp() + self.pclass = trait_plugins.BitfieldTraitPlugin + self.init = 0 + self.params = dict(initial_bitfield=self.init, + flags=[dict(path='payload.foo', bit=0, value=42), + dict(path='payload.foo', bit=1, value=12), + dict(path='payload.thud', bit=1, value=23), + dict(path='thingy.boink', bit=4), + dict(path='thingy.quux', bit=6, + value="wokka"), + dict(path='payload.bar', bit=10, + value='test')]) + + def test_bitfield(self): + match_list = [('payload.foo', 12), + ('payload.bar', 'test'), + ('thingy.boink', 'testagain')] + + plugin = self.pclass(**self.params) + value = plugin.trait_value(match_list) + self.assertEqual(0x412, value) + + def test_initial(self): + match_list = [('payload.foo', 12), + 
('payload.bar', 'test'), + ('thingy.boink', 'testagain')] + self.params['initial_bitfield'] = 0x2000 + plugin = self.pclass(**self.params) + value = plugin.trait_value(match_list) + self.assertEqual(0x2412, value) + + def test_no_match(self): + match_list = [] + plugin = self.pclass(**self.params) + value = plugin.trait_value(match_list) + self.assertEqual(self.init, value) + + def test_multi(self): + match_list = [('payload.foo', 12), + ('payload.thud', 23), + ('payload.bar', 'test'), + ('thingy.boink', 'testagain')] + + plugin = self.pclass(**self.params) + value = plugin.trait_value(match_list) + self.assertEqual(0x412, value) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/hardware/inspector/test_inspector.py ceilometer-5.0.0~b3/ceilometer/tests/unit/hardware/inspector/test_inspector.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/hardware/inspector/test_inspector.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/hardware/inspector/test_inspector.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,33 @@ +# +# Copyright 2014 Intel Corp +# +# Authors: Lianhao Lu +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from oslo_utils import netutils + +from ceilometer.hardware import inspector +from ceilometer.tests import base + + +class TestHardwareInspector(base.BaseTestCase): + def test_get_inspector(self): + url = netutils.urlsplit("snmp://") + driver = inspector.get_inspector(url) + self.assertTrue(driver) + + def test_get_inspector_illegal(self): + url = netutils.urlsplit("illegal://") + self.assertRaises(RuntimeError, + inspector.get_inspector, + url) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/hardware/inspector/test_snmp.py ceilometer-5.0.0~b3/ceilometer/tests/unit/hardware/inspector/test_snmp.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/hardware/inspector/test_snmp.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/hardware/inspector/test_snmp.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,145 @@ +# +# Copyright 2013 Intel Corp +# +# Authors: Lianhao Lu +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
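+# NOTE: editorial orientation for the fakes below, restated from their
+# bodies rather than taken from the change itself. faux_getCmd_new answers
+# an EXACT query by returning each requested OID with the integer value of
+# its last component (so '1.3.6.1.4.1.2021.10.1.3.1' -> 1 and
+# '...10.1.3.8' -> 8), while faux_bulkCmd_new answers a PREFIX walk by
+# fabricating two rows per OID with suffixes '.1' and '.2' and values 1 and
+# 2. This is what test_inspect_generic_exact and
+# test_inspect_generic_prefix assert against.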
+"""Tests for ceilometer/hardware/inspector/snmp/inspector.py +""" +from oslo_utils import netutils +from oslotest import mockpatch + +from ceilometer.hardware.inspector import snmp +from ceilometer.tests import base as test_base + +ins = snmp.SNMPInspector + + +class FakeObjectName(object): + def __init__(self, name): + self.name = name + + def prettyPrint(self): + return str(self.name) + + +def faux_getCmd_new(authData, transportTarget, *oids, **kwargs): + varBinds = [(FakeObjectName(oid), + int(oid.split('.')[-1])) for oid in oids] + return (None, None, 0, varBinds) + + +def faux_bulkCmd_new(authData, transportTarget, nonRepeaters, maxRepetitions, + *oids, **kwargs): + varBindTable = [ + [(FakeObjectName(oid + ".%d" % i), i) for i in range(1, 3)] + for oid in oids + ] + return (None, None, 0, varBindTable) + + +class TestSNMPInspector(test_base.BaseTestCase): + mapping = { + 'test_exact': { + 'matching_type': snmp.EXACT, + 'metric_oid': ('1.3.6.1.4.1.2021.10.1.3.1', int), + 'metadata': { + 'meta': ('1.3.6.1.4.1.2021.10.1.3.8', int) + }, + 'post_op': '_fake_post_op', + }, + 'test_prefix': { + 'matching_type': snmp.PREFIX, + 'metric_oid': ('1.3.6.1.4.1.2021.9.1.8', int), + 'metadata': { + 'meta': ('1.3.6.1.4.1.2021.9.1.3', int) + }, + 'post_op': None, + }, + } + + def setUp(self): + super(TestSNMPInspector, self).setUp() + self.inspector = snmp.SNMPInspector() + self.host = netutils.urlsplit("snmp://localhost") + self.inspector.MAPPING = self.mapping + self.useFixture(mockpatch.PatchObject( + self.inspector._cmdGen, 'getCmd', new=faux_getCmd_new)) + self.useFixture(mockpatch.PatchObject( + self.inspector._cmdGen, 'bulkCmd', new=faux_bulkCmd_new)) + + def test_snmp_error(self): + def get_list(func, *args, **kwargs): + return list(func(*args, **kwargs)) + + def faux_parse(ret, is_bulk): + return (True, 'forced error') + + self.useFixture(mockpatch.PatchObject( + snmp, 'parse_snmp_return', new=faux_parse)) + + self.assertRaises(snmp.SNMPException, + get_list, + self.inspector.inspect_generic, + self.host, + 'test_exact', + {}) + + @staticmethod + def _fake_post_op(host, cache, meter_def, value, metadata, extra, suffix): + metadata.update(post_op_meta=4) + extra.update(project_id=2) + return value + + def test_inspect_generic_exact(self): + self.inspector._fake_post_op = self._fake_post_op + cache = {} + ret = list(self.inspector.inspect_generic(self.host, + 'test_exact', + cache)) + keys = cache[ins._CACHE_KEY_OID].keys() + self.assertIn('1.3.6.1.4.1.2021.10.1.3.1', keys) + self.assertIn('1.3.6.1.4.1.2021.10.1.3.8', keys) + self.assertEqual(1, len(ret)) + self.assertEqual(1, ret[0][0]) + self.assertEqual(8, ret[0][1]['meta']) + self.assertEqual(4, ret[0][1]['post_op_meta']) + self.assertEqual(2, ret[0][2]['project_id']) + + def test_inspect_generic_prefix(self): + cache = {} + ret = list(self.inspector.inspect_generic(self.host, + 'test_prefix', + cache)) + keys = cache[ins._CACHE_KEY_OID].keys() + self.assertIn('1.3.6.1.4.1.2021.9.1.8' + '.1', keys) + self.assertIn('1.3.6.1.4.1.2021.9.1.8' + '.2', keys) + self.assertIn('1.3.6.1.4.1.2021.9.1.3' + '.1', keys) + self.assertIn('1.3.6.1.4.1.2021.9.1.3' + '.2', keys) + self.assertEqual(2, len(ret)) + self.assertIn(ret[0][0], (1, 2)) + self.assertEqual(ret[0][0], ret[0][1]['meta']) + + def test_post_op_net(self): + self.useFixture(mockpatch.PatchObject( + self.inspector._cmdGen, 'bulkCmd', new=faux_bulkCmd_new)) + cache = {} + metadata = {} + ret = self.inspector._post_op_net(self.host, cache, None, + value=8, + metadata=metadata, + extra={}, + 
suffix=".2") + self.assertEqual(8, ret) + self.assertIn('ip', metadata) + self.assertIn("2", metadata['ip']) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/hardware/pollsters/base.py ceilometer-5.0.0~b3/ceilometer/tests/unit/hardware/pollsters/base.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/hardware/pollsters/base.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/hardware/pollsters/base.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,86 @@ +# +# Copyright 2013 Intel Corp +# +# Authors: Lianhao Lu +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import fixtures +import mock + +from ceilometer.agent import manager +from ceilometer.hardware.inspector import base as inspector_base +from ceilometer.tests import base as test_base + + +class FakeInspector(inspector_base.Inspector): + net_metadata = dict(name='test.teest', + mac='001122334455', + ip='10.0.0.2', + speed=1000) + disk_metadata = dict(device='/dev/sda1', path='/') + DATA = { + 'cpu.load.1min': (0.99, {}, {}), + 'cpu.load.5min': (0.77, {}, {}), + 'cpu.load.15min': (0.55, {}, {}), + 'memory.total': (1000, {}, {}), + 'memory.used': (90, {}, {}), + 'memory.buffer': (500, {}, {}), + 'memory.cached': (200, {}, {}), + 'network.incoming.bytes': (90, net_metadata, {}), + 'network.outgoing.bytes': (80, net_metadata, {}), + 'network.outgoing.errors': (1, net_metadata, {}), + 'disk.size.total': (1000, disk_metadata, {}), + 'disk.size.used': (90, disk_metadata, {}), + 'system_stats.cpu.idle': (62, {}, {}), + 'system_stats.io.outgoing.blocks': (100, {}, {}), + 'system_stats.io.incoming.blocks': (120, {}, {}), + 'network.ip.outgoing.datagrams': (200, {}, {}), + 'network.ip.incoming.datagrams': (300, {}, {}), + } + + def inspect_generic(self, host, identifier, cache, extra_metadata=None): + yield self.DATA[identifier] + + +class TestPollsterBase(test_base.BaseTestCase): + @staticmethod + def faux_get_inspector(url, namespace=None): + return FakeInspector() + + def setUp(self): + super(TestPollsterBase, self).setUp() + self.hosts = ["test://test", "test://test2"] + self.useFixture(fixtures.MonkeyPatch( + 'ceilometer.hardware.inspector.get_inspector', + self.faux_get_inspector)) + + @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) + def _check_get_samples(self, factory, name, + expected_value, expected_type, expected_unit=None): + mgr = manager.AgentManager() + pollster = factory() + cache = {} + samples = list(pollster.get_samples(mgr, cache, self.hosts)) + self.assertTrue(samples) + self.assertIn(pollster.CACHE_KEY, cache) + for host in self.hosts: + self.assertIn(host, cache[pollster.CACHE_KEY]) + + self.assertEqual(set([name]), + set([s.name for s in samples])) + match = [s for s in samples if s.name == name] + self.assertEqual(expected_value, match[0].volume) + self.assertEqual(expected_type, match[0].type) + if expected_unit: + self.assertEqual(expected_unit, match[0].unit) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/hardware/pollsters/test_cpu.py 
ceilometer-5.0.0~b3/ceilometer/tests/unit/hardware/pollsters/test_cpu.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/hardware/pollsters/test_cpu.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/hardware/pollsters/test_cpu.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,40 @@ +# +# Copyright 2013 Intel Corp +# +# Authors: Lianhao Lu +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from ceilometer.hardware.pollsters import cpu +from ceilometer import sample +from ceilometer.tests.unit.hardware.pollsters import base + + +class TestCPUPollsters(base.TestPollsterBase): + def test_1min(self): + self._check_get_samples(cpu.CPULoad1MinPollster, + 'hardware.cpu.load.1min', + 0.99, sample.TYPE_GAUGE, + expected_unit='process') + + def test_5min(self): + self._check_get_samples(cpu.CPULoad5MinPollster, + 'hardware.cpu.load.5min', + 0.77, sample.TYPE_GAUGE, + expected_unit='process') + + def test_15min(self): + self._check_get_samples(cpu.CPULoad15MinPollster, + 'hardware.cpu.load.15min', + 0.55, sample.TYPE_GAUGE, + expected_unit='process') diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/hardware/pollsters/test_disk.py ceilometer-5.0.0~b3/ceilometer/tests/unit/hardware/pollsters/test_disk.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/hardware/pollsters/test_disk.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/hardware/pollsters/test_disk.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,32 @@ +# +# Copyright 2013 Intel Corp +# +# Authors: Lianhao Lu +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
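[Editorial note: the disk tests below follow the same pattern as the CPU tests above: instantiate a pollster, run it through the TestPollsterBase harness, and compare against the canned reading in FakeInspector.DATA. As a reference for that contract, here is a minimal sketch, not part of the patch itself, showing that inspect_generic() yields (value, metadata, extra) triples:

    from ceilometer.tests.unit.hardware.pollsters.base import FakeInspector

    # FakeInspector.DATA maps meter identifiers to canned readings.
    value, metadata, extra = next(
        FakeInspector().inspect_generic(host=None,
                                        identifier='disk.size.total',
                                        cache={}))
    assert value == 1000                      # the volume asserted below
    assert metadata['device'] == '/dev/sda1'  # disk_metadata from base.py
]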
+ +from ceilometer.hardware.pollsters import disk +from ceilometer import sample +from ceilometer.tests.unit.hardware.pollsters import base + + +class TestDiskPollsters(base.TestPollsterBase): + def test_disk_size_total(self): + self._check_get_samples(disk.DiskTotalPollster, + 'hardware.disk.size.total', + 1000, sample.TYPE_GAUGE) + + def test_disk_size_used(self): + self._check_get_samples(disk.DiskUsedPollster, + 'hardware.disk.size.used', + 90, sample.TYPE_GAUGE) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/hardware/pollsters/test_memory.py ceilometer-5.0.0~b3/ceilometer/tests/unit/hardware/pollsters/test_memory.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/hardware/pollsters/test_memory.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/hardware/pollsters/test_memory.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,42 @@ +# +# Copyright 2013 Intel Corp +# +# Authors: Lianhao Lu +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from ceilometer.hardware.pollsters import memory +from ceilometer import sample +from ceilometer.tests.unit.hardware.pollsters import base + + +class TestMemoryPollsters(base.TestPollsterBase): + def test_memory_size_total(self): + self._check_get_samples(memory.MemoryTotalPollster, + 'hardware.memory.total', + 1000, sample.TYPE_GAUGE) + + def test_memory_size_used(self): + self._check_get_samples(memory.MemoryUsedPollster, + 'hardware.memory.used', + 90, sample.TYPE_GAUGE) + + def test_memory_size_buffer(self): + self._check_get_samples(memory.MemoryBufferPollster, + 'hardware.memory.buffer', + 500, sample.TYPE_GAUGE) + + def test_memory_size_cached(self): + self._check_get_samples(memory.MemoryCachedPollster, + 'hardware.memory.cached', + 200, sample.TYPE_GAUGE) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/hardware/pollsters/test_net.py ceilometer-5.0.0~b3/ceilometer/tests/unit/hardware/pollsters/test_net.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/hardware/pollsters/test_net.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/hardware/pollsters/test_net.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,40 @@ +# +# Copyright 2013 Intel Corp +# +# Authors: Lianhao Lu +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
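[Editorial note: the network tests below differ from the memory gauges above in two ways: the meters are cumulative counters (sample.TYPE_CUMULATIVE), and FakeInspector attaches interface metadata to each reading. A short sketch of the second point, again editorial and not part of the patch:

    from ceilometer.tests.unit.hardware.pollsters.base import FakeInspector

    value, metadata, extra = next(
        FakeInspector().inspect_generic(host=None,
                                        identifier='network.incoming.bytes',
                                        cache={}))
    assert value == 90                        # checked by test_incoming
    assert metadata['mac'] == '001122334455'  # net_metadata from base.py
]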
+
+from ceilometer.hardware.pollsters import net
+from ceilometer import sample
+from ceilometer.tests.unit.hardware.pollsters import base
+
+
+class TestNetPollsters(base.TestPollsterBase):
+    def test_incoming(self):
+        self._check_get_samples(net.IncomingBytesPollster,
+                                'hardware.network.incoming.bytes',
+                                90, sample.TYPE_CUMULATIVE,
+                                expected_unit='B')
+
+    def test_outgoing(self):
+        self._check_get_samples(net.OutgoingBytesPollster,
+                                'hardware.network.outgoing.bytes',
+                                80, sample.TYPE_CUMULATIVE,
+                                expected_unit='B')
+
+    def test_error(self):
+        self._check_get_samples(net.OutgoingErrorsPollster,
+                                'hardware.network.outgoing.errors',
+                                1, sample.TYPE_CUMULATIVE,
+                                expected_unit='packet')
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/hardware/pollsters/test_network_aggregated.py ceilometer-5.0.0~b3/ceilometer/tests/unit/hardware/pollsters/test_network_aggregated.py
--- ceilometer-5.0.0~b2/ceilometer/tests/unit/hardware/pollsters/test_network_aggregated.py 1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/unit/hardware/pollsters/test_network_aggregated.py 2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,31 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from ceilometer.hardware.pollsters import network_aggregated
+from ceilometer import sample
+from ceilometer.tests.unit.hardware.pollsters import base
+
+
+class TestNetworkAggregatedPollsters(base.TestPollsterBase):
+    def test_outgoing(self):
+        self._check_get_samples(network_aggregated.
+                                NetworkAggregatedIPOutRequests,
+                                'hardware.network.ip.outgoing.datagrams',
+                                200, sample.TYPE_CUMULATIVE,
+                                expected_unit='datagrams')
+
+    def test_incoming(self):
+        self._check_get_samples(network_aggregated.
+                                NetworkAggregatedIPInReceives,
+                                'hardware.network.ip.incoming.datagrams',
+                                300, sample.TYPE_CUMULATIVE,
+                                expected_unit='datagrams')
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/hardware/pollsters/test_system.py ceilometer-5.0.0~b3/ceilometer/tests/unit/hardware/pollsters/test_system.py
--- ceilometer-5.0.0~b2/ceilometer/tests/unit/hardware/pollsters/test_system.py 1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/unit/hardware/pollsters/test_system.py 2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,35 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
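[Editorial note: the system_stats tests below reuse the same harness; note that _check_get_samples() polls every URL in self.hosts, while FakeInspector ignores the host argument and always returns the canned reading. Editorial sketch, not part of the patch:

    from ceilometer.tests.unit.hardware.pollsters.base import FakeInspector

    insp = FakeInspector()
    for host in ("test://test", "test://test2"):
        value, _, _ = next(insp.inspect_generic(host,
                                                'system_stats.cpu.idle',
                                                cache={}))
        assert value == 62  # the '%' gauge asserted by test_cpu_idle
]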
+ +from ceilometer.hardware.pollsters import system +from ceilometer import sample +from ceilometer.tests.unit.hardware.pollsters import base + + +class TestSystemPollsters(base.TestPollsterBase): + def test_cpu_idle(self): + self._check_get_samples(system.SystemCpuIdlePollster, + 'hardware.system_stats.cpu.idle', + 62, sample.TYPE_GAUGE, + expected_unit='%') + + def test_io_outgoing(self): + self._check_get_samples(system.SystemIORawSentPollster, + 'hardware.system_stats.io.outgoing.blocks', + 100, sample.TYPE_CUMULATIVE, + expected_unit='blocks') + + def test_io_incoming(self): + self._check_get_samples(system.SystemIORawReceivedPollster, + 'hardware.system_stats.io.incoming.blocks', + 120, sample.TYPE_CUMULATIVE, + expected_unit='blocks') diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/hardware/pollsters/test_util.py ceilometer-5.0.0~b3/ceilometer/tests/unit/hardware/pollsters/test_util.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/hardware/pollsters/test_util.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/hardware/pollsters/test_util.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,61 @@ +# +# Copyright 2013 Intel Corp +# +# Authors: Lianhao Lu +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_utils import netutils + +from ceilometer.hardware.pollsters import util +from ceilometer import sample +from ceilometer.tests import base as test_base + + +class TestPollsterUtils(test_base.BaseTestCase): + def setUp(self): + super(TestPollsterUtils, self).setUp() + self.host_url = netutils.urlsplit("snmp://127.0.0.1:161") + + def test_make_sample(self): + s = util.make_sample_from_host(self.host_url, + name='test', + sample_type=sample.TYPE_GAUGE, + unit='B', + volume=1, + res_metadata={ + 'metakey': 'metaval', + }) + self.assertEqual('127.0.0.1', s.resource_id) + self.assertIn('snmp://127.0.0.1:161', s.resource_metadata.values()) + self.assertIn('metakey', s.resource_metadata.keys()) + + def test_make_sample_extra(self): + extra = { + 'project_id': 'project', + 'resource_id': 'resource' + } + s = util.make_sample_from_host(self.host_url, + name='test', + sample_type=sample.TYPE_GAUGE, + unit='B', + volume=1, + extra=extra) + self.assertIsNone(s.user_id) + self.assertEqual('project', s.project_id) + self.assertEqual('resource', s.resource_id) + self.assertEqual({'resource_url': 'snmp://127.0.0.1:161', + 'project_id': 'project', + 'resource_id': + 'resource'}, + s.resource_metadata) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/image/test_glance.py ceilometer-5.0.0~b3/ceilometer/tests/unit/image/test_glance.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/image/test_glance.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/image/test_glance.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,229 @@ +# +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from oslo_config import fixture as fixture_config +from oslo_context import context +from oslotest import base +from oslotest import mockpatch + +from ceilometer.agent import manager +from ceilometer.image import glance + +IMAGE_LIST = [ + type('Image', (object,), + {u'status': u'queued', + u'name': "some name", + u'deleted': False, + u'container_format': None, + u'created_at': u'2012-09-18T16:29:46', + u'disk_format': None, + u'updated_at': u'2012-09-18T16:29:46', + u'properties': {}, + u'min_disk': 0, + u'protected': False, + u'id': u'1d21a8d0-25f4-4e0a-b4ec-85f40237676b', + u'location': None, + u'checksum': None, + u'owner': u'4c8364fc20184ed7971b76602aa96184', + u'is_public': True, + u'deleted_at': None, + u'min_ram': 0, + u'size': 2048}), + type('Image', (object,), + {u'status': u'active', + u'name': "hello world", + u'deleted': False, + u'container_format': None, + u'created_at': u'2012-09-18T16:27:41', + u'disk_format': None, + u'updated_at': u'2012-09-18T16:27:41', + u'properties': {}, + u'min_disk': 0, + u'protected': False, + u'id': u'22be9f90-864d-494c-aa74-8035fd535989', + u'location': None, + u'checksum': None, + u'owner': u'9e4f98287a0246daa42eaf4025db99d4', + u'is_public': True, + u'deleted_at': None, + u'min_ram': 0, + u'size': 0}), + type('Image', (object,), + {u'status': u'queued', + u'name': None, + u'deleted': False, + u'container_format': None, + u'created_at': u'2012-09-18T16:23:27', + u'disk_format': "raw", + u'updated_at': u'2012-09-18T16:23:27', + u'properties': {}, + u'min_disk': 0, + u'protected': False, + u'id': u'8d133f6c-38a8-403c-b02c-7071b69b432d', + u'location': None, + u'checksum': None, + u'owner': u'5f8806a76aa34ee8b8fc8397bd154319', + u'is_public': True, + u'deleted_at': None, + u'min_ram': 0, + u'size': 1024}), + type('Image', (object,), + {u'status': u'queued', + u'name': "some name", + u'deleted': False, + u'container_format': None, + u'created_at': u'2012-09-18T16:29:46', + u'disk_format': None, + u'updated_at': u'2012-09-18T16:29:46', + u'properties': {}, + u'min_disk': 0, + u'protected': False, + u'id': u'e753b196-49b4-48e8-8ca5-09ebd9805f40', + u'location': None, + u'checksum': None, + u'owner': u'4c8364fc20184ed7971b76602aa96184', + u'is_public': True, + u'deleted_at': None, + u'min_ram': 0, + u'size': 2048}), +] + +ENDPOINT = 'end://point' + + +class _BaseObject(object): + pass + + +class FakeGlanceClient(object): + class images(object): + pass + + +class TestManager(manager.AgentManager): + + def __init__(self): + super(TestManager, self).__init__() + self.keystone = mock.Mock() + self.keystone.service_catalog.get_endpoints = mock.Mock( + return_value={'image': mock.ANY}) + + +class TestImagePollsterPageSize(base.BaseTestCase): + + @staticmethod + def fake_get_glance_client(ksclient, endpoint): + glanceclient = FakeGlanceClient() + glanceclient.images.list = mock.MagicMock(return_value=IMAGE_LIST) + return glanceclient + + @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) + def setUp(self): + super(TestImagePollsterPageSize, self).setUp() + self.context = context.get_admin_context() + self.manager = 
TestManager() + self.useFixture(mockpatch.PatchObject( + glance._Base, 'get_glance_client', + side_effect=self.fake_get_glance_client)) + self.CONF = self.useFixture(fixture_config.Config()).conf + + def _do_test_iter_images(self, page_size=0, length=0): + self.CONF.set_override("glance_page_size", page_size) + images = list(glance.ImagePollster(). + _iter_images(self.manager.keystone, {}, ENDPOINT)) + kwargs = {} + if page_size > 0: + kwargs['page_size'] = page_size + FakeGlanceClient.images.list.assert_called_with( + filters={'is_public': None}, **kwargs) + self.assertEqual(length, len(images)) + + def test_page_size(self): + self._do_test_iter_images(100, 4) + + def test_page_size_default(self): + self._do_test_iter_images(length=4) + + def test_page_size_negative_number(self): + self._do_test_iter_images(-1, 4) + + +class TestImagePollster(base.BaseTestCase): + + @staticmethod + def fake_get_glance_client(ksclient, endpoint): + glanceclient = _BaseObject() + setattr(glanceclient, "images", _BaseObject()) + setattr(glanceclient.images, + "list", lambda *args, **kwargs: iter(IMAGE_LIST)) + return glanceclient + + @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) + def setUp(self): + super(TestImagePollster, self).setUp() + self.context = context.get_admin_context() + self.manager = TestManager() + self.useFixture(mockpatch.PatchObject( + glance._Base, 'get_glance_client', + side_effect=self.fake_get_glance_client)) + + def test_default_discovery(self): + pollster = glance.ImagePollster() + self.assertEqual('endpoint:image', pollster.default_discovery) + + def test_iter_images(self): + # Tests whether the iter_images method returns a unique image + # list when there is nothing in the cache + images = list(glance.ImagePollster(). + _iter_images(self.manager.keystone, {}, ENDPOINT)) + self.assertEqual(len(set(image.id for image in images)), len(images)) + + def test_iter_images_cached(self): + # Tests whether the iter_images method returns the values from + # the cache + cache = {'%s-images' % ENDPOINT: []} + images = list(glance.ImagePollster(). 
+ _iter_images(self.manager.keystone, cache, + ENDPOINT)) + self.assertEqual([], images) + + def test_image(self): + samples = list(glance.ImagePollster().get_samples(self.manager, {}, + [ENDPOINT])) + self.assertEqual(4, len(samples)) + for sample in samples: + self.assertEqual(1, sample.volume) + + def test_image_size(self): + samples = list(glance.ImageSizePollster().get_samples(self.manager, + {}, + [ENDPOINT])) + self.assertEqual(4, len(samples)) + for image in IMAGE_LIST: + self.assertTrue( + any(map(lambda sample: sample.volume == image.size, + samples))) + + def test_image_get_sample_names(self): + samples = list(glance.ImagePollster().get_samples(self.manager, {}, + [ENDPOINT])) + self.assertEqual(set(['image']), set([s.name for s in samples])) + + def test_image_size_get_sample_names(self): + samples = list(glance.ImageSizePollster().get_samples(self.manager, + {}, + [ENDPOINT])) + self.assertEqual(set(['image.size']), set([s.name for s in samples])) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/ipmi/notifications/ipmi_test_data.py ceilometer-5.0.0~b3/ceilometer/tests/unit/ipmi/notifications/ipmi_test_data.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/ipmi/notifications/ipmi_test_data.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/ipmi/notifications/ipmi_test_data.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,795 @@ +# +# Copyright 2014 Red Hat, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Sample data for test_ipmi. + +This data is provided as a sample of the data expected from the ipmitool +driver in the Ironic project, which is the publisher of the notifications +being tested. 
+""" + + +TEMPERATURE_DATA = { + 'DIMM GH VR Temp (0x3b)': { + 'Status': 'ok', + 'Deassertions Enabled': 'unc+ ucr+ unr+', + 'Sensor Reading': '26 (+/- 0.500) degrees C', + 'Entity ID': '20.6 (Power Module)', + 'Assertions Enabled': 'unc+ ucr+ unr+', + 'Positive Hysteresis': '4.000', + 'Assertion Events': '', + 'Upper non-critical': '95.000', + 'Event Message Control': 'Per-threshold', + 'Upper non-recoverable': '105.000', + 'Normal Maximum': '112.000', + 'Maximum sensor range': 'Unspecified', + 'Sensor Type (Analog)': 'Temperature', + 'Readable Thresholds': 'unc ucr unr', + 'Negative Hysteresis': 'Unspecified', + 'Threshold Read Mask': 'unc ucr unr', + 'Upper critical': '100.000', + 'Sensor ID': 'DIMM GH VR Temp (0x3b)', + 'Settable Thresholds': '', + 'Minimum sensor range': 'Unspecified', + 'Nominal Reading': '16.000' + }, + 'CPU1 VR Temp (0x36)': { + 'Status': 'ok', + 'Deassertions Enabled': 'unc+ ucr+ unr+', + 'Sensor Reading': '32 (+/- 0.500) degrees C', + 'Entity ID': '20.1 (Power Module)', + 'Assertions Enabled': 'unc+ ucr+ unr+', + 'Positive Hysteresis': '4.000', + 'Assertion Events': '', + 'Upper non-critical': '95.000', + 'Event Message Control': 'Per-threshold', + 'Upper non-recoverable': '105.000', + 'Normal Maximum': '112.000', + 'Maximum sensor range': 'Unspecified', + 'Sensor Type (Analog)': 'Temperature', + 'Readable Thresholds': 'unc ucr unr', + 'Negative Hysteresis': 'Unspecified', + 'Threshold Read Mask': 'unc ucr unr', + 'Upper critical': '100.000', + 'Sensor ID': 'CPU1 VR Temp (0x36)', + 'Settable Thresholds': '', + 'Minimum sensor range': 'Unspecified', + 'Nominal Reading': '16.000' + }, + 'DIMM EF VR Temp (0x3a)': { + 'Status': 'ok', + 'Deassertions Enabled': 'unc+ ucr+ unr+', + 'Sensor Reading': '26 (+/- 0.500) degrees C', + 'Entity ID': '20.5 (Power Module)', + 'Assertions Enabled': 'unc+ ucr+ unr+', + 'Positive Hysteresis': '4.000', + 'Assertion Events': '', + 'Upper non-critical': '95.000', + 'Event Message Control': 'Per-threshold', + 'Upper non-recoverable': '105.000', + 'Normal Maximum': '112.000', + 'Maximum sensor range': 'Unspecified', + 'Sensor Type (Analog)': 'Temperature', + 'Readable Thresholds': 'unc ucr unr', + 'Negative Hysteresis': 'Unspecified', + 'Threshold Read Mask': 'unc ucr unr', + 'Upper critical': '100.000', + 'Sensor ID': 'DIMM EF VR Temp (0x3a)', + 'Settable Thresholds': '', + 'Minimum sensor range': 'Unspecified', + 'Nominal Reading': '16.000' + }, + 'CPU2 VR Temp (0x37)': { + 'Status': 'ok', + 'Deassertions Enabled': 'unc+ ucr+ unr+', + 'Sensor Reading': '31 (+/- 0.500) degrees C', + 'Entity ID': '20.2 (Power Module)', + 'Assertions Enabled': 'unc+ ucr+ unr+', + 'Positive Hysteresis': '4.000', + 'Assertion Events': '', + 'Upper non-critical': '95.000', + 'Event Message Control': 'Per-threshold', + 'Upper non-recoverable': '105.000', + 'Normal Maximum': '112.000', + 'Maximum sensor range': 'Unspecified', + 'Sensor Type (Analog)': 'Temperature', + 'Readable Thresholds': 'unc ucr unr', + 'Negative Hysteresis': 'Unspecified', + 'Threshold Read Mask': 'unc ucr unr', + 'Upper critical': '100.000', + 'Sensor ID': 'CPU2 VR Temp (0x37)', + 'Settable Thresholds': '', + 'Minimum sensor range': 'Unspecified', + 'Nominal Reading': '16.000' + }, + 'Ambient Temp (0x32)': { + 'Status': 'ok', + 'Sensor Reading': '25 (+/- 0) degrees C', + 'Entity ID': '12.1 (Front Panel Board)', + 'Assertions Enabled': 'unc+ ucr+ unr+', + 'Event Message Control': 'Per-threshold', + 'Assertion Events': '', + 'Upper non-critical': '43.000', + 'Deassertions Enabled': 'unc+ 
ucr+ unr+', + 'Upper non-recoverable': '50.000', + 'Positive Hysteresis': '4.000', + 'Maximum sensor range': 'Unspecified', + 'Sensor Type (Analog)': 'Temperature', + 'Readable Thresholds': 'unc ucr unr', + 'Negative Hysteresis': 'Unspecified', + 'Threshold Read Mask': 'unc ucr unr', + 'Upper critical': '46.000', + 'Sensor ID': 'Ambient Temp (0x32)', + 'Settable Thresholds': '', + 'Minimum sensor range': 'Unspecified', + 'Nominal Reading': '25.000' + }, + 'Mezz Card Temp (0x35)': { + 'Status': 'Disabled', + 'Sensor Reading': 'Disabled', + 'Entity ID': '44.1 (I/O Module)', + 'Event Message Control': 'Per-threshold', + 'Upper non-critical': '70.000', + 'Upper non-recoverable': '85.000', + 'Positive Hysteresis': '4.000', + 'Maximum sensor range': 'Unspecified', + 'Sensor Type (Analog)': 'Temperature', + 'Readable Thresholds': 'unc ucr unr', + 'Negative Hysteresis': 'Unspecified', + 'Threshold Read Mask': 'unc ucr unr', + 'Upper critical': '80.000', + 'Sensor ID': 'Mezz Card Temp (0x35)', + 'Settable Thresholds': '', + 'Minimum sensor range': 'Unspecified', + 'Nominal Reading': '25.000' + }, + 'PCH Temp (0x3c)': { + 'Status': 'ok', + 'Deassertions Enabled': 'unc+ ucr+ unr+', + 'Sensor Reading': '46 (+/- 0.500) degrees C', + 'Entity ID': '45.1 (Processor/IO Module)', + 'Assertions Enabled': 'unc+ ucr+ unr+', + 'Positive Hysteresis': '4.000', + 'Assertion Events': '', + 'Upper non-critical': '93.000', + 'Event Message Control': 'Per-threshold', + 'Upper non-recoverable': '103.000', + 'Normal Maximum': '112.000', + 'Maximum sensor range': 'Unspecified', + 'Sensor Type (Analog)': 'Temperature', + 'Readable Thresholds': 'unc ucr unr', + 'Negative Hysteresis': 'Unspecified', + 'Threshold Read Mask': 'unc ucr unr', + 'Upper critical': '98.000', + 'Sensor ID': 'PCH Temp (0x3c)', + 'Settable Thresholds': '', + 'Minimum sensor range': 'Unspecified', + 'Nominal Reading': '16.000' + }, + 'DIMM CD VR Temp (0x39)': { + 'Status': 'ok', + 'Deassertions Enabled': 'unc+ ucr+ unr+', + 'Sensor Reading': '27 (+/- 0.500) degrees C', + 'Entity ID': '20.4 (Power Module)', + 'Assertions Enabled': 'unc+ ucr+ unr+', + 'Positive Hysteresis': '4.000', + 'Assertion Events': '', + 'Upper non-critical': '95.000', + 'Event Message Control': 'Per-threshold', + 'Upper non-recoverable': '105.000', + 'Normal Maximum': '112.000', + 'Maximum sensor range': 'Unspecified', + 'Sensor Type (Analog)': 'Temperature', + 'Readable Thresholds': 'unc ucr unr', + 'Negative Hysteresis': 'Unspecified', + 'Threshold Read Mask': 'unc ucr unr', + 'Upper critical': '100.000', + 'Sensor ID': 'DIMM CD VR Temp (0x39)', + 'Settable Thresholds': '', + 'Minimum sensor range': 'Unspecified', + 'Nominal Reading': '16.000' + }, + 'PCI Riser 2 Temp (0x34)': { + 'Status': 'ok', + 'Deassertions Enabled': 'unc+ ucr+ unr+', + 'Sensor Reading': '30 (+/- 0) degrees C', + 'Entity ID': '16.2 (System Internal Expansion Board)', + 'Assertions Enabled': 'unc+ ucr+ unr+', + 'Positive Hysteresis': '4.000', + 'Assertion Events': '', + 'Upper non-critical': '70.000', + 'Event Message Control': 'Per-threshold', + 'Upper non-recoverable': '85.000', + 'Normal Maximum': '112.000', + 'Maximum sensor range': 'Unspecified', + 'Sensor Type (Analog)': 'Temperature', + 'Readable Thresholds': 'unc ucr unr', + 'Negative Hysteresis': 'Unspecified', + 'Threshold Read Mask': 'unc ucr unr', + 'Upper critical': '80.000', + 'Sensor ID': 'PCI Riser 2 Temp (0x34)', + 'Settable Thresholds': '', + 'Minimum sensor range': 'Unspecified', + 'Nominal Reading': '16.000' + }, + 'DIMM AB VR Temp 
(0x38)': { + 'Status': 'ok', + 'Deassertions Enabled': 'unc+ ucr+ unr+', + 'Sensor Reading': '28 (+/- 0.500) degrees C', + 'Entity ID': '20.3 (Power Module)', + 'Assertions Enabled': 'unc+ ucr+ unr+', + 'Positive Hysteresis': '4.000', + 'Assertion Events': '', + 'Upper non-critical': '95.000', + 'Event Message Control': 'Per-threshold', + 'Upper non-recoverable': '105.000', + 'Normal Maximum': '112.000', + 'Maximum sensor range': 'Unspecified', + 'Sensor Type (Analog)': 'Temperature', + 'Readable Thresholds': 'unc ucr unr', + 'Negative Hysteresis': 'Unspecified', + 'Threshold Read Mask': 'unc ucr unr', + 'Upper critical': '100.000', + 'Sensor ID': 'DIMM AB VR Temp (0x38)', + 'Settable Thresholds': '', + 'Minimum sensor range': 'Unspecified', + 'Nominal Reading': '16.000' + }, + 'PCI Riser 1 Temp (0x33)': { + 'Status': 'ok', + 'Deassertions Enabled': 'unc+ ucr+ unr+', + 'Sensor Reading': '38 (+/- 0) degrees C', + 'Entity ID': '16.1 (System Internal Expansion Board)', + 'Assertions Enabled': 'unc+ ucr+ unr+', + 'Positive Hysteresis': '4.000', + 'Assertion Events': '', + 'Upper non-critical': '70.000', + 'Event Message Control': 'Per-threshold', + 'Upper non-recoverable': '85.000', + 'Normal Maximum': '112.000', + 'Maximum sensor range': 'Unspecified', + 'Sensor Type (Analog)': 'Temperature', + 'Readable Thresholds': 'unc ucr unr', + 'Negative Hysteresis': 'Unspecified', + 'Threshold Read Mask': 'unc ucr unr', + 'Upper critical': '80.000', + 'Sensor ID': 'PCI Riser 1 Temp (0x33)', + 'Settable Thresholds': '', + 'Minimum sensor range': 'Unspecified', + 'Nominal Reading': '16.000' + }, +} + + +CURRENT_DATA = { + 'Avg Power (0x2e)': { + 'Status': 'ok', + 'Sensor Reading': '130 (+/- 0) Watts', + 'Entity ID': '21.0 (Power Management)', + 'Assertions Enabled': '', + 'Event Message Control': 'Per-threshold', + 'Readable Thresholds': 'No Thresholds', + 'Positive Hysteresis': 'Unspecified', + 'Sensor Type (Analog)': 'Current', + 'Negative Hysteresis': 'Unspecified', + 'Maximum sensor range': 'Unspecified', + 'Sensor ID': 'Avg Power (0x2e)', + 'Assertion Events': '', + 'Minimum sensor range': '2550.000', + 'Settable Thresholds': 'No Thresholds' + } +} + + +FAN_DATA = { + 'Fan 4A Tach (0x46)': { + 'Status': 'ok', + 'Sensor Reading': '6900 (+/- 0) RPM', + 'Entity ID': '29.4 (Fan Device)', + 'Assertions Enabled': 'lcr-', + 'Normal Minimum': '2580.000', + 'Positive Hysteresis': '120.000', + 'Assertion Events': '', + 'Event Message Control': 'Per-threshold', + 'Normal Maximum': '15300.000', + 'Deassertions Enabled': 'lcr-', + 'Sensor Type (Analog)': 'Fan', + 'Lower critical': '1920.000', + 'Negative Hysteresis': '120.000', + 'Threshold Read Mask': 'lcr', + 'Maximum sensor range': 'Unspecified', + 'Readable Thresholds': 'lcr', + 'Sensor ID': 'Fan 4A Tach (0x46)', + 'Settable Thresholds': '', + 'Minimum sensor range': 'Unspecified', + 'Nominal Reading': '4020.000' + }, + 'Fan 5A Tach (0x48)': { + 'Status': 'ok', + 'Sensor Reading': '7140 (+/- 0) RPM', + 'Entity ID': '29.5 (Fan Device)', + 'Assertions Enabled': 'lcr-', + 'Normal Minimum': '2580.000', + 'Positive Hysteresis': '120.000', + 'Assertion Events': '', + 'Event Message Control': 'Per-threshold', + 'Normal Maximum': '15300.000', + 'Deassertions Enabled': 'lcr-', + 'Sensor Type (Analog)': 'Fan', + 'Lower critical': '1920.000', + 'Negative Hysteresis': '120.000', + 'Threshold Read Mask': 'lcr', + 'Maximum sensor range': 'Unspecified', + 'Readable Thresholds': 'lcr', + 'Sensor ID': 'Fan 5A Tach (0x48)', + 'Settable Thresholds': '', + 'Minimum sensor 
range': 'Unspecified', + 'Nominal Reading': '4020.000' + }, + 'Fan 3A Tach (0x44)': { + 'Status': 'ok', + 'Sensor Reading': '6900 (+/- 0) RPM', + 'Entity ID': '29.3 (Fan Device)', + 'Assertions Enabled': 'lcr-', + 'Normal Minimum': '2580.000', + 'Positive Hysteresis': '120.000', + 'Assertion Events': '', + 'Event Message Control': 'Per-threshold', + 'Normal Maximum': '15300.000', + 'Deassertions Enabled': 'lcr-', + 'Sensor Type (Analog)': 'Fan', + 'Lower critical': '1920.000', + 'Negative Hysteresis': '120.000', + 'Threshold Read Mask': 'lcr', + 'Maximum sensor range': 'Unspecified', + 'Readable Thresholds': 'lcr', + 'Sensor ID': 'Fan 3A Tach (0x44)', + 'Settable Thresholds': '', + 'Minimum sensor range': 'Unspecified', + 'Nominal Reading': '4020.000' + }, + 'Fan 1A Tach (0x40)': { + 'Status': 'ok', + 'Sensor Reading': '6960 (+/- 0) RPM', + 'Entity ID': '29.1 (Fan Device)', + 'Assertions Enabled': 'lcr-', + 'Normal Minimum': '2580.000', + 'Positive Hysteresis': '120.000', + 'Assertion Events': '', + 'Event Message Control': 'Per-threshold', + 'Normal Maximum': '15300.000', + 'Deassertions Enabled': 'lcr-', + 'Sensor Type (Analog)': 'Fan', + 'Lower critical': '1920.000', + 'Negative Hysteresis': '120.000', + 'Threshold Read Mask': 'lcr', + 'Maximum sensor range': 'Unspecified', + 'Readable Thresholds': 'lcr', + 'Sensor ID': 'Fan 1A Tach (0x40)', + 'Settable Thresholds': '', + 'Minimum sensor range': 'Unspecified', + 'Nominal Reading': '4020.000' + }, + 'Fan 3B Tach (0x45)': { + 'Status': 'ok', + 'Sensor Reading': '7104 (+/- 0) RPM', + 'Entity ID': '29.3 (Fan Device)', + 'Assertions Enabled': 'lcr-', + 'Normal Minimum': '2752.000', + 'Positive Hysteresis': '128.000', + 'Assertion Events': '', + 'Event Message Control': 'Per-threshold', + 'Normal Maximum': '16320.000', + 'Deassertions Enabled': 'lcr-', + 'Sensor Type (Analog)': 'Fan', + 'Lower critical': '1920.000', + 'Negative Hysteresis': '128.000', + 'Threshold Read Mask': 'lcr', + 'Maximum sensor range': 'Unspecified', + 'Readable Thresholds': 'lcr', + 'Sensor ID': 'Fan 3B Tach (0x45)', + 'Settable Thresholds': '', + 'Minimum sensor range': 'Unspecified', + 'Nominal Reading': '3968.000' + }, + 'Fan 2A Tach (0x42)': { + 'Status': 'ok', + 'Sensor Reading': '7080 (+/- 0) RPM', + 'Entity ID': '29.2 (Fan Device)', + 'Assertions Enabled': 'lcr-', + 'Normal Minimum': '2580.000', + 'Positive Hysteresis': '120.000', + 'Assertion Events': '', + 'Event Message Control': 'Per-threshold', + 'Normal Maximum': '15300.000', + 'Deassertions Enabled': 'lcr-', + 'Sensor Type (Analog)': 'Fan', + 'Lower critical': '1920.000', + 'Negative Hysteresis': '120.000', + 'Threshold Read Mask': 'lcr', + 'Maximum sensor range': 'Unspecified', + 'Readable Thresholds': 'lcr', + 'Sensor ID': 'Fan 2A Tach (0x42)', + 'Settable Thresholds': '', + 'Minimum sensor range': 'Unspecified', + 'Nominal Reading': '4020.000' + }, + 'Fan 4B Tach (0x47)': { + 'Status': 'ok', + 'Sensor Reading': '7488 (+/- 0) RPM', + 'Entity ID': '29.4 (Fan Device)', + 'Assertions Enabled': 'lcr-', + 'Normal Minimum': '2752.000', + 'Positive Hysteresis': '128.000', + 'Assertion Events': '', + 'Event Message Control': 'Per-threshold', + 'Normal Maximum': '16320.000', + 'Deassertions Enabled': 'lcr-', + 'Sensor Type (Analog)': 'Fan', + 'Lower critical': '1920.000', + 'Negative Hysteresis': '128.000', + 'Threshold Read Mask': 'lcr', + 'Maximum sensor range': 'Unspecified', + 'Readable Thresholds': 'lcr', + 'Sensor ID': 'Fan 4B Tach (0x47)', + 'Settable Thresholds': '', + 'Minimum sensor range': 
'Unspecified', + 'Nominal Reading': '3968.000' + }, + 'Fan 2B Tach (0x43)': { + 'Status': 'ok', + 'Sensor Reading': '7168 (+/- 0) RPM', + 'Entity ID': '29.2 (Fan Device)', + 'Assertions Enabled': 'lcr-', + 'Normal Minimum': '2752.000', + 'Positive Hysteresis': '128.000', + 'Assertion Events': '', + 'Event Message Control': 'Per-threshold', + 'Normal Maximum': '16320.000', + 'Deassertions Enabled': 'lcr-', + 'Sensor Type (Analog)': 'Fan', + 'Lower critical': '1920.000', + 'Negative Hysteresis': '128.000', + 'Threshold Read Mask': 'lcr', + 'Maximum sensor range': 'Unspecified', + 'Readable Thresholds': 'lcr', + 'Sensor ID': 'Fan 2B Tach (0x43)', + 'Settable Thresholds': '', + 'Minimum sensor range': 'Unspecified', + 'Nominal Reading': '3968.000' + }, + 'Fan 5B Tach (0x49)': { + 'Status': 'ok', + 'Sensor Reading': '7296 (+/- 0) RPM', + 'Entity ID': '29.5 (Fan Device)', + 'Assertions Enabled': 'lcr-', + 'Normal Minimum': '2752.000', + 'Positive Hysteresis': '128.000', + 'Assertion Events': '', + 'Event Message Control': 'Per-threshold', + 'Normal Maximum': '16320.000', + 'Deassertions Enabled': 'lcr-', + 'Sensor Type (Analog)': 'Fan', + 'Lower critical': '1920.000', + 'Negative Hysteresis': '128.000', + 'Threshold Read Mask': 'lcr', + 'Maximum sensor range': 'Unspecified', + 'Readable Thresholds': 'lcr', + 'Sensor ID': 'Fan 5B Tach (0x49)', + 'Settable Thresholds': '', + 'Minimum sensor range': 'Unspecified', + 'Nominal Reading': '3968.000' + }, + 'Fan 1B Tach (0x41)': { + 'Status': 'ok', + 'Sensor Reading': '7296 (+/- 0) RPM', + 'Entity ID': '29.1 (Fan Device)', + 'Assertions Enabled': 'lcr-', + 'Normal Minimum': '2752.000', + 'Positive Hysteresis': '128.000', + 'Assertion Events': '', + 'Event Message Control': 'Per-threshold', + 'Normal Maximum': '16320.000', + 'Deassertions Enabled': 'lcr-', + 'Sensor Type (Analog)': 'Fan', + 'Lower critical': '1920.000', + 'Negative Hysteresis': '128.000', + 'Threshold Read Mask': 'lcr', + 'Maximum sensor range': 'Unspecified', + 'Readable Thresholds': 'lcr', + 'Sensor ID': 'Fan 1B Tach (0x41)', + 'Settable Thresholds': '', + 'Minimum sensor range': 'Unspecified', + 'Nominal Reading': '3968.000' + }, + 'Fan 6B Tach (0x4b)': { + 'Status': 'ok', + 'Sensor Reading': '7616 (+/- 0) RPM', + 'Entity ID': '29.6 (Fan Device)', + 'Assertions Enabled': 'lcr-', + 'Normal Minimum': '2752.000', + 'Positive Hysteresis': '128.000', + 'Assertion Events': '', + 'Event Message Control': 'Per-threshold', + 'Normal Maximum': '16320.000', + 'Deassertions Enabled': 'lcr-', + 'Sensor Type (Analog)': 'Fan', + 'Lower critical': '1920.000', + 'Negative Hysteresis': '128.000', + 'Threshold Read Mask': 'lcr', + 'Maximum sensor range': 'Unspecified', + 'Readable Thresholds': 'lcr', + 'Sensor ID': 'Fan 6B Tach (0x4b)', + 'Settable Thresholds': '', + 'Minimum sensor range': 'Unspecified', + 'Nominal Reading': '3968.000' + }, + 'Fan 6A Tach (0x4a)': { + 'Status': 'ok', + 'Sensor Reading': '7080 (+/- 0) RPM', + 'Entity ID': '29.6 (Fan Device)', + 'Assertions Enabled': 'lcr-', + 'Normal Minimum': '2580.000', + 'Positive Hysteresis': '120.000', + 'Assertion Events': '', + 'Event Message Control': 'Per-threshold', + 'Normal Maximum': '15300.000', + 'Deassertions Enabled': 'lcr-', + 'Sensor Type (Analog)': 'Fan', + 'Lower critical': '1920.000', + 'Negative Hysteresis': '120.000', + 'Threshold Read Mask': 'lcr', + 'Maximum sensor range': 'Unspecified', + 'Readable Thresholds': 'lcr', + 'Sensor ID': 'Fan 6A Tach (0x4a)', + 'Settable Thresholds': '', + 'Minimum sensor range': 'Unspecified', + 
'Nominal Reading': '4020.000' + } +} + + +VOLTAGE_DATA = { + 'Planar 12V (0x18)': { + 'Status': 'ok', + 'Sensor Reading': '12.312 (+/- 0) Volts', + 'Entity ID': '7.1 (System Board)', + 'Assertions Enabled': 'lcr- ucr+', + 'Event Message Control': 'Per-threshold', + 'Assertion Events': '', + 'Maximum sensor range': 'Unspecified', + 'Positive Hysteresis': '0.108', + 'Deassertions Enabled': 'lcr- ucr+', + 'Sensor Type (Analog)': 'Voltage', + 'Lower critical': '10.692', + 'Negative Hysteresis': '0.108', + 'Threshold Read Mask': 'lcr ucr', + 'Upper critical': '13.446', + 'Readable Thresholds': 'lcr ucr', + 'Sensor ID': 'Planar 12V (0x18)', + 'Settable Thresholds': 'lcr ucr', + 'Minimum sensor range': 'Unspecified', + 'Nominal Reading': '12.042' + }, + 'Planar 3.3V (0x16)': { + 'Status': 'ok', + 'Sensor Reading': '3.309 (+/- 0) Volts', + 'Entity ID': '7.1 (System Board)', + 'Assertions Enabled': 'lcr- ucr+', + 'Event Message Control': 'Per-threshold', + 'Assertion Events': '', + 'Maximum sensor range': 'Unspecified', + 'Positive Hysteresis': '0.028', + 'Deassertions Enabled': 'lcr- ucr+', + 'Sensor Type (Analog)': 'Voltage', + 'Lower critical': '3.039', + 'Negative Hysteresis': '0.028', + 'Threshold Read Mask': 'lcr ucr', + 'Upper critical': '3.564', + 'Readable Thresholds': 'lcr ucr', + 'Sensor ID': 'Planar 3.3V (0x16)', + 'Settable Thresholds': 'lcr ucr', + 'Minimum sensor range': 'Unspecified', + 'Nominal Reading': '3.309' + }, + 'Planar VBAT (0x1c)': { + 'Status': 'ok', + 'Sensor Reading': '3.137 (+/- 0) Volts', + 'Entity ID': '7.1 (System Board)', + 'Assertions Enabled': 'lnc- lcr-', + 'Event Message Control': 'Per-threshold', + 'Assertion Events': '', + 'Readable Thresholds': 'lcr lnc', + 'Positive Hysteresis': '0.025', + 'Deassertions Enabled': 'lnc- lcr-', + 'Sensor Type (Analog)': 'Voltage', + 'Lower critical': '2.095', + 'Negative Hysteresis': '0.025', + 'Lower non-critical': '2.248', + 'Maximum sensor range': 'Unspecified', + 'Sensor ID': 'Planar VBAT (0x1c)', + 'Settable Thresholds': 'lcr lnc', + 'Threshold Read Mask': 'lcr lnc', + 'Minimum sensor range': 'Unspecified', + 'Nominal Reading': '3.010' + }, + 'Planar 5V (0x17)': { + 'Status': 'ok', + 'Sensor Reading': '5.062 (+/- 0) Volts', + 'Entity ID': '7.1 (System Board)', + 'Assertions Enabled': 'lcr- ucr+', + 'Event Message Control': 'Per-threshold', + 'Assertion Events': '', + 'Maximum sensor range': 'Unspecified', + 'Positive Hysteresis': '0.045', + 'Deassertions Enabled': 'lcr- ucr+', + 'Sensor Type (Analog)': 'Voltage', + 'Lower critical': '4.475', + 'Negative Hysteresis': '0.045', + 'Threshold Read Mask': 'lcr ucr', + 'Upper critical': '5.582', + 'Readable Thresholds': 'lcr ucr', + 'Sensor ID': 'Planar 5V (0x17)', + 'Settable Thresholds': 'lcr ucr', + 'Minimum sensor range': 'Unspecified', + 'Nominal Reading': '4.995' + } +} + + +SENSOR_DATA = { + 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', + 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', + 'payload': { + 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', + 'timestamp': '20140223134852', + 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', + 'event_type': 'hardware.ipmi.metrics.update', + 'payload': { + 'Temperature': TEMPERATURE_DATA, + 'Current': CURRENT_DATA, + 'Fan': FAN_DATA, + 'Voltage': VOLTAGE_DATA + } + } +} + + +EMPTY_PAYLOAD = { + 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', + 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', + 'payload': { + 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', + 'timestamp': 
'20140223134852', + 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', + 'event_type': 'hardware.ipmi.metrics.update', + 'payload': { + } + } +} + + +MISSING_SENSOR = { + 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', + 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', + 'payload': { + 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', + 'timestamp': '20140223134852', + 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', + 'event_type': 'hardware.ipmi.metrics.update', + 'payload': { + 'Temperature': { + 'PCI Riser 1 Temp (0x33)': { + 'Status': 'ok', + 'Deassertions Enabled': 'unc+ ucr+ unr+', + 'Entity ID': '16.1 (System Internal Expansion Board)', + 'Assertions Enabled': 'unc+ ucr+ unr+', + 'Positive Hysteresis': '4.000', + 'Assertion Events': '', + 'Upper non-critical': '70.000', + 'Event Message Control': 'Per-threshold', + 'Upper non-recoverable': '85.000', + 'Normal Maximum': '112.000', + 'Maximum sensor range': 'Unspecified', + 'Sensor Type (Analog)': 'Temperature', + 'Readable Thresholds': 'unc ucr unr', + 'Negative Hysteresis': 'Unspecified', + 'Threshold Read Mask': 'unc ucr unr', + 'Upper critical': '80.000', + 'Sensor ID': 'PCI Riser 1 Temp (0x33)', + 'Settable Thresholds': '', + 'Minimum sensor range': 'Unspecified', + 'Nominal Reading': '16.000' + }, + } + } + } +} + + +BAD_SENSOR = { + 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', + 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', + 'payload': { + 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', + 'timestamp': '20140223134852', + 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', + 'event_type': 'hardware.ipmi.metrics.update', + 'payload': { + 'Temperature': { + 'PCI Riser 1 Temp (0x33)': { + 'Status': 'ok', + 'Deassertions Enabled': 'unc+ ucr+ unr+', + 'Sensor Reading': 'some bad stuff', + 'Entity ID': '16.1 (System Internal Expansion Board)', + 'Assertions Enabled': 'unc+ ucr+ unr+', + 'Positive Hysteresis': '4.000', + 'Assertion Events': '', + 'Upper non-critical': '70.000', + 'Event Message Control': 'Per-threshold', + 'Upper non-recoverable': '85.000', + 'Normal Maximum': '112.000', + 'Maximum sensor range': 'Unspecified', + 'Sensor Type (Analog)': 'Temperature', + 'Readable Thresholds': 'unc ucr unr', + 'Negative Hysteresis': 'Unspecified', + 'Threshold Read Mask': 'unc ucr unr', + 'Upper critical': '80.000', + 'Sensor ID': 'PCI Riser 1 Temp (0x33)', + 'Settable Thresholds': '', + 'Minimum sensor range': 'Unspecified', + 'Nominal Reading': '16.000' + }, + } + } + } +} + + +NO_SENSOR_ID = { + 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', + 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', + 'payload': { + 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', + 'timestamp': '20140223134852', + 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', + 'event_type': 'hardware.ipmi.metrics.update', + 'payload': { + 'Temperature': { + 'PCI Riser 1 Temp (0x33)': { + 'Sensor Reading': '26 C', + }, + } + } + } +} + + +NO_NODE_ID = { + 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', + 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', + 'payload': { + 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', + 'timestamp': '20140223134852', + 'event_type': 'hardware.ipmi.metrics.update', + 'payload': { + 'Temperature': { + 'PCI Riser 1 Temp (0x33)': { + 'Sensor Reading': '26 C', + 'Sensor ID': 'PCI Riser 1 Temp (0x33)', + }, + } + } + } +} diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/ipmi/notifications/test_ironic.py 
ceilometer-5.0.0~b3/ceilometer/tests/unit/ipmi/notifications/test_ironic.py
--- ceilometer-5.0.0~b2/ceilometer/tests/unit/ipmi/notifications/test_ironic.py 1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/unit/ipmi/notifications/test_ironic.py 2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,214 @@
+#
+# Copyright 2014 Red Hat, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Tests for producing IPMI sample messages from notification events.
+"""
+
+import mock
+from oslotest import base
+
+from ceilometer.ipmi.notifications import ironic as ipmi
+from ceilometer import sample
+from ceilometer.tests.unit.ipmi.notifications import ipmi_test_data
+
+
+class TestNotifications(base.BaseTestCase):
+
+    def test_ipmi_temperature_notification(self):
+        """Test IPMI Temperature sensor data.
+
+        Based on the ipmi_test_data above, the expected sample for a single
+        temperature reading has::
+
+        * a resource_id composed from the node_uuid and Sensor ID
+        * a name composed from 'hardware.ipmi.' and 'temperature'
+        * a volume from the first chunk of the Sensor Reading
+        * a unit from the last chunk of the Sensor Reading
+        * some readings are skipped if the value is 'Disabled'
+        * metadata with the node id
+        """
+        processor = ipmi.TemperatureSensorNotification(None)
+        counters = dict([(counter.resource_id, counter) for counter in
+                         processor.process_notification(
+                             ipmi_test_data.SENSOR_DATA)])
+
+        self.assertEqual(10, len(counters),
+                         'expected 10 temperature readings')
+        resource_id = (
+            'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-dimm_gh_vr_temp_(0x3b)'
+        )
+        test_counter = counters[resource_id]
+        self.assertEqual(26.0, test_counter.volume)
+        self.assertEqual('C', test_counter.unit)
+        self.assertEqual(sample.TYPE_GAUGE, test_counter.type)
+        self.assertEqual('hardware.ipmi.temperature', test_counter.name)
+        self.assertEqual('hardware.ipmi.metrics.update',
+                         test_counter.resource_metadata['event_type'])
+        self.assertEqual('f4982fd2-2f2b-4bb5-9aff-48aac801d1ad',
+                         test_counter.resource_metadata['node'])
+
+    def test_ipmi_current_notification(self):
+        """Test IPMI Current sensor data.
+
+        A single current reading is effectively the same as temperature,
+        modulo "current".
+        """
+        processor = ipmi.CurrentSensorNotification(None)
+        counters = dict([(counter.resource_id, counter) for counter in
+                         processor.process_notification(
+                             ipmi_test_data.SENSOR_DATA)])
+
+        self.assertEqual(1, len(counters), 'expected 1 current reading')
+        resource_id = (
+            'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-avg_power_(0x2e)'
+        )
+        test_counter = counters[resource_id]
+        self.assertEqual(130.0, test_counter.volume)
+        self.assertEqual('W', test_counter.unit)
+        self.assertEqual(sample.TYPE_GAUGE, test_counter.type)
+        self.assertEqual('hardware.ipmi.current', test_counter.name)
+
+    def test_ipmi_fan_notification(self):
+        """Test IPMI Fan sensor data.
+
+        A single fan reading is effectively the same as temperature,
+        modulo "fan".
+        """
+        processor = ipmi.FanSensorNotification(None)
+        counters = dict([(counter.resource_id, counter) for counter in
+                         processor.process_notification(
+                             ipmi_test_data.SENSOR_DATA)])
+
+        self.assertEqual(12, len(counters), 'expected 12 fan readings')
+        resource_id = (
+            'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-fan_4a_tach_(0x46)'
+        )
+        test_counter = counters[resource_id]
+        self.assertEqual(6900.0, test_counter.volume)
+        self.assertEqual('RPM', test_counter.unit)
+        self.assertEqual(sample.TYPE_GAUGE, test_counter.type)
+        self.assertEqual('hardware.ipmi.fan', test_counter.name)
+
+    def test_ipmi_voltage_notification(self):
+        """Test IPMI Voltage sensor data.
+
+        A single voltage reading is effectively the same as temperature,
+        modulo "voltage".
+        """
+        processor = ipmi.VoltageSensorNotification(None)
+        counters = dict([(counter.resource_id, counter) for counter in
+                         processor.process_notification(
+                             ipmi_test_data.SENSOR_DATA)])
+
+        self.assertEqual(4, len(counters), 'expected 4 voltage readings')
+        resource_id = (
+            'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-planar_vbat_(0x1c)'
+        )
+        test_counter = counters[resource_id]
+        self.assertEqual(3.137, test_counter.volume)
+        self.assertEqual('V', test_counter.unit)
+        self.assertEqual(sample.TYPE_GAUGE, test_counter.type)
+        self.assertEqual('hardware.ipmi.voltage', test_counter.name)
+
+    def test_disabled_skips_metric(self):
+        """Test that a meter with a disabled volume is skipped."""
+        processor = ipmi.TemperatureSensorNotification(None)
+        counters = dict([(counter.resource_id, counter) for counter in
+                         processor.process_notification(
+                             ipmi_test_data.SENSOR_DATA)])
+
+        self.assertEqual(10, len(counters),
+                         'expected 10 temperature readings')
+
+        resource_id = (
+            'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-mezz_card_temp_(0x35)'
+        )
+
+        self.assertNotIn(resource_id, counters)
+
+    def test_empty_payload_no_metrics_success(self):
+        processor = ipmi.TemperatureSensorNotification(None)
+        counters = dict([(counter.resource_id, counter) for counter in
+                         processor.process_notification(
+                             ipmi_test_data.EMPTY_PAYLOAD)])
+
+        self.assertEqual(0, len(counters), 'expected 0 readings')
+
+    @mock.patch('ceilometer.ipmi.notifications.ironic.LOG')
+    def test_missing_sensor_data(self, mylog):
+        processor = ipmi.TemperatureSensorNotification(None)
+
+        messages = []
+        mylog.warn = lambda *args: messages.extend(args)
+
+        list(processor.process_notification(ipmi_test_data.MISSING_SENSOR))
+
+        self.assertEqual(
+            'invalid sensor data for '
+            'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-pci_riser_1_temp_(0x33): '
+            "missing 'Sensor Reading' in payload",
+            messages[0]
+        )
+
+    @mock.patch('ceilometer.ipmi.notifications.ironic.LOG')
+    def test_sensor_data_malformed(self, mylog):
+        processor = ipmi.TemperatureSensorNotification(None)
+
+        messages = []
+        mylog.warn = lambda *args: messages.extend(args)
+
+        list(processor.process_notification(ipmi_test_data.BAD_SENSOR))
+
+        self.assertEqual(
+            'invalid sensor data for '
+            'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-pci_riser_1_temp_(0x33): '
+            'unable to parse sensor reading: some bad stuff',
+            messages[0]
+        )
+
+    @mock.patch('ceilometer.ipmi.notifications.ironic.LOG')
+    def test_missing_node_uuid(self, mylog):
+        """Test for desired error message when 'node_uuid' missing.
+
+        Presumably this will never happen given the way the data
+        is created, but better defensive than dead.
+ """ + processor = ipmi.TemperatureSensorNotification(None) + + messages = [] + mylog.warn = lambda *args: messages.extend(args) + + list(processor.process_notification(ipmi_test_data.NO_NODE_ID)) + + self.assertEqual( + 'invalid sensor data for missing id: missing key in payload: ' + "'node_uuid'", + messages[0] + ) + + @mock.patch('ceilometer.ipmi.notifications.ironic.LOG') + def test_missing_sensor_id(self, mylog): + """Test for desired error message when 'Sensor ID' missing.""" + processor = ipmi.TemperatureSensorNotification(None) + + messages = [] + mylog.warn = lambda *args: messages.extend(args) + + list(processor.process_notification(ipmi_test_data.NO_SENSOR_ID)) + + self.assertEqual( + 'invalid sensor data for missing id: missing key in payload: ' + "'Sensor ID'", + messages[0] + ) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/ipmi/platform/fake_utils.py ceilometer-5.0.0~b3/ceilometer/tests/unit/ipmi/platform/fake_utils.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/ipmi/platform/fake_utils.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/ipmi/platform/fake_utils.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,120 @@ +# Copyright 2014 Intel Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import binascii + +from ceilometer.ipmi.platform import exception as nmexcept +from ceilometer.ipmi.platform import intel_node_manager as node_manager +from ceilometer.tests.unit.ipmi.platform import ipmitool_test_data as test_data + + +def get_sensor_status_init(parameter=''): + return (' 01\n', '') + + +def get_sensor_status_uninit(parameter=''): + return (' 00\n', '') + + +def init_sensor_agent(parameter=''): + return (' 00\n', '') + + +def get_nm_version_v2(parameter=''): + return test_data.nm_version_v2 + + +def get_nm_version_v3(parameter=''): + return test_data.nm_version_v3 + + +def sdr_dump(data_file=''): + if data_file == '': + raise ValueError("No file specified for ipmitool sdr dump") + fake_slave_address = '2c' + fake_channel = '60' + hexstr = node_manager.INTEL_PREFIX + fake_slave_address + fake_channel + data = binascii.unhexlify(hexstr) + with open(data_file, 'wb') as bin_fp: + bin_fp.write(data) + + return ('', '') + + +def _execute(funcs, *cmd, **kwargs): + + datas = { + test_data.device_id_cmd: test_data.device_id, + test_data.nm_device_id_cmd: test_data.nm_device_id, + test_data.get_power_cmd: test_data.power_data, + test_data.get_inlet_temp_cmd: test_data.inlet_temperature_data, + test_data.get_outlet_temp_cmd: test_data.outlet_temperature_data, + test_data.get_airflow_cmd: test_data.airflow_data, + test_data.get_cups_index_cmd: test_data.cups_index_data, + test_data.get_cups_util_cmd: test_data.cups_util_data, + test_data.sdr_info_cmd: test_data.sdr_info, + test_data.read_sensor_temperature_cmd: test_data.sensor_temperature, + test_data.read_sensor_voltage_cmd: test_data.sensor_voltage, + test_data.read_sensor_current_cmd: test_data.sensor_current, + test_data.read_sensor_fan_cmd: test_data.sensor_fan, + } + + if cmd[1] == 
'sdr' and cmd[2] == 'dump': + # ipmitool sdr dump /tmp/XXXX + cmd_str = "".join(cmd[:3]) + par_str = cmd[3] + else: + cmd_str = "".join(cmd) + par_str = '' + + try: + return datas[cmd_str] + except KeyError: + return funcs[cmd_str](par_str) + + +def execute_with_nm_v3(*cmd, **kwargs): + """test version of execute on Node Manager V3.0 platform.""" + + funcs = {test_data.sensor_status_cmd: get_sensor_status_init, + test_data.init_sensor_cmd: init_sensor_agent, + test_data.sdr_dump_cmd: sdr_dump, + test_data.nm_version_cmd: get_nm_version_v3} + + return _execute(funcs, *cmd, **kwargs) + + +def execute_with_nm_v2(*cmd, **kwargs): + """test version of execute on Node Manager V2.0 platform.""" + + funcs = {test_data.sensor_status_cmd: get_sensor_status_init, + test_data.init_sensor_cmd: init_sensor_agent, + test_data.sdr_dump_cmd: sdr_dump, + test_data.nm_version_cmd: get_nm_version_v2} + + return _execute(funcs, *cmd, **kwargs) + + +def execute_without_nm(*cmd, **kwargs): + """test version of execute on Non-Node Manager platform.""" + + funcs = {test_data.sensor_status_cmd: get_sensor_status_uninit, + test_data.init_sensor_cmd: init_sensor_agent, + test_data.sdr_dump_cmd: sdr_dump} + + return _execute(funcs, *cmd, **kwargs) + + +def execute_without_ipmi(*cmd, **kwargs): + raise nmexcept.IPMIException diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/ipmi/platform/ipmitool_test_data.py ceilometer-5.0.0~b3/ceilometer/tests/unit/ipmi/platform/ipmitool_test_data.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/ipmi/platform/ipmitool_test_data.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/ipmi/platform/ipmitool_test_data.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,383 @@ +# Copyright 2014 Intel Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Sample data for test_intel_node_manager and test_ipmi_sensor. 
+ +This data is provided as a sample of the data expected from the ipmitool +binary, which produce Node Manager/IPMI raw data +""" + +sensor_temperature_data = """Sensor ID : SSB Therm Trip (0xd) + Entity ID : 7.1 (System Board) + Sensor Type (Discrete): Temperature + Assertions Enabled : Digital State + [State Asserted] + Deassertions Enabled : Digital State + [State Asserted] + +Sensor ID : BB P1 VR Temp (0x20) + Entity ID : 7.1 (System Board) + Sensor Type (Analog) : Temperature + Sensor Reading : 25 (+/- 0) degrees C + Status : ok + Nominal Reading : 58.000 + Normal Minimum : 10.000 + Normal Maximum : 105.000 + Upper critical : 115.000 + Upper non-critical : 110.000 + Lower critical : 0.000 + Lower non-critical : 5.000 + Positive Hysteresis : 2.000 + Negative Hysteresis : 2.000 + Minimum sensor range : Unspecified + Maximum sensor range : Unspecified + Event Message Control : Per-threshold + Readable Thresholds : lcr lnc unc ucr + Settable Thresholds : lcr lnc unc ucr + Threshold Read Mask : lcr lnc unc ucr + Assertion Events : + Assertions Enabled : lnc- lcr- unc+ ucr+ + Deassertions Enabled : lnc- lcr- unc+ ucr+ + +Sensor ID : Front Panel Temp (0x21) + Entity ID : 12.1 (Front Panel Board) + Sensor Type (Analog) : Temperature + Sensor Reading : 23 (+/- 0) degrees C + Status : ok + Nominal Reading : 28.000 + Normal Minimum : 10.000 + Normal Maximum : 45.000 + Upper critical : 55.000 + Upper non-critical : 50.000 + Lower critical : 0.000 + Lower non-critical : 5.000 + Positive Hysteresis : 2.000 + Negative Hysteresis : 2.000 + Minimum sensor range : Unspecified + Maximum sensor range : Unspecified + Event Message Control : Per-threshold + Readable Thresholds : lcr lnc unc ucr + Settable Thresholds : lcr lnc unc ucr + Threshold Read Mask : lcr lnc unc ucr + Assertion Events : + Assertions Enabled : lnc- lcr- unc+ ucr+ + Deassertions Enabled : lnc- lcr- unc+ ucr+ + +Sensor ID : SSB Temp (0x22) + Entity ID : 7.1 (System Board) + Sensor Type (Analog) : Temperature + Sensor Reading : 43 (+/- 0) degrees C + Status : ok + Nominal Reading : 52.000 + Normal Minimum : 10.000 + Normal Maximum : 93.000 + Upper critical : 103.000 + Upper non-critical : 98.000 + Lower critical : 0.000 + Lower non-critical : 5.000 + Positive Hysteresis : 2.000 + Negative Hysteresis : 2.000 + Minimum sensor range : Unspecified + Maximum sensor range : Unspecified + Event Message Control : Per-threshold + Readable Thresholds : lcr lnc unc ucr + Settable Thresholds : lcr lnc unc ucr + Threshold Read Mask : lcr lnc unc ucr + Assertion Events : + Assertions Enabled : lnc- lcr- unc+ ucr+ + Deassertions Enabled : lnc- lcr- unc+ ucr+ + +""" + +sensor_voltage_data = """Sensor ID : VR Watchdog (0xb) + Entity ID : 7.1 (System Board) + Sensor Type (Discrete): Voltage + Assertions Enabled : Digital State + [State Asserted] + Deassertions Enabled : Digital State + [State Asserted] + +Sensor ID : BB +12.0V (0xd0) + Entity ID : 7.1 (System Board) + Sensor Type (Analog) : Voltage + Sensor Reading : 11.831 (+/- 0) Volts + Status : ok + Nominal Reading : 11.935 + Normal Minimum : 11.363 + Normal Maximum : 12.559 + Upper critical : 13.391 + Upper non-critical : 13.027 + Lower critical : 10.635 + Lower non-critical : 10.947 + Positive Hysteresis : 0.052 + Negative Hysteresis : 0.052 + Minimum sensor range : Unspecified + Maximum sensor range : Unspecified + Event Message Control : Per-threshold + Readable Thresholds : lcr lnc unc ucr + Settable Thresholds : lcr lnc unc ucr + Threshold Read Mask : lcr lnc unc ucr + Assertion Events : + 
Assertions Enabled : lnc- lcr- unc+ ucr+ + Deassertions Enabled : lnc- lcr- unc+ ucr+ + +Sensor ID : BB +1.35 P1LV AB (0xe4) + Entity ID : 7.1 (System Board) + Sensor Type (Analog) : Voltage + Sensor Reading : Disabled + Status : Disabled + Nominal Reading : 1.342 + Normal Minimum : 1.275 + Normal Maximum : 1.409 + Upper critical : 1.488 + Upper non-critical : 1.445 + Lower critical : 1.201 + Lower non-critical : 1.244 + Positive Hysteresis : 0.006 + Negative Hysteresis : 0.006 + Minimum sensor range : Unspecified + Maximum sensor range : Unspecified + Event Message Control : Per-threshold + Readable Thresholds : lcr lnc unc ucr + Settable Thresholds : lcr lnc unc ucr + Threshold Read Mask : lcr lnc unc ucr + Event Status : Unavailable + Assertions Enabled : lnc- lcr- unc+ ucr+ + Deassertions Enabled : lnc- lcr- unc+ ucr+ + +Sensor ID : BB +5.0V (0xd1) + Entity ID : 7.1 (System Board) + Sensor Type (Analog) : Voltage + Sensor Reading : 4.959 (+/- 0) Volts + Status : ok + Nominal Reading : 4.981 + Normal Minimum : 4.742 + Normal Maximum : 5.241 + Upper critical : 5.566 + Upper non-critical : 5.415 + Lower critical : 4.416 + Lower non-critical : 4.546 + Positive Hysteresis : 0.022 + Negative Hysteresis : 0.022 + Minimum sensor range : Unspecified + Maximum sensor range : Unspecified + Event Message Control : Per-threshold + Readable Thresholds : lcr lnc unc ucr + Settable Thresholds : lcr lnc unc ucr + Threshold Read Mask : lcr lnc unc ucr + Assertion Events : + Assertions Enabled : lnc- lcr- unc+ ucr+ + Deassertions Enabled : lnc- lcr- unc+ ucr+ + +""" + +sensor_current_data = """Sensor ID : PS1 Curr Out % (0x58) + Entity ID : 10.1 (Power Supply) + Sensor Type (Analog) : Current + Sensor Reading : 11 (+/- 0) unspecified + Status : ok + Nominal Reading : 50.000 + Normal Minimum : 0.000 + Normal Maximum : 100.000 + Upper critical : 118.000 + Upper non-critical : 100.000 + Positive Hysteresis : Unspecified + Negative Hysteresis : Unspecified + Minimum sensor range : Unspecified + Maximum sensor range : Unspecified + Event Message Control : Per-threshold + Readable Thresholds : unc ucr + Settable Thresholds : unc ucr + Threshold Read Mask : unc ucr + Assertion Events : + Assertions Enabled : unc+ ucr+ + Deassertions Enabled : unc+ ucr+ + +Sensor ID : PS2 Curr Out % (0x59) + Entity ID : 10.2 (Power Supply) + Sensor Type (Analog) : Current + Sensor Reading : 0 (+/- 0) unspecified + Status : ok + Nominal Reading : 50.000 + Normal Minimum : 0.000 + Normal Maximum : 100.000 + Upper critical : 118.000 + Upper non-critical : 100.000 + Positive Hysteresis : Unspecified + Negative Hysteresis : Unspecified + Minimum sensor range : Unspecified + Maximum sensor range : Unspecified + Event Message Control : Per-threshold + Readable Thresholds : unc ucr + Settable Thresholds : unc ucr + Threshold Read Mask : unc ucr + Assertion Events : + Assertions Enabled : unc+ ucr+ + Deassertions Enabled : unc+ ucr+ + +""" + +sensor_fan_data = """Sensor ID : System Fan 1 (0x30) + Entity ID : 29.1 (Fan Device) + Sensor Type (Analog) : Fan + Sensor Reading : 4704 (+/- 0) RPM + Status : ok + Nominal Reading : 7497.000 + Normal Minimum : 2499.000 + Normal Maximum : 12495.000 + Lower critical : 1715.000 + Lower non-critical : 1960.000 + Positive Hysteresis : 49.000 + Negative Hysteresis : 49.000 + Minimum sensor range : Unspecified + Maximum sensor range : Unspecified + Event Message Control : Per-threshold + Readable Thresholds : lcr lnc + Settable Thresholds : lcr lnc + Threshold Read Mask : lcr lnc + Assertion Events : + 
Assertions Enabled : lnc- lcr- + Deassertions Enabled : lnc- lcr- + +Sensor ID : System Fan 2 (0x32) + Entity ID : 29.2 (Fan Device) + Sensor Type (Analog) : Fan + Sensor Reading : 4704 (+/- 0) RPM + Status : ok + Nominal Reading : 7497.000 + Normal Minimum : 2499.000 + Normal Maximum : 12495.000 + Lower critical : 1715.000 + Lower non-critical : 1960.000 + Positive Hysteresis : 49.000 + Negative Hysteresis : 49.000 + Minimum sensor range : Unspecified + Maximum sensor range : Unspecified + Event Message Control : Per-threshold + Readable Thresholds : lcr lnc + Settable Thresholds : lcr lnc + Threshold Read Mask : lcr lnc + Assertion Events : + Assertions Enabled : lnc- lcr- + Deassertions Enabled : lnc- lcr- + +Sensor ID : System Fan 3 (0x34) + Entity ID : 29.3 (Fan Device) + Sensor Type (Analog) : Fan + Sensor Reading : 4704 (+/- 0) RPM + Status : ok + Nominal Reading : 7497.000 + Normal Minimum : 2499.000 + Normal Maximum : 12495.000 + Lower critical : 1715.000 + Lower non-critical : 1960.000 + Positive Hysteresis : 49.000 + Negative Hysteresis : 49.000 + Minimum sensor range : Unspecified + Maximum sensor range : Unspecified + Event Message Control : Per-threshold + Readable Thresholds : lcr lnc + Settable Thresholds : lcr lnc + Threshold Read Mask : lcr lnc + Assertion Events : + Assertions Enabled : lnc- lcr- + Deassertions Enabled : lnc- lcr- + +Sensor ID : System Fan 4 (0x36) + Entity ID : 29.4 (Fan Device) + Sensor Type (Analog) : Fan + Sensor Reading : 4606 (+/- 0) RPM + Status : ok + Nominal Reading : 7497.000 + Normal Minimum : 2499.000 + Normal Maximum : 12495.000 + Lower critical : 1715.000 + Lower non-critical : 1960.000 + Positive Hysteresis : 49.000 + Negative Hysteresis : 49.000 + Minimum sensor range : Unspecified + Maximum sensor range : Unspecified + Event Message Control : Per-threshold + Readable Thresholds : lcr lnc + Settable Thresholds : lcr lnc + Threshold Read Mask : lcr lnc + Assertion Events : + Assertions Enabled : lnc- lcr- + Deassertions Enabled : lnc- lcr- + +""" + + +sensor_status_cmd = 'ipmitoolraw0x0a0x2c0x00' +init_sensor_cmd = 'ipmitoolraw0x0a0x2c0x01' +sdr_dump_cmd = 'ipmitoolsdrdump' +sdr_info_cmd = 'ipmitoolsdrinfo' + +read_sensor_all_cmd = 'ipmitoolsdr-v' +read_sensor_temperature_cmd = 'ipmitoolsdr-vtypeTemperature' +read_sensor_voltage_cmd = 'ipmitoolsdr-vtypeVoltage' +read_sensor_current_cmd = 'ipmitoolsdr-vtypeCurrent' +read_sensor_fan_cmd = 'ipmitoolsdr-vtypeFan' + +device_id_cmd = 'ipmitoolraw0x060x01' +nm_device_id_cmd = 'ipmitool-b0x6-t0x2craw0x060x01' +nm_version_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xca0x570x010x00' +get_power_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xc80x570x010x000x010x000x00' +get_inlet_temp_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xc80x570x010x000x020x000x00' +get_outlet_temp_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xc80x570x010x000x050x000x00' +get_airflow_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xc80x570x010x000x040x000x00' +get_cups_index_cmd = 'ipmitool-b0x6-t0x2craw0x2e0x650x570x010x000x01' +get_cups_util_cmd = 'ipmitool-b0x6-t0x2craw0x2e0x650x570x010x000x05' + + +device_id = (' 21 01 01 04 02 bf 57 01 00 49 00 01 07 50 0b', '') +nm_device_id = (' 50 01 02 15 02 21 57 01 00 02 0b 02 09 10 01', '') + +nm_version_v2 = (' 57 01 00 03 02 00 02 15', '') +nm_version_v3 = (' 57 01 00 05 03 00 03 06', '') + +# start from byte 3, get cur- 57 00(87), min- 03 00(3) +# max- 37 02(567), avg- 5c 00(92) +power_data = (' 57 01 00 57 00 03 00 37 02 5c 00 cc 37 f4 53 ce\n' + ' 9b 12 01 50\n', '') + +# start from byte 3, get cur- 17 00(23), min- 16 00(22) +# max- 
18 00(24), avg- 17 00(23)
+inlet_temperature_data = (' 57 01 00 17 00 16 00 18 00 17 00 f3 6f fe 53 85\n'
+                          ' b7 02 00 50\n', '')
+
+# start from byte 3, get cur- 19 00(25), min- 18 00(24)
+# max- 1b 00(27), avg- 19 00(25)
+outlet_temperature_data = (' 57 01 00 19 00 18 00 1b 00 19 00 f3 6f fe 53 85\n'
+                           ' b7 02 00 50\n', '')
+
+# start from byte 3, get cur- be 00(190), min- 96 00(150)
+# max- 26 02(550), avg- cb 00(203)
+airflow_data = (' 57 01 00 be 00 96 00 26 02 cb 00 e1 65 c1 54 db\n'
+                ' b7 02 00 50\n', '')
+
+# start from byte 3, cups index 2e 00 (46)
+cups_index_data = (' 57 01 00 2e 00\n', '')
+
+# start from byte 3, get cup_util - 33 00 ...(51), mem_util - 05 00 ...(5)
+# io_util - 00 00 ...(0)
+cups_util_data = (' 57 01 00 33 00 00 00 00 00 00 00 05 00 00 00 00\n'
+                  ' 00 00 00 00 00 00 00 00 00 00 00\n', '')
+
+sdr_info = ('', '')
+
+sensor_temperature = (sensor_temperature_data, '')
+sensor_voltage = (sensor_voltage_data, '')
+sensor_current = (sensor_current_data, '')
+sensor_fan = (sensor_fan_data, '')
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/ipmi/platform/test_intel_node_manager.py ceilometer-5.0.0~b3/ceilometer/tests/unit/ipmi/platform/test_intel_node_manager.py
--- ceilometer-5.0.0~b2/ceilometer/tests/unit/ipmi/platform/test_intel_node_manager.py	1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/unit/ipmi/platform/test_intel_node_manager.py	2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,168 @@
+# Copyright 2014 Intel Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
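The byte-layout comments above (current/min/max/avg as little-endian hex byte pairs starting at offset 3) can be verified by hand. Below is a minimal sketch of that decoding against the fake power_data reply, for illustration only; it is not the parser shipped in ceilometer.ipmi.platform.intel_node_manager:

# Hedged sketch: decode the fake power_data reply defined above. The reply
# is a whitespace-separated hex dump; each statistic is two little-endian
# bytes, with current/min/max/avg starting at byte offset 3.
raw = (' 57 01 00 57 00 03 00 37 02 5c 00 cc 37 f4 53 ce\n'
       ' 9b 12 01 50\n')
tokens = raw.split()

def le16(lo, hi):
    # '57', '00' -> 0x0057 -> 87
    return int(hi + lo, 16)

cur, mn, mx, avg = [le16(*tokens[i:i + 2]) for i in (3, 5, 7, 9)]
assert (cur, mn, mx, avg) == (87, 3, 567, 92)  # matches the comments above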
+ +import abc + +import mock +from oslotest import base +import six + +from ceilometer.ipmi.platform import intel_node_manager as node_manager +from ceilometer.tests.unit.ipmi.platform import fake_utils +from ceilometer import utils + + +@six.add_metaclass(abc.ABCMeta) +class _Base(base.BaseTestCase): + + @abc.abstractmethod + def init_test_engine(self): + """Prepare specific ipmitool as engine for different NM version.""" + + def setUp(self): + super(_Base, self).setUp() + self.init_test_engine() + self.nm = node_manager.NodeManager() + + @classmethod + def tearDownClass(cls): + # reset inited to force an initialization of singleton for next test + node_manager.NodeManager()._inited = False + super(_Base, cls).tearDownClass() + + +class TestNodeManagerV3(_Base): + + def init_test_engine(self): + utils.execute = mock.Mock(side_effect=fake_utils.execute_with_nm_v3) + + def test_read_airflow(self): + airflow = self.nm.read_airflow() + avg_val = node_manager._hex(airflow["Average_value"]) + max_val = node_manager._hex(airflow["Maximum_value"]) + min_val = node_manager._hex(airflow["Minimum_value"]) + cur_val = node_manager._hex(airflow["Current_value"]) + + # get NM 3.0 + self.assertEqual(5, self.nm.nm_version) + + # see ipmi_test_data.py for raw data + self.assertEqual(190, cur_val) + self.assertEqual(150, min_val) + self.assertEqual(550, max_val) + self.assertEqual(203, avg_val) + + def test_read_outlet_temperature(self): + temperature = self.nm.read_outlet_temperature() + avg_val = node_manager._hex(temperature["Average_value"]) + max_val = node_manager._hex(temperature["Maximum_value"]) + min_val = node_manager._hex(temperature["Minimum_value"]) + cur_val = node_manager._hex(temperature["Current_value"]) + + # get NM 3.0 + self.assertEqual(5, self.nm.nm_version) + + # see ipmi_test_data.py for raw data + self.assertEqual(25, cur_val) + self.assertEqual(24, min_val) + self.assertEqual(27, max_val) + self.assertEqual(25, avg_val) + + def test_read_cups_utilization(self): + cups_util = self.nm.read_cups_utilization() + cpu_util = node_manager._hex(cups_util["CPU_Utilization"]) + mem_util = node_manager._hex(cups_util["Mem_Utilization"]) + io_util = node_manager._hex(cups_util["IO_Utilization"]) + + # see ipmi_test_data.py for raw data + self.assertEqual(51, cpu_util) + self.assertEqual(5, mem_util) + self.assertEqual(0, io_util) + + def test_read_cups_index(self): + cups_index = self.nm.read_cups_index() + index = node_manager._hex(cups_index["CUPS_Index"]) + self.assertEqual(46, index) + + +class TestNodeManager(_Base): + + def init_test_engine(self): + utils.execute = mock.Mock(side_effect=fake_utils.execute_with_nm_v2) + + def test_read_power_all(self): + power = self.nm.read_power_all() + + avg_val = node_manager._hex(power["Average_value"]) + max_val = node_manager._hex(power["Maximum_value"]) + min_val = node_manager._hex(power["Minimum_value"]) + cur_val = node_manager._hex(power["Current_value"]) + + # get NM 2.0 + self.assertEqual(3, self.nm.nm_version) + # see ipmi_test_data.py for raw data + self.assertEqual(87, cur_val) + self.assertEqual(3, min_val) + self.assertEqual(567, max_val) + self.assertEqual(92, avg_val) + + def test_read_inlet_temperature(self): + temperature = self.nm.read_inlet_temperature() + + avg_val = node_manager._hex(temperature["Average_value"]) + max_val = node_manager._hex(temperature["Maximum_value"]) + min_val = node_manager._hex(temperature["Minimum_value"]) + cur_val = node_manager._hex(temperature["Current_value"]) + + # see ipmi_test_data.py for raw 
data
+        self.assertEqual(23, cur_val)
+        self.assertEqual(22, min_val)
+        self.assertEqual(24, max_val)
+        self.assertEqual(23, avg_val)
+
+    def test_read_airflow(self):
+        airflow = self.nm.read_airflow()
+        self.assertEqual({}, airflow)
+
+    def test_read_outlet_temperature(self):
+        temperature = self.nm.read_outlet_temperature()
+        self.assertEqual({}, temperature)
+
+    def test_read_cups_utilization(self):
+        cups_util = self.nm.read_cups_utilization()
+        self.assertEqual({}, cups_util)
+
+    def test_read_cups_index(self):
+        cups_index = self.nm.read_cups_index()
+        self.assertEqual({}, cups_index)
+
+
+class TestNonNodeManager(_Base):
+
+    def init_test_engine(self):
+        utils.execute = mock.Mock(side_effect=fake_utils.execute_without_nm)
+
+    def test_read_power_all(self):
+        # no NM support
+        self.assertEqual(0, self.nm.nm_version)
+        power = self.nm.read_power_all()
+
+        # Non-Node Manager platforms return empty data
+        self.assertEqual({}, power)
+
+    def test_read_inlet_temperature(self):
+        temperature = self.nm.read_inlet_temperature()
+
+        # Non-Node Manager platforms return empty data
+        self.assertEqual({}, temperature)
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/ipmi/platform/test_ipmi_sensor.py ceilometer-5.0.0~b3/ceilometer/tests/unit/ipmi/platform/test_ipmi_sensor.py
--- ceilometer-5.0.0~b2/ceilometer/tests/unit/ipmi/platform/test_ipmi_sensor.py	1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/unit/ipmi/platform/test_ipmi_sensor.py	2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,128 @@
+# Copyright 2014 Intel Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslotest import base
+
+from ceilometer.ipmi.platform import ipmi_sensor
+from ceilometer.tests.unit.ipmi.platform import fake_utils
+from ceilometer import utils
+
+
+class TestIPMISensor(base.BaseTestCase):
+
+    def setUp(self):
+        super(TestIPMISensor, self).setUp()
+
+        utils.execute = mock.Mock(side_effect=fake_utils.execute_with_nm_v2)
+        self.ipmi = ipmi_sensor.IPMISensor()
+
+    @classmethod
+    def tearDownClass(cls):
+        # reset inited to force an initialization of singleton for next test
+        ipmi_sensor.IPMISensor()._inited = False
+        super(TestIPMISensor, cls).tearDownClass()
+
+    def test_read_sensor_temperature(self):
+        sensors = self.ipmi.read_sensor_any('Temperature')
+
+        self.assertTrue(self.ipmi.ipmi_support)
+        # only temperature data returned.
+        self.assertIn('Temperature', sensors)
+        self.assertEqual(1, len(sensors))
+
+        # 4 sensor data in total, ignore 1 without 'Sensor Reading'.
+        # Check ceilometer/tests/ipmi/platform/ipmi_test_data.py
+        self.assertEqual(3, len(sensors['Temperature']))
+        sensor = sensors['Temperature']['BB P1 VR Temp (0x20)']
+        self.assertEqual('25 (+/- 0) degrees C', sensor['Sensor Reading'])
+
+    def test_read_sensor_voltage(self):
+        sensors = self.ipmi.read_sensor_any('Voltage')
+
+        # only voltage data returned.
+        self.assertIn('Voltage', sensors)
+        self.assertEqual(1, len(sensors))
+
+        # 4 sensor data in total, ignore 1 without 'Sensor Reading'.
+        # Check ceilometer/tests/ipmi/platform/ipmi_test_data.py
+        self.assertEqual(3, len(sensors['Voltage']))
+        sensor = sensors['Voltage']['BB +5.0V (0xd1)']
+        self.assertEqual('4.959 (+/- 0) Volts', sensor['Sensor Reading'])
+
+    def test_read_sensor_current(self):
+        sensors = self.ipmi.read_sensor_any('Current')
+
+        # only Current data returned.
+        self.assertIn('Current', sensors)
+        self.assertEqual(1, len(sensors))
+
+        # 2 sensor data in total.
+        # Check ceilometer/tests/ipmi/platform/ipmi_test_data.py
+        self.assertEqual(2, len(sensors['Current']))
+        sensor = sensors['Current']['PS1 Curr Out % (0x58)']
+        self.assertEqual('11 (+/- 0) unspecified', sensor['Sensor Reading'])
+
+    def test_read_sensor_fan(self):
+        sensors = self.ipmi.read_sensor_any('Fan')
+
+        # only Fan data returned.
+        self.assertIn('Fan', sensors)
+        self.assertEqual(1, len(sensors))
+
+        # 4 sensor data in total.
+        # Check ceilometer/tests/ipmi/platform/ipmi_test_data.py
+        self.assertEqual(4, len(sensors['Fan']))
+        sensor = sensors['Fan']['System Fan 2 (0x32)']
+        self.assertEqual('4704 (+/- 0) RPM', sensor['Sensor Reading'])
+
+
+class TestNonIPMISensor(base.BaseTestCase):
+
+    def setUp(self):
+        super(TestNonIPMISensor, self).setUp()
+
+        utils.execute = mock.Mock(side_effect=fake_utils.execute_without_ipmi)
+        self.ipmi = ipmi_sensor.IPMISensor()
+
+    @classmethod
+    def tearDownClass(cls):
+        # reset inited to force an initialization of singleton for next test
+        ipmi_sensor.IPMISensor()._inited = False
+        super(TestNonIPMISensor, cls).tearDownClass()
+
+    def test_read_sensor_temperature(self):
+        sensors = self.ipmi.read_sensor_any('Temperature')
+
+        self.assertFalse(self.ipmi.ipmi_support)
+        # Non-IPMI platforms return empty data
+        self.assertEqual({}, sensors)
+
+    def test_read_sensor_voltage(self):
+        sensors = self.ipmi.read_sensor_any('Voltage')
+
+        # Non-IPMI platforms return empty data
+        self.assertEqual({}, sensors)
+
+    def test_read_sensor_current(self):
+        sensors = self.ipmi.read_sensor_any('Current')
+
+        # Non-IPMI platforms return empty data
+        self.assertEqual({}, sensors)
+
+    def test_read_sensor_fan(self):
+        sensors = self.ipmi.read_sensor_any('Fan')
+
+        # Non-IPMI platforms return empty data
+        self.assertEqual({}, sensors)
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/ipmi/pollsters/base.py ceilometer-5.0.0~b3/ceilometer/tests/unit/ipmi/pollsters/base.py
--- ceilometer-5.0.0~b2/ceilometer/tests/unit/ipmi/pollsters/base.py	1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/unit/ipmi/pollsters/base.py	2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,74 @@
+# Copyright 2014 Intel
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
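TestIPMISensor and TestNonIPMISensor above both reset `_inited` in tearDownClass because IPMISensor behaves as a process-wide singleton. A rough sketch of the pattern being reset follows; the names and structure here are assumptions for illustration, not the actual driver code:

class SingletonLikeIPMISensor(object):
    # Hypothetical reduction of the pattern the tests reset: one shared
    # instance whose hardware probing runs once, until a test forces
    # re-initialization by setting _inited back to False.
    _instance = None
    _inited = False

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            cls._instance = super(SingletonLikeIPMISensor, cls).__new__(cls)
        return cls._instance

    def __init__(self):
        if self._inited:
            return
        self._inited = True
        self.ipmi_support = self._check_ipmi()

    def _check_ipmi(self):
        return True  # stub; the real check shells out through utils.execute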
+ +import abc + +import mock +from oslotest import mockpatch +import six + +from ceilometer.agent import manager +from ceilometer.tests import base + + +@six.add_metaclass(abc.ABCMeta) +class TestPollsterBase(base.BaseTestCase): + + def fake_data(self): + """Fake data used for test.""" + return None + + def fake_sensor_data(self, sensor_type): + """Fake sensor data used for test.""" + return None + + @abc.abstractmethod + def make_pollster(self): + """Produce right pollster for test.""" + + def _test_get_samples(self): + nm = mock.Mock() + nm.read_inlet_temperature.side_effect = self.fake_data + nm.read_outlet_temperature.side_effect = self.fake_data + nm.read_power_all.side_effect = self.fake_data + nm.read_airflow.side_effect = self.fake_data + nm.read_cups_index.side_effect = self.fake_data + nm.read_cups_utilization.side_effect = self.fake_data + nm.read_sensor_any.side_effect = self.fake_sensor_data + # We should mock the pollster first before initialize the Manager + # so that we don't trigger the sudo in pollsters' __init__(). + self.useFixture(mockpatch.Patch( + 'ceilometer.ipmi.platform.intel_node_manager.NodeManager', + return_value=nm)) + + self.useFixture(mockpatch.Patch( + 'ceilometer.ipmi.platform.ipmi_sensor.IPMISensor', + return_value=nm)) + + self.mgr = manager.AgentManager(['ipmi']) + + self.pollster = self.make_pollster() + + def _verify_metering(self, length, expected_vol=None, node=None): + cache = {} + resources = ['local_host'] + + samples = list(self.pollster.get_samples(self.mgr, cache, resources)) + self.assertEqual(length, len(samples)) + + if expected_vol: + self.assertTrue(any(s.volume == expected_vol for s in samples)) + if node: + self.assertTrue(any(s.resource_metadata['node'] == node + for s in samples)) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/ipmi/pollsters/test_node.py ceilometer-5.0.0~b3/ceilometer/tests/unit/ipmi/pollsters/test_node.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/ipmi/pollsters/test_node.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/ipmi/pollsters/test_node.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,161 @@ +# Copyright 2014 Intel Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
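The expected volumes asserted in the pollster tests below are nothing more than base-16 readings of the first byte pair in each fake_data dict. A quick worked check in plain Python, independent of the test fixtures:

# The "value is 19(0x13 ...)" style comments in the tests below are just
# little-endian hex conversions of the fake Current_value byte pairs:
for pair, expected in [(['13', '00'], 19),    # TestPowerPollster
                       (['23', '00'], 35),    # TestInletTemperaturePollster
                       (['25', '00'], 37),    # TestOutletTemperaturePollster
                       (['be', '00'], 190)]:  # TestAirflowPollster
    assert int(''.join(reversed(pair)), 16) == expected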
+
+import mock
+from oslo_config import cfg
+
+from ceilometer.ipmi.pollsters import node
+from ceilometer.tests.unit.ipmi.pollsters import base
+
+CONF = cfg.CONF
+CONF.import_opt('host', 'ceilometer.service')
+
+
+class TestPowerPollster(base.TestPollsterBase):
+
+    def fake_data(self):
+        # data after parsing Intel Node Manager output
+        return {"Current_value": ['13', '00']}
+
+    def make_pollster(self):
+        return node.PowerPollster()
+
+    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
+    def test_get_samples(self):
+        self._test_get_samples()
+
+        # only one sample, and value is 19(0x13 as current_value)
+        self._verify_metering(1, 19, CONF.host)
+
+
+class TestInletTemperaturePollster(base.TestPollsterBase):
+
+    def fake_data(self):
+        # data after parsing Intel Node Manager output
+        return {"Current_value": ['23', '00']}
+
+    def make_pollster(self):
+        return node.InletTemperaturePollster()
+
+    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
+    def test_get_samples(self):
+        self._test_get_samples()
+
+        # only one sample, and value is 35(0x23 as current_value)
+        self._verify_metering(1, 35, CONF.host)
+
+
+class TestOutletTemperaturePollster(base.TestPollsterBase):
+
+    def fake_data(self):
+        # data after parsing Intel Node Manager output
+        return {"Current_value": ['25', '00']}
+
+    def make_pollster(self):
+        return node.OutletTemperaturePollster()
+
+    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
+    def test_get_samples(self):
+        self._test_get_samples()
+
+        # only one sample, and value is 37(0x25 as current_value)
+        self._verify_metering(1, 37, CONF.host)
+
+
+class TestAirflowPollster(base.TestPollsterBase):
+
+    def fake_data(self):
+        # data after parsing Intel Node Manager output
+        return {"Current_value": ['be', '00']}
+
+    def make_pollster(self):
+        return node.AirflowPollster()
+
+    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
+    def test_get_samples(self):
+        self._test_get_samples()
+
+        # only one sample, and value is 190(0xbe as current_value)
+        self._verify_metering(1, 190, CONF.host)
+
+
+class TestCUPSIndexPollster(base.TestPollsterBase):
+
+    def fake_data(self):
+        # data after parsing Intel Node Manager output
+        return {"CUPS_Index": ['2e', '00']}
+
+    def make_pollster(self):
+        return node.CUPSIndexPollster()
+
+    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
+    def test_get_samples(self):
+        self._test_get_samples()
+
+        # only one sample, and value is 46(0x2e)
+        self._verify_metering(1, 46, CONF.host)
+
+
+class CPUUtilPollster(base.TestPollsterBase):
+
+    def fake_data(self):
+        # data after parsing Intel Node Manager output
+        return {"CPU_Utilization":
+                ['33', '00', '00', '00', '00', '00', '00', '00']}
+
+    def make_pollster(self):
+        return node.CPUUtilPollster()
+
+    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
+    def test_get_samples(self):
+        self._test_get_samples()
+
+        # only one sample, and value is 51(0x33)
+        self._verify_metering(1, 51, CONF.host)
+
+
+class MemUtilPollster(base.TestPollsterBase):
+
+    def fake_data(self):
+        # data after parsing Intel Node Manager output
+        return {"Mem_Utilization":
+                ['05', '00', '00', '00', '00', '00', '00', '00']}
+
+    def make_pollster(self):
+        return node.MemUtilPollster()
+
+    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
+    def test_get_samples(self):
+        self._test_get_samples()
+
+        # only one sample, and value is 5(0x05)
+        self._verify_metering(1, 5, CONF.host)
+
+
+class
IOUtilPollster(base.TestPollsterBase): + + def fake_data(self): + # data after parsing Intel Node Manager output + return {"IO_Utilization": + ['00', '00', '00', '00', '00', '00', '00', '00']} + + def make_pollster(self): + return node.IOUtilPollster() + + @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) + def test_get_samples(self): + self._test_get_samples() + + # only one sample, and value is 0(0x00) + self._verify_metering(1, 0, CONF.host) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/ipmi/pollsters/test_sensor.py ceilometer-5.0.0~b3/ceilometer/tests/unit/ipmi/pollsters/test_sensor.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/ipmi/pollsters/test_sensor.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/ipmi/pollsters/test_sensor.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,145 @@ +# Copyright 2014 Intel Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from oslo_config import cfg + +from ceilometer.ipmi.pollsters import sensor +from ceilometer.tests.unit.ipmi.notifications import ipmi_test_data +from ceilometer.tests.unit.ipmi.pollsters import base + +CONF = cfg.CONF +CONF.import_opt('host', 'ceilometer.service') + +TEMPERATURE_SENSOR_DATA = { + 'Temperature': ipmi_test_data.TEMPERATURE_DATA +} + +CURRENT_SENSOR_DATA = { + 'Current': ipmi_test_data.CURRENT_DATA +} + +FAN_SENSOR_DATA = { + 'Fan': ipmi_test_data.FAN_DATA +} + +VOLTAGE_SENSOR_DATA = { + 'Voltage': ipmi_test_data.VOLTAGE_DATA +} + +MISSING_SENSOR_DATA = ipmi_test_data.MISSING_SENSOR['payload']['payload'] +MALFORMED_SENSOR_DATA = ipmi_test_data.BAD_SENSOR['payload']['payload'] +MISSING_ID_SENSOR_DATA = ipmi_test_data.NO_SENSOR_ID['payload']['payload'] + + +class TestTemperatureSensorPollster(base.TestPollsterBase): + + def fake_sensor_data(self, sensor_type): + return TEMPERATURE_SENSOR_DATA + + def make_pollster(self): + return sensor.TemperatureSensorPollster() + + @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) + def test_get_samples(self): + self._test_get_samples() + + self._verify_metering(10, float(32), CONF.host) + + +class TestMissingSensorData(base.TestPollsterBase): + + def fake_sensor_data(self, sensor_type): + return MISSING_SENSOR_DATA + + def make_pollster(self): + return sensor.TemperatureSensorPollster() + + @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) + def test_get_samples(self): + self._test_get_samples() + self._verify_metering(0) + + +class TestMalformedSensorData(base.TestPollsterBase): + + def fake_sensor_data(self, sensor_type): + return MALFORMED_SENSOR_DATA + + def make_pollster(self): + return sensor.TemperatureSensorPollster() + + @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) + def test_get_samples(self): + self._test_get_samples() + self._verify_metering(0) + + +class TestMissingSensorId(base.TestPollsterBase): + + def fake_sensor_data(self, sensor_type): + return MISSING_ID_SENSOR_DATA + + def make_pollster(self): + return 
sensor.TemperatureSensorPollster() + + @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) + def test_get_samples(self): + self._test_get_samples() + self._verify_metering(0) + + +class TestFanSensorPollster(base.TestPollsterBase): + + def fake_sensor_data(self, sensor_type): + return FAN_SENSOR_DATA + + def make_pollster(self): + return sensor.FanSensorPollster() + + @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) + def test_get_samples(self): + self._test_get_samples() + + self._verify_metering(12, float(7140), CONF.host) + + +class TestCurrentSensorPollster(base.TestPollsterBase): + + def fake_sensor_data(self, sensor_type): + return CURRENT_SENSOR_DATA + + def make_pollster(self): + return sensor.CurrentSensorPollster() + + @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) + def test_get_samples(self): + self._test_get_samples() + + self._verify_metering(1, float(130), CONF.host) + + +class TestVoltageSensorPollster(base.TestPollsterBase): + + def fake_sensor_data(self, sensor_type): + return VOLTAGE_SENSOR_DATA + + def make_pollster(self): + return sensor.VoltageSensorPollster() + + @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) + def test_get_samples(self): + self._test_get_samples() + + self._verify_metering(4, float(3.309), CONF.host) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/meter/test_notifications.py ceilometer-5.0.0~b3/ceilometer/tests/unit/meter/test_notifications.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/meter/test_notifications.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/meter/test_notifications.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,595 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
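The meter tests below drive everything through JSONPath expressions such as $.payload.volume. As a rough idea of what those lookups resolve to, here is a sketch using jsonpath_rw directly rather than ceilometer's MeterDefinition wrapper; it covers only the simple dotted-path case (the arithmetic and filter expressions later in the file rely on the jsonpath-rw-ext extensions):

# Hedged sketch of the JSONPath lookups used by the meter definitions below.
from jsonpath_rw import parse

body = {'payload': {'volume': 1.0,
                    'resource_id': 'bea70e51c7340cb9d555b15cbfcaec23'}}

matches = parse('$.payload.volume').find(body)
assert [m.value for m in matches] == [1.0]
assert parse('$.payload.resource_id').find(body)[0].value == (
    'bea70e51c7340cb9d555b15cbfcaec23')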
+"""Tests for ceilometer.meter.notifications +""" +import copy +import mock +import six +import yaml + +from oslo_config import fixture as fixture_config +from oslo_utils import fileutils +from oslotest import mockpatch + +from ceilometer.meter import notifications +from ceilometer import service as ceilometer_service +from ceilometer.tests import base as test + +NOTIFICATION = { + 'event_type': u'test.create', + 'timestamp': u'2015-06-1909: 19: 35.786893', + 'payload': {u'user_id': u'e1d870e51c7340cb9d555b15cbfcaec2', + u'resource_id': u'bea70e51c7340cb9d555b15cbfcaec23', + u'timestamp': u'2015-06-19T09: 19: 35.785330', + u'message_signature': u'fake_signature1', + u'resource_metadata': {u'foo': u'bar'}, + u'source': u'30be1fc9a03c4e94ab05c403a8a377f2: openstack', + u'volume': 1.0, + u'project_id': u'30be1fc9a03c4e94ab05c403a8a377f2', + }, + u'_context_tenant': u'30be1fc9a03c4e94ab05c403a8a377f2', + u'_context_request_id': u'req-da91b4bf-d2b5-43ae-8b66-c7752e72726d', + u'_context_user': u'e1d870e51c7340cb9d555b15cbfcaec2', + 'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e', + 'publisher_id': "foo123" +} + +MIDDLEWARE_EVENT = { + u'_context_request_id': u'req-a8bfa89b-d28b-4b95-9e4b-7d7875275650', + u'_context_quota_class': None, + u'event_type': u'objectstore.http.request', + u'_context_service_catalog': [], + u'_context_auth_token': None, + u'_context_user_id': None, + u'priority': u'INFO', + u'_context_is_admin': True, + u'_context_user': None, + u'publisher_id': u'ceilometermiddleware', + u'message_id': u'6eccedba-120e-4db8-9735-2ad5f061e5ee', + u'_context_remote_address': None, + u'_context_roles': [], + u'timestamp': u'2013-07-29 06:51:34.474815', + u'_context_timestamp': u'2013-07-29T06:51:34.348091', + u'_unique_id': u'0ee26117077648e18d88ac76e28a72e2', + u'_context_project_name': None, + u'_context_read_deleted': u'no', + u'_context_tenant': None, + u'_context_instance_lock_checked': False, + u'_context_project_id': None, + u'_context_user_name': None, + u'payload': { + 'typeURI': 'http: //schemas.dmtf.org/cloud/audit/1.0/event', + 'eventTime': '2015-01-30T16: 38: 43.233621', + 'target': { + 'action': 'get', + 'typeURI': 'service/storage/object', + 'id': 'account', + 'metadata': { + 'path': '/1.0/CUSTOM_account/container/obj', + 'version': '1.0', + 'container': 'container', + 'object': 'obj' + } + }, + 'observer': { + 'id': 'target' + }, + 'eventType': 'activity', + 'measurements': [ + { + 'metric': { + 'metricId': 'openstack: uuid', + 'name': 'storage.objects.outgoing.bytes', + 'unit': 'B' + }, + 'result': 28 + }, + { + 'metric': { + 'metricId': 'openstack: uuid2', + 'name': 'storage.objects.incoming.bytes', + 'unit': 'B' + }, + 'result': 1 + } + ], + 'initiator': { + 'typeURI': 'service/security/account/user', + 'project_id': None, + 'id': 'openstack: 288f6260-bf37-4737-a178-5038c84ba244' + }, + 'action': 'read', + 'outcome': 'success', + 'id': 'openstack: 69972bb6-14dd-46e4-bdaf-3148014363dc' + } +} + +FULL_MULTI_MSG = { + u'_context_domain': None, + u'_context_request_id': u'req-da91b4bf-d2b5-43ae-8b66-c7752e72726d', + 'event_type': u'full.sample', + 'timestamp': u'2015-06-1909: 19: 35.786893', + u'_context_auth_token': None, + u'_context_read_only': False, + 'payload': [{ + u'counter_name': u'instance1', + u'user_id': u'user1', + u'resource_id': u'res1', + u'counter_unit': u'ns', + u'counter_volume': 28.0, + u'project_id': u'proj1', + u'counter_type': u'gauge' + }, + { + u'counter_name': u'instance2', + u'user_id': u'user2', + u'resource_id': u'res2', + u'counter_unit': 
u'%', + u'counter_volume': 1.0, + u'project_id': u'proj2', + u'counter_type': u'delta' + }], + u'_context_resource_uuid': None, + u'_context_user_identity': u'fake_user_identity---', + u'_context_show_deleted': False, + u'_context_tenant': u'30be1fc9a03c4e94ab05c403a8a377f2', + 'priority': 'info', + u'_context_is_admin': True, + u'_context_project_domain': None, + u'_context_user': u'e1d870e51c7340cb9d555b15cbfcaec2', + u'_context_user_domain': None, + 'publisher_id': u'ceilometer.api', + 'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e' +} + +METRICS_UPDATE = { + u'_context_request_id': u'req-a8bfa89b-d28b-4b95-9e4b-7d7875275650', + u'_context_quota_class': None, + u'event_type': u'compute.metrics.update', + u'_context_service_catalog': [], + u'_context_auth_token': None, + u'_context_user_id': None, + u'payload': { + u'metrics': [ + {'timestamp': u'2013-07-29T06:51:34.472416', + 'name': 'cpu.frequency', 'value': 1600, + 'source': 'libvirt.LibvirtDriver'}, + {'timestamp': u'2013-07-29T06:51:34.472416', + 'name': 'cpu.user.time', 'value': 17421440000000, + 'source': 'libvirt.LibvirtDriver'}, + {'timestamp': u'2013-07-29T06:51:34.472416', + 'name': 'cpu.kernel.time', 'value': 7852600000000, + 'source': 'libvirt.LibvirtDriver'}, + {'timestamp': u'2013-07-29T06:51:34.472416', + 'name': 'cpu.idle.time', 'value': 1307374400000000, + 'source': 'libvirt.LibvirtDriver'}, + {'timestamp': u'2013-07-29T06:51:34.472416', + 'name': 'cpu.iowait.time', 'value': 11697470000000, + 'source': 'libvirt.LibvirtDriver'}, + {'timestamp': u'2013-07-29T06:51:34.472416', + 'name': 'cpu.user.percent', 'value': 0.012959045637294348, + 'source': 'libvirt.LibvirtDriver'}, + {'timestamp': u'2013-07-29T06:51:34.472416', + 'name': 'cpu.kernel.percent', 'value': 0.005841204961898534, + 'source': 'libvirt.LibvirtDriver'}, + {'timestamp': u'2013-07-29T06:51:34.472416', + 'name': 'cpu.idle.percent', 'value': 0.9724985141658965, + 'source': 'libvirt.LibvirtDriver'}, + {'timestamp': u'2013-07-29T06:51:34.472416', + 'name': 'cpu.iowait.percent', 'value': 0.008701235234910634, + 'source': 'libvirt.LibvirtDriver'}, + {'timestamp': u'2013-07-29T06:51:34.472416', + 'name': 'cpu.percent', 'value': 0.027501485834103515, + 'source': 'libvirt.LibvirtDriver'}], + u'nodename': u'tianst.sh.intel.com', + u'host': u'tianst', + u'host_id': u'10.0.1.1'}, + u'priority': u'INFO', + u'_context_is_admin': True, + u'_context_user': None, + u'publisher_id': u'compute.tianst.sh.intel.com', + u'message_id': u'6eccedba-120e-4db8-9735-2ad5f061e5ee', + u'_context_remote_address': None, + u'_context_roles': [], + u'timestamp': u'2013-07-29 06:51:34.474815', + u'_context_timestamp': u'2013-07-29T06:51:34.348091', + u'_unique_id': u'0ee26117077648e18d88ac76e28a72e2', + u'_context_project_name': None, + u'_context_read_deleted': u'no', + u'_context_tenant': None, + u'_context_instance_lock_checked': False, + u'_context_project_id': None, + u'_context_user_name': None +} + + +class TestMeterDefinition(test.BaseTestCase): + + def test_config_definition(self): + cfg = dict(name="test", + event_type="test.create", + type="delta", + unit="B", + volume="$.payload.volume", + resource_id="$.payload.resource_id", + project_id="$.payload.project_id") + handler = notifications.MeterDefinition(cfg) + self.assertTrue(handler.match_type("test.create")) + self.assertEqual(1.0, handler.parse_fields("volume", NOTIFICATION)) + self.assertEqual("bea70e51c7340cb9d555b15cbfcaec23", + handler.parse_fields("resource_id", NOTIFICATION)) + 
self.assertEqual("30be1fc9a03c4e94ab05c403a8a377f2", + handler.parse_fields("project_id", NOTIFICATION)) + + def test_config_missing_fields(self): + cfg = dict(name="test", type="delta") + try: + notifications.MeterDefinition(cfg) + except notifications.MeterDefinitionException as e: + self.assertEqual("Required field event_type not specified", + e.message) + + def test_bad_type_cfg_definition(self): + cfg = dict(name="test", type="foo", event_type="bar.create") + try: + notifications.MeterDefinition(cfg) + except notifications.MeterDefinitionException as e: + self.assertEqual("Invalid type foo specified", e.message) + + +class TestMeterProcessing(test.BaseTestCase): + + def setUp(self): + super(TestMeterProcessing, self).setUp() + self.CONF = self.useFixture(fixture_config.Config()).conf + ceilometer_service.prepare_service(argv=[], config_files=[]) + self.handler = notifications.ProcessMeterNotifications(mock.Mock()) + + def test_fallback_meter_path(self): + self.useFixture(mockpatch.PatchObject(self.CONF, + 'find_file', return_value=None)) + fall_bak_path = notifications.get_config_file() + self.assertIn("meter/data/meters.yaml", fall_bak_path) + + def __setup_meter_def_file(self, cfg): + if six.PY3: + cfg = cfg.encode('utf-8') + meter_cfg_file = fileutils.write_to_tempfile(content=cfg, + prefix="meters", + suffix="yaml") + self.CONF.set_override( + 'meter_definitions_cfg_file', + meter_cfg_file, group='meter') + cfg = notifications.setup_meters_config() + return cfg + + def test_jsonpath_values_parsed(self): + cfg = yaml.dump( + {'metric': [dict(name="test1", + event_type="test.create", + type="delta", + unit="B", + volume="$.payload.volume", + resource_id="$.payload.resource_id", + project_id="$.payload.project_id")]}) + self.handler.definitions = notifications.load_definitions( + self.__setup_meter_def_file(cfg)) + c = list(self.handler.process_notification(NOTIFICATION)) + self.assertEqual(1, len(c)) + s1 = c[0].as_dict() + self.assertEqual('test1', s1['name']) + self.assertEqual(1.0, s1['volume']) + self.assertEqual('bea70e51c7340cb9d555b15cbfcaec23', s1['resource_id']) + self.assertEqual('30be1fc9a03c4e94ab05c403a8a377f2', s1['project_id']) + + def test_multiple_meter(self): + cfg = yaml.dump( + {'metric': [dict(name="test1", + event_type="test.create", + type="delta", + unit="B", + volume="$.payload.volume", + resource_id="$.payload.resource_id", + project_id="$.payload.project_id"), + dict(name="test2", + event_type="test.create", + type="delta", + unit="B", + volume="$.payload.volume", + resource_id="$.payload.resource_id", + project_id="$.payload.project_id")]}) + self.handler.definitions = notifications.load_definitions( + self.__setup_meter_def_file(cfg)) + c = list(self.handler.process_notification(NOTIFICATION)) + self.assertEqual(2, len(c)) + s1 = c[0].as_dict() + self.assertEqual('test2', s1['name']) + s2 = c[1].as_dict() + self.assertEqual('test1', s2['name']) + + def test_unmatched_meter(self): + cfg = yaml.dump( + {'metric': [dict(name="test1", + event_type="test.update", + type="delta", + unit="B", + volume="$.payload.volume", + resource_id="$.payload.resource_id", + project_id="$.payload.project_id")]}) + self.handler.definitions = notifications.load_definitions( + self.__setup_meter_def_file(cfg)) + c = list(self.handler.process_notification(NOTIFICATION)) + self.assertEqual(0, len(c)) + + def test_regex_match_meter(self): + cfg = yaml.dump( + {'metric': [dict(name="test1", + event_type="test.*", + type="delta", + unit="B", + volume="$.payload.volume", + 
resource_id="$.payload.resource_id", + project_id="$.payload.project_id")]}) + self.handler.definitions = notifications.load_definitions( + self.__setup_meter_def_file(cfg)) + c = list(self.handler.process_notification(NOTIFICATION)) + self.assertEqual(1, len(c)) + + def test_default_timestamp(self): + event = copy.deepcopy(MIDDLEWARE_EVENT) + del event['payload']['measurements'][1] + cfg = yaml.dump( + {'metric': [dict(name="payload.measurements.[*].metric.[*].name", + event_type="objectstore.http.request", + type="delta", + unit="payload.measurements.[*].metric.[*].unit", + volume="payload.measurements.[*].result", + resource_id="payload.target_id", + project_id="payload.initiator.project_id", + multi="name")]}) + self.handler.definitions = notifications.load_definitions( + self.__setup_meter_def_file(cfg)) + c = list(self.handler.process_notification(event)) + self.assertEqual(1, len(c)) + s1 = c[0].as_dict() + self.assertEqual(MIDDLEWARE_EVENT['timestamp'], s1['timestamp']) + + def test_custom_timestamp(self): + event = copy.deepcopy(MIDDLEWARE_EVENT) + del event['payload']['measurements'][1] + cfg = yaml.dump( + {'metric': [dict(name="payload.measurements.[*].metric.[*].name", + event_type="objectstore.http.request", + type="delta", + unit="payload.measurements.[*].metric.[*].unit", + volume="payload.measurements.[*].result", + resource_id="payload.target_id", + project_id="payload.initiator.project_id", + multi="name", + timestamp='payload.eventTime')]}) + self.handler.definitions = notifications.load_definitions( + self.__setup_meter_def_file(cfg)) + c = list(self.handler.process_notification(event)) + self.assertEqual(1, len(c)) + s1 = c[0].as_dict() + self.assertEqual(MIDDLEWARE_EVENT['payload']['eventTime'], + s1['timestamp']) + + def test_multi_match_event_meter(self): + cfg = yaml.dump( + {'metric': [dict(name="test1", + event_type="test.create", + type="delta", + unit="B", + volume="$.payload.volume", + resource_id="$.payload.resource_id", + project_id="$.payload.project_id"), + dict(name="test2", + event_type="test.create", + type="delta", + unit="B", + volume="$.payload.volume", + resource_id="$.payload.resource_id", + project_id="$.payload.project_id")]}) + self.handler.definitions = notifications.load_definitions( + self.__setup_meter_def_file(cfg)) + c = list(self.handler.process_notification(NOTIFICATION)) + self.assertEqual(2, len(c)) + + def test_multi_meter_payload(self): + cfg = yaml.dump( + {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", + event_type="objectstore.http.request", + type="delta", + unit="$.payload.measurements.[*].metric.[*].unit", + volume="$.payload.measurements.[*].result", + resource_id="$.payload.target_id", + project_id="$.payload.initiator.project_id", + lookup=["name", "volume", "unit"])]}) + self.handler.definitions = notifications.load_definitions( + self.__setup_meter_def_file(cfg)) + c = list(self.handler.process_notification(MIDDLEWARE_EVENT)) + self.assertEqual(2, len(c)) + s1 = c[0].as_dict() + self.assertEqual('storage.objects.outgoing.bytes', s1['name']) + self.assertEqual(28, s1['volume']) + self.assertEqual('B', s1['unit']) + s2 = c[1].as_dict() + self.assertEqual('storage.objects.incoming.bytes', s2['name']) + self.assertEqual(1, s2['volume']) + self.assertEqual('B', s2['unit']) + + def test_multi_meter_payload_single(self): + event = copy.deepcopy(MIDDLEWARE_EVENT) + del event['payload']['measurements'][1] + cfg = yaml.dump( + {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", + 
event_type="objectstore.http.request", + type="delta", + unit="$.payload.measurements.[*].metric.[*].unit", + volume="$.payload.measurements.[*].result", + resource_id="$.payload.target_id", + project_id="$.payload.initiator.project_id", + lookup=["name", "unit"])]}) + self.handler.definitions = notifications.load_definitions( + self.__setup_meter_def_file(cfg)) + c = list(self.handler.process_notification(event)) + self.assertEqual(1, len(c)) + s1 = c[0].as_dict() + self.assertEqual('storage.objects.outgoing.bytes', s1['name']) + self.assertEqual(28, s1['volume']) + self.assertEqual('B', s1['unit']) + + def test_multi_meter_payload_none(self): + event = copy.deepcopy(MIDDLEWARE_EVENT) + del event['payload']['measurements'] + cfg = yaml.dump( + {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", + event_type="objectstore.http.request", + type="delta", + unit="$.payload.measurements.[*].metric.[*].unit", + volume="$.payload.measurements.[*].result", + resource_id="$.payload.target_id", + project_id="$.payload.initiator.project_id", + lookup="name")]}) + self.handler.definitions = notifications.load_definitions( + self.__setup_meter_def_file(cfg)) + c = list(self.handler.process_notification(event)) + self.assertEqual(0, len(c)) + + def test_multi_meter_payload_all_multi(self): + cfg = yaml.dump( + {'metric': [dict(name="$.payload.[*].counter_name", + event_type="full.sample", + type="$.payload.[*].counter_type", + unit="$.payload.[*].counter_unit", + volume="$.payload.[*].counter_volume", + resource_id="$.payload.[*].resource_id", + project_id="$.payload.[*].project_id", + user_id="$.payload.[*].user_id", + lookup=['name', 'type', 'unit', 'volume', + 'resource_id', 'project_id', 'user_id'])]}) + self.handler.definitions = notifications.load_definitions( + self.__setup_meter_def_file(cfg)) + c = list(self.handler.process_notification(FULL_MULTI_MSG)) + self.assertEqual(2, len(c)) + msg = FULL_MULTI_MSG['payload'] + for idx, val in enumerate(c): + s1 = val.as_dict() + self.assertEqual(msg[idx]['counter_name'], s1['name']) + self.assertEqual(msg[idx]['counter_volume'], s1['volume']) + self.assertEqual(msg[idx]['counter_unit'], s1['unit']) + self.assertEqual(msg[idx]['counter_type'], s1['type']) + self.assertEqual(msg[idx]['resource_id'], s1['resource_id']) + self.assertEqual(msg[idx]['project_id'], s1['project_id']) + self.assertEqual(msg[idx]['user_id'], s1['user_id']) + + @mock.patch('ceilometer.meter.notifications.LOG') + def test_multi_meter_payload_invalid_missing(self, LOG): + event = copy.deepcopy(MIDDLEWARE_EVENT) + del event['payload']['measurements'][0]['result'] + del event['payload']['measurements'][1]['result'] + cfg = yaml.dump( + {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", + event_type="objectstore.http.request", + type="delta", + unit="$.payload.measurements.[*].metric.[*].unit", + volume="$.payload.measurements.[*].result", + resource_id="$.payload.target_id", + project_id="$.payload.initiator.project_id", + lookup=["name", "unit", "volume"])]}) + self.handler.definitions = notifications.load_definitions( + self.__setup_meter_def_file(cfg)) + c = list(self.handler.process_notification(event)) + self.assertEqual(0, len(c)) + LOG.warning.assert_called_with('Could not find %s values', 'volume') + + @mock.patch('ceilometer.meter.notifications.LOG') + def test_multi_meter_payload_invalid_short(self, LOG): + event = copy.deepcopy(MIDDLEWARE_EVENT) + del event['payload']['measurements'][0]['result'] + cfg = yaml.dump( + {'metric': 
[dict(name="$.payload.measurements.[*].metric.[*].name", + event_type="objectstore.http.request", + type="delta", + unit="$.payload.measurements.[*].metric.[*].unit", + volume="$.payload.measurements.[*].result", + resource_id="$.payload.target_id", + project_id="$.payload.initiator.project_id", + lookup=["name", "unit", "volume"])]}) + self.handler.definitions = notifications.load_definitions( + self.__setup_meter_def_file(cfg)) + c = list(self.handler.process_notification(event)) + self.assertEqual(0, len(c)) + LOG.warning.assert_called_with('Not all fetched meters contain "%s" ' + 'field', 'volume') + + def test_arithmetic_expr_meter(self): + cfg = yaml.dump( + {'metric': [dict(name='compute.node.cpu.percent', + event_type="compute.metrics.update", + type='gauge', + unit="percent", + volume="$.payload.metrics[" + "?(@.name='cpu.percent')].value" + " * 100", + resource_id="$.payload.host + '_'" + " + $.payload.nodename")]}) + self.handler.definitions = notifications.load_definitions( + self.__setup_meter_def_file(cfg)) + c = list(self.handler.process_notification(METRICS_UPDATE)) + self.assertEqual(1, len(c)) + s1 = c[0].as_dict() + self.assertEqual('compute.node.cpu.percent', s1['name']) + self.assertEqual(2.7501485834103514, s1['volume']) + self.assertEqual("tianst_tianst.sh.intel.com", + s1['resource_id']) + + def test_string_expr_meter(self): + cfg = yaml.dump( + {'metric': [dict(name='compute.node.cpu.frequency', + event_type="compute.metrics.update", + type='gauge', + unit="ns", + volume="$.payload.metrics[?(@.name='cpu.frequency')]" + ".value", + resource_id="$.payload.host + '_'" + " + $.payload.nodename")]}) + self.handler.definitions = notifications.load_definitions( + self.__setup_meter_def_file(cfg)) + c = list(self.handler.process_notification(METRICS_UPDATE)) + self.assertEqual(1, len(c)) + s1 = c[0].as_dict() + self.assertEqual('compute.node.cpu.frequency', s1['name']) + self.assertEqual(1600, s1['volume']) + self.assertEqual("tianst_tianst.sh.intel.com", + s1['resource_id']) + + def test_prefix_expr_meter(self): + cfg = yaml.dump( + {'metric': [dict(name='compute.node.cpu.frequency', + event_type="compute.metrics.update", + type='gauge', + unit="ns", + volume="$.payload.metrics[?(@.name='cpu.frequency')]" + ".value", + resource_id="'prefix-' + $.payload.nodename")]}) + self.handler.definitions = notifications.load_definitions( + self.__setup_meter_def_file(cfg)) + c = list(self.handler.process_notification(METRICS_UPDATE)) + self.assertEqual(1, len(c)) + s1 = c[0].as_dict() + self.assertEqual('compute.node.cpu.frequency', s1['name']) + self.assertEqual(1600, s1['volume']) + self.assertEqual("prefix-tianst.sh.intel.com", + s1['resource_id']) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/network/services/test_fwaas.py ceilometer-5.0.0~b3/ceilometer/tests/unit/network/services/test_fwaas.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/network/services/test_fwaas.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/network/services/test_fwaas.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,169 @@ +# +# Copyright 2014 Cisco Systems,Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from oslo_context import context +from oslotest import base +from oslotest import mockpatch + +from ceilometer.agent import manager +from ceilometer.agent import plugin_base +from ceilometer.network.services import discovery +from ceilometer.network.services import fwaas + + +class _BaseTestFWPollster(base.BaseTestCase): + + @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) + def setUp(self): + super(_BaseTestFWPollster, self).setUp() + self.addCleanup(mock.patch.stopall) + self.context = context.get_admin_context() + self.manager = manager.AgentManager() + plugin_base._get_keystone = mock.Mock() + plugin_base._get_keystone.service_catalog.get_endpoints = ( + mock.MagicMock(return_value={'network': mock.ANY})) + + +class TestFirewallPollster(_BaseTestFWPollster): + + def setUp(self): + super(TestFirewallPollster, self).setUp() + self.pollster = fwaas.FirewallPollster() + fake_fw = self.fake_get_fw_service() + self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' + 'firewall_get_all', + return_value=fake_fw)) + + @staticmethod + def fake_get_fw_service(): + return [{'status': 'ACTIVE', + 'name': 'myfw', + 'description': '', + 'admin_state_up': True, + 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', + 'firewall_policy_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', + 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa'}, + {'status': 'INACTIVE', + 'name': 'myfw', + 'description': '', + 'admin_state_up': True, + 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', + 'firewall_policy_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', + 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa'}, + {'status': 'PENDING_CREATE', + 'name': 'myfw', + 'description': '', + 'admin_state_up': True, + 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', + 'firewall_policy_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', + 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa'}, + {'status': 'error', + 'name': 'myfw', + 'description': '', + 'admin_state_up': True, + 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', + 'firewall_policy_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', + 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa'}, + ] + + def test_fw_get_samples(self): + samples = list(self.pollster.get_samples( + self.manager, {}, + resources=self.fake_get_fw_service())) + self.assertEqual(3, len(samples)) + for field in self.pollster.FIELDS: + self.assertEqual(self.fake_get_fw_service()[0][field], + samples[0].resource_metadata[field]) + + def test_vpn_volume(self): + samples = list(self.pollster.get_samples( + self.manager, {}, + resources=self.fake_get_fw_service())) + self.assertEqual(1, samples[0].volume) + self.assertEqual(0, samples[1].volume) + self.assertEqual(2, samples[2].volume) + + def test_get_vpn_meter_names(self): + samples = list(self.pollster.get_samples( + self.manager, {}, + resources=self.fake_get_fw_service())) + self.assertEqual(set(['network.services.firewall']), + set([s.name for s in samples])) + + def test_vpn_discovery(self): + discovered_fws = discovery.FirewallDiscovery().discover(self.manager) + self.assertEqual(3, len(discovered_fws)) + + for vpn in 
self.fake_get_fw_service(): + if vpn['status'] == 'error': + self.assertNotIn(vpn, discovered_fws) + else: + self.assertIn(vpn, discovered_fws) + + +class TestIPSecConnectionsPollster(_BaseTestFWPollster): + + def setUp(self): + super(TestIPSecConnectionsPollster, self).setUp() + self.pollster = fwaas.FirewallPolicyPollster() + fake_fw_policy = self.fake_get_fw_policy() + self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' + 'fw_policy_get_all', + return_value=fake_fw_policy)) + + @staticmethod + def fake_get_fw_policy(): + return [{'name': 'my_fw_policy', + 'description': 'fw_policy', + 'admin_state_up': True, + 'tenant_id': 'abe3d818-fdcb-fg4b-de7f-6650dc8a9d7a', + 'firewall_rules': [{'enabled': True, + 'action': 'allow', + 'ip_version': 4, + 'protocol': 'tcp', + 'destination_port': '80', + 'source_ip_address': '10.24.4.2'}, + {'enabled': True, + 'action': 'deny', + 'ip_version': 4, + 'protocol': 'tcp', + 'destination_port': '22'}], + 'shared': True, + 'audited': True, + 'id': 'fdfbcec-fdcb-fg4b-de7f-6650dc8a9d7a'} + ] + + def test_policy_get_samples(self): + samples = list(self.pollster.get_samples( + self.manager, {}, + resources=self.fake_get_fw_policy())) + self.assertEqual(1, len(samples)) + for field in self.pollster.FIELDS: + self.assertEqual(self.fake_get_fw_policy()[0][field], + samples[0].resource_metadata[field]) + + def test_get_policy_meter_names(self): + samples = list(self.pollster.get_samples( + self.manager, {}, + resources=self.fake_get_fw_policy())) + self.assertEqual(set(['network.services.firewall.policy']), + set([s.name for s in samples])) + + def test_fw_policy_discovery(self): + discovered_policy = discovery.FirewallPolicyDiscovery().discover( + self.manager) + self.assertEqual(1, len(discovered_policy)) + self.assertEqual(self.fake_get_fw_policy(), discovered_policy) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/network/services/test_lbaas.py ceilometer-5.0.0~b3/ceilometer/tests/unit/network/services/test_lbaas.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/network/services/test_lbaas.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/network/services/test_lbaas.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,502 @@ +# +# Copyright 2014 Cisco Systems,Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
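
# NOTE (review): the volume assertions in these service pollster tests
# (1 for ACTIVE, 0 for INACTIVE, 2 for PENDING_CREATE, and no sample at
# all for UNKNOWN/error) follow a status-to-volume convention implemented
# in the pollsters' shared base class, which this diff does not touch.
# A minimal standalone sketch of the apparent convention; STATUS_VOLUME
# and status_to_volume are illustrative names, not the real API:
STATUS_VOLUME = {
    'ACTIVE': 1,
    'INACTIVE': 0,
    'PENDING_CREATE': 2,
}

def status_to_volume(status):
    # Statuses outside the mapping (e.g. 'UNKNOWN', 'error') yield None
    # and the pollsters emit no sample for them -- which is why the five
    # fake pools below produce only three samples.
    return STATUS_VOLUME.get(status.upper())

assert status_to_volume('ACTIVE') == 1
assert status_to_volume('error') is None
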
+ +import mock +from oslo_context import context +from oslotest import base +from oslotest import mockpatch + +from ceilometer.agent import manager +from ceilometer.agent import plugin_base +from ceilometer.network.services import discovery +from ceilometer.network.services import lbaas + + +class _BaseTestLBPollster(base.BaseTestCase): + + @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) + def setUp(self): + super(_BaseTestLBPollster, self).setUp() + self.addCleanup(mock.patch.stopall) + self.context = context.get_admin_context() + self.manager = manager.AgentManager() + plugin_base._get_keystone = mock.Mock() + plugin_base._get_keystone.service_catalog.get_endpoints = ( + mock.MagicMock(return_value={'network': mock.ANY})) + + +class TestLBPoolPollster(_BaseTestLBPollster): + + def setUp(self): + super(TestLBPoolPollster, self).setUp() + self.pollster = lbaas.LBPoolPollster() + fake_pools = self.fake_get_pools() + self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' + 'pool_get_all', + return_value=fake_pools)) + + @staticmethod + def fake_get_pools(): + return [{'status': 'ACTIVE', + 'lb_method': 'ROUND_ROBIN', + 'protocol': 'HTTP', + 'description': '', + 'health_monitors': [], + 'members': [], + 'provider': 'haproxy', + 'status_description': None, + 'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', + 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', + 'name': 'mylb', + 'admin_state_up': True, + 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', + 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', + 'health_monitors_status': []}, + {'status': 'INACTIVE', + 'lb_method': 'ROUND_ROBIN', + 'protocol': 'HTTP', + 'description': '', + 'health_monitors': [], + 'members': [], + 'provider': 'haproxy', + 'status_description': None, + 'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', + 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', + 'name': 'mylb02', + 'admin_state_up': True, + 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', + 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', + 'health_monitors_status': []}, + {'status': 'PENDING_CREATE', + 'lb_method': 'ROUND_ROBIN', + 'protocol': 'HTTP', + 'description': '', + 'health_monitors': [], + 'members': [], + 'provider': 'haproxy', + 'status_description': None, + 'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd', + 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', + 'name': 'mylb03', + 'admin_state_up': True, + 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', + 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', + 'health_monitors_status': []}, + {'status': 'UNKNOWN', + 'lb_method': 'ROUND_ROBIN', + 'protocol': 'HTTP', + 'description': '', + 'health_monitors': [], + 'members': [], + 'provider': 'haproxy', + 'status_description': None, + 'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd', + 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', + 'name': 'mylb03', + 'admin_state_up': True, + 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', + 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', + 'health_monitors_status': []}, + {'status': 'error', + 'lb_method': 'ROUND_ROBIN', + 'protocol': 'HTTP', + 'description': '', + 'health_monitors': [], + 'members': [], + 'provider': 'haproxy', + 'status_description': None, + 'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd', + 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', + 'name': 'mylb_error', + 'admin_state_up': True, + 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', + 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', + 'health_monitors_status': []}, + ] + + def 
test_pool_get_samples(self): + samples = list(self.pollster.get_samples( + self.manager, {}, + resources=self.fake_get_pools())) + self.assertEqual(3, len(samples)) + for field in self.pollster.FIELDS: + self.assertEqual(self.fake_get_pools()[0][field], + samples[0].resource_metadata[field]) + + def test_pool_volume(self): + samples = list(self.pollster.get_samples( + self.manager, {}, + resources=self.fake_get_pools())) + self.assertEqual(1, samples[0].volume) + self.assertEqual(0, samples[1].volume) + self.assertEqual(2, samples[2].volume) + + def test_get_pool_meter_names(self): + samples = list(self.pollster.get_samples( + self.manager, {}, + resources=self.fake_get_pools())) + self.assertEqual(set(['network.services.lb.pool']), + set([s.name for s in samples])) + + def test_pool_discovery(self): + discovered_pools = discovery.LBPoolsDiscovery().discover(self.manager) + self.assertEqual(4, len(discovered_pools)) + for pool in self.fake_get_pools(): + if pool['status'] == 'error': + self.assertNotIn(pool, discovered_pools) + else: + self.assertIn(pool, discovered_pools) + + +class TestLBVipPollster(_BaseTestLBPollster): + + def setUp(self): + super(TestLBVipPollster, self).setUp() + self.pollster = lbaas.LBVipPollster() + fake_vips = self.fake_get_vips() + self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' + 'vip_get_all', + return_value=fake_vips)) + + @staticmethod + def fake_get_vips(): + return [{'status': 'ACTIVE', + 'status_description': None, + 'protocol': 'HTTP', + 'description': '', + 'admin_state_up': True, + 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', + 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', + 'connection_limit': -1, + 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', + 'session_persistence': None, + 'address': '10.0.0.2', + 'protocol_port': 80, + 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', + 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', + 'name': 'myvip'}, + {'status': 'INACTIVE', + 'status_description': None, + 'protocol': 'HTTP', + 'description': '', + 'admin_state_up': True, + 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', + 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', + 'connection_limit': -1, + 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', + 'session_persistence': None, + 'address': '10.0.0.3', + 'protocol_port': 80, + 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', + 'id': 'ba6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', + 'name': 'myvip02'}, + {'status': 'PENDING_CREATE', + 'status_description': None, + 'protocol': 'HTTP', + 'description': '', + 'admin_state_up': True, + 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', + 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', + 'connection_limit': -1, + 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', + 'session_persistence': None, + 'address': '10.0.0.4', + 'protocol_port': 80, + 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', + 'id': 'fg6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', + 'name': 'myvip03'}, + {'status': 'UNKNOWN', + 'status_description': None, + 'protocol': 'HTTP', + 'description': '', + 'admin_state_up': True, + 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', + 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', + 'connection_limit': -1, + 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', + 'session_persistence': None, + 'address': '10.0.0.8', + 'protocol_port': 80, + 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', + 'id': 'fg6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', + 'name': 'myvip03'}, + {'status': 'error', + 'status_description': None, + 'protocol': 
'HTTP', + 'description': '', + 'admin_state_up': True, + 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', + 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', + 'connection_limit': -1, + 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', + 'session_persistence': None, + 'address': '10.0.0.8', + 'protocol_port': 80, + 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', + 'id': 'fg6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', + 'name': 'myvip_error'}, + ] + + def test_vip_get_samples(self): + samples = list(self.pollster.get_samples( + self.manager, {}, + resources=self.fake_get_vips())) + self.assertEqual(3, len(samples)) + for field in self.pollster.FIELDS: + self.assertEqual(self.fake_get_vips()[0][field], + samples[0].resource_metadata[field]) + + def test_pool_volume(self): + samples = list(self.pollster.get_samples( + self.manager, {}, + resources=self.fake_get_vips())) + self.assertEqual(1, samples[0].volume) + self.assertEqual(0, samples[1].volume) + self.assertEqual(2, samples[2].volume) + + def test_get_vip_meter_names(self): + samples = list(self.pollster.get_samples( + self.manager, {}, + resources=self.fake_get_vips())) + self.assertEqual(set(['network.services.lb.vip']), + set([s.name for s in samples])) + + def test_vip_discovery(self): + discovered_vips = discovery.LBVipsDiscovery().discover(self.manager) + self.assertEqual(4, len(discovered_vips)) + for pool in self.fake_get_vips(): + if pool['status'] == 'error': + self.assertNotIn(pool, discovered_vips) + else: + self.assertIn(pool, discovered_vips) + + +class TestLBMemberPollster(_BaseTestLBPollster): + + def setUp(self): + super(TestLBMemberPollster, self).setUp() + self.pollster = lbaas.LBMemberPollster() + fake_members = self.fake_get_members() + self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 
+ 'member_get_all', + return_value=fake_members)) + + @staticmethod + def fake_get_members(): + return [{'status': 'ACTIVE', + 'protocol_port': 80, + 'weight': 1, + 'admin_state_up': True, + 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', + 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', + 'address': '10.0.0.3', + 'status_description': None, + 'id': '290b61eb-07bc-4372-9fbf-36459dd0f96b'}, + {'status': 'INACTIVE', + 'protocol_port': 80, + 'weight': 1, + 'admin_state_up': True, + 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', + 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', + 'address': '10.0.0.5', + 'status_description': None, + 'id': '2456661eb-07bc-4372-9fbf-36459dd0f96b'}, + {'status': 'PENDING_CREATE', + 'protocol_port': 80, + 'weight': 1, + 'admin_state_up': True, + 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', + 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', + 'address': '10.0.0.6', + 'status_description': None, + 'id': '45630b61eb-07bc-4372-9fbf-36459dd0f96b'}, + {'status': 'UNKNOWN', + 'protocol_port': 80, + 'weight': 1, + 'admin_state_up': True, + 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', + 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', + 'address': '10.0.0.6', + 'status_description': None, + 'id': '45630b61eb-07bc-4372-9fbf-36459dd0f96b'}, + {'status': 'error', + 'protocol_port': 80, + 'weight': 1, + 'admin_state_up': True, + 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', + 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', + 'address': '10.0.0.6', + 'status_description': None, + 'id': '45630b61eb-07bc-4372-9fbf-36459dd0f96b'}, + ] + + def test_get_samples_not_empty(self): + samples = list(self.pollster.get_samples( + self.manager, {}, + self.fake_get_members())) + self.assertEqual(3, len(samples)) + for field in self.pollster.FIELDS: + self.assertEqual(self.fake_get_members()[0][field], + samples[0].resource_metadata[field]) + + def test_pool_volume(self): + samples = list(self.pollster.get_samples( + self.manager, {}, + self.fake_get_members())) + self.assertEqual(1, samples[0].volume) + self.assertEqual(0, samples[1].volume) + self.assertEqual(2, samples[2].volume) + + def test_get_meter_names(self): + samples = list(self.pollster.get_samples( + self.manager, {}, + self.fake_get_members())) + self.assertEqual(set(['network.services.lb.member']), + set([s.name for s in samples])) + + def test_members_discovery(self): + discovered_members = discovery.LBMembersDiscovery().discover( + self.manager) + self.assertEqual(4, len(discovered_members)) + for pool in self.fake_get_members(): + if pool['status'] == 'error': + self.assertNotIn(pool, discovered_members) + else: + self.assertIn(pool, discovered_members) + + +class TestLBHealthProbePollster(_BaseTestLBPollster): + + def setUp(self): + super(TestLBHealthProbePollster, self).setUp() + self.pollster = lbaas.LBHealthMonitorPollster() + fake_health_monitor = self.fake_get_health_monitor() + self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 
+ 'health_monitor_get_all', + return_value=fake_health_monitor)) + + @staticmethod + def fake_get_health_monitor(): + return [{'id': '34ae33e1-0035-49e2-a2ca-77d5d3fab365', + 'admin_state_up': True, + 'tenant_id': "d5d2817dae6b42159be9b665b64beb0e", + 'delay': 2, + 'max_retries': 5, + 'timeout': 5, + 'pools': [], + 'type': 'PING', + }] + + def test_get_samples_not_empty(self): + samples = list(self.pollster.get_samples( + self.manager, {}, + self.fake_get_health_monitor())) + self.assertEqual(1, len(samples)) + for field in self.pollster.FIELDS: + self.assertEqual(self.fake_get_health_monitor()[0][field], + samples[0].resource_metadata[field]) + + def test_get_meter_names(self): + samples = list(self.pollster.get_samples( + self.manager, {}, + self.fake_get_health_monitor())) + self.assertEqual(set(['network.services.lb.health_monitor']), + set([s.name for s in samples])) + + def test_probes_discovery(self): + discovered_probes = discovery.LBHealthMonitorsDiscovery().discover( + self.manager) + self.assertEqual(discovered_probes, self.fake_get_health_monitor()) + + +class TestLBStatsPollster(_BaseTestLBPollster): + + def setUp(self): + super(TestLBStatsPollster, self).setUp() + fake_pool_stats = self.fake_pool_stats() + self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' + 'pool_stats', + return_value=fake_pool_stats)) + + fake_pools = self.fake_get_pools() + self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' + 'pool_get_all', + return_value=fake_pools)) + + @staticmethod + def fake_get_pools(): + return [{'status': 'ACTIVE', + 'lb_method': 'ROUND_ROBIN', + 'protocol': 'HTTP', + 'description': '', + 'health_monitors': [], + 'members': [], + 'provider': 'haproxy', + 'status_description': None, + 'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', + 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', + 'name': 'mylb', + 'admin_state_up': True, + 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', + 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', + 'health_monitors_status': []}, + ] + + @staticmethod + def fake_pool_stats(): + return {'stats': {'active_connections': 2, + 'bytes_in': 1, + 'bytes_out': 3, + 'total_connections': 4 + } + } + + @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) + def _check_get_samples(self, factory, sample_name, expected_volume, + expected_type): + pollster = factory() + + cache = {} + samples = list(pollster.get_samples(self.manager, cache, + self.fake_get_pools())) + self.assertEqual(1, len(samples)) + self.assertIsNotNone(samples) + self.assertIn('lbstats', cache) + self.assertEqual(set([sample_name]), set([s.name for s in samples])) + + match = [s for s in samples if s.name == sample_name] + self.assertEqual(1, len(match), 'missing counter %s' % sample_name) + self.assertEqual(expected_volume, match[0].volume) + self.assertEqual(expected_type, match[0].type) + + def test_lb_total_connections(self): + self._check_get_samples(lbaas.LBTotalConnectionsPollster, + 'network.services.lb.total.connections', + 4, 'cumulative') + + def test_lb_active_connections(self): + self._check_get_samples(lbaas.LBActiveConnectionsPollster, + 'network.services.lb.active.connections', + 2, 'gauge') + + def test_lb_incoming_bytes(self): + self._check_get_samples(lbaas.LBBytesInPollster, + 'network.services.lb.incoming.bytes', + 1, 'cumulative') + + def test_lb_outgoing_bytes(self): + self._check_get_samples(lbaas.LBBytesOutPollster, + 'network.services.lb.outgoing.bytes', + 3, 'cumulative') diff -Nru 
ceilometer-5.0.0~b2/ceilometer/tests/unit/network/services/test_vpnaas.py ceilometer-5.0.0~b3/ceilometer/tests/unit/network/services/test_vpnaas.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/network/services/test_vpnaas.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/network/services/test_vpnaas.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,175 @@ +# +# Copyright 2014 Cisco Systems,Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from oslo_context import context +from oslotest import base +from oslotest import mockpatch + +from ceilometer.agent import manager +from ceilometer.agent import plugin_base +from ceilometer.network.services import discovery +from ceilometer.network.services import vpnaas + + +class _BaseTestVPNPollster(base.BaseTestCase): + + @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) + def setUp(self): + super(_BaseTestVPNPollster, self).setUp() + self.addCleanup(mock.patch.stopall) + self.context = context.get_admin_context() + self.manager = manager.AgentManager() + plugin_base._get_keystone = mock.Mock() + plugin_base._get_keystone.service_catalog.get_endpoints = ( + mock.MagicMock(return_value={'network': mock.ANY})) + + +class TestVPNServicesPollster(_BaseTestVPNPollster): + + def setUp(self): + super(TestVPNServicesPollster, self).setUp() + self.pollster = vpnaas.VPNServicesPollster() + fake_vpn = self.fake_get_vpn_service() + self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 
+ 'vpn_get_all', + return_value=fake_vpn)) + + @staticmethod + def fake_get_vpn_service(): + return [{'status': 'ACTIVE', + 'name': 'myvpn', + 'description': '', + 'admin_state_up': True, + 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', + 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', + 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', + 'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'}, + {'status': 'INACTIVE', + 'name': 'myvpn', + 'description': '', + 'admin_state_up': True, + 'id': 'cdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', + 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', + 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', + 'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'}, + {'status': 'PENDING_CREATE', + 'name': 'myvpn', + 'description': '', + 'id': 'bdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', + 'admin_state_up': True, + 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', + 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', + 'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'}, + {'status': 'error', + 'name': 'myvpn', + 'description': '', + 'id': 'edde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', + 'admin_state_up': False, + 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', + 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', + 'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'}, + ] + + def test_vpn_get_samples(self): + samples = list(self.pollster.get_samples( + self.manager, {}, + resources=self.fake_get_vpn_service())) + self.assertEqual(3, len(samples)) + for field in self.pollster.FIELDS: + self.assertEqual(self.fake_get_vpn_service()[0][field], + samples[0].resource_metadata[field]) + + def test_vpn_volume(self): + samples = list(self.pollster.get_samples( + self.manager, {}, + resources=self.fake_get_vpn_service())) + self.assertEqual(1, samples[0].volume) + self.assertEqual(0, samples[1].volume) + self.assertEqual(2, samples[2].volume) + + def test_get_vpn_meter_names(self): + samples = list(self.pollster.get_samples( + self.manager, {}, + resources=self.fake_get_vpn_service())) + self.assertEqual(set(['network.services.vpn']), + set([s.name for s in samples])) + + def test_vpn_discovery(self): + discovered_vpns = discovery.VPNServicesDiscovery().discover( + self.manager) + self.assertEqual(3, len(discovered_vpns)) + + for vpn in self.fake_get_vpn_service(): + if vpn['status'] == 'error': + self.assertNotIn(vpn, discovered_vpns) + else: + self.assertIn(vpn, discovered_vpns) + + +class TestIPSecConnectionsPollster(_BaseTestVPNPollster): + + def setUp(self): + super(TestIPSecConnectionsPollster, self).setUp() + self.pollster = vpnaas.IPSecConnectionsPollster() + fake_conns = self.fake_get_ipsec_connections() + self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 
+ 'ipsec_site_connections_get_all', + return_value=fake_conns)) + + @staticmethod + def fake_get_ipsec_connections(): + return [{'name': 'connection1', + 'description': 'Remote-connection1', + 'peer_address': '192.168.1.10', + 'peer_id': '192.168.1.10', + 'peer_cidrs': ['192.168.2.0/24', + '192.168.3.0/24'], + 'mtu': 1500, + 'psk': 'abcd', + 'initiator': 'bi-directional', + 'dpd': { + 'action': 'hold', + 'interval': 30, + 'timeout': 120}, + 'ikepolicy_id': 'ade3d818-fdcb-fg4b-de7f-4550dc8a9d7a', + 'ipsecpolicy_id': 'fce3d818-fdcb-fg4b-de7f-7850dc8a9d7a', + 'vpnservice_id': 'dce3d818-fdcb-fg4b-de7f-5650dc8a9d7a', + 'admin_state_up': True, + 'tenant_id': 'abe3d818-fdcb-fg4b-de7f-6650dc8a9d7a', + 'id': 'fdfbcec-fdcb-fg4b-de7f-6650dc8a9d7a'} + ] + + def test_conns_get_samples(self): + samples = list(self.pollster.get_samples( + self.manager, {}, + resources=self.fake_get_ipsec_connections())) + self.assertEqual(1, len(samples)) + for field in self.pollster.FIELDS: + self.assertEqual(self.fake_get_ipsec_connections()[0][field], + samples[0].resource_metadata[field]) + + def test_get_conns_meter_names(self): + samples = list(self.pollster.get_samples( + self.manager, {}, + resources=self.fake_get_ipsec_connections())) + self.assertEqual(set(['network.services.vpn.connections']), + set([s.name for s in samples])) + + def test_conns_discovery(self): + discovered_conns = discovery.IPSecConnectionsDiscovery().discover( + self.manager) + self.assertEqual(1, len(discovered_conns)) + self.assertEqual(self.fake_get_ipsec_connections(), discovered_conns) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/network/statistics/__init__.py ceilometer-5.0.0~b3/ceilometer/tests/unit/network/statistics/__init__.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/network/statistics/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/network/statistics/__init__.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,28 @@ +# +# Copyright 2014 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
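
# NOTE (review): _PollsterTestBase below is a small shared fixture for the
# network-statistics pollster tests: it instantiates a pollster class and
# pins its meter metadata in one call. A hypothetical concrete use
# (TestSwitchPollster and the 'switch' meter triple are illustrative only,
# not part of this diff) would read:
#
#     class TestSwitchPollster(_PollsterTestBase):
#
#         def test_meter_metadata(self):
#             self._test_pollster(
#                 statistics.SwitchPollster,  # pollster under test
#                 'switch',                   # expected meter_name
#                 'gauge',                    # expected meter_type
#                 'switch')                   # expected meter_unit
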
+
+from oslotest import base
+
+
+class _PollsterTestBase(base.BaseTestCase):
+
+    def _test_pollster(self, pollster_class, meter_name,
+                       meter_type, meter_unit):
+
+        pollster = pollster_class()
+
+        self.assertEqual(pollster.meter_name, meter_name)
+        self.assertEqual(pollster.meter_type, meter_type)
+        self.assertEqual(pollster.meter_unit, meter_unit)
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/network/statistics/opencontrail/test_client.py ceilometer-5.0.0~b3/ceilometer/tests/unit/network/statistics/opencontrail/test_client.py
--- ceilometer-5.0.0~b2/ceilometer/tests/unit/network/statistics/opencontrail/test_client.py 1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/unit/network/statistics/opencontrail/test_client.py 2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,71 @@
+# Copyright (C) 2014 eNovance SAS
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo_config import fixture as config_fixture
+from oslotest import base
+
+from ceilometer.network.statistics.opencontrail import client
+from ceilometer import service as ceilometer_service
+
+
+class TestOpencontrailClient(base.BaseTestCase):
+
+    def setUp(self):
+        super(TestOpencontrailClient, self).setUp()
+        self.conf = self.useFixture(config_fixture.Config())
+        ceilometer_service.prepare_service(argv=[], config_files=[])
+        self.client = client.Client('http://127.0.0.1:8081', {'arg1': 'aaa'})
+
+        self.get_resp = mock.MagicMock()
+        self.get = mock.patch('requests.get',
+                              return_value=self.get_resp).start()
+        self.get_resp.raw.version = 1.1
+        self.get_resp.status_code = 200
+        self.get_resp.reason = 'OK'
+        self.get_resp.content = ''
+
+    def test_vm_statistics(self):
+        self.client.networks.get_vm_statistics('bbb')
+
+        call_args = self.get.call_args_list[0][0]
+        call_kwargs = self.get.call_args_list[0][1]
+
+        expected_url = ('http://127.0.0.1:8081/analytics/'
+                        'uves/virtual-machine/bbb')
+        self.assertEqual(expected_url, call_args[0])
+
+        data = call_kwargs.get('data')
+
+        expected_data = {'arg1': 'aaa'}
+        self.assertEqual(expected_data, data)
+
+    def test_vm_statistics_params(self):
+        self.client.networks.get_vm_statistics('bbb',
+                                               {'resource': 'fip_stats_list',
+                                                'virtual_network': 'ccc'})
+
+        call_args = self.get.call_args_list[0][0]
+        call_kwargs = self.get.call_args_list[0][1]
+
+        expected_url = ('http://127.0.0.1:8081/analytics/'
+                        'uves/virtual-machine/bbb')
+        self.assertEqual(expected_url, call_args[0])
+
+        data = call_kwargs.get('data')
+
+        expected_data = {'arg1': 'aaa',
+                         'resource': 'fip_stats_list',
+                         'virtual_network': 'ccc'}
+        self.assertEqual(expected_data, data)
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/network/statistics/opencontrail/test_driver.py ceilometer-5.0.0~b3/ceilometer/tests/unit/network/statistics/opencontrail/test_driver.py
--- ceilometer-5.0.0~b2/ceilometer/tests/unit/network/statistics/opencontrail/test_driver.py 1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/unit/network/statistics/opencontrail/test_driver.py 2015-09-03
13:05:55.000000000 +0000 @@ -0,0 +1,264 @@ +# Copyright (C) 2014 eNovance SAS +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from oslotest import base +from six.moves.urllib import parse as urlparse + +from ceilometer.network.statistics.opencontrail import driver + + +class TestOpencontrailDriver(base.BaseTestCase): + + def setUp(self): + super(TestOpencontrailDriver, self).setUp() + + self.nc_ports = mock.patch('ceilometer.neutron_client' + '.Client.port_get_all', + return_value=self.fake_ports()) + self.nc_ports.start() + + self.driver = driver.OpencontrailDriver() + self.parse_url = urlparse.ParseResult('opencontrail', + '127.0.0.1:8143', + '/', None, None, None) + self.params = {'password': ['admin'], + 'scheme': ['http'], + 'username': ['admin'], + 'verify_ssl': ['false'], + 'resource': ['if_stats_list']} + + @staticmethod + def fake_ports(): + return [{'admin_state_up': True, + 'device_owner': 'compute:None', + 'device_id': '674e553b-8df9-4321-87d9-93ba05b93558', + 'extra_dhcp_opts': [], + 'id': '96d49cc3-4e01-40ce-9cac-c0e32642a442', + 'mac_address': 'fa:16:3e:c5:35:93', + 'name': '', + 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', + 'status': 'ACTIVE', + 'tenant_id': '89271fa581ab4380bf172f868c3615f9'}] + + @staticmethod + def fake_port_stats(): + return {"value": [{ + "name": "c588ebb7-ae52-485a-9f0c-b2791c5da196", + "value": { + "UveVirtualMachineAgent": { + "if_stats_list": [{ + "out_bytes": 22, + "in_bandwidth_usage": 0, + "in_bytes": 23, + "out_bandwidth_usage": 0, + "out_pkts": 5, + "in_pkts": 6, + "name": ("default-domain:demo:" + "96d49cc3-4e01-40ce-9cac-c0e32642a442") + }], + "fip_stats_list": [{ + "in_bytes": 33, + "iface_name": ("default-domain:demo:" + "96d49cc3-4e01-40ce-9cac-c0e32642a442"), + "out_bytes": 44, + "out_pkts": 10, + "virtual_network": "default-domain:openstack:public", + "in_pkts": 11, + "ip_address": "1.1.1.1" + }] + }}}]} + + @staticmethod + def fake_port_stats_with_node(): + return {"value": [{ + "name": "c588ebb7-ae52-485a-9f0c-b2791c5da196", + "value": { + "UveVirtualMachineAgent": { + "if_stats_list": [ + [[{ + "out_bytes": 22, + "in_bandwidth_usage": 0, + "in_bytes": 23, + "out_bandwidth_usage": 0, + "out_pkts": 5, + "in_pkts": 6, + "name": ("default-domain:demo:" + "96d49cc3-4e01-40ce-9cac-c0e32642a442") + }], 'node1'], + [[{ + "out_bytes": 22, + "in_bandwidth_usage": 0, + "in_bytes": 23, + "out_bandwidth_usage": 0, + "out_pkts": 4, + "in_pkts": 13, + "name": ("default-domain:demo:" + "96d49cc3-4e01-40ce-9cac-c0e32642a442")}], + 'node2'] + ] + }}}]} + + def _test_meter(self, meter_name, expected, fake_port_stats=None): + if not fake_port_stats: + fake_port_stats = self.fake_port_stats() + with mock.patch('ceilometer.network.' + 'statistics.opencontrail.' + 'client.NetworksAPIClient.' 
+ 'get_vm_statistics', + return_value=fake_port_stats) as port_stats: + + samples = self.driver.get_sample_data(meter_name, self.parse_url, + self.params, {}) + + self.assertEqual(expected, [s for s in samples]) + + port_stats.assert_called_with('*') + + def test_switch_port_receive_packets_with_node(self): + expected = [(6, + '96d49cc3-4e01-40ce-9cac-c0e32642a442', + {'device_owner_id': + '674e553b-8df9-4321-87d9-93ba05b93558', + 'domain': 'default-domain', + 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', + 'project': 'demo', + 'project_id': '89271fa581ab4380bf172f868c3615f9', + 'resource': 'if_stats_list'}, + mock.ANY), + (13, + '96d49cc3-4e01-40ce-9cac-c0e32642a442', + {'device_owner_id': + '674e553b-8df9-4321-87d9-93ba05b93558', + 'domain': 'default-domain', + 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', + 'project': 'demo', + 'project_id': '89271fa581ab4380bf172f868c3615f9', + 'resource': 'if_stats_list'}, + mock.ANY)] + self._test_meter('switch.port.receive.packets', expected, + self.fake_port_stats_with_node()) + + def test_switch_port_receive_packets(self): + expected = [(6, + '96d49cc3-4e01-40ce-9cac-c0e32642a442', + {'device_owner_id': + '674e553b-8df9-4321-87d9-93ba05b93558', + 'domain': 'default-domain', + 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', + 'project': 'demo', + 'project_id': '89271fa581ab4380bf172f868c3615f9', + 'resource': 'if_stats_list'}, + mock.ANY)] + self._test_meter('switch.port.receive.packets', expected) + + def test_switch_port_transmit_packets(self): + expected = [(5, + '96d49cc3-4e01-40ce-9cac-c0e32642a442', + {'device_owner_id': + '674e553b-8df9-4321-87d9-93ba05b93558', + 'domain': 'default-domain', + 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', + 'project': 'demo', + 'project_id': '89271fa581ab4380bf172f868c3615f9', + 'resource': 'if_stats_list'}, + mock.ANY)] + self._test_meter('switch.port.transmit.packets', expected) + + def test_switch_port_receive_bytes(self): + expected = [(23, + '96d49cc3-4e01-40ce-9cac-c0e32642a442', + {'device_owner_id': + '674e553b-8df9-4321-87d9-93ba05b93558', + 'domain': 'default-domain', + 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', + 'project': 'demo', + 'project_id': '89271fa581ab4380bf172f868c3615f9', + 'resource': 'if_stats_list'}, + mock.ANY)] + self._test_meter('switch.port.receive.bytes', expected) + + def test_switch_port_transmit_bytes(self): + expected = [(22, + '96d49cc3-4e01-40ce-9cac-c0e32642a442', + {'device_owner_id': + '674e553b-8df9-4321-87d9-93ba05b93558', + 'domain': 'default-domain', + 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', + 'project': 'demo', + 'project_id': '89271fa581ab4380bf172f868c3615f9', + 'resource': 'if_stats_list'}, + mock.ANY)] + self._test_meter('switch.port.transmit.bytes', expected) + + def test_switch_port_receive_packets_fip(self): + self.params['resource'] = ['fip_stats_list'] + expected = [(11, + '96d49cc3-4e01-40ce-9cac-c0e32642a442', + {'device_owner_id': + '674e553b-8df9-4321-87d9-93ba05b93558', + 'domain': 'default-domain', + 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', + 'project': 'demo', + 'project_id': '89271fa581ab4380bf172f868c3615f9', + 'resource': 'fip_stats_list'}, + mock.ANY)] + self._test_meter('switch.port.receive.packets', expected) + + def test_switch_port_transmit_packets_fip(self): + self.params['resource'] = ['fip_stats_list'] + expected = [(10, + '96d49cc3-4e01-40ce-9cac-c0e32642a442', + {'device_owner_id': + '674e553b-8df9-4321-87d9-93ba05b93558', + 'domain': 'default-domain', + 'network_id': 
'298a3088-a446-4d5a-bad8-f92ecacd786b', + 'project': 'demo', + 'project_id': '89271fa581ab4380bf172f868c3615f9', + 'resource': 'fip_stats_list'}, + mock.ANY)] + self._test_meter('switch.port.transmit.packets', expected) + + def test_switch_port_receive_bytes_fip(self): + self.params['resource'] = ['fip_stats_list'] + expected = [(33, + '96d49cc3-4e01-40ce-9cac-c0e32642a442', + {'device_owner_id': + '674e553b-8df9-4321-87d9-93ba05b93558', + 'domain': 'default-domain', + 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', + 'project': 'demo', + 'project_id': '89271fa581ab4380bf172f868c3615f9', + 'resource': 'fip_stats_list'}, + mock.ANY)] + self._test_meter('switch.port.receive.bytes', expected) + + def test_switch_port_transmit_bytes_fip(self): + self.params['resource'] = ['fip_stats_list'] + expected = [(44, + '96d49cc3-4e01-40ce-9cac-c0e32642a442', + {'device_owner_id': + '674e553b-8df9-4321-87d9-93ba05b93558', + 'domain': 'default-domain', + 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', + 'project': 'demo', + 'project_id': '89271fa581ab4380bf172f868c3615f9', + 'resource': 'fip_stats_list'}, + mock.ANY)] + self._test_meter('switch.port.transmit.bytes', expected) + + def test_switch_port_transmit_bytes_non_existing_network(self): + self.params['virtual_network'] = ['aaa'] + self.params['resource'] = ['fip_stats_list'] + self._test_meter('switch.port.transmit.bytes', []) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/network/statistics/opendaylight/test_client.py ceilometer-5.0.0~b3/ceilometer/tests/unit/network/statistics/opendaylight/test_client.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/network/statistics/opendaylight/test_client.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/network/statistics/opendaylight/test_client.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,176 @@ +# +# Copyright 2013 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
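
# NOTE (review): TestClientHTTPBasicAuth below configures the OpenDaylight
# client entirely through a pipeline-style URL whose query string carries
# auth/user/password/scheme/container_name. The parsing pattern the test
# itself relies on is plain six.moves.urllib and runs standalone:
from six.moves.urllib import parse as urlparse

url = ('http://127.0.0.1:8080/controller/nb/v2?container_name=default&'
       'container_name=egg&auth=basic&user=admin&password=admin_pass&'
       'scheme=http')
parsed = urlparse.urlparse(url)
params = urlparse.parse_qs(parsed.query)      # every value is a list
endpoint = urlparse.urlunparse(
    urlparse.ParseResult(parsed.scheme, parsed.netloc, parsed.path,
                         None, None, None))   # drop params/query/fragment

assert params['container_name'] == ['default', 'egg']  # repeated key
assert params['user'] == ['admin']
assert endpoint == 'http://127.0.0.1:8080/controller/nb/v2'
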
+import mock +from oslo_config import fixture as config_fixture +from oslotest import base +from requests import auth as req_auth +import six +from six.moves.urllib import parse as urlparse + +from ceilometer.i18n import _ +from ceilometer.network.statistics.opendaylight import client +from ceilometer import service as ceilometer_service + + +class TestClientHTTPBasicAuth(base.BaseTestCase): + + auth_way = 'basic' + scheme = 'http' + + def setUp(self): + super(TestClientHTTPBasicAuth, self).setUp() + self.conf = self.useFixture(config_fixture.Config()) + ceilometer_service.prepare_service(argv=[], config_files=[]) + self.parsed_url = urlparse.urlparse( + 'http://127.0.0.1:8080/controller/nb/v2?container_name=default&' + 'container_name=egg&auth=%s&user=admin&password=admin_pass&' + 'scheme=%s' % (self.auth_way, self.scheme)) + self.params = urlparse.parse_qs(self.parsed_url.query) + self.endpoint = urlparse.urlunparse( + urlparse.ParseResult(self.scheme, + self.parsed_url.netloc, + self.parsed_url.path, + None, None, None)) + odl_params = {'auth': self.params.get('auth')[0], + 'user': self.params.get('user')[0], + 'password': self.params.get('password')[0]} + self.client = client.Client(self.endpoint, odl_params) + + self.resp = mock.MagicMock() + self.get = mock.patch('requests.get', + return_value=self.resp).start() + + self.resp.raw.version = 1.1 + self.resp.status_code = 200 + self.resp.reason = 'OK' + self.resp.headers = {} + self.resp.content = 'dummy' + + def _test_request(self, method, url): + data = method('default') + + call_args = self.get.call_args_list[0][0] + call_kwargs = self.get.call_args_list[0][1] + + # check url + real_url = url % {'container_name': 'default', + 'scheme': self.scheme} + self.assertEqual(real_url, call_args[0]) + + # check auth parameters + auth = call_kwargs.get('auth') + if self.auth_way == 'digest': + self.assertIsInstance(auth, req_auth.HTTPDigestAuth) + else: + self.assertIsInstance(auth, req_auth.HTTPBasicAuth) + self.assertEqual('admin', auth.username) + self.assertEqual('admin_pass', auth.password) + + # check header + self.assertEqual( + {'Accept': 'application/json'}, + call_kwargs['headers']) + + # check return value + self.assertEqual(self.get().json(), data) + + def test_flow_statistics(self): + self._test_request( + self.client.statistics.get_flow_statistics, + '%(scheme)s://127.0.0.1:8080/controller/nb/v2' + '/statistics/%(container_name)s/flow') + + def test_port_statistics(self): + self._test_request( + self.client.statistics.get_port_statistics, + '%(scheme)s://127.0.0.1:8080/controller/nb/v2' + '/statistics/%(container_name)s/port') + + def test_table_statistics(self): + self._test_request( + self.client.statistics.get_table_statistics, + '%(scheme)s://127.0.0.1:8080/controller/nb/v2' + '/statistics/%(container_name)s/table') + + def test_topology(self): + self._test_request( + self.client.topology.get_topology, + '%(scheme)s://127.0.0.1:8080/controller/nb/v2' + '/topology/%(container_name)s') + + def test_user_links(self): + self._test_request( + self.client.topology.get_user_links, + '%(scheme)s://127.0.0.1:8080/controller/nb/v2' + '/topology/%(container_name)s/userLinks') + + def test_switch(self): + self._test_request( + self.client.switch_manager.get_nodes, + '%(scheme)s://127.0.0.1:8080/controller/nb/v2' + '/switchmanager/%(container_name)s/nodes') + + def test_active_hosts(self): + self._test_request( + self.client.host_tracker.get_active_hosts, + '%(scheme)s://127.0.0.1:8080/controller/nb/v2' + 
'/hosttracker/%(container_name)s/hosts/active') + + def test_inactive_hosts(self): + self._test_request( + self.client.host_tracker.get_inactive_hosts, + '%(scheme)s://127.0.0.1:8080/controller/nb/v2' + '/hosttracker/%(container_name)s/hosts/inactive') + + def test_http_error(self): + self.resp.status_code = 404 + self.resp.reason = 'Not Found' + + try: + self.client.statistics.get_flow_statistics('default') + self.fail('') + except client.OpenDaylightRESTAPIFailed as e: + self.assertEqual( + _('OpenDaylitght API returned %(status)s %(reason)s') % + {'status': self.resp.status_code, + 'reason': self.resp.reason}, + six.text_type(e)) + + def test_other_error(self): + + class _Exception(Exception): + pass + + self.get = mock.patch('requests.get', + side_effect=_Exception).start() + + self.assertRaises(_Exception, + self.client.statistics.get_flow_statistics, + 'default') + + +class TestClientHTTPDigestAuth(TestClientHTTPBasicAuth): + + auth_way = 'digest' + + +class TestClientHTTPSBasicAuth(TestClientHTTPBasicAuth): + + scheme = 'https' + + +class TestClientHTTPSDigestAuth(TestClientHTTPDigestAuth): + + scheme = 'https' diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/network/statistics/opendaylight/test_driver.py ceilometer-5.0.0~b3/ceilometer/tests/unit/network/statistics/opendaylight/test_driver.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/network/statistics/opendaylight/test_driver.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/network/statistics/opendaylight/test_driver.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,1706 @@ +# +# Copyright 2013 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
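
# NOTE (review): test_cache and test_multi_container below pin the ODL
# driver's per-cycle memoisation: every 'switch.*' meter polled with the
# same cache dict shares one set of REST round-trips, stored under the
# 'network.statistics.opendaylight' cache key. A minimal standalone sketch
# of that pattern; get_sample_data/fetch_all are simplified stand-ins, not
# the driver's real signatures:
CACHE_KEY = 'network.statistics.opendaylight'

def get_sample_data(meter_name, cache, fetch_all):
    # fetch_all() stands in for the batched REST calls; it runs at most
    # once per cache dict, i.e. once per polling cycle.
    if CACHE_KEY not in cache:
        cache[CACHE_KEY] = fetch_all()
    return cache[CACHE_KEY].get(meter_name, [])

calls = []

def fetch_all():
    calls.append(1)
    return {'switch': [(1, '00:00:00:00:00:00:00:02')]}

cache = {}
get_sample_data('switch', cache, fetch_all)
get_sample_data('switch', cache, fetch_all)
assert len(calls) == 1    # shared cache -> a single fetch
get_sample_data('switch', {}, fetch_all)
assert len(calls) == 2    # fresh cache -> a new fetch
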
+import abc + +import mock +from oslotest import base +import six +from six import moves +from six.moves.urllib import parse as url_parse + +from ceilometer.network.statistics.opendaylight import driver + + +@six.add_metaclass(abc.ABCMeta) +class _Base(base.BaseTestCase): + + @abc.abstractproperty + def flow_data(self): + pass + + @abc.abstractproperty + def port_data(self): + pass + + @abc.abstractproperty + def table_data(self): + pass + + @abc.abstractproperty + def topology_data(self): + pass + + @abc.abstractproperty + def switch_data(self): + pass + + @abc.abstractproperty + def user_links_data(self): + pass + + @abc.abstractproperty + def active_hosts_data(self): + pass + + @abc.abstractproperty + def inactive_hosts_data(self): + pass + + fake_odl_url = url_parse.ParseResult('opendaylight', + 'localhost:8080', + 'controller/nb/v2', + None, + None, + None) + + fake_params = url_parse.parse_qs('user=admin&password=admin&scheme=http&' + 'container_name=default&auth=basic') + + fake_params_multi_container = ( + url_parse.parse_qs('user=admin&password=admin&scheme=http&' + 'container_name=first&container_name=second&' + 'auth=basic')) + + def setUp(self): + super(_Base, self).setUp() + self.addCleanup(mock.patch.stopall) + + self.driver = driver.OpenDayLightDriver() + + self.get_flow_statistics = mock.patch( + 'ceilometer.network.statistics.opendaylight.client.' + 'StatisticsAPIClient.get_flow_statistics', + return_value=self.flow_data).start() + + mock.patch('ceilometer.network.statistics.opendaylight.client.' + 'StatisticsAPIClient.get_table_statistics', + return_value=self.table_data).start() + + mock.patch('ceilometer.network.statistics.opendaylight.client.' + 'StatisticsAPIClient.get_port_statistics', + return_value=self.port_data).start() + + mock.patch('ceilometer.network.statistics.opendaylight.client.' + 'TopologyAPIClient.get_topology', + return_value=self.topology_data).start() + + mock.patch('ceilometer.network.statistics.opendaylight.client.' + 'TopologyAPIClient.get_user_links', + return_value=self.user_links_data).start() + + mock.patch('ceilometer.network.statistics.opendaylight.client.' + 'SwitchManagerAPIClient.get_nodes', + return_value=self.switch_data).start() + + mock.patch('ceilometer.network.statistics.opendaylight.client.' + 'HostTrackerAPIClient.get_active_hosts', + return_value=self.active_hosts_data).start() + + mock.patch('ceilometer.network.statistics.opendaylight.client.' 
+ 'HostTrackerAPIClient.get_inactive_hosts', + return_value=self.inactive_hosts_data).start() + + def _test_for_meter(self, meter_name, expected_data): + sample_data = self.driver.get_sample_data(meter_name, + self.fake_odl_url, + self.fake_params, + {}) + + for sample, expected in moves.zip(sample_data, expected_data): + self.assertEqual(expected[0], sample[0]) # check volume + self.assertEqual(expected[1], sample[1]) # check resource id + self.assertEqual(expected[2], sample[2]) # check resource metadata + self.assertIsNotNone(sample[3]) # timestamp + + +class TestOpenDayLightDriverSpecial(_Base): + + flow_data = {"flowStatistics": []} + port_data = {"portStatistics": []} + table_data = {"tableStatistics": []} + topology_data = {"edgeProperties": []} + switch_data = {"nodeProperties": []} + user_links_data = {"userLinks": []} + active_hosts_data = {"hostConfig": []} + inactive_hosts_data = {"hostConfig": []} + + def test_not_implemented_meter(self): + sample_data = self.driver.get_sample_data('egg', + self.fake_odl_url, + self.fake_params, + {}) + self.assertIsNone(sample_data) + + sample_data = self.driver.get_sample_data('switch.table.egg', + self.fake_odl_url, + self.fake_params, + {}) + self.assertIsNone(sample_data) + + def test_cache(self): + cache = {} + self.driver.get_sample_data('switch', + self.fake_odl_url, + self.fake_params, + cache) + self.driver.get_sample_data('switch', + self.fake_odl_url, + self.fake_params, + cache) + self.assertEqual(1, self.get_flow_statistics.call_count) + + cache = {} + self.driver.get_sample_data('switch', + self.fake_odl_url, + self.fake_params, + cache) + self.assertEqual(2, self.get_flow_statistics.call_count) + + def test_multi_container(self): + cache = {} + self.driver.get_sample_data('switch', + self.fake_odl_url, + self.fake_params_multi_container, + cache) + self.assertEqual(2, self.get_flow_statistics.call_count) + + self.assertIn('network.statistics.opendaylight', cache) + + odl_data = cache['network.statistics.opendaylight'] + + self.assertIn('first', odl_data) + self.assertIn('second', odl_data) + + def test_http_error(self): + + mock.patch('ceilometer.network.statistics.opendaylight.client.' + 'StatisticsAPIClient.get_flow_statistics', + side_effect=Exception()).start() + + sample_data = self.driver.get_sample_data('switch', + self.fake_odl_url, + self.fake_params, + {}) + + self.assertEqual(0, len(sample_data)) + + mock.patch('ceilometer.network.statistics.opendaylight.client.' 
+ 'StatisticsAPIClient.get_flow_statistics', + side_effect=[Exception(), self.flow_data]).start() + cache = {} + self.driver.get_sample_data('switch', + self.fake_odl_url, + self.fake_params_multi_container, + cache) + + self.assertIn('network.statistics.opendaylight', cache) + + odl_data = cache['network.statistics.opendaylight'] + + self.assertIn('second', odl_data) + + +class TestOpenDayLightDriverSimple(_Base): + + flow_data = { + "flowStatistics": [ + { + "node": { + "id": "00:00:00:00:00:00:00:02", + "type": "OF" + }, + "flowStatistic": [ + { + "flow": { + "match": { + "matchField": [ + { + "type": "DL_TYPE", + "value": "2048" + }, + { + "mask": "255.255.255.255", + "type": "NW_DST", + "value": "1.1.1.1" + } + ] + }, + "actions": { + "@type": "output", + "port": { + "id": "3", + "node": { + "id": "00:00:00:00:00:00:00:02", + "type": "OF" + }, + "type": "OF" + } + }, + "hardTimeout": "0", + "id": "0", + "idleTimeout": "0", + "priority": "1" + }, + "byteCount": "0", + "durationNanoseconds": "397000000", + "durationSeconds": "1828", + "packetCount": "0", + "tableId": "0" + }, + ] + } + ] + } + port_data = { + "portStatistics": [ + { + "node": { + "id": "00:00:00:00:00:00:00:02", + "type": "OF" + }, + "portStatistic": [ + { + "nodeConnector": { + "id": "4", + "node": { + "id": "00:00:00:00:00:00:00:02", + "type": "OF" + }, + "type": "OF" + }, + "collisionCount": "0", + "receiveBytes": "0", + "receiveCrcError": "0", + "receiveDrops": "0", + "receiveErrors": "0", + "receiveFrameError": "0", + "receiveOverRunError": "0", + "receivePackets": "0", + "transmitBytes": "0", + "transmitDrops": "0", + "transmitErrors": "0", + "transmitPackets": "0" + }, + ] + } + ] + } + table_data = { + "tableStatistics": [ + { + "node": { + "id": "00:00:00:00:00:00:00:02", + "type": "OF" + }, + "tableStatistic": [ + { + "activeCount": "11", + "lookupCount": "816", + "matchedCount": "220", + "nodeTable": { + "id": "0", + "node": { + "id": "00:00:00:00:00:00:00:02", + "type": "OF" + } + } + }, + ] + } + ] + } + topology_data = {"edgeProperties": []} + switch_data = { + "nodeProperties": [ + { + "node": { + "id": "00:00:00:00:00:00:00:02", + "type": "OF" + }, + "properties": { + "actions": { + "value": "4095" + }, + "timeStamp": { + "name": "connectedSince", + "value": "1377291227877" + } + } + }, + ] + } + user_links_data = {"userLinks": []} + active_hosts_data = {"hostConfig": []} + inactive_hosts_data = {"hostConfig": []} + + def test_meter_switch(self): + expected_data = [ + (1, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + "properties_actions": "4095", + "properties_timeStamp_connectedSince": "1377291227877" + }), + ] + + self._test_for_meter('switch', expected_data) + + def test_meter_switch_port(self): + expected_data = [ + (1, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '4', + }), + ] + self._test_for_meter('switch.port', expected_data) + + def test_meter_switch_port_receive_packets(self): + expected_data = [ + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '4'}), + ] + self._test_for_meter('switch.port.receive.packets', expected_data) + + def test_meter_switch_port_transmit_packets(self): + expected_data = [ + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '4'}), + ] + self._test_for_meter('switch.port.transmit.packets', expected_data) + + def test_meter_switch_port_receive_bytes(self): + expected_data = [ 
+ (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '4'}), + ] + self._test_for_meter('switch.port.receive.bytes', expected_data) + + def test_meter_switch_port_transmit_bytes(self): + expected_data = [ + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '4'}), + ] + self._test_for_meter('switch.port.transmit.bytes', expected_data) + + def test_meter_switch_port_receive_drops(self): + expected_data = [ + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '4'}), + ] + self._test_for_meter('switch.port.receive.drops', expected_data) + + def test_meter_switch_port_transmit_drops(self): + expected_data = [ + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '4'}), + ] + self._test_for_meter('switch.port.transmit.drops', expected_data) + + def test_meter_switch_port_receive_errors(self): + expected_data = [ + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '4'}), + ] + self._test_for_meter('switch.port.receive.errors', expected_data) + + def test_meter_switch_port_transmit_errors(self): + expected_data = [ + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '4'}), + ] + self._test_for_meter('switch.port.transmit.errors', expected_data) + + def test_meter_switch_port_receive_frame_error(self): + expected_data = [ + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '4'}), + ] + self._test_for_meter('switch.port.receive.frame_error', expected_data) + + def test_meter_switch_port_receive_overrun_error(self): + expected_data = [ + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '4'}), + ] + self._test_for_meter('switch.port.receive.overrun_error', + expected_data) + + def test_meter_switch_port_receive_crc_error(self): + expected_data = [ + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '4'}), + ] + self._test_for_meter('switch.port.receive.crc_error', expected_data) + + def test_meter_switch_port_collision_count(self): + expected_data = [ + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '4'}), + ] + self._test_for_meter('switch.port.collision.count', expected_data) + + def test_meter_switch_table(self): + expected_data = [ + (1, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'table_id': '0'}), + ] + self._test_for_meter('switch.table', expected_data) + + def test_meter_switch_table_active_entries(self): + expected_data = [ + (11, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'table_id': '0'}), + ] + self._test_for_meter('switch.table.active.entries', expected_data) + + def test_meter_switch_table_lookup_packets(self): + expected_data = [ + (816, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'table_id': '0'}), + ] + self._test_for_meter('switch.table.lookup.packets', expected_data) + + def test_meter_switch_table_matched_packets(self): + expected_data = [ + (220, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'table_id': '0'}), + ] + self._test_for_meter('switch.table.matched.packets', expected_data) + + def 
test_meter_switch_flow(self): + expected_data = [ + (1, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'table_id': '0', + 'flow_id': '0', + "flow_match_matchField[0]_type": "DL_TYPE", + "flow_match_matchField[0]_value": "2048", + "flow_match_matchField[1]_mask": "255.255.255.255", + "flow_match_matchField[1]_type": "NW_DST", + "flow_match_matchField[1]_value": "1.1.1.1", + "flow_actions_@type": "output", + "flow_actions_port_id": "3", + "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", + "flow_actions_port_node_type": "OF", + "flow_actions_port_type": "OF", + "flow_hardTimeout": "0", + "flow_idleTimeout": "0", + "flow_priority": "1" + }), + ] + self._test_for_meter('switch.flow', expected_data) + + def test_meter_switch_flow_duration_seconds(self): + expected_data = [ + (1828, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'table_id': '0', + 'flow_id': '0', + "flow_match_matchField[0]_type": "DL_TYPE", + "flow_match_matchField[0]_value": "2048", + "flow_match_matchField[1]_mask": "255.255.255.255", + "flow_match_matchField[1]_type": "NW_DST", + "flow_match_matchField[1]_value": "1.1.1.1", + "flow_actions_@type": "output", + "flow_actions_port_id": "3", + "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", + "flow_actions_port_node_type": "OF", + "flow_actions_port_type": "OF", + "flow_hardTimeout": "0", + "flow_idleTimeout": "0", + "flow_priority": "1"}), + ] + self._test_for_meter('switch.flow.duration_seconds', expected_data) + + def test_meter_switch_flow_duration_nanoseconds(self): + expected_data = [ + (397000000, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'table_id': '0', + 'flow_id': '0', + "flow_match_matchField[0]_type": "DL_TYPE", + "flow_match_matchField[0]_value": "2048", + "flow_match_matchField[1]_mask": "255.255.255.255", + "flow_match_matchField[1]_type": "NW_DST", + "flow_match_matchField[1]_value": "1.1.1.1", + "flow_actions_@type": "output", + "flow_actions_port_id": "3", + "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", + "flow_actions_port_node_type": "OF", + "flow_actions_port_type": "OF", + "flow_hardTimeout": "0", + "flow_idleTimeout": "0", + "flow_priority": "1"}), + ] + self._test_for_meter('switch.flow.duration_nanoseconds', expected_data) + + def test_meter_switch_flow_packets(self): + expected_data = [ + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'table_id': '0', + 'flow_id': '0', + "flow_match_matchField[0]_type": "DL_TYPE", + "flow_match_matchField[0]_value": "2048", + "flow_match_matchField[1]_mask": "255.255.255.255", + "flow_match_matchField[1]_type": "NW_DST", + "flow_match_matchField[1]_value": "1.1.1.1", + "flow_actions_@type": "output", + "flow_actions_port_id": "3", + "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", + "flow_actions_port_node_type": "OF", + "flow_actions_port_type": "OF", + "flow_hardTimeout": "0", + "flow_idleTimeout": "0", + "flow_priority": "1"}), + ] + self._test_for_meter('switch.flow.packets', expected_data) + + def test_meter_switch_flow_bytes(self): + expected_data = [ + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'table_id': '0', + 'flow_id': '0', + "flow_match_matchField[0]_type": "DL_TYPE", + "flow_match_matchField[0]_value": "2048", + "flow_match_matchField[1]_mask": "255.255.255.255", + "flow_match_matchField[1]_type": "NW_DST", + "flow_match_matchField[1]_value": 
"1.1.1.1", + "flow_actions_@type": "output", + "flow_actions_port_id": "3", + "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", + "flow_actions_port_node_type": "OF", + "flow_actions_port_type": "OF", + "flow_hardTimeout": "0", + "flow_idleTimeout": "0", + "flow_priority": "1"}), + ] + self._test_for_meter('switch.flow.bytes', expected_data) + + +class TestOpenDayLightDriverComplex(_Base): + + flow_data = { + "flowStatistics": [ + { + "node": { + "id": "00:00:00:00:00:00:00:02", + "type": "OF" + }, + "flowStatistic": [ + { + "flow": { + "match": { + "matchField": [ + { + "type": "DL_TYPE", + "value": "2048" + }, + { + "mask": "255.255.255.255", + "type": "NW_DST", + "value": "1.1.1.1" + } + ] + }, + "actions": { + "@type": "output", + "port": { + "id": "3", + "node": { + "id": "00:00:00:00:00:00:00:02", + "type": "OF" + }, + "type": "OF" + } + }, + "hardTimeout": "0", + "id": "0", + "idleTimeout": "0", + "priority": "1" + }, + "byteCount": "0", + "durationNanoseconds": "397000000", + "durationSeconds": "1828", + "packetCount": "0", + "tableId": "0" + }, + { + "flow": { + "match": { + "matchField": [ + { + "type": "DL_TYPE", + "value": "2048" + }, + { + "mask": "255.255.255.255", + "type": "NW_DST", + "value": "1.1.1.2" + } + ] + }, + "actions": { + "@type": "output", + "port": { + "id": "4", + "node": { + "id": "00:00:00:00:00:00:00:03", + "type": "OF" + }, + "type": "OF" + } + }, + "hardTimeout": "0", + "id": "0", + "idleTimeout": "0", + "priority": "1" + }, + "byteCount": "89", + "durationNanoseconds": "200000", + "durationSeconds": "5648", + "packetCount": "30", + "tableId": "1" + } + ] + } + ] + } + port_data = { + "portStatistics": [ + { + "node": { + "id": "00:00:00:00:00:00:00:02", + "type": "OF" + }, + "portStatistic": [ + { + "nodeConnector": { + "id": "4", + "node": { + "id": "00:00:00:00:00:00:00:02", + "type": "OF" + }, + "type": "OF" + }, + "collisionCount": "0", + "receiveBytes": "0", + "receiveCrcError": "0", + "receiveDrops": "0", + "receiveErrors": "0", + "receiveFrameError": "0", + "receiveOverRunError": "0", + "receivePackets": "0", + "transmitBytes": "0", + "transmitDrops": "0", + "transmitErrors": "0", + "transmitPackets": "0" + }, + { + "nodeConnector": { + "id": "3", + "node": { + "id": "00:00:00:00:00:00:00:02", + "type": "OF" + }, + "type": "OF" + }, + "collisionCount": "0", + "receiveBytes": "12740", + "receiveCrcError": "0", + "receiveDrops": "0", + "receiveErrors": "0", + "receiveFrameError": "0", + "receiveOverRunError": "0", + "receivePackets": "182", + "transmitBytes": "12110", + "transmitDrops": "0", + "transmitErrors": "0", + "transmitPackets": "173" + }, + { + "nodeConnector": { + "id": "2", + "node": { + "id": "00:00:00:00:00:00:00:02", + "type": "OF" + }, + "type": "OF" + }, + "collisionCount": "0", + "receiveBytes": "12180", + "receiveCrcError": "0", + "receiveDrops": "0", + "receiveErrors": "0", + "receiveFrameError": "0", + "receiveOverRunError": "0", + "receivePackets": "174", + "transmitBytes": "12670", + "transmitDrops": "0", + "transmitErrors": "0", + "transmitPackets": "181" + }, + { + "nodeConnector": { + "id": "1", + "node": { + "id": "00:00:00:00:00:00:00:02", + "type": "OF" + }, + "type": "OF" + }, + "collisionCount": "0", + "receiveBytes": "0", + "receiveCrcError": "0", + "receiveDrops": "0", + "receiveErrors": "0", + "receiveFrameError": "0", + "receiveOverRunError": "0", + "receivePackets": "0", + "transmitBytes": "0", + "transmitDrops": "0", + "transmitErrors": "0", + "transmitPackets": "0" + }, + { + "nodeConnector": { + "id": "0", 
+ "node": { + "id": "00:00:00:00:00:00:00:02", + "type": "OF" + }, + "type": "OF" + }, + "collisionCount": "0", + "receiveBytes": "0", + "receiveCrcError": "0", + "receiveDrops": "0", + "receiveErrors": "0", + "receiveFrameError": "0", + "receiveOverRunError": "0", + "receivePackets": "0", + "transmitBytes": "0", + "transmitDrops": "0", + "transmitErrors": "0", + "transmitPackets": "0" + } + ] + } + ] + } + table_data = { + "tableStatistics": [ + { + "node": { + "id": "00:00:00:00:00:00:00:02", + "type": "OF" + }, + "tableStatistic": [ + { + "activeCount": "11", + "lookupCount": "816", + "matchedCount": "220", + "nodeTable": { + "id": "0", + "node": { + "id": "00:00:00:00:00:00:00:02", + "type": "OF" + } + } + }, + { + "activeCount": "20", + "lookupCount": "10", + "matchedCount": "5", + "nodeTable": { + "id": "1", + "node": { + "id": "00:00:00:00:00:00:00:02", + "type": "OF" + } + } + } + ] + } + ] + } + topology_data = { + "edgeProperties": [ + { + "edge": { + "headNodeConnector": { + "id": "2", + "node": { + "id": "00:00:00:00:00:00:00:03", + "type": "OF" + }, + "type": "OF" + }, + "tailNodeConnector": { + "id": "2", + "node": { + "id": "00:00:00:00:00:00:00:02", + "type": "OF" + }, + "type": "OF" + } + }, + "properties": { + "bandwidth": { + "value": 10000000000 + }, + "config": { + "value": 1 + }, + "name": { + "value": "s2-eth3" + }, + "state": { + "value": 1 + }, + "timeStamp": { + "name": "creation", + "value": 1379527162648 + } + } + }, + { + "edge": { + "headNodeConnector": { + "id": "5", + "node": { + "id": "00:00:00:00:00:00:00:02", + "type": "OF" + }, + "type": "OF" + }, + "tailNodeConnector": { + "id": "2", + "node": { + "id": "00:00:00:00:00:00:00:04", + "type": "OF" + }, + "type": "OF" + } + }, + "properties": { + "timeStamp": { + "name": "creation", + "value": 1379527162648 + } + } + } + ] + } + switch_data = { + "nodeProperties": [ + { + "node": { + "id": "00:00:00:00:00:00:00:02", + "type": "OF" + }, + "properties": { + "actions": { + "value": "4095" + }, + "buffers": { + "value": "256" + }, + "capabilities": { + "value": "199" + }, + "description": { + "value": "None" + }, + "macAddress": { + "value": "00:00:00:00:00:02" + }, + "tables": { + "value": "-1" + }, + "timeStamp": { + "name": "connectedSince", + "value": "1377291227877" + } + } + }, + { + "node": { + "id": "00:00:00:00:00:00:00:03", + "type": "OF" + }, + "properties": { + "actions": { + "value": "1024" + }, + "buffers": { + "value": "512" + }, + "capabilities": { + "value": "1000" + }, + "description": { + "value": "Foo Bar" + }, + "macAddress": { + "value": "00:00:00:00:00:03" + }, + "tables": { + "value": "10" + }, + "timeStamp": { + "name": "connectedSince", + "value": "1377291228000" + } + } + } + ] + } + user_links_data = { + "userLinks": [ + { + "dstNodeConnector": "OF|5@OF|00:00:00:00:00:00:00:05", + "name": "link1", + "srcNodeConnector": "OF|3@OF|00:00:00:00:00:00:00:02", + "status": "Success" + } + ] + } + active_hosts_data = { + "hostConfig": [ + { + "dataLayerAddress": "00:00:00:00:01:01", + "networkAddress": "1.1.1.1", + "nodeConnectorId": "9", + "nodeConnectorType": "OF", + "nodeId": "00:00:00:00:00:00:00:01", + "nodeType": "OF", + "staticHost": "false", + "vlan": "0" + }, + { + "dataLayerAddress": "00:00:00:00:02:02", + "networkAddress": "2.2.2.2", + "nodeConnectorId": "1", + "nodeConnectorType": "OF", + "nodeId": "00:00:00:00:00:00:00:02", + "nodeType": "OF", + "staticHost": "true", + "vlan": "0" + } + ] + } + inactive_hosts_data = { + "hostConfig": [ + { + "dataLayerAddress": 
"00:00:00:01:01:01", + "networkAddress": "1.1.1.3", + "nodeConnectorId": "8", + "nodeConnectorType": "OF", + "nodeId": "00:00:00:00:00:00:00:01", + "nodeType": "OF", + "staticHost": "false", + "vlan": "0" + }, + { + "dataLayerAddress": "00:00:00:01:02:02", + "networkAddress": "2.2.2.4", + "nodeConnectorId": "0", + "nodeConnectorType": "OF", + "nodeId": "00:00:00:00:00:00:00:02", + "nodeType": "OF", + "staticHost": "false", + "vlan": "1" + } + ] + } + + def test_meter_switch(self): + expected_data = [ + (1, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + "properties_actions": "4095", + "properties_buffers": "256", + "properties_capabilities": "199", + "properties_description": "None", + "properties_macAddress": "00:00:00:00:00:02", + "properties_tables": "-1", + "properties_timeStamp_connectedSince": "1377291227877" + }), + (1, "00:00:00:00:00:00:00:03", { + 'controller': 'OpenDaylight', + 'container': 'default', + "properties_actions": "1024", + "properties_buffers": "512", + "properties_capabilities": "1000", + "properties_description": "Foo Bar", + "properties_macAddress": "00:00:00:00:00:03", + "properties_tables": "10", + "properties_timeStamp_connectedSince": "1377291228000" + }), + ] + + self._test_for_meter('switch', expected_data) + + def test_meter_switch_port(self): + expected_data = [ + (1, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '4', + }), + (1, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '3', + 'user_link_node_id': '00:00:00:00:00:00:00:05', + 'user_link_node_port': '5', + 'user_link_status': 'Success', + 'user_link_name': 'link1', + }), + (1, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '2', + 'topology_node_id': '00:00:00:00:00:00:00:03', + 'topology_node_port': '2', + "topology_bandwidth": 10000000000, + "topology_config": 1, + "topology_name": "s2-eth3", + "topology_state": 1, + "topology_timeStamp_creation": 1379527162648 + }), + (1, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '1', + 'host_status': 'active', + 'host_dataLayerAddress': '00:00:00:00:02:02', + 'host_networkAddress': '2.2.2.2', + 'host_staticHost': 'true', + 'host_vlan': '0', + }), + (1, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '0', + 'host_status': 'inactive', + 'host_dataLayerAddress': '00:00:00:01:02:02', + 'host_networkAddress': '2.2.2.4', + 'host_staticHost': 'false', + 'host_vlan': '1', + }), + ] + self._test_for_meter('switch.port', expected_data) + + def test_meter_switch_port_receive_packets(self): + expected_data = [ + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '4'}), + (182, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '3'}), + (174, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '2'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '1'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '0'}), + ] + self._test_for_meter('switch.port.receive.packets', expected_data) + + def test_meter_switch_port_transmit_packets(self): + expected_data = [ + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 
'default', + 'port': '4'}), + (173, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '3'}), + (181, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '2'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '1'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '0'}), + ] + self._test_for_meter('switch.port.transmit.packets', expected_data) + + def test_meter_switch_port_receive_bytes(self): + expected_data = [ + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '4'}), + (12740, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '3'}), + (12180, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '2'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '1'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '0'}), + ] + self._test_for_meter('switch.port.receive.bytes', expected_data) + + def test_meter_switch_port_transmit_bytes(self): + expected_data = [ + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '4'}), + (12110, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '3'}), + (12670, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '2'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '1'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '0'}), + ] + self._test_for_meter('switch.port.transmit.bytes', expected_data) + + def test_meter_switch_port_receive_drops(self): + expected_data = [ + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '4'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '3'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '2'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '1'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '0'}), + ] + self._test_for_meter('switch.port.receive.drops', expected_data) + + def test_meter_switch_port_transmit_drops(self): + expected_data = [ + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '4'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '3'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '2'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '1'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '0'}), + ] + self._test_for_meter('switch.port.transmit.drops', expected_data) + + def test_meter_switch_port_receive_errors(self): + expected_data = [ + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '4'}), + (0, "00:00:00:00:00:00:00:02", 
{ + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '3'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '2'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '1'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '0'}), + ] + self._test_for_meter('switch.port.receive.errors', expected_data) + + def test_meter_switch_port_transmit_errors(self): + expected_data = [ + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '4'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '3'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '2'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '1'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '0'}), + ] + self._test_for_meter('switch.port.transmit.errors', expected_data) + + def test_meter_switch_port_receive_frame_error(self): + expected_data = [ + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '4'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '3'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '2'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '1'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '0'}), + ] + self._test_for_meter('switch.port.receive.frame_error', expected_data) + + def test_meter_switch_port_receive_overrun_error(self): + expected_data = [ + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '4'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '3'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '2'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '1'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '0'}), + ] + self._test_for_meter('switch.port.receive.overrun_error', + expected_data) + + def test_meter_switch_port_receive_crc_error(self): + expected_data = [ + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '4'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '3'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '2'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '1'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '0'}), + ] + self._test_for_meter('switch.port.receive.crc_error', expected_data) + + def test_meter_switch_port_collision_count(self): + expected_data = [ + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '4'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 
'container': 'default', + 'port': '3'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '2'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '1'}), + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'port': '0'}), + ] + self._test_for_meter('switch.port.collision.count', expected_data) + + def test_meter_switch_table(self): + expected_data = [ + (1, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'table_id': '0'}), + (1, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'table_id': '1'}), + ] + self._test_for_meter('switch.table', expected_data) + + def test_meter_switch_table_active_entries(self): + expected_data = [ + (11, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'table_id': '0'}), + (20, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'table_id': '1'}), + ] + self._test_for_meter('switch.table.active.entries', expected_data) + + def test_meter_switch_table_lookup_packets(self): + expected_data = [ + (816, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'table_id': '0'}), + (10, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'table_id': '1'}), + ] + self._test_for_meter('switch.table.lookup.packets', expected_data) + + def test_meter_switch_table_matched_packets(self): + expected_data = [ + (220, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'table_id': '0'}), + (5, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'table_id': '1'}), + ] + self._test_for_meter('switch.table.matched.packets', expected_data) + + def test_meter_switch_flow(self): + expected_data = [ + (1, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'table_id': '0', + 'flow_id': '0', + "flow_match_matchField[0]_type": "DL_TYPE", + "flow_match_matchField[0]_value": "2048", + "flow_match_matchField[1]_mask": "255.255.255.255", + "flow_match_matchField[1]_type": "NW_DST", + "flow_match_matchField[1]_value": "1.1.1.1", + "flow_actions_@type": "output", + "flow_actions_port_id": "3", + "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", + "flow_actions_port_node_type": "OF", + "flow_actions_port_type": "OF", + "flow_hardTimeout": "0", + "flow_idleTimeout": "0", + "flow_priority": "1" + }), + (1, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'table_id': '1', + 'flow_id': '0', + "flow_match_matchField[0]_type": "DL_TYPE", + "flow_match_matchField[0]_value": "2048", + "flow_match_matchField[1]_mask": "255.255.255.255", + "flow_match_matchField[1]_type": "NW_DST", + "flow_match_matchField[1]_value": "1.1.1.2", + "flow_actions_@type": "output", + "flow_actions_port_id": "4", + "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", + "flow_actions_port_node_type": "OF", + "flow_actions_port_type": "OF", + "flow_hardTimeout": "0", + "flow_idleTimeout": "0", + "flow_priority": "1" + }), + ] + self._test_for_meter('switch.flow', expected_data) + + def test_meter_switch_flow_duration_seconds(self): + expected_data = [ + (1828, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'table_id': '0', + 'flow_id': '0', + 
"flow_match_matchField[0]_type": "DL_TYPE", + "flow_match_matchField[0]_value": "2048", + "flow_match_matchField[1]_mask": "255.255.255.255", + "flow_match_matchField[1]_type": "NW_DST", + "flow_match_matchField[1]_value": "1.1.1.1", + "flow_actions_@type": "output", + "flow_actions_port_id": "3", + "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", + "flow_actions_port_node_type": "OF", + "flow_actions_port_type": "OF", + "flow_hardTimeout": "0", + "flow_idleTimeout": "0", + "flow_priority": "1"}), + (5648, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'table_id': '1', + 'flow_id': '0', + "flow_match_matchField[0]_type": "DL_TYPE", + "flow_match_matchField[0]_value": "2048", + "flow_match_matchField[1]_mask": "255.255.255.255", + "flow_match_matchField[1]_type": "NW_DST", + "flow_match_matchField[1]_value": "1.1.1.2", + "flow_actions_@type": "output", + "flow_actions_port_id": "4", + "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", + "flow_actions_port_node_type": "OF", + "flow_actions_port_type": "OF", + "flow_hardTimeout": "0", + "flow_idleTimeout": "0", + "flow_priority": "1"}), + ] + self._test_for_meter('switch.flow.duration_seconds', expected_data) + + def test_meter_switch_flow_duration_nanoseconds(self): + expected_data = [ + (397000000, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'table_id': '0', + 'flow_id': '0', + "flow_match_matchField[0]_type": "DL_TYPE", + "flow_match_matchField[0]_value": "2048", + "flow_match_matchField[1]_mask": "255.255.255.255", + "flow_match_matchField[1]_type": "NW_DST", + "flow_match_matchField[1]_value": "1.1.1.1", + "flow_actions_@type": "output", + "flow_actions_port_id": "3", + "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", + "flow_actions_port_node_type": "OF", + "flow_actions_port_type": "OF", + "flow_hardTimeout": "0", + "flow_idleTimeout": "0", + "flow_priority": "1"}), + (200000, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'table_id': '1', + 'flow_id': '0', + "flow_match_matchField[0]_type": "DL_TYPE", + "flow_match_matchField[0]_value": "2048", + "flow_match_matchField[1]_mask": "255.255.255.255", + "flow_match_matchField[1]_type": "NW_DST", + "flow_match_matchField[1]_value": "1.1.1.2", + "flow_actions_@type": "output", + "flow_actions_port_id": "4", + "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", + "flow_actions_port_node_type": "OF", + "flow_actions_port_type": "OF", + "flow_hardTimeout": "0", + "flow_idleTimeout": "0", + "flow_priority": "1"}), + ] + self._test_for_meter('switch.flow.duration_nanoseconds', expected_data) + + def test_meter_switch_flow_packets(self): + expected_data = [ + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'table_id': '0', + 'flow_id': '0', + "flow_match_matchField[0]_type": "DL_TYPE", + "flow_match_matchField[0]_value": "2048", + "flow_match_matchField[1]_mask": "255.255.255.255", + "flow_match_matchField[1]_type": "NW_DST", + "flow_match_matchField[1]_value": "1.1.1.1", + "flow_actions_@type": "output", + "flow_actions_port_id": "3", + "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", + "flow_actions_port_node_type": "OF", + "flow_actions_port_type": "OF", + "flow_hardTimeout": "0", + "flow_idleTimeout": "0", + "flow_priority": "1"}), + (30, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'table_id': '1', + 'flow_id': '0', + 
"flow_match_matchField[0]_type": "DL_TYPE", + "flow_match_matchField[0]_value": "2048", + "flow_match_matchField[1]_mask": "255.255.255.255", + "flow_match_matchField[1]_type": "NW_DST", + "flow_match_matchField[1]_value": "1.1.1.2", + "flow_actions_@type": "output", + "flow_actions_port_id": "4", + "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", + "flow_actions_port_node_type": "OF", + "flow_actions_port_type": "OF", + "flow_hardTimeout": "0", + "flow_idleTimeout": "0", + "flow_priority": "1"}), + ] + self._test_for_meter('switch.flow.packets', expected_data) + + def test_meter_switch_flow_bytes(self): + expected_data = [ + (0, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'table_id': '0', + 'flow_id': '0', + "flow_match_matchField[0]_type": "DL_TYPE", + "flow_match_matchField[0]_value": "2048", + "flow_match_matchField[1]_mask": "255.255.255.255", + "flow_match_matchField[1]_type": "NW_DST", + "flow_match_matchField[1]_value": "1.1.1.1", + "flow_actions_@type": "output", + "flow_actions_port_id": "3", + "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", + "flow_actions_port_node_type": "OF", + "flow_actions_port_type": "OF", + "flow_hardTimeout": "0", + "flow_idleTimeout": "0", + "flow_priority": "1"}), + (89, "00:00:00:00:00:00:00:02", { + 'controller': 'OpenDaylight', + 'container': 'default', + 'table_id': '1', + 'flow_id': '0', + "flow_match_matchField[0]_type": "DL_TYPE", + "flow_match_matchField[0]_value": "2048", + "flow_match_matchField[1]_mask": "255.255.255.255", + "flow_match_matchField[1]_type": "NW_DST", + "flow_match_matchField[1]_value": "1.1.1.2", + "flow_actions_@type": "output", + "flow_actions_port_id": "4", + "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", + "flow_actions_port_node_type": "OF", + "flow_actions_port_type": "OF", + "flow_hardTimeout": "0", + "flow_idleTimeout": "0", + "flow_priority": "1"}), + ] + self._test_for_meter('switch.flow.bytes', expected_data) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/network/statistics/test_driver.py ceilometer-5.0.0~b3/ceilometer/tests/unit/network/statistics/test_driver.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/network/statistics/test_driver.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/network/statistics/test_driver.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,37 @@ +# +# Copyright 2014 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+from oslotest import base + +from ceilometer.network.statistics import driver + + +class TestDriver(base.BaseTestCase): + + @staticmethod + def test_driver_ok(): + + class OkDriver(driver.Driver): + + def get_sample_data(self, meter_name, resources, cache): + pass + + OkDriver() + + def test_driver_ng(self): + + class NgDriver(driver.Driver): + """get_sample_data method is lost.""" + + self.assertRaises(TypeError, NgDriver) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/network/statistics/test_flow.py ceilometer-5.0.0~b3/ceilometer/tests/unit/network/statistics/test_flow.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/network/statistics/test_flow.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/network/statistics/test_flow.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,56 @@ +# +# Copyright 2014 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from ceilometer.network.statistics import flow +from ceilometer import sample +from ceilometer.tests.unit.network import statistics + + +class TestFlowPollsters(statistics._PollsterTestBase): + + def test_flow_pollster(self): + self._test_pollster( + flow.FlowPollster, + 'switch.flow', + sample.TYPE_GAUGE, + 'flow') + + def test_flow_pollster_duration_seconds(self): + self._test_pollster( + flow.FlowPollsterDurationSeconds, + 'switch.flow.duration_seconds', + sample.TYPE_GAUGE, + 's') + + def test_flow_pollster_duration_nanoseconds(self): + self._test_pollster( + flow.FlowPollsterDurationNanoseconds, + 'switch.flow.duration_nanoseconds', + sample.TYPE_GAUGE, + 'ns') + + def test_flow_pollster_packets(self): + self._test_pollster( + flow.FlowPollsterPackets, + 'switch.flow.packets', + sample.TYPE_CUMULATIVE, + 'packet') + + def test_flow_pollster_bytes(self): + self._test_pollster( + flow.FlowPollsterBytes, + 'switch.flow.bytes', + sample.TYPE_CUMULATIVE, + 'B') diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/network/statistics/test_port.py ceilometer-5.0.0~b3/ceilometer/tests/unit/network/statistics/test_port.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/network/statistics/test_port.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/network/statistics/test_port.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,112 @@ +# +# Copyright 2014 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
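test_driver_ng above passes only if driver.Driver refuses instantiation when get_sample_data is missing, i.e. the base class is abstract. A minimal sketch of such a base, assuming the abc/six metaclass pattern common in ceilometer at this point; the real class lives in ceilometer/network/statistics/driver.py:

    import abc

    import six


    @six.add_metaclass(abc.ABCMeta)
    class Driver(object):

        @abc.abstractmethod
        def get_sample_data(self, meter_name, parse_url, params, cache):
            """Yield (volume, resource_id, resource_metadata, timestamp)."""

    # A subclass that omits get_sample_data cannot be instantiated, which
    # is what assertRaises(TypeError, NgDriver) verifies. Note that abc
    # checks only the method's presence, not its signature, so OkDriver's
    # (meter_name, resources, cache) variant also satisfies it.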
+ +from ceilometer.network.statistics import port +from ceilometer import sample +from ceilometer.tests.unit.network import statistics + + +class TestPortPollsters(statistics._PollsterTestBase): + + def test_port_pollster(self): + self._test_pollster( + port.PortPollster, + 'switch.port', + sample.TYPE_GAUGE, + 'port') + + def test_port_pollster_receive_packets(self): + self._test_pollster( + port.PortPollsterReceivePackets, + 'switch.port.receive.packets', + sample.TYPE_CUMULATIVE, + 'packet') + + def test_port_pollster_transmit_packets(self): + self._test_pollster( + port.PortPollsterTransmitPackets, + 'switch.port.transmit.packets', + sample.TYPE_CUMULATIVE, + 'packet') + + def test_port_pollster_receive_bytes(self): + self._test_pollster( + port.PortPollsterReceiveBytes, + 'switch.port.receive.bytes', + sample.TYPE_CUMULATIVE, + 'B') + + def test_port_pollster_transmit_bytes(self): + self._test_pollster( + port.PortPollsterTransmitBytes, + 'switch.port.transmit.bytes', + sample.TYPE_CUMULATIVE, + 'B') + + def test_port_pollster_receive_drops(self): + self._test_pollster( + port.PortPollsterReceiveDrops, + 'switch.port.receive.drops', + sample.TYPE_CUMULATIVE, + 'packet') + + def test_port_pollster_transmit_drops(self): + self._test_pollster( + port.PortPollsterTransmitDrops, + 'switch.port.transmit.drops', + sample.TYPE_CUMULATIVE, + 'packet') + + def test_port_pollster_receive_errors(self): + self._test_pollster( + port.PortPollsterReceiveErrors, + 'switch.port.receive.errors', + sample.TYPE_CUMULATIVE, + 'packet') + + def test_port_pollster_transmit_errors(self): + self._test_pollster( + port.PortPollsterTransmitErrors, + 'switch.port.transmit.errors', + sample.TYPE_CUMULATIVE, + 'packet') + + def test_port_pollster_receive_frame_errors(self): + self._test_pollster( + port.PortPollsterReceiveFrameErrors, + 'switch.port.receive.frame_error', + sample.TYPE_CUMULATIVE, + 'packet') + + def test_port_pollster_receive_overrun_errors(self): + self._test_pollster( + port.PortPollsterReceiveOverrunErrors, + 'switch.port.receive.overrun_error', + sample.TYPE_CUMULATIVE, + 'packet') + + def test_port_pollster_receive_crc_errors(self): + self._test_pollster( + port.PortPollsterReceiveCRCErrors, + 'switch.port.receive.crc_error', + sample.TYPE_CUMULATIVE, + 'packet') + + def test_port_pollster_collision_count(self): + self._test_pollster( + port.PortPollsterCollisionCount, + 'switch.port.collision.count', + sample.TYPE_CUMULATIVE, + 'packet') diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/network/statistics/test_statistics.py ceilometer-5.0.0~b3/ceilometer/tests/unit/network/statistics/test_statistics.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/network/statistics/test_statistics.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/network/statistics/test_statistics.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,195 @@ +# +# Copyright 2014 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
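Every pollster test above reduces to one call of _PollsterTestBase._test_pollster with a pollster class, a meter name, a sample type, and a unit. Judging by the imports, the helper lives in ceilometer/tests/unit/network/statistics/__init__.py; a plausible sketch, assuming it simply instantiates the class and compares its three declared attributes:

    from oslotest import base


    class _PollsterTestBase(base.BaseTestCase):

        def _test_pollster(self, pollster_class, meter_name,
                           meter_type, meter_unit):
            # Instantiation also proves the class is concrete; then the
            # declared meter attributes are compared. Illustrative sketch,
            # not necessarily the helper's verbatim body.
            pollster = pollster_class()
            self.assertEqual(meter_name, pollster.meter_name)
            self.assertEqual(meter_type, pollster.meter_type)
            self.assertEqual(meter_unit, pollster.meter_unit)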
+ +import datetime + +from oslo_utils import timeutils +from oslotest import base + +from ceilometer.network import statistics +from ceilometer.network.statistics import driver +from ceilometer import sample + + +class TestBase(base.BaseTestCase): + + @staticmethod + def test_subclass_ok(): + + class OkSubclass(statistics._Base): + + meter_name = 'foo' + meter_type = sample.TYPE_GAUGE + meter_unit = 'B' + + OkSubclass() + + def test_subclass_ng(self): + + class NgSubclass1(statistics._Base): + """meter_name is lost.""" + + meter_type = sample.TYPE_GAUGE + meter_unit = 'B' + + class NgSubclass2(statistics._Base): + """meter_type is lost.""" + + meter_name = 'foo' + meter_unit = 'B' + + class NgSubclass3(statistics._Base): + """meter_unit is lost.""" + + meter_name = 'foo' + meter_type = sample.TYPE_GAUGE + + self.assertRaises(TypeError, NgSubclass1) + self.assertRaises(TypeError, NgSubclass2) + self.assertRaises(TypeError, NgSubclass3) + + +class TestBaseGetSamples(base.BaseTestCase): + + def setUp(self): + super(TestBaseGetSamples, self).setUp() + + class FakePollster(statistics._Base): + meter_name = 'foo' + meter_type = sample.TYPE_CUMULATIVE + meter_unit = 'bar' + + self.pollster = FakePollster() + + def tearDown(self): + statistics._Base.drivers = {} + super(TestBaseGetSamples, self).tearDown() + + @staticmethod + def _setup_ext_mgr(**drivers): + statistics._Base.drivers = drivers + + def _make_fake_driver(self, *return_values): + class FakeDriver(driver.Driver): + + def __init__(self): + self.index = 0 + + def get_sample_data(self, meter_name, parse_url, params, cache): + if self.index >= len(return_values): + yield None + retval = return_values[self.index] + self.index += 1 + yield retval + return FakeDriver + + @staticmethod + def _make_timestamps(count): + now = timeutils.utcnow() + return [(now + datetime.timedelta(seconds=i)).isoformat() + for i in range(count)] + + def _get_samples(self, *resources): + + return [v for v in self.pollster.get_samples(self, {}, resources)] + + def _assert_sample(self, s, volume, resource_id, resource_metadata, + timestamp): + self.assertEqual('foo', s.name) + self.assertEqual(sample.TYPE_CUMULATIVE, s.type) + self.assertEqual('bar', s.unit) + self.assertEqual(volume, s.volume) + self.assertIsNone(s.user_id) + self.assertIsNone(s.project_id) + self.assertEqual(resource_id, s.resource_id) + self.assertEqual(timestamp, s.timestamp) + self.assertEqual(resource_metadata, s.resource_metadata) + + def test_get_samples_one_driver_one_resource(self): + times = self._make_timestamps(2) + fake_driver = self._make_fake_driver((1, 'a', {'spam': 'egg'}, + times[0]), + (2, 'b', None, times[1])) + + self._setup_ext_mgr(http=fake_driver()) + + samples = self._get_samples('http://foo') + + self.assertEqual(1, len(samples)) + self._assert_sample(samples[0], 1, 'a', {'spam': 'egg'}, times[0]) + + def test_get_samples_one_driver_two_resource(self): + times = self._make_timestamps(3) + fake_driver = self._make_fake_driver((1, 'a', {'spam': 'egg'}, + times[0]), + (2, 'b', None, times[1]), + (3, 'c', None, times[2])) + + self._setup_ext_mgr(http=fake_driver()) + + samples = self._get_samples('http://foo', 'http://bar') + + self.assertEqual(2, len(samples)) + self._assert_sample(samples[0], 1, 'a', {'spam': 'egg'}, times[0]) + self._assert_sample(samples[1], 2, 'b', None, times[1]) + + def test_get_samples_two_driver_one_resource(self): + times = self._make_timestamps(4) + fake_driver1 = self._make_fake_driver((1, 'a', {'spam': 'egg'}, + times[0]), + (2, 'b', None,
times[1])) + + fake_driver2 = self._make_fake_driver((11, 'A', None, times[2]), + (12, 'B', None, times[3])) + + self._setup_ext_mgr(http=fake_driver1(), https=fake_driver2()) + + samples = self._get_samples('http://foo') + + self.assertEqual(1, len(samples)) + self._assert_sample(samples[0], 1, 'a', {'spam': 'egg'}, times[0]) + + def test_get_samples_multi_samples(self): + times = self._make_timestamps(2) + fake_driver = self._make_fake_driver([(1, 'a', {'spam': 'egg'}, + times[0]), + (2, 'b', None, times[1])]) + + self._setup_ext_mgr(http=fake_driver()) + + samples = self._get_samples('http://foo') + + self.assertEqual(2, len(samples)) + self._assert_sample(samples[0], 1, 'a', {'spam': 'egg'}, times[0]) + self._assert_sample(samples[1], 2, 'b', None, times[1]) + + def test_get_samples_return_none(self): + fake_driver = self._make_fake_driver(None) + + self._setup_ext_mgr(http=fake_driver()) + + samples = self._get_samples('http://foo') + + self.assertEqual(0, len(samples)) + + def test_get_samples_return_no_generator(self): + class NoneFakeDriver(driver.Driver): + + def get_sample_data(self, meter_name, parse_url, params, cache): + return None + + self._setup_ext_mgr(http=NoneFakeDriver()) + samples = self._get_samples('http://foo') + self.assertFalse(samples) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/network/statistics/test_switch.py ceilometer-5.0.0~b3/ceilometer/tests/unit/network/statistics/test_switch.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/network/statistics/test_switch.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/network/statistics/test_switch.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,28 @@ +# +# Copyright 2014 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the +# License for the specific language governing permissions and limitations +# under the License. + +from ceilometer.network.statistics import table +from ceilometer import sample +from ceilometer.tests.unit.network import statistics + + +class TestTablePollsters(statistics._PollsterTestBase): + + def test_table_pollster(self): + self._test_pollster( + table.TablePollster, + 'switch.table', + sample.TYPE_GAUGE, + 'table') + + def test_table_pollster_active_entries(self): + self._test_pollster( + table.TablePollsterActiveEntries, + 'switch.table.active.entries', + sample.TYPE_GAUGE, + 'entry') + + def test_table_pollster_lookup_packets(self): + self._test_pollster( + table.TablePollsterLookupPackets, + 'switch.table.lookup.packets', + sample.TYPE_GAUGE, + 'packet') + + def test_table_pollster_matched_packets(self): + self._test_pollster( + table.TablePollsterMatchedPackets, + 'switch.table.matched.packets', + sample.TYPE_GAUGE, + 'packet') diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/network/test_floatingip.py ceilometer-5.0.0~b3/ceilometer/tests/unit/network/test_floatingip.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/network/test_floatingip.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/network/test_floatingip.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,97 @@ +#!/usr/bin/env python +# +# Copyright 2012 eNovance +# +# Copyright 2013 IBM Corp +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from oslo_context import context +from oslotest import base + +from ceilometer.agent import manager +from ceilometer.network import floatingip + + +class TestFloatingIPPollster(base.BaseTestCase): + + @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) + def setUp(self): + super(TestFloatingIPPollster, self).setUp() + self.addCleanup(mock.patch.stopall) + self.context = context.get_admin_context() + self.manager = manager.AgentManager() + self.manager.keystone = mock.Mock() + self.manager.keystone.service_catalog.get_endpoints = mock.Mock( + return_value={'network': mock.ANY}) + self.pollster = floatingip.FloatingIPPollster() + fake_ips = self.fake_get_ips() + patch_virt = mock.patch('ceilometer.nova_client.Client.' + 'floating_ip_get_all', + return_value=fake_ips) + patch_virt.start() + + @staticmethod + def fake_get_ips(): + ips = [] + for i in range(1, 4): + ip = mock.MagicMock() + ip.id = i + ip.ip = '1.1.1.%d' % i + ip.pool = 'public' + ips.append(ip) + return ips + + def test_default_discovery(self): + self.assertEqual('endpoint:compute', self.pollster.default_discovery) + + # FIXME(dhellmann): Is there a useful way to define this + # test without a database? 
+ # + # def test_get_samples_none_defined(self): + # try: + # list(self.pollster.get_samples(self.manager, + # self.context) + # ) + # except exception.NoFloatingIpsDefined: + # pass + # else: + # assert False, 'Should have seen an error' + + def test_get_samples_not_empty(self): + samples = list(self.pollster.get_samples(self.manager, {}, ['e'])) + self.assertEqual(3, len(samples)) + # It's necessary to verify all the attributes extracted by Nova + # API /os-floating-ips to make sure they're available and correct. + self.assertEqual(1, samples[0].resource_id) + self.assertEqual("1.1.1.1", samples[0].resource_metadata["address"]) + self.assertEqual("public", samples[0].resource_metadata["pool"]) + + self.assertEqual(2, samples[1].resource_id) + self.assertEqual("1.1.1.2", samples[1].resource_metadata["address"]) + self.assertEqual("public", samples[1].resource_metadata["pool"]) + + self.assertEqual(3, samples[2].resource_id) + self.assertEqual("1.1.1.3", samples[2].resource_metadata["address"]) + self.assertEqual("public", samples[2].resource_metadata["pool"]) + + def test_get_meter_names(self): + samples = list(self.pollster.get_samples(self.manager, {}, ['e'])) + self.assertEqual(set(['ip.floating']), set([s.name for s in samples])) + + def test_get_samples_cached(self): + cache = {'e-floating_ips': self.fake_get_ips()[:2]} + samples = list(self.pollster.get_samples(self.manager, cache, ['e'])) + self.assertEqual(2, len(samples)) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/network/test_notifications.py ceilometer-5.0.0~b3/ceilometer/tests/unit/network/test_notifications.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/network/test_notifications.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/network/test_notifications.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,1480 @@ +# +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
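test_get_samples_cached above seeds the cache with the key 'e-floating_ips' and two addresses, and get_samples then returns exactly two samples without calling the patched Nova client again: the pollster memoizes its API call in the per-cycle cache under an endpoint-scoped key. A plausible sketch of that pattern (function and argument names are illustrative, not the pollster's verbatim code):

    def _iter_floating_ips(nova_client, cache, endpoint):
        key = '%s-floating_ips' % endpoint  # the key format the test uses
        if key not in cache:
            # The first caller in the polling cycle pays for the API call ...
            cache[key] = list(nova_client.floating_ip_get_all())
        # ... and later callers in the same cycle reuse the cached list.
        return iter(cache[key])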
+"""Tests for ceilometer.network.notifications +""" + +import mock + +from ceilometer.network import notifications +from ceilometer.tests import base as test + +NOTIFICATION_NETWORK_CREATE = { + u'_context_roles': [u'anotherrole', + u'Member'], + u'_context_read_deleted': u'no', + u'event_type': u'network.create.end', + u'timestamp': u'2012-09-27 14:11:27.086575', + u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', + u'payload': {u'network': + {u'status': u'ACTIVE', + u'subnets': [], + u'name': u'abcedf', + u'router:external': False, + u'tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', + u'admin_state_up': True, + u'shared': False, + u'id': u'7fd4eb2f-a38e-4c25-8490-71ca8800c9be'}}, + u'priority': u'INFO', + u'_context_is_admin': False, + u'_context_timestamp': u'2012-09-27 14:11:26.924779', + u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', + u'publisher_id': u'network.ubuntu-VirtualBox', + u'message_id': u'9e839576-cc47-4c60-a7d8-5743681213b1'} + +NOTIFICATION_BULK_NETWORK_CREATE = { + '_context_roles': [u'_member_', + u'heat_stack_owner', + u'admin'], + u'_context_request_id': u'req-a2dfdefd-b773-4400-9d52-5e146e119950', + u'_context_read_deleted': u'no', + u'event_type': u'network.create.end', + u'_context_user_name': u'admin', + u'_context_project_name': u'admin', + u'timestamp': u'2014-05-1510: 24: 56.335612', + u'_context_tenant_id': u'980ec4870033453ead65c0470a78b8a8', + u'_context_tenant_name': u'admin', + u'_context_tenant': u'980ec4870033453ead65c0470a78b8a8', + u'message_id': u'914eb601-9390-4a72-8629-f013a4c84467', + u'priority': 'info', + u'_context_is_admin': True, + u'_context_project_id': u'980ec4870033453ead65c0470a78b8a8', + u'_context_timestamp': u'2014-05-1510: 24: 56.285975', + u'_context_user': u'7520940056d54cceb25cbce888300bea', + u'_context_user_id': u'7520940056d54cceb25cbce888300bea', + u'publisher_id': u'network.devstack', + u'payload': { + u'networks': [{u'status': u'ACTIVE', + u'subnets': [], + u'name': u'test2', + u'provider: physical_network': None, + u'admin_state_up': True, + u'tenant_id': u'980ec4870033453ead65c0470a78b8a8', + u'provider: network_type': u'local', + u'shared': False, + u'id': u'7cbc7a66-bbd0-41fc-a186-81c3da5c9843', + u'provider: segmentation_id': None}, + {u'status': u'ACTIVE', + u'subnets': [], + u'name': u'test3', + u'provider: physical_network': None, + u'admin_state_up': True, + u'tenant_id': u'980ec4870033453ead65c0470a78b8a8', + u'provider: network_type': u'local', + u'shared': False, + u'id': u'5a7cb86f-1638-4cc1-8dcc-8bbbc8c7510d', + u'provider: segmentation_id': None}] + } +} + +NOTIFICATION_SUBNET_CREATE = { + u'_context_roles': [u'anotherrole', + u'Member'], + u'_context_read_deleted': u'no', + u'event_type': u'subnet.create.end', + u'timestamp': u'2012-09-27 14:11:27.426620', + u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', + u'payload': { + u'subnet': { + u'name': u'mysubnet', + u'enable_dhcp': True, + u'network_id': u'7fd4eb2f-a38e-4c25-8490-71ca8800c9be', + u'tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', + u'dns_nameservers': [], + u'allocation_pools': [{u'start': u'192.168.42.2', + u'end': u'192.168.42.254'}], + u'host_routes': [], + u'ip_version': 4, + u'gateway_ip': u'192.168.42.1', + u'cidr': u'192.168.42.0/24', + u'id': u'1a3a170d-d7ce-4cc9-b1db-621da15a25f5'}}, + u'priority': u'INFO', + u'_context_is_admin': False, + u'_context_timestamp': u'2012-09-27 14:11:27.214490', + u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', + u'publisher_id': u'network.ubuntu-VirtualBox', + 
u'message_id': u'd86dfc66-d3c3-4aea-b06d-bf37253e6116'} + +NOTIFICATION_BULK_SUBNET_CREATE = { + '_context_roles': [u'_member_', + u'heat_stack_owner', + u'admin'], + u'_context_request_id': u'req-b77e278a-0cce-4987-9f82-15957b234768', + u'_context_read_deleted': u'no', + u'event_type': u'subnet.create.end', + u'_context_user_name': u'admin', + u'_context_project_name': u'admin', + u'timestamp': u'2014-05-1510: 47: 08.133888', + u'_context_tenant_id': u'980ec4870033453ead65c0470a78b8a8', + u'_context_tenant_name': u'admin', + u'_context_tenant': u'980ec4870033453ead65c0470a78b8a8', + u'message_id': u'c7e6f9fd-ead2-415f-8493-b95bedf72e43', + u'priority': u'info', + u'_context_is_admin': True, + u'_context_project_id': u'980ec4870033453ead65c0470a78b8a8', + u'_context_timestamp': u'2014-05-1510: 47: 07.970043', + u'_context_user': u'7520940056d54cceb25cbce888300bea', + u'_context_user_id': u'7520940056d54cceb25cbce888300bea', + u'publisher_id': u'network.devstack', + u'payload': { + u'subnets': [{u'name': u'', + u'enable_dhcp': True, + u'network_id': u'3ddfe60b-34b4-4e9d-9440-43c904b1c58e', + u'tenant_id': u'980ec4870033453ead65c0470a78b8a8', + u'dns_nameservers': [], + u'ipv6_ra_mode': None, + u'allocation_pools': [{u'start': u'10.0.4.2', + u'end': u'10.0.4.254'}], + u'host_routes': [], + u'ipv6_address_mode': None, + u'ip_version': 4, + u'gateway_ip': u'10.0.4.1', + u'cidr': u'10.0.4.0/24', + u'id': u'14020d7b-6dd7-4349-bb8e-8f954c919022'}, + {u'name': u'', + u'enable_dhcp': True, + u'network_id': u'3ddfe60b-34b4-4e9d-9440-43c904b1c58e', + u'tenant_id': u'980ec4870033453ead65c0470a78b8a8', + u'dns_nameservers': [], + u'ipv6_ra_mode': None, + u'allocation_pools': [{u'start': u'10.0.5.2', + u'end': u'10.0.5.254'}], + u'host_routes': [], + u'ipv6_address_mode': None, + u'ip_version': 4, + u'gateway_ip': u'10.0.5.1', + u'cidr': u'10.0.5.0/24', + u'id': u'a080991b-a32a-4bf7-a558-96c4b77d075c'}] + } +} + +NOTIFICATION_PORT_CREATE = { + u'_context_roles': [u'anotherrole', + u'Member'], + u'_context_read_deleted': u'no', + u'event_type': u'port.create.end', + u'timestamp': u'2012-09-27 14:28:31.536370', + u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', + u'payload': { + u'port': { + u'status': u'ACTIVE', + u'name': u'', + u'admin_state_up': True, + u'network_id': u'7fd4eb2f-a38e-4c25-8490-71ca8800c9be', + u'tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', + u'device_owner': u'', + u'mac_address': u'fa:16:3e:75:0c:49', + u'fixed_ips': [{ + u'subnet_id': u'1a3a170d-d7ce-4cc9-b1db-621da15a25f5', + u'ip_address': u'192.168.42.3'}], + u'id': u'9cdfeb92-9391-4da7-95a1-ca214831cfdb', + u'device_id': u''}}, + u'priority': u'INFO', + u'_context_is_admin': False, + u'_context_timestamp': u'2012-09-27 14:28:31.438919', + u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', + u'publisher_id': u'network.ubuntu-VirtualBox', + u'message_id': u'7135b8ab-e13c-4ac8-bc31-75e7f756622a'} + +NOTIFICATION_BULK_PORT_CREATE = { + u'_context_roles': [u'_member_', + u'SwiftOperator'], + u'_context_request_id': u'req-678be9ad-c399-475a-b3e8-8da0c06375aa', + u'_context_read_deleted': u'no', + u'event_type': u'port.create.end', + u'_context_project_name': u'demo', + u'timestamp': u'2014-05-0909: 19: 58.317548', + u'_context_tenant_id': u'133087d90fc149528b501dd8b75ea965', + u'_context_timestamp': u'2014-05-0909: 19: 58.160011', + u'_context_tenant': u'133087d90fc149528b501dd8b75ea965', + u'payload': { + u'ports': [{u'status': u'DOWN', + u'name': u'port--1501135095', + u'allowed_address_pairs': [], + 
u'admin_state_up': True, + u'network_id': u'acf63fdc-b43b-475d-8cca-9429b843d5e8', + u'tenant_id': u'133087d90fc149528b501dd8b75ea965', + u'binding: vnic_type': u'normal', + u'device_owner': u'', + u'mac_address': u'fa: 16: 3e: 37: 10: 39', + u'fixed_ips': [], + u'id': u'296c2c9f-14e9-48da-979d-78b213454c59', + u'security_groups': [ + u'a06f7c9d-9e5a-46b0-9f6c-ce812aa2e5ff'], + u'device_id': u''}, + {u'status': u'DOWN', + u'name': u'', + u'allowed_address_pairs': [], + u'admin_state_up': False, + u'network_id': u'0a8eea59-0146-425c-b470-e9ddfa99ec61', + u'tenant_id': u'133087d90fc149528b501dd8b75ea965', + u'binding: vnic_type': u'normal', + u'device_owner': u'', + u'mac_address': u'fa: 16: 3e: 8e: 6e: 53', + u'fixed_ips': [], + u'id': u'd8bb667f-5cd3-4eca-a984-268e25b1b7a5', + u'security_groups': [ + u'a06f7c9d-9e5a-46b0-9f6c-ce812aa2e5ff'], + u'device_id': u''}] + }, + u'_unique_id': u'60b1650f17fc4fa59492f447321fb26c', + u'_context_is_admin': False, + u'_context_project_id': u'133087d90fc149528b501dd8b75ea965', + u'_context_tenant_name': u'demo', + u'_context_user': u'b1eb48f9c54741f4adc1b4ea512d400c', + u'_context_user_name': u'demo', + u'publisher_id': u'network.os-ci-test12', + u'message_id': u'04aa45e1-3c30-4c69-8638-e7ff8621e9bc', + u'_context_user_id': u'b1eb48f9c54741f4adc1b4ea512d400c', + u'priority': u'INFO' +} + +NOTIFICATION_PORT_UPDATE = { + u'_context_roles': [u'anotherrole', + u'Member'], + u'_context_read_deleted': u'no', + u'event_type': u'port.update.end', + u'timestamp': u'2012-09-27 14:35:09.514052', + u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', + u'payload': { + u'port': { + u'status': u'ACTIVE', + u'name': u'bonjour', + u'admin_state_up': True, + u'network_id': u'7fd4eb2f-a38e-4c25-8490-71ca8800c9be', + u'tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', + u'device_owner': u'', + u'mac_address': u'fa:16:3e:75:0c:49', + u'fixed_ips': [{ + u'subnet_id': u'1a3a170d-d7ce-4cc9-b1db-621da15a25f5', + u'ip_address': u'192.168.42.3'}], + u'id': u'9cdfeb92-9391-4da7-95a1-ca214831cfdb', + u'device_id': u''}}, + u'priority': u'INFO', + u'_context_is_admin': False, + u'_context_timestamp': u'2012-09-27 14:35:09.447682', + u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', + u'publisher_id': u'network.ubuntu-VirtualBox', + u'message_id': u'07b0a3a1-c0b5-40ab-a09c-28dee6bf48f4'} + + +NOTIFICATION_NETWORK_EXISTS = { + u'_context_roles': [u'anotherrole', + u'Member'], + u'_context_read_deleted': u'no', + u'event_type': u'network.exists', + u'timestamp': u'2012-09-27 14:11:27.086575', + u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', + u'payload': {u'network': + {u'status': u'ACTIVE', + u'subnets': [], + u'name': u'abcedf', + u'router:external': False, + u'tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', + u'admin_state_up': True, + u'shared': False, + u'id': u'7fd4eb2f-a38e-4c25-8490-71ca8800c9be'}}, + u'priority': u'INFO', + u'_context_is_admin': False, + u'_context_timestamp': u'2012-09-27 14:11:26.924779', + u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', + u'publisher_id': u'network.ubuntu-VirtualBox', + u'message_id': u'9e839576-cc47-4c60-a7d8-5743681213b1'} + + +NOTIFICATION_ROUTER_EXISTS = { + u'_context_roles': [u'anotherrole', + u'Member'], + u'_context_read_deleted': u'no', + u'event_type': u'router.exists', + u'timestamp': u'2012-09-27 14:11:27.086575', + u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', + u'payload': {u'router': + {'status': u'ACTIVE', + 'external_gateway_info': + {'network_id': 
+                   u'89d55642-4dec-43a4-a617-6cec051393b5'},
+                  'name': u'router1',
+                  'admin_state_up': True,
+                  'tenant_id': u'bb04a2b769c94917b57ba49df7783cfd',
+                  'id': u'ab8bb3ed-df23-4ca0-8f03-b887abcd5c23'}},
+    u'priority': u'INFO',
+    u'_context_is_admin': False,
+    u'_context_timestamp': u'2012-09-27 14:11:26.924779',
+    u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862',
+    u'publisher_id': u'network.ubuntu-VirtualBox',
+    u'message_id': u'9e839576-cc47-4c60-a7d8-5743681213b1'}
+
+
+NOTIFICATION_FLOATINGIP_EXISTS = {
+    u'_context_roles': [u'anotherrole',
+                        u'Member'],
+    u'_context_read_deleted': u'no',
+    u'event_type': u'floatingip.exists',
+    u'timestamp': u'2012-09-27 14:11:27.086575',
+    u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2',
+    u'payload': {u'floatingip':
+                 {'router_id': None,
+                  'tenant_id': u'6e5f9df9b3a249ab834f25fe1b1b81fd',
+                  'floating_network_id':
+                  u'001400f7-1710-4245-98c3-39ba131cc39a',
+                  'fixed_ip_address': None,
+                  'floating_ip_address': u'172.24.4.227',
+                  'port_id': None,
+                  'id': u'2b7cc28c-6f78-4735-9246-257168405de6'}},
+    u'priority': u'INFO',
+    u'_context_is_admin': False,
+    u'_context_timestamp': u'2012-09-27 14:11:26.924779',
+    u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862',
+    u'publisher_id': u'network.ubuntu-VirtualBox',
+    u'message_id': u'9e839576-cc47-4c60-a7d8-5743681213b1'}
+
+
+NOTIFICATION_FLOATINGIP_UPDATE_START = {
+    '_context_roles': [u'_member_',
+                       u'admin',
+                       u'heat_stack_owner'],
+    '_context_request_id': u'req-bd5ed336-242f-4705-836e-8e8f3d0d1ced',
+    '_context_read_deleted': u'no',
+    'event_type': u'floatingip.update.start',
+    '_context_user_name': u'admin',
+    '_context_project_name': u'admin',
+    'timestamp': u'2014-05-31 07:19:43.463101',
+    '_context_tenant_id': u'9fc714821a3747c8bc4e3a9bfbe82732',
+    '_context_tenant_name': u'admin',
+    '_context_tenant': u'9fc714821a3747c8bc4e3a9bfbe82732',
+    'message_id': u'0ab6d71f-ba0a-4501-86fe-6cc20521ef5a',
+    'priority': 'info',
+    '_context_is_admin': True,
+    '_context_project_id': u'9fc714821a3747c8bc4e3a9bfbe82732',
+    '_context_timestamp': u'2014-05-31 07:19:43.460767',
+    '_context_user': u'6ca7b13b33e4425cae0b85e2cf93d9a1',
+    '_context_user_id': u'6ca7b13b33e4425cae0b85e2cf93d9a1',
+    'publisher_id': u'network.devstack',
+    'payload': {
+        u'id': u'64262b2a-8f5d-4ade-9405-0cbdd03c1555',
+        u'floatingip': {
+            u'fixed_ip_address': u'172.24.4.227',
+            u'port_id': u'8ab815c8-03cc-4b45-a673-79bdd0c258f2'
+        }
+    }
+}
+
+
+NOTIFICATION_POOL_CREATE = {
+    "_context_roles": ["heat_stack_owner", "admin"],
+    "_context_request_id": "req-10715057-7590-4529-8020-b994295ee6f4",
+    "event_type": "pool.create.end",
+    "timestamp": "2014-09-15 17:20:50.687649",
+    "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb",
+    "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1",
+    "_unique_id": "ce255443233748ce9cc71b480974df28",
+    "_context_tenant_name": "demo",
+    "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1",
+    "payload": {
+        "pool": {
+            "status": "ACTIVE",
+            "lb_method": "ROUND_ROBIN",
+            "protocol": "HTTP", "description": "",
+            "health_monitors": [],
+            "members": [],
+            "status_description": None,
+            "id": "6d726518-f3aa-4dd4-ac34-e156a35c0aff",
+            "vip_id": None,
+            "name": "my_pool",
+            "admin_state_up": True,
+            "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa",
+            "tenant_id": "a820f2d6293b4a7587d1c582767f43fb",
+            "health_monitors_status": [],
+            "provider": "haproxy"}},
+    "_context_project_name": "demo",
+    "_context_read_deleted": "no",
+    "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f",
+    "_context_tenant":
"a820f2d6293b4a7587d1c582767f43fb", + "priority": "INFO", + "_context_is_admin": True, + "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_timestamp": "2014-09-15 17:20:49.600299", + "_context_user_name": "admin", + "publisher_id": "network.ubuntu", + "message_id": "0a5ed7a6-e516-4aed-9968-4ee9f1b65cc2"} + + +NOTIFICATION_VIP_CREATE = { + "_context_roles": ["heat_stack_owner", "admin"], + "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", + "event_type": "vip.create.end", + "timestamp": "2014-09-15 17:22:11.323644", + "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", + "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", + "_context_tenant_name": "demo", + "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", + "payload": { + "vip": { + "status": "ACTIVE", + "protocol": "HTTP", + "description": "", + "address": "10.0.0.2", + "protocol_port": 80, + "port_id": "2b5dd476-11da-4d46-9f1e-7a75436062f6", + "id": "87a5ce35-f278-47f3-8990-7f695f52f9bf", + "status_description": None, + "name": "my_vip", + "admin_state_up": True, + "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", + "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "connection_limit": -1, + "pool_id": "6d726518-f3aa-4dd4-ac34-e156a35c0aff", + "session_persistence": {"type": "SOURCE_IP"}}}, + "_context_project_name": "demo", + "_context_read_deleted": "no", + "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", + "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", + "priority": "INFO", + "_context_is_admin": True, + "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_timestamp": "2014-09-15 17:22:11.187163", + "_context_user_name": "admin", + "publisher_id": "network.ubuntu", + "message_id": "3895ad11-98a3-4031-92af-f76e96736661"} + + +NOTIFICATION_HEALTH_MONITORS_CREATE = { + "_context_roles": ["heat_stack_owner", "admin"], + "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", + "event_type": "health_monitor.create.end", + "timestamp": "2014-09-15 17:22:11.323644", + "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", + "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", + "_context_tenant_name": "demo", + "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", + "payload": { + "health_monitor": { + "admin_state_up": True, + "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "delay": 10, + "max_retries": 10, + "timeout": 10, + "pools": [], + "type": "PING", + "id": "6dea2d01-c3af-4696-9192-6c938f391f01"}}, + "_context_project_name": "demo", + "_context_read_deleted": "no", + "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", + "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", + "priority": "INFO", + "_context_is_admin": True, + "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_timestamp": "2014-09-15 17:22:11.187163", + "_context_user_name": "admin", + "publisher_id": "network.ubuntu", + "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} + + +NOTIFICATION_MEMBERS_CREATE = { + "_context_roles": ["heat_stack_owner", "admin"], + "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", + "event_type": "member.create.end", + "timestamp": "2014-09-15 17:22:11.323644", + "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", + "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", + "_context_tenant_name": "demo", + "_context_user_id": 
"1c1f7c80efc24a16b835ae1c0802d0a1", + "payload": { + "member": {"admin_state_up": True, + "status": "ACTIVE", + "status_description": None, + "weight": 1, + "address": "10.0.0.3", + "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "protocol_port": 80, + "id": "5e32f960-63ae-4a93-bfa2-339aa83d82ce", + "pool_id": "6b73b9f8-d807-4553-87df-eb34cdd08070"}}, + "_context_project_name": "demo", + "_context_read_deleted": "no", + "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", + "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", + "priority": "INFO", + "_context_is_admin": True, + "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_timestamp": "2014-09-15 17:22:11.187163", + "_context_user_name": "admin", + "publisher_id": "network.ubuntu", + "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} + + +NOTIFICATION_FIREWALL_CREATE = { + "_context_roles": ["heat_stack_owner", "admin"], + "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", + "event_type": "firewall.create.end", + "timestamp": "2014-09-15 17:22:11.323644", + "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", + "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", + "_context_tenant_name": "demo", + "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", + "payload": { + "firewall": { + "status": "ACTIVE", + "name": "my_firewall", + "admin_state_up": True, + "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "firewall_policy_id": "c46a1c15-0496-41c9-beff-9a309a25653e", + "id": "e2d1155f-6bc4-4292-9cfa-ea91af4b38c8", + "description": ""}}, + "_context_project_name": "demo", + "_context_read_deleted": "no", + "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", + "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", + "priority": "INFO", + "_context_is_admin": True, + "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_timestamp": "2014-09-15 17:22:11.187163", + "_context_user_name": "admin", + "publisher_id": "network.ubuntu", + "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} + + +NOTIFICATION_FIREWALL_RULE_CREATE = { + "_context_roles": ["heat_stack_owner", "admin"], + "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", + "event_type": "firewall_rule.create.end", + "timestamp": "2014-09-15 17:22:11.323644", + "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", + "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", + "_context_tenant_name": "demo", + "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", + "payload": { + "firewall_rule": { + "protocol": "tcp", + "description": "", + "source_port": 80, + "source_ip_address": '192.168.255.10', + "destination_ip_address": '10.10.10.1', + "firewall_policy_id": '', + "position": None, + "destination_port": 80, + "id": "53b7c0d3-cb87-4069-9e29-1e866583cc8c", + "name": "rule_01", + "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "enabled": True, + "action": "allow", + "ip_version": 4, + "shared": False}}, + "_context_project_name": "demo", + "_context_read_deleted": "no", + "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", + "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", + "priority": "INFO", + "_context_is_admin": True, + "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_timestamp": "2014-09-15 17:22:11.187163", + "_context_user_name": "admin", + "publisher_id": "network.ubuntu", + "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} + + 
+NOTIFICATION_FIREWALL_POLICY_CREATE = { + "_context_roles": ["heat_stack_owner", "admin"], + "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", + "event_type": "firewall_policy.create.end", + "timestamp": "2014-09-15 17:22:11.323644", + "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", + "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", + "_context_tenant_name": "demo", + "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", + "payload": { + "firewall_policy": {"name": "my_policy", + "firewall_rules": [], + "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "audited": False, + "shared": False, + "id": "c46a1c15-0496-41c9-beff-9a309a25653e", + "description": ""}}, + "_context_project_name": "demo", + "_context_read_deleted": "no", + "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", + "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", + "priority": "INFO", + "_context_is_admin": True, + "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_timestamp": "2014-09-15 17:22:11.187163", + "_context_user_name": "admin", + "publisher_id": "network.ubuntu", + "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} + + +NOTIFICATION_VPNSERVICE_CREATE = { + "_context_roles": ["heat_stack_owner", "admin"], + "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", + "event_type": "vpnservice.create.end", + "timestamp": "2014-09-15 17:22:11.323644", + "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", + "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", + "_context_tenant_name": "demo", + "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", + "payload": { + "vpnservice": {"router_id": "75871c53-e722-4b21-93ed-20cb40b6b672", + "status": "ACTIVE", + "name": "my_vpn", + "admin_state_up": True, + "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", + "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "id": "270c40cc-28d5-4a7e-83da-cc33088ee5d6", + "description": ""}}, + "_context_project_name": "demo", + "_context_read_deleted": "no", + "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", + "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", + "priority": "INFO", + "_context_is_admin": True, + "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_timestamp": "2014-09-15 17:22:11.187163", + "_context_user_name": "admin", + "publisher_id": "network.ubuntu", + "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} + + +NOTIFICATION_IPSEC_POLICY_CREATE = { + "_context_roles": ["heat_stack_owner", "admin"], + "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", + "event_type": "ipsecpolicy.create.end", + "timestamp": "2014-09-15 17:22:11.323644", + "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", + "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", + "_context_tenant_name": "demo", + "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", + "payload": { + "ipsecpolicy": {"encapsulation_mode": "tunnel", + "encryption_algorithm": "aes-128", + "pfs": "group5", + "lifetime": { + "units": "seconds", + "value": 3600}, + "name": "my_ipsec_polixy", + "transform_protocol": "esp", + "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "id": "998d910d-4506-47c9-a160-47ec51ff53fc", + "auth_algorithm": "sha1", + "description": ""}}, + "_context_project_name": "demo", + "_context_read_deleted": "no", + "_context_auth_token": 
"e6daf56d7d1787e1fbefff0ecf29703f", + "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", + "priority": "INFO", + "_context_is_admin": True, + "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_timestamp": "2014-09-15 17:22:11.187163", + "_context_user_name": "admin", + "publisher_id": "network.ubuntu", + "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} + + +NOTIFICATION_IKE_POLICY_CREATE = { + "_context_roles": ["heat_stack_owner", "admin"], + "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", + "event_type": "ikepolicy.create.end", + "timestamp": "2014-09-15 17:22:11.323644", + "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", + "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", + "_context_tenant_name": "demo", + "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", + "payload": { + "ikepolicy": {"encryption_algorithm": "aes-128", + "pfs": "group5", + "name": "my_ike_policy", + "phase1_negotiation_mode": "main", + "lifetime": {"units": "seconds", + "value": 3600}, + "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "ike_version": "v1", + "id": "11cef94e-3f6a-4b65-8058-7deb1838633a", + "auth_algorithm": "sha1", + "description": ""}}, + "_context_project_name": "demo", + "_context_read_deleted": "no", + "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", + "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", + "priority": "INFO", + "_context_is_admin": True, + "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_timestamp": "2014-09-15 17:22:11.187163", + "_context_user_name": "admin", + "publisher_id": "network.ubuntu", + "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} + + +NOTIFICATION_IPSEC_SITE_CONN_CREATE = { + "_context_roles": ["heat_stack_owner", "admin"], + "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", + "event_type": "ipsec_site_connection.create.end", + "timestamp": "2014-09-15 17:22:11.323644", + "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", + "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", + "_context_tenant_name": "demo", + "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", + "payload": { + "ipsec_site_connection": { + "status": "ACTIVE", + "psk": "test", + "initiator": "bi-directional", + "name": "my_ipsec_connection", + "admin_state_up": True, + "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "ipsecpolicy_id": "998d910d-4506-47c9-a160-47ec51ff53fc", + "auth_mode": "psk", "peer_cidrs": ["192.168.255.0/24"], + "mtu": 1500, + "ikepolicy_id": "11cef94e-3f6a-4b65-8058-7deb1838633a", + "dpd": {"action": "hold", + "interval": 30, + "timeout": 120}, + "route_mode": "static", + "vpnservice_id": "270c40cc-28d5-4a7e-83da-cc33088ee5d6", + "peer_address": "10.0.0.1", + "peer_id": "10.0.0.254", + "id": "06f3c1ec-2e01-4ad6-9c98-4252751fc60a", + "description": ""}}, + "_context_project_name": "demo", + "_context_read_deleted": "no", + "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", + "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", + "priority": "INFO", + "_context_is_admin": True, + "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_timestamp": "2014-09-15 17:22:11.187163", + "_context_user_name": "admin", + "publisher_id": "network.ubuntu", + "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} + + +NOTIFICATION_POOL_UPDATE = { + "_context_roles": ["admin"], + "_context_request_id": 
"req-10715057-7590-4529-8020-b994295ee6f4", + "event_type": "pool.update.end", + "timestamp": "2014-09-15 17:20:50.687649", + "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", + "_unique_id": "ce255443233748ce9cc71b480974df28", + "_context_tenant_name": "demo", + "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", + "payload": { + "pool": { + "status": "ACTIVE", + "lb_method": "ROUND_ROBIN", + "protocol": "HTTP", "description": "", + "health_monitors": [], + "members": [], + "status_description": None, + "id": "6d726518-f3aa-4dd4-ac34-e156a35c0aff", + "vip_id": None, + "name": "my_pool", + "admin_state_up": True, + "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", + "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "health_monitors_status": [], + "provider": "haproxy"}}, + "_context_project_name": "demo", + "_context_read_deleted": "no", + "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", + "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", + "priority": "INFO", + "_context_is_admin": True, + "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_timestamp": "2014-09-15 17:20:49.600299", + "_context_user_name": "admin", + "publisher_id": "network.ubuntu", + "message_id": "0a5ed7a6-e516-4aed-9968-4ee9f1b65cc2"} + + +NOTIFICATION_VIP_UPDATE = { + "_context_roles": ["admin"], + "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", + "event_type": "vip.update.end", + "timestamp": "2014-09-15 17:22:11.323644", + "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", + "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", + "_context_tenant_name": "demo", + "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", + "payload": { + "vip": { + "status": "ACTIVE", + "protocol": "HTTP", + "description": "", + "address": "10.0.0.2", + "protocol_port": 80, + "port_id": "2b5dd476-11da-4d46-9f1e-7a75436062f6", + "id": "87a5ce35-f278-47f3-8990-7f695f52f9bf", + "status_description": None, + "name": "my_vip", + "admin_state_up": True, + "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", + "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "connection_limit": -1, + "pool_id": "6d726518-f3aa-4dd4-ac34-e156a35c0aff", + "session_persistence": {"type": "SOURCE_IP"}}}, + "_context_project_name": "demo", + "_context_read_deleted": "no", + "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", + "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", + "priority": "INFO", + "_context_is_admin": True, + "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_timestamp": "2014-09-15 17:22:11.187163", + "_context_user_name": "admin", + "publisher_id": "network.ubuntu", + "message_id": "3895ad11-98a3-4031-92af-f76e96736661"} + + +NOTIFICATION_HEALTH_MONITORS_UPDATE = { + "_context_roles": ["admin"], + "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", + "event_type": "health_monitor.update.end", + "timestamp": "2014-09-15 17:22:11.323644", + "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", + "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", + "_context_tenant_name": "demo", + "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", + "payload": { + "health_monitor": { + "admin_state_up": True, + "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "delay": 10, + "max_retries": 10, + "timeout": 10, + "pools": [], + "type": "PING", + "id": 
"6dea2d01-c3af-4696-9192-6c938f391f01"}}, + "_context_project_name": "demo", + "_context_read_deleted": "no", + "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", + "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", + "priority": "INFO", + "_context_is_admin": True, + "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_timestamp": "2014-09-15 17:22:11.187163", + "_context_user_name": "admin", + "publisher_id": "network.ubuntu", + "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} + + +NOTIFICATION_MEMBERS_UPDATE = { + "_context_roles": ["admin"], + "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", + "event_type": "member.update.end", + "timestamp": "2014-09-15 17:22:11.323644", + "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", + "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", + "_context_tenant_name": "demo", + "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", + "payload": { + "member": {"admin_state_up": True, + "status": "ACTIVE", + "status_description": None, + "weight": 1, + "address": "10.0.0.3", + "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "protocol_port": 80, + "id": "5e32f960-63ae-4a93-bfa2-339aa83d82ce", + "pool_id": "6b73b9f8-d807-4553-87df-eb34cdd08070"}}, + "_context_project_name": "demo", + "_context_read_deleted": "no", + "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", + "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", + "priority": "INFO", + "_context_is_admin": True, + "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_timestamp": "2014-09-15 17:22:11.187163", + "_context_user_name": "admin", + "publisher_id": "network.ubuntu", + "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} + + +NOTIFICATION_FIREWALL_UPDATE = { + "_context_roles": ["admin"], + "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", + "event_type": "firewall.update.end", + "timestamp": "2014-09-15 17:22:11.323644", + "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", + "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", + "_context_tenant_name": "demo", + "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", + "payload": { + "firewall": { + "status": "ACTIVE", + "name": "my_firewall", + "admin_state_up": True, + "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "firewall_policy_id": "c46a1c15-0496-41c9-beff-9a309a25653e", + "id": "e2d1155f-6bc4-4292-9cfa-ea91af4b38c8", + "description": ""}}, + "_context_project_name": "demo", + "_context_read_deleted": "no", + "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", + "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", + "priority": "INFO", + "_context_is_admin": True, + "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_timestamp": "2014-09-15 17:22:11.187163", + "_context_user_name": "admin", + "publisher_id": "network.ubuntu", + "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} + + +NOTIFICATION_FIREWALL_RULE_UPDATE = { + "_context_roles": ["admin"], + "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", + "event_type": "firewall_rule.update.end", + "timestamp": "2014-09-15 17:22:11.323644", + "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", + "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", + "_context_tenant_name": "demo", + "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", + "payload": { + 
"firewall_rule": { + "protocol": "tcp", + "description": "", + "source_port": 80, + "source_ip_address": '192.168.255.10', + "destination_ip_address": '10.10.10.1', + "firewall_policy_id": '', + "position": None, + "destination_port": 80, + "id": "53b7c0d3-cb87-4069-9e29-1e866583cc8c", + "name": "rule_01", + "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "enabled": True, + "action": "allow", + "ip_version": 4, + "shared": False}}, + "_context_project_name": "demo", + "_context_read_deleted": "no", + "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", + "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", + "priority": "INFO", + "_context_is_admin": True, + "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_timestamp": "2014-09-15 17:22:11.187163", + "_context_user_name": "admin", + "publisher_id": "network.ubuntu", + "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} + + +NOTIFICATION_FIREWALL_POLICY_UPDATE = { + "_context_roles": ["admin"], + "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", + "event_type": "firewall_policy.update.end", + "timestamp": "2014-09-15 17:22:11.323644", + "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", + "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", + "_context_tenant_name": "demo", + "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", + "payload": { + "firewall_policy": {"name": "my_policy", + "firewall_rules": [], + "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "audited": False, + "shared": False, + "id": "c46a1c15-0496-41c9-beff-9a309a25653e", + "description": ""}}, + "_context_project_name": "demo", + "_context_read_deleted": "no", + "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", + "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", + "priority": "INFO", + "_context_is_admin": True, + "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_timestamp": "2014-09-15 17:22:11.187163", + "_context_user_name": "admin", + "publisher_id": "network.ubuntu", + "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} + + +NOTIFICATION_VPNSERVICE_UPDATE = { + "_context_roles": ["admin"], + "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", + "event_type": "vpnservice.update.end", + "timestamp": "2014-09-15 17:22:11.323644", + "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", + "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", + "_context_tenant_name": "demo", + "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", + "payload": { + "vpnservice": {"router_id": "75871c53-e722-4b21-93ed-20cb40b6b672", + "status": "ACTIVE", + "name": "my_vpn", + "admin_state_up": True, + "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", + "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "id": "270c40cc-28d5-4a7e-83da-cc33088ee5d6", + "description": ""}}, + "_context_project_name": "demo", + "_context_read_deleted": "no", + "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", + "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", + "priority": "INFO", + "_context_is_admin": True, + "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_timestamp": "2014-09-15 17:22:11.187163", + "_context_user_name": "admin", + "publisher_id": "network.ubuntu", + "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} + + +NOTIFICATION_IPSEC_POLICY_UPDATE = { + "_context_roles": ["admin"], + "_context_request_id": 
"req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", + "event_type": "ipsecpolicy.update.end", + "timestamp": "2014-09-15 17:22:11.323644", + "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", + "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", + "_context_tenant_name": "demo", + "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", + "payload": { + "ipsecpolicy": {"encapsulation_mode": "tunnel", + "encryption_algorithm": "aes-128", + "pfs": "group5", + "lifetime": { + "units": "seconds", + "value": 3600}, + "name": "my_ipsec_polixy", + "transform_protocol": "esp", + "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "id": "998d910d-4506-47c9-a160-47ec51ff53fc", + "auth_algorithm": "sha1", + "description": ""}}, + "_context_project_name": "demo", + "_context_read_deleted": "no", + "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", + "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", + "priority": "INFO", + "_context_is_admin": True, + "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_timestamp": "2014-09-15 17:22:11.187163", + "_context_user_name": "admin", + "publisher_id": "network.ubuntu", + "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} + + +NOTIFICATION_IKE_POLICY_UPDATE = { + "_context_roles": ["admin"], + "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", + "event_type": "ikepolicy.update.end", + "timestamp": "2014-09-15 17:22:11.323644", + "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", + "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", + "_context_tenant_name": "demo", + "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", + "payload": { + "ikepolicy": {"encryption_algorithm": "aes-128", + "pfs": "group5", + "name": "my_ike_policy", + "phase1_negotiation_mode": "main", + "lifetime": {"units": "seconds", + "value": 3600}, + "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "ike_version": "v1", + "id": "11cef94e-3f6a-4b65-8058-7deb1838633a", + "auth_algorithm": "sha1", + "description": ""}}, + "_context_project_name": "demo", + "_context_read_deleted": "no", + "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", + "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", + "priority": "INFO", + "_context_is_admin": True, + "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_timestamp": "2014-09-15 17:22:11.187163", + "_context_user_name": "admin", + "publisher_id": "network.ubuntu", + "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} + + +NOTIFICATION_IPSEC_SITE_CONN_UPDATE = { + "_context_roles": ["admin"], + "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", + "event_type": "ipsec_site_connection.update.end", + "timestamp": "2014-09-15 17:22:11.323644", + "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", + "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", + "_context_tenant_name": "demo", + "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", + "payload": { + "ipsec_site_connection": { + "status": "ACTIVE", + "psk": "test", + "initiator": "bi-directional", + "name": "my_ipsec_connection", + "admin_state_up": True, + "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "ipsecpolicy_id": "998d910d-4506-47c9-a160-47ec51ff53fc", + "auth_mode": "psk", "peer_cidrs": ["192.168.255.0/24"], + "mtu": 1500, + "ikepolicy_id": "11cef94e-3f6a-4b65-8058-7deb1838633a", + "dpd": {"action": "hold", + "interval": 30, 
+ "timeout": 120}, + "route_mode": "static", + "vpnservice_id": "270c40cc-28d5-4a7e-83da-cc33088ee5d6", + "peer_address": "10.0.0.1", + "peer_id": "10.0.0.254", + "id": "06f3c1ec-2e01-4ad6-9c98-4252751fc60a", + "description": ""}}, + "_context_project_name": "demo", + "_context_read_deleted": "no", + "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", + "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", + "priority": "INFO", + "_context_is_admin": True, + "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_timestamp": "2014-09-15 17:22:11.187163", + "_context_user_name": "admin", + "publisher_id": "network.ubuntu", + "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} + +NOTIFICATION_EMPTY_PAYLOAD = { + "_context_roles": ["heat_stack_owner", "admin"], + "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", + "event_type": "health_monitor.create.end", + "timestamp": "2014-09-15 17:22:11.323644", + "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", + "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", + "_context_tenant_name": "demo", + "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", + "payload": { + "health_monitor": {}}, + "_context_project_name": "demo", + "_context_read_deleted": "no", + "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", + "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", + "priority": "INFO", + "_context_is_admin": True, + "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", + "_context_timestamp": "2014-09-15 17:22:11.187163", + "_context_user_name": "admin", + "publisher_id": "network.ubuntu", + "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} + + +class TestNotifications(test.BaseTestCase): + def test_network_create(self): + v = notifications.Network(mock.Mock()) + samples = list(v.process_notification(NOTIFICATION_NETWORK_CREATE)) + self.assertEqual(2, len(samples)) + self.assertEqual("network.create", samples[1].name) + + def test_bulk_network_create(self): + v = notifications.Network(mock.Mock()) + samples = list(v.process_notification( + NOTIFICATION_BULK_NETWORK_CREATE)) + self.assertEqual(4, len(samples)) + self.assertEqual("network", samples[0].name) + self.assertEqual("network.create", samples[1].name) + self.assertEqual("network", samples[2].name) + self.assertEqual("network.create", samples[3].name) + + def test_subnet_create(self): + v = notifications.Subnet(mock.Mock()) + samples = list(v.process_notification(NOTIFICATION_SUBNET_CREATE)) + self.assertEqual(2, len(samples)) + self.assertEqual("subnet.create", samples[1].name) + + def test_bulk_subnet_create(self): + v = notifications.Subnet(mock.Mock()) + samples = list(v.process_notification(NOTIFICATION_BULK_SUBNET_CREATE)) + self.assertEqual(4, len(samples)) + self.assertEqual("subnet", samples[0].name) + self.assertEqual("subnet.create", samples[1].name) + self.assertEqual("subnet", samples[2].name) + self.assertEqual("subnet.create", samples[3].name) + + def test_port_create(self): + v = notifications.Port(mock.Mock()) + samples = list(v.process_notification(NOTIFICATION_PORT_CREATE)) + self.assertEqual(2, len(samples)) + self.assertEqual("port.create", samples[1].name) + + def test_bulk_port_create(self): + v = notifications.Port(mock.Mock()) + samples = list(v.process_notification(NOTIFICATION_BULK_PORT_CREATE)) + self.assertEqual(4, len(samples)) + self.assertEqual("port", samples[0].name) + self.assertEqual("port.create", samples[1].name) + 
self.assertEqual("port", samples[2].name) + self.assertEqual("port.create", samples[3].name) + + def test_port_update(self): + v = notifications.Port(mock.Mock()) + samples = list(v.process_notification(NOTIFICATION_PORT_UPDATE)) + self.assertEqual(2, len(samples)) + self.assertEqual("port.update", samples[1].name) + + def test_network_exists(self): + v = notifications.Network(mock.Mock()) + samples = v.process_notification(NOTIFICATION_NETWORK_EXISTS) + self.assertEqual(1, len(list(samples))) + + def test_router_exists(self): + v = notifications.Router(mock.Mock()) + samples = v.process_notification(NOTIFICATION_ROUTER_EXISTS) + self.assertEqual(1, len(list(samples))) + + def test_floatingip_exists(self): + v = notifications.FloatingIP(mock.Mock()) + samples = list(v.process_notification(NOTIFICATION_FLOATINGIP_EXISTS)) + self.assertEqual(1, len(samples)) + self.assertEqual("ip.floating", samples[0].name) + + def test_floatingip_update(self): + v = notifications.FloatingIP(mock.Mock()) + samples = list(v.process_notification( + NOTIFICATION_FLOATINGIP_UPDATE_START)) + self.assertEqual(len(samples), 2) + self.assertEqual("ip.floating", samples[0].name) + + def test_pool_create(self): + v = notifications.Pool(mock.Mock()) + samples = list(v.process_notification(NOTIFICATION_POOL_CREATE)) + self.assertEqual(2, len(samples)) + self.assertEqual("network.services.lb.pool", samples[0].name) + + def test_vip_create(self): + v = notifications.Vip(mock.Mock()) + samples = list(v.process_notification(NOTIFICATION_VIP_CREATE)) + self.assertEqual(2, len(samples)) + self.assertEqual("network.services.lb.vip", samples[0].name) + + def test_member_create(self): + v = notifications.Member(mock.Mock()) + samples = list(v.process_notification(NOTIFICATION_MEMBERS_CREATE)) + self.assertEqual(2, len(samples)) + self.assertEqual("network.services.lb.member", samples[0].name) + + def test_health_monitor_create(self): + v = notifications.HealthMonitor(mock.Mock()) + samples = list(v.process_notification( + NOTIFICATION_HEALTH_MONITORS_CREATE)) + self.assertEqual(2, len(samples)) + self.assertEqual("network.services.lb.health_monitor", samples[0].name) + + def test_firewall_create(self): + v = notifications.Firewall(mock.Mock()) + samples = list(v.process_notification(NOTIFICATION_FIREWALL_CREATE)) + self.assertEqual(2, len(samples)) + self.assertEqual("network.services.firewall", samples[0].name) + + def test_vpnservice_create(self): + v = notifications.VPNService(mock.Mock()) + samples = list(v.process_notification(NOTIFICATION_VPNSERVICE_CREATE)) + self.assertEqual(2, len(samples)) + self.assertEqual("network.services.vpn", samples[0].name) + + def test_ipsec_connection_create(self): + v = notifications.IPSecSiteConnection(mock.Mock()) + samples = list(v.process_notification( + NOTIFICATION_IPSEC_SITE_CONN_CREATE)) + self.assertEqual(2, len(samples)) + self.assertEqual("network.services.vpn.connections", samples[0].name) + + def test_firewall_policy_create(self): + v = notifications.FirewallPolicy(mock.Mock()) + samples = list(v.process_notification( + NOTIFICATION_FIREWALL_POLICY_CREATE)) + self.assertEqual(2, len(samples)) + self.assertEqual("network.services.firewall.policy", samples[0].name) + + def test_firewall_rule_create(self): + v = notifications.FirewallRule(mock.Mock()) + samples = list(v.process_notification( + NOTIFICATION_FIREWALL_RULE_CREATE)) + self.assertEqual(2, len(samples)) + self.assertEqual("network.services.firewall.rule", samples[0].name) + + def test_ipsec_policy_create(self): + v = 
notifications.IPSecPolicy(mock.Mock()) + samples = list(v.process_notification( + NOTIFICATION_IPSEC_POLICY_CREATE)) + self.assertEqual(2, len(samples)) + self.assertEqual("network.services.vpn.ipsecpolicy", samples[0].name) + + def test_ike_policy_create(self): + v = notifications.IKEPolicy(mock.Mock()) + samples = list(v.process_notification( + NOTIFICATION_IKE_POLICY_CREATE)) + self.assertEqual(2, len(samples)) + self.assertEqual("network.services.vpn.ikepolicy", samples[0].name) + + def test_pool_update(self): + v = notifications.Pool(mock.Mock()) + samples = list(v.process_notification(NOTIFICATION_POOL_UPDATE)) + self.assertEqual(2, len(samples)) + self.assertEqual("network.services.lb.pool", samples[0].name) + + def test_vip_update(self): + v = notifications.Vip(mock.Mock()) + samples = list(v.process_notification(NOTIFICATION_VIP_UPDATE)) + self.assertEqual(2, len(samples)) + self.assertEqual("network.services.lb.vip", samples[0].name) + + def test_member_update(self): + v = notifications.Member(mock.Mock()) + samples = list(v.process_notification(NOTIFICATION_MEMBERS_UPDATE)) + self.assertEqual(2, len(samples)) + self.assertEqual("network.services.lb.member", samples[0].name) + + def test_health_monitor_update(self): + v = notifications.HealthMonitor(mock.Mock()) + samples = list(v.process_notification( + NOTIFICATION_HEALTH_MONITORS_UPDATE)) + self.assertEqual(2, len(samples)) + self.assertEqual("network.services.lb.health_monitor", samples[0].name) + + def test_firewall_update(self): + v = notifications.Firewall(mock.Mock()) + samples = list(v.process_notification(NOTIFICATION_FIREWALL_UPDATE)) + self.assertEqual(2, len(samples)) + self.assertEqual("network.services.firewall", samples[0].name) + + def test_vpnservice_update(self): + v = notifications.VPNService(mock.Mock()) + samples = list(v.process_notification(NOTIFICATION_VPNSERVICE_UPDATE)) + self.assertEqual(2, len(samples)) + self.assertEqual("network.services.vpn", samples[0].name) + + def test_ipsec_connection_update(self): + v = notifications.IPSecSiteConnection(mock.Mock()) + samples = list(v.process_notification( + NOTIFICATION_IPSEC_SITE_CONN_UPDATE)) + self.assertEqual(2, len(samples)) + self.assertEqual("network.services.vpn.connections", samples[0].name) + + def test_firewall_policy_update(self): + v = notifications.FirewallPolicy(mock.Mock()) + samples = list(v.process_notification( + NOTIFICATION_FIREWALL_POLICY_UPDATE)) + self.assertEqual(2, len(samples)) + self.assertEqual("network.services.firewall.policy", samples[0].name) + + def test_firewall_rule_update(self): + v = notifications.FirewallRule(mock.Mock()) + samples = list(v.process_notification( + NOTIFICATION_FIREWALL_RULE_UPDATE)) + self.assertEqual(2, len(samples)) + self.assertEqual("network.services.firewall.rule", samples[0].name) + + def test_ipsec_policy_update(self): + v = notifications.IPSecPolicy(mock.Mock()) + samples = list(v.process_notification( + NOTIFICATION_IPSEC_POLICY_UPDATE)) + self.assertEqual(2, len(samples)) + self.assertEqual("network.services.vpn.ipsecpolicy", samples[0].name) + + def test_ike_policy_update(self): + v = notifications.IKEPolicy(mock.Mock()) + samples = list(v.process_notification( + NOTIFICATION_IKE_POLICY_UPDATE)) + self.assertEqual(2, len(samples)) + self.assertEqual("network.services.vpn.ikepolicy", samples[0].name) + + def test_empty_event_payload(self): + v = notifications.HealthMonitor(mock.Mock()) + samples = list(v.process_notification( + NOTIFICATION_EMPTY_PAYLOAD)) + self.assertEqual(0, len(samples)) + 
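+# NOTE: oslo.messaging builds an endpoint's notification filter from its
+# event_types, so an endpoint with an empty list would never receive
+# anything; the checks below assert each endpoint declares at least one.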
+ +class TestEventTypes(test.BaseTestCase): + + def test_network(self): + v = notifications.Network(mock.Mock()) + events = v.event_types + self.assertIsNotEmpty(events) + + def test_subnet(self): + v = notifications.Subnet(mock.Mock()) + events = v.event_types + self.assertIsNotEmpty(events) + + def test_port(self): + v = notifications.Port(mock.Mock()) + events = v.event_types + self.assertIsNotEmpty(events) + + def test_router(self): + self.assertTrue(notifications.Router(mock.Mock()).event_types) + + def test_floatingip(self): + self.assertTrue(notifications.FloatingIP(mock.Mock()).event_types) + + def test_pool(self): + self.assertTrue(notifications.Pool(mock.Mock()).event_types) + + def test_vip(self): + self.assertTrue(notifications.Vip(mock.Mock()).event_types) + + def test_member(self): + self.assertTrue(notifications.Member(mock.Mock()).event_types) + + def test_health_monitor(self): + self.assertTrue(notifications.HealthMonitor(mock.Mock()).event_types) + + def test_firewall(self): + self.assertTrue(notifications.Firewall(mock.Mock()).event_types) + + def test_vpnservice(self): + self.assertTrue(notifications.VPNService(mock.Mock()).event_types) + + def test_ipsec_connection(self): + self.assertTrue(notifications.IPSecSiteConnection( + mock.Mock()).event_types) + + def test_firewall_policy(self): + self.assertTrue(notifications.FirewallPolicy(mock.Mock()).event_types) + + def test_firewall_rule(self): + self.assertTrue(notifications.FirewallRule(mock.Mock()).event_types) + + def test_ipsec_policy(self): + self.assertTrue(notifications.IPSecPolicy(mock.Mock()).event_types) + + def test_ike_policy(self): + self.assertTrue(notifications.IKEPolicy(mock.Mock()).event_types) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/objectstore/test_rgw_client.py ceilometer-5.0.0~b3/ceilometer/tests/unit/objectstore/test_rgw_client.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/objectstore/test_rgw_client.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/objectstore/test_rgw_client.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,190 @@ +#!/usr/bin/env python +# +# Copyright (C) 2015 Reliance Jio Infocomm Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
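+
+# The canned JSON below mimics radosgw admin API responses (bucket stats
+# and a usage log); the tests drive RGWAdminClient against them with
+# requests.get mocked out, so no live radosgw endpoint is needed.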
+ +import json + +import mock +from oslotest import base + +from ceilometer.objectstore.rgw_client import RGWAdminAPIFailed +from ceilometer.objectstore.rgw_client import RGWAdminClient + + +RGW_ADMIN_BUCKETS = ''' +[ + { + "max_marker": "", + "ver": 2001, + "usage": { + "rgw.main": { + "size_kb_actual": 16000, + "num_objects": 1000, + "size_kb": 1000 + } + }, + "bucket": "somefoo", + "owner": "admin", + "master_ver": 0, + "mtime": 1420176126, + "marker": "default.4126.1", + "bucket_quota": { + "max_objects": -1, + "enabled": false, + "max_size_kb": -1 + }, + "id": "default.4126.1", + "pool": ".rgw.buckets", + "index_pool": ".rgw.buckets.index" + }, + { + "max_marker": "", + "ver": 3, + "usage": { + "rgw.main": { + "size_kb_actual": 43, + "num_objects": 1, + "size_kb": 42 + } + }, + "bucket": "somefoo31", + "owner": "admin", + "master_ver": 0, + "mtime": 1420176134, + "marker": "default.4126.5", + "bucket_quota": { + "max_objects": -1, + "enabled": false, + "max_size_kb": -1 + }, + "id": "default.4126.5", + "pool": ".rgw.buckets", + "index_pool": ".rgw.buckets.index" + } +]''' + +RGW_ADMIN_USAGE = ''' +{ "entries": [ + { "owner": "5f7fe2d5352e466f948f49341e33d107", + "buckets": [ + { "bucket": "", + "time": "2015-01-23 09:00:00.000000Z", + "epoch": 1422003600, + "categories": [ + { "category": "list_buckets", + "bytes_sent": 46, + "bytes_received": 0, + "ops": 3, + "successful_ops": 3}, + { "category": "stat_account", + "bytes_sent": 0, + "bytes_received": 0, + "ops": 1, + "successful_ops": 1}]}, + { "bucket": "foodsgh", + "time": "2015-01-23 09:00:00.000000Z", + "epoch": 1422003600, + "categories": [ + { "category": "create_bucket", + "bytes_sent": 0, + "bytes_received": 0, + "ops": 1, + "successful_ops": 1}, + { "category": "get_obj", + "bytes_sent": 0, + "bytes_received": 0, + "ops": 1, + "successful_ops": 0}, + { "category": "put_obj", + "bytes_sent": 0, + "bytes_received": 238, + "ops": 1, + "successful_ops": 1}]}]}], + "summary": [ + { "user": "5f7fe2d5352e466f948f49341e33d107", + "categories": [ + { "category": "create_bucket", + "bytes_sent": 0, + "bytes_received": 0, + "ops": 1, + "successful_ops": 1}, + { "category": "get_obj", + "bytes_sent": 0, + "bytes_received": 0, + "ops": 1, + "successful_ops": 0}, + { "category": "list_buckets", + "bytes_sent": 46, + "bytes_received": 0, + "ops": 3, + "successful_ops": 3}, + { "category": "put_obj", + "bytes_sent": 0, + "bytes_received": 238, + "ops": 1, + "successful_ops": 1}, + { "category": "stat_account", + "bytes_sent": 0, + "bytes_received": 0, + "ops": 1, + "successful_ops": 1}], + "total": { "bytes_sent": 46, + "bytes_received": 238, + "ops": 7, + "successful_ops": 6}}]} +''' + +buckets_json = json.loads(RGW_ADMIN_BUCKETS) +usage_json = json.loads(RGW_ADMIN_USAGE) + + +class TestRGWAdminClient(base.BaseTestCase): + + def setUp(self): + super(TestRGWAdminClient, self).setUp() + self.client = RGWAdminClient('http://127.0.0.1:8080/admin', + 'abcde', 'secret') + self.get_resp = mock.MagicMock() + self.get = mock.patch('requests.get', + return_value=self.get_resp).start() + + def test_make_request_exception(self): + self.get_resp.status_code = 403 + self.assertRaises(RGWAdminAPIFailed, self.client._make_request, + *('foo', {})) + + def test_make_request(self): + self.get_resp.status_code = 200 + self.get_resp.json.return_value = buckets_json + actual = self.client._make_request('foo', []) + self.assertEqual(buckets_json, actual) + + def test_get_buckets(self): + self.get_resp.status_code = 200 + self.get_resp.json.return_value = 
buckets_json + actual = self.client.get_bucket('foo') + bucket_list = [RGWAdminClient.Bucket('somefoo', 1000, 1000), + RGWAdminClient.Bucket('somefoo31', 1, 42), + ] + expected = {'num_buckets': 2, 'size': 1042, 'num_objects': 1001, + 'buckets': bucket_list} + self.assertEqual(expected, actual) + + def test_get_usage(self): + self.get_resp.status_code = 200 + self.get_resp.json.return_value = usage_json + actual = self.client.get_usage('foo') + expected = 7 + self.assertEqual(expected, actual) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/objectstore/test_rgw.py ceilometer-5.0.0~b3/ceilometer/tests/unit/objectstore/test_rgw.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/objectstore/test_rgw.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/objectstore/test_rgw.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,181 @@ +#!/usr/bin/env python +# +# Copyright 2015 Reliance Jio Infocomm Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import collections + +from keystoneclient import exceptions +import mock +from oslotest import base +from oslotest import mockpatch +import testscenarios.testcase + +from ceilometer.agent import manager +from ceilometer.objectstore import rgw +from ceilometer.objectstore.rgw_client import RGWAdminClient as rgw_client + +bucket_list1 = [rgw_client.Bucket('somefoo1', 10, 7)] +bucket_list2 = [rgw_client.Bucket('somefoo2', 2, 9)] +bucket_list3 = [rgw_client.Bucket('unlisted', 100, 100)] + +GET_BUCKETS = [('tenant-000', {'num_buckets': 2, 'size': 1042, + 'num_objects': 1001, 'buckets': bucket_list1}), + ('tenant-001', {'num_buckets': 2, 'size': 1042, + 'num_objects': 1001, 'buckets': bucket_list2}), + ('tenant-002-ignored', {'num_buckets': 2, 'size': 1042, + 'num_objects': 1001, + 'buckets': bucket_list3})] + +GET_USAGE = [('tenant-000', 10), + ('tenant-001', 11), + ('tenant-002-ignored', 12)] + +Tenant = collections.namedtuple('Tenant', 'id') +ASSIGNED_TENANTS = [Tenant('tenant-000'), Tenant('tenant-001')] + + +class TestManager(manager.AgentManager): + + def __init__(self): + super(TestManager, self).__init__() + self.keystone = mock.MagicMock() + self.keystone.service_catalog.url_for.return_value = '/endpoint' + + +class TestRgwPollster(testscenarios.testcase.WithScenarios, + base.BaseTestCase): + + # Define scenarios to run all of the tests against all of the + # pollsters. 
+    scenarios = [
+        ('radosgw.objects',
+         {'factory': rgw.ObjectsPollster}),
+        ('radosgw.objects.size',
+         {'factory': rgw.ObjectsSizePollster}),
+        ('radosgw.objects.containers',
+         {'factory': rgw.ObjectsContainersPollster}),
+        ('radosgw.containers.objects',
+         {'factory': rgw.ContainersObjectsPollster}),
+        ('radosgw.containers.objects.size',
+         {'factory': rgw.ContainersSizePollster}),
+        ('radosgw.api.request',
+         {'factory': rgw.UsagePollster}),
+    ]
+
+    @staticmethod
+    def fake_ks_service_catalog_url_for(*args, **kwargs):
+        raise exceptions.EndpointNotFound("Fake keystone exception")
+
+    def fake_iter_accounts(self, ksclient, cache, tenants):
+        tenant_ids = [t.id for t in tenants]
+        for i in self.ACCOUNTS:
+            if i[0] in tenant_ids:
+                yield i
+
+    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
+    def setUp(self):
+        super(TestRgwPollster, self).setUp()
+        self.pollster = self.factory()
+        self.manager = TestManager()
+
+        if self.pollster.CACHE_KEY_METHOD == 'rgw.get_bucket':
+            self.ACCOUNTS = GET_BUCKETS
+        else:
+            self.ACCOUNTS = GET_USAGE
+
+    def tearDown(self):
+        super(TestRgwPollster, self).tearDown()
+        rgw._Base._ENDPOINT = None
+
+    def test_iter_accounts_no_cache(self):
+        cache = {}
+        with mockpatch.PatchObject(self.factory, '_get_account_info',
+                                   return_value=[]):
+            data = list(self.pollster._iter_accounts(mock.Mock(), cache,
+                                                     ASSIGNED_TENANTS))
+
+        self.assertIn(self.pollster.CACHE_KEY_METHOD, cache)
+        self.assertEqual([], data)
+
+    def test_iter_accounts_cached(self):
+        # Verify that if a method has already been called, _iter_accounts
+        # uses the cached version and doesn't call rgw_client.
+        mock_method = mock.Mock()
+        mock_method.side_effect = AssertionError(
+            'should not be called',
+        )
+
+        api_method = 'get_%s' % self.pollster.METHOD
+
+        with mockpatch.PatchObject(rgw_client, api_method, new=mock_method):
+            cache = {self.pollster.CACHE_KEY_METHOD: [self.ACCOUNTS[0]]}
+            data = list(self.pollster._iter_accounts(mock.Mock(), cache,
+                                                     ASSIGNED_TENANTS))
+        self.assertEqual([self.ACCOUNTS[0]], data)
+
+    def test_metering(self):
+        with mockpatch.PatchObject(self.factory, '_iter_accounts',
+                                   side_effect=self.fake_iter_accounts):
+            samples = list(self.pollster.get_samples(self.manager, {},
+                                                     ASSIGNED_TENANTS))
+
+        self.assertEqual(2, len(samples), self.pollster.__class__)
+
+    def test_get_meter_names(self):
+        with mockpatch.PatchObject(self.factory, '_iter_accounts',
+                                   side_effect=self.fake_iter_accounts):
+            samples = list(self.pollster.get_samples(self.manager, {},
+                                                     ASSIGNED_TENANTS))
+
+        self.assertEqual(set([samples[0].name]),
+                         set([s.name for s in samples]))
+
+    def test_only_poll_assigned(self):
+        mock_method = mock.MagicMock()
+        endpoint = 'http://127.0.0.1:8000/admin'
+        api_method = 'get_%s' % self.pollster.METHOD
+        with mockpatch.PatchObject(rgw_client, api_method, new=mock_method):
+            with mockpatch.PatchObject(
+                    self.manager.keystone.service_catalog, 'url_for',
+                    return_value=endpoint):
+                list(self.pollster.get_samples(self.manager, {},
+                                               ASSIGNED_TENANTS))
+        expected = [mock.call(t.id)
+                    for t in ASSIGNED_TENANTS]
+        self.assertEqual(expected, mock_method.call_args_list)
+
+    def test_get_endpoint_only_once(self):
+        mock_url_for = mock.MagicMock()
+        mock_url_for.return_value = '/endpoint'
+        api_method = 'get_%s' % self.pollster.METHOD
+        with mockpatch.PatchObject(rgw_client, api_method,
+                                   new=mock.MagicMock()):
+            with mockpatch.PatchObject(
+                    self.manager.keystone.service_catalog, 'url_for',
+                    new=mock_url_for):
+                list(self.pollster.get_samples(self.manager, {},
ASSIGNED_TENANTS)) + list(self.pollster.get_samples(self.manager, {}, + ASSIGNED_TENANTS)) + self.assertEqual(1, mock_url_for.call_count) + + def test_endpoint_notfound(self): + with mockpatch.PatchObject( + self.manager.keystone.service_catalog, 'url_for', + side_effect=self.fake_ks_service_catalog_url_for): + samples = list(self.pollster.get_samples(self.manager, {}, + ASSIGNED_TENANTS)) + + self.assertEqual(0, len(samples)) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/objectstore/test_swift.py ceilometer-5.0.0~b3/ceilometer/tests/unit/objectstore/test_swift.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/objectstore/test_swift.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/objectstore/test_swift.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,213 @@ +#!/usr/bin/env python +# +# Copyright 2012 eNovance +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import collections + +from keystoneclient import exceptions +import mock +from oslotest import base +from oslotest import mockpatch +from swiftclient import client as swift_client +import testscenarios.testcase + +from ceilometer.agent import manager +from ceilometer.objectstore import swift + +HEAD_ACCOUNTS = [('tenant-000', {'x-account-object-count': 12, + 'x-account-bytes-used': 321321321, + 'x-account-container-count': 7, + }), + ('tenant-001', {'x-account-object-count': 34, + 'x-account-bytes-used': 9898989898, + 'x-account-container-count': 17, + }), + ('tenant-002-ignored', {'x-account-object-count': 34, + 'x-account-bytes-used': 9898989898, + 'x-account-container-count': 17, + })] + +GET_ACCOUNTS = [('tenant-000', ({'x-account-object-count': 10, + 'x-account-bytes-used': 123123, + 'x-account-container-count': 2, + }, + [{'count': 10, + 'bytes': 123123, + 'name': 'my_container'}, + {'count': 0, + 'bytes': 0, + 'name': 'new_container' + }])), + ('tenant-001', ({'x-account-object-count': 0, + 'x-account-bytes-used': 0, + 'x-account-container-count': 0, + }, [])), + ('tenant-002-ignored', ({'x-account-object-count': 0, + 'x-account-bytes-used': 0, + 'x-account-container-count': 0, + }, []))] + +Tenant = collections.namedtuple('Tenant', 'id') +ASSIGNED_TENANTS = [Tenant('tenant-000'), Tenant('tenant-001')] + + +class TestManager(manager.AgentManager): + + def __init__(self): + super(TestManager, self).__init__() + self.keystone = mock.MagicMock() + + +class TestSwiftPollster(testscenarios.testcase.WithScenarios, + base.BaseTestCase): + + # Define scenarios to run all of the tests against all of the + # pollsters. 
+ scenarios = [ + ('storage.objects', + {'factory': swift.ObjectsPollster}), + ('storage.objects.size', + {'factory': swift.ObjectsSizePollster}), + ('storage.objects.containers', + {'factory': swift.ObjectsContainersPollster}), + ('storage.containers.objects', + {'factory': swift.ContainersObjectsPollster}), + ('storage.containers.objects.size', + {'factory': swift.ContainersSizePollster}), + ] + + @staticmethod + def fake_ks_service_catalog_url_for(*args, **kwargs): + raise exceptions.EndpointNotFound("Fake keystone exception") + + def fake_iter_accounts(self, ksclient, cache, tenants): + tenant_ids = [t.id for t in tenants] + for i in self.ACCOUNTS: + if i[0] in tenant_ids: + yield i + + @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) + def setUp(self): + super(TestSwiftPollster, self).setUp() + self.pollster = self.factory() + self.manager = TestManager() + + if self.pollster.CACHE_KEY_METHOD == 'swift.head_account': + self.ACCOUNTS = HEAD_ACCOUNTS + else: + self.ACCOUNTS = GET_ACCOUNTS + + def tearDown(self): + super(TestSwiftPollster, self).tearDown() + swift._Base._ENDPOINT = None + + def test_iter_accounts_no_cache(self): + cache = {} + with mockpatch.PatchObject(self.factory, '_get_account_info', + return_value=[]): + data = list(self.pollster._iter_accounts(mock.Mock(), cache, + ASSIGNED_TENANTS)) + + self.assertIn(self.pollster.CACHE_KEY_METHOD, cache) + self.assertEqual([], data) + + def test_iter_accounts_cached(self): + # Verify that if a method has already been called, _iter_accounts + # uses the cached version and doesn't call swiftclient. + mock_method = mock.Mock() + mock_method.side_effect = AssertionError( + 'should not be called', + ) + + api_method = '%s_account' % self.pollster.METHOD + with mockpatch.PatchObject(swift_client, api_method, new=mock_method): + with mockpatch.PatchObject(self.factory, '_neaten_url'): + cache = {self.pollster.CACHE_KEY_METHOD: [self.ACCOUNTS[0]]} + data = list(self.pollster._iter_accounts(mock.Mock(), cache, + ASSIGNED_TENANTS)) + self.assertEqual([self.ACCOUNTS[0]], data) + + def test_neaten_url(self): + test_endpoints = ['http://127.0.0.1:8080', + 'http://127.0.0.1:8080/swift'] + test_tenant_id = 'a7fd1695fa154486a647e44aa99a1b9b' + for test_endpoint in test_endpoints: + standard_url = test_endpoint + '/v1/AUTH_' + test_tenant_id + + url = swift._Base._neaten_url(test_endpoint, test_tenant_id) + self.assertEqual(standard_url, url) + url = swift._Base._neaten_url(test_endpoint + '/', test_tenant_id) + self.assertEqual(standard_url, url) + url = swift._Base._neaten_url(test_endpoint + '/v1', + test_tenant_id) + self.assertEqual(standard_url, url) + url = swift._Base._neaten_url(standard_url, test_tenant_id) + self.assertEqual(standard_url, url) + + def test_metering(self): + with mockpatch.PatchObject(self.factory, '_iter_accounts', + side_effect=self.fake_iter_accounts): + samples = list(self.pollster.get_samples(self.manager, {}, + ASSIGNED_TENANTS)) + + self.assertEqual(2, len(samples), self.pollster.__class__) + + def test_get_meter_names(self): + with mockpatch.PatchObject(self.factory, '_iter_accounts', + side_effect=self.fake_iter_accounts): + samples = list(self.pollster.get_samples(self.manager, {}, + ASSIGNED_TENANTS)) + + self.assertEqual(set([samples[0].name]), + set([s.name for s in samples])) + + def test_only_poll_assigned(self): + mock_method = mock.MagicMock() + endpoint = 'end://point/' + api_method = '%s_account' % self.pollster.METHOD + with mockpatch.PatchObject(swift_client, api_method, 
new=mock_method): + with mockpatch.PatchObject( + self.manager.keystone.service_catalog, 'url_for', + return_value=endpoint): + list(self.pollster.get_samples(self.manager, {}, + ASSIGNED_TENANTS)) + expected = [mock.call(self.pollster._neaten_url(endpoint, t.id), + self.manager.keystone.auth_token) + for t in ASSIGNED_TENANTS] + self.assertEqual(expected, mock_method.call_args_list) + + def test_get_endpoint_only_once(self): + endpoint = 'end://point/' + mock_url_for = mock.MagicMock(return_value=endpoint) + api_method = '%s_account' % self.pollster.METHOD + with mockpatch.PatchObject(swift_client, api_method, + new=mock.MagicMock()): + with mockpatch.PatchObject( + self.manager.keystone.service_catalog, 'url_for', + new=mock_url_for): + list(self.pollster.get_samples(self.manager, {}, + ASSIGNED_TENANTS)) + list(self.pollster.get_samples(self.manager, {}, + ASSIGNED_TENANTS)) + self.assertEqual(1, mock_url_for.call_count) + + def test_endpoint_notfound(self): + with mockpatch.PatchObject( + self.manager.keystone.service_catalog, 'url_for', + side_effect=self.fake_ks_service_catalog_url_for): + samples = list(self.pollster.get_samples(self.manager, {}, + ASSIGNED_TENANTS)) + + self.assertEqual(0, len(samples)) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/publisher/test_file.py ceilometer-5.0.0~b3/ceilometer/tests/unit/publisher/test_file.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/publisher/test_file.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/publisher/test_file.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,120 @@ +# +# Copyright 2013-2014 eNovance +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
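The test_file.py tests that follow exercise the file:// publisher URL. As their assertions confirm, the max_bytes and backup_count query options end up on a stdlib RotatingFileHandler, with 0 (no rotation) as the default when the options are omitted. A minimal sketch of that URL-to-handler mapping; rotating_handler_from_url is an illustrative helper, not ceilometer's actual FilePublisher code:

    import logging.handlers

    from oslo_utils import netutils
    from six.moves.urllib import parse as urlparse

    def rotating_handler_from_url(url):
        # e.g. url = 'file:///tmp/log_file?max_bytes=50&backup_count=3'
        parsed = netutils.urlsplit(url)
        params = urlparse.parse_qs(parsed.query)
        # Default both options to 0, matching what test_file_publisher
        # asserts when they are omitted; non-numeric values raise
        # ValueError, which is why test_file_publisher_invalid expects
        # no logger to be set up at all.
        max_bytes = int(params.get('max_bytes', ['0'])[0])
        backup_count = int(params.get('backup_count', ['0'])[0])
        return logging.handlers.RotatingFileHandler(
            parsed.path, maxBytes=max_bytes, backupCount=backup_count)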
+"""Tests for ceilometer/publisher/file.py +""" + +import datetime +import logging.handlers +import os +import tempfile + +from oslo_utils import netutils +from oslotest import base + +from ceilometer.publisher import file +from ceilometer import sample + + +class TestFilePublisher(base.BaseTestCase): + + test_data = [ + sample.Sample( + name='test', + type=sample.TYPE_CUMULATIVE, + unit='', + volume=1, + user_id='test', + project_id='test', + resource_id='test_run_tasks', + timestamp=datetime.datetime.utcnow().isoformat(), + resource_metadata={'name': 'TestPublish'}, + ), + sample.Sample( + name='test2', + type=sample.TYPE_CUMULATIVE, + unit='', + volume=1, + user_id='test', + project_id='test', + resource_id='test_run_tasks', + timestamp=datetime.datetime.utcnow().isoformat(), + resource_metadata={'name': 'TestPublish'}, + ), + sample.Sample( + name='test2', + type=sample.TYPE_CUMULATIVE, + unit='', + volume=1, + user_id='test', + project_id='test', + resource_id='test_run_tasks', + timestamp=datetime.datetime.utcnow().isoformat(), + resource_metadata={'name': 'TestPublish'}, + ), + ] + + def test_file_publisher_maxbytes(self): + # Test valid configurations + tempdir = tempfile.mkdtemp() + name = '%s/log_file' % tempdir + parsed_url = netutils.urlsplit('file://%s?max_bytes=50&backup_count=3' + % name) + publisher = file.FilePublisher(parsed_url) + publisher.publish_samples(None, + self.test_data) + + handler = publisher.publisher_logger.handlers[0] + self.assertIsInstance(handler, + logging.handlers.RotatingFileHandler) + self.assertEqual([50, name, 3], [handler.maxBytes, + handler.baseFilename, + handler.backupCount]) + # The rotating file gets created since only allow 50 bytes. + self.assertTrue(os.path.exists('%s.1' % name)) + + def test_file_publisher(self): + # Test missing max bytes, backup count configurations + tempdir = tempfile.mkdtemp() + name = '%s/log_file_plain' % tempdir + parsed_url = netutils.urlsplit('file://%s' % name) + publisher = file.FilePublisher(parsed_url) + publisher.publish_samples(None, + self.test_data) + + handler = publisher.publisher_logger.handlers[0] + self.assertIsInstance(handler, + logging.handlers.RotatingFileHandler) + self.assertEqual([0, name, 0], [handler.maxBytes, + handler.baseFilename, + handler.backupCount]) + # Test the content is corrected saved in the file + self.assertTrue(os.path.exists(name)) + with open(name, 'r') as f: + content = f.read() + for sample_item in self.test_data: + self.assertIn(sample_item.id, content) + self.assertIn(sample_item.timestamp, content) + + def test_file_publisher_invalid(self): + # Test invalid max bytes, backup count configurations + tempdir = tempfile.mkdtemp() + parsed_url = netutils.urlsplit( + 'file://%s/log_file_bad' + '?max_bytes=yus&backup_count=5y' % tempdir) + publisher = file.FilePublisher(parsed_url) + publisher.publish_samples(None, + self.test_data) + + self.assertIsNone(publisher.publisher_logger) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/publisher/test_kafka_broker_publisher.py ceilometer-5.0.0~b3/ceilometer/tests/unit/publisher/test_kafka_broker_publisher.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/publisher/test_kafka_broker_publisher.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/publisher/test_kafka_broker_publisher.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,210 @@ +# +# Copyright 2015 Cisco Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Tests for ceilometer/publisher/kafka_broker.py +""" +import datetime +import uuid + +import mock +from oslo_utils import netutils + +from ceilometer.event.storage import models as event +from ceilometer.publisher import kafka_broker as kafka +from ceilometer.publisher import messaging as msg_publisher +from ceilometer import sample +from ceilometer.tests import base as tests_base + + +@mock.patch('ceilometer.publisher.kafka_broker.LOG', mock.Mock()) +@mock.patch('ceilometer.publisher.kafka_broker.kafka.KafkaClient', + mock.Mock()) +class TestKafkaPublisher(tests_base.BaseTestCase): + test_event_data = [ + event.Event(message_id=uuid.uuid4(), + event_type='event_%d' % i, + generated=datetime.datetime.utcnow(), + traits=[], raw={}) + for i in range(0, 5) + ] + + test_data = [ + sample.Sample( + name='test', + type=sample.TYPE_CUMULATIVE, + unit='', + volume=1, + user_id='test', + project_id='test', + resource_id='test_run_tasks', + timestamp=datetime.datetime.utcnow().isoformat(), + resource_metadata={'name': 'TestPublish'}, + ), + sample.Sample( + name='test', + type=sample.TYPE_CUMULATIVE, + unit='', + volume=1, + user_id='test', + project_id='test', + resource_id='test_run_tasks', + timestamp=datetime.datetime.utcnow().isoformat(), + resource_metadata={'name': 'TestPublish'}, + ), + sample.Sample( + name='test2', + type=sample.TYPE_CUMULATIVE, + unit='', + volume=1, + user_id='test', + project_id='test', + resource_id='test_run_tasks', + timestamp=datetime.datetime.utcnow().isoformat(), + resource_metadata={'name': 'TestPublish'}, + ), + sample.Sample( + name='test2', + type=sample.TYPE_CUMULATIVE, + unit='', + volume=1, + user_id='test', + project_id='test', + resource_id='test_run_tasks', + timestamp=datetime.datetime.utcnow().isoformat(), + resource_metadata={'name': 'TestPublish'}, + ), + sample.Sample( + name='test3', + type=sample.TYPE_CUMULATIVE, + unit='', + volume=1, + user_id='test', + project_id='test', + resource_id='test_run_tasks', + timestamp=datetime.datetime.utcnow().isoformat(), + resource_metadata={'name': 'TestPublish'}, + ), + ] + + def test_publish(self): + publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( + 'kafka://127.0.0.1:9092?topic=ceilometer')) + + with mock.patch.object(publisher, '_producer') as fake_producer: + publisher.publish_samples(mock.MagicMock(), self.test_data) + self.assertEqual(5, len(fake_producer.send_messages.mock_calls)) + self.assertEqual(0, len(publisher.local_queue)) + + def test_publish_without_options(self): + publisher = kafka.KafkaBrokerPublisher( + netutils.urlsplit('kafka://127.0.0.1:9092')) + + with mock.patch.object(publisher, '_producer') as fake_producer: + publisher.publish_samples(mock.MagicMock(), self.test_data) + self.assertEqual(5, len(fake_producer.send_messages.mock_calls)) + self.assertEqual(0, len(publisher.local_queue)) + + def test_publish_to_host_without_policy(self): + publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( + 
'kafka://127.0.0.1:9092?topic=ceilometer')) + self.assertEqual('default', publisher.policy) + + publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( + 'kafka://127.0.0.1:9092?topic=ceilometer&policy=test')) + self.assertEqual('default', publisher.policy) + + def test_publish_to_host_with_default_policy(self): + publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( + 'kafka://127.0.0.1:9092?topic=ceilometer&policy=default')) + + with mock.patch.object(publisher, '_producer') as fake_producer: + fake_producer.send_messages.side_effect = TypeError + self.assertRaises(msg_publisher.DeliveryFailure, + publisher.publish_samples, + mock.MagicMock(), self.test_data) + self.assertEqual(100, len(fake_producer.send_messages.mock_calls)) + self.assertEqual(0, len(publisher.local_queue)) + + def test_publish_to_host_with_drop_policy(self): + publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( + 'kafka://127.0.0.1:9092?topic=ceilometer&policy=drop')) + + with mock.patch.object(publisher, '_producer') as fake_producer: + fake_producer.send_messages.side_effect = Exception("test") + publisher.publish_samples(mock.MagicMock(), self.test_data) + self.assertEqual(1, len(fake_producer.send_messages.mock_calls)) + self.assertEqual(0, len(publisher.local_queue)) + + def test_publish_to_host_with_queue_policy(self): + publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( + 'kafka://127.0.0.1:9092?topic=ceilometer&policy=queue')) + + with mock.patch.object(publisher, '_producer') as fake_producer: + fake_producer.send_messages.side_effect = Exception("test") + publisher.publish_samples(mock.MagicMock(), self.test_data) + self.assertEqual(1, len(fake_producer.send_messages.mock_calls)) + self.assertEqual(1, len(publisher.local_queue)) + + def test_publish_to_down_host_with_default_queue_size(self): + publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( + 'kafka://127.0.0.1:9092?topic=ceilometer&policy=queue')) + + with mock.patch.object(publisher, '_producer') as fake_producer: + fake_producer.send_messages.side_effect = Exception("test") + + for i in range(0, 2000): + for s in self.test_data: + s.name = 'test-%d' % i + publisher.publish_samples(mock.MagicMock(), self.test_data) + + self.assertEqual(1024, len(publisher.local_queue)) + self.assertEqual('test-976', + publisher.local_queue[0][2][0]['counter_name']) + self.assertEqual('test-1999', + publisher.local_queue[1023][2][0]['counter_name']) + + def test_publish_to_host_from_down_to_up_with_queue(self): + publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( + 'kafka://127.0.0.1:9092?topic=ceilometer&policy=queue')) + + with mock.patch.object(publisher, '_producer') as fake_producer: + fake_producer.send_messages.side_effect = Exception("test") + for i in range(0, 16): + for s in self.test_data: + s.name = 'test-%d' % i + publisher.publish_samples(mock.MagicMock(), self.test_data) + + self.assertEqual(16, len(publisher.local_queue)) + + fake_producer.send_messages.side_effect = None + for s in self.test_data: + s.name = 'test-%d' % 16 + publisher.publish_samples(mock.MagicMock(), self.test_data) + self.assertEqual(0, len(publisher.local_queue)) + + def test_publish_event_with_default_policy(self): + publisher = kafka.KafkaBrokerPublisher( + netutils.urlsplit('kafka://127.0.0.1:9092?topic=ceilometer')) + + with mock.patch.object(publisher, '_producer') as fake_producer: + publisher.publish_events(mock.MagicMock(), self.test_event_data) + self.assertEqual(5, len(fake_producer.send_messages.mock_calls)) + + with 
mock.patch.object(publisher, '_producer') as fake_producer: + fake_producer.send_messages.side_effect = Exception("test") + self.assertRaises(msg_publisher.DeliveryFailure, + publisher.publish_events, + mock.MagicMock(), self.test_event_data) + self.assertEqual(100, len(fake_producer.send_messages.mock_calls)) + self.assertEqual(0, len(publisher.local_queue)) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/publisher/test_messaging_publisher.py ceilometer-5.0.0~b3/ceilometer/tests/unit/publisher/test_messaging_publisher.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/publisher/test_messaging_publisher.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/publisher/test_messaging_publisher.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,400 @@ +# +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Tests for ceilometer/publisher/messaging.py +""" +import datetime +import uuid + +import eventlet +import mock +from oslo_config import fixture as fixture_config +from oslo_context import context +from oslo_utils import netutils +import testscenarios.testcase + +from ceilometer.event.storage import models as event +from ceilometer import messaging +from ceilometer.publisher import messaging as msg_publisher +from ceilometer import sample +from ceilometer.tests import base as tests_base + + +class BasePublisherTestCase(tests_base.BaseTestCase): + test_event_data = [ + event.Event(message_id=uuid.uuid4(), + event_type='event_%d' % i, + generated=datetime.datetime.utcnow(), + traits=[], raw={}) + for i in range(0, 5) + ] + + test_sample_data = [ + sample.Sample( + name='test', + type=sample.TYPE_CUMULATIVE, + unit='', + volume=1, + user_id='test', + project_id='test', + resource_id='test_run_tasks', + timestamp=datetime.datetime.utcnow().isoformat(), + resource_metadata={'name': 'TestPublish'}, + ), + sample.Sample( + name='test', + type=sample.TYPE_CUMULATIVE, + unit='', + volume=1, + user_id='test', + project_id='test', + resource_id='test_run_tasks', + timestamp=datetime.datetime.utcnow().isoformat(), + resource_metadata={'name': 'TestPublish'}, + ), + sample.Sample( + name='test2', + type=sample.TYPE_CUMULATIVE, + unit='', + volume=1, + user_id='test', + project_id='test', + resource_id='test_run_tasks', + timestamp=datetime.datetime.utcnow().isoformat(), + resource_metadata={'name': 'TestPublish'}, + ), + sample.Sample( + name='test2', + type=sample.TYPE_CUMULATIVE, + unit='', + volume=1, + user_id='test', + project_id='test', + resource_id='test_run_tasks', + timestamp=datetime.datetime.utcnow().isoformat(), + resource_metadata={'name': 'TestPublish'}, + ), + sample.Sample( + name='test3', + type=sample.TYPE_CUMULATIVE, + unit='', + volume=1, + user_id='test', + project_id='test', + resource_id='test_run_tasks', + timestamp=datetime.datetime.utcnow().isoformat(), + resource_metadata={'name': 'TestPublish'}, + ), + ] + + def setUp(self): + super(BasePublisherTestCase, self).setUp() + self.CONF = 
self.useFixture(fixture_config.Config()).conf + self.setup_messaging(self.CONF) + + +class RpcOnlyPublisherTest(BasePublisherTestCase): + def test_published_no_mock(self): + publisher = msg_publisher.RPCPublisher( + netutils.urlsplit('rpc://')) + + endpoint = mock.MagicMock(['record_metering_data']) + collector = messaging.get_rpc_server( + self.transport, self.CONF.publisher_rpc.metering_topic, endpoint) + endpoint.record_metering_data.side_effect = (lambda *args, **kwds: + collector.stop()) + + collector.start() + eventlet.sleep() + publisher.publish_samples(context.RequestContext(), + self.test_sample_data) + collector.wait() + + class Matcher(object): + @staticmethod + def __eq__(data): + for i, sample_item in enumerate(data): + if (sample_item['counter_name'] != + self.test_sample_data[i].name): + return False + return True + + endpoint.record_metering_data.assert_called_once_with( + mock.ANY, data=Matcher()) + + def test_publish_target(self): + publisher = msg_publisher.RPCPublisher( + netutils.urlsplit('rpc://?target=custom_procedure_call')) + cast_context = mock.MagicMock() + with mock.patch.object(publisher.rpc_client, 'prepare') as prepare: + prepare.return_value = cast_context + publisher.publish_samples(mock.MagicMock(), + self.test_sample_data) + + prepare.assert_called_once_with( + topic=self.CONF.publisher_rpc.metering_topic) + cast_context.cast.assert_called_once_with( + mock.ANY, 'custom_procedure_call', data=mock.ANY) + + def test_published_with_per_meter_topic(self): + publisher = msg_publisher.RPCPublisher( + netutils.urlsplit('rpc://?per_meter_topic=1')) + with mock.patch.object(publisher.rpc_client, 'prepare') as prepare: + publisher.publish_samples(mock.MagicMock(), + self.test_sample_data) + + class MeterGroupMatcher(object): + def __eq__(self, meters): + return len(set(meter['counter_name'] + for meter in meters)) == 1 + + topic = self.CONF.publisher_rpc.metering_topic + expected = [mock.call(topic=topic), + mock.call().cast(mock.ANY, 'record_metering_data', + data=mock.ANY), + mock.call(topic=topic + '.test'), + mock.call().cast(mock.ANY, 'record_metering_data', + data=MeterGroupMatcher()), + mock.call(topic=topic + '.test2'), + mock.call().cast(mock.ANY, 'record_metering_data', + data=MeterGroupMatcher()), + mock.call(topic=topic + '.test3'), + mock.call().cast(mock.ANY, 'record_metering_data', + data=MeterGroupMatcher())] + self.assertEqual(expected, prepare.mock_calls) + + +class NotifierOnlyPublisherTest(BasePublisherTestCase): + + @mock.patch('oslo_messaging.Notifier') + def test_publish_topic_override(self, notifier): + msg_publisher.SampleNotifierPublisher( + netutils.urlsplit('notifier://?topic=custom_topic')) + notifier.assert_called_with(mock.ANY, topic='custom_topic', + driver=mock.ANY, retry=mock.ANY, + publisher_id=mock.ANY) + + msg_publisher.EventNotifierPublisher( + netutils.urlsplit('notifier://?topic=custom_event_topic')) + notifier.assert_called_with(mock.ANY, topic='custom_event_topic', + driver=mock.ANY, retry=mock.ANY, + publisher_id=mock.ANY) + + +class TestPublisher(testscenarios.testcase.WithScenarios, + BasePublisherTestCase): + scenarios = [ + ('notifier', + dict(protocol="notifier", + publisher_cls=msg_publisher.SampleNotifierPublisher, + test_data=BasePublisherTestCase.test_sample_data, + pub_func='publish_samples', attr='source')), + ('event_notifier', + dict(protocol="notifier", + publisher_cls=msg_publisher.EventNotifierPublisher, + test_data=BasePublisherTestCase.test_event_data, + pub_func='publish_events', attr='event_type')), + 
('rpc', dict(protocol="rpc", + publisher_cls=msg_publisher.RPCPublisher, + test_data=BasePublisherTestCase.test_sample_data, + pub_func='publish_samples', attr='source')), + ] + + def setUp(self): + super(TestPublisher, self).setUp() + self.topic = (self.CONF.publisher_notifier.event_topic + if self.pub_func == 'publish_events' else + self.CONF.publisher_rpc.metering_topic) + + +class TestPublisherPolicy(TestPublisher): + def test_published_concurrency(self): + """Test concurrent access to the local queue of the rpc publisher.""" + + publisher = self.publisher_cls( + netutils.urlsplit('%s://' % self.protocol)) + + with mock.patch.object(publisher, '_send') as fake_send: + def fake_send_wait(ctxt, topic, meters): + fake_send.side_effect = mock.Mock() + # Sleep to simulate concurrency and allow other threads to work + eventlet.sleep(0) + + fake_send.side_effect = fake_send_wait + + job1 = eventlet.spawn(getattr(publisher, self.pub_func), + mock.MagicMock(), self.test_data) + job2 = eventlet.spawn(getattr(publisher, self.pub_func), + mock.MagicMock(), self.test_data) + + job1.wait() + job2.wait() + + self.assertEqual('default', publisher.policy) + self.assertEqual(2, len(fake_send.mock_calls)) + self.assertEqual(0, len(publisher.local_queue)) + + @mock.patch('ceilometer.publisher.messaging.LOG') + def test_published_with_no_policy(self, mylog): + publisher = self.publisher_cls( + netutils.urlsplit('%s://' % self.protocol)) + side_effect = msg_publisher.DeliveryFailure() + with mock.patch.object(publisher, '_send') as fake_send: + fake_send.side_effect = side_effect + self.assertRaises( + msg_publisher.DeliveryFailure, + getattr(publisher, self.pub_func), + mock.MagicMock(), self.test_data) + self.assertTrue(mylog.info.called) + self.assertEqual('default', publisher.policy) + self.assertEqual(0, len(publisher.local_queue)) + fake_send.assert_called_once_with( + mock.ANY, self.topic, mock.ANY) + + @mock.patch('ceilometer.publisher.messaging.LOG') + def test_published_with_policy_block(self, mylog): + publisher = self.publisher_cls( + netutils.urlsplit('%s://?policy=default' % self.protocol)) + side_effect = msg_publisher.DeliveryFailure() + with mock.patch.object(publisher, '_send') as fake_send: + fake_send.side_effect = side_effect + self.assertRaises( + msg_publisher.DeliveryFailure, + getattr(publisher, self.pub_func), + mock.MagicMock(), self.test_data) + self.assertTrue(mylog.info.called) + self.assertEqual(0, len(publisher.local_queue)) + fake_send.assert_called_once_with( + mock.ANY, self.topic, mock.ANY) + + @mock.patch('ceilometer.publisher.messaging.LOG') + def test_published_with_policy_incorrect(self, mylog): + publisher = self.publisher_cls( + netutils.urlsplit('%s://?policy=notexist' % self.protocol)) + side_effect = msg_publisher.DeliveryFailure() + with mock.patch.object(publisher, '_send') as fake_send: + fake_send.side_effect = side_effect + self.assertRaises( + msg_publisher.DeliveryFailure, + getattr(publisher, self.pub_func), + mock.MagicMock(), self.test_data) + self.assertTrue(mylog.warn.called) + self.assertEqual('default', publisher.policy) + self.assertEqual(0, len(publisher.local_queue)) + fake_send.assert_called_once_with( + mock.ANY, self.topic, mock.ANY) + + +@mock.patch('ceilometer.publisher.messaging.LOG', mock.Mock()) +class TestPublisherPolicyReactions(TestPublisher): + + def test_published_with_policy_drop_and_rpc_down(self): + publisher = self.publisher_cls( + netutils.urlsplit('%s://?policy=drop' % self.protocol)) + side_effect = 
msg_publisher.DeliveryFailure() + with mock.patch.object(publisher, '_send') as fake_send: + fake_send.side_effect = side_effect + getattr(publisher, self.pub_func)(mock.MagicMock(), + self.test_data) + self.assertEqual(0, len(publisher.local_queue)) + fake_send.assert_called_once_with( + mock.ANY, self.topic, mock.ANY) + + def test_published_with_policy_queue_and_rpc_down(self): + publisher = self.publisher_cls( + netutils.urlsplit('%s://?policy=queue' % self.protocol)) + side_effect = msg_publisher.DeliveryFailure() + with mock.patch.object(publisher, '_send') as fake_send: + fake_send.side_effect = side_effect + + getattr(publisher, self.pub_func)(mock.MagicMock(), + self.test_data) + self.assertEqual(1, len(publisher.local_queue)) + fake_send.assert_called_once_with( + mock.ANY, self.topic, mock.ANY) + + def test_published_with_policy_queue_and_rpc_down_up(self): + self.rpc_unreachable = True + publisher = self.publisher_cls( + netutils.urlsplit('%s://?policy=queue' % self.protocol)) + + side_effect = msg_publisher.DeliveryFailure() + with mock.patch.object(publisher, '_send') as fake_send: + fake_send.side_effect = side_effect + getattr(publisher, self.pub_func)(mock.MagicMock(), + self.test_data) + + self.assertEqual(1, len(publisher.local_queue)) + + fake_send.side_effect = mock.MagicMock() + getattr(publisher, self.pub_func)(mock.MagicMock(), + self.test_data) + + self.assertEqual(0, len(publisher.local_queue)) + + topic = self.topic + expected = [mock.call(mock.ANY, topic, mock.ANY), + mock.call(mock.ANY, topic, mock.ANY), + mock.call(mock.ANY, topic, mock.ANY)] + self.assertEqual(expected, fake_send.mock_calls) + + def test_published_with_policy_sized_queue_and_rpc_down(self): + publisher = self.publisher_cls(netutils.urlsplit( + '%s://?policy=queue&max_queue_length=3' % self.protocol)) + + side_effect = msg_publisher.DeliveryFailure() + with mock.patch.object(publisher, '_send') as fake_send: + fake_send.side_effect = side_effect + for i in range(0, 5): + for s in self.test_data: + setattr(s, self.attr, 'test-%d' % i) + getattr(publisher, self.pub_func)(mock.MagicMock(), + self.test_data) + + self.assertEqual(3, len(publisher.local_queue)) + self.assertEqual( + 'test-2', + publisher.local_queue[0][2][0][self.attr] + ) + self.assertEqual( + 'test-3', + publisher.local_queue[1][2][0][self.attr] + ) + self.assertEqual( + 'test-4', + publisher.local_queue[2][2][0][self.attr] + ) + + def test_published_with_policy_default_sized_queue_and_rpc_down(self): + publisher = self.publisher_cls( + netutils.urlsplit('%s://?policy=queue' % self.protocol)) + + side_effect = msg_publisher.DeliveryFailure() + with mock.patch.object(publisher, '_send') as fake_send: + fake_send.side_effect = side_effect + for i in range(0, 2000): + for s in self.test_data: + setattr(s, self.attr, 'test-%d' % i) + getattr(publisher, self.pub_func)(mock.MagicMock(), + self.test_data) + + self.assertEqual(1024, len(publisher.local_queue)) + self.assertEqual( + 'test-976', + publisher.local_queue[0][2][0][self.attr] + ) + self.assertEqual( + 'test-1999', + publisher.local_queue[1023][2][0][self.attr] + ) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/publisher/test_udp.py ceilometer-5.0.0~b3/ceilometer/tests/unit/publisher/test_udp.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/publisher/test_udp.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/publisher/test_udp.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,176 @@ +# +# Copyright 2013-2014 eNovance +# +# Licensed under 
the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Tests for ceilometer/publisher/udp.py +""" + +import datetime +import socket + +import mock +import msgpack +from oslo_config import fixture as fixture_config +from oslo_utils import netutils +from oslotest import base + +from ceilometer.publisher import udp +from ceilometer.publisher import utils +from ceilometer import sample + + +COUNTER_SOURCE = 'testsource' + + +class TestUDPPublisher(base.BaseTestCase): + test_data = [ + sample.Sample( + name='test', + type=sample.TYPE_CUMULATIVE, + unit='', + volume=1, + user_id='test', + project_id='test', + resource_id='test_run_tasks', + timestamp=datetime.datetime.utcnow().isoformat(), + resource_metadata={'name': 'TestPublish'}, + source=COUNTER_SOURCE, + ), + sample.Sample( + name='test', + type=sample.TYPE_CUMULATIVE, + unit='', + volume=1, + user_id='test', + project_id='test', + resource_id='test_run_tasks', + timestamp=datetime.datetime.utcnow().isoformat(), + resource_metadata={'name': 'TestPublish'}, + source=COUNTER_SOURCE, + ), + sample.Sample( + name='test2', + type=sample.TYPE_CUMULATIVE, + unit='', + volume=1, + user_id='test', + project_id='test', + resource_id='test_run_tasks', + timestamp=datetime.datetime.utcnow().isoformat(), + resource_metadata={'name': 'TestPublish'}, + source=COUNTER_SOURCE, + ), + sample.Sample( + name='test2', + type=sample.TYPE_CUMULATIVE, + unit='', + volume=1, + user_id='test', + project_id='test', + resource_id='test_run_tasks', + timestamp=datetime.datetime.utcnow().isoformat(), + resource_metadata={'name': 'TestPublish'}, + source=COUNTER_SOURCE, + ), + sample.Sample( + name='test3', + type=sample.TYPE_CUMULATIVE, + unit='', + volume=1, + user_id='test', + project_id='test', + resource_id='test_run_tasks', + timestamp=datetime.datetime.utcnow().isoformat(), + resource_metadata={'name': 'TestPublish'}, + source=COUNTER_SOURCE, + ), + ] + + @staticmethod + def _make_fake_socket(published): + def _fake_socket_socket(family, type): + def record_data(msg, dest): + published.append((msg, dest)) + + udp_socket = mock.Mock() + udp_socket.sendto = record_data + return udp_socket + + return _fake_socket_socket + + def setUp(self): + super(TestUDPPublisher, self).setUp() + self.CONF = self.useFixture(fixture_config.Config()).conf + self.CONF.publisher.telemetry_secret = 'not-so-secret' + + def _check_udp_socket(self, url, expected_addr_family): + with mock.patch.object(socket, 'socket') as mock_socket: + udp.UDPPublisher(netutils.urlsplit(url)) + mock_socket.assert_called_with(expected_addr_family, + socket.SOCK_DGRAM) + + def test_publisher_udp_socket_ipv4(self): + self._check_udp_socket('udp://127.0.0.1:4952', + socket.AF_INET) + + def test_publisher_udp_socket_ipv6(self): + self._check_udp_socket('udp://[::1]:4952', + socket.AF_INET6) + + def test_published(self): + self.data_sent = [] + with mock.patch('socket.socket', + self._make_fake_socket(self.data_sent)): + publisher = udp.UDPPublisher( + netutils.urlsplit('udp://somehost')) + publisher.publish_samples(None, + 
self.test_data) + + self.assertEqual(5, len(self.data_sent)) + + sent_counters = [] + + for data, dest in self.data_sent: + counter = msgpack.loads(data, encoding="utf-8") + sent_counters.append(counter) + + # Check destination + self.assertEqual(('somehost', + self.CONF.collector.udp_port), dest) + + # Check that counters are equal + def sort_func(counter): + return counter['counter_name'] + + counters = [utils.meter_message_from_counter(d, "not-so-secret") + for d in self.test_data] + counters.sort(key=sort_func) + sent_counters.sort(key=sort_func) + self.assertEqual(counters, sent_counters) + + @staticmethod + def _raise_ioerror(*args): + raise IOError + + def _make_broken_socket(self, family, type): + udp_socket = mock.Mock() + udp_socket.sendto = self._raise_ioerror + return udp_socket + + def test_publish_error(self): + with mock.patch('socket.socket', + self._make_broken_socket): + publisher = udp.UDPPublisher( + netutils.urlsplit('udp://localhost')) + publisher.publish_samples(None, + self.test_data) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/publisher/test_utils.py ceilometer-5.0.0~b3/ceilometer/tests/unit/publisher/test_utils.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/publisher/test_utils.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/publisher/test_utils.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,135 @@ +# +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
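The signature tests below pin down a simple contract. As a conceptual sketch only (not ceilometer's actual utils.compute_signature, which notably also flattens nested dicts, as test_verify_signature_nested shows), compute_signature_sketch illustrates it:

    import hashlib
    import hmac

    def compute_signature_sketch(message, secret):
        # Sign the sorted key/value pairs, skipping any existing
        # 'message_signature' entry so that storing the signature
        # inside the message does not change the signature itself
        # (cf. test_compute_signature_signed below).
        digest = hmac.new(secret.encode('utf-8'),
                          digestmod=hashlib.sha256)
        for name, value in sorted(message.items()):
            if name == 'message_signature':
                continue
            digest.update(name.encode('utf-8'))
            digest.update(str(value).encode('utf-8'))
        return digest.hexdigest()

Verification then recomputes the digest over the received message and compares it to the embedded value; test_besteffort_compare_digest suggests the comparison is done constant-time where possible.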
+"""Tests for ceilometer/publisher/utils.py +""" +from oslo_serialization import jsonutils +from oslotest import base + +from ceilometer.publisher import utils + + +class TestSignature(base.BaseTestCase): + def test_compute_signature_change_key(self): + sig1 = utils.compute_signature({'a': 'A', 'b': 'B'}, + 'not-so-secret') + sig2 = utils.compute_signature({'A': 'A', 'b': 'B'}, + 'not-so-secret') + self.assertNotEqual(sig1, sig2) + + def test_compute_signature_change_value(self): + sig1 = utils.compute_signature({'a': 'A', 'b': 'B'}, + 'not-so-secret') + sig2 = utils.compute_signature({'a': 'a', 'b': 'B'}, + 'not-so-secret') + self.assertNotEqual(sig1, sig2) + + def test_compute_signature_same(self): + sig1 = utils.compute_signature({'a': 'A', 'b': 'B'}, + 'not-so-secret') + sig2 = utils.compute_signature({'a': 'A', 'b': 'B'}, + 'not-so-secret') + self.assertEqual(sig1, sig2) + + def test_compute_signature_signed(self): + data = {'a': 'A', 'b': 'B'} + sig1 = utils.compute_signature(data, 'not-so-secret') + data['message_signature'] = sig1 + sig2 = utils.compute_signature(data, 'not-so-secret') + self.assertEqual(sig1, sig2) + + def test_compute_signature_use_configured_secret(self): + data = {'a': 'A', 'b': 'B'} + sig1 = utils.compute_signature(data, 'not-so-secret') + sig2 = utils.compute_signature(data, 'different-value') + self.assertNotEqual(sig1, sig2) + + def test_verify_signature_signed(self): + data = {'a': 'A', 'b': 'B'} + sig1 = utils.compute_signature(data, 'not-so-secret') + data['message_signature'] = sig1 + self.assertTrue(utils.verify_signature(data, 'not-so-secret')) + + def test_verify_signature_unsigned(self): + data = {'a': 'A', 'b': 'B'} + self.assertFalse(utils.verify_signature(data, 'not-so-secret')) + + def test_verify_signature_incorrect(self): + data = {'a': 'A', 'b': 'B', + 'message_signature': 'Not the same'} + self.assertFalse(utils.verify_signature(data, 'not-so-secret')) + + def test_verify_signature_invalid_encoding(self): + data = {'a': 'A', 'b': 'B', + 'message_signature': ''} + self.assertFalse(utils.verify_signature(data, 'not-so-secret')) + + def test_verify_signature_unicode(self): + data = {'a': 'A', 'b': 'B', + 'message_signature': u''} + self.assertFalse(utils.verify_signature(data, 'not-so-secret')) + + def test_verify_signature_nested(self): + data = {'a': 'A', + 'b': 'B', + 'nested': {'a': 'A', + 'b': 'B', + }, + } + data['message_signature'] = utils.compute_signature( + data, + 'not-so-secret') + self.assertTrue(utils.verify_signature(data, 'not-so-secret')) + + def test_verify_signature_nested_json(self): + data = {'a': 'A', + 'b': 'B', + 'nested': {'a': 'A', + 'b': 'B', + 'c': ('c',), + 'd': ['d'] + }, + } + data['message_signature'] = utils.compute_signature( + data, + 'not-so-secret') + jsondata = jsonutils.loads(jsonutils.dumps(data)) + self.assertTrue(utils.verify_signature(jsondata, 'not-so-secret')) + + def test_verify_unicode_symbols(self): + data = {u'a\xe9\u0437': 'A', + 'b': u'B\xe9\u0437' + } + data['message_signature'] = utils.compute_signature( + data, + 'not-so-secret') + jsondata = jsonutils.loads(jsonutils.dumps(data)) + self.assertTrue(utils.verify_signature(jsondata, 'not-so-secret')) + + def test_besteffort_compare_digest(self): + hash1 = "f5ac3fe42b80b80f979825d177191bc5" + hash2 = "f5ac3fe42b80b80f979825d177191bc5" + hash3 = "1dece7821bf3fd70fe1309eaa37d52a2" + hash4 = b"f5ac3fe42b80b80f979825d177191bc5" + hash5 = b"f5ac3fe42b80b80f979825d177191bc5" + hash6 = b"1dece7821bf3fd70fe1309eaa37d52a2" + + 
self.assertTrue(utils.besteffort_compare_digest(hash1, hash2)) + self.assertFalse(utils.besteffort_compare_digest(hash1, hash3)) + self.assertTrue(utils.besteffort_compare_digest(hash4, hash5)) + self.assertFalse(utils.besteffort_compare_digest(hash4, hash6)) + + def test_verify_no_secret(self): + data = {'a': 'A', 'b': 'B'} + self.assertTrue(utils.verify_signature(data, '')) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/storage/sqlalchemy/test_models.py ceilometer-5.0.0~b3/ceilometer/tests/unit/storage/sqlalchemy/test_models.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/storage/sqlalchemy/test_models.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/storage/sqlalchemy/test_models.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,96 @@ +# +# Copyright 2013 Rackspace Hosting +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime + +import mock +from oslotest import base +import sqlalchemy +from sqlalchemy.dialects.mysql import DECIMAL +from sqlalchemy.types import NUMERIC + +from ceilometer.storage.sqlalchemy import models +from ceilometer import utils + + +class PreciseTimestampTest(base.BaseTestCase): + + @staticmethod + def fake_dialect(name): + def _type_descriptor_mock(desc): + if type(desc) == DECIMAL: + return NUMERIC(precision=desc.precision, scale=desc.scale) + dialect = mock.MagicMock() + dialect.name = name + dialect.type_descriptor = _type_descriptor_mock + return dialect + + def setUp(self): + super(PreciseTimestampTest, self).setUp() + self._mysql_dialect = self.fake_dialect('mysql') + self._postgres_dialect = self.fake_dialect('postgres') + self._type = models.PreciseTimestamp() + self._date = datetime.datetime(2012, 7, 2, 10, 44) + + def test_load_dialect_impl_mysql(self): + result = self._type.load_dialect_impl(self._mysql_dialect) + self.assertEqual(NUMERIC, type(result)) + self.assertEqual(20, result.precision) + self.assertEqual(6, result.scale) + self.assertTrue(result.asdecimal) + + def test_load_dialect_impl_postgres(self): + result = self._type.load_dialect_impl(self._postgres_dialect) + self.assertEqual(sqlalchemy.DateTime, type(result)) + + def test_process_bind_param_store_decimal_mysql(self): + expected = utils.dt_to_decimal(self._date) + result = self._type.process_bind_param(self._date, self._mysql_dialect) + self.assertEqual(expected, result) + + def test_process_bind_param_store_datetime_postgres(self): + result = self._type.process_bind_param(self._date, + self._postgres_dialect) + self.assertEqual(self._date, result) + + def test_process_bind_param_store_none_mysql(self): + result = self._type.process_bind_param(None, self._mysql_dialect) + self.assertIsNone(result) + + def test_process_bind_param_store_none_postgres(self): + result = self._type.process_bind_param(None, + self._postgres_dialect) + self.assertIsNone(result) + + def test_process_result_value_datetime_mysql(self): + dec_value = utils.dt_to_decimal(self._date) + result = self._type.process_result_value(dec_value, + self._mysql_dialect) + 
self.assertEqual(self._date, result) + + def test_process_result_value_datetime_postgres(self): + result = self._type.process_result_value(self._date, + self._postgres_dialect) + self.assertEqual(self._date, result) + + def test_process_result_value_none_mysql(self): + result = self._type.process_result_value(None, + self._mysql_dialect) + self.assertIsNone(result) + + def test_process_result_value_none_postgres(self): + result = self._type.process_result_value(None, + self._postgres_dialect) + self.assertIsNone(result) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/storage/test_base.py ceilometer-5.0.0~b3/ceilometer/tests/unit/storage/test_base.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/storage/test_base.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/storage/test_base.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,57 @@ +# Copyright 2013 eNovance +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import datetime +import math + +from oslotest import base as testbase + +from ceilometer.storage import base + + +class BaseTest(testbase.BaseTestCase): + + def test_iter_period(self): + times = list(base.iter_period( + datetime.datetime(2013, 1, 1, 12, 0), + datetime.datetime(2013, 1, 1, 13, 0), + 60)) + self.assertEqual(60, len(times)) + self.assertEqual((datetime.datetime(2013, 1, 1, 12, 10), + datetime.datetime(2013, 1, 1, 12, 11)), times[10]) + self.assertEqual((datetime.datetime(2013, 1, 1, 12, 21), + datetime.datetime(2013, 1, 1, 12, 22)), times[21]) + + def test_iter_period_bis(self): + times = list(base.iter_period( + datetime.datetime(2013, 1, 2, 13, 0), + datetime.datetime(2013, 1, 2, 14, 0), + 55)) + self.assertEqual(math.ceil(3600 / 55.0), len(times)) + self.assertEqual((datetime.datetime(2013, 1, 2, 13, 9, 10), + datetime.datetime(2013, 1, 2, 13, 10, 5)), + times[10]) + self.assertEqual((datetime.datetime(2013, 1, 2, 13, 19, 15), + datetime.datetime(2013, 1, 2, 13, 20, 10)), + times[21]) + + def test_handle_sort_key(self): + sort_keys_alarm = base._handle_sort_key('alarm') + self.assertEqual(['name', 'user_id', 'project_id'], sort_keys_alarm) + + sort_keys_meter = base._handle_sort_key('meter', 'foo') + self.assertEqual(['foo', 'user_id', 'project_id'], sort_keys_meter) + + sort_keys_resource = base._handle_sort_key('resource', 'project_id') + self.assertEqual(['project_id', 'user_id', 'timestamp'], + sort_keys_resource) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/storage/test_get_connection.py ceilometer-5.0.0~b3/ceilometer/tests/unit/storage/test_get_connection.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/storage/test_get_connection.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/storage/test_get_connection.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,135 @@ +# +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
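A sketch of the windowing behaviour that BaseTest.test_iter_period above pins down; iter_period_sketch is an illustrative stand-in, not ceilometer's storage.base.iter_period itself:

    import datetime

    def iter_period_sketch(start, end, period):
        # Yield consecutive [window_start, window_end) pairs of
        # `period` seconds until `end` is reached; a final partial
        # window is still emitted, hence ceil(3600 / 55.0) windows
        # in test_iter_period_bis.
        while start < end:
            window_end = start + datetime.timedelta(seconds=period)
            yield (start, window_end)
            start = window_end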
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Tests for ceilometer/storage/ +""" +import unittest + +import mock +from oslo_config import fixture as fixture_config +from oslotest import base +import retrying + +from ceilometer.alarm.storage import impl_log as impl_log_alarm +from ceilometer.alarm.storage import impl_sqlalchemy as impl_sqlalchemy_alarm +try: + from ceilometer.event.storage import impl_hbase as impl_hbase_event +except ImportError: + impl_hbase_event = None +from ceilometer import storage +from ceilometer.storage import impl_log +from ceilometer.storage import impl_sqlalchemy + +import six + + +class EngineTest(base.BaseTestCase): + def test_get_connection(self): + engine = storage.get_connection('log://localhost', + 'ceilometer.metering.storage') + self.assertIsInstance(engine, impl_log.Connection) + + def test_get_connection_no_such_engine(self): + try: + storage.get_connection('no-such-engine://localhost', + 'ceilometer.metering.storage') + except RuntimeError as err: + self.assertIn('no-such-engine', six.text_type(err)) + + +class ConnectionRetryTest(base.BaseTestCase): + def setUp(self): + super(ConnectionRetryTest, self).setUp() + self.CONF = self.useFixture(fixture_config.Config()).conf + + def test_retries(self): + with mock.patch.object( + retrying.Retrying, 'should_reject') as retry_reject: + try: + self.CONF.set_override("connection", "no-such-engine://", + group="database") + self.CONF.set_override("retry_interval", 0.00001, + group="database") + storage.get_connection_from_config(self.CONF) + except RuntimeError as err: + self.assertIn('no-such-engine', six.text_type(err)) + self.assertEqual(10, retry_reject.call_count) + + +class ConnectionConfigTest(base.BaseTestCase): + def setUp(self): + super(ConnectionConfigTest, self).setUp() + self.CONF = self.useFixture(fixture_config.Config()).conf + + def test_only_default_url(self): + self.CONF.set_override("connection", "log://", group="database") + conn = storage.get_connection_from_config(self.CONF) + self.assertIsInstance(conn, impl_log.Connection) + conn = storage.get_connection_from_config(self.CONF, 'metering') + self.assertIsInstance(conn, impl_log.Connection) + conn = storage.get_connection_from_config(self.CONF, 'alarm') + self.assertIsInstance(conn, impl_log_alarm.Connection) + + def test_two_urls(self): + self.CONF.set_override("connection", "log://", group="database") + self.CONF.set_override("alarm_connection", "sqlite://", + group="database") + conn = storage.get_connection_from_config(self.CONF) + self.assertIsInstance(conn, impl_log.Connection) + conn = storage.get_connection_from_config(self.CONF, 'metering') + self.assertIsInstance(conn, impl_log.Connection) + conn = storage.get_connection_from_config(self.CONF, 'alarm') + self.assertIsInstance(conn, impl_sqlalchemy_alarm.Connection) + + @unittest.skipUnless(impl_hbase_event, 'need hbase implementation') + def test_three_urls(self): + self.CONF.set_override("connection", "log://", group="database") + self.CONF.set_override("alarm_connection", "sqlite://", + group="database") + self.CONF.set_override("event_connection", "hbase://__test__", + group="database") + conn = 
storage.get_connection_from_config(self.CONF) + self.assertIsInstance(conn, impl_log.Connection) + conn = storage.get_connection_from_config(self.CONF, 'metering') + self.assertIsInstance(conn, impl_log.Connection) + conn = storage.get_connection_from_config(self.CONF, 'alarm') + self.assertIsInstance(conn, impl_sqlalchemy_alarm.Connection) + conn = storage.get_connection_from_config(self.CONF, 'event') + self.assertIsInstance(conn, impl_hbase_event.Connection) + + @unittest.skipUnless(impl_hbase_event, 'need hbase implementation') + def test_three_urls_no_default(self): + self.CONF.set_override("connection", None, group="database") + self.CONF.set_override("metering_connection", "log://", + group="database") + self.CONF.set_override("alarm_connection", "sqlite://", + group="database") + self.CONF.set_override("event_connection", "hbase://__test__", + group="database") + conn = storage.get_connection_from_config(self.CONF) + self.assertIsInstance(conn, impl_log.Connection) + conn = storage.get_connection_from_config(self.CONF, 'alarm') + self.assertIsInstance(conn, impl_sqlalchemy_alarm.Connection) + conn = storage.get_connection_from_config(self.CONF, 'event') + self.assertIsInstance(conn, impl_hbase_event.Connection) + + def test_sqlalchemy_driver(self): + self.CONF.set_override("connection", "sqlite+pysqlite://", + group="database") + conn = storage.get_connection_from_config(self.CONF) + self.assertIsInstance(conn, impl_sqlalchemy.Connection) + conn = storage.get_connection_from_config(self.CONF, 'metering') + self.assertIsInstance(conn, impl_sqlalchemy.Connection) + conn = storage.get_connection_from_config(self.CONF, 'alarm') + self.assertIsInstance(conn, impl_sqlalchemy_alarm.Connection) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/storage/test_models.py ceilometer-5.0.0~b3/ceilometer/tests/unit/storage/test_models.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/storage/test_models.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/storage/test_models.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,157 @@ +# +# Copyright 2013 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
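ConnectionConfigTest above pins down a simple fallback rule for picking a database URL per purpose. A dict-based sketch of that rule; url_for_purpose and its plain-dict argument are illustrative, not the storage module's real API:

    def url_for_purpose(database_opts, purpose):
        # database_opts stands in for the [database] config section,
        # e.g. {'connection': 'log://', 'alarm_connection': 'sqlite://'}.
        # A purpose-specific URL such as alarm_connection wins;
        # otherwise the shared 'connection' URL is used for everything.
        return (database_opts.get('%s_connection' % purpose) or
                database_opts.get('connection'))

    # e.g. url_for_purpose({'connection': 'log://',
    #                       'alarm_connection': 'sqlite://'}, 'alarm')
    # returns 'sqlite://', matching test_two_urls above.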
+ +import datetime + +from oslotest import base as testbase +import six + +from ceilometer.alarm.storage import models as alarm_models +from ceilometer.event.storage import models as event_models +from ceilometer.storage import base +from ceilometer.storage import models + + +class FakeModel(base.Model): + def __init__(self, arg1, arg2): + base.Model.__init__(self, arg1=arg1, arg2=arg2) + + +class ModelTest(testbase.BaseTestCase): + + def test_create_attributes(self): + m = FakeModel(1, 2) + self.assertEqual(1, m.arg1) + self.assertEqual(2, m.arg2) + + def test_as_dict(self): + m = FakeModel(1, 2) + d = m.as_dict() + self.assertEqual({'arg1': 1, 'arg2': 2}, d) + + def test_as_dict_recursive(self): + m = FakeModel(1, FakeModel('a', 'b')) + d = m.as_dict() + self.assertEqual({'arg1': 1, + 'arg2': {'arg1': 'a', + 'arg2': 'b'}}, + d) + + def test_as_dict_recursive_list(self): + m = FakeModel(1, [FakeModel('a', 'b')]) + d = m.as_dict() + self.assertEqual({'arg1': 1, + 'arg2': [{'arg1': 'a', + 'arg2': 'b'}]}, + d) + + def test_event_repr_no_traits(self): + x = event_models.Event("1", "name", "now", None, {}) + self.assertEqual("", repr(x)) + + def test_get_field_names_of_sample(self): + sample_fields = ["source", "counter_name", "counter_type", + "counter_unit", "counter_volume", "user_id", + "project_id", "resource_id", "timestamp", + "resource_metadata", "message_id", + "message_signature", "recorded_at"] + + self.assertEqual(set(sample_fields), + set(models.Sample.get_field_names())) + + def test_get_field_names_of_alarm(self): + alarm_fields = ["alarm_id", "type", "enabled", "name", "description", + "timestamp", "user_id", "project_id", "state", + "state_timestamp", "ok_actions", "alarm_actions", + "insufficient_data_actions", "repeat_actions", "rule", + "severity", "time_constraints"] + + self.assertEqual(set(alarm_fields), + set(alarm_models.Alarm.get_field_names())) + + def test_get_field_names_of_alarmchange(self): + alarmchange_fields = ["event_id", "alarm_id", "type", "detail", + "user_id", "project_id", "severity", + "on_behalf_of", "timestamp"] + + self.assertEqual(set(alarmchange_fields), + set(alarm_models.AlarmChange.get_field_names())) + + +class TestTraitModel(testbase.BaseTestCase): + + def test_convert_value(self): + v = event_models.Trait.convert_value( + event_models.Trait.INT_TYPE, '10') + self.assertEqual(10, v) + self.assertIsInstance(v, int) + v = event_models.Trait.convert_value( + event_models.Trait.FLOAT_TYPE, '10') + self.assertEqual(10.0, v) + self.assertIsInstance(v, float) + + v = event_models.Trait.convert_value( + event_models.Trait.DATETIME_TYPE, '2013-08-08 21:05:37.123456') + self.assertEqual(datetime.datetime(2013, 8, 8, 21, 5, 37, 123456), v) + self.assertIsInstance(v, datetime.datetime) + + v = event_models.Trait.convert_value( + event_models.Trait.TEXT_TYPE, 10) + self.assertEqual("10", v) + self.assertIsInstance(v, six.text_type) + + +class TestClassModel(testbase.BaseTestCase): + + ALARM = { + 'alarm_id': '503490ea-ee9e-40d6-9cad-93a71583f4b2', + 'enabled': True, + 'type': 'threshold', + 'name': 'alarm-test', + 'description': 'alarm-test-description', + 'timestamp': None, + 'user_id': '5c76351f5fef4f6490d1048355094ca3', + 'project_id': 'd83ed14a457141fc8661b4dcb3fd883d', + 'state': "insufficient data", + 'state_timestamp': None, + 'ok_actions': [], + 'alarm_actions': [], + 'insufficient_data_actions': [], + 'repeat_actions': False, + 'time_constraints': [], + 'rule': { + 'comparison_operator': 'lt', + 'threshold': 34, + 'statistic': 'max', + 
'evaluation_periods': 1, + 'period': 60, + 'meter_name': 'cpu_util', + 'query': [] + } + } + + def test_timestamp_cannot_be_none(self): + self.ALARM['timestamp'] = None + self.ALARM['state_timestamp'] = datetime.datetime.utcnow() + self.assertRaises(TypeError, + alarm_models.Alarm.__init__, + **self.ALARM) + + def test_state_timestamp_cannot_be_none(self): + self.ALARM['timestamp'] = datetime.datetime.utcnow() + self.ALARM['state_timestamp'] = None + self.assertRaises(TypeError, + alarm_models.Alarm.__init__, + **self.ALARM) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/telemetry/test_notifications.py ceilometer-5.0.0~b3/ceilometer/tests/unit/telemetry/test_notifications.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/telemetry/test_notifications.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/telemetry/test_notifications.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,81 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslotest import base + +from ceilometer.telemetry import notifications + +NOTIFICATION = { + u'_context_domain': None, + u'_context_request_id': u'req-da91b4bf-d2b5-43ae-8b66-c7752e72726d', + 'event_type': u'telemetry.api', + 'timestamp': u'2015-06-1909: 19: 35.786893', + u'_context_auth_token': None, + u'_context_read_only': False, + 'payload': {'samples': + [{'counter_name': u'instance100', + u'user_id': u'e1d870e51c7340cb9d555b15cbfcaec2', + u'resource_id': u'instance', + u'timestamp': u'2015-06-19T09: 19: 35.785330', + u'message_signature': u'fake_signature1', + u'resource_metadata': {u'foo': u'bar'}, + u'source': u'30be1fc9a03c4e94ab05c403a8a377f2: openstack', + u'counter_unit': u'instance', + u'counter_volume': 1.0, + u'project_id': u'30be1fc9a03c4e94ab05c403a8a377f2', + u'message_id': u'4d865c6e-1664-11e5-9d41-0819a6cff905', + u'counter_type': u'gauge'}, + {u'counter_name': u'instance100', + u'user_id': u'e1d870e51c7340cb9d555b15cbfcaec2', + u'resource_id': u'instance', + u'timestamp': u'2015-06-19T09: 19: 35.785330', + u'message_signature': u'fake_signature12', + u'resource_metadata': {u'foo': u'bar'}, + u'source': u'30be1fc9a03c4e94ab05c403a8a377f2: openstack', + u'counter_unit': u'instance', + u'counter_volume': 1.0, + u'project_id': u'30be1fc9a03c4e94ab05c403a8a377f2', + u'message_id': u'4d866da8-1664-11e5-9d41-0819a6cff905', + u'counter_type': u'gauge'}]}, + u'_context_resource_uuid': None, + u'_context_user_identity': u'fake_user_identity---', + u'_context_show_deleted': False, + u'_context_tenant': u'30be1fc9a03c4e94ab05c403a8a377f2', + 'priority': 'info', + u'_context_is_admin': True, + u'_context_project_domain': None, + u'_context_user': u'e1d870e51c7340cb9d555b15cbfcaec2', + u'_context_user_domain': None, + 'publisher_id': u'ceilometer.api', + 'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e' +} + + +class TelemetryApiPostTestCase(base.BaseTestCase): + + def test_process_notification(self): + sample_creation = notifications.TelemetryApiPost(None) + samples = 
list(sample_creation.process_notification(NOTIFICATION)) + self.assertEqual(2, len(samples)) + payload = NOTIFICATION["payload"]['samples'] + for index, sample in enumerate(samples): + self.assertEqual(payload[index]["user_id"], sample.user_id) + self.assertEqual(payload[index]["counter_name"], sample.name) + self.assertEqual(payload[index]["resource_id"], sample.resource_id) + self.assertEqual(payload[index]["timestamp"], sample.timestamp) + self.assertEqual(payload[index]["resource_metadata"], + sample.resource_metadata) + self.assertEqual(payload[index]["counter_volume"], sample.volume) + self.assertEqual(payload[index]["source"], sample.source) + self.assertEqual(payload[index]["counter_type"], sample.type) + self.assertEqual(payload[index]["message_id"], sample.id) + self.assertEqual(payload[index]["counter_unit"], sample.unit) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/test_coordination.py ceilometer-5.0.0~b3/ceilometer/tests/unit/test_coordination.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/test_coordination.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/test_coordination.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,249 @@ +# +# Copyright 2014 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
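
The coordination tests that follow never touch a real tooz backend: group membership lives in a plain dict shared between MockToozCoordinator instances, and asynchronous results are wrapped in MockAsyncResult/MockAsyncError. The behaviour they ultimately pin down is consistent-hash partitioning: every agent builds the same hash ring over the group members and keeps only the resources that map to itself. Below is a minimal, self-contained sketch of that scheme; it is illustrative only (ceilometer's real ring is ceilometer.utils.HashRing, and the subset logic lives in PartitionCoordinator.extract_my_subset), with the replica count and hash function chosen here for the example.

import bisect
import hashlib


class HashRing(object):
    """Consistent-hash ring with virtual replicas per node."""

    def __init__(self, nodes, replicas=100):
        self._ring = {}
        self._sorted_keys = []
        for node in nodes:
            for r in range(replicas):
                hsh = self._hash('%s-%s' % (node, r))
                self._ring[hsh] = node
                self._sorted_keys.append(hsh)
        self._sorted_keys.sort()

    @staticmethod
    def _hash(key):
        return int(hashlib.md5(str(key).encode('utf-8')).hexdigest(), 16)

    def get_node(self, key):
        # First ring position at or after the key's hash; wrap around
        # past the last position.
        position = bisect.bisect(self._sorted_keys, self._hash(key))
        position = position if position < len(self._sorted_keys) else 0
        return self._ring[self._sorted_keys[position]]


def extract_my_subset(my_id, members, resources):
    """Keep only the resources this member is responsible for."""
    ring = HashRing(members)
    return [r for r in resources if ring.get_node(str(r)) == my_id]

Because each agent derives the ring from the same member list, the per-agent subsets are disjoint and jointly cover all resources with no agent-to-agent traffic, which is the property test_partitioning asserts below.
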
+ +import logging + +import mock +from oslo_config import fixture as fixture_config +import tooz.coordination + +from ceilometer import coordination +from ceilometer.tests import base +from ceilometer import utils + + +class MockToozCoordinator(object): + def __init__(self, member_id, shared_storage): + self._member_id = member_id + self._groups = shared_storage + self.is_started = False + + def start(self): + self.is_started = True + + def stop(self): + pass + + def heartbeat(self): + pass + + def create_group(self, group_id): + if group_id in self._groups: + return MockAsyncError( + tooz.coordination.GroupAlreadyExist(group_id)) + self._groups[group_id] = {} + return MockAsyncResult(None) + + def join_group(self, group_id, capabilities=b''): + if group_id not in self._groups: + return MockAsyncError( + tooz.coordination.GroupNotCreated(group_id)) + if self._member_id in self._groups[group_id]: + return MockAsyncError( + tooz.coordination.MemberAlreadyExist(group_id, + self._member_id)) + self._groups[group_id][self._member_id] = { + "capabilities": capabilities, + } + return MockAsyncResult(None) + + def leave_group(self, group_id): + return MockAsyncResult(None) + + def get_members(self, group_id): + if group_id not in self._groups: + return MockAsyncError( + tooz.coordination.GroupNotCreated(group_id)) + return MockAsyncResult(self._groups[group_id]) + + +class MockToozCoordExceptionRaiser(MockToozCoordinator): + def start(self): + raise tooz.coordination.ToozError('error') + + def heartbeat(self): + raise tooz.coordination.ToozError('error') + + def join_group(self, group_id, capabilities=b''): + raise tooz.coordination.ToozError('error') + + def get_members(self, group_id): + raise tooz.coordination.ToozError('error') + + +class MockAsyncResult(tooz.coordination.CoordAsyncResult): + def __init__(self, result): + self.result = result + + def get(self, timeout=0): + return self.result + + @staticmethod + def done(): + return True + + +class MockAsyncError(tooz.coordination.CoordAsyncResult): + def __init__(self, error): + self.error = error + + def get(self, timeout=0): + raise self.error + + @staticmethod + def done(): + return True + + +class MockLoggingHandler(logging.Handler): + """Mock logging handler to check for expected logs.""" + + def __init__(self, *args, **kwargs): + self.reset() + logging.Handler.__init__(self, *args, **kwargs) + + def emit(self, record): + self.messages[record.levelname.lower()].append(record.getMessage()) + + def reset(self): + self.messages = {'debug': [], + 'info': [], + 'warning': [], + 'error': [], + 'critical': []} + + +class TestPartitioning(base.BaseTestCase): + + def setUp(self): + super(TestPartitioning, self).setUp() + self.CONF = self.useFixture(fixture_config.Config()).conf + self.str_handler = MockLoggingHandler() + coordination.LOG.logger.addHandler(self.str_handler) + self.shared_storage = {} + + def _get_new_started_coordinator(self, shared_storage, agent_id=None, + coordinator_cls=None): + coordinator_cls = coordinator_cls or MockToozCoordinator + self.CONF.set_override('backend_url', 'xxx://yyy', + group='coordination') + with mock.patch('tooz.coordination.get_coordinator', + lambda _, member_id: + coordinator_cls(member_id, shared_storage)): + pc = coordination.PartitionCoordinator(agent_id) + pc.start() + return pc + + def _usage_simulation(self, *agents_kwargs): + partition_coordinators = [] + for kwargs in agents_kwargs: + partition_coordinator = self._get_new_started_coordinator( + self.shared_storage, kwargs['agent_id'], 
kwargs.get( + 'coordinator_cls')) + partition_coordinator.join_group(kwargs['group_id']) + partition_coordinators.append(partition_coordinator) + + for i, kwargs in enumerate(agents_kwargs): + all_resources = kwargs.get('all_resources', []) + expected_resources = kwargs.get('expected_resources', []) + actual_resources = partition_coordinators[i].extract_my_subset( + kwargs['group_id'], all_resources) + self.assertEqual(expected_resources, actual_resources) + + def test_single_group(self): + agents = [dict(agent_id='agent1', group_id='group'), + dict(agent_id='agent2', group_id='group')] + self._usage_simulation(*agents) + + self.assertEqual(['group'], sorted(self.shared_storage.keys())) + self.assertEqual(['agent1', 'agent2'], + sorted(self.shared_storage['group'].keys())) + + def test_multiple_groups(self): + agents = [dict(agent_id='agent1', group_id='group1'), + dict(agent_id='agent2', group_id='group2')] + self._usage_simulation(*agents) + + self.assertEqual(['group1', 'group2'], + sorted(self.shared_storage.keys())) + + def test_partitioning(self): + all_resources = ['resource_%s' % i for i in range(1000)] + agents = ['agent_%s' % i for i in range(10)] + + expected_resources = [list() for _ in range(len(agents))] + hr = utils.HashRing(agents) + for r in all_resources: + key = agents.index(hr.get_node(r)) + expected_resources[key].append(r) + + agents_kwargs = [] + for i, agent in enumerate(agents): + agents_kwargs.append(dict(agent_id=agent, + group_id='group', + all_resources=all_resources, + expected_resources=expected_resources[i])) + self._usage_simulation(*agents_kwargs) + + def test_coordination_backend_offline(self): + agents = [dict(agent_id='agent1', + group_id='group', + all_resources=['res1', 'res2'], + expected_resources=[], + coordinator_cls=MockToozCoordExceptionRaiser)] + self._usage_simulation(*agents) + expected_errors = ['Error getting group membership info from ' + 'coordination backend.', + 'Error connecting to coordination backend.'] + for e in expected_errors: + self.assertIn(e, self.str_handler.messages['error']) + + def test_reconnect(self): + coord = self._get_new_started_coordinator({}, 'a', + MockToozCoordExceptionRaiser) + with mock.patch('tooz.coordination.get_coordinator', + return_value=MockToozCoordExceptionRaiser('a', {})): + coord.heartbeat() + expected_errors = ['Error connecting to coordination backend.', + 'Error sending a heartbeat to coordination ' + 'backend.'] + for e in expected_errors: + self.assertIn(e, self.str_handler.messages['error']) + + self.str_handler.messages['error'] = [] + with mock.patch('tooz.coordination.get_coordinator', + return_value=MockToozCoordinator('a', {})): + coord.heartbeat() + for e in expected_errors: + self.assertNotIn(e, self.str_handler.messages['error']) + + def test_group_id_none(self): + coord = self._get_new_started_coordinator({}, 'a') + self.assertTrue(coord._coordinator.is_started) + + with mock.patch.object(coord._coordinator, 'join_group') as mocked: + coord.join_group(None) + self.assertEqual(0, mocked.call_count) + with mock.patch.object(coord._coordinator, 'leave_group') as mocked: + coord.leave_group(None) + self.assertEqual(0, mocked.call_count) + + def test_stop(self): + coord = self._get_new_started_coordinator({}, 'a') + self.assertTrue(coord._coordinator.is_started) + coord.join_group("123") + coord.stop() + self.assertIsEmpty(coord._groups) + self.assertIsNone(coord._coordinator) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/test_decoupled_pipeline.py 
ceilometer-5.0.0~b3/ceilometer/tests/unit/test_decoupled_pipeline.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/test_decoupled_pipeline.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/test_decoupled_pipeline.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,292 @@ +# +# Copyright 2014 Red Hat, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import yaml + +from ceilometer import pipeline +from ceilometer import sample +from ceilometer.tests import pipeline_base + + +class TestDecoupledPipeline(pipeline_base.BasePipelineTestCase): + def _setup_pipeline_cfg(self): + source = {'name': 'test_source', + 'interval': 5, + 'counters': ['a'], + 'resources': [], + 'sinks': ['test_sink']} + sink = {'name': 'test_sink', + 'transformers': [{'name': 'update', 'parameters': {}}], + 'publishers': ['test://']} + self.pipeline_cfg = {'sources': [source], 'sinks': [sink]} + + def _augment_pipeline_cfg(self): + self.pipeline_cfg['sources'].append({ + 'name': 'second_source', + 'interval': 5, + 'counters': ['b'], + 'resources': [], + 'sinks': ['second_sink'] + }) + self.pipeline_cfg['sinks'].append({ + 'name': 'second_sink', + 'transformers': [{ + 'name': 'update', + 'parameters': + { + 'append_name': '_new', + } + }], + 'publishers': ['new'], + }) + + def _break_pipeline_cfg(self): + self.pipeline_cfg['sources'].append({ + 'name': 'second_source', + 'interval': 5, + 'counters': ['b'], + 'resources': [], + 'sinks': ['second_sink'] + }) + self.pipeline_cfg['sinks'].append({ + 'name': 'second_sink', + 'transformers': [{ + 'name': 'update', + 'parameters': + { + 'append_name': '_new', + } + }], + 'publishers': ['except'], + }) + + def _dup_pipeline_name_cfg(self): + self.pipeline_cfg['sources'].append({ + 'name': 'test_source', + 'interval': 5, + 'counters': ['b'], + 'resources': [], + 'sinks': ['test_sink'] + }) + + def _set_pipeline_cfg(self, field, value): + if field in self.pipeline_cfg['sources'][0]: + self.pipeline_cfg['sources'][0][field] = value + else: + self.pipeline_cfg['sinks'][0][field] = value + + def _extend_pipeline_cfg(self, field, value): + if field in self.pipeline_cfg['sources'][0]: + self.pipeline_cfg['sources'][0][field].extend(value) + else: + self.pipeline_cfg['sinks'][0][field].extend(value) + + def _unset_pipeline_cfg(self, field): + if field in self.pipeline_cfg['sources'][0]: + del self.pipeline_cfg['sources'][0][field] + else: + del self.pipeline_cfg['sinks'][0][field] + + def test_source_no_sink(self): + del self.pipeline_cfg['sinks'] + self._exception_create_pipelinemanager() + + def test_source_dangling_sink(self): + self.pipeline_cfg['sources'].append({ + 'name': 'second_source', + 'interval': 5, + 'counters': ['b'], + 'resources': [], + 'sinks': ['second_sink'] + }) + self._exception_create_pipelinemanager() + + def test_sink_no_source(self): + del self.pipeline_cfg['sources'] + self._exception_create_pipelinemanager() + + def test_source_with_multiple_sinks(self): + counter_cfg = ['a', 'b'] + self._set_pipeline_cfg('counters', 
counter_cfg) + self.pipeline_cfg['sinks'].append({ + 'name': 'second_sink', + 'transformers': [{ + 'name': 'update', + 'parameters': + { + 'append_name': '_new', + } + }], + 'publishers': ['new'], + }) + self.pipeline_cfg['sources'][0]['sinks'].append('second_sink') + + pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, + self.transformer_manager) + with pipeline_manager.publisher(None) as p: + p([self.test_counter]) + + self.test_counter = sample.Sample( + name='b', + type=self.test_counter.type, + volume=self.test_counter.volume, + unit=self.test_counter.unit, + user_id=self.test_counter.user_id, + project_id=self.test_counter.project_id, + resource_id=self.test_counter.resource_id, + timestamp=self.test_counter.timestamp, + resource_metadata=self.test_counter.resource_metadata, + ) + + with pipeline_manager.publisher(None) as p: + p([self.test_counter]) + + self.assertEqual(2, len(pipeline_manager.pipelines)) + self.assertEqual('test_source:test_sink', + str(pipeline_manager.pipelines[0])) + self.assertEqual('test_source:second_sink', + str(pipeline_manager.pipelines[1])) + test_publisher = pipeline_manager.pipelines[0].publishers[0] + new_publisher = pipeline_manager.pipelines[1].publishers[0] + for publisher, sfx in [(test_publisher, '_update'), + (new_publisher, '_new')]: + self.assertEqual(2, len(publisher.samples)) + self.assertEqual(2, publisher.calls) + self.assertEqual('a' + sfx, getattr(publisher.samples[0], "name")) + self.assertEqual('b' + sfx, getattr(publisher.samples[1], "name")) + + def test_multiple_sources_with_single_sink(self): + self.pipeline_cfg['sources'].append({ + 'name': 'second_source', + 'interval': 5, + 'counters': ['b'], + 'resources': [], + 'sinks': ['test_sink'] + }) + + pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, + self.transformer_manager) + with pipeline_manager.publisher(None) as p: + p([self.test_counter]) + + self.test_counter = sample.Sample( + name='b', + type=self.test_counter.type, + volume=self.test_counter.volume, + unit=self.test_counter.unit, + user_id=self.test_counter.user_id, + project_id=self.test_counter.project_id, + resource_id=self.test_counter.resource_id, + timestamp=self.test_counter.timestamp, + resource_metadata=self.test_counter.resource_metadata, + ) + + with pipeline_manager.publisher(None) as p: + p([self.test_counter]) + + self.assertEqual(2, len(pipeline_manager.pipelines)) + self.assertEqual('test_source:test_sink', + str(pipeline_manager.pipelines[0])) + self.assertEqual('second_source:test_sink', + str(pipeline_manager.pipelines[1])) + test_publisher = pipeline_manager.pipelines[0].publishers[0] + another_publisher = pipeline_manager.pipelines[1].publishers[0] + for publisher in [test_publisher, another_publisher]: + self.assertEqual(2, len(publisher.samples)) + self.assertEqual(2, publisher.calls) + self.assertEqual('a_update', getattr(publisher.samples[0], "name")) + self.assertEqual('b_update', getattr(publisher.samples[1], "name")) + + transformed_samples = self.TransformerClass.samples + self.assertEqual(2, len(transformed_samples)) + self.assertEqual(['a', 'b'], + [getattr(s, 'name') for s in transformed_samples]) + + def _do_test_rate_of_change_in_boilerplate_pipeline_cfg(self, index, + meters, units): + with open('etc/ceilometer/pipeline.yaml') as fap: + data = fap.read() + pipeline_cfg = yaml.safe_load(data) + for s in pipeline_cfg['sinks']: + s['publishers'] = ['test://'] + pipeline_manager = pipeline.PipelineManager(pipeline_cfg, + self.transformer_manager) + pipe = 
pipeline_manager.pipelines[index] + self._do_test_rate_of_change_mapping(pipe, meters, units) + + def test_rate_of_change_boilerplate_disk_read_cfg(self): + meters = ('disk.read.bytes', 'disk.read.requests') + units = ('B', 'request') + self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(2, + meters, + units) + + def test_rate_of_change_boilerplate_disk_write_cfg(self): + meters = ('disk.write.bytes', 'disk.write.requests') + units = ('B', 'request') + self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(2, + meters, + units) + + def test_rate_of_change_boilerplate_network_incoming_cfg(self): + meters = ('network.incoming.bytes', 'network.incoming.packets') + units = ('B', 'packet') + self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(3, + meters, + units) + + def test_rate_of_change_boilerplate_per_disk_device_read_cfg(self): + meters = ('disk.device.read.bytes', 'disk.device.read.requests') + units = ('B', 'request') + self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(2, + meters, + units) + + def test_rate_of_change_boilerplate_per_disk_device_write_cfg(self): + meters = ('disk.device.write.bytes', 'disk.device.write.requests') + units = ('B', 'request') + self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(2, + meters, + units) + + def test_rate_of_change_boilerplate_network_outgoing_cfg(self): + meters = ('network.outgoing.bytes', 'network.outgoing.packets') + units = ('B', 'packet') + self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(3, + meters, + units) + + def test_duplicated_sinks_names(self): + self.pipeline_cfg['sinks'].append({ + 'name': 'test_sink', + 'publishers': ['except'], + }) + self.assertRaises(pipeline.PipelineException, + pipeline.PipelineManager, + self.pipeline_cfg, + self.transformer_manager) + + def test_duplicated_source_names(self): + self.pipeline_cfg['sources'].append({ + 'name': 'test_source', + 'interval': 5, + 'counters': ['a'], + 'resources': [], + 'sinks': ['test_sink'] + }) + self.assertRaises(pipeline.PipelineException, + pipeline.PipelineManager, + self.pipeline_cfg, + self.transformer_manager) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/test_event_pipeline.py ceilometer-5.0.0~b3/ceilometer/tests/unit/test_event_pipeline.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/test_event_pipeline.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/test_event_pipeline.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,366 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
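
The decoupled fixtures above split a pipeline into two named halves: a source declares what to sample (counters, interval, discovery resources) and which sinks it feeds, while a sink owns the transformer chain and the publishers. On disk the same structure is plain YAML, which _do_test_rate_of_change_in_boilerplate_pipeline_cfg loads with yaml.safe_load from etc/ceilometer/pipeline.yaml. A hypothetical minimal pipeline.yaml equivalent of _setup_pipeline_cfg above, shown round-tripping through the parser:

import yaml

PIPELINE_YAML = """
sources:
    - name: test_source
      interval: 5
      counters: ['a']
      resources: []
      sinks: [test_sink]
sinks:
    - name: test_sink
      transformers:
          - name: update
            parameters: {}
      publishers: ['test://']
"""

pipeline_cfg = yaml.safe_load(PIPELINE_YAML)
assert pipeline_cfg['sources'][0]['sinks'] == ['test_sink']
assert pipeline_cfg['sinks'][0]['publishers'] == ['test://']

Keeping sources and sinks as separate lists joined by name is what lets one source fan out to several sinks (test_source_with_multiple_sinks) and several sources share one sink (test_multiple_sources_with_single_sink).
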
+ +import datetime +import traceback +import uuid + +from oslotest import base +from oslotest import mockpatch + +from ceilometer.event.storage import models +from ceilometer import pipeline +from ceilometer import publisher +from ceilometer.publisher import test as test_publisher + + +class EventPipelineTestCase(base.BaseTestCase): + + def get_publisher(self, url, namespace=''): + fake_drivers = {'test://': test_publisher.TestPublisher, + 'new://': test_publisher.TestPublisher, + 'except://': self.PublisherClassException} + return fake_drivers[url](url) + + class PublisherClassException(publisher.PublisherBase): + def publish_samples(self, ctxt, samples): + pass + + def publish_events(self, ctxt, events): + raise Exception() + + def setUp(self): + super(EventPipelineTestCase, self).setUp() + self.p_type = pipeline.EVENT_TYPE + self.transformer_manager = None + + self.test_event = models.Event( + message_id=uuid.uuid4(), + event_type='a', + generated=datetime.datetime.utcnow(), + traits=[ + models.Trait('t_text', 1, 'text_trait'), + models.Trait('t_int', 2, 'int_trait'), + models.Trait('t_float', 3, 'float_trait'), + models.Trait('t_datetime', 4, 'datetime_trait') + ], + raw={'status': 'started'} + ) + + self.test_event2 = models.Event( + message_id=uuid.uuid4(), + event_type='b', + generated=datetime.datetime.utcnow(), + traits=[ + models.Trait('t_text', 1, 'text_trait'), + models.Trait('t_int', 2, 'int_trait'), + models.Trait('t_float', 3, 'float_trait'), + models.Trait('t_datetime', 4, 'datetime_trait') + ], + raw={'status': 'stopped'} + ) + + self.useFixture(mockpatch.PatchObject( + publisher, 'get_publisher', side_effect=self.get_publisher)) + + self._setup_pipeline_cfg() + + self._reraise_exception = True + self.useFixture(mockpatch.Patch( + 'ceilometer.pipeline.LOG.exception', + side_effect=self._handle_reraise_exception)) + + def _handle_reraise_exception(self, msg): + if self._reraise_exception: + raise Exception(traceback.format_exc()) + + def _setup_pipeline_cfg(self): + """Setup the appropriate form of pipeline config.""" + source = {'name': 'test_source', + 'events': ['a'], + 'sinks': ['test_sink']} + sink = {'name': 'test_sink', + 'publishers': ['test://']} + self.pipeline_cfg = {'sources': [source], 'sinks': [sink]} + + def _augment_pipeline_cfg(self): + """Augment the pipeline config with an additional element.""" + self.pipeline_cfg['sources'].append({ + 'name': 'second_source', + 'events': ['b'], + 'sinks': ['second_sink'] + }) + self.pipeline_cfg['sinks'].append({ + 'name': 'second_sink', + 'publishers': ['new://'], + }) + + def _break_pipeline_cfg(self): + """Break the pipeline config with a malformed element.""" + self.pipeline_cfg['sources'].append({ + 'name': 'second_source', + 'events': ['b'], + 'sinks': ['second_sink'] + }) + self.pipeline_cfg['sinks'].append({ + 'name': 'second_sink', + 'publishers': ['except'], + }) + + def _dup_pipeline_name_cfg(self): + """Break the pipeline config with duplicate pipeline name.""" + self.pipeline_cfg['sources'].append({ + 'name': 'test_source', + 'events': ['a'], + 'sinks': ['test_sink'] + }) + + def _set_pipeline_cfg(self, field, value): + if field in self.pipeline_cfg['sources'][0]: + self.pipeline_cfg['sources'][0][field] = value + else: + self.pipeline_cfg['sinks'][0][field] = value + + def _extend_pipeline_cfg(self, field, value): + if field in self.pipeline_cfg['sources'][0]: + self.pipeline_cfg['sources'][0][field].extend(value) + else: + self.pipeline_cfg['sinks'][0][field].extend(value) + + def 
_unset_pipeline_cfg(self, field): + if field in self.pipeline_cfg['sources'][0]: + del self.pipeline_cfg['sources'][0][field] + else: + del self.pipeline_cfg['sinks'][0][field] + + def _exception_create_pipelinemanager(self): + self.assertRaises(pipeline.PipelineException, + pipeline.PipelineManager, + self.pipeline_cfg, + self.transformer_manager, + self.p_type) + + def test_no_events(self): + self._unset_pipeline_cfg('events') + self._exception_create_pipelinemanager() + + def test_no_name(self): + self._unset_pipeline_cfg('name') + self._exception_create_pipelinemanager() + + def test_name(self): + pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, + self.transformer_manager, + self.p_type) + for pipe in pipeline_manager.pipelines: + self.assertTrue(pipe.name.startswith('event:')) + + def test_no_publishers(self): + self._unset_pipeline_cfg('publishers') + self._exception_create_pipelinemanager() + + def test_check_events_include_exclude_same(self): + event_cfg = ['a', '!a'] + self._set_pipeline_cfg('events', event_cfg) + self._exception_create_pipelinemanager() + + def test_check_events_include_exclude(self): + event_cfg = ['a', '!b'] + self._set_pipeline_cfg('events', event_cfg) + self._exception_create_pipelinemanager() + + def test_check_events_wildcard_included(self): + event_cfg = ['a', '*'] + self._set_pipeline_cfg('events', event_cfg) + self._exception_create_pipelinemanager() + + def test_check_publishers_invalid_publisher(self): + publisher_cfg = ['test_invalid'] + self._set_pipeline_cfg('publishers', publisher_cfg) + + def test_multiple_included_events(self): + event_cfg = ['a', 'b'] + self._set_pipeline_cfg('events', event_cfg) + pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, + self.transformer_manager, + self.p_type) + + with pipeline_manager.publisher(None) as p: + p([self.test_event]) + + publisher = pipeline_manager.pipelines[0].publishers[0] + self.assertEqual(1, len(publisher.events)) + + with pipeline_manager.publisher(None) as p: + p([self.test_event2]) + + self.assertEqual(2, len(publisher.events)) + self.assertEqual('a', getattr(publisher.events[0], 'event_type')) + self.assertEqual('b', getattr(publisher.events[1], 'event_type')) + + def test_event_non_match(self): + event_cfg = ['nomatch'] + self._set_pipeline_cfg('events', event_cfg) + pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, + self.transformer_manager, + self.p_type) + with pipeline_manager.publisher(None) as p: + p([self.test_event]) + + publisher = pipeline_manager.pipelines[0].publishers[0] + self.assertEqual(0, len(publisher.events)) + self.assertEqual(0, publisher.calls) + + def test_wildcard_event(self): + event_cfg = ['*'] + self._set_pipeline_cfg('events', event_cfg) + pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, + self.transformer_manager, + self.p_type) + with pipeline_manager.publisher(None) as p: + p([self.test_event]) + + publisher = pipeline_manager.pipelines[0].publishers[0] + self.assertEqual(1, len(publisher.events)) + self.assertEqual('a', getattr(publisher.events[0], 'event_type')) + + def test_wildcard_excluded_events(self): + event_cfg = ['*', '!a'] + self._set_pipeline_cfg('events', event_cfg) + pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, + self.transformer_manager, + self.p_type) + self.assertFalse(pipeline_manager.pipelines[0].support_event('a')) + + def test_wildcard_excluded_events_not_excluded(self): + event_cfg = ['*', '!b'] + self._set_pipeline_cfg('events', event_cfg) + pipeline_manager = 
pipeline.PipelineManager(self.pipeline_cfg, + self.transformer_manager, + self.p_type) + with pipeline_manager.publisher(None) as p: + p([self.test_event]) + publisher = pipeline_manager.pipelines[0].publishers[0] + self.assertEqual(1, len(publisher.events)) + self.assertEqual('a', getattr(publisher.events[0], 'event_type')) + + def test_all_excluded_events_not_excluded(self): + event_cfg = ['!b', '!c'] + self._set_pipeline_cfg('events', event_cfg) + pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, + self.transformer_manager, + self.p_type) + with pipeline_manager.publisher(None) as p: + p([self.test_event]) + + publisher = pipeline_manager.pipelines[0].publishers[0] + self.assertEqual(1, len(publisher.events)) + self.assertEqual('a', getattr(publisher.events[0], 'event_type')) + + def test_all_excluded_events_excluded(self): + event_cfg = ['!a', '!c'] + self._set_pipeline_cfg('events', event_cfg) + pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, + self.transformer_manager, + self.p_type) + self.assertFalse(pipeline_manager.pipelines[0].support_event('a')) + self.assertTrue(pipeline_manager.pipelines[0].support_event('b')) + self.assertFalse(pipeline_manager.pipelines[0].support_event('c')) + + def test_wildcard_and_excluded_wildcard_events(self): + event_cfg = ['*', '!compute.*'] + self._set_pipeline_cfg('events', event_cfg) + pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, + self.transformer_manager, + self.p_type) + self.assertFalse(pipeline_manager.pipelines[0]. + support_event('compute.instance.create.start')) + self.assertTrue(pipeline_manager.pipelines[0]. + support_event('identity.user.create')) + + def test_included_event_and_wildcard_events(self): + event_cfg = ['compute.instance.create.start', 'identity.*'] + self._set_pipeline_cfg('events', event_cfg) + pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, + self.transformer_manager, + self.p_type) + self.assertTrue(pipeline_manager.pipelines[0]. + support_event('identity.user.create')) + self.assertTrue(pipeline_manager.pipelines[0]. + support_event('compute.instance.create.start')) + self.assertFalse(pipeline_manager.pipelines[0]. + support_event('compute.instance.create.stop')) + + def test_excluded_event_and_excluded_wildcard_events(self): + event_cfg = ['!compute.instance.create.start', '!identity.*'] + self._set_pipeline_cfg('events', event_cfg) + pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, + self.transformer_manager, + self.p_type) + self.assertFalse(pipeline_manager.pipelines[0]. + support_event('identity.user.create')) + self.assertFalse(pipeline_manager.pipelines[0]. + support_event('compute.instance.create.start')) + self.assertTrue(pipeline_manager.pipelines[0]. 
+ support_event('compute.instance.create.stop')) + + def test_multiple_pipeline(self): + self._augment_pipeline_cfg() + + pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, + self.transformer_manager, + self.p_type) + with pipeline_manager.publisher(None) as p: + p([self.test_event, self.test_event2]) + + publisher = pipeline_manager.pipelines[0].publishers[0] + self.assertEqual(1, len(publisher.events)) + self.assertEqual(1, publisher.calls) + self.assertEqual('a', getattr(publisher.events[0], 'event_type')) + new_publisher = pipeline_manager.pipelines[1].publishers[0] + self.assertEqual(1, len(new_publisher.events)) + self.assertEqual(1, new_publisher.calls) + self.assertEqual('b', getattr(new_publisher.events[0], 'event_type')) + + def test_multiple_publisher(self): + self._set_pipeline_cfg('publishers', ['test://', 'new://']) + pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, + self.transformer_manager, + self.p_type) + + with pipeline_manager.publisher(None) as p: + p([self.test_event]) + + publisher = pipeline_manager.pipelines[0].publishers[0] + new_publisher = pipeline_manager.pipelines[0].publishers[1] + self.assertEqual(1, len(publisher.events)) + self.assertEqual(1, len(new_publisher.events)) + self.assertEqual('a', getattr(new_publisher.events[0], 'event_type')) + self.assertEqual('a', getattr(publisher.events[0], 'event_type')) + + def test_multiple_publisher_isolation(self): + self._reraise_exception = False + self._set_pipeline_cfg('publishers', ['except://', 'new://']) + pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, + self.transformer_manager, + self.p_type) + with pipeline_manager.publisher(None) as p: + p([self.test_event]) + + publisher = pipeline_manager.pipelines[0].publishers[1] + self.assertEqual(1, len(publisher.events)) + self.assertEqual('a', getattr(publisher.events[0], 'event_type')) + + def test_unique_pipeline_names(self): + self._dup_pipeline_name_cfg() + self._exception_create_pipelinemanager() diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/test_hacking.py ceilometer-5.0.0~b3/ceilometer/tests/unit/test_hacking.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/test_hacking.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/test_hacking.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,87 @@ +# Copyright 2015 Huawei Technologies Co., Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import textwrap + +import mock +import pep8 +from testtools import testcase + +from ceilometer.hacking import checks + + +class HackingTestCase(testcase.TestCase): + """Test cases for ceilometer specific hacking rules. + + This class tests the hacking checks in ceilometer.hacking.checks by + passing strings to the check methods like the pep8/flake8 parser would. + The parser loops over each line in the file and then passes the parameters + to the check method. The parameter names in the check method dictate what + type of object is passed to the check method. 
The parameter types are:: + + logical_line: A processed line with the following modifications: + - Multi-line statements converted to a single line. + - Stripped left and right. + - Contents of strings replaced with "xxx" of same length. + - Comments removed. + physical_line: Raw line of text from the input file. + lines: a list of the raw lines from the input file + tokens: the tokens that contribute to this logical line + line_number: line number in the input file + total_lines: number of lines in the input file + blank_lines: blank lines before this one + indent_char: indentation character in this file (" " or "\t") + indent_level: indentation (with tabs expanded to multiples of 8) + previous_indent_level: indentation on previous line + previous_logical: previous logical line + filename: Path of the file being run through pep8 + + When running a test on a check method the return will be False/None if + there is no violation in the sample input. If there is an error a tuple is + returned with a position in the line, and a message. So to check the result + just assertTrue if the check is expected to fail and assertFalse if it + should pass. + """ + + # We are patching pep8 so that only the check under test is actually + # installed. + @mock.patch('pep8._checks', + {'physical_line': {}, 'logical_line': {}, 'tree': {}}) + def _run_check(self, code, checker, filename=None): + pep8.register_check(checker) + + lines = textwrap.dedent(code).strip().splitlines(True) + + checker = pep8.Checker(filename=filename, lines=lines) + checker.check_all() + checker.report._deferred_print.sort() + return checker.report._deferred_print + + def _assert_has_errors(self, code, checker, expected_errors=None, + filename=None): + actual_errors = [e[:3] for e in + self._run_check(code, checker, filename)] + self.assertEqual(expected_errors or [], actual_errors) + + def test_oslo_namespace_imports_check(self): + codes = [ + "from oslo.concurrency import processutils", + "from oslo.config import cfg", + "import oslo.i18n", + "from oslo.utils import timeutils", + "from oslo.serialization import jsonutils", + ] + for code in codes: + self._assert_has_errors(code, checks.check_oslo_namespace_imports, + expected_errors=[(1, 0, "C300")]) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/test_messaging.py ceilometer-5.0.0~b3/ceilometer/tests/unit/test_messaging.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/test_messaging.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/test_messaging.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,65 @@ +# Copyright (C) 2014 eNovance SAS +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
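
The hacking tests above register a single check with pep8 and feed it code one logical line at a time; the C300 rule they exercise rejects imports from the retired oslo.* namespace packages. A minimal sketch of such a logical-line check follows; the regex and message format are illustrative (the project's actual rule lives in ceilometer/hacking/checks.py):

import re

_OSLO_NAMESPACE_RE = re.compile(r"(from|import)\s+oslo\.")


def check_oslo_namespace_imports(logical_line):
    # pep8/flake8 calls this once per logical line; yielding an
    # (offset, message) tuple reports a violation at that column.
    if _OSLO_NAMESPACE_RE.match(logical_line):
        yield (0, "C300: '%s' must be used instead of '%s'" % (
            logical_line.replace('oslo.', 'oslo_'), logical_line))

Registered via pep8.register_check, as _run_check does above, each yielded tuple surfaces in checker.report as a (line_number, offset, message) entry, which is the triple the test slices out and compares against (1, 0, "C300").
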
+ +from oslo_config import fixture as fixture_config +import oslo_messaging.conffixture +from oslotest import base + +from ceilometer import messaging + + +class MessagingTests(base.BaseTestCase): + def setUp(self): + super(MessagingTests, self).setUp() + self.CONF = self.useFixture(fixture_config.Config()).conf + self.useFixture(oslo_messaging.conffixture.ConfFixture(self.CONF)) + + def test_get_transport_invalid_url(self): + self.assertRaises(oslo_messaging.InvalidTransportURL, + messaging.get_transport, "notvalid!") + + def test_get_transport_url_caching(self): + t1 = messaging.get_transport('fake://') + t2 = messaging.get_transport('fake://') + self.assertEqual(t1, t2) + + def test_get_transport_default_url_caching(self): + t1 = messaging.get_transport() + t2 = messaging.get_transport() + self.assertEqual(t1, t2) + + def test_get_transport_default_url_no_caching(self): + t1 = messaging.get_transport(cache=False) + t2 = messaging.get_transport(cache=False) + self.assertNotEqual(t1, t2) + + def test_get_transport_url_no_caching(self): + t1 = messaging.get_transport('fake://', cache=False) + t2 = messaging.get_transport('fake://', cache=False) + self.assertNotEqual(t1, t2) + + def test_get_transport_default_url_caching_mix(self): + t1 = messaging.get_transport() + t2 = messaging.get_transport(cache=False) + self.assertNotEqual(t1, t2) + + def test_get_transport_url_caching_mix(self): + t1 = messaging.get_transport('fake://') + t2 = messaging.get_transport('fake://', cache=False) + self.assertNotEqual(t1, t2) + + def test_get_transport_optional(self): + self.CONF.set_override('rpc_backend', '') + self.assertIsNone(messaging.get_transport(optional=True, + cache=False)) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/test_middleware.py ceilometer-5.0.0~b3/ceilometer/tests/unit/test_middleware.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/test_middleware.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/test_middleware.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,100 @@ +# +# Copyright 2013-2014 eNovance +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
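
The messaging tests above fix the caching contract: two calls with equal URLs share one transport object, cache=False always constructs a fresh one, and the default URL gets its own cache slot. A minimal sketch of a URL-keyed cache with those semantics (a hypothetical module-level cache, not ceilometer.messaging itself; error handling is reduced to the optional flag the last test exercises):

import oslo_messaging

_TRANSPORTS = {}


def get_transport(conf, url=None, optional=False, cache=True):
    """Return a transport for url, reusing a cached one when allowed."""
    cache_key = url or '<default>'
    transport = _TRANSPORTS.get(cache_key) if cache else None
    if transport is None:
        try:
            transport = oslo_messaging.get_transport(conf, url)
        except oslo_messaging.InvalidTransportURL:
            if not optional or url:
                raise
            return None
        if cache:
            _TRANSPORTS[cache_key] = transport
    return transport

The cache=False path deliberately skips both the lookup and the store, which is why test_get_transport_url_caching_mix still sees two distinct objects.
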
+import mock +from oslo_config import fixture as fixture_config + +from ceilometer import middleware +from ceilometer.tests import base + + +HTTP_REQUEST = { + u'_context_auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', + u'_context_is_admin': True, + u'_context_project_id': u'7c150a59fe714e6f9263774af9688f0e', + u'_context_quota_class': None, + u'_context_read_deleted': u'no', + u'_context_remote_address': u'10.0.2.15', + u'_context_request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', + u'_context_roles': [u'admin'], + u'_context_timestamp': u'2012-05-08T20:23:41.425105', + u'_context_user_id': u'1e3ce043029547f1a61c1996d1a531a2', + u'event_type': u'http.request', + u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', + u'payload': {u'request': {'HTTP_X_FOOBAR': 'foobaz', + 'HTTP_X_USER_ID': 'jd-x32', + 'HTTP_X_PROJECT_ID': 'project-id', + 'HTTP_X_SERVICE_NAME': 'nova'}}, + u'priority': u'INFO', + u'publisher_id': u'compute.vagrant-precise', + u'timestamp': u'2012-05-08 20:23:48.028195', +} + +HTTP_RESPONSE = { + u'_context_auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', + u'_context_is_admin': True, + u'_context_project_id': u'7c150a59fe714e6f9263774af9688f0e', + u'_context_quota_class': None, + u'_context_read_deleted': u'no', + u'_context_remote_address': u'10.0.2.15', + u'_context_request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', + u'_context_roles': [u'admin'], + u'_context_timestamp': u'2012-05-08T20:23:41.425105', + u'_context_user_id': u'1e3ce043029547f1a61c1996d1a531a2', + u'event_type': u'http.response', + u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', + u'payload': {u'request': {'HTTP_X_FOOBAR': 'foobaz', + 'HTTP_X_USER_ID': 'jd-x32', + 'HTTP_X_PROJECT_ID': 'project-id', + 'HTTP_X_SERVICE_NAME': 'nova'}, + u'response': {'status': '200 OK'}}, + u'priority': u'INFO', + u'publisher_id': u'compute.vagrant-precise', + u'timestamp': u'2012-05-08 20:23:48.028195', +} + + +class TestNotifications(base.BaseTestCase): + + def setUp(self): + super(TestNotifications, self).setUp() + self.CONF = self.useFixture(fixture_config.Config()).conf + self.setup_messaging(self.CONF) + + def test_process_request_notification(self): + sample = list(middleware.HTTPRequest(mock.Mock()).process_notification( + HTTP_REQUEST + ))[0] + self.assertEqual(HTTP_REQUEST['payload']['request']['HTTP_X_USER_ID'], + sample.user_id) + self.assertEqual(HTTP_REQUEST['payload']['request'] + ['HTTP_X_PROJECT_ID'], sample.project_id) + self.assertEqual(HTTP_REQUEST['payload']['request'] + ['HTTP_X_SERVICE_NAME'], sample.resource_id) + self.assertEqual(1, sample.volume) + + def test_process_response_notification(self): + sample = list(middleware.HTTPResponse( + mock.Mock()).process_notification(HTTP_RESPONSE))[0] + self.assertEqual(HTTP_RESPONSE['payload']['request']['HTTP_X_USER_ID'], + sample.user_id) + self.assertEqual(HTTP_RESPONSE['payload']['request'] + ['HTTP_X_PROJECT_ID'], sample.project_id) + self.assertEqual(HTTP_RESPONSE['payload']['request'] + ['HTTP_X_SERVICE_NAME'], sample.resource_id) + self.assertEqual(1, sample.volume) + + def test_targets(self): + targets = middleware.HTTPRequest(mock.Mock()).get_targets(self.CONF) + self.assertEqual(4, len(targets)) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/test_neutronclient.py ceilometer-5.0.0~b3/ceilometer/tests/unit/test_neutronclient.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/test_neutronclient.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/test_neutronclient.py 2015-09-03 
13:05:55.000000000 +0000 @@ -0,0 +1,193 @@ +# Copyright (C) 2014 eNovance SAS +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from oslotest import base + +from ceilometer import neutron_client + + +class TestNeutronClient(base.BaseTestCase): + + def setUp(self): + super(TestNeutronClient, self).setUp() + self.nc = neutron_client.Client() + + @staticmethod + def fake_ports_list(): + return {'ports': + [{'admin_state_up': True, + 'device_id': '674e553b-8df9-4321-87d9-93ba05b93558', + 'device_owner': 'network:router_gateway', + 'extra_dhcp_opts': [], + 'id': '96d49cc3-4e01-40ce-9cac-c0e32642a442', + 'mac_address': 'fa:16:3e:c5:35:93', + 'name': '', + 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', + 'status': 'ACTIVE', + 'tenant_id': '89271fa581ab4380bf172f868c3615f9'}, + ]} + + def test_port_get_all(self): + with mock.patch.object(self.nc.client, 'list_ports', + side_effect=self.fake_ports_list): + ports = self.nc.port_get_all() + + self.assertEqual(1, len(ports)) + self.assertEqual('96d49cc3-4e01-40ce-9cac-c0e32642a442', + ports[0]['id']) + + @staticmethod + def fake_networks_list(): + return {'networks': + [{'admin_state_up': True, + 'id': '298a3088-a446-4d5a-bad8-f92ecacd786b', + 'name': 'public', + 'provider:network_type': 'gre', + 'provider:physical_network': None, + 'provider:segmentation_id': 2, + 'router:external': True, + 'shared': False, + 'status': 'ACTIVE', + 'subnets': [u'c4b6f5b8-3508-4896-b238-a441f25fb492'], + 'tenant_id': '62d6f08bbd3a44f6ad6f00ca15cce4e5'}, + ]} + + def test_network_get_all(self): + with mock.patch.object(self.nc.client, 'list_networks', + side_effect=self.fake_networks_list): + networks = self.nc.network_get_all() + + self.assertEqual(1, len(networks)) + self.assertEqual('298a3088-a446-4d5a-bad8-f92ecacd786b', + networks[0]['id']) + + @staticmethod + def fake_pool_list(): + return {'pools': [{'status': 'ACTIVE', + 'lb_method': 'ROUND_ROBIN', + 'protocol': 'HTTP', + 'description': '', + 'health_monitors': [], + 'members': [], + 'status_description': None, + 'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', + 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', + 'name': 'mylb', + 'admin_state_up': True, + 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', + 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', + 'health_monitors_status': []}, + ]} + + def test_pool_list(self): + with mock.patch.object(self.nc.client, 'list_pools', + side_effect=self.fake_pool_list): + pools = self.nc.pool_get_all() + + self.assertEqual(1, len(pools)) + self.assertEqual('ce73ad36-437d-4c84-aee1-186027d3da9a', + pools[0]['id']) + + @staticmethod + def fake_vip_list(): + return {'vips': [{'status': 'ACTIVE', + 'status_description': None, + 'protocol': 'HTTP', + 'description': '', + 'admin_state_up': True, + 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', + 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', + 'connection_limit': -1, + 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', + 'session_persistence': None, + 'address': '10.0.0.2', + 'protocol_port': 80, + 
'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', + 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', + 'name': 'myvip'}, + ]} + + def test_vip_list(self): + with mock.patch.object(self.nc.client, 'list_vips', + side_effect=self.fake_vip_list): + vips = self.nc.vip_get_all() + + self.assertEqual(1, len(vips)) + self.assertEqual('cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', + vips[0]['id']) + + @staticmethod + def fake_member_list(): + return {'members': [{'status': 'ACTIVE', + 'protocol_port': 80, + 'weight': 1, + 'admin_state_up': True, + 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', + 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', + 'address': '10.0.0.3', + 'status_description': None, + 'id': '290b61eb-07bc-4372-9fbf-36459dd0f96b'}, + ]} + + def test_member_list(self): + with mock.patch.object(self.nc.client, 'list_members', + side_effect=self.fake_member_list): + members = self.nc.member_get_all() + + self.assertEqual(1, len(members)) + self.assertEqual('290b61eb-07bc-4372-9fbf-36459dd0f96b', + members[0]['id']) + + @staticmethod + def fake_monitors_list(): + return {'health_monitors': + [{'id': '34ae33e1-0035-49e2-a2ca-77d5d3fab365', + 'admin_state_up': True, + 'tenant_id': "d5d2817dae6b42159be9b665b64beb0e", + 'delay': 2, + 'max_retries': 5, + 'timeout': 5, + 'pools': [], + 'type': 'PING', + }]} + + def test_monitor_list(self): + with mock.patch.object(self.nc.client, 'list_health_monitors', + side_effect=self.fake_monitors_list): + monitors = self.nc.health_monitor_get_all() + + self.assertEqual(1, len(monitors)) + self.assertEqual('34ae33e1-0035-49e2-a2ca-77d5d3fab365', + monitors[0]['id']) + + @staticmethod + def fake_pool_stats(fake_pool): + return {'stats': + [{'active_connections': 1, + 'total_connections': 2, + 'bytes_in': 3, + 'bytes_out': 4 + }]} + + def test_pool_stats(self): + with mock.patch.object(self.nc.client, 'retrieve_pool_stats', + side_effect=self.fake_pool_stats): + stats = self.nc.pool_stats('fake_pool')['stats'] + + self.assertEqual(1, len(stats)) + self.assertEqual(1, stats[0]['active_connections']) + self.assertEqual(2, stats[0]['total_connections']) + self.assertEqual(3, stats[0]['bytes_in']) + self.assertEqual(4, stats[0]['bytes_out']) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/test_novaclient.py ceilometer-5.0.0~b3/ceilometer/tests/unit/test_novaclient.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/test_novaclient.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/test_novaclient.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,250 @@ +#!/usr/bin/env python +# +# Copyright 2013-2014 eNovance +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
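
test_neutronclient.py above and the nova client tests below share one recipe: instantiate the real wrapper, then patch the underlying python-*client call with a side_effect that returns a canned API payload, so only the wrapper's filtering and enrichment logic is exercised. The pattern reduced to its core (class and payload names here are made up for illustration, not taken from the ceilometer tree):

import mock


class FakeNeutron(object):
    def list_ports(self):
        raise AssertionError('patched out in tests')


def fake_ports_list():
    # Canned payload shaped like a Neutron API response.
    return {'ports': [{'id': 'p1', 'status': 'ACTIVE'}]}


client = FakeNeutron()
with mock.patch.object(client, 'list_ports', side_effect=fake_ports_list):
    assert client.list_ports()['ports'][0]['id'] == 'p1'

Using side_effect rather than return_value keeps the fake a plain function, so the tests below can also make it raise novaclient.exceptions.NotFound for unknown flavor and image ids.
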
+ +import mock +import novaclient +from oslo_config import fixture as fixture_config +from oslotest import base +from oslotest import mockpatch + +from ceilometer import nova_client + + +class TestNovaClient(base.BaseTestCase): + + def setUp(self): + super(TestNovaClient, self).setUp() + self._flavors_count = 0 + self._images_count = 0 + self.nv = nova_client.Client() + self.useFixture(mockpatch.PatchObject( + self.nv.nova_client.flavors, 'get', + side_effect=self.fake_flavors_get)) + self.useFixture(mockpatch.PatchObject( + self.nv.nova_client.images, 'get', + side_effect=self.fake_images_get)) + self.CONF = self.useFixture(fixture_config.Config()).conf + + def fake_flavors_get(self, *args, **kwargs): + self._flavors_count += 1 + a = mock.MagicMock() + a.id = args[0] + if a.id == 1: + a.name = 'm1.tiny' + elif a.id == 2: + a.name = 'm1.large' + else: + raise novaclient.exceptions.NotFound('foobar') + return a + + def fake_images_get(self, *args, **kwargs): + self._images_count += 1 + a = mock.MagicMock() + a.id = args[0] + image_details = { + 1: ('ubuntu-12.04-x86', dict(kernel_id=11, ramdisk_id=21)), + 2: ('centos-5.4-x64', dict(kernel_id=12, ramdisk_id=22)), + 3: ('rhel-6-x64', None), + 4: ('rhel-6-x64', dict()), + 5: ('rhel-6-x64', dict(kernel_id=11)), + 6: ('rhel-6-x64', dict(ramdisk_id=21)) + } + + if a.id in image_details: + a.name = image_details[a.id][0] + a.metadata = image_details[a.id][1] + else: + raise novaclient.exceptions.NotFound('foobar') + + return a + + @staticmethod + def fake_servers_list(*args, **kwargs): + a = mock.MagicMock() + a.id = 42 + a.flavor = {'id': 1} + a.image = {'id': 1} + b = mock.MagicMock() + b.id = 43 + b.flavor = {'id': 2} + b.image = {'id': 2} + return [a, b] + + def test_instance_get_all_by_host(self): + with mock.patch.object(self.nv.nova_client.servers, 'list', + side_effect=self.fake_servers_list): + instances = self.nv.instance_get_all_by_host('foobar') + + self.assertEqual(2, len(instances)) + self.assertEqual('m1.tiny', instances[0].flavor['name']) + self.assertEqual('ubuntu-12.04-x86', instances[0].image['name']) + self.assertEqual(11, instances[0].kernel_id) + self.assertEqual(21, instances[0].ramdisk_id) + + def test_instance_get_all(self): + with mock.patch.object(self.nv.nova_client.servers, 'list', + side_effect=self.fake_servers_list): + instances = self.nv.instance_get_all() + + self.assertEqual(2, len(instances)) + self.assertEqual(42, instances[0].id) + self.assertEqual(1, instances[0].flavor['id']) + self.assertEqual(1, instances[0].image['id']) + + @staticmethod + def fake_servers_list_unknown_flavor(*args, **kwargs): + a = mock.MagicMock() + a.id = 42 + a.flavor = {'id': 666} + a.image = {'id': 1} + return [a] + + def test_instance_get_all_by_host_unknown_flavor(self): + with mock.patch.object( + self.nv.nova_client.servers, 'list', + side_effect=self.fake_servers_list_unknown_flavor): + instances = self.nv.instance_get_all_by_host('foobar') + + self.assertEqual(1, len(instances)) + self.assertEqual('unknown-id-666', instances[0].flavor['name']) + + @staticmethod + def fake_servers_list_unknown_image(*args, **kwargs): + a = mock.MagicMock() + a.id = 42 + a.flavor = {'id': 1} + a.image = {'id': 666} + return [a] + + @staticmethod + def fake_servers_list_image_missing_metadata(*args, **kwargs): + a = mock.MagicMock() + a.id = 42 + a.flavor = {'id': 1} + a.image = {'id': args[0]} + return [a] + + @staticmethod + def fake_instance_image_missing(*args, **kwargs): + a = mock.MagicMock() + a.id = 42 + a.flavor = {'id': 666} + a.image = 
None + return [a] + + def test_instance_get_all_by_host_unknown_image(self): + with mock.patch.object( + self.nv.nova_client.servers, 'list', + side_effect=self.fake_servers_list_unknown_image): + instances = self.nv.instance_get_all_by_host('foobar') + + self.assertEqual(1, len(instances)) + self.assertEqual('unknown-id-666', instances[0].image['name']) + + def test_with_flavor_and_image(self): + results = self.nv._with_flavor_and_image(self.fake_servers_list()) + instance = results[0] + self.assertEqual(2, len(results)) + self.assertEqual('ubuntu-12.04-x86', instance.image['name']) + self.assertEqual('m1.tiny', instance.flavor['name']) + self.assertEqual(11, instance.kernel_id) + self.assertEqual(21, instance.ramdisk_id) + + def test_with_flavor_and_image_unknown_image(self): + instances = self.fake_servers_list_unknown_image() + results = self.nv._with_flavor_and_image(instances) + instance = results[0] + self.assertEqual('unknown-id-666', instance.image['name']) + self.assertNotEqual(instance.flavor['name'], 'unknown-id-666') + self.assertIsNone(instance.kernel_id) + self.assertIsNone(instance.ramdisk_id) + + def test_with_flavor_and_image_unknown_flavor(self): + instances = self.fake_servers_list_unknown_flavor() + results = self.nv._with_flavor_and_image(instances) + instance = results[0] + self.assertEqual('unknown-id-666', instance.flavor['name']) + self.assertEqual(0, instance.flavor['vcpus']) + self.assertEqual(0, instance.flavor['ram']) + self.assertEqual(0, instance.flavor['disk']) + self.assertNotEqual(instance.image['name'], 'unknown-id-666') + self.assertEqual(11, instance.kernel_id) + self.assertEqual(21, instance.ramdisk_id) + + def test_with_flavor_and_image_none_metadata(self): + instances = self.fake_servers_list_image_missing_metadata(3) + results = self.nv._with_flavor_and_image(instances) + instance = results[0] + self.assertIsNone(instance.kernel_id) + self.assertIsNone(instance.ramdisk_id) + + def test_with_flavor_and_image_missing_metadata(self): + instances = self.fake_servers_list_image_missing_metadata(4) + results = self.nv._with_flavor_and_image(instances) + instance = results[0] + self.assertIsNone(instance.kernel_id) + self.assertIsNone(instance.ramdisk_id) + + def test_with_flavor_and_image_missing_ramdisk(self): + instances = self.fake_servers_list_image_missing_metadata(5) + results = self.nv._with_flavor_and_image(instances) + instance = results[0] + self.assertEqual(11, instance.kernel_id) + self.assertIsNone(instance.ramdisk_id) + + def test_with_flavor_and_image_missing_kernel(self): + instances = self.fake_servers_list_image_missing_metadata(6) + results = self.nv._with_flavor_and_image(instances) + instance = results[0] + self.assertIsNone(instance.kernel_id) + self.assertEqual(21, instance.ramdisk_id) + + def test_with_flavor_and_image_no_cache(self): + results = self.nv._with_flavor_and_image(self.fake_servers_list()) + self.assertEqual(2, len(results)) + self.assertEqual(2, self._flavors_count) + self.assertEqual(2, self._images_count) + + def test_with_flavor_and_image_cache(self): + results = self.nv._with_flavor_and_image(self.fake_servers_list() * 2) + self.assertEqual(4, len(results)) + self.assertEqual(2, self._flavors_count) + self.assertEqual(2, self._images_count) + + def test_with_flavor_and_image_unknown_image_cache(self): + instances = self.fake_servers_list_unknown_image() + results = self.nv._with_flavor_and_image(instances * 2) + self.assertEqual(2, len(results)) + self.assertEqual(1, self._flavors_count) + self.assertEqual(1, 
self._images_count) + for instance in results: + self.assertEqual('unknown-id-666', instance.image['name']) + self.assertNotEqual(instance.flavor['name'], 'unknown-id-666') + self.assertIsNone(instance.kernel_id) + self.assertIsNone(instance.ramdisk_id) + + def test_with_missing_image_instance(self): + instances = self.fake_instance_image_missing() + results = self.nv._with_flavor_and_image(instances) + instance = results[0] + self.assertIsNone(instance.kernel_id) + self.assertIsNone(instance.image) + self.assertIsNone(instance.ramdisk_id) + + def test_with_nova_http_log_debug(self): + self.CONF.set_override("nova_http_log_debug", True) + self.nv = nova_client.Client() + self.assertTrue(self.nv.nova_client.client.http_log_debug) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/test_sample.py ceilometer-5.0.0~b3/ceilometer/tests/unit/test_sample.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/test_sample.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer/tests/unit/test_sample.py 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,68 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Tests for ceilometer/sample.py""" + +import datetime + +from ceilometer import sample +from ceilometer.tests import base + + +class TestSample(base.BaseTestCase): + SAMPLE = sample.Sample( + name='cpu', + type=sample.TYPE_CUMULATIVE, + unit='ns', + volume='1234567', + user_id='56c5692032f34041900342503fecab30', + project_id='ac9494df2d9d4e709bac378cceabaf23', + resource_id='1ca738a1-c49c-4401-8346-5c60ebdb03f4', + timestamp=datetime.datetime(2014, 10, 29, 14, 12, 15, 485877), + resource_metadata={} + ) + + def test_sample_string_format(self): + expected = ('<name: cpu, volume: 1234567, resource_id: ' + '1ca738a1-c49c-4401-8346-5c60ebdb03f4, ' + 'timestamp: 2014-10-29 14:12:15.485877>') + self.assertEqual(expected, str(self.SAMPLE)) + + def test_sample_from_notifications_list(self): + msg = { + 'event_type': u'sample.create', + 'timestamp': u'2015-06-1909: 19: 35.786893', + 'payload': [{u'counter_name': u'instance100'}], + 'priority': 'info', + 'publisher_id': u'ceilometer.api', + 'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e' + } + s = sample.Sample.from_notification( + 'sample', 'type', 1.0, '%', 'user', 'project', 'res', msg) + expected = {'event_type': msg['event_type'], + 'host': msg['publisher_id']} + self.assertEqual(expected, s.resource_metadata) + + def test_sample_from_notifications_dict(self): + msg = { + 'event_type': u'sample.create', + 'timestamp': u'2015-06-1909: 19: 35.786893', + 'payload': {u'counter_name': u'instance100'}, + 'priority': 'info', + 'publisher_id': u'ceilometer.api', + 'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e' + } + s = sample.Sample.from_notification( + 'sample', 'type', 1.0, '%', 'user', 'project', 'res', msg) + msg['payload']['event_type'] = msg['event_type'] + msg['payload']['host'] = msg['publisher_id'] + self.assertEqual(msg['payload'], s.resource_metadata) diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/test_utils.py ceilometer-5.0.0~b3/ceilometer/tests/unit/test_utils.py --- ceilometer-5.0.0~b2/ceilometer/tests/unit/test_utils.py 1970-01-01
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/test_sample.py ceilometer-5.0.0~b3/ceilometer/tests/unit/test_sample.py
--- ceilometer-5.0.0~b2/ceilometer/tests/unit/test_sample.py 1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/unit/test_sample.py 2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,68 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for ceilometer/sample.py"""
+
+import datetime
+
+from ceilometer import sample
+from ceilometer.tests import base
+
+
+class TestSample(base.BaseTestCase):
+    SAMPLE = sample.Sample(
+        name='cpu',
+        type=sample.TYPE_CUMULATIVE,
+        unit='ns',
+        volume='1234567',
+        user_id='56c5692032f34041900342503fecab30',
+        project_id='ac9494df2d9d4e709bac378cceabaf23',
+        resource_id='1ca738a1-c49c-4401-8346-5c60ebdb03f4',
+        timestamp=datetime.datetime(2014, 10, 29, 14, 12, 15, 485877),
+        resource_metadata={}
+    )
+
+    def test_sample_string_format(self):
+        expected = ('')
+        self.assertEqual(expected, str(self.SAMPLE))
+
+    def test_sample_from_notifications_list(self):
+        msg = {
+            'event_type': u'sample.create',
+            'timestamp': u'2015-06-19T09:19:35.786893',
+            'payload': [{u'counter_name': u'instance100'}],
+            'priority': 'info',
+            'publisher_id': u'ceilometer.api',
+            'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e'
+        }
+        s = sample.Sample.from_notification(
+            'sample', 'type', 1.0, '%', 'user', 'project', 'res', msg)
+        expected = {'event_type': msg['event_type'],
+                    'host': msg['publisher_id']}
+        self.assertEqual(expected, s.resource_metadata)
+
+    def test_sample_from_notifications_dict(self):
+        msg = {
+            'event_type': u'sample.create',
+            'timestamp': u'2015-06-19T09:19:35.786893',
+            'payload': {u'counter_name': u'instance100'},
+            'priority': 'info',
+            'publisher_id': u'ceilometer.api',
+            'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e'
+        }
+        s = sample.Sample.from_notification(
+            'sample', 'type', 1.0, '%', 'user', 'project', 'res', msg)
+        msg['payload']['event_type'] = msg['event_type']
+        msg['payload']['host'] = msg['publisher_id']
+        self.assertEqual(msg['payload'], s.resource_metadata)
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/unit/test_utils.py ceilometer-5.0.0~b3/ceilometer/tests/unit/test_utils.py
--- ceilometer-5.0.0~b2/ceilometer/tests/unit/test_utils.py 1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/unit/test_utils.py 2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,180 @@
+#
+# Copyright 2012 New Dream Network, LLC (DreamHost)
+# Copyright (c) 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Tests for ceilometer/utils.py
+"""
+import datetime
+import decimal
+
+from oslotest import base
+
+from ceilometer import utils
+
+
+class TestUtils(base.BaseTestCase):
+
+    def test_datetime_to_decimal(self):
+        expected = 1356093296.12
+        utc_datetime = datetime.datetime.utcfromtimestamp(expected)
+        actual = utils.dt_to_decimal(utc_datetime)
+        self.assertAlmostEqual(expected, float(actual), places=5)
+
+    def test_decimal_to_datetime(self):
+        expected = 1356093296.12
+        dexpected = decimal.Decimal(str(expected))  # Python 2.6 wants str()
+        expected_datetime = datetime.datetime.utcfromtimestamp(expected)
+        actual_datetime = utils.decimal_to_dt(dexpected)
+        # Python 3 has a rounding issue here, so compare as floats
+        self.assertAlmostEqual(utils.dt_to_decimal(expected_datetime),
+                               utils.dt_to_decimal(actual_datetime),
+                               places=5)
+
+    def test_recursive_keypairs(self):
+        data = {'a': 'A', 'b': 'B',
+                'nested': {'a': 'A', 'b': 'B'}}
+        pairs = list(utils.recursive_keypairs(data))
+        self.assertEqual([('a', 'A'), ('b', 'B'),
+                          ('nested:a', 'A'), ('nested:b', 'B')],
+                         pairs)
+
+    def test_recursive_keypairs_with_separator(self):
+        data = {'a': 'A',
+                'b': 'B',
+                'nested': {'a': 'A',
+                           'b': 'B',
+                           },
+                }
+        separator = '.'
+        pairs = list(utils.recursive_keypairs(data, separator))
+        self.assertEqual([('a', 'A'),
+                          ('b', 'B'),
+                          ('nested.a', 'A'),
+                          ('nested.b', 'B')],
+                         pairs)
+
+    def test_recursive_keypairs_with_list_of_dict(self):
+        small = 1
+        big = 1 << 64
+        expected = [('a', 'A'),
+                    ('b', 'B'),
+                    ('nested:list', [{small: 99, big: 42}])]
+        data = {'a': 'A',
+                'b': 'B',
+                'nested': {'list': [{small: 99, big: 42}]}}
+        pairs = list(utils.recursive_keypairs(data))
+        self.assertEqual(len(expected), len(pairs))
+        for k, v in pairs:
+            # the keys 1 and 1<<64 cause a hash collision on 64bit platforms
+            if k == 'nested:list':
+                self.assertIn(v,
+                              [[{small: 99, big: 42}],
+                               [{big: 42, small: 99}]])
+            else:
+                self.assertIn((k, v), expected)
+
+    def test_restore_nesting_unested(self):
+        metadata = {'a': 'A', 'b': 'B'}
+        unwound = utils.restore_nesting(metadata)
+        self.assertIs(metadata, unwound)
+
+    def test_restore_nesting(self):
+        metadata = {'a': 'A', 'b': 'B',
+                    'nested:a': 'A',
+                    'nested:b': 'B',
+                    'nested:twice:c': 'C',
+                    'nested:twice:d': 'D',
+                    'embedded:e': 'E'}
+        unwound = utils.restore_nesting(metadata)
+        expected = {'a': 'A', 'b': 'B',
+                    'nested': {'a': 'A', 'b': 'B',
+                               'twice': {'c': 'C', 'd': 'D'}},
+                    'embedded': {'e': 'E'}}
+        self.assertEqual(expected, unwound)
+        self.assertIsNot(metadata, unwound)
+
+    def test_restore_nesting_with_separator(self):
+        metadata = {'a': 'A', 'b': 'B',
+                    'nested.a': 'A',
+                    'nested.b': 'B',
+                    'nested.twice.c': 'C',
+                    'nested.twice.d': 'D',
+                    'embedded.e': 'E'}
+        unwound = utils.restore_nesting(metadata, separator='.')
+        expected = {'a': 'A', 'b': 'B',
+                    'nested': {'a': 'A', 'b': 'B',
+                               'twice': {'c': 'C', 'd': 'D'}},
+                    'embedded': {'e': 'E'}}
+        self.assertEqual(expected, unwound)
+        self.assertIsNot(metadata, unwound)
+
+    def test_decimal_to_dt_with_none_parameter(self):
+        self.assertIsNone(utils.decimal_to_dt(None))
+
+    def test_dict_to_kv(self):
+        data = {'a': 'A',
+                'b': 'B',
+                'nested': {'a': 'A',
+                           'b': 'B',
+                           },
+                'nested2': [{'c': 'A'}, {'c': 'B'}]
+                }
+        pairs = list(utils.dict_to_keyval(data))
+        self.assertEqual([('a', 'A'),
+                          ('b', 'B'),
+                          ('nested.a', 'A'),
+                          ('nested.b', 'B'),
+                          ('nested2[0].c', 'A'),
+                          ('nested2[1].c', 'B')],
+                         sorted(pairs, key=lambda x: x[0]))
+
+    def test_hash_of_set(self):
+        x = ['a', 'b']
+        y = ['a', 'b', 'a']
+        z = ['a', 'c']
+        self.assertEqual(utils.hash_of_set(x), utils.hash_of_set(y))
+        self.assertNotEqual(utils.hash_of_set(x), utils.hash_of_set(z))
+        self.assertNotEqual(utils.hash_of_set(y), utils.hash_of_set(z))
+
+    def test_hash_ring(self):
+        num_nodes = 10
+        num_keys = 1000
+
+        nodes = [str(x) for x in range(num_nodes)]
+        hr = utils.HashRing(nodes)
+
+        buckets = [0] * num_nodes
+        assignments = [-1] * num_keys
+        for k in range(num_keys):
+            n = int(hr.get_node(str(k)))
+            self.assertTrue(0 <= n <= num_nodes)
+            buckets[n] += 1
+            assignments[k] = n
+
+        # at least something in each bucket
+        self.assertTrue(all((c > 0 for c in buckets)))
+
+        # approximately even distribution
+        diff = max(buckets) - min(buckets)
+        self.assertTrue(diff < 0.3 * (num_keys / num_nodes))
+
+        # consistency
+        num_nodes += 1
+        nodes.append(str(num_nodes + 1))
+        hr = utils.HashRing(nodes)
+        for k in range(num_keys):
+            n = int(hr.get_node(str(k)))
+            assignments[k] -= n
+        reassigned = len([c for c in assignments if c != 0])
+        self.assertTrue(reassigned < num_keys / num_nodes)
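The hash-ring assertions above check the two defining properties of consistent hashing: keys spread roughly evenly across nodes, and adding a node reassigns only about 1/n of the keys. A toy illustration of the idea, assuming an md5-based ring with virtual replicas; this is a sketch, not ceilometer's HashRing implementation:

    import bisect
    import hashlib

    class ToyHashRing(object):
        """Each node owns many points on a circle; a key belongs to the
        first node point clockwise from the key's own hash."""
        def __init__(self, nodes, replicas=100):
            self._ring = sorted(
                (self._hash('%s-%d' % (node, r)), node)
                for node in nodes for r in range(replicas))
            self._hashes = [h for h, _ in self._ring]

        @staticmethod
        def _hash(key):
            return int(hashlib.md5(key.encode('utf-8')).hexdigest(), 16)

        def get_node(self, key):
            i = bisect.bisect(self._hashes, self._hash(key)) % len(self._ring)
            return self._ring[i][1]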
diff -Nru ceilometer-5.0.0~b2/ceilometer/tests/volume/test_notifications.py ceilometer-5.0.0~b3/ceilometer/tests/volume/test_notifications.py
--- ceilometer-5.0.0~b2/ceilometer/tests/volume/test_notifications.py 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/tests/volume/test_notifications.py 1970-01-01 00:00:00.000000000 +0000
@@ -1,345 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-
-import mock
-from oslotest import base
-
-from ceilometer import sample
-from ceilometer.volume import notifications
-
-
-def fake_uuid(x):
-    return '%s-%s-%s-%s' % (x * 8, x * 4, x * 4, x * 12)
-
-
-NOW = datetime.datetime.isoformat(datetime.datetime.utcnow())
-
-VOLUME_META = {u'status': u'exists',
-               u'instance_uuid': None,
-               u'user_id': u'bcb7746c7a41472d88a1ffac89ba6a9b',
-               u'availability_zone': u'nova',
-               u'tenant_id': u'7ffe17a15c724e2aa79fc839540aec15',
-               u'created_at': u'2014-10-28 09:31:20',
-               u'volume_id': fake_uuid('c'),
-               u'volume_type': u'3a9a398b-7e3b-40da-b09e-2115ad8cd68b',
-               u'replication_extended_status': None,
-               u'host': u'volumes.example.com',
-               u'snapshot_id': None,
-               u'replication_status': u'disabled',
-               u'size': 1,
-               u'display_name': u'2'}
-
-NOTIFICATION_VOLUME_EXISTS = {
-    "message_id": "0c65cb9c-018c-11e2-bc91-5453ed1bbb5f",
-    "publisher_id": "volumes.example.com",
-    "event_type": "volume.exists",
-    "priority": "info",
-    "payload": VOLUME_META,
-    "timestamp": NOW}
-
-NOTIFICATION_VOLUME_CREATE_START = {
-    "message_id": "0c65cb9c-018c-11e2-bc91-5453ed1bbb5f",
-    "publisher_id": "volumes.example.com",
-    "event_type": "volume.create.start",
-    "priority": "info",
-    "payload": VOLUME_META,
-    "timestamp": NOW}
-
-NOTIFICATION_VOLUME_CREATE_END = {
-    "message_id": "0c65cb9c-018c-11e2-bc91-5453ed1bbb5f",
-    "publisher_id": "volumes.example.com",
-    "event_type": "volume.create.end",
-    "priority": "info",
-    "payload": VOLUME_META,
-    "timestamp": NOW}
-
-NOTIFICATION_VOLUME_DELETE_START = {
-    "message_id": "0c65cb9c-018c-11e2-bc91-5453ed1bbb5f",
-    "publisher_id": "volumes.example.com",
-    "event_type": "volume.delete.start",
-    "priority": "info",
-    "payload": VOLUME_META,
-    "timestamp": NOW}
-
-NOTIFICATION_VOLUME_DELETE_END = {
-    "message_id": "0c65cb9c-018c-11e2-bc91-5453ed1bbb5f",
-    "publisher_id": "volumes.example.com",
-    "event_type": "volume.delete.end",
-    "priority": "info",
-    "payload": VOLUME_META,
-    "timestamp": NOW}
-
-NOTIFICATION_VOLUME_RESIZE_START = {
-    "message_id": "0c65cb9c-018c-11e2-bc91-5453ed1bbb5f",
-    "publisher_id": "volumes.example.com",
-    "event_type": "volume.resize.start",
-    "priority": "info",
-    "payload": VOLUME_META,
-    "timestamp": NOW}
-
-NOTIFICATION_VOLUME_RESIZE_END = {
-    "message_id": "0c65cb9c-018c-11e2-bc91-5453ed1bbb5f",
-    "publisher_id": "volumes.example.com",
-    "event_type": "volume.resize.end",
-    "priority": "info",
-    "payload": VOLUME_META,
-    "timestamp": NOW}
-
-NOTIFICATION_VOLUME_ATTACH_START = {
-    "message_id": "0c65cb9c-018c-11e2-bc91-5453ed1bbb5f",
-    "publisher_id": "volumes.example.com",
-    "event_type": "volume.attach.start",
-    "priority": "info",
-    "payload": VOLUME_META,
-    "timestamp": NOW}
-
-NOTIFICATION_VOLUME_ATTACH_END = {
-    "message_id": "0c65cb9c-018c-11e2-bc91-5453ed1bbb5f",
-    "publisher_id": "volumes.example.com",
-    "event_type": "volume.attach.end",
-    "priority": "info",
-    "payload": VOLUME_META,
-    "timestamp": NOW}
-
-NOTIFICATION_VOLUME_DETACH_START = {
-    "message_id": "0c65cb9c-018c-11e2-bc91-5453ed1bbb5f",
-    "publisher_id": "volumes.example.com",
-    "event_type": "volume.detach.start",
-    "priority": "info",
-    "payload": VOLUME_META,
-    "timestamp": NOW}
-
-NOTIFICATION_VOLUME_DETACH_END = {
-    "message_id": "0c65cb9c-018c-11e2-bc91-5453ed1bbb5f",
-    "publisher_id": "volumes.example.com",
-    "event_type": "volume.detach.end",
-    "priority": "info",
-    "payload": VOLUME_META,
-    "timestamp": NOW}
-
-NOTIFICATION_VOLUME_UPDATE_START = {
-    "message_id": "0c65cb9c-018c-11e2-bc91-5453ed1bbb5f",
-    "publisher_id": "volumes.example.com",
-    "event_type": "volume.update.start",
-    "priority": "info",
-    "payload": VOLUME_META,
-    "timestamp": NOW}
-
-NOTIFICATION_VOLUME_UPDATE_END = {
-    "message_id": "0c65cb9c-018c-11e2-bc91-5453ed1bbb5f",
-    "publisher_id": "volumes.example.com",
-    "event_type": "volume.update.end",
-    "priority": "info",
-    "payload": VOLUME_META,
-    "timestamp": NOW}
-
-SNAPSHOT_META = {u'status': u'creating',
-                 u'user_id': u'bcb7746c7a41472d88a1ffac89ba6a9b',
-                 u'availability_zone': u'nova',
-                 u'deleted': u'',
-                 u'tenant_id': u'7ffe17a15c724e2aa79fc839540aec15',
-                 u'created_at': u'2014-10-28 09:49:07',
-                 u'snapshot_id': fake_uuid('c'),
-                 u'volume_size': 1,
-                 u'volume_id': u'2925bb3b-2b51-496a-bb6e-01a20e950e07',
-                 u'display_name': u'11'}
-
-NOTIFICATION_SNAPSHOT_EXISTS = {
-    "message_id": "1d2944f9-f8e9-4b2b-8df1-465f759a63e8",
-    "publisher_id": "snapshots.example.com",
-    "event_type": "snapshot.exists",
-    "priority": "info",
-    "payload": SNAPSHOT_META,
-    "timestamp": NOW}
-
-NOTIFICATION_SNAPSHOT_CREATE_START = {
-    "message_id": "1d2944f9-f8e9-4b2b-8df1-465f759a63e8",
-    "publisher_id": "snapshots.example.com",
-    "event_type": "snapshot.create.start",
-    "priority": "info",
-    "payload": SNAPSHOT_META,
-    "timestamp": NOW}
-
-NOTIFICATION_SNAPSHOT_CREATE_END = {
-    "message_id": "1d2944f9-f8e9-4b2b-8df1-465f759a63e8",
-    "publisher_id": "snapshots.example.com",
-    "event_type": "snapshot.create.end",
-    "priority": "info",
-    "payload": SNAPSHOT_META,
-    "timestamp": NOW}
-
-NOTIFICATION_SNAPSHOT_DELETE_START = {
-    "message_id": "1d2944f9-f8e9-4b2b-8df1-465f759a63e8",
-    "publisher_id": "snapshots.example.com",
-    "event_type": "snapshot.delete.start",
-    "priority": "info",
-    "payload": SNAPSHOT_META,
-    "timestamp": NOW}
-
-NOTIFICATION_SNAPSHOT_DELETE_END = {
-    "message_id": "1d2944f9-f8e9-4b2b-8df1-465f759a63e8",
-    "publisher_id": "snapshots.example.com",
-    "event_type": "snapshot.delete.end",
-    "priority": "info",
-    "payload": SNAPSHOT_META,
-    "timestamp": NOW}
-
-
-class TestNotifications(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestNotifications, self).setUp()
-        self.host = None
-        self.handler_crud = None
-        self.handler = None
-        self.handler_size = None
-        self.name = None
-        self.name_size = None
-        self.size = None
-
-    def _verify_common_counter(self, c, name, volume):
-        self.assertIsNotNone(c)
-        self.assertEqual(name, c.name)
-        self.assertEqual(fake_uuid('c'), c.resource_id)
-        self.assertEqual(NOW, c.timestamp)
-        self.assertEqual(volume, c.volume)
-        metadata = c.resource_metadata
-        self.assertEqual(self.host, metadata.get('host'))
-
-    def _check_crud(self, notification_type, notification_name):
-        counters = list(self.handler_crud.process_notification(
-            notification_type))
-        self.assertEqual(1, len(counters))
-        notification = counters[0]
-        self._verify_common_counter(
-            notification, notification_name, 1)
-        self.assertEqual(sample.TYPE_DELTA, notification.type)
-
-    def _check(self, notification_type):
-        counters = list(self.handler.process_notification(notification_type))
-        self.assertEqual(1, len(counters))
-        notification = counters[0]
-        self._verify_common_counter(notification, self.name, 1)
-        self.assertEqual(sample.TYPE_GAUGE, notification.type)
-
-    def _check_size(self, notification_type):
-        counters = list(self.handler_size.process_notification(
-            notification_type))
-        self.assertEqual(1, len(counters))
-        notification = counters[0]
-        self._verify_common_counter(
-            notification, self.name_size, self.size)
-        self.assertEqual(sample.TYPE_GAUGE, notification.type)
-
-
-class TestVolumeNotifications(TestNotifications):
-
-    def setUp(self):
-        super(TestVolumeNotifications, self).setUp()
-        self.host = 'volumes.example.com'
-        self.handler_crud = notifications.VolumeCRUD(mock.Mock())
-        self.handler = notifications.Volume(mock.Mock())
-        self.handler_size = notifications.VolumeSize(mock.Mock())
-        self.name = 'volume'
-        self.name_size = 'volume.size'
-        self.size = VOLUME_META['size']
-
-    def test_volume_notifications(self):
-        self._check_crud(
-            NOTIFICATION_VOLUME_EXISTS, 'volume.exists')
-        self._check_crud(
-            NOTIFICATION_VOLUME_CREATE_START, 'volume.create.start')
-        self._check_crud(
-            NOTIFICATION_VOLUME_CREATE_END, 'volume.create.end')
-        self._check_crud(
-            NOTIFICATION_VOLUME_DELETE_START, 'volume.delete.start')
-        self._check_crud(
-            NOTIFICATION_VOLUME_DELETE_END, 'volume.delete.end')
-        self._check_crud(
-            NOTIFICATION_VOLUME_RESIZE_START, 'volume.resize.start')
-        self._check_crud(
-            NOTIFICATION_VOLUME_RESIZE_END, 'volume.resize.end')
-        self._check_crud(
-            NOTIFICATION_VOLUME_ATTACH_START, 'volume.attach.start')
-        self._check_crud(
-            NOTIFICATION_VOLUME_ATTACH_END, 'volume.attach.end')
-        self._check_crud(
-            NOTIFICATION_VOLUME_DETACH_START, 'volume.detach.start')
-        self._check_crud(
-            NOTIFICATION_VOLUME_DETACH_END, 'volume.detach.end')
-        self._check_crud(
-            NOTIFICATION_VOLUME_UPDATE_START, 'volume.update.start')
-        self._check_crud(
-            NOTIFICATION_VOLUME_UPDATE_END, 'volume.update.end')
-        self._check(NOTIFICATION_VOLUME_EXISTS)
-        self._check(NOTIFICATION_VOLUME_CREATE_START)
-        self._check(NOTIFICATION_VOLUME_CREATE_END)
-        self._check(NOTIFICATION_VOLUME_DELETE_START)
-        self._check(NOTIFICATION_VOLUME_DELETE_END)
-        self._check(NOTIFICATION_VOLUME_RESIZE_START)
-        self._check(NOTIFICATION_VOLUME_RESIZE_END)
-        self._check(NOTIFICATION_VOLUME_ATTACH_START)
-        self._check(NOTIFICATION_VOLUME_ATTACH_END)
-        self._check(NOTIFICATION_VOLUME_DETACH_START)
-        self._check(NOTIFICATION_VOLUME_DETACH_END)
-        self._check(NOTIFICATION_VOLUME_UPDATE_START)
-        self._check(NOTIFICATION_VOLUME_UPDATE_END)
-        self._check_size(NOTIFICATION_VOLUME_EXISTS)
-        self._check_size(NOTIFICATION_VOLUME_CREATE_START)
-        self._check_size(NOTIFICATION_VOLUME_CREATE_END)
-        self._check_size(NOTIFICATION_VOLUME_DELETE_START)
-        self._check_size(NOTIFICATION_VOLUME_DELETE_END)
-        self._check_size(NOTIFICATION_VOLUME_RESIZE_START)
-        self._check_size(NOTIFICATION_VOLUME_RESIZE_END)
-        self._check_size(NOTIFICATION_VOLUME_ATTACH_START)
-        self._check_size(NOTIFICATION_VOLUME_ATTACH_END)
-        self._check_size(NOTIFICATION_VOLUME_DETACH_START)
-        self._check_size(NOTIFICATION_VOLUME_DETACH_END)
-        self._check_size(NOTIFICATION_VOLUME_UPDATE_START)
-        self._check_size(NOTIFICATION_VOLUME_UPDATE_END)
-
-
-class TestSnapshotNotifications(TestNotifications):
-
-    def setUp(self):
-        super(TestSnapshotNotifications, self).setUp()
-        self.host = 'snapshots.example.com'
-        self.handler_crud = notifications.SnapshotCRUD(mock.Mock())
-        self.handler = notifications.Snapshot(mock.Mock())
-        self.handler_size = notifications.SnapshotSize(mock.Mock())
-        self.name = 'snapshot'
-        self.name_size = 'snapshot.size'
-        self.size = SNAPSHOT_META['volume_size']
-
-    def test_snapshot_notifications(self):
-        self._check_crud(
-            NOTIFICATION_SNAPSHOT_EXISTS, 'snapshot.exists')
-        self._check_crud(
-            NOTIFICATION_SNAPSHOT_CREATE_START, 'snapshot.create.start')
-        self._check_crud(
-            NOTIFICATION_SNAPSHOT_CREATE_END, 'snapshot.create.end')
-        self._check_crud(
-            NOTIFICATION_SNAPSHOT_DELETE_START, 'snapshot.delete.start')
-        self._check_crud(
-            NOTIFICATION_SNAPSHOT_DELETE_END, 'snapshot.delete.end')
-        self._check(NOTIFICATION_SNAPSHOT_EXISTS)
-        self._check(NOTIFICATION_SNAPSHOT_CREATE_START)
-        self._check(NOTIFICATION_SNAPSHOT_CREATE_END)
-        self._check(NOTIFICATION_SNAPSHOT_DELETE_START)
-        self._check(NOTIFICATION_SNAPSHOT_DELETE_END)
-        self._check_size(NOTIFICATION_SNAPSHOT_EXISTS)
-        self._check_size(NOTIFICATION_SNAPSHOT_CREATE_START)
-        self._check_size(NOTIFICATION_SNAPSHOT_CREATE_END)
-        self._check_size(NOTIFICATION_SNAPSHOT_DELETE_START)
-        self._check_size(NOTIFICATION_SNAPSHOT_DELETE_END)
diff -Nru ceilometer-5.0.0~b2/ceilometer/transformer/accumulator.py ceilometer-5.0.0~b3/ceilometer/transformer/accumulator.py
--- ceilometer-5.0.0~b2/ceilometer/transformer/accumulator.py 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/transformer/accumulator.py 2015-09-03 13:05:55.000000000 +0000
@@ -22,6 +22,8 @@
     And then flushes them out into the wild.
     """
 
+    grouping_keys = ['resource_id']
+
     def __init__(self, size=1, **kwargs):
         if size >= 1:
             self.samples = []
diff -Nru ceilometer-5.0.0~b2/ceilometer/transformer/arithmetic.py ceilometer-5.0.0~b3/ceilometer/transformer/arithmetic.py
--- ceilometer-5.0.0~b2/ceilometer/transformer/arithmetic.py 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/transformer/arithmetic.py 2015-09-03 13:05:55.000000000 +0000
@@ -35,6 +35,8 @@
     over one or more meters and/or their metadata.
     """
 
+    grouping_keys = ['resource_id']
+
     meter_name_re = re.compile(r'\$\(([\w\.\-]+)\)')
 
     def __init__(self, target=None, **kwargs):
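The grouping_keys attribute being added to every transformer above tells the pipeline which sample attributes identify a stream that must stay with a single transformer instance. A minimal sketch of that bucketing step, assuming samples expose the named attributes; an illustration, not the pipeline's actual partitioning code:

    import collections

    def group_by_keys(samples, grouping_keys=('resource_id',)):
        # All samples sharing the same key tuple land in the same bucket,
        # so a stateful transformer never mixes unrelated resources.
        groups = collections.defaultdict(list)
        for s in samples:
            groups[tuple(getattr(s, k) for k in grouping_keys)].append(s)
        return groups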
diff -Nru ceilometer-5.0.0~b2/ceilometer/transformer/conversions.py ceilometer-5.0.0~b3/ceilometer/transformer/conversions.py
--- ceilometer-5.0.0~b2/ceilometer/transformer/conversions.py 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/transformer/conversions.py 2015-09-03 13:05:55.000000000 +0000
@@ -30,6 +30,8 @@
 class ScalingTransformer(transformer.TransformerBase):
     """Transformer to apply a scaling conversion."""
 
+    grouping_keys = ['resource_id']
+
     def __init__(self, source=None, target=None, **kwargs):
         """Initialize transformer with configured parameters.
@@ -43,10 +45,9 @@
         self.source = source
         self.target = target
         self.scale = target.get('scale')
-        LOG.debug(_('scaling conversion transformer with source:'
-                    ' %(source)s target: %(target)s:')
-                  % {'source': source,
-                     'target': target})
+        LOG.debug('scaling conversion transformer with source:'
+                  ' %(source)s target: %(target)s:', {'source': source,
+                                                      'target': target})
         super(ScalingTransformer, self).__init__(**kwargs)
 
     def _scale(self, s):
@@ -89,10 +90,10 @@
 
     def handle_sample(self, context, s):
         """Handle a sample, converting if necessary."""
-        LOG.debug(_('handling sample %s'), (s,))
+        LOG.debug('handling sample %s', s)
         if self.source.get('unit', s.unit) == s.unit:
             s = self._convert(s)
-            LOG.debug(_('converted to: %s'), (s,))
+            LOG.debug('converted to: %s', s)
         return s
@@ -111,7 +112,7 @@
 
     def handle_sample(self, context, s):
         """Handle a sample, converting if necessary."""
-        LOG.debug(_('handling sample %s'), (s,))
+        LOG.debug('handling sample %s', s)
         key = s.name + s.resource_id
         prev = self.cache.get(key)
         timestamp = timeutils.parse_isotime(s.timestamp)
@@ -121,9 +122,16 @@
             prev_volume = prev[0]
             prev_timestamp = prev[1]
             time_delta = timeutils.delta_seconds(prev_timestamp, timestamp)
-            # we only allow negative deltas for noncumulative samples, whereas
-            # for cumulative we assume that a reset has occurred in the interim
-            # so that the current volume gives a lower bound on growth
+            # disallow violations of the arrow of time
+            if time_delta < 0:
+                LOG.warn(_('dropping out of time order sample: %s'), (s,))
+                # Reset the cache to the newer sample.
+                self.cache[key] = prev
+                return None
+            # we only allow negative volume deltas for noncumulative
+            # samples, whereas for cumulative we assume that a reset has
+            # occurred in the interim so that the current volume gives a
+            # lower bound on growth
             volume_delta = (s.volume - prev_volume
                             if (prev_volume <= s.volume or
                                 s.type != sample.TYPE_CUMULATIVE)
@@ -132,7 +140,7 @@
                           if time_delta else 0.0)
             s = self._convert(s, rate_of_change)
-            LOG.debug(_('converted to: %s'), (s,))
+            LOG.debug('converted to: %s', s)
         else:
             LOG.warn(_('dropping sample with no predecessor: %s'),
                      (s,))
diff -Nru ceilometer-5.0.0~b2/ceilometer/transformer/__init__.py ceilometer-5.0.0~b3/ceilometer/transformer/__init__.py
--- ceilometer-5.0.0~b2/ceilometer/transformer/__init__.py 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/transformer/__init__.py 2015-09-03 13:05:55.000000000 +0000
@@ -43,6 +43,10 @@
         :param sample: A sample.
         """
 
+    @abc.abstractproperty
+    def grouping_keys(self):
+        """Keys used to group transformer."""
+
     def flush(self, context):
         """Flush samples cached previously.
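The conversions.py hunk above adds an arrow-of-time guard: a sample whose timestamp precedes its cached predecessor is dropped rather than producing a rate over a negative interval, and the cache keeps the newer entry. A standalone sketch of that guarded rate computation, using plain values instead of ceilometer's Sample objects:

    # cache maps a meter/resource key to the (volume, timestamp) of the
    # last sample seen; returns a rate, or None when the sample is dropped.
    def rate_of_change(cache, key, volume, timestamp, cumulative=True):
        prev = cache.get(key)
        cache[key] = (volume, timestamp)
        if prev is None:
            return None                    # no predecessor: drop
        prev_volume, prev_timestamp = prev
        time_delta = (timestamp - prev_timestamp).total_seconds()
        if time_delta < 0:
            cache[key] = prev              # out of order: keep newer entry
            return None
        if cumulative and volume < prev_volume:
            delta = volume                 # counter reset: lower bound on growth
        else:
            delta = volume - prev_volume
        return delta / time_delta if time_delta else 0.0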
diff -Nru ceilometer-5.0.0~b2/ceilometer/utils.py ceilometer-5.0.0~b3/ceilometer/utils.py
--- ceilometer-5.0.0~b2/ceilometer/utils.py 2015-07-30 12:14:02.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer/utils.py 2015-09-03 13:05:55.000000000 +0000
@@ -24,7 +24,6 @@
 import datetime
 import decimal
 import hashlib
-import multiprocessing
 import struct
 
 from oslo_concurrency import processutils
@@ -73,8 +72,10 @@
         # the tuple would become list. So we have to generate the value as
         # list here.
         return [decode_unicode(element) for element in input]
-    elif isinstance(input, six.text_type):
+    elif six.PY2 and isinstance(input, six.text_type):
         return input.encode('utf-8')
+    elif six.PY3 and isinstance(input, six.binary_type):
+        return input.decode('utf-8')
     else:
         return input
@@ -200,13 +201,6 @@
     return dict_to_update
 
 
-def cpu_count():
-    try:
-        return multiprocessing.cpu_count() or 1
-    except NotImplementedError:
-        return 1
-
-
 def uniq(dupes, attrs):
     """Exclude elements of dupes with a duplicated set of attribute values."""
     key = lambda d: '/'.join([getattr(d, a) or '' for a in attrs])
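The decode_unicode change above makes the normalization symmetric across interpreters: Python 2 encodes text to UTF-8 bytes, while Python 3 decodes bytes to text, so downstream code always sees one consistent type. A short sketch of the resulting behavior, assuming six is available:

    import six

    def normalize(value):
        # Mirrors the branch logic shown above (sketch only).
        if six.PY2 and isinstance(value, six.text_type):
            return value.encode('utf-8')   # py2: unicode -> utf-8 bytes
        if six.PY3 and isinstance(value, six.binary_type):
            return value.decode('utf-8')   # py3: bytes -> str
        return value

    # py3 example: normalize(b'caf\xc3\xa9') == u'caf\xe9'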
- """ - return [oslo_messaging.Target(topic=topic, - exchange=conf.cinder_control_exchange) - for topic in conf.notification_topics] - - -class VolumeCRUDBase(VolumeBase): - """Convert volume notifications into Counters.""" - - event_types = [ - 'volume.exists', - 'volume.create.*', - 'volume.delete.*', - 'volume.resize.*', - 'volume.attach.*', - 'volume.detach.*', - 'volume.update.*' - ] - - -class VolumeCRUD(VolumeCRUDBase, plugin_base.NonMetricNotificationBase): - def process_notification(self, message): - yield sample.Sample.from_notification( - name=message['event_type'], - type=sample.TYPE_DELTA, - unit='volume', - volume=1, - user_id=message['payload']['user_id'], - project_id=message['payload']['tenant_id'], - resource_id=message['payload']['volume_id'], - message=message) - - -class Volume(VolumeCRUDBase, plugin_base.NonMetricNotificationBase): - def process_notification(self, message): - yield sample.Sample.from_notification( - name='volume', - type=sample.TYPE_GAUGE, - unit='volume', - volume=1, - user_id=message['payload']['user_id'], - project_id=message['payload']['tenant_id'], - resource_id=message['payload']['volume_id'], - message=message) - - -class VolumeSize(VolumeCRUDBase): - def process_notification(self, message): - yield sample.Sample.from_notification( - name='volume.size', - type=sample.TYPE_GAUGE, - unit='GB', - volume=message['payload']['size'], - user_id=message['payload']['user_id'], - project_id=message['payload']['tenant_id'], - resource_id=message['payload']['volume_id'], - message=message) - - -class SnapshotCRUDBase(VolumeBase): - """Convert snapshot notifications into Counters.""" - - event_types = [ - 'snapshot.exists', - 'snapshot.create.*', - 'snapshot.delete.*', - ] - - -class SnapshotCRUD(SnapshotCRUDBase, plugin_base.NonMetricNotificationBase): - def process_notification(self, message): - yield sample.Sample.from_notification( - name=message['event_type'], - type=sample.TYPE_DELTA, - unit='snapshot', - volume=1, - user_id=message['payload']['user_id'], - project_id=message['payload']['tenant_id'], - resource_id=message['payload']['snapshot_id'], - message=message) - - -class Snapshot(SnapshotCRUDBase, plugin_base.NonMetricNotificationBase): - def process_notification(self, message): - yield sample.Sample.from_notification( - name='snapshot', - type=sample.TYPE_GAUGE, - unit='snapshot', - volume=1, - user_id=message['payload']['user_id'], - project_id=message['payload']['tenant_id'], - resource_id=message['payload']['snapshot_id'], - message=message) - - -class SnapshotSize(SnapshotCRUDBase): - def process_notification(self, message): - yield sample.Sample.from_notification( - name='snapshot.size', - type=sample.TYPE_GAUGE, - unit='GB', - volume=message['payload']['volume_size'], - user_id=message['payload']['user_id'], - project_id=message['payload']['tenant_id'], - resource_id=message['payload']['snapshot_id'], - message=message) diff -Nru ceilometer-5.0.0~b2/ceilometer.egg-info/entry_points.txt ceilometer-5.0.0~b3/ceilometer.egg-info/entry_points.txt --- ceilometer-5.0.0~b2/ceilometer.egg-info/entry_points.txt 2015-07-30 12:17:53.000000000 +0000 +++ ceilometer-5.0.0~b3/ceilometer.egg-info/entry_points.txt 2015-09-03 13:09:31.000000000 +0000 @@ -90,42 +90,18 @@ [ceilometer.notification] _sample = ceilometer.telemetry.notifications:TelemetryApiPost -authenticate = ceilometer.identity.notifications:Authenticate -bandwidth = ceilometer.network.notifications:Bandwidth -cpu_frequency = ceilometer.compute.notifications.cpu:CpuFrequency -cpu_idle_percent 
diff -Nru ceilometer-5.0.0~b2/ceilometer.egg-info/entry_points.txt ceilometer-5.0.0~b3/ceilometer.egg-info/entry_points.txt
--- ceilometer-5.0.0~b2/ceilometer.egg-info/entry_points.txt 2015-07-30 12:17:53.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer.egg-info/entry_points.txt 2015-09-03 13:09:31.000000000 +0000
@@ -90,42 +90,18 @@
 [ceilometer.notification]
 _sample = ceilometer.telemetry.notifications:TelemetryApiPost
-authenticate = ceilometer.identity.notifications:Authenticate
-bandwidth = ceilometer.network.notifications:Bandwidth
-cpu_frequency = ceilometer.compute.notifications.cpu:CpuFrequency
-cpu_idle_percent = ceilometer.compute.notifications.cpu:CpuIdlePercent
-cpu_idle_time = ceilometer.compute.notifications.cpu:CpuIdleTime
-cpu_iowait_percent = ceilometer.compute.notifications.cpu:CpuIowaitPercent
-cpu_iowait_time = ceilometer.compute.notifications.cpu:CpuIowaitTime
-cpu_kernel_percent = ceilometer.compute.notifications.cpu:CpuKernelPercent
-cpu_kernel_time = ceilometer.compute.notifications.cpu:CpuKernelTime
-cpu_percent = ceilometer.compute.notifications.cpu:CpuPercent
-cpu_user_percent = ceilometer.compute.notifications.cpu:CpuUserPercent
-cpu_user_time = ceilometer.compute.notifications.cpu:CpuUserTime
-data_processing = ceilometer.data_processing.notifications:DataProcessing
-disk_ephemeral_size = ceilometer.compute.notifications.instance:EphemeralDiskSize
-disk_root_size = ceilometer.compute.notifications.instance:RootDiskSize
 dns.domain.exists = ceilometer.dns.notifications:DomainExists
 floatingip = ceilometer.network.notifications:FloatingIP
-group = ceilometer.identity.notifications:Group
 hardware.ipmi.current = ceilometer.ipmi.notifications.ironic:CurrentSensorNotification
 hardware.ipmi.fan = ceilometer.ipmi.notifications.ironic:FanSensorNotification
 hardware.ipmi.temperature = ceilometer.ipmi.notifications.ironic:TemperatureSensorNotification
 hardware.ipmi.voltage = ceilometer.ipmi.notifications.ironic:VoltageSensorNotification
 http.request = ceilometer.middleware:HTTPRequest
 http.response = ceilometer.middleware:HTTPResponse
-image = ceilometer.image.notifications:Image
-image_crud = ceilometer.image.notifications:ImageCRUD
-image_download = ceilometer.image.notifications:ImageDownload
-image_serve = ceilometer.image.notifications:ImageServe
-image_size = ceilometer.image.notifications:ImageSize
 instance = ceilometer.compute.notifications.instance:Instance
 instance_delete = ceilometer.compute.notifications.instance:InstanceDelete
-instance_flavor = ceilometer.compute.notifications.instance:InstanceFlavor
 instance_scheduled = ceilometer.compute.notifications.instance:InstanceScheduled
-magnetodb_index_count = ceilometer.key_value_storage.notifications:Index
-magnetodb_table = ceilometer.key_value_storage.notifications:Table
-memory = ceilometer.compute.notifications.instance:Memory
+meter = ceilometer.meter.notifications:ProcessMeterNotifications
 network = ceilometer.network.notifications:Network
 network.services.firewall = ceilometer.network.notifications:Firewall
 network.services.firewall.policy = ceilometer.network.notifications:FirewallPolicy
@@ -138,26 +114,10 @@
 network.services.vpn.connections = ceilometer.network.notifications:IPSecSiteConnection
 network.services.vpn.ikepolicy = ceilometer.network.notifications:IKEPolicy
 network.services.vpn.ipsecpolicy = ceilometer.network.notifications:IPSecPolicy
-objectstore.request = ceilometer.objectstore.notifications:SwiftWsgiMiddleware
-objectstore.request.meters = ceilometer.objectstore.notifications:SwiftWsgiMiddlewareMeters
 port = ceilometer.network.notifications:Port
-profiler = ceilometer.profiler.notifications:ProfilerNotifications
-project = ceilometer.identity.notifications:Project
-role = ceilometer.identity.notifications:Role
-role_assignment = ceilometer.identity.notifications:RoleAssignment
 router = ceilometer.network.notifications:Router
-snapshot = ceilometer.volume.notifications:Snapshot
-snapshot_crud = ceilometer.volume.notifications:SnapshotCRUD
-snapshot_size = ceilometer.volume.notifications:SnapshotSize
-stack_crud = ceilometer.orchestration.notifications:StackCRUD
 subnet = ceilometer.network.notifications:Subnet
 trove.instance.exists = ceilometer.database.notifications:InstanceExists
-trust = ceilometer.identity.notifications:Trust
-user = ceilometer.identity.notifications:User
-vcpus = ceilometer.compute.notifications.instance:VCpus
-volume = ceilometer.volume.notifications:Volume
-volume_crud = ceilometer.volume.notifications:VolumeCRUD
-volume_size = ceilometer.volume.notifications:VolumeSize
 
 [ceilometer.poll.central]
 energy = ceilometer.energy.kwapi:EnergyPollster
@@ -261,7 +221,6 @@
 disk.write.requests = ceilometer.compute.pollsters.disk:WriteRequestsPollster
 disk.write.requests.rate = ceilometer.compute.pollsters.disk:WriteRequestsRatePollster
 instance = ceilometer.compute.pollsters.instance:InstancePollster
-instance_flavor = ceilometer.compute.pollsters.instance:InstanceFlavorPollster
 memory.resident = ceilometer.compute.pollsters.memory:MemoryResidentPollster
 memory.usage = ceilometer.compute.pollsters.memory:MemoryUsagePollster
 network.incoming.bytes = ceilometer.compute.pollsters.net:IncomingBytesPollster
diff -Nru ceilometer-5.0.0~b2/ceilometer.egg-info/pbr.json ceilometer-5.0.0~b3/ceilometer.egg-info/pbr.json
--- ceilometer-5.0.0~b2/ceilometer.egg-info/pbr.json 2015-07-30 12:17:53.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer.egg-info/pbr.json 2015-09-03 13:09:31.000000000 +0000
@@ -1 +1 @@
-{"is_release": true, "git_version": "29d3cd4"}
\ No newline at end of file
+{"is_release": true, "git_version": "6c3c5a1"}
\ No newline at end of file
diff -Nru ceilometer-5.0.0~b2/ceilometer.egg-info/PKG-INFO ceilometer-5.0.0~b3/ceilometer.egg-info/PKG-INFO
--- ceilometer-5.0.0~b2/ceilometer.egg-info/PKG-INFO 2015-07-30 12:17:53.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer.egg-info/PKG-INFO 2015-09-03 13:09:31.000000000 +0000
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: ceilometer
-Version: 5.0.0.0b2
+Version: 5.0.0.0b3
 Summary: OpenStack Telemetry
 Home-page: http://www.openstack.org/
 Author: OpenStack
diff -Nru ceilometer-5.0.0~b2/ceilometer.egg-info/requires.txt ceilometer-5.0.0~b3/ceilometer.egg-info/requires.txt
--- ceilometer-5.0.0~b2/ceilometer.egg-info/requires.txt 2015-07-30 12:17:53.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer.egg-info/requires.txt 2015-09-03 13:09:31.000000000 +0000
@@ -1,35 +1,35 @@
-retrying!=1.3.0,>=1.2.3 # Apache-2.0
-alembic>=0.7.2
-croniter>=0.3.4 # MIT License
+retrying!=1.3.0,>=1.2.3
+croniter>=0.3.4
 eventlet>=0.17.4
-jsonpath-rw<2.0,>=1.2.0
+jsonpath-rw-ext>=0.1.7
 jsonschema!=2.5.0,<3.0.0,>=2.0.0
-kafka-python>=0.9.2 # Apache-2.0
+kafka-python>=0.9.2
 keystonemiddleware>=2.0.0
 lxml>=2.3
 msgpack-python>=0.4.0
-oslo.context>=0.2.0 # Apache-2.0
-oslo.db>=1.12.0 # Apache-2.0
-oslo.concurrency>=2.1.0 # Apache-2.0
-oslo.config>=1.11.0 # Apache-2.0
-oslo.i18n>=1.5.0 # Apache-2.0
-oslo.log>=1.6.0 # Apache-2.0
-oslo.policy>=0.5.0 # Apache-2.0
-oslo.rootwrap>=2.0.0 # Apache-2.0
-oslo.service>=0.1.0 # Apache-2.0
+oslo.context>=0.2.0
+oslo.db>=2.4.1
+oslo.concurrency>=2.3.0
+oslo.config>=2.3.0
+oslo.i18n>=1.5.0
+oslo.log>=1.8.0
+oslo.policy>=0.5.0
+oslo.reports>=0.1.0
+oslo.rootwrap>=2.0.0
+oslo.service>=0.7.0
 PasteDeploy>=1.5.0
-pbr<2.0,>=1.3
-pecan>=0.8.0
-oslo.messaging!=1.17.0,!=1.17.1,>=1.16.0 # Apache-2.0
-oslo.middleware>=2.4.0 # Apache-2.0
-oslo.serialization>=1.4.0 # Apache-2.0
-oslo.utils>=1.9.0 # Apache-2.0
+pbr<2.0,>=1.6
+pecan>=1.0.0
+oslo.messaging!=1.17.0,!=1.17.1,>=1.16.0
+oslo.middleware>=2.8.0
+oslo.serialization>=1.4.0
+oslo.utils>=2.0.0
 pysnmp<5.0.0,>=4.2.1
 python-ceilometerclient>=1.0.13
 python-glanceclient>=0.18.0
 python-keystoneclient>=1.6.0
-python-neutronclient<3,>=2.3.11
-python-novaclient>=2.22.0
+python-neutronclient<3,>=2.6.0
+python-novaclient>=2.26.0
 python-swiftclient>=2.2.0
 pytz>=2013.6
 PyYAML>=3.1.0
@@ -37,8 +37,8 @@
 six>=1.9.0
 SQLAlchemy<1.1.0,>=0.9.7
 sqlalchemy-migrate>=0.9.6
-stevedore>=1.5.0 # Apache-2.0
-tooz>=0.16.0 # Apache-2.0
-Werkzeug>=0.7 # BSD License
+stevedore>=1.5.0
+tooz>=1.19.0
+Werkzeug>=0.7
 WebOb>=1.2.3
 WSME>=0.7
diff -Nru ceilometer-5.0.0~b2/ceilometer.egg-info/SOURCES.txt ceilometer-5.0.0~b3/ceilometer.egg-info/SOURCES.txt
--- ceilometer-5.0.0~b2/ceilometer.egg-info/SOURCES.txt 2015-07-30 12:17:54.000000000 +0000
+++ ceilometer-5.0.0~b3/ceilometer.egg-info/SOURCES.txt 2015-09-03 13:09:31.000000000 +0000
@@ -25,6 +25,7 @@
 ceilometer/__init__.py
 ceilometer/collector.py
 ceilometer/coordination.py
+ceilometer/exchange_control.py
 ceilometer/i18n.py
 ceilometer/keystone_client.py
 ceilometer/messaging.py
@@ -38,6 +39,7 @@
 ceilometer/service.py
 ceilometer/service_base.py
 ceilometer/utils.py
+ceilometer/version.py
 ceilometer.egg-info/PKG-INFO
 ceilometer.egg-info/SOURCES.txt
 ceilometer.egg-info/dependency_links.txt
@@ -113,7 +115,6 @@
 ceilometer/compute/discovery.py
 ceilometer/compute/util.py
 ceilometer/compute/notifications/__init__.py
-ceilometer/compute/notifications/cpu.py
 ceilometer/compute/notifications/instance.py
 ceilometer/compute/pollsters/__init__.py
 ceilometer/compute/pollsters/cpu.py
@@ -134,14 +135,13 @@
 ceilometer/compute/virt/vmware/vsphere_operations.py
 ceilometer/compute/virt/xenapi/__init__.py
 ceilometer/compute/virt/xenapi/inspector.py
-ceilometer/data_processing/__init__.py
-ceilometer/data_processing/notifications.py
 ceilometer/database/__init__.py
 ceilometer/database/notifications.py
 ceilometer/dispatcher/__init__.py
 ceilometer/dispatcher/database.py
 ceilometer/dispatcher/file.py
 ceilometer/dispatcher/gnocchi.py
+ceilometer/dispatcher/gnocchi_client.py
 ceilometer/dispatcher/http.py
 ceilometer/dns/__init__.py
 ceilometer/dns/notifications.py
@@ -177,11 +177,8 @@
 ceilometer/hardware/pollsters/network_aggregated.py
 ceilometer/hardware/pollsters/system.py
 ceilometer/hardware/pollsters/util.py
-ceilometer/identity/__init__.py
-ceilometer/identity/notifications.py
 ceilometer/image/__init__.py
 ceilometer/image/glance.py
-ceilometer/image/notifications.py
 ceilometer/ipmi/__init__.py
 ceilometer/ipmi/notifications/__init__.py
 ceilometer/ipmi/notifications/ironic.py
@@ -193,8 +190,6 @@
 ceilometer/ipmi/pollsters/__init__.py
 ceilometer/ipmi/pollsters/node.py
 ceilometer/ipmi/pollsters/sensor.py
-ceilometer/key_value_storage/__init__.py
-ceilometer/key_value_storage/notifications.py
 ceilometer/locale/ceilometer-log-critical.pot
 ceilometer/locale/ceilometer-log-error.pot
 ceilometer/locale/ceilometer-log-info.pot
@@ -211,6 +206,7 @@
 ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer.po
 ceilometer/meter/__init__.py
 ceilometer/meter/notifications.py
+ceilometer/meter/data/meters.yaml
 ceilometer/network/__init__.py
 ceilometer/network/floatingip.py
 ceilometer/network/notifications.py
@@ -233,14 +229,9 @@
 ceilometer/network/statistics/opendaylight/client.py
 ceilometer/network/statistics/opendaylight/driver.py
 ceilometer/objectstore/__init__.py
-ceilometer/objectstore/notifications.py
 ceilometer/objectstore/rgw.py
 ceilometer/objectstore/rgw_client.py
 ceilometer/objectstore/swift.py
-ceilometer/orchestration/__init__.py
-ceilometer/orchestration/notifications.py
-ceilometer/profiler/__init__.py
-ceilometer/profiler/notifications.py
ceilometer/publisher/__init__.py ceilometer/publisher/direct.py ceilometer/publisher/file.py @@ -294,11 +285,9 @@ ceilometer/storage/sqlalchemy/migrate_repo/versions/019_alarm_history_detail_is_text.py ceilometer/storage/sqlalchemy/migrate_repo/versions/020_add_metadata_tables.py ceilometer/storage/sqlalchemy/migrate_repo/versions/021_add_event_types.py -ceilometer/storage/sqlalchemy/migrate_repo/versions/021_sqlite_downgrade.sql ceilometer/storage/sqlalchemy/migrate_repo/versions/021_sqlite_upgrade.sql ceilometer/storage/sqlalchemy/migrate_repo/versions/022_metadata_int_is_bigint.py ceilometer/storage/sqlalchemy/migrate_repo/versions/023_add_trait_types.py -ceilometer/storage/sqlalchemy/migrate_repo/versions/023_sqlite_downgrade.sql ceilometer/storage/sqlalchemy/migrate_repo/versions/023_sqlite_upgrade.sql ceilometer/storage/sqlalchemy/migrate_repo/versions/024_event_use_floatingprecision.py ceilometer/storage/sqlalchemy/migrate_repo/versions/025_alarm_use_floatingprecision.py @@ -321,6 +310,7 @@ ceilometer/storage/sqlalchemy/migrate_repo/versions/042_add_raw_column.py ceilometer/storage/sqlalchemy/migrate_repo/versions/043_reduce_uuid_data_types.py ceilometer/storage/sqlalchemy/migrate_repo/versions/044_restore_long_uuid_data_types.py +ceilometer/storage/sqlalchemy/migrate_repo/versions/045_add_resource_metadatahash_index.py ceilometer/storage/sqlalchemy/migrate_repo/versions/__init__.py ceilometer/telemetry/__init__.py ceilometer/telemetry/notifications.py @@ -330,216 +320,218 @@ ceilometer/tests/db.py ceilometer/tests/mocks.py ceilometer/tests/pipeline_base.py -ceilometer/tests/test_bin.py -ceilometer/tests/test_collector.py -ceilometer/tests/test_coordination.py -ceilometer/tests/test_decoupled_pipeline.py -ceilometer/tests/test_event_pipeline.py -ceilometer/tests/test_hacking.py -ceilometer/tests/test_messaging.py -ceilometer/tests/test_middleware.py -ceilometer/tests/test_neutronclient.py -ceilometer/tests/test_notification.py -ceilometer/tests/test_novaclient.py -ceilometer/tests/test_sample.py -ceilometer/tests/test_utils.py -ceilometer/tests/agent/__init__.py -ceilometer/tests/agent/agentbase.py -ceilometer/tests/agent/test_discovery.py -ceilometer/tests/agent/test_manager.py -ceilometer/tests/agent/test_plugin.py -ceilometer/tests/alarm/__init__.py -ceilometer/tests/alarm/test_alarm_svc.py -ceilometer/tests/alarm/test_notifier.py -ceilometer/tests/alarm/test_rpc.py -ceilometer/tests/alarm/evaluator/__init__.py -ceilometer/tests/alarm/evaluator/base.py -ceilometer/tests/alarm/evaluator/test_base.py -ceilometer/tests/alarm/evaluator/test_combination.py -ceilometer/tests/alarm/evaluator/test_gnocchi.py -ceilometer/tests/alarm/evaluator/test_threshold.py -ceilometer/tests/api/__init__.py -ceilometer/tests/api/test_app.py -ceilometer/tests/api/test_hooks.py -ceilometer/tests/api/test_versions.py -ceilometer/tests/api/v2/__init__.py -ceilometer/tests/api/v2/test_acl_scenarios.py -ceilometer/tests/api/v2/test_alarm_scenarios.py -ceilometer/tests/api/v2/test_api_upgrade.py -ceilometer/tests/api/v2/test_app.py -ceilometer/tests/api/v2/test_capabilities.py -ceilometer/tests/api/v2/test_complex_query.py -ceilometer/tests/api/v2/test_complex_query_scenarios.py -ceilometer/tests/api/v2/test_compute_duration_by_resource_scenarios.py -ceilometer/tests/api/v2/test_event_scenarios.py -ceilometer/tests/api/v2/test_list_events_scenarios.py -ceilometer/tests/api/v2/test_list_meters_scenarios.py -ceilometer/tests/api/v2/test_list_resources_scenarios.py 
-ceilometer/tests/api/v2/test_post_samples_scenarios.py -ceilometer/tests/api/v2/test_query.py -ceilometer/tests/api/v2/test_statistics.py -ceilometer/tests/api/v2/test_statistics_scenarios.py -ceilometer/tests/api/v2/test_wsme_custom_type.py -ceilometer/tests/compute/__init__.py -ceilometer/tests/compute/notifications/__init__.py -ceilometer/tests/compute/notifications/test_cpu.py -ceilometer/tests/compute/notifications/test_instance.py -ceilometer/tests/compute/pollsters/__init__.py -ceilometer/tests/compute/pollsters/base.py -ceilometer/tests/compute/pollsters/test_cpu.py -ceilometer/tests/compute/pollsters/test_diskio.py -ceilometer/tests/compute/pollsters/test_instance.py -ceilometer/tests/compute/pollsters/test_location_metadata.py -ceilometer/tests/compute/pollsters/test_memory.py -ceilometer/tests/compute/pollsters/test_net.py -ceilometer/tests/compute/virt/__init__.py -ceilometer/tests/compute/virt/hyperv/__init__.py -ceilometer/tests/compute/virt/hyperv/test_inspector.py -ceilometer/tests/compute/virt/hyperv/test_utilsv2.py -ceilometer/tests/compute/virt/libvirt/__init__.py -ceilometer/tests/compute/virt/libvirt/test_inspector.py -ceilometer/tests/compute/virt/vmware/__init__.py -ceilometer/tests/compute/virt/vmware/test_inspector.py -ceilometer/tests/compute/virt/vmware/test_vsphere_operations.py -ceilometer/tests/compute/virt/xenapi/__init__.py -ceilometer/tests/compute/virt/xenapi/test_inspector.py -ceilometer/tests/data_processing/__init__.py -ceilometer/tests/data_processing/test_notifications.py -ceilometer/tests/database/__init__.py -ceilometer/tests/database/test_notifications.py -ceilometer/tests/dispatcher/__init__.py -ceilometer/tests/dispatcher/test_db.py -ceilometer/tests/dispatcher/test_file.py -ceilometer/tests/dispatcher/test_gnocchi.py -ceilometer/tests/dispatcher/test_http.py -ceilometer/tests/dns/__init__.py -ceilometer/tests/dns/test_notifications.py -ceilometer/tests/energy/__init__.py -ceilometer/tests/energy/test_kwapi.py -ceilometer/tests/event/__init__.py -ceilometer/tests/event/test_converter.py -ceilometer/tests/event/test_endpoint.py -ceilometer/tests/event/test_trait_plugins.py ceilometer/tests/functional/__init__.py -ceilometer/tests/functional/test_empty.py +ceilometer/tests/functional/test_bin.py +ceilometer/tests/functional/test_collector.py +ceilometer/tests/functional/test_notification.py +ceilometer/tests/functional/api/__init__.py +ceilometer/tests/functional/api/v2/__init__.py +ceilometer/tests/functional/api/v2/test_acl_scenarios.py +ceilometer/tests/functional/api/v2/test_alarm_scenarios.py +ceilometer/tests/functional/api/v2/test_api_upgrade.py +ceilometer/tests/functional/api/v2/test_app.py +ceilometer/tests/functional/api/v2/test_capabilities.py +ceilometer/tests/functional/api/v2/test_complex_query_scenarios.py +ceilometer/tests/functional/api/v2/test_compute_duration_by_resource_scenarios.py +ceilometer/tests/functional/api/v2/test_event_scenarios.py +ceilometer/tests/functional/api/v2/test_list_events_scenarios.py +ceilometer/tests/functional/api/v2/test_list_meters_scenarios.py +ceilometer/tests/functional/api/v2/test_list_resources_scenarios.py +ceilometer/tests/functional/api/v2/test_post_samples_scenarios.py +ceilometer/tests/functional/api/v2/test_statistics_scenarios.py +ceilometer/tests/functional/gabbi/__init__.py +ceilometer/tests/functional/gabbi/fixtures.py +ceilometer/tests/functional/gabbi/test_gabbi.py +ceilometer/tests/functional/gabbi/test_gabbi_prefix.py +ceilometer/tests/functional/gabbi/gabbits/alarms.yaml 
+ceilometer/tests/functional/gabbi/gabbits/api_events_no_data.yaml +ceilometer/tests/functional/gabbi/gabbits/api_events_with_data.yaml +ceilometer/tests/functional/gabbi/gabbits/basic.yaml +ceilometer/tests/functional/gabbi/gabbits/capabilities.yaml +ceilometer/tests/functional/gabbi/gabbits/clean-samples.yaml +ceilometer/tests/functional/gabbi/gabbits/fixture-samples.yaml +ceilometer/tests/functional/gabbi/gabbits/meters.yaml +ceilometer/tests/functional/gabbi/gabbits/resources-empty.yaml +ceilometer/tests/functional/gabbi/gabbits/resources-fixtured.yaml +ceilometer/tests/functional/gabbi/gabbits/samples.yaml +ceilometer/tests/functional/gabbi/gabbits_prefix/api_events_with_data.yaml +ceilometer/tests/functional/gabbi/gabbits_prefix/basic.yaml +ceilometer/tests/functional/gabbi/gabbits_prefix/clean-samples.yaml +ceilometer/tests/functional/gabbi/gabbits_prefix/resources-fixtured.yaml ceilometer/tests/functional/hooks/post_test_hook.sh -ceilometer/tests/gabbi/__init__.py -ceilometer/tests/gabbi/fixtures.py -ceilometer/tests/gabbi/test_gabbi.py -ceilometer/tests/gabbi/test_gabbi_prefix.py -ceilometer/tests/gabbi/gabbits/alarms.yaml -ceilometer/tests/gabbi/gabbits/api_events_no_data.yaml -ceilometer/tests/gabbi/gabbits/api_events_with_data.yaml -ceilometer/tests/gabbi/gabbits/basic.yaml -ceilometer/tests/gabbi/gabbits/capabilities.yaml -ceilometer/tests/gabbi/gabbits/clean-samples.yaml -ceilometer/tests/gabbi/gabbits/fixture-samples.yaml -ceilometer/tests/gabbi/gabbits/meters.yaml -ceilometer/tests/gabbi/gabbits/resources-empty.yaml -ceilometer/tests/gabbi/gabbits/resources-fixtured.yaml -ceilometer/tests/gabbi/gabbits/samples.yaml -ceilometer/tests/gabbi/gabbits_prefix/api_events_with_data.yaml -ceilometer/tests/gabbi/gabbits_prefix/basic.yaml -ceilometer/tests/gabbi/gabbits_prefix/clean-samples.yaml -ceilometer/tests/gabbi/gabbits_prefix/resources-fixtured.yaml -ceilometer/tests/hardware/__init__.py -ceilometer/tests/hardware/inspector/__init__.py -ceilometer/tests/hardware/inspector/test_inspector.py -ceilometer/tests/hardware/inspector/test_snmp.py -ceilometer/tests/hardware/pollsters/__init__.py -ceilometer/tests/hardware/pollsters/base.py -ceilometer/tests/hardware/pollsters/test_cpu.py -ceilometer/tests/hardware/pollsters/test_disk.py -ceilometer/tests/hardware/pollsters/test_memory.py -ceilometer/tests/hardware/pollsters/test_net.py -ceilometer/tests/hardware/pollsters/test_network_aggregated.py -ceilometer/tests/hardware/pollsters/test_system.py -ceilometer/tests/hardware/pollsters/test_util.py -ceilometer/tests/identity/__init__.py -ceilometer/tests/identity/test_notifications.py -ceilometer/tests/image/__init__.py -ceilometer/tests/image/test_glance.py -ceilometer/tests/image/test_notifications.py -ceilometer/tests/ipmi/__init__.py -ceilometer/tests/ipmi/notifications/__init__.py -ceilometer/tests/ipmi/notifications/ipmi_test_data.py -ceilometer/tests/ipmi/notifications/test_ironic.py -ceilometer/tests/ipmi/platform/__init__.py -ceilometer/tests/ipmi/platform/fake_utils.py -ceilometer/tests/ipmi/platform/ipmitool_test_data.py -ceilometer/tests/ipmi/platform/test_intel_node_manager.py -ceilometer/tests/ipmi/platform/test_ipmi_sensor.py -ceilometer/tests/ipmi/pollsters/__init__.py -ceilometer/tests/ipmi/pollsters/base.py -ceilometer/tests/ipmi/pollsters/test_node.py -ceilometer/tests/ipmi/pollsters/test_sensor.py -ceilometer/tests/key_value_storage/__init__.py -ceilometer/tests/key_value_storage/test_notifications.py -ceilometer/tests/meter/__init__.py 
-ceilometer/tests/meter/test_notifications.py -ceilometer/tests/network/__init__.py -ceilometer/tests/network/test_floatingip.py -ceilometer/tests/network/test_notifications.py -ceilometer/tests/network/services/__init__.py -ceilometer/tests/network/services/test_fwaas.py -ceilometer/tests/network/services/test_lbaas.py -ceilometer/tests/network/services/test_vpnaas.py -ceilometer/tests/network/statistics/__init__.py -ceilometer/tests/network/statistics/test_driver.py -ceilometer/tests/network/statistics/test_flow.py -ceilometer/tests/network/statistics/test_port.py -ceilometer/tests/network/statistics/test_statistics.py -ceilometer/tests/network/statistics/test_switch.py -ceilometer/tests/network/statistics/test_table.py -ceilometer/tests/network/statistics/opencontrail/__init__.py -ceilometer/tests/network/statistics/opencontrail/test_client.py -ceilometer/tests/network/statistics/opencontrail/test_driver.py -ceilometer/tests/network/statistics/opendaylight/__init__.py -ceilometer/tests/network/statistics/opendaylight/test_client.py -ceilometer/tests/network/statistics/opendaylight/test_driver.py -ceilometer/tests/objectstore/__init__.py -ceilometer/tests/objectstore/test_notifications.py -ceilometer/tests/objectstore/test_rgw.py -ceilometer/tests/objectstore/test_rgw_client.py -ceilometer/tests/objectstore/test_swift.py -ceilometer/tests/orchestration/__init__.py -ceilometer/tests/orchestration/test_notifications.py -ceilometer/tests/profiler/__init__.py -ceilometer/tests/profiler/test_notifications.py -ceilometer/tests/publisher/__init__.py -ceilometer/tests/publisher/test_direct.py -ceilometer/tests/publisher/test_file.py -ceilometer/tests/publisher/test_kafka_broker_publisher.py -ceilometer/tests/publisher/test_messaging_publisher.py -ceilometer/tests/publisher/test_udp.py -ceilometer/tests/publisher/test_utils.py -ceilometer/tests/storage/__init__.py -ceilometer/tests/storage/test_base.py -ceilometer/tests/storage/test_get_connection.py -ceilometer/tests/storage/test_impl_db2.py -ceilometer/tests/storage/test_impl_hbase.py -ceilometer/tests/storage/test_impl_log.py -ceilometer/tests/storage/test_impl_mongodb.py -ceilometer/tests/storage/test_impl_sqlalchemy.py -ceilometer/tests/storage/test_models.py -ceilometer/tests/storage/test_pymongo_base.py -ceilometer/tests/storage/test_storage_scenarios.py -ceilometer/tests/storage/sqlalchemy/__init__.py -ceilometer/tests/storage/sqlalchemy/test_models.py -ceilometer/tests/telemetry/__init__.py -ceilometer/tests/telemetry/test_notifications.py -ceilometer/tests/volume/__init__.py -ceilometer/tests/volume/test_notifications.py +ceilometer/tests/functional/publisher/__init__.py +ceilometer/tests/functional/publisher/test_direct.py +ceilometer/tests/functional/storage/__init__.py +ceilometer/tests/functional/storage/test_impl_db2.py +ceilometer/tests/functional/storage/test_impl_hbase.py +ceilometer/tests/functional/storage/test_impl_log.py +ceilometer/tests/functional/storage/test_impl_mongodb.py +ceilometer/tests/functional/storage/test_impl_sqlalchemy.py +ceilometer/tests/functional/storage/test_pymongo_base.py +ceilometer/tests/functional/storage/test_storage_scenarios.py +ceilometer/tests/integration/__init__.py +ceilometer/tests/integration/gabbi/__init__.py +ceilometer/tests/integration/gabbi/test_gabbi_live.py +ceilometer/tests/integration/gabbi/gabbits-live/autoscaling.yaml +ceilometer/tests/integration/gabbi/gabbits-live/create_stack.json +ceilometer/tests/integration/gabbi/gabbits-live/update_stack.json 
+ceilometer/tests/integration/hooks/post_test_hook.sh +ceilometer/tests/unit/__init__.py +ceilometer/tests/unit/test_coordination.py +ceilometer/tests/unit/test_decoupled_pipeline.py +ceilometer/tests/unit/test_event_pipeline.py +ceilometer/tests/unit/test_hacking.py +ceilometer/tests/unit/test_messaging.py +ceilometer/tests/unit/test_middleware.py +ceilometer/tests/unit/test_neutronclient.py +ceilometer/tests/unit/test_novaclient.py +ceilometer/tests/unit/test_sample.py +ceilometer/tests/unit/test_utils.py +ceilometer/tests/unit/agent/__init__.py +ceilometer/tests/unit/agent/agentbase.py +ceilometer/tests/unit/agent/test_discovery.py +ceilometer/tests/unit/agent/test_manager.py +ceilometer/tests/unit/agent/test_plugin.py +ceilometer/tests/unit/alarm/__init__.py +ceilometer/tests/unit/alarm/test_alarm_svc.py +ceilometer/tests/unit/alarm/test_notifier.py +ceilometer/tests/unit/alarm/test_rpc.py +ceilometer/tests/unit/alarm/evaluator/__init__.py +ceilometer/tests/unit/alarm/evaluator/base.py +ceilometer/tests/unit/alarm/evaluator/test_base.py +ceilometer/tests/unit/alarm/evaluator/test_combination.py +ceilometer/tests/unit/alarm/evaluator/test_gnocchi.py +ceilometer/tests/unit/alarm/evaluator/test_threshold.py +ceilometer/tests/unit/api/__init__.py +ceilometer/tests/unit/api/test_app.py +ceilometer/tests/unit/api/test_hooks.py +ceilometer/tests/unit/api/test_versions.py +ceilometer/tests/unit/api/v2/__init__.py +ceilometer/tests/unit/api/v2/test_complex_query.py +ceilometer/tests/unit/api/v2/test_query.py +ceilometer/tests/unit/api/v2/test_statistics.py +ceilometer/tests/unit/api/v2/test_wsme_custom_type.py +ceilometer/tests/unit/compute/__init__.py +ceilometer/tests/unit/compute/notifications/__init__.py +ceilometer/tests/unit/compute/notifications/test_instance.py +ceilometer/tests/unit/compute/pollsters/__init__.py +ceilometer/tests/unit/compute/pollsters/base.py +ceilometer/tests/unit/compute/pollsters/test_cpu.py +ceilometer/tests/unit/compute/pollsters/test_diskio.py +ceilometer/tests/unit/compute/pollsters/test_instance.py +ceilometer/tests/unit/compute/pollsters/test_location_metadata.py +ceilometer/tests/unit/compute/pollsters/test_memory.py +ceilometer/tests/unit/compute/pollsters/test_net.py +ceilometer/tests/unit/compute/virt/__init__.py +ceilometer/tests/unit/compute/virt/hyperv/__init__.py +ceilometer/tests/unit/compute/virt/hyperv/test_inspector.py +ceilometer/tests/unit/compute/virt/hyperv/test_utilsv2.py +ceilometer/tests/unit/compute/virt/libvirt/__init__.py +ceilometer/tests/unit/compute/virt/libvirt/test_inspector.py +ceilometer/tests/unit/compute/virt/vmware/__init__.py +ceilometer/tests/unit/compute/virt/vmware/test_inspector.py +ceilometer/tests/unit/compute/virt/vmware/test_vsphere_operations.py +ceilometer/tests/unit/compute/virt/xenapi/__init__.py +ceilometer/tests/unit/compute/virt/xenapi/test_inspector.py +ceilometer/tests/unit/data_processing/__init__.py +ceilometer/tests/unit/database/__init__.py +ceilometer/tests/unit/database/test_notifications.py +ceilometer/tests/unit/dispatcher/__init__.py +ceilometer/tests/unit/dispatcher/test_db.py +ceilometer/tests/unit/dispatcher/test_file.py +ceilometer/tests/unit/dispatcher/test_gnocchi.py +ceilometer/tests/unit/dispatcher/test_http.py +ceilometer/tests/unit/dns/__init__.py +ceilometer/tests/unit/dns/test_notifications.py +ceilometer/tests/unit/energy/__init__.py +ceilometer/tests/unit/energy/test_kwapi.py +ceilometer/tests/unit/event/__init__.py +ceilometer/tests/unit/event/test_converter.py 
+ceilometer/tests/unit/event/test_endpoint.py +ceilometer/tests/unit/event/test_trait_plugins.py +ceilometer/tests/unit/hardware/__init__.py +ceilometer/tests/unit/hardware/inspector/__init__.py +ceilometer/tests/unit/hardware/inspector/test_inspector.py +ceilometer/tests/unit/hardware/inspector/test_snmp.py +ceilometer/tests/unit/hardware/pollsters/__init__.py +ceilometer/tests/unit/hardware/pollsters/base.py +ceilometer/tests/unit/hardware/pollsters/test_cpu.py +ceilometer/tests/unit/hardware/pollsters/test_disk.py +ceilometer/tests/unit/hardware/pollsters/test_memory.py +ceilometer/tests/unit/hardware/pollsters/test_net.py +ceilometer/tests/unit/hardware/pollsters/test_network_aggregated.py +ceilometer/tests/unit/hardware/pollsters/test_system.py +ceilometer/tests/unit/hardware/pollsters/test_util.py +ceilometer/tests/unit/identity/__init__.py +ceilometer/tests/unit/image/__init__.py +ceilometer/tests/unit/image/test_glance.py +ceilometer/tests/unit/ipmi/__init__.py +ceilometer/tests/unit/ipmi/notifications/__init__.py +ceilometer/tests/unit/ipmi/notifications/ipmi_test_data.py +ceilometer/tests/unit/ipmi/notifications/test_ironic.py +ceilometer/tests/unit/ipmi/platform/__init__.py +ceilometer/tests/unit/ipmi/platform/fake_utils.py +ceilometer/tests/unit/ipmi/platform/ipmitool_test_data.py +ceilometer/tests/unit/ipmi/platform/test_intel_node_manager.py +ceilometer/tests/unit/ipmi/platform/test_ipmi_sensor.py +ceilometer/tests/unit/ipmi/pollsters/__init__.py +ceilometer/tests/unit/ipmi/pollsters/base.py +ceilometer/tests/unit/ipmi/pollsters/test_node.py +ceilometer/tests/unit/ipmi/pollsters/test_sensor.py +ceilometer/tests/unit/key_value_storage/__init__.py +ceilometer/tests/unit/meter/__init__.py +ceilometer/tests/unit/meter/test_notifications.py +ceilometer/tests/unit/network/__init__.py +ceilometer/tests/unit/network/test_floatingip.py +ceilometer/tests/unit/network/test_notifications.py +ceilometer/tests/unit/network/services/__init__.py +ceilometer/tests/unit/network/services/test_fwaas.py +ceilometer/tests/unit/network/services/test_lbaas.py +ceilometer/tests/unit/network/services/test_vpnaas.py +ceilometer/tests/unit/network/statistics/__init__.py +ceilometer/tests/unit/network/statistics/test_driver.py +ceilometer/tests/unit/network/statistics/test_flow.py +ceilometer/tests/unit/network/statistics/test_port.py +ceilometer/tests/unit/network/statistics/test_statistics.py +ceilometer/tests/unit/network/statistics/test_switch.py +ceilometer/tests/unit/network/statistics/test_table.py +ceilometer/tests/unit/network/statistics/opencontrail/__init__.py +ceilometer/tests/unit/network/statistics/opencontrail/test_client.py +ceilometer/tests/unit/network/statistics/opencontrail/test_driver.py +ceilometer/tests/unit/network/statistics/opendaylight/__init__.py +ceilometer/tests/unit/network/statistics/opendaylight/test_client.py +ceilometer/tests/unit/network/statistics/opendaylight/test_driver.py +ceilometer/tests/unit/objectstore/__init__.py +ceilometer/tests/unit/objectstore/test_rgw.py +ceilometer/tests/unit/objectstore/test_rgw_client.py +ceilometer/tests/unit/objectstore/test_swift.py +ceilometer/tests/unit/orchestration/__init__.py +ceilometer/tests/unit/publisher/__init__.py +ceilometer/tests/unit/publisher/test_file.py +ceilometer/tests/unit/publisher/test_kafka_broker_publisher.py +ceilometer/tests/unit/publisher/test_messaging_publisher.py +ceilometer/tests/unit/publisher/test_udp.py +ceilometer/tests/unit/publisher/test_utils.py +ceilometer/tests/unit/storage/__init__.py 
+ceilometer/tests/unit/storage/test_base.py
+ceilometer/tests/unit/storage/test_get_connection.py
+ceilometer/tests/unit/storage/test_models.py
+ceilometer/tests/unit/storage/sqlalchemy/__init__.py
+ceilometer/tests/unit/storage/sqlalchemy/test_models.py
+ceilometer/tests/unit/telemetry/__init__.py
+ceilometer/tests/unit/telemetry/test_notifications.py
+ceilometer/tests/unit/volume/__init__.py
 ceilometer/transformer/__init__.py
 ceilometer/transformer/accumulator.py
 ceilometer/transformer/arithmetic.py
 ceilometer/transformer/conversions.py
-ceilometer/volume/__init__.py
-ceilometer/volume/notifications.py
 devstack/apache-ceilometer.template
 devstack/plugin.sh
 devstack/settings
 devstack/files/rpms/ceilometer
+devstack/upgrade/settings
+devstack/upgrade/shutdown.sh
+devstack/upgrade/upgrade.sh
 doc/Makefile
 doc/source/1-agents.png
 doc/source/2-accessmodel.png
@@ -555,6 +547,7 @@
 doc/source/events.rst
 doc/source/format.rst
 doc/source/glossary.rst
+doc/source/gmr.rst
 doc/source/index.rst
 doc/source/measurements.rst
 doc/source/new_meters.rst
@@ -581,7 +574,6 @@
 etc/ceilometer/gabbi_pipeline.yaml
 etc/ceilometer/gnocchi_archive_policy_map.yaml
 etc/ceilometer/gnocchi_resources.yaml
-etc/ceilometer/meters.yaml
 etc/ceilometer/pipeline.yaml
 etc/ceilometer/policy.json
 etc/ceilometer/policy.json.sample
diff -Nru ceilometer-5.0.0~b2/ChangeLog ceilometer-5.0.0~b3/ChangeLog
--- ceilometer-5.0.0~b2/ChangeLog 2015-07-30 12:17:53.000000000 +0000
+++ ceilometer-5.0.0~b3/ChangeLog 2015-09-03 13:09:30.000000000 +0000
@@ -1,17 +1,135 @@
 CHANGES
 =======
 
+5.0.0.0b3
+---------
+
+* Event filtering for non-admin users
+* integration: fix typo
+* Updated from global requirements
+* Add index to metadata_hash column of resource table
+* Incorrect Links are updated
+* Removing unused dependency: discover
+* Use new location of subunit2html
+* Fixed identity trust event types
+* gnocchi: quote the resource_id in url
+* Move profiler meters to yaml
+* Control Events RBAC from policy.json
+* Events RBAC needs scoped token
+* make telemetry sample payloads dictionaries
+* Fix requeue process on event handling error
+* Keep the instance_type meta from polling and notification consistent
+* Change json paths to start with $. for consistency
+* Add validation tests for arithmetic, string and prefix expressions
+* Fix description for "Inapt spelling of 'MongoDB'"
+* Create conf directory during devstack install phase
+* support custom timestamp
+* Add cpu meters to yaml
+* Fix description for "Incorrect spelling of a word"
+* integration: add some new tests
+* Fix disable_non_metric_meters referencing
+* Update tests to reflect WSME 0.8 fixes
+* remove jsonpath-rw requirement
+* Do not use system config file for test
+* gnocchi: move to jsonpath_rw_ext
+* Updated from global requirements
+* Allow to run debug tox job for functional tests
+* Use jsonpath_rw_ext for meter/event definitions
+* preload jsonpath_rw parsers
+* integration test: adjusts timeout
+* integration test: failfast
+* Updated from global requirements
+* Avoid recording whole instance info in log
+* Fix dependency for doc build
+* Mark record_type in PaaS Event Format doc as optional
+* full multi-meter support
+* add flexible grouping key
+* Corrected test_fallback_meter_path test case
+* Add hypervisor inspector sanity check
+* handle list payloads in notifications
+* xenapi: support the session to "unix://local"
+* Introduce Guru Meditation Reports into Ceilometer
+* Use start status of coordinator in tooz
+* Fixed event requeuing/ack on publisher failure
+* Implement consuming metrics from Magnum
+* Avoid storing samples with empty or non-numerical volumes
+* use union all when building trait query
+* Fixed spelling error, retreive -> retrieve
+* Use min and max on IntOpt option types
+* Update install docs with gnocchi dispatcher info
+* Make it possible to run postgresql functional job
+* Revert "Remove version from os_auth_url in service_credentials"
+* Updated from global requirements
+* integration: chown ceilometer directory properly
+* add mandatory limit value to complex query list
+* add test to validate jsonpath
+* Remove version from os_auth_url in service_credentials
+* do not translate debug logs
+* Updated from global requirements
+* Grenade plugin using devstack plugin for ceilometer
+* remove alembic requirement
+* Convert instance, bandwidth and SwiftMiddleware meters
+* Change and move the workers options to corresponding service section
+* Drop the downgrade function of migration scripts
+* start rpc deprecation
+* support multiple-meter payloads
+* add poll history to avoid duplicate samples
+* Add Kilo release note reference
+* initialise opencontrail client in tests
+* Make ConnectionRetryTest more reliable
+* Correct thread handling in TranslationHook
+* Updated from global requirements
+* Correctly initialized oslo config fixture for TestClientHTTPBasicAuth
+* Don't start up mongodb for unit test coverage
+* disable non-metric meter definitions
+* Cast Int64 values to float
+* Convert identity, sahara and volume to meters yaml
+* Enable entry points for new declarative meters
+* Fix for rgw still throwing errors
+* group pollsters by interval
+* Revert "Revert "remove instance: meter""
+* api: fix alarm deletion and update
+* Fixes the kafka publisher
+* Sync devstack plugin with devstack:lib/ceilometer
+* integration: use the right user in gate
+* Imported Translations from Transifex
+* Initial separating unit and functional tests
+* Stop using openstack.common from keystoneclient
+* minimise scope of hmac mocking
+* Updated from global requirements
+* gnocchi: retry with a new token on 401
+* Fix some gabbi tests
+* Improve comments in notification.py
+* mongo: fix last python3 bugs
+* postgres isolation level produces inconsistent reads
+* Masks messaging_urls in logs during debug mode
+* Corrected unit of snmp based hardware disk and memory meters
+* Provide base method for inspect_memory_resident
+* Fix Python 3 issue in opendaylight client
+* Fix more tests on Python 3
+* Remove the compute inspector choice restriction
+* [MongoDB] Refactor indexes for meter and resources
+* tests: add an integration test
+* Fix WSGI replacement_start_response() on Python 3
+* gnocchi: reduce the number of patches to the gnocchi API
+* Make the partition coordinator log more readable
+* Drop out-of-time-sequence rate of change samples
+
 5.0.0.0b2
 ---------
 
 * [MongoDB] Use a aggregate pipeline in statistics
 * Instance Cache in Node Discovery Pollster
 * Instance Caching
+* Imported Translations from Transifex
+* fix gnocchi resources yaml
 * Import the api opt group in gabbi fixture
+* Add a batch_polled_samples configuration item
 * Remove redundant comma
 * storage: deprecates mongodb_replica_set option
 * Improves send_test_data tools
 * Replace isotime() with utcnow() and isoformat()
+* distributed coordinated notifications
 * Imported Translations from Transifex
 * Close and dispose test database setup connections
 * Updated from global requirements
diff -Nru ceilometer-5.0.0~b2/debian/changelog ceilometer-5.0.0~b3/debian/changelog
--- ceilometer-5.0.0~b2/debian/changelog 2015-08-27 16:51:27.000000000 +0000
+++ ceilometer-5.0.0~b3/debian/changelog 2015-09-16 09:25:02.000000000 +0000
@@ -1,3 +1,19 @@
+ceilometer (1:5.0.0~b3-0ubuntu1~ubuntu15.10.1~ppa201509161024) wily; urgency=medium
+
+  * No-change backport to wily
+
+ -- James Page  Wed, 16 Sep 2015 10:25:02 +0100
+
+ceilometer (1:5.0.0~b3-0ubuntu1) wily; urgency=medium
+
+  * New upstream milestone for OpenStack Liberty.
+  * d/control: Align (build-)depends with upstream.
+  * d/p/skip-test.patch: Rebased.
+  * d/p/skip-gabbi.patch: Rebased.
+  * d/p/disable-kafka.patch: Updated for removal of setUp() method.
+
+ -- Corey Bryant  Tue, 08 Sep 2015 15:48:26 -0400
+
 ceilometer (1:5.0.0~b2-0ubuntu2) wily; urgency=medium
 
   * d/p/disable-kafka.patch: Updated.
diff -Nru ceilometer-5.0.0~b2/debian/control ceilometer-5.0.0~b3/debian/control --- ceilometer-5.0.0~b2/debian/control 2015-08-27 16:51:27.000000000 +0000 +++ ceilometer-5.0.0~b3/debian/control 2015-09-09 21:43:23.000000000 +0000 @@ -7,11 +7,10 @@ dh-systemd, openstack-pkg-tools (>= 23~), python-all, - python-pbr (>= 1.3), + python-pbr (>= 1.6), python-setuptools, python-sphinx, -Build-Depends-Indep: alembic (>= 0.7.2), - python-awsauth, +Build-Depends-Indep: python-awsauth, python-babel, python-ceilometerclient (>= 1.0.13), python-contextlib2, @@ -23,7 +22,7 @@ python-hacking, python-happybase (>= 0.5), python-httplib2 (>= 0.7.5), - python-jsonpath-rw, + python-jsonpath-rw-ext, python-jsonschema, python-keystoneclient (>= 1:1.6.0), python-keystonemiddleware (>= 2.0.0), @@ -31,26 +30,27 @@ python-migrate (>= 0.9.6), python-mock (>= 1.2), python-msgpack (>= 0.4.0), - python-neutronclient (>= 2.3.11), - python-novaclient (>= 2:2.22.0), - python-oslo.concurrency (>= 2.1.0), - python-oslo.config (>= 1:1.11.0), + python-neutronclient (>= 2.6.0), + python-novaclient (>= 2:2.26.0), + python-oslo.concurrency (>= 2.3.0), + python-oslo.config (>= 1:2.3.0), python-oslo.context (>= 0.2.0), - python-oslo.db (>= 1.12.0), + python-oslo.db (>= 2.4.1), python-oslo.i18n (>= 1.5.0), - python-oslo.log (>= 1.6.0), + python-oslo.log (>= 1.8.0), python-oslo.messaging (>= 1.16.0), - python-oslo.middleware (>= 2.4.0), + python-oslo.middleware (>= 2.8.0), python-oslo.policy (>= 0.5.0), + python-oslo.reports (>= 0.1.0), python-oslo.rootwrap (>= 2.0.0), python-oslo.serialization (>= 1.4.0), - python-oslo.service (>= 0.1.0), - python-oslo.utils (>= 1.9.0), + python-oslo.service (>= 0.7.0), + python-oslo.utils (>= 2.0.0), python-oslo.vmware (>= 1.16.0), python-oslosphinx (>= 2.5.0), - python-oslotest (>= 1.7.0), + python-oslotest (>= 1.10.0), python-pastedeploy, - python-pecan (>= 0.8.0), + python-pecan (>= 1.0.0), python-psycopg2, python-pymongo (>= 3.0.2), python-pymysql, @@ -64,7 +64,7 @@ python-tempest-lib (>= 0.6.1), python-testscenarios, python-testtools (>= 1.4.0), - python-tooz (>= 0.16.0), + python-tooz (>= 1.19.0), python-tz (>= 2013.6), python-webob, python-werkzeug, @@ -78,35 +78,35 @@ Package: python-ceilometer Architecture: all -Depends: alembic (>= 0.7.2), - python-ceilometerclient (>= 1.0.13), +Depends: python-ceilometerclient (>= 1.0.13), python-croniter, python-eventlet (>= 0.17.4), python-glanceclient (>= 1:0.18.0), - python-jsonpath-rw, + python-jsonpath-rw-ext, python-jsonschema, python-keystoneclient (>= 1:1.6.0), python-keystonemiddleware (>= 2.0.0), python-lxml, python-migrate (>= 0.9.6), python-msgpack (>= 0.4.0), - python-neutronclient (>= 2.3.11), - python-novaclient (>= 2:2.22.0), - python-oslo.concurrency (>= 2.1.0), - python-oslo.config (>= 1:1.11.0), + python-neutronclient (>= 2.6.0), + python-novaclient (>= 2:2.26.0), + python-oslo.concurrency (>= 2.3.0), + python-oslo.config (>= 1:2.3.0), python-oslo.context (>= 0.2.0), - python-oslo.db (>= 1.12.0), + python-oslo.db (>= 2.4.1), python-oslo.i18n (>= 1.5.0), - python-oslo.log (>= 1.6.0), + python-oslo.log (>= 1.8.0), python-oslo.messaging (>= 1.16.0), - python-oslo.middleware (>= 2.4.0), + python-oslo.middleware (>= 2.8.0), python-oslo.policy (>= 0.5.0), + python-oslo.reports (>= 0.1.0), python-oslo.rootwrap (>= 2.0.0), python-oslo.serialization (>= 1.4.0), - python-oslo.service (>= 0.1.0), - python-oslo.utils (>= 1.9.0), + python-oslo.service (>= 0.7.0), + python-oslo.utils (>= 2.0.0), python-pastedeploy, - python-pecan (>= 0.8.0), + 
python-pecan (>= 1.0.0), python-pysnmp4, python-requests (>= 2.5.2), python-retrying (>= 1.2.3), @@ -114,7 +114,7 @@ python-sqlalchemy (>= 0.9.7), python-stevedore (>= 1.5.0), python-swiftclient (>= 1:2.2.0), - python-tooz (>= 0.16.0), + python-tooz (>= 1.19.0), python-tz (>= 2013.6), python-webob, python-werkzeug, diff -Nru ceilometer-5.0.0~b2/debian/patches/disable-kafka.patch ceilometer-5.0.0~b3/debian/patches/disable-kafka.patch --- ceilometer-5.0.0~b2/debian/patches/disable-kafka.patch 2015-08-27 16:51:27.000000000 +0000 +++ ceilometer-5.0.0~b3/debian/patches/disable-kafka.patch 2015-09-09 21:43:23.000000000 +0000 @@ -1,5 +1,15 @@ ---- a/ceilometer/tests/publisher/test_kafka_broker_publisher.py -+++ b/ceilometer/tests/publisher/test_kafka_broker_publisher.py +--- a/requirements.txt ++++ b/requirements.txt +@@ -7,7 +7,6 @@ + eventlet>=0.17.4 + jsonpath-rw-ext>=0.1.7 + jsonschema!=2.5.0,<3.0.0,>=2.0.0 +-kafka-python>=0.9.2 # Apache-2.0 + keystonemiddleware>=2.0.0 + lxml>=2.3 + msgpack-python>=0.4.0 +--- a/ceilometer/tests/unit/publisher/test_kafka_broker_publisher.py ++++ b/ceilometer/tests/unit/publisher/test_kafka_broker_publisher.py @@ -15,13 +15,17 @@ """Tests for ceilometer/publisher/kafka_broker.py """ @@ -16,10 +26,10 @@ + from ceilometer.publisher import kafka_broker as kafka +except ImportError: + kafka = mock.Mock() + from ceilometer.publisher import messaging as msg_publisher from ceilometer import sample from ceilometer.tests import base as tests_base - -@@ -95,6 +99,12 @@ +@@ -97,6 +101,12 @@ ), ] @@ -29,16 +39,70 @@ + kafka = None + + @testtools.skipIf(kafka is None, "Kafka not avaliable, skipping") - def setUp(self): - super(TestKafkaPublisher, self).setUp() + def test_publish(self): + publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( + 'kafka://127.0.0.1:9092?topic=ceilometer')) +@@ -106,6 +116,7 @@ + self.assertEqual(5, len(fake_producer.send_messages.mock_calls)) + self.assertEqual(0, len(publisher.local_queue)) ---- a/requirements.txt -+++ b/requirements.txt -@@ -8,7 +8,6 @@ - eventlet>=0.17.4 - jsonpath-rw<2.0,>=1.2.0 - jsonschema!=2.5.0,<3.0.0,>=2.0.0 --kafka-python>=0.9.2 # Apache-2.0 - keystonemiddleware>=2.0.0 - lxml>=2.3 - msgpack-python>=0.4.0 ++ @testtools.skipIf(kafka is None, "Kafka not avaliable, skipping") + def test_publish_without_options(self): + publisher = kafka.KafkaBrokerPublisher( + netutils.urlsplit('kafka://127.0.0.1:9092')) +@@ -115,6 +126,7 @@ + self.assertEqual(5, len(fake_producer.send_messages.mock_calls)) + self.assertEqual(0, len(publisher.local_queue)) + ++ @testtools.skipIf(kafka is None, "Kafka not avaliable, skipping") + def test_publish_to_host_without_policy(self): + publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( + 'kafka://127.0.0.1:9092?topic=ceilometer')) +@@ -124,6 +136,7 @@ + 'kafka://127.0.0.1:9092?topic=ceilometer&policy=test')) + self.assertEqual('default', publisher.policy) + ++ @testtools.skipIf(kafka is None, "Kafka not avaliable, skipping") + def test_publish_to_host_with_default_policy(self): + publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( + 'kafka://127.0.0.1:9092?topic=ceilometer&policy=default')) +@@ -136,6 +149,7 @@ + self.assertEqual(100, len(fake_producer.send_messages.mock_calls)) + self.assertEqual(0, len(publisher.local_queue)) + ++ @testtools.skipIf(kafka is None, "Kafka not avaliable, skipping") + def test_publish_to_host_with_drop_policy(self): + publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( + 'kafka://127.0.0.1:9092?topic=ceilometer&policy=drop')) +@@ -146,6 +160,7 
@@ + self.assertEqual(1, len(fake_producer.send_messages.mock_calls)) + self.assertEqual(0, len(publisher.local_queue)) + ++ @testtools.skipIf(kafka is None, "Kafka not avaliable, skipping") + def test_publish_to_host_with_queue_policy(self): + publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( + 'kafka://127.0.0.1:9092?topic=ceilometer&policy=queue')) +@@ -156,6 +171,7 @@ + self.assertEqual(1, len(fake_producer.send_messages.mock_calls)) + self.assertEqual(1, len(publisher.local_queue)) + ++ @testtools.skipIf(kafka is None, "Kafka not avaliable, skipping") + def test_publish_to_down_host_with_default_queue_size(self): + publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( + 'kafka://127.0.0.1:9092?topic=ceilometer&policy=queue')) +@@ -174,6 +190,7 @@ + self.assertEqual('test-1999', + publisher.local_queue[1023][2][0]['counter_name']) + ++ @testtools.skipIf(kafka is None, "Kafka not avaliable, skipping") + def test_publish_to_host_from_down_to_up_with_queue(self): + publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( + 'kafka://127.0.0.1:9092?topic=ceilometer&policy=queue')) +@@ -193,6 +210,7 @@ + publisher.publish_samples(mock.MagicMock(), self.test_data) + self.assertEqual(0, len(publisher.local_queue)) + ++ @testtools.skipIf(kafka is None, "Kafka not avaliable, skipping") + def test_publish_event_with_default_policy(self): + publisher = kafka.KafkaBrokerPublisher( + netutils.urlsplit('kafka://127.0.0.1:9092?topic=ceilometer')) diff -Nru ceilometer-5.0.0~b2/debian/patches/skip-gabbi.patch ceilometer-5.0.0~b3/debian/patches/skip-gabbi.patch --- ceilometer-5.0.0~b2/debian/patches/skip-gabbi.patch 2015-08-27 16:51:27.000000000 +0000 +++ ceilometer-5.0.0~b3/debian/patches/skip-gabbi.patch 2015-09-09 21:43:23.000000000 +0000 @@ -1,6 +1,6 @@ ---- a/ceilometer/tests/gabbi/test_gabbi.py -+++ b/ceilometer/tests/gabbi/test_gabbi.py -@@ -20,10 +20,14 @@ For the sake of exploratory development. 
+--- a/ceilometer/tests/functional/gabbi/test_gabbi.py ++++ b/ceilometer/tests/functional/gabbi/test_gabbi.py +@@ -20,10 +20,14 @@ import os @@ -11,9 +11,9 @@ + driver = None from ceilometer.api import app --from ceilometer.tests.gabbi import fixtures as fixture_module +-from ceilometer.tests.functional.gabbi import fixtures as fixture_module +if driver: -+ from ceilometer.tests.gabbi import fixtures as fixture_module - ++ from ceilometer.tests.functional.gabbi import fixtures as fixture_module TESTS_DIR = 'gabbits' + diff -Nru ceilometer-5.0.0~b2/debian/patches/skip-test.patch ceilometer-5.0.0~b3/debian/patches/skip-test.patch --- ceilometer-5.0.0~b2/debian/patches/skip-test.patch 2015-08-27 16:51:27.000000000 +0000 +++ ceilometer-5.0.0~b3/debian/patches/skip-test.patch 2015-09-09 21:43:23.000000000 +0000 @@ -1,6 +1,6 @@ ---- a/ceilometer/tests/api/v2/test_event_scenarios.py -+++ b/ceilometer/tests/api/v2/test_event_scenarios.py -@@ -138,7 +138,10 @@ +--- a/ceilometer/tests/functional/api/v2/test_event_scenarios.py ++++ b/ceilometer/tests/functional/api/v2/test_event_scenarios.py +@@ -143,7 +143,10 @@ # We expect to get native UTC generated time back trait_time = self.s_time for event in data: diff -Nru ceilometer-5.0.0~b2/devstack/plugin.sh ceilometer-5.0.0~b3/devstack/plugin.sh --- ceilometer-5.0.0~b2/devstack/plugin.sh 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/devstack/plugin.sh 2015-09-03 13:05:55.000000000 +0000 @@ -155,7 +155,7 @@ fi if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then - pip_instal_gr oslo.vmware + pip_install_gr oslo.vmware fi fi } @@ -168,9 +168,8 @@ create_service_user "ceilometer" "admin" if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - local ceilometer_service=$(get_or_create_service "ceilometer" \ - "metering" "OpenStack Telemetry Service") - get_or_create_endpoint $ceilometer_service \ + get_or_create_service "ceilometer" "metering" "OpenStack Telemetry Service" + get_or_create_endpoint "metering" \ "$REGION_NAME" \ "$(ceilometer_service_url)/" \ "$(ceilometer_service_url)/" \ @@ -234,7 +233,6 @@ # Configure Ceilometer function configure_ceilometer { - sudo install -d -o $STACK_USER -m 755 $CEILOMETER_CONF_DIR $CEILOMETER_API_LOG_DIR iniset_rpc_backend ceilometer $CEILOMETER_CONF @@ -257,6 +255,7 @@ cp $CEILOMETER_DIR/etc/ceilometer/event_definitions.yaml $CEILOMETER_CONF_DIR cp $CEILOMETER_DIR/etc/ceilometer/gnocchi_archive_policy_map.yaml $CEILOMETER_CONF_DIR cp $CEILOMETER_DIR/etc/ceilometer/gnocchi_resources.yaml $CEILOMETER_CONF_DIR + cp $CEILOMETER_DIR/ceilometer/meter/data/meters.yaml $CEILOMETER_CONF_DIR if [ "$CEILOMETER_PIPELINE_INTERVAL" ]; then sed -i "s/interval:.*/interval: ${CEILOMETER_PIPELINE_INTERVAL}/" $CEILOMETER_CONF_DIR/pipeline.yaml @@ -325,6 +324,7 @@ _ceilometer_prepare_virt_drivers install_ceilometerclient setup_develop $CEILOMETER_DIR + sudo install -d -o $STACK_USER -m 755 $CEILOMETER_CONF_DIR $CEILOMETER_API_LOG_DIR } # install_ceilometerclient() - Collect source and prepare @@ -340,9 +340,9 @@ # start_ceilometer() - Start running processes, including screen function start_ceilometer { - run_process ceilometer-acentral "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces central --config-file $CEILOMETER_CONF" + run_process ceilometer-acentral "$CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_CONF" run_process ceilometer-anotification "$CEILOMETER_BIN_DIR/ceilometer-agent-notification --config-file $CEILOMETER_CONF" - run_process ceilometer-aipmi "$CEILOMETER_BIN_DIR/ceilometer-polling 
--polling-namespaces ipmi --config-file $CEILOMETER_CONF" + run_process ceilometer-aipmi "$CEILOMETER_BIN_DIR/ceilometer-agent-ipmi --config-file $CEILOMETER_CONF" if [[ "$CEILOMETER_USE_MOD_WSGI" == "False" ]]; then run_process ceilometer-api "$CEILOMETER_BIN_DIR/ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" @@ -360,10 +360,10 @@ # Start the compute agent late to allow time for the collector to # fully wake up and connect to the message bus. See bug #1355809 if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - run_process ceilometer-acompute "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces compute --config-file $CEILOMETER_CONF" $LIBVIRT_GROUP + run_process ceilometer-acompute "$CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_CONF" $LIBVIRT_GROUP fi if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then - run_process ceilometer-acompute "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespace compute --config-file $CEILOMETER_CONF" + run_process ceilometer-acompute "$CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_CONF" fi # Only die on API if it was actually intended to be turned on diff -Nru ceilometer-5.0.0~b2/devstack/upgrade/settings ceilometer-5.0.0~b3/devstack/upgrade/settings --- ceilometer-5.0.0~b2/devstack/upgrade/settings 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/devstack/upgrade/settings 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,7 @@ +register_project_for_upgrade ceilometer + +devstack_localrc base enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer +devstack_localrc base enable_service ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator tempest + +devstack_localrc target enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer +devstack_localrc target enable_service ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator tempest diff -Nru ceilometer-5.0.0~b2/devstack/upgrade/shutdown.sh ceilometer-5.0.0~b3/devstack/upgrade/shutdown.sh --- ceilometer-5.0.0~b2/devstack/upgrade/shutdown.sh 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-5.0.0~b3/devstack/upgrade/shutdown.sh 2015-09-03 13:05:55.000000000 +0000 @@ -0,0 +1,21 @@ +#!/bin/bash +# +# + +set -o errexit + +source $GRENADE_DIR/grenaderc +source $GRENADE_DIR/functions + +# We need base DevStack functions for this. +# TODO(chdent): This relies on the fact that for now base devstack is +# hosting ceilometer itself. Once M* starts this will need to be smarter. 
+source $BASE_DEVSTACK_DIR/functions
+source $BASE_DEVSTACK_DIR/stackrc # needed for status directory
+source $BASE_DEVSTACK_DIR/lib/tls
+source $BASE_DEVSTACK_DIR/lib/apache
+source $BASE_DEVSTACK_DIR/lib/ceilometer
+
+set -o xtrace
+
+stop_ceilometer
diff -Nru ceilometer-5.0.0~b2/devstack/upgrade/upgrade.sh ceilometer-5.0.0~b3/devstack/upgrade/upgrade.sh
--- ceilometer-5.0.0~b2/devstack/upgrade/upgrade.sh 1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/devstack/upgrade/upgrade.sh 2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,88 @@
+#!/usr/bin/env bash
+
+# ``upgrade-ceilometer``
+
+echo "*********************************************************************"
+echo "Begin $0"
+echo "*********************************************************************"
+
+# Clean up any resources that may be in use
+cleanup() {
+    set +o errexit
+
+    echo "*********************************************************************"
+    echo "ERROR: Abort $0"
+    echo "*********************************************************************"
+
+    # Kill ourselves to signal any calling process
+    trap 2; kill -2 $$
+}
+
+trap cleanup SIGHUP SIGINT SIGTERM
+
+# Keep track of the grenade directory
+RUN_DIR=$(cd $(dirname "$0") && pwd)
+
+# Source params
+source $GRENADE_DIR/grenaderc
+
+# Import common functions
+source $GRENADE_DIR/functions
+
+# This script exits on an error so that errors don't compound and you see
+# only the first error that occurred.
+set -o errexit
+
+# Save mongodb state (replace with snapshot)
+# TODO(chdent): There used to be a 'register_db_to_save ceilometer'
+# which we may wish to consider putting back in.
+if grep -q 'connection *= *mongo' /etc/ceilometer/ceilometer.conf; then
+    mongodump --db ceilometer --out $SAVE_DIR/ceilometer-dump.$BASE_RELEASE
+fi
+
+# Upgrade Ceilometer
+# ==================
+# Locate ceilometer devstack plugin, the directory above the
+# grenade plugin.
+CEILOMETER_DEVSTACK_DIR=$(dirname $(dirname $0))
+
+# Get functions from current DevStack
+source $TARGET_DEVSTACK_DIR/functions
+source $TARGET_DEVSTACK_DIR/stackrc
+source $TARGET_DEVSTACK_DIR/lib/apache
+
+# Get ceilometer functions from devstack plugin
+source $CEILOMETER_DEVSTACK_DIR/settings
+
+# Print the commands being run so that we can see the command that triggers
+# an error.
+set -o xtrace
+
+# Install the target ceilometer
+source $CEILOMETER_DEVSTACK_DIR/plugin.sh stack install
+
+# calls upgrade-ceilometer for specific release
+upgrade_project ceilometer $RUN_DIR $BASE_DEVSTACK_BRANCH $TARGET_DEVSTACK_BRANCH
+
+# Migrate the database
+# NOTE(chdent): As we evolve BIN_DIR is likely to be defined, but
+# currently it is not.
+CEILOMETER_BIN_DIR=$(dirname $(which ceilometer-dbsync))
+$CEILOMETER_BIN_DIR/ceilometer-dbsync || die $LINENO "DB sync error"
+
+# Start Ceilometer
+start_ceilometer
+
+# Note these are process names, not service names
+ensure_services_started ceilometer-agent-ipmi ceilometer-agent-compute ceilometer-agent-central ceilometer-agent-notification ceilometer-alarm-evaluator ceilometer-alarm-notifier ceilometer-api ceilometer-collector
+
+# Save mongodb state (replace with snapshot)
+if grep -q 'connection *= *mongo' /etc/ceilometer/ceilometer.conf; then
+    mongodump --db ceilometer --out $SAVE_DIR/ceilometer-dump.$TARGET_RELEASE
+fi
+
+
+set +o xtrace
+echo "*********************************************************************"
+echo "SUCCESS: End $0"
+echo "*********************************************************************"
diff -Nru ceilometer-5.0.0~b2/doc/Makefile ceilometer-5.0.0~b3/doc/Makefile
--- ceilometer-5.0.0~b2/doc/Makefile 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/doc/Makefile 2015-09-03 13:05:55.000000000 +0000
@@ -50,6 +50,7 @@
 .PHONY: check-dependencies
 check-dependencies:
 	@python -c 'import sphinxcontrib.autohttp.flask' >/dev/null 2>&1 || (echo "ERROR: Missing Sphinx dependencies. Run: pip install sphinxcontrib-httpdomain" && exit 1)
+	@ld -ltidy >/dev/null 2>&1 || (echo "Error: Missing libtidy dependency. Please install libtidy with your system package manager" && exit 1)
 
 wadl:
 	$(SPHINXBUILD) -b docbook $(ALLSPHINXOPTS) $(BUILDDIR)/wadl
diff -Nru ceilometer-5.0.0~b2/doc/source/format.rst ceilometer-5.0.0~b3/doc/source/format.rst
--- ceilometer-5.0.0~b2/doc/source/format.rst 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/doc/source/format.rst 2015-09-03 13:05:55.000000000 +0000
@@ -73,7 +73,7 @@
         "event": "events describe some kind of state change in the service",
         "quantity": "quantity describes a usage metric value"
     },
-    "Compliance": "required",
+    "Compliance": "optional",
     "Notes": ""
 },
 {
diff -Nru ceilometer-5.0.0~b2/doc/source/gmr.rst ceilometer-5.0.0~b3/doc/source/gmr.rst
--- ceilometer-5.0.0~b2/doc/source/gmr.rst 1970-01-01 00:00:00.000000000 +0000
+++ ceilometer-5.0.0~b3/doc/source/gmr.rst 2015-09-03 13:05:55.000000000 +0000
@@ -0,0 +1,89 @@
+..
+      Copyright (c) 2014 OpenStack Foundation
+
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+Guru Meditation Reports
+=======================
+
+Ceilometer contains a mechanism whereby developers and system administrators
+can generate a report about the state of a running Ceilometer executable. This
+report is called a *Guru Meditation Report* (*GMR* for short).
+
+Generating a GMR
+----------------
+
+A *GMR* can be generated by sending the *USR1* signal to any Ceilometer process
+with support (see below). The *GMR* will then be output to standard error for
+that particular process.
+
+For example, suppose that ``ceilometer-polling`` has process id ``8675``, and
+was run with ``2>/var/log/ceilometer/ceilometer-polling.log``. Then,
+``kill -USR1 8675`` will trigger the Guru Meditation report to be printed to
+``/var/log/ceilometer/ceilometer-polling.log``.
+
+Structure of a GMR
+------------------
+
+The *GMR* is designed to be extensible; any particular executable may add its
+own sections. However, the base *GMR* consists of several sections:
+
+Package
+  Shows information about the package to which this process belongs, including
+  version information
+
+Threads
+  Shows stack traces and thread ids for each of the threads within this process
+
+Green Threads
+  Shows stack traces for each of the green threads within this process (green
+  threads don't have thread ids)
+
+Configuration
+  Lists all the configuration options currently accessible via the CONF object
+  for the current process
+
+Adding Support for GMRs to New Executables
+------------------------------------------
+
+Adding support for a *GMR* to a given executable is fairly easy.
+
+First import the module (from the ``oslo.reports`` library), as well as the
+Ceilometer version module:
+
+.. code-block:: python
+
+   from oslo_reports import guru_meditation_report as gmr
+   from ceilometer import version
+
+Then, register any additional sections (optional):
+
+.. code-block:: python
+
+   gmr.TextGuruMeditation.register_section('Some Special Section',
+                                           some_section_generator)
+
+Finally (under main), before running the "main loop" of the executable (usually
+``service.server(server)`` or something similar), register the *GMR* hook:
+
+.. code-block:: python
+
+   gmr.TextGuruMeditation.setup_autorun(version)
+
+Extending the GMR
+-----------------
+
+As mentioned above, additional sections can be added to the GMR for a
+particular executable. For more information, see the inline documentation
+about oslo.reports:
+`oslo.reports `_
diff -Nru ceilometer-5.0.0~b2/doc/source/index.rst ceilometer-5.0.0~b3/doc/source/index.rst
--- ceilometer-5.0.0~b2/doc/source/index.rst 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/doc/source/index.rst 2015-09-03 13:05:55.000000000 +0000
@@ -64,6 +64,7 @@
    new_meters
    testing
    contributing
+   gmr
 
 Appendix
 ========
diff -Nru ceilometer-5.0.0~b2/doc/source/install/manual.rst ceilometer-5.0.0~b3/doc/source/install/manual.rst
--- ceilometer-5.0.0~b2/doc/source/install/manual.rst 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/doc/source/install/manual.rst 2015-09-03 13:05:55.000000000 +0000
@@ -654,12 +654,14 @@
 database, multiple dispatchers can be developed and enabled by modifying
 Ceilometer configuration file.
 
-Ceilometer ships multiple dispatchers currently. They are database, file and
-http dispatcher. As the names imply, database dispatcher sends metering data
+Ceilometer ships multiple dispatchers currently. They are `database`, `file`, `http`
+and `gnocchi` dispatchers. As the names imply, database dispatcher sends metering data
 to a database, file dispatcher logs meters into a file, http dispatcher posts
-the meters onto a http target. Each dispatcher can have its own configuration
-parameters. Please see available configuration parameters at the beginning of
-each dispatcher file.
+the meters to an http target, and the gnocchi dispatcher posts the meters to the
+Gnocchi_ backend. Each dispatcher can have its own configuration parameters. Please see
+available configuration parameters at the beginning of each dispatcher file.
+
+.. _Gnocchi: http://gnocchi.readthedocs.org/en/latest/basic.html
 To check if any of the dispatchers is available in your system, you can
 inspect the Ceilometer egg entry_points.txt file, you should normally see text
@@ -669,6 +671,7 @@
     database = ceilometer.dispatcher.database:DatabaseDispatcher
     file = ceilometer.dispatcher.file:FileDispatcher
     http = ceilometer.dispatcher.http:HttpDispatcher
+    gnocchi = ceilometer.dispatcher.gnocchi:GnocchiDispatcher
 
 To configure one or multiple dispatchers for Ceilometer, find the Ceilometer
 configuration file ceilometer.conf which is normally located at /etc/ceilometer
@@ -695,3 +698,21 @@
 With above configuration, no dispatcher is used by the Ceilometer collector
 service, all metering data received by Ceilometer collector will be dropped.
+
+For Gnocchi dispatcher, the following configuration settings should be added::
+
+    [DEFAULT]
+    dispatcher = gnocchi
+
+    [dispatcher_gnocchi]
+    filter_project = gnocchi_swift
+    filter_service_activity = True
+    archive_policy = low
+    url = http://localhost:8041
+
+The `url` in the above configuration is a Gnocchi endpoint URL and depends on your
+deployment.
+
+.. note::
+   If the gnocchi dispatcher is enabled, Ceilometer API calls will return a 410 with
+   an empty result. The Gnocchi API should be used instead to access the data.
diff -Nru ceilometer-5.0.0~b2/doc/source/install/mod_wsgi.rst ceilometer-5.0.0~b3/doc/source/install/mod_wsgi.rst
--- ceilometer-5.0.0~b2/doc/source/install/mod_wsgi.rst 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/doc/source/install/mod_wsgi.rst 2015-09-03 13:05:55.000000000 +0000
@@ -63,4 +63,4 @@
 ``ceilometer.conf``. For other WSGI setup you can refer to the `pecan deployment`_ documentation.
 
-.. _`pecan deployment`: http://pecan.readthedocs.org/en/latest/deployment.html#deployment
+.. _`pecan deployment`: http://pecan.readthedocs.org/en/latest/deployment.html
diff -Nru ceilometer-5.0.0~b2/doc/source/measurements.rst ceilometer-5.0.0~b3/doc/source/measurements.rst
--- ceilometer-5.0.0~b2/doc/source/measurements.rst 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/doc/source/measurements.rst 2015-09-03 13:05:55.000000000 +0000
@@ -25,7 +25,7 @@
 For the list of existing meters see the tables under the `Measurements page`_
 of Ceilometer in the Cloud Administrator Guide.
 
-.. _Measurements page: http://docs.openstack.org/admin-guide-cloud/content/section_telemetry-measurements.html
+.. _Measurements page: http://docs.openstack.org/admin-guide-cloud/telemetry-measurements.html
 
 Adding new meters
 =================
diff -Nru ceilometer-5.0.0~b2/doc/source/releasenotes/folsom.rst ceilometer-5.0.0~b3/doc/source/releasenotes/folsom.rst
--- ceilometer-5.0.0~b2/doc/source/releasenotes/folsom.rst 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/doc/source/releasenotes/folsom.rst 2015-09-03 13:05:55.000000000 +0000
@@ -23,7 +23,7 @@
 caution in using it, as it is a technology preview at this time.
 
 Version of OpenStack
-  It is curently tested to work with OpenStack 2012.2 Folsom. Due to its use of
+  It is currently tested to work with OpenStack 2012.2 Folsom. Due to its use of
   openstack-common, and the modification that were made in term of notification
   to many other components (glance, cinder, quantum), it will not easily work
   with any prior version of OpenStack.
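The manual.rst hunks above tell the reader to inspect the Ceilometer egg's entry_points.txt to see which dispatchers are available. The same listing can be produced programmatically; the snippet below is a minimal sketch (not part of the package), assuming ceilometer is installed in the current Python environment and that the dispatchers are registered under the ``ceilometer.dispatcher`` entry-point group:

.. code-block:: python

    # Enumerate the dispatcher plugins registered through setuptools entry
    # points, mirroring what reading entry_points.txt by hand would show.
    import pkg_resources

    for ep in pkg_resources.iter_entry_points(group='ceilometer.dispatcher'):
        # Prints e.g. "gnocchi = ceilometer.dispatcher.gnocchi:GnocchiDispatcher"
        print(ep)

Ceilometer itself loads these plugins through stevedore (pinned in requirements.txt below), which drives the same entry-point machinery.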
diff -Nru ceilometer-5.0.0~b2/doc/source/releasenotes/index.rst ceilometer-5.0.0~b3/doc/source/releasenotes/index.rst
--- ceilometer-5.0.0~b2/doc/source/releasenotes/index.rst 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/doc/source/releasenotes/index.rst 2015-09-03 13:05:55.000000000 +0000
@@ -26,7 +26,9 @@
 * `Havana`_
 * `Icehouse`_
 * `Juno`_
+* `Kilo`_
 
 .. _Havana: https://wiki.openstack.org/wiki/ReleaseNotes/Havana#OpenStack_Metering_.28Ceilometer.29
 .. _IceHouse: https://wiki.openstack.org/wiki/ReleaseNotes/Icehouse#OpenStack_Telemetry_.28Ceilometer.29
 .. _Juno: https://wiki.openstack.org/wiki/ReleaseNotes/Juno#OpenStack_Telemetry_.28Ceilometer.29
+.. _Kilo: https://wiki.openstack.org/wiki/ReleaseNotes/Kilo#OpenStack_Telemetry_.28Ceilometer.29
diff -Nru ceilometer-5.0.0~b2/doc/source/webapi/v2.rst ceilometer-5.0.0~b3/doc/source/webapi/v2.rst
--- ceilometer-5.0.0~b2/doc/source/webapi/v2.rst 2015-07-30 12:14:00.000000000 +0000
+++ ceilometer-5.0.0~b3/doc/source/webapi/v2.rst 2015-09-03 13:05:55.000000000 +0000
@@ -191,12 +191,12 @@
 operators can be used: *and* *or* and *not*. The field names are validated
 against the database models. See :ref:`api-queries` for how to query the API.
 
-.. note:: The *not* operator has different meaning in Mongo DB and in SQL DB engine.
+.. note:: The *not* operator has different meaning in MongoDB and in SQL DB engine.
   If the *not* operator is applied on a non existent metadata field then
   the result depends on the DB engine. For example if
   {"not": {"metadata.nonexistent_field" : "some value"}} filter is used in a query
-  the Mongo DB will return every Sample object as *not* operator evaluated true
-  for every Sample where the given field does not exists. See more in the Mongod DB doc.
+  MongoDB will return every Sample object, as the *not* operator evaluates to true
+  for every Sample where the given field does not exist. See more in the MongoDB doc.
   On the other hand SQL based DB engine will return empty result as the join
   operation on the metadata table will return zero rows as the on clause of the
   join which tries to match on the metadata field name is never fulfilled.
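The *not*-operator note above comes from the complex-query section of the v2 API. To make the behaviour concrete: a complex query is POSTed to the API with the filter serialized as a JSON string, together with a ``limit``, which the ChangeLog above notes is now mandatory for complex query lists. A minimal sketch, assuming a Ceilometer API at ``http://localhost:8777`` and a placeholder token (both values are illustrative, not taken from the diff):

.. code-block:: python

    import json
    import requests

    # Placeholder endpoint and token; adjust for a real deployment.
    url = 'http://localhost:8777/v2/query/samples'
    headers = {'X-Auth-Token': 'TOKEN', 'Content-Type': 'application/json'}

    # The filter from the note above, JSON-encoded into a string as the
    # complex-query interface expects, plus the mandatory result limit.
    body = {
        'filter': json.dumps({'not': {'metadata.nonexistent_field': 'some value'}}),
        'limit': 100,
    }

    resp = requests.post(url, headers=headers, data=json.dumps(body))
    # Against MongoDB this filter can match every sample; against a SQL
    # backend it returns an empty list, as the note explains.
    for sample in resp.json():
        print(sample['meter'], sample['volume'])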
diff -Nru ceilometer-5.0.0~b2/etc/ceilometer/gnocchi_resources.yaml ceilometer-5.0.0~b3/etc/ceilometer/gnocchi_resources.yaml --- ceilometer-5.0.0~b2/etc/ceilometer/gnocchi_resources.yaml 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/etc/ceilometer/gnocchi_resources.yaml 2015-09-03 13:05:55.000000000 +0000 @@ -13,10 +13,6 @@ - 'identity.group.created' - 'identity.group.deleted' - 'identity.group.updated' - - - resource_type: identity - archive_policy: low - metrics: - 'identity.role.created' - 'identity.role.deleted' - 'identity.role.updated' @@ -30,11 +26,12 @@ - resource_type: ceph_account metrics: - - 'stack.create' - - 'stack.update' - - 'stack.delete' - - 'stack.resume' - - 'stack.suspend' + - 'radosgw.objects' + - 'radosgw.objects.size' + - 'radosgw.objects.containers' + - 'radosgw.api.request' + - 'radosgw.containers.objects' + - 'radosgw.containers.objects.size' - resource_type: instance metrics: @@ -43,33 +40,77 @@ - 'disk.ephemeral.size' - 'memory' - 'memory.usage' + - 'memory.resident' - 'vcpus' - 'cpu' - 'cpu_util' + - 'vcpus' + - 'disk.read.requests' + - 'disk.read.requests.rate' + - 'disk.write.requests' + - 'disk.write.requests.rate' + - 'disk.read.bytes' + - 'disk.read.bytes.rate' + - 'disk.write.bytes' + - 'disk.write.bytes.rate' + - 'disk.device.read.requests' + - 'disk.device.read.requests.rate' + - 'disk.device.write.requests' + - 'disk.device.write.requests.rate' + - 'disk.device.read.bytes' + - 'disk.device.read.bytes.rate' + - 'disk.device.write.bytes' + - 'disk.device.write.bytes.rate' + - 'disk.latency' + - 'disk.iops' + - 'disk.device.latency' + - 'disk.device.iops' + - 'disk.capacity' + - 'disk.allocation' + - 'disk.usage' + - 'disk.device.capacity' + - 'disk.device.allocation' + - 'disk.device.usage' + - 'network.outgoing.packets.rate' + - 'network.incoming.packets.rate' + - 'network.outgoing.packets' + - 'network.incoming.packets' + - 'network.outgoing.bytes.rate' + - 'network.incoming.bytes.rate' + - 'network.outgoing.bytes' + - 'network.incoming.bytes' attributes: - - host: resource_metadata.host - - image_ref: resource_metadata.image_ref_url - - display_name: resource_metadata.display_name - - flavor_id: resource_metadata.instance_flavor_id - - flavor_id: resource_metadata.flavor.id - - server_group: resource_metadata.user_metadata.server_group + host: resource_metadata.host + image_ref: resource_metadata.image_ref_url + display_name: resource_metadata.display_name + flavor_id: resource_metadata.(instance_flavor_id|(flavor.id)) + server_group: resource_metadata.user_metadata.server_group - resource_type: image metrics: - 'image' - 'image.size' + - 'image.download' + - 'image.serve' attributes: - - name: resource_metadata.name - - container_format: resource_metadata.container_format - - disk_format: resource_metadata.disk_format + name: resource_metadata.name + container_format: resource_metadata.container_format + disk_format: resource_metadata.disk_format - resource_type: ipmi metrics: - 'hardware.ipmi.node.power' - 'hardware.ipmi.node.temperature' + - 'hardware.ipmi.node.inlet_temperature' + - 'hardware.ipmi.node.outlet_temperature' - 'hardware.ipmi.node.fan' - 'hardware.ipmi.node.current' - 'hardware.ipmi.node.voltage' + - 'hardware.ipmi.node.airflow' + - 'hardware.ipmi.node.cups' + - 'hardware.ipmi.node.cpu_util' + - 'hardware.ipmi.node.mem_util' + - 'hardware.ipmi.node.io_util' - resource_type: network metrics: @@ -106,6 +147,8 @@ - 'storage.objects.size' - 'storage.objects' - 'storage.objects.containers' + - 'storage.containers.objects' + 
- 'storage.containers.objects.size' - resource_type: volume metrics: @@ -118,4 +161,4 @@ - 'volume.attach' - 'volume.detach' attributes: - - display_name: resource_metadata.display_name + display_name: resource_metadata.display_name diff -Nru ceilometer-5.0.0~b2/etc/ceilometer/meters.yaml ceilometer-5.0.0~b3/etc/ceilometer/meters.yaml --- ceilometer-5.0.0~b2/etc/ceilometer/meters.yaml 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/etc/ceilometer/meters.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,393 +0,0 @@ ---- - -metric: - - name: "image.size" - event_type: - - "image.upload" - - "image.delete" - - "image.update" - type: "gauge" - unit: B - volume: payload.size - resource_id: payload.id - project_id: payload.owner - - - name: "image.download" - event_type: "image.send" - type: "delta" - unit: "B" - volume: payload.bytes_sent - resource_id: payload.image_id - user_id: payload.receiver_user_id - project_id: payload.receiver_tenant_id - - - name: "image.serve" - event_type: "image.send" - type: "delta" - unit: "B" - volume: payload.bytes_sent - resource_id: payload.image_id - project_id: payload.owner_id - - - name: 'bandwidth' - event_type: 'l3.meter' - type: 'delta' - unit: 'B' - volume: payload.bytes - project_id: payload.tenant_id - resource_id: payload.label_id - - - name: 'magnetodb.table.index.count' - type: 'gauge' - unit: 'index' - event_type: 'magnetodb.table.create.end' - volume: payload.index_count - resource_id: payload.table_uuid - user_id: _context_user - - - name: 'memory' - event_type: 'compute.instance.*' - type: 'gauge' - unit: 'MB' - volume: payload.memory_mb - user_id: payload.user_id - project_id: payload.tenant_id - resource_id: payload.instance_id - - - name: 'vcpus' - event_type: 'compute.instance.*' - type: 'gauge' - unit: 'vcpu' - volume: payload.vcpus - user_id: payload.user_id - project_id: payload.tenant_id - resource_id: payload.instance_id - - - name: 'disk.root.size' - event_type: 'compute.instance.*' - type: 'gauge' - unit: 'GB' - volume: payload.root_gb - user_id: payload.user_id - project_id: payload.tenant_id - resource_id: payload.instance_id - - - name: 'disk.ephemeral.size' - event_type: 'compute.instance.*' - type: 'gauge' - unit: 'GB' - volume: payload.ephemeral_gb - user_id: payload.user_id - project_id: payload.tenant_id - resource_id: payload.instance_id - - - name: 'volume.size' - event_type: - - 'volume.exists' - - 'volume.create.*' - - 'volume.delete.*' - - 'volume.resize.*' - - 'volume.attach.*' - - 'volume.detach.*' - - 'volume.update.*' - type: 'gauge' - unit: 'GB' - volume: payload.size - user_id: payload.user_id - project_id: payload.tenant_id - resource_id: payload.volume_id - - - name: 'snapshot.size' - event_type: - - 'snapshot.exists' - - 'snapshot.create.*' - - 'snapshot.delete.*' - type: 'gauge' - unit: 'GB' - volume: payload.volume_size - user_id: payload.user_id - project_id: payload.tenant_id - resource_id: payload.snapshot_id - -# NOTE: non-metric meters are generally events/existence meters -# These are expected to be DEPRECATED in future releases -# - - - name: 'stack.create' - event_type: - - 'orchestration.stack.create.end' - type: 'delta' - unit: 'stack' - volume: 1 - user_id: _context_trustor_user_id - project_id: payload.tenant_id - resource_id: payload.stack_identity - - - name: 'stack.update' - event_type: - - 'orchestration.stack.update.end' - type: 'delta' - unit: 'stack' - volume: 1 - user_id: _context_trustor_user_id - project_id: payload.tenant_id - resource_id: payload.stack_identity - - - name: 
'stack.delete' - event_type: - - 'orchestration.stack.delete.end' - type: 'delta' - unit: 'stack' - volume: 1 - user_id: _context_trustor_user_id - project_id: payload.tenant_id - resource_id: payload.stack_identity - - - name: 'stack.resume' - event_type: - - 'orchestration.stack.resume.end' - type: 'delta' - unit: 'stack' - volume: 1 - user_id: _context_trustor_user_id - project_id: payload.tenant_id - resource_id: payload.stack_identity - - - name: 'stack.suspend' - event_type: - - 'orchestration.stack.suspend.end' - type: 'delta' - unit: 'stack' - volume: 1 - user_id: _context_trustor_user_id - project_id: payload.tenant_id - resource_id: payload.stack_identity - - - name: 'magnetodb.table.create' - type: 'gauge' - unit: 'table' - volume: 1 - event_type: 'magnetodb.table.create.end' - resource_id: payload.table_uuid - user_id: _context_user - project_id: _context_tenant - - - name: 'magnetodb.table.delete' - type: 'gauge' - unit: 'table' - volume: 1 - event_type: 'magnetodb.table.delete.end' - resource_id: payload.table_uuid - user_id: _context_user - project_id: _context_tenant - - - name: 'volume' - type: 'gauge' - unit: 'volume' - volume: 1 - event_type: - - 'volume.exists' - - 'volume.create.*' - - 'volume.delete.*' - - 'volume.resize.*' - - 'volume.attach.*' - - 'volume.detach.*' - - 'volume.update.*' - resource_id: payload.volume_id - user_id: payload.user_id - project_id: payload.tenant_id - - - name: 'volume.exists' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.exists' - resource_id: payload.volume_id - user_id: payload.user_id - project_id: payload.tenant_id - - - name: 'volume.create.start' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.create.*' - resource_id: payload.volume_id - user_id: payload.user_id - project_id: payload.tenant_id - - - name: 'volume.create.end' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.create.*' - resource_id: payload.volume_id - user_id: payload.user_id - project_id: payload.tenant_id - - - name: 'volume.delete.start' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.delete.*' - resource_id: payload.volume_id - user_id: payload.user_id - project_id: payload.tenant_id - - - name: 'volume.delete.end' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.delete.*' - resource_id: payload.volume_id - user_id: payload.user_id - project_id: payload.tenant_id - - - name: 'volume.update.end' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.update.*' - resource_id: payload.volume_id - user_id: payload.user_id - project_id: payload.tenant_id - - - name: 'volume.update.start' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.update.*' - resource_id: payload.volume_id - user_id: payload.user_id - project_id: payload.tenant_id - - - name: 'volume.resize.end' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.resize.*' - resource_id: payload.volume_id - user_id: payload.user_id - project_id: payload.tenant_id - - - name: 'volume.resize.start' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.resize.*' - resource_id: payload.volume_id - user_id: payload.user_id - project_id: payload.tenant_id - - - - name: 'volume.attach.end' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.attach.*' - resource_id: payload.volume_id - user_id: payload.user_id - project_id: payload.tenant_id - - - name: 'volume.attach.start' - type: 'delta' - unit: 'volume' - volume: 
1 - event_type: - - 'volume.attach.*' - resource_id: payload.volume_id - user_id: payload.user_id - project_id: payload.tenant_id - - - name: 'volume.detach.end' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.detach.*' - resource_id: payload.volume_id - user_id: payload.user_id - project_id: payload.tenant_id - - - name: 'volume.detach.start' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.detach.*' - resource_id: payload.volume_id - user_id: payload.user_id - project_id: payload.tenant_id - - - - name: 'snapshot' - type: 'gauge' - unit: 'snapshot' - volume: 1 - event_type: - - 'snapshot.exists' - - 'snapshot.create.*' - - 'snapshot.delete.*' - - resource_id: payload.snapshot_id - user_id: payload.user_id - project_id: payload.tenant_id - - - name: 'snapshot.exists' - type: 'delta' - unit: 'snapshot' - volume: 1 - event_type: - - 'snapshot.exists' - resource_id: payload.snapshot_id - user_id: payload.user_id - project_id: payload.tenant_id - - - name: 'snapshot.create.start' - type: 'delta' - unit: 'snapshot' - volume: 1 - event_type: - - 'snapshot.create.*' - resource_id: payload.snapshot_id - user_id: payload.user_id - project_id: payload.tenant_id - - - name: 'snapshot.create.end' - type: 'delta' - unit: 'snapshot' - volume: 1 - event_type: - - 'snapshot.create.*' - resource_id: payload.snapshot_id - user_id: payload.user_id - project_id: payload.tenant_id - - - name: 'snapshot.delete.start' - type: 'delta' - unit: 'snapshot' - volume: 1 - event_type: - - 'snapshot.delete.*' - resource_id: payload.snapshot_id - user_id: payload.user_id - project_id: payload.tenant_id - - - name: 'snapshot.delete.end' - type: 'delta' - unit: 'snapshot' - volume: 1 - event_type: - - 'snapshot.delete.*' - resource_id: payload.snapshot_id - user_id: payload.user_id - project_id: payload.tenant_id \ No newline at end of file diff -Nru ceilometer-5.0.0~b2/etc/ceilometer/policy.json.sample ceilometer-5.0.0~b3/etc/ceilometer/policy.json.sample --- ceilometer-5.0.0~b2/etc/ceilometer/policy.json.sample 2015-07-30 12:14:00.000000000 +0000 +++ ceilometer-5.0.0~b3/etc/ceilometer/policy.json.sample 2015-09-03 13:05:55.000000000 +0000 @@ -27,5 +27,8 @@ "telemetry:alarm_history": "rule:context_is_admin", "telemetry:change_alarm_state": "rule:context_is_admin", - "telemetry:query_alarm_history": "rule:context_is_admin" + "telemetry:query_alarm_history": "rule:context_is_admin", + + "telemetry:events:index": "rule:context_is_admin", + "telemetry:events:show": "rule:context_is_admin" } diff -Nru ceilometer-5.0.0~b2/PKG-INFO ceilometer-5.0.0~b3/PKG-INFO --- ceilometer-5.0.0~b2/PKG-INFO 2015-07-30 12:17:54.000000000 +0000 +++ ceilometer-5.0.0~b3/PKG-INFO 2015-09-03 13:09:31.000000000 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: ceilometer -Version: 5.0.0.0b2 +Version: 5.0.0.0b3 Summary: OpenStack Telemetry Home-page: http://www.openstack.org/ Author: OpenStack diff -Nru ceilometer-5.0.0~b2/requirements.txt ceilometer-5.0.0~b3/requirements.txt --- ceilometer-5.0.0~b2/requirements.txt 2015-07-30 12:14:02.000000000 +0000 +++ ceilometer-5.0.0~b3/requirements.txt 2015-09-03 13:05:55.000000000 +0000 @@ -3,37 +3,37 @@ # process, which may cause wedges in the gate later. 
retrying!=1.3.0,>=1.2.3 # Apache-2.0 -alembic>=0.7.2 croniter>=0.3.4 # MIT License eventlet>=0.17.4 -jsonpath-rw<2.0,>=1.2.0 +jsonpath-rw-ext>=0.1.7 jsonschema!=2.5.0,<3.0.0,>=2.0.0 kafka-python>=0.9.2 # Apache-2.0 keystonemiddleware>=2.0.0 lxml>=2.3 msgpack-python>=0.4.0 oslo.context>=0.2.0 # Apache-2.0 -oslo.db>=1.12.0 # Apache-2.0 -oslo.concurrency>=2.1.0 # Apache-2.0 -oslo.config>=1.11.0 # Apache-2.0 +oslo.db>=2.4.1 # Apache-2.0 +oslo.concurrency>=2.3.0 # Apache-2.0 +oslo.config>=2.3.0 # Apache-2.0 oslo.i18n>=1.5.0 # Apache-2.0 -oslo.log>=1.6.0 # Apache-2.0 +oslo.log>=1.8.0 # Apache-2.0 oslo.policy>=0.5.0 # Apache-2.0 +oslo.reports>=0.1.0 # Apache-2.0 oslo.rootwrap>=2.0.0 # Apache-2.0 -oslo.service>=0.1.0 # Apache-2.0 +oslo.service>=0.7.0 # Apache-2.0 PasteDeploy>=1.5.0 -pbr<2.0,>=1.3 -pecan>=0.8.0 +pbr<2.0,>=1.6 +pecan>=1.0.0 oslo.messaging!=1.17.0,!=1.17.1,>=1.16.0 # Apache-2.0 -oslo.middleware>=2.4.0 # Apache-2.0 +oslo.middleware>=2.8.0 # Apache-2.0 oslo.serialization>=1.4.0 # Apache-2.0 -oslo.utils>=1.9.0 # Apache-2.0 +oslo.utils>=2.0.0 # Apache-2.0 pysnmp<5.0.0,>=4.2.1 python-ceilometerclient>=1.0.13 python-glanceclient>=0.18.0 python-keystoneclient>=1.6.0 -python-neutronclient<3,>=2.3.11 -python-novaclient>=2.22.0 +python-neutronclient<3,>=2.6.0 +python-novaclient>=2.26.0 python-swiftclient>=2.2.0 pytz>=2013.6 PyYAML>=3.1.0 @@ -42,7 +42,7 @@ SQLAlchemy<1.1.0,>=0.9.7 sqlalchemy-migrate>=0.9.6 stevedore>=1.5.0 # Apache-2.0 -tooz>=0.16.0 # Apache-2.0 +tooz>=1.19.0 # Apache-2.0 Werkzeug>=0.7 # BSD License WebOb>=1.2.3 WSME>=0.7 diff -Nru ceilometer-5.0.0~b2/setup.cfg ceilometer-5.0.0~b3/setup.cfg --- ceilometer-5.0.0~b2/setup.cfg 2015-07-30 12:17:54.000000000 +0000 +++ ceilometer-5.0.0~b3/setup.cfg 2015-09-03 13:09:31.000000000 +0000 @@ -28,55 +28,16 @@ [entry_points] ceilometer.notification = - magnetodb_table = ceilometer.key_value_storage.notifications:Table - magnetodb_index_count = ceilometer.key_value_storage.notifications:Index instance = ceilometer.compute.notifications.instance:Instance - instance_flavor = ceilometer.compute.notifications.instance:InstanceFlavor instance_delete = ceilometer.compute.notifications.instance:InstanceDelete instance_scheduled = ceilometer.compute.notifications.instance:InstanceScheduled - memory = ceilometer.compute.notifications.instance:Memory - vcpus = ceilometer.compute.notifications.instance:VCpus - disk_root_size = ceilometer.compute.notifications.instance:RootDiskSize - disk_ephemeral_size = ceilometer.compute.notifications.instance:EphemeralDiskSize - cpu_frequency = ceilometer.compute.notifications.cpu:CpuFrequency - cpu_user_time = ceilometer.compute.notifications.cpu:CpuUserTime - cpu_kernel_time = ceilometer.compute.notifications.cpu:CpuKernelTime - cpu_idle_time = ceilometer.compute.notifications.cpu:CpuIdleTime - cpu_iowait_time = ceilometer.compute.notifications.cpu:CpuIowaitTime - cpu_kernel_percent = ceilometer.compute.notifications.cpu:CpuKernelPercent - cpu_idle_percent = ceilometer.compute.notifications.cpu:CpuIdlePercent - cpu_user_percent = ceilometer.compute.notifications.cpu:CpuUserPercent - cpu_iowait_percent = ceilometer.compute.notifications.cpu:CpuIowaitPercent - cpu_percent = ceilometer.compute.notifications.cpu:CpuPercent - volume = ceilometer.volume.notifications:Volume - volume_size = ceilometer.volume.notifications:VolumeSize - volume_crud = ceilometer.volume.notifications:VolumeCRUD - snapshot = ceilometer.volume.notifications:Snapshot - snapshot_size = ceilometer.volume.notifications:SnapshotSize - snapshot_crud 
diff -Nru ceilometer-5.0.0~b2/setup.cfg ceilometer-5.0.0~b3/setup.cfg
--- ceilometer-5.0.0~b2/setup.cfg 2015-07-30 12:17:54.000000000 +0000
+++ ceilometer-5.0.0~b3/setup.cfg 2015-09-03 13:09:31.000000000 +0000
@@ -28,55 +28,16 @@
 [entry_points]
 ceilometer.notification =
-    magnetodb_table = ceilometer.key_value_storage.notifications:Table
-    magnetodb_index_count = ceilometer.key_value_storage.notifications:Index
     instance = ceilometer.compute.notifications.instance:Instance
-    instance_flavor = ceilometer.compute.notifications.instance:InstanceFlavor
     instance_delete = ceilometer.compute.notifications.instance:InstanceDelete
     instance_scheduled = ceilometer.compute.notifications.instance:InstanceScheduled
-    memory = ceilometer.compute.notifications.instance:Memory
-    vcpus = ceilometer.compute.notifications.instance:VCpus
-    disk_root_size = ceilometer.compute.notifications.instance:RootDiskSize
-    disk_ephemeral_size = ceilometer.compute.notifications.instance:EphemeralDiskSize
-    cpu_frequency = ceilometer.compute.notifications.cpu:CpuFrequency
-    cpu_user_time = ceilometer.compute.notifications.cpu:CpuUserTime
-    cpu_kernel_time = ceilometer.compute.notifications.cpu:CpuKernelTime
-    cpu_idle_time = ceilometer.compute.notifications.cpu:CpuIdleTime
-    cpu_iowait_time = ceilometer.compute.notifications.cpu:CpuIowaitTime
-    cpu_kernel_percent = ceilometer.compute.notifications.cpu:CpuKernelPercent
-    cpu_idle_percent = ceilometer.compute.notifications.cpu:CpuIdlePercent
-    cpu_user_percent = ceilometer.compute.notifications.cpu:CpuUserPercent
-    cpu_iowait_percent = ceilometer.compute.notifications.cpu:CpuIowaitPercent
-    cpu_percent = ceilometer.compute.notifications.cpu:CpuPercent
-    volume = ceilometer.volume.notifications:Volume
-    volume_size = ceilometer.volume.notifications:VolumeSize
-    volume_crud = ceilometer.volume.notifications:VolumeCRUD
-    snapshot = ceilometer.volume.notifications:Snapshot
-    snapshot_size = ceilometer.volume.notifications:SnapshotSize
-    snapshot_crud = ceilometer.volume.notifications:SnapshotCRUD
-    authenticate = ceilometer.identity.notifications:Authenticate
-    user = ceilometer.identity.notifications:User
-    group = ceilometer.identity.notifications:Group
-    role = ceilometer.identity.notifications:Role
-    project = ceilometer.identity.notifications:Project
-    trust = ceilometer.identity.notifications:Trust
-    role_assignment = ceilometer.identity.notifications:RoleAssignment
-    image_crud = ceilometer.image.notifications:ImageCRUD
-    image = ceilometer.image.notifications:Image
-    image_size = ceilometer.image.notifications:ImageSize
-    image_download = ceilometer.image.notifications:ImageDownload
-    image_serve = ceilometer.image.notifications:ImageServe
     network = ceilometer.network.notifications:Network
     subnet = ceilometer.network.notifications:Subnet
     port = ceilometer.network.notifications:Port
     router = ceilometer.network.notifications:Router
     floatingip = ceilometer.network.notifications:FloatingIP
-    bandwidth = ceilometer.network.notifications:Bandwidth
     http.request = ceilometer.middleware:HTTPRequest
     http.response = ceilometer.middleware:HTTPResponse
-    stack_crud = ceilometer.orchestration.notifications:StackCRUD
-    data_processing = ceilometer.data_processing.notifications:DataProcessing
-    profiler = ceilometer.profiler.notifications:ProfilerNotifications
     hardware.ipmi.temperature = ceilometer.ipmi.notifications.ironic:TemperatureSensorNotification
     hardware.ipmi.voltage = ceilometer.ipmi.notifications.ironic:VoltageSensorNotification
     hardware.ipmi.current = ceilometer.ipmi.notifications.ironic:CurrentSensorNotification
@@ -92,11 +53,10 @@
     network.services.vpn.ipsecpolicy = ceilometer.network.notifications:IPSecPolicy
     network.services.vpn.ikepolicy = ceilometer.network.notifications:IKEPolicy
     network.services.vpn.connections = ceilometer.network.notifications:IPSecSiteConnection
-    objectstore.request = ceilometer.objectstore.notifications:SwiftWsgiMiddleware
-    objectstore.request.meters = ceilometer.objectstore.notifications:SwiftWsgiMiddlewareMeters
     _sample = ceilometer.telemetry.notifications:TelemetryApiPost
     trove.instance.exists = ceilometer.database.notifications:InstanceExists
     dns.domain.exists = ceilometer.dns.notifications:DomainExists
+    meter = ceilometer.meter.notifications:ProcessMeterNotifications
 ceilometer.discover =
     local_instances = ceilometer.compute.discovery:InstanceDiscovery
     endpoint = ceilometer.agent.discovery.endpoint:EndpointDiscovery
@@ -141,7 +101,6 @@
     network.incoming.bytes.rate = ceilometer.compute.pollsters.net:IncomingBytesRatePollster
     network.outgoing.bytes.rate = ceilometer.compute.pollsters.net:OutgoingBytesRatePollster
     instance = ceilometer.compute.pollsters.instance:InstancePollster
-    instance_flavor = ceilometer.compute.pollsters.instance:InstanceFlavorPollster
     memory.usage = ceilometer.compute.pollsters.memory:MemoryUsagePollster
     memory.resident = ceilometer.compute.pollsters.memory:MemoryResidentPollster
     disk.capacity = ceilometer.compute.pollsters.disk:CapacityPollster
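The entry-point pruning above consolidates dozens of per-meter notification classes into the single new meter plugin (ceilometer.meter.notifications:ProcessMeterNotifications). These setup.cfg entries are ordinary Python entry points loaded through stevedore; a hedged sketch of how such a namespace is enumerated at runtime (not code from this diff):

# Sketch: enumerating the ceilometer.notification namespace with stevedore.
from stevedore import extension

manager = extension.ExtensionManager(
    namespace='ceilometer.notification',  # the namespace declared in setup.cfg
    invoke_on_load=False,                 # discover only, do not instantiate
)

# After this change, names() would include 'meter' but no longer the
# removed per-meter plugins such as 'volume', 'image' or 'instance_flavor'.
print(sorted(manager.names()))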
diff -Nru ceilometer-5.0.0~b2/test-requirements.txt ceilometer-5.0.0~b3/test-requirements.txt
--- ceilometer-5.0.0~b2/test-requirements.txt 2015-07-30 12:14:02.000000000 +0000
+++ ceilometer-5.0.0~b3/test-requirements.txt 2015-09-03 13:05:55.000000000 +0000
@@ -7,7 +7,6 @@
 Babel>=1.3
 contextlib2>=0.4.0 # PSF License
 coverage>=3.6
-discover
 elasticsearch>=1.3.0
 fixtures>=1.3.1
 happybase!=0.7,>=0.5;python_version=='2.7'
@@ -16,7 +15,7 @@
 PyMySQL>=0.6.2 # MIT License
 # Docs Requirements
 oslosphinx>=2.5.0 # Apache-2.0
-oslotest>=1.7.0 # Apache-2.0
+oslotest>=1.10.0 # Apache-2.0
 oslo.vmware>=1.16.0 # Apache-2.0
 psycopg2
 pylint==1.4.4 # GNU GPL v2
@@ -29,6 +28,6 @@
 testrepository>=0.0.18
 testscenarios>=0.4
 testtools>=1.4.0
-gabbi>=0.12.0 # Apache-2.0
+gabbi>=1.1.4 # Apache-2.0
 requests-aws>=0.1.4 # BSD License (3 clause)
 tempest-lib>=0.6.1
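gabbi's floor moves from 0.12.0 to 1.1.4, matching the relocated gabbi test tree used by the tox targets below. gabbi tests are YAML files turned into test cases by a small loader module; a sketch under stated assumptions (driver.build_tests() is gabbi's documented entry point, while the gabbits/ directory name and the placeholder WSGI app are illustrative):

# Hypothetical loader, e.g. a test_gabbi.py next to a gabbits/ directory.
import os

from gabbi import driver

TESTS_DIR = os.path.join(os.path.dirname(__file__), 'gabbits')


def setup_app():
    # Placeholder WSGI app; a real loader returns the service under test.
    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'application/json')])
        return [b'{}']
    return app


def load_tests(loader, tests, pattern):
    # Each YAML file in TESTS_DIR becomes a test case, which is why the
    # tox targets only need OS_TEST_PATH to point at the right directory.
    return driver.build_tests(TESTS_DIR, loader, intercept=setup_app)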
diff -Nru ceilometer-5.0.0~b2/tox.ini ceilometer-5.0.0~b3/tox.ini
--- ceilometer-5.0.0~b2/tox.ini 2015-07-30 12:14:02.000000000 +0000
+++ ceilometer-5.0.0~b3/tox.ini 2015-09-03 13:05:55.000000000 +0000
@@ -10,8 +10,9 @@
 usedevelop = True
 setenv = VIRTUAL_ENV={envdir}
          EVENTLET_NO_GREENDNS=yes
+         OS_TEST_PATH=ceilometer/tests/unit
 commands =
-   bash -x {toxinidir}/setup-test-env-mongodb.sh python setup.py testr --slowest --testr-args="{posargs}"
+   python setup.py testr --slowest --testr-args="{posargs}"
 whitelist_externals = bash
 
 # TODO(ityaptin): With separation tests to unit and functional folders we need
@@ -19,19 +20,23 @@
 # in "py-" jobs
 [testenv:py-mongodb]
+setenv = OS_TEST_PATH=ceilometer/tests/functional/
 commands = bash -x {toxinidir}/setup-test-env-mongodb.sh python setup.py testr --slowest --testr-args="{posargs}"
 
 [testenv:py-mysql]
+setenv = OS_TEST_PATH=ceilometer/tests/functional/
 commands = bash -x {toxinidir}/setup-test-env-mysql.sh python setup.py testr --slowest --testr-args="{posargs}"
 
 [testenv:py-pgsql]
+setenv = OS_TEST_PATH=ceilometer/tests/functional/
 commands = bash -x {toxinidir}/setup-test-env-postgresql.sh python setup.py testr --slowest --testr-args="{posargs}"
 
 # Functional tests for elastic search
 [testenv:py-elastic]
+setenv = OS_TEST_PATH=ceilometer/tests/functional/
 commands = bash -x {toxinidir}/setup-test-env-es.sh python setup.py testr --slowest --testr-args="{posargs}"
@@ -39,73 +44,37 @@
 setenv = VIRTUAL_ENV={envdir}
          EVENTLET_NO_GREENDNS=yes
          OS_TEST_PATH=ceilometer/tests/functional/
+passenv = CEILOMETER_*
 commands = bash -x {toxinidir}/run-functional-tests.sh "{posargs}"
 
-[testenv:py34]
-deps = -r{toxinidir}/requirements.txt
-       -r{toxinidir}/test-requirements.txt
-commands = python -m testtools.run \
-    ceilometer.tests.api.v2.test_alarm_scenarios.TestAlarms.test_alarms_query_with_timestamp \
-    ceilometer.tests.api.v2.test_alarm_scenarios.TestAlarms.test_get_alarm_history_constrained_by_alarm_id_failed \
-    ceilometer.tests.api.v2.test_app \
-    ceilometer.tests.api.v2.test_complex_query_scenarios.TestQueryMetersController.test_query_with_volume_field_name_orderby \
-    ceilometer.tests.api.v2.test_complex_query_scenarios.TestQueryMetersController.test_query_with_isotime \
-    ceilometer.tests.api.v2.test_event_scenarios.TestEventAPI.test_get_events_filter_datetime_trait \
-    ceilometer.tests.api.v2.test_list_meters_scenarios.TestListMeters.test_query_samples_with_invalid_field_name_and_eq_operator \
-    ceilometer.tests.api.v2.test_list_resources_scenarios.TestListResources.test_with_invalid_resource_id \
-    ceilometer.tests.api.v2.test_query \
-    ceilometer.tests.compute.virt.libvirt.test_inspector \
-    ceilometer.tests.compute.virt.vmware.test_vsphere_operations \
-    ceilometer.tests.data_processing.test_notifications \
-    ceilometer.tests.dispatcher.test_db \
-    ceilometer.tests.dispatcher.test_file \
-    ceilometer.tests.dispatcher.test_http \
-    ceilometer.tests.energy.test_kwapi \
-    ceilometer.tests.ipmi.platform.test_intel_node_manager \
-    ceilometer.tests.network.services.test_fwaas \
-    ceilometer.tests.network.services.test_lbaas \
-    ceilometer.tests.network.services.test_vpnaas \
-    ceilometer.tests.network.test_floatingip \
-    ceilometer.tests.network.test_notifications \
-    ceilometer.tests.objectstore.test_rgw \
-    ceilometer.tests.orchestration.test_notifications \
-    ceilometer.tests.profiler.test_notifications \
-    ceilometer.tests.publisher.test_direct \
-    ceilometer.tests.publisher.test_file \
-    ceilometer.tests.publisher.test_kafka_broker_publisher \
-    ceilometer.tests.publisher.test_messaging_publisher \
-    ceilometer.tests.publisher.test_utils \
-    ceilometer.tests.publisher.test_udp \
-    ceilometer.tests.storage.test_get_connection \
-    ceilometer.tests.storage.test_impl_sqlalchemy \
-    ceilometer.tests.test_bin \
-    ceilometer.tests.test_collector \
-    ceilometer.tests.test_coordination \
-    ceilometer.tests.test_event_pipeline \
-    ceilometer.tests.test_hacking \
-    ceilometer.tests.test_messaging \
-    ceilometer.tests.test_middleware \
-    ceilometer.tests.test_neutronclient \
-    ceilometer.tests.test_notification \
-    ceilometer.tests.test_novaclient \
-    ceilometer.tests.test_sample \
-    ceilometer.tests.test_utils \
-    ceilometer.tests.volume.test_notifications
-
+[testenv:integration]
+setenv = VIRTUAL_ENV={envdir}
+         EVENTLET_NO_GREENDNS=yes
+         OS_TEST_PATH=./ceilometer/tests/integration
+         OS_TEST_TIMEOUT=2400
+         GABBI_LIVE_FAIL_IF_NO_TEST=1
+passenv = HEAT_* CEILOMETER_* GNOCCHI_* AODH_* GLANCE_* NOVA_* ADMIN_*
+# FIXME(sileht): run gabbi-run to fail fast in case of error because testr
+# doesn't support --failfast, but we lose the testr report.
+commands =
+    bash -c 'cd ceilometer/tests/integration/gabbi/gabbits-live && gabbi-run -x < autoscaling.yaml'
+#    bash -x {toxinidir}/tools/pretty_tox.sh "{posargs}"
 
 # NOTE(chdent): The gabbi tests are also run under the primary tox
 # targets. This target simply provides a target to directly run just
 # gabbi tests without needing to discover across the entire body of
 # tests.
 [testenv:gabbi]
-setenv = OS_TEST_PATH=ceilometer/tests/gabbi
+setenv = OS_TEST_PATH=ceilometer/tests/functional/gabbi
 commands = bash -x {toxinidir}/setup-test-env-mongodb.sh \
            python setup.py testr --testr-args="{posargs}"
 
 [testenv:cover]
-commands = bash -x {toxinidir}/setup-test-env-mongodb.sh python setup.py testr --slowest --coverage --testr-args="{posargs}"
+setenv = OS_TEST_PATH=ceilometer/tests
+commands =
+    python setup.py testr --slowest --coverage --testr-args="{posargs}"
 
 [testenv:pep8]
 commands =
@@ -137,15 +106,22 @@
 setenv = PYTHONHASHSEED=0
 
 [testenv:debug]
+commands = bash -x oslo_debug_helper {posargs}
+
+[testenv:debug-mongodb]
+setenv = OS_TEST_PATH=ceilometer/tests/functional
 commands = bash -x {toxinidir}/setup-test-env-mongodb.sh oslo_debug_helper {posargs}
 
 [testenv:debug-mysql]
+setenv = OS_TEST_PATH=ceilometer/tests/functional
 commands = bash -x {toxinidir}/setup-test-env-mysql.sh oslo_debug_helper {posargs}
 
 [testenv:debug-pgsql]
-commands = bash -x {toxinidir}/setup-test-env-postgresql.sh oslo_debug_helper {posargs}
+setenv = OS_TEST_PATH=ceilometer/tests/functional
+commands = bash -x {toxinidir}/setup-test-env-pgsql.sh oslo_debug_helper {posargs}
 
 [testenv:debug-elastic]
+setenv = OS_TEST_PATH=ceilometer/tests/functional
 commands = bash -x {toxinidir}/setup-test-env-elastic.sh oslo_debug_helper {posargs}
 
 [flake8]