diff -Nru influxdb-0.10.0+dfsg1/appveyor.yml influxdb-1.1.1+dfsg1/appveyor.yml --- influxdb-0.10.0+dfsg1/appveyor.yml 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/appveyor.yml 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,37 @@ +version: 0.{build} +pull_requests: + do_not_increment_build_number: true +branches: + only: + - master + +os: Windows Server 2012 R2 + +# Custom clone folder (variables are not expanded here). +clone_folder: c:\gopath\src\github.com\influxdata\influxdb + +# Environment variables +environment: + GOROOT: C:\go17 + GOPATH: C:\gopath + +# Scripts that run after cloning repository +install: + - set PATH=%GOROOT%\bin;%GOPATH%\bin;%PATH% + - rmdir c:\go /s /q + - echo %PATH% + - echo %GOPATH% + - cd C:\gopath\src\github.com\influxdata\influxdb + - go version + - go env + - go get github.com/sparrc/gdm + - cd C:\gopath\src\github.com\influxdata\influxdb + - gdm restore + +# To run your custom scripts instead of automatic MSBuild +build_script: + - go get -t -v ./... + - go test -race -v ./... + +# To disable deployment +deploy: off diff -Nru influxdb-0.10.0+dfsg1/build-docker.sh influxdb-1.1.1+dfsg1/build-docker.sh --- influxdb-0.10.0+dfsg1/build-docker.sh 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/build-docker.sh 2016-12-06 21:36:15.000000000 +0000 @@ -1,8 +1,8 @@ -#!/bin/sh +#!/bin/bash set -e -x -GO_VER=${GO_VER:-1.5} +GO_VER=${GO_VER:-1.7.4} docker run -it -v "${GOPATH}":/gopath -v "$(pwd)":/app -e "GOPATH=/gopath" -w /app golang:$GO_VER sh -c 'CGO_ENABLED=0 go build -a --installsuffix cgo --ldflags="-s" -o influxd ./cmd/influxd' diff -Nru influxdb-0.10.0+dfsg1/build.py influxdb-1.1.1+dfsg1/build.py --- influxdb-0.10.0+dfsg1/build.py 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/build.py 2016-12-06 21:36:15.000000000 +0000 @@ -1,23 +1,16 @@ -#!/usr/bin/env python2.7 -# -# This is the InfluxDB build script. -# -# Current caveats: -# - Does not checkout the correct commit/branch (for now, you will need to do so manually) -# - Has external dependencies for packaging (fpm) and uploading (boto) -# +#!/usr/bin/python2.7 -u import sys import os import subprocess import time -import datetime +from datetime import datetime import shutil import tempfile import hashlib import re - -debug = False +import logging +import argparse ################ #### InfluxDB Variables @@ -31,6 +24,7 @@ SCRIPT_DIR = "/usr/lib/influxdb/scripts" CONFIG_DIR = "/etc/influxdb" LOGROTATE_DIR = "/etc/logrotate.d" +MAN_DIR = "/usr/share/man" INIT_SCRIPT = "scripts/init.sh" SYSTEMD_SCRIPT = "scripts/influxdb.service" @@ -41,7 +35,7 @@ DEFAULT_CONFIG = "etc/config.sample.toml" # Default AWS S3 bucket for uploads -DEFAULT_BUCKET = "influxdb" +DEFAULT_BUCKET = "dl.influxdata.com/influxdb/artifacts" CONFIGURATION_FILES = [ CONFIG_DIR + '/influxdb.conf', @@ -55,7 +49,8 @@ DESCRIPTION = "Distributed time-series database." 
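The build-docker.sh change above bumps the default Go toolchain from 1.5 to 1.7.4 but keeps the same pattern: mount GOPATH and the working tree into an official golang container and produce a statically linked influxd. Below is a minimal Python sketch of that invocation, not part of the patch, assuming Docker is installed, GOPATH is set, and it is run from the repository root; it uses `--rm` instead of the script's `-it` since it is non-interactive.

```python
import os
import subprocess

GO_VER = os.environ.get("GO_VER", "1.7.4")
gopath = os.environ["GOPATH"]

# Build a statically linked influxd inside the official golang image,
# mirroring build-docker.sh: CGO disabled, stripped binary, output to ./influxd.
build_cmd = ('CGO_ENABLED=0 go build -a --installsuffix cgo '
             '--ldflags="-s" -o influxd ./cmd/influxd')

subprocess.check_call([
    "docker", "run", "--rm",
    "-v", "{}:/gopath".format(gopath),
    "-v", "{}:/app".format(os.getcwd()),
    "-e", "GOPATH=/gopath",
    "-w", "/app",
    "golang:{}".format(GO_VER),
    "sh", "-c", build_cmd,
])
```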
prereqs = [ 'git', 'go' ] -optional_prereqs = [ 'gvm', 'fpm', 'rpmbuild' ] +go_vet_command = "go tool vet ./" +optional_prereqs = [ 'fpm', 'rpmbuild', 'gpg' ] fpm_common_args = "-f -s dir --log error \ --vendor {} \ @@ -67,6 +62,7 @@ --maintainer {} \ --directories {} \ --directories {} \ +--directories {} \ --description \"{}\"".format( VENDOR, PACKAGE_URL, @@ -77,131 +73,235 @@ MAINTAINER, LOG_DIR, DATA_DIR, + MAN_DIR, DESCRIPTION) for f in CONFIGURATION_FILES: fpm_common_args += " --config-files {}".format(f) targets = { - 'influx' : './cmd/influx/main.go', - 'influxd' : './cmd/influxd/main.go', - 'influx_stress' : './cmd/influx_stress/influx_stress.go', - 'influx_inspect' : './cmd/influx_inspect/*.go', - 'influx_tsm' : './cmd/influx_tsm/*.go', + 'influx' : './cmd/influx', + 'influxd' : './cmd/influxd', + 'influx_stress' : './cmd/influx_stress', + 'influx_inspect' : './cmd/influx_inspect', + 'influx_tsm' : './cmd/influx_tsm', } supported_builds = { - 'darwin': [ "amd64", "i386" ], - # InfluxDB does not currently support Windows - # 'windows': [ "amd64", "386", "arm" ], - 'linux': [ "amd64", "i386", "arm" ] + 'darwin': [ "amd64" ], + 'windows': [ "amd64" ], + 'linux': [ "amd64", "i386", "armhf", "arm64", "armel", "static_i386", "static_amd64" ] } supported_packages = { "darwin": [ "tar" ], "linux": [ "deb", "rpm", "tar" ], - "windows": [ "tar" ], + "windows": [ "zip" ], } ################ #### InfluxDB Functions ################ +def print_banner(): + logging.info(""" + ___ __ _ ___ ___ + |_ _|_ _ / _| |_ ___ _| \\| _ ) + | || ' \\| _| | || \\ \\ / |) | _ \\ + |___|_||_|_| |_|\\_,_/_\\_\\___/|___/ + Build Script +""") + def create_package_fs(build_root): - print "Creating package filesystem at root: {}".format(build_root) + """Create a filesystem structure to mimic the package filesystem. + """ + logging.debug("Creating package filesystem at location: {}".format(build_root)) # Using [1:] for the path names due to them being absolute # (will overwrite previous paths, per 'os.path.join' documentation) - dirs = [ INSTALL_ROOT_DIR[1:], LOG_DIR[1:], DATA_DIR[1:], SCRIPT_DIR[1:], CONFIG_DIR[1:], LOGROTATE_DIR[1:] ] + dirs = [ INSTALL_ROOT_DIR[1:], + LOG_DIR[1:], + DATA_DIR[1:], + SCRIPT_DIR[1:], + CONFIG_DIR[1:], + LOGROTATE_DIR[1:], + MAN_DIR[1:] ] for d in dirs: - create_dir(os.path.join(build_root, d)) - os.chmod(os.path.join(build_root, d), 0755) + os.makedirs(os.path.join(build_root, d)) + os.chmod(os.path.join(build_root, d), 0o755) -def package_scripts(build_root): - print "Copying scripts and sample configuration to build directory" - shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1])) - os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0644) - shutil.copyfile(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1])) - os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0644) - shutil.copyfile(LOGROTATE_SCRIPT, os.path.join(build_root, LOGROTATE_DIR[1:], "influxdb")) - os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "influxdb"), 0644) - shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "influxdb.conf")) - os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "influxdb.conf"), 0644) +def package_scripts(build_root, config_only=False, windows=False): + """Copy the necessary scripts and configuration files to the package + filesystem. 
+ """ + if config_only: + logging.debug("Copying configuration to build directory.") + shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, "influxdb.conf")) + os.chmod(os.path.join(build_root, "influxdb.conf"), 0o644) + else: + logging.debug("Copying scripts and sample configuration to build directory.") + shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1])) + os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0o644) + shutil.copyfile(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1])) + os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0o644) + shutil.copyfile(LOGROTATE_SCRIPT, os.path.join(build_root, LOGROTATE_DIR[1:], "influxdb")) + os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "influxdb"), 0o644) + shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "influxdb.conf")) + os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "influxdb.conf"), 0o644) + +def package_man_files(build_root): + """Copy and gzip man pages to the package filesystem.""" + logging.debug("Installing man pages.") + run("make -C man/ clean install DESTDIR={}/usr".format(build_root)) + for path, dir, files in os.walk(os.path.join(build_root, MAN_DIR[1:])): + for f in files: + run("gzip -9n {}".format(os.path.join(path, f))) def run_generate(): - # TODO - Port this functionality to InfluxDB, currently a NOOP - print "NOTE: The `--generate` flag is currently a NNOP. Skipping..." - # print "Running generate..." - # command = "go generate ./..." - # code = os.system(command) - # if code != 0: - # print "Generate Failed" - # return False - # else: - # print "Generate Succeeded" - # return True - pass - + """Run 'go generate' to rebuild any static assets. + """ + logging.info("Running 'go generate'...") + if not check_path_for("statik"): + run("go install github.com/rakyll/statik") + orig_path = None + if os.path.join(os.environ.get("GOPATH"), "bin") not in os.environ["PATH"].split(os.pathsep): + orig_path = os.environ["PATH"].split(os.pathsep) + os.environ["PATH"] = os.environ["PATH"].split(os.pathsep).append(os.path.join(os.environ.get("GOPATH"), "bin")) + run("rm -f ./services/admin/statik/statik.go") + run("go generate ./services/admin") + if orig_path is not None: + os.environ["PATH"] = orig_path + return True + +def go_get(branch, update=False, no_uncommitted=False): + """Retrieve build dependencies or restore pinned dependencies. + """ + if local_changes() and no_uncommitted: + logging.error("There are uncommitted changes in the current directory.") + return False + if not check_path_for("gdm"): + logging.info("Downloading `gdm`...") + get_command = "go get github.com/sparrc/gdm" + run(get_command) + logging.info("Retrieving dependencies with `gdm`...") + sys.stdout.flush() + run("{}/bin/gdm restore -v".format(os.environ.get("GOPATH"))) + return True + +def run_tests(race, parallel, timeout, no_vet): + """Run the Go test suite on binary output. + """ + logging.info("Starting tests...") + if race: + logging.info("Race is enabled.") + if parallel is not None: + logging.info("Using parallel: {}".format(parallel)) + if timeout is not None: + logging.info("Using timeout: {}".format(timeout)) + out = run("go fmt ./...") + if len(out) > 0: + logging.error("Code not formatted. Please use 'go fmt ./...' 
to fix formatting errors.") + logging.error("{}".format(out)) + return False + if not no_vet: + logging.info("Running 'go vet'...") + out = run(go_vet_command) + if len(out) > 0: + logging.error("Go vet failed. Please run 'go vet ./...' and fix any errors.") + logging.error("{}".format(out)) + return False + else: + logging.info("Skipping 'go vet' call...") + test_command = "go test -v" + if race: + test_command += " -race" + if parallel is not None: + test_command += " -parallel {}".format(parallel) + if timeout is not None: + test_command += " -timeout {}".format(timeout) + test_command += " ./..." + logging.info("Running tests...") + output = run(test_command) + logging.debug("Test output:\n{}".format(output.encode('ascii', 'ignore'))) + return True + ################ #### All InfluxDB-specific content above this line ################ def run(command, allow_failure=False, shell=False): + """Run shell command (convenience wrapper around subprocess). + """ out = None - if debug: - print "[DEBUG] {}".format(command) + logging.debug("{}".format(command)) try: if shell: out = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=shell) else: out = subprocess.check_output(command.split(), stderr=subprocess.STDOUT) + out = out.decode('utf-8').strip() + # logging.debug("Command output: {}".format(out)) except subprocess.CalledProcessError as e: - print "" - print "" - print "Executed command failed!" - print "-- Command run was: {}".format(command) - print "-- Failure was: {}".format(e.output) if allow_failure: - print "Continuing..." + logging.warn("Command '{}' failed with error: {}".format(command, e.output)) return None else: - print "" - print "Stopping." + logging.error("Command '{}' failed with error: {}".format(command, e.output)) sys.exit(1) except OSError as e: - print "" - print "" - print "Invalid command!" - print "-- Command run was: {}".format(command) - print "-- Failure was: {}".format(e) if allow_failure: - print "Continuing..." + logging.warn("Command '{}' failed with error: {}".format(command, e)) return out else: - print "" - print "Stopping." + logging.error("Command '{}' failed with error: {}".format(command, e)) sys.exit(1) else: return out def create_temp_dir(prefix = None): + """ Create temporary directory with optional prefix. + """ if prefix is None: return tempfile.mkdtemp(prefix="{}-build.".format(PACKAGE_NAME)) else: return tempfile.mkdtemp(prefix=prefix) +def increment_minor_version(version): + """Return the version with the minor version incremented and patch + version set to zero. + """ + ver_list = version.split('.') + if len(ver_list) != 3: + logging.warn("Could not determine how to increment version '{}', will just use provided version.".format(version)) + return version + ver_list[1] = str(int(ver_list[1]) + 1) + ver_list[2] = str(0) + inc_version = '.'.join(ver_list) + logging.debug("Incremented version from '{}' to '{}'.".format(version, inc_version)) + return inc_version + def get_current_version_tag(): - version = run("git describe --always --tags --abbrev=0").strip() + """Retrieve the raw git version tag. + """ + version = run("git describe --always --tags --abbrev=0") return version -def get_current_rc(): - rc = None +def get_current_version(): + """Parse version information from git tag output. 
+ """ version_tag = get_current_version_tag() - matches = re.match(r'.*-rc(\d+)', version_tag) - if matches: - rc, = matches.groups(1) - return rc + # Remove leading 'v' + if version_tag[0] == 'v': + version_tag = version_tag[1:] + # Replace any '-'/'_' with '~' + if '-' in version_tag: + version_tag = version_tag.replace("-","~") + if '_' in version_tag: + version_tag = version_tag.replace("_","~") + return version_tag def get_current_commit(short=False): + """Retrieve the current git commit. + """ command = None if short: command = "git log --pretty=format:'%h' -n 1" @@ -211,23 +311,44 @@ return out.strip('\'\n\r ') def get_current_branch(): + """Retrieve the current git branch. + """ command = "git rev-parse --abbrev-ref HEAD" out = run(command) return out.strip() +def local_changes(): + """Return True if there are local un-committed changes. + """ + output = run("git diff-files --ignore-submodules --").strip() + if len(output) > 0: + return True + return False + def get_system_arch(): + """Retrieve current system architecture. + """ arch = os.uname()[4] if arch == "x86_64": arch = "amd64" + elif arch == "386": + arch = "i386" + elif 'arm' in arch: + # Prevent uname from reporting full ARM arch (eg 'armv7l') + arch = "arm" return arch def get_system_platform(): + """Retrieve current system platform. + """ if sys.platform.startswith("linux"): return "linux" else: return sys.platform def get_go_version(): + """Retrieve version information for Go. + """ out = run("go version") matches = re.search('go version go(\S+)', out) if matches is not None: @@ -235,6 +356,8 @@ return None def check_path_for(b): + """Check the the user's path for the provided binary. + """ def is_exe(fpath): return os.path.isfile(fpath) and os.access(fpath, os.X_OK) @@ -245,49 +368,47 @@ return full_path def check_environ(build_dir = None): - print "" - print "Checking environment:" + """Check environment for common Go variables. + """ + logging.info("Checking environment...") for v in [ "GOPATH", "GOBIN", "GOROOT" ]: - print "- {} -> {}".format(v, os.environ.get(v)) + logging.debug("Using '{}' for {}".format(os.environ.get(v), v)) cwd = os.getcwd() if build_dir is None and os.environ.get("GOPATH") and os.environ.get("GOPATH") not in cwd: - print "!! WARNING: Your current directory is not under your GOPATH. This may lead to build failures." + logging.warn("Your current directory is not under your GOPATH. This may lead to build failures.") + return True def check_prereqs(): - print "" - print "Checking for dependencies:" + """Check user path for required dependencies. + """ + logging.info("Checking for dependencies...") for req in prereqs: - print "- {} ->".format(req), - path = check_path_for(req) - if path: - print "{}".format(path) - else: - print "?" - for req in optional_prereqs: - print "- {} (optional) ->".format(req), - path = check_path_for(req) - if path: - print "{}".format(path) - else: - print "?" - print "" + if not check_path_for(req): + logging.error("Could not find dependency: {}".format(req)) + return False + return True -def upload_packages(packages, bucket_name=None, nightly=False): - if debug: - print "[DEBUG] upload_packages: {}".format(packages) +def upload_packages(packages, bucket_name=None, overwrite=False): + """Upload provided package output to AWS S3. 
+ """ + logging.debug("Uploading files to bucket '{}': {}".format(bucket_name, packages)) try: import boto from boto.s3.key import Key + from boto.s3.connection import OrdinaryCallingFormat + logging.getLogger("boto").setLevel(logging.WARNING) except ImportError: - print "!! Cannot upload packages without the 'boto' Python library." - return 1 - print "Connecting to S3...".format(bucket_name) - c = boto.connect_s3() + logging.warn("Cannot upload packages without 'boto' Python library!") + return False + logging.info("Connecting to AWS S3...") + # Up the number of attempts to 10 from default of 1 + boto.config.add_section("Boto") + boto.config.set("Boto", "metadata_service_num_attempts", "10") + c = boto.connect_s3(calling_format=OrdinaryCallingFormat()) if bucket_name is None: bucket_name = DEFAULT_BUCKET bucket = c.get_bucket(bucket_name.split('/')[0]) - print "Using bucket: {}".format(bucket_name) for p in packages: if '/' in bucket_name: # Allow for nested paths within the bucket name (ex: @@ -297,531 +418,568 @@ os.path.basename(p)) else: name = os.path.basename(p) - if bucket.get_key(name) is None or nightly: - print "Uploading {}...".format(name) - sys.stdout.flush() + logging.debug("Using key: {}".format(name)) + if bucket.get_key(name) is None or overwrite: + logging.info("Uploading file {}".format(name)) k = Key(bucket) k.key = name - if nightly: + if overwrite: n = k.set_contents_from_filename(p, replace=True) else: n = k.set_contents_from_filename(p, replace=False) k.make_public() else: - print "!! Not uploading package {}, as it already exists.".format(p) - print "" - return 0 + logging.warn("Not uploading file {}, as it already exists in the target bucket.".format(name)) + return True -def run_tests(race, parallel, timeout, no_vet): - print "Retrieving Go dependencies...", - get_command = "go get -d -t ./..." - sys.stdout.flush() - run(get_command) - get_command = "go get golang.org/x/tools/cmd/vet" - sys.stdout.flush() - run(get_command) - print "done." - print "Running tests:" - print "\tRace: ", race - if parallel is not None: - print "\tParallel:", parallel - if timeout is not None: - print "\tTimeout:", timeout - sys.stdout.flush() - p = subprocess.Popen(["go", "fmt", "./..."], stdout=subprocess.PIPE, stderr=subprocess.PIPE) +def go_list(vendor=False, relative=False): + """ + Return a list of packages + If vendor is False vendor package are not included + If relative is True the package prefix defined by PACKAGE_URL is stripped + """ + p = subprocess.Popen(["go", "list", "./..."], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() - if len(out) > 0 or len(err) > 0: - print "Code not formatted. Please use 'go fmt ./...' to fix formatting errors." - print out - print err - return False - if not no_vet: - p = subprocess.Popen(["go", "tool", "vet", "-composites=true", "./"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - out, err = p.communicate() - if len(out) > 0 or len(err) > 0: - print "Go vet failed. Please run 'go vet ./...' and fix any errors." - print out - print err - return False - else: - print "Skipping go vet ..." - sys.stdout.flush() - test_command = "go test -v" - if race: - test_command += " -race" - if parallel is not None: - test_command += " -parallel {}".format(parallel) - if timeout is not None: - test_command += " -timeout {}".format(timeout) - test_command += " ./..." 
- code = os.system(test_command) - if code != 0: - print "Tests Failed" - return False - else: - print "Tests Passed" - return True + packages = out.split('\n') + if packages[-1] == '': + packages = packages[:-1] + if not vendor: + non_vendor = [] + for p in packages: + if '/vendor/' not in p: + non_vendor.append(p) + packages = non_vendor + if relative: + relative_pkgs = [] + for p in packages: + r = p.replace(PACKAGE_URL, '.') + if r != '.': + relative_pkgs.append(r) + packages = relative_pkgs + return packages def build(version=None, - branch=None, - commit=None, platform=None, arch=None, nightly=False, - rc=None, race=False, clean=False, outdir=".", - goarm_version="6"): - print "" - print "-------------------------" - print "" - print "Build Plan:" - print "- version: {}".format(version) - if rc: - print "- release candidate: {}".format(rc) - print "- commit: {}".format(get_current_commit(short=True)) - print "- branch: {}".format(get_current_branch()) - print "- platform: {}".format(platform) - print "- arch: {}".format(arch) - if arch == 'arm' and goarm_version: - print "- ARM version: {}".format(goarm_version) - print "- nightly? {}".format(str(nightly).lower()) - print "- race enabled? {}".format(str(race).lower()) - print "" + tags=[], + static=False): + """Build each target for the specified architecture and platform. + """ + logging.info("Starting build for {}/{}...".format(platform, arch)) + logging.info("Using Go version: {}".format(get_go_version())) + logging.info("Using git branch: {}".format(get_current_branch())) + logging.info("Using git commit: {}".format(get_current_commit())) + if static: + logging.info("Using statically-compiled output.") + if race: + logging.info("Race is enabled.") + if len(tags) > 0: + logging.info("Using build tags: {}".format(','.join(tags))) + logging.info("Sending build output to: {}".format(outdir)) if not os.path.exists(outdir): os.makedirs(outdir) - elif clean and outdir != '/': - print "Cleaning build directory..." + elif clean and outdir != '/' and outdir != ".": + logging.info("Cleaning build directory '{}' before building.".format(outdir)) shutil.rmtree(outdir) os.makedirs(outdir) - if rc: - # If a release candidate, update the version information accordingly - version = "{}rc{}".format(version, rc) - - # Set the architecture to something that Go expects - if arch == 'i386': - arch = '386' - elif arch == 'x86_64': - arch = 'amd64' - - print "Starting build..." - tmp_build_dir = create_temp_dir() - for b, c in targets.iteritems(): - print "Building '{}'...".format(os.path.join(outdir, b)) + logging.info("Using version '{}' for build.".format(version)) + for target, path in targets.items(): + logging.info("Building target: {}".format(target)) build_command = "" + + # Handle static binary output + if static is True or "static_" in arch: + if "static_" in arch: + static = True + arch = arch.replace("static_", "") + build_command += "CGO_ENABLED=0 " + + # Handle variations in architecture output + if arch == "i386" or arch == "i686": + arch = "386" + elif "arm" in arch: + arch = "arm" build_command += "GOOS={} GOARCH={} ".format(platform, arch) - if arch == "arm" and goarm_version: - if goarm_version not in ["5", "6", "7", "arm64"]: - print "!! 
Invalid ARM build version: {}".format(goarm_version) - build_command += "GOARM={} ".format(goarm_version) - build_command += "go build -o {} ".format(os.path.join(outdir, b)) + + if "arm" in arch: + if arch == "armel": + build_command += "GOARM=5 " + elif arch == "armhf" or arch == "arm": + build_command += "GOARM=6 " + elif arch == "arm64": + # TODO(rossmcdonald) - Verify this is the correct setting for arm64 + build_command += "GOARM=7 " + else: + logging.error("Invalid ARM architecture specified: {}".format(arch)) + logging.error("Please specify either 'armel', 'armhf', or 'arm64'.") + return False + if platform == 'windows': + target = target + '.exe' + build_command += "go build -o {} ".format(os.path.join(outdir, target)) if race: build_command += "-race " - go_version = get_go_version() - if "1.4" in go_version: - build_command += "-ldflags=\"-X main.buildTime '{}' ".format(datetime.datetime.utcnow().isoformat()) - build_command += "-X main.version {} ".format(version) - build_command += "-X main.branch {} ".format(get_current_branch()) - build_command += "-X main.commit {}\" ".format(get_current_commit()) + if len(tags) > 0: + build_command += "-tags {} ".format(','.join(tags)) + if "1.4" in get_go_version(): + if static: + build_command += "-ldflags=\"-s -X main.version {} -X main.branch {} -X main.commit {}\" ".format(version, + get_current_branch(), + get_current_commit()) + else: + build_command += "-ldflags=\"-X main.version {} -X main.branch {} -X main.commit {}\" ".format(version, + get_current_branch(), + get_current_commit()) + else: - build_command += "-ldflags=\"-X main.buildTime='{}' ".format(datetime.datetime.utcnow().isoformat()) - build_command += "-X main.version={} ".format(version) - build_command += "-X main.branch={} ".format(get_current_branch()) - build_command += "-X main.commit={}\" ".format(get_current_commit()) - build_command += c + # Starting with Go 1.5, the linker flag arguments changed to 'name=value' from 'name value' + if static: + build_command += "-ldflags=\"-s -X main.version={} -X main.branch={} -X main.commit={}\" ".format(version, + get_current_branch(), + get_current_commit()) + else: + build_command += "-ldflags=\"-X main.version={} -X main.branch={} -X main.commit={}\" ".format(version, + get_current_branch(), + get_current_commit()) + if static: + build_command += "-a -installsuffix cgo " + build_command += path + start_time = datetime.utcnow() run(build_command, shell=True) - print "" + end_time = datetime.utcnow() + logging.info("Time taken: {}s".format((end_time - start_time).total_seconds())) + return True -def create_dir(path): - try: - os.makedirs(path) - except OSError as e: - print e - -def rename_file(fr, to): - try: - os.rename(fr, to) - except OSError as e: - print e - # Return the original filename - return fr - else: - # Return the new filename - return to - -def copy_file(fr, to): - try: - shutil.copy(fr, to) - except OSError as e: - print e - -def go_get(branch, update=False): - get_command = None - if update: - get_command = "go get -u -f -d ./..." - else: - get_command = "go get -d ./..." - - # 'go get' switches to master, so stash what we currently have - stash = run("git stash create -a").strip() - if len(stash) > 0: - print "There are un-committed changes in your local branch, stashing them as {}".format(stash) - # reset to ensure we don't have any checkout issues - run("git reset --hard") - - print "Retrieving Go dependencies (moving to master)..." 
- run(get_command) - sys.stdout.flush() - - print "Moving back to branch '{}'...".format(branch) - run("git checkout {}".format(branch)) - - print "Applying previously stashed contents..." - run("git stash apply {}".format(stash)) - else: - print "Retrieving Go dependencies..." - run(get_command) - - print "Moving back to branch '{}'...".format(branch) - run("git checkout {}".format(branch)) - def generate_md5_from_file(path): + """Generate MD5 signature based on the contents of the file at path. + """ m = hashlib.md5() with open(path, 'rb') as f: for chunk in iter(lambda: f.read(4096), b""): m.update(chunk) return m.hexdigest() -def build_packages(build_output, version, pkg_arch, nightly=False, rc=None, iteration=1): +def generate_sig_from_file(path): + """Generate a detached GPG signature from the file at path. + """ + logging.debug("Generating GPG signature for file: {}".format(path)) + gpg_path = check_path_for('gpg') + if gpg_path is None: + logging.warn("gpg binary not found on path! Skipping signature creation.") + return False + if os.environ.get("GNUPG_HOME") is not None: + run('gpg --homedir {} --armor --yes --detach-sign {}'.format(os.environ.get("GNUPG_HOME"), path)) + else: + run('gpg --armor --detach-sign --yes {}'.format(path)) + return True + +def package(build_output, pkg_name, version, nightly=False, iteration=1, static=False, release=False): + """Package the output of the build process. + """ outfiles = [] tmp_build_dir = create_temp_dir() - if debug: - print "[DEBUG] build_output = {}".format(build_output) + logging.debug("Packaging for build output: {}".format(build_output)) + logging.info("Using temporary directory: {}".format(tmp_build_dir)) try: - print "-------------------------" - print "" - print "Packaging..." - for p in build_output: + for platform in build_output: # Create top-level folder displaying which platform (linux, etc) - create_dir(os.path.join(tmp_build_dir, p)) - for a in build_output[p]: - current_location = build_output[p][a] + os.makedirs(os.path.join(tmp_build_dir, platform)) + for arch in build_output[platform]: + logging.info("Creating packages for {}/{}".format(platform, arch)) # Create second-level directory displaying the architecture (amd64, etc) - build_root = os.path.join(tmp_build_dir, p, a, 'influxdb-{}-{}'.format(version, iteration)) + current_location = build_output[platform][arch] + # Create directory tree to mimic file system of package - create_dir(build_root) - create_package_fs(build_root) - # Copy in packaging and miscellaneous scripts - package_scripts(build_root) - # Copy newly-built binaries to packaging directory - for b in targets: - if p == 'windows': - b = b + '.exe' - fr = os.path.join(current_location, b) - to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], b) - if debug: - print "[{}][{}] - Moving from '{}' to '{}'".format(p, a, fr, to) - copy_file(fr, to) - # Package the directory structure - for package_type in supported_packages[p]: - print "Packaging directory '{}' as '{}'...".format(build_root, package_type) - name = PACKAGE_NAME + build_root = os.path.join(tmp_build_dir, + platform, + arch, + '{}-{}-{}'.format(PACKAGE_NAME, version, iteration)) + os.makedirs(build_root) + + # Copy packaging scripts to build directory + if platform == "windows": + # For windows and static builds, just copy + # binaries to root of package (no other scripts or + # directories) + package_scripts(build_root, config_only=True, windows=True) + elif static or "static_" in arch: + package_scripts(build_root, config_only=True) + else: + 
create_package_fs(build_root) + package_scripts(build_root) + + if platform != "windows": + package_man_files(build_root) + + for binary in targets: + # Copy newly-built binaries to packaging directory + if platform == 'windows': + binary = binary + '.exe' + if platform == 'windows' or static or "static_" in arch: + # Where the binary should go in the package filesystem + to = os.path.join(build_root, binary) + # Where the binary currently is located + fr = os.path.join(current_location, binary) + else: + # Where the binary currently is located + fr = os.path.join(current_location, binary) + # Where the binary should go in the package filesystem + to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], binary) + shutil.copy(fr, to) + + for package_type in supported_packages[platform]: + # Package the directory structure for each package type for the platform + logging.debug("Packaging directory '{}' as '{}'.".format(build_root, package_type)) + name = pkg_name # Reset version, iteration, and current location on each run # since they may be modified below. package_version = version package_iteration = iteration + if "static_" in arch: + # Remove the "static_" from the displayed arch on the package + package_arch = arch.replace("static_", "") + else: + package_arch = arch + if not release and not nightly: + # For non-release builds, just use the commit hash as the version + package_version = "{}~{}".format(version, + get_current_commit(short=True)) + package_iteration = "0" package_build_root = build_root - current_location = build_output[p][a] + current_location = build_output[platform][arch] if package_type in ['zip', 'tar']: + # For tars and zips, start the packaging one folder above + # the build root (to include the package name) package_build_root = os.path.join('/', '/'.join(build_root.split('/')[:-1])) if nightly: - name = '{}-nightly_{}_{}'.format(name, p, a) + if static or "static_" in arch: + name = '{}-static-nightly_{}_{}'.format(name, + platform, + package_arch) + else: + name = '{}-nightly_{}_{}'.format(name, + platform, + package_arch) else: - name = '{}-{}-{}_{}_{}'.format(name, package_version, package_iteration, p, a) - - if package_type == 'tar': - # Add `tar.gz` to path to ensure a small package size - current_location = os.path.join(current_location, name + '.tar.gz') - elif package_type == 'zip': - current_location = os.path.join(current_location, name + '.zip') - - if rc is not None: - package_iteration = "0.rc{}".format(rc) - if pkg_arch is not None: - a = pkg_arch - if a == '386': - a = 'i386' - - fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format( - fpm_common_args, - name, - a, - package_type, - package_version, - package_iteration, - package_build_root, - current_location) - if debug: - fpm_command += "--verbose " - if package_type == "rpm": - fpm_command += "--depends coreutils --rpm-posttrans {}".format(POSTINST_SCRIPT) - out = run(fpm_command, shell=True) - matches = re.search(':path=>"(.*)"', out) - outfile = None - if matches is not None: - outfile = matches.groups()[0] - if outfile is None: - print "!! Could not determine output from packaging command." 
+ if static or "static_" in arch: + name = '{}-{}-static_{}_{}'.format(name, + package_version, + platform, + package_arch) + else: + name = '{}-{}_{}_{}'.format(name, + package_version, + platform, + package_arch) + current_location = os.path.join(os.getcwd(), current_location) + if package_type == 'tar': + tar_command = "cd {} && tar -cvzf {}.tar.gz ./*".format(package_build_root, name) + run(tar_command, shell=True) + run("mv {}.tar.gz {}".format(os.path.join(package_build_root, name), current_location), shell=True) + outfile = os.path.join(current_location, name + ".tar.gz") + outfiles.append(outfile) + elif package_type == 'zip': + zip_command = "cd {} && zip -r {}.zip ./*".format(package_build_root, name) + run(zip_command, shell=True) + run("mv {}.zip {}".format(os.path.join(package_build_root, name), current_location), shell=True) + outfile = os.path.join(current_location, name + ".zip") + outfiles.append(outfile) + elif package_type not in ['zip', 'tar'] and static or "static_" in arch: + logging.info("Skipping package type '{}' for static builds.".format(package_type)) else: - # Strip nightly version (the unix epoch) from filename - if nightly and package_type in ['deb', 'rpm']: - outfile = rename_file(outfile, outfile.replace("{}-{}".format(version, iteration), "nightly")) - outfiles.append(os.path.join(os.getcwd(), outfile)) - # Display MD5 hash for generated package - print "MD5({}) = {}".format(outfile, generate_md5_from_file(outfile)) - print "" - if debug: - print "[DEBUG] package outfiles: {}".format(outfiles) + fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format( + fpm_common_args, + name, + package_arch, + package_type, + package_version, + package_iteration, + package_build_root, + current_location) + if package_type == "rpm": + fpm_command += "--depends coreutils --rpm-posttrans {}".format(POSTINST_SCRIPT) + out = run(fpm_command, shell=True) + matches = re.search(':path=>"(.*)"', out) + outfile = None + if matches is not None: + outfile = matches.groups()[0] + if outfile is None: + logging.warn("Could not determine output from packaging output!") + else: + if nightly: + # Strip nightly version from package name + new_outfile = outfile.replace("{}-{}".format(package_version, package_iteration), "nightly") + os.rename(outfile, new_outfile) + outfile = new_outfile + else: + if package_type == 'rpm': + # rpm's convert any dashes to underscores + package_version = package_version.replace("-", "_") + new_outfile = outfile.replace("{}-{}".format(package_version, package_iteration), package_version) + os.rename(outfile, new_outfile) + outfile = new_outfile + outfiles.append(os.path.join(os.getcwd(), outfile)) + logging.debug("Produced package files: {}".format(outfiles)) return outfiles finally: # Cleanup shutil.rmtree(tmp_build_dir) -def print_usage(): - print "Usage: ./build.py [options]" - print "" - print "Options:" - print "\t --outdir= \n\t\t- Send build output to a specified path. Defaults to ./build." - print "\t --arch= \n\t\t- Build for specified architecture. Acceptable values: x86_64|amd64, 386|i386, arm, or all" - print "\t --goarm= \n\t\t- Build for specified ARM version (when building for ARM). Default value is: 6" - print "\t --platform= \n\t\t- Build for specified platform. Acceptable values: linux, windows, darwin, or all" - print "\t --version= \n\t\t- Version information to apply to build metadata. If not specified, will be pulled from repo tag." 
- print "\t --pkgarch= \n\t\t- Package architecture if different from " - print "\t --commit= \n\t\t- Use specific commit for build (currently a NOOP)." - print "\t --branch= \n\t\t- Build from a specific branch (currently a NOOP)." - print "\t --rc= \n\t\t- Whether or not the build is a release candidate (affects version information)." - print "\t --iteration= \n\t\t- The iteration to display on the package output (defaults to 0 for RC's, and 1 otherwise)." - print "\t --race \n\t\t- Whether the produced build should have race detection enabled." - print "\t --package \n\t\t- Whether the produced builds should be packaged for the target platform(s)." - print "\t --nightly \n\t\t- Whether the produced build is a nightly (affects version information)." - print "\t --update \n\t\t- Whether dependencies should be updated prior to building." - print "\t --test \n\t\t- Run Go tests. Will not produce a build." - print "\t --parallel \n\t\t- Run Go tests in parallel up to the count specified." - print "\t --generate \n\t\t- Run `go generate` (currently a NOOP)." - print "\t --timeout \n\t\t- Timeout for Go tests. Defaults to 480s." - print "\t --clean \n\t\t- Clean the build output directory prior to creating build." - print "\t --no-get \n\t\t- Do not run `go get` before building." - print "\t --bucket=\n\t\t- Full path of the bucket to upload packages to (must also specify --upload)." - print "\t --debug \n\t\t- Displays debug output." - print "" - -def print_package_summary(packages): - print packages - -def main(): - global debug - - # Command-line arguments - outdir = "build" - commit = None - target_platform = None - target_arch = None - package_arch = None - nightly = False - race = False - branch = None - version = get_current_version_tag() - rc = get_current_rc() - package = False - update = False - clean = False - upload = False - test = False - parallel = None - timeout = None - iteration = 1 - no_vet = False - goarm_version = "6" - run_get = True - upload_bucket = None - generate = False - - for arg in sys.argv[1:]: - if '--outdir' in arg: - # Output directory. If none is specified, then builds will be placed in the same directory. - outdir = arg.split("=")[1] - if '--commit' in arg: - # Commit to build from. If none is specified, then it will build from the most recent commit. - commit = arg.split("=")[1] - if '--branch' in arg: - # Branch to build from. If none is specified, then it will build from the current branch. - branch = arg.split("=")[1] - elif '--arch' in arg: - # Target architecture. If none is specified, then it will build for the current arch. - target_arch = arg.split("=")[1] - elif '--platform' in arg: - # Target platform. If none is specified, then it will build for the current platform. - target_platform = arg.split("=")[1] - elif '--version' in arg: - # Version to assign to this build (0.9.5, etc) - version = arg.split("=")[1] - elif '--pkgarch' in arg: - # Package architecture if different from (armhf, etc) - package_arch = arg.split("=")[1] - elif '--rc' in arg: - # Signifies that this is a release candidate build. - rc = arg.split("=")[1] - elif '--race' in arg: - # Signifies that race detection should be enabled. - race = True - elif '--package' in arg: - # Signifies that packages should be built. - package = True - elif '--nightly' in arg: - # Signifies that this is a nightly build. 
- nightly = True - # In order to cleanly delineate nightly version, we are adding the epoch timestamp - # to the version so that version numbers are always greater than the previous nightly. - version = "{}.n{}".format(version, int(time.time())) - elif '--update' in arg: - # Signifies that dependencies should be updated. - update = True - elif '--upload' in arg: - # Signifies that the resulting packages should be uploaded to S3 - upload = True - elif '--test' in arg: - # Run tests and exit - test = True - elif '--parallel' in arg: - # Set parallel for tests. - parallel = int(arg.split("=")[1]) - elif '--timeout' in arg: - # Set timeout for tests. - timeout = arg.split("=")[1] - elif '--clean' in arg: - # Signifies that the outdir should be deleted before building - clean = True - elif '--iteration' in arg: - iteration = arg.split("=")[1] - elif '--no-vet' in arg: - no_vet = True - elif '--no-get' in arg: - run_get = False - elif '--goarm' in arg: - # Signifies GOARM flag to pass to build command when compiling for ARM - goarm_version = arg.split("=")[1] - elif '--bucket' in arg: - # The bucket to upload the packages to, relies on boto - upload_bucket = arg.split("=")[1] - elif '--generate' in arg: - # Run go generate ./... - # TODO - this currently does nothing for InfluxDB - generate = True - elif '--debug' in arg: - print "[DEBUG] Using debug output" - debug = True - elif '--help' in arg: - print_usage() - return 0 - else: - print "!! Unknown argument: {}".format(arg) - print_usage() - return 1 +def main(args): + global PACKAGE_NAME - if nightly and rc: - print "!! Cannot be both nightly and a release candidate! Stopping." + if args.release and args.nightly: + logging.error("Cannot be both a nightly and a release.") return 1 + if args.nightly: + args.version = increment_minor_version(args.version) + args.version = "{}~n{}".format(args.version, + datetime.utcnow().strftime("%Y%m%d%H%M")) + args.iteration = 0 + # Pre-build checks check_environ() - check_prereqs() - - if not commit: - commit = get_current_commit(short=True) - if not branch: - branch = get_current_branch() - if not target_arch: - system_arch = get_system_arch() - if 'arm' in system_arch: - # Prevent uname from reporting ARM arch (eg 'armv7l') - target_arch = "arm" - else: - target_arch = system_arch - if not target_platform: - target_platform = get_system_platform() - if rc or nightly: - # If a release candidate or nightly, set iteration to 0 (instead of 1) - iteration = 0 - - if target_arch == '386': - target_arch = 'i386' - elif target_arch == 'x86_64': - target_arch = 'amd64' - + if not check_prereqs(): + return 1 + if args.build_tags is None: + args.build_tags = [] + else: + args.build_tags = args.build_tags.split(',') + + orig_commit = get_current_commit(short=True) + orig_branch = get_current_branch() + + if args.platform not in supported_builds and args.platform != 'all': + logging.error("Invalid build platform: {}".format(target_platform)) + return 1 + build_output = {} - if generate: - if not run_generate(): + if args.branch != orig_branch and args.commit != orig_commit: + logging.error("Can only specify one branch or commit to build from.") + return 1 + elif args.branch != orig_branch: + logging.info("Moving to git branch: {}".format(args.branch)) + run("git checkout {}".format(args.branch)) + elif args.commit != orig_commit: + logging.info("Moving to git commit: {}".format(args.commit)) + run("git checkout {}".format(args.commit)) + + if not args.no_get: + if not go_get(args.branch, update=args.update, 
no_uncommitted=args.no_uncommitted): return 1 - - if test: - if not run_tests(race, parallel, timeout, no_vet): + + if args.generate: + if not run_generate(): return 1 - return 0 - if run_get: - go_get(branch, update=update) + if args.test: + if not run_tests(args.race, args.parallel, args.timeout, args.no_vet): + return 1 platforms = [] single_build = True - if target_platform == 'all': + if args.platform == 'all': platforms = supported_builds.keys() single_build = False else: - platforms = [target_platform] + platforms = [args.platform] for platform in platforms: build_output.update( { platform : {} } ) archs = [] - if target_arch == "all": + if args.arch == "all": single_build = False archs = supported_builds.get(platform) else: - archs = [target_arch] + archs = [args.arch] + for arch in archs: - od = outdir + od = args.outdir if not single_build: - od = os.path.join(outdir, platform, arch) - build(version=version, - branch=branch, - commit=commit, - platform=platform, - arch=arch, - nightly=nightly, - rc=rc, - race=race, - clean=clean, - outdir=od, - goarm_version=goarm_version) + od = os.path.join(args.outdir, platform, arch) + if not build(version=args.version, + platform=platform, + arch=arch, + nightly=args.nightly, + race=args.race, + clean=args.clean, + outdir=od, + tags=args.build_tags, + static=args.static): + return 1 build_output.get(platform).update( { arch : od } ) # Build packages - if package: + if args.package: if not check_path_for("fpm"): - print "!! Cannot package without command 'fpm'." + logging.error("FPM ruby gem required for packaging. Stopping.") return 1 + packages = package(build_output, + args.name, + args.version, + nightly=args.nightly, + iteration=args.iteration, + static=args.static, + release=args.release) + if args.sign: + logging.debug("Generating GPG signatures for packages: {}".format(packages)) + sigs = [] # retain signatures so they can be uploaded with packages + for p in packages: + if generate_sig_from_file(p): + sigs.append(p + '.asc') + else: + logging.error("Creation of signature for package [{}] failed!".format(p)) + return 1 + packages += sigs + if args.upload: + logging.debug("Files staged for upload: {}".format(packages)) + if args.nightly: + args.upload_overwrite = True + if not upload_packages(packages, bucket_name=args.bucket, overwrite=args.upload_overwrite): + return 1 + logging.info("Packages created:") + for p in packages: + logging.info("{} (MD5={})".format(p.split('/')[-1:][0], + generate_md5_from_file(p))) + if orig_branch != get_current_branch(): + logging.info("Moving back to original git branch: {}".format(orig_branch)) + run("git checkout {}".format(orig_branch)) - packages = build_packages(build_output, version, package_arch, nightly=nightly, rc=rc, iteration=iteration) - if upload: - upload_packages(packages, bucket_name=upload_bucket, nightly=nightly) - print "Done!" 
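For reference, the nightly handling at the top of main() above turns the raw tag version into a nightly version by bumping the minor version, resetting the patch level, and appending a UTC timestamp. A standalone sketch of that transformation (the tag value is an example; the real script reads it from `git describe`):

```python
from datetime import datetime

def increment_minor_version(version):
    # Same idea as the helper above: bump the minor version, reset the patch.
    major, minor, patch = version.split('.')
    return '{}.{}.0'.format(major, int(minor) + 1)

tag_version = "1.1.1"
nightly = increment_minor_version(tag_version)
nightly = "{}~n{}".format(nightly, datetime.utcnow().strftime("%Y%m%d%H%M"))

print(nightly)   # e.g. 1.2.0~n201612062136
```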
return 0 if __name__ == '__main__': - sys.exit(main()) - + LOG_LEVEL = logging.INFO + if '--debug' in sys.argv[1:]: + LOG_LEVEL = logging.DEBUG + log_format = '[%(levelname)s] %(funcName)s: %(message)s' + logging.basicConfig(level=LOG_LEVEL, + format=log_format) + + parser = argparse.ArgumentParser(description='InfluxDB build and packaging script.') + parser.add_argument('--verbose','-v','--debug', + action='store_true', + help='Use debug output') + parser.add_argument('--outdir', '-o', + metavar='', + default='./build/', + type=os.path.abspath, + help='Output directory') + parser.add_argument('--name', '-n', + metavar='', + default=PACKAGE_NAME, + type=str, + help='Name to use for package name (when package is specified)') + parser.add_argument('--arch', + metavar='', + type=str, + default=get_system_arch(), + help='Target architecture for build output') + parser.add_argument('--platform', + metavar='', + type=str, + default=get_system_platform(), + help='Target platform for build output') + parser.add_argument('--branch', + metavar='', + type=str, + default=get_current_branch(), + help='Build from a specific branch') + parser.add_argument('--commit', + metavar='', + type=str, + default=get_current_commit(short=True), + help='Build from a specific commit') + parser.add_argument('--version', + metavar='', + type=str, + default=get_current_version(), + help='Version information to apply to build output (ex: 0.12.0)') + parser.add_argument('--iteration', + metavar='', + type=str, + default="1", + help='Package iteration to apply to build output (defaults to 1)') + parser.add_argument('--stats', + action='store_true', + help='Emit build metrics (requires InfluxDB Python client)') + parser.add_argument('--stats-server', + metavar='', + type=str, + help='Send build stats to InfluxDB using provided hostname and port') + parser.add_argument('--stats-db', + metavar='', + type=str, + help='Send build stats to InfluxDB using provided database name') + parser.add_argument('--nightly', + action='store_true', + help='Mark build output as nightly build (will incremement the minor version)') + parser.add_argument('--update', + action='store_true', + help='Update build dependencies prior to building') + parser.add_argument('--package', + action='store_true', + help='Package binary output') + parser.add_argument('--release', + action='store_true', + help='Mark build output as release') + parser.add_argument('--clean', + action='store_true', + help='Clean output directory before building') + parser.add_argument('--no-get', + action='store_true', + help='Do not retrieve pinned dependencies when building') + parser.add_argument('--no-uncommitted', + action='store_true', + help='Fail if uncommitted changes exist in the working directory') + parser.add_argument('--upload', + action='store_true', + help='Upload output packages to AWS S3') + parser.add_argument('--upload-overwrite','-w', + action='store_true', + help='Upload output packages to AWS S3') + parser.add_argument('--bucket', + metavar='', + type=str, + default=DEFAULT_BUCKET, + help='Destination bucket for uploads') + parser.add_argument('--generate', + action='store_true', + help='Run "go generate" before building') + parser.add_argument('--build-tags', + metavar='', + help='Optional build tags to use for compilation') + parser.add_argument('--static', + action='store_true', + help='Create statically-compiled binary output') + parser.add_argument('--sign', + action='store_true', + help='Create GPG detached signatures for packages (when package is 
specified)') + parser.add_argument('--test', + action='store_true', + help='Run tests (does not produce build output)') + parser.add_argument('--no-vet', + action='store_true', + help='Do not run "go vet" when running tests') + parser.add_argument('--race', + action='store_true', + help='Enable race flag for build output') + parser.add_argument('--parallel', + metavar='', + type=int, + help='Number of tests to run simultaneously') + parser.add_argument('--timeout', + metavar='', + type=str, + help='Timeout for tests before failing') + args = parser.parse_args() + print_banner() + sys.exit(main(args)) diff -Nru influxdb-0.10.0+dfsg1/build.sh influxdb-1.1.1+dfsg1/build.sh --- influxdb-0.10.0+dfsg1/build.sh 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/build.sh 2016-12-06 21:36:15.000000000 +0000 @@ -16,7 +16,7 @@ -e AWS_ACCESS_KEY_ID="$AWS_ACCESS_KEY_ID" \ -e AWS_SECRET_ACCESS_KEY="$AWS_SECRET_ACCESS_KEY" \ -v $HOME/.aws.conf:/root/.aws.conf \ - -v $DIR:/root/go/src/github.com/influxdb/influxdb \ + -v $DIR:/root/go/src/github.com/influxdata/influxdb \ influxdb-builder \ "$@" diff -Nru influxdb-0.10.0+dfsg1/CHANGELOG.md influxdb-1.1.1+dfsg1/CHANGELOG.md --- influxdb-0.10.0+dfsg1/CHANGELOG.md 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/CHANGELOG.md 2016-12-06 21:36:15.000000000 +0000 @@ -1,30 +1,571 @@ -## v0.10.0 [unreleased] +## v1.1.1 [2016-12-06] ### Features -- [#5183](https://github.com/influxdb/influxdb/pull/5183): CLI confirms database exists when USE executed. Thanks @pires -- [#5201](https://github.com/influxdb/influxdb/pull/5201): Allow max UDP buffer size to be configurable. Thanks @sebito91 -- [#5194](https://github.com/influxdb/influxdb/pull/5194): Custom continuous query options per query rather than per node. -- [#5224](https://github.com/influxdb/influxdb/pull/5224): Online backup/incremental backup. Restore (for TSM). + +- [#7684](https://github.com/influxdata/influxdb/issues/7684): Update Go version to 1.7.4. + +### Bugfixes + +- [#7625](https://github.com/influxdata/influxdb/issues/7625): Fix incorrect tag value in error message. +- [#7661](https://github.com/influxdata/influxdb/pull/7661): Quote the empty string as an ident. +- [#7679](https://github.com/influxdata/influxdb/pull/7679): Fix string fields w/ trailing slashes + +### Security + +[Go 1.7.4](https://golang.org/doc/devel/release.html#go1.7.minor) was released to address two security issues. This release includes these security fixes. + +## v1.1.0 [2016-11-14] + +### Release Notes + +This release is built with go 1.7.3 and provides many performance optimizations, stability changes and a few new query capabilities. If upgrading from a prior version, please read the configuration changes below section before upgrading. + +### Deprecations + +The admin interface is deprecated and will be removed in a subsequent release. The configuration setting to enable the admin UI is now disabled by default, but can be enabled if necessary. We recommend using [Chronograf](https://github.com/influxdata/chronograf) or [Grafana](https://github.com/grafana/grafana) as a replacement. + +### Configuration Changes + +The following configuration changes may need to changed before upgrading to `1.1.0` from prior versions. + +#### `[admin]` Section + +* `enabled` now default to false. If you are currently using the admin interaface, you will need to change this value to `true` to re-enable it. The admin interface is currently deprecated and will be removed in a subsequent release. 
+ +#### `[data]` Section + +* `max-values-per-tag` was added with a default of 100,000, but can be disabled by setting it to `0`. Existing measurements with tags that exceed this limit will continue to load, but writes that would cause the tags cardinality to increase will be dropped and a `partial write` error will be returned to the caller. This limit can be used to prevent high cardinality tag values from being written to a measurement. +* `cache-max-memory-size` has been increased to from `524288000` to `1048576000`. This setting is the maximum amount of RAM, in bytes, a shard cache can use before it rejects writes with an error. Setting this value to `0` disables the limit. +* `cache-snapshot-write-cold-duration` has been decreased from `1h` to `10m`. This setting determines how long values will stay in the shard cache while the shard is cold for writes. +* `compact-full-write-cold-duration` has been decreased from `24h` to `4h`. The shorter duration allows cold shards to be compacted to an optimal state more quickly. + +### Features + +The query language has been extended with a few new features: + +* New `cumulative_sum` function - [#7388](https://github.com/influxdata/influxdb/pull/7388) +* New `linear` fill option - [#7408](https://github.com/influxdata/influxdb/pull/7408) +* Support `ON` for `SHOW` commands - [#7295](https://github.com/influxdata/influxdb/pull/7295) +* Support regex on fields keys in select clause - [#7442](https://github.com/influxdata/influxdb/pull/7442) + +All Changes: + +- [#7415](https://github.com/influxdata/influxdb/pull/7415): Add sample function to query language. +- [#7403](https://github.com/influxdata/influxdb/pull/7403): Add `fill(linear)` to query language. +- [#7120](https://github.com/influxdata/influxdb/issues/7120): Add additional statistics to query executor. +- [#7135](https://github.com/influxdata/influxdb/pull/7135): Support enable HTTP service over unix domain socket. Thanks @oiooj +- [#3634](https://github.com/influxdata/influxdb/issues/3634): Support mixed duration units. +- [#7099](https://github.com/influxdata/influxdb/pull/7099): Implement text/csv content encoding for the response writer. +- [#6992](https://github.com/influxdata/influxdb/issues/6992): Support tools for running async queries. +- [#7136](https://github.com/influxdata/influxdb/pull/7136): Update jwt-go dependency to version 3. +- [#6962](https://github.com/influxdata/influxdb/issues/6962): Support ON and use default database for SHOW commands. +- [#7268](https://github.com/influxdata/influxdb/pull/7268): More man pages for the other tools we package and compress man pages fully. +- [#7305](https://github.com/influxdata/influxdb/pull/7305): UDP Client: Split large points. Thanks @vlasad +- [#7115](https://github.com/influxdata/influxdb/issues/7115): Feature request: `influx inspect -export` should dump WAL files. +- [#7388](https://github.com/influxdata/influxdb/pull/7388): Implement cumulative_sum() function. +- [#7441](https://github.com/influxdata/influxdb/pull/7441): Speed up shutdown by closing shards concurrently. +- [#7146](https://github.com/influxdata/influxdb/issues/7146): Add max-values-per-tag to limit high tag cardinality data +- [#5955](https://github.com/influxdata/influxdb/issues/5955): Make regex work on field and dimension keys in SELECT clause. +- [#7470](https://github.com/influxdata/influxdb/pull/7470): Reduce map allocations when computing the TagSet of a measurement. 
+- [#6894](https://github.com/influxdata/influxdb/issues/6894): Support `INFLUX_USERNAME` and `INFLUX_PASSWORD` for setting username/password in the CLI. +- [#6896](https://github.com/influxdata/influxdb/issues/6896): Correctly read in input from a non-interactive stream for the CLI. +- [#7463](https://github.com/influxdata/influxdb/pull/7463): Make input plugin services open/close idempotent. +- [#7473](https://github.com/influxdata/influxdb/pull/7473): Align binary math expression streams by time. +- [#7281](https://github.com/influxdata/influxdb/pull/7281): Add stats for active compactions, compaction errors. +- [#7496](https://github.com/influxdata/influxdb/pull/7496): Filter out series within shards that do not have data for that series. +- [#7480](https://github.com/influxdata/influxdb/pull/7480): Improve compaction planning performance by caching tsm file stats. +- [#7320](https://github.com/influxdata/influxdb/issues/7320): Update defaults in config for latest best practices +- [#7495](https://github.com/influxdata/influxdb/pull/7495): Rewrite regexes of the form host = /^server-a$/ to host = 'server-a', to take advantage of the tsdb index. +- [#6704](https://github.com/influxdata/influxdb/issues/6704): Optimize first/last when no group by interval is present. +- [#4461](https://github.com/influxdata/influxdb/issues/4461): Change default time boundaries for raw queries. + +### Bugfixes + +- [#7392](https://github.com/influxdata/influxdb/pull/7392): Enable https subscriptions to work with custom CA certificates. +- [#1834](https://github.com/influxdata/influxdb/issues/1834): Drop time when used as a tag or field key. +- [#7152](https://github.com/influxdata/influxdb/issues/7152): Decrement number of measurements only once when deleting the last series from a measurement. +- [#7177](https://github.com/influxdata/influxdb/issues/7177): Fix base64 encoding issue with /debug/vars stats. +- [#7196](https://github.com/influxdata/influxdb/issues/7196): Fix mmap dereferencing, fixes #7183, #7180 +- [#7013](https://github.com/influxdata/influxdb/issues/7013): Fix the dollar sign so it properly handles reserved keywords. +- [#7297](https://github.com/influxdata/influxdb/issues/7297): Use consistent column output from the CLI for column formatted responses. +- [#7231](https://github.com/influxdata/influxdb/issues/7231): Duplicate parsing bug in ALTER RETENTION POLICY. +- [#7285](https://github.com/influxdata/influxdb/issues/7285): Correctly use password-type field in Admin UI. Thanks @dandv! +- [#2792](https://github.com/influxdata/influxdb/issues/2792): Exceeding max retention policy duration gives incorrect error message +- [#7226](https://github.com/influxdata/influxdb/issues/7226): Fix database locked up when deleting shards +- [#7382](https://github.com/influxdata/influxdb/issues/7382): Shard stats include wal path tag so disk bytes make more sense. +- [#7385](https://github.com/influxdata/influxdb/pull/7385): Reduce query planning allocations +- [#7436](https://github.com/influxdata/influxdb/issues/7436): Remove accidentally added string support for the stddev call. +- [#7161](https://github.com/influxdata/influxdb/issues/7161): Drop measurement causes cache max memory exceeded error. +- [#7334](https://github.com/influxdata/influxdb/issues/7334): Panic with unread show series iterators during drop database +- [#7482](https://github.com/influxdata/influxdb/issues/7482): Fix issue where point would be written to wrong shard. 
+- [#7431](https://github.com/influxdata/influxdb/issues/7431): Remove /data/process_continuous_queries endpoint.
+- [#7053](https://github.com/influxdata/influxdb/issues/7053): Delete statement returns an error when retention policy or database is specified
+- [#7494](https://github.com/influxdata/influxdb/issues/7494): influx_inspect: export does not escape field keys.
+- [#7526](https://github.com/influxdata/influxdb/issues/7526): Truncate the version string when linking to the documentation.
+- [#7548](https://github.com/influxdata/influxdb/issues/7548): Fix output duration units for SHOW QUERIES.
+- [#7564](https://github.com/influxdata/influxdb/issues/7564): Fix incorrect grouping when multiple aggregates are used with sparse data.
+- [#7606](https://github.com/influxdata/influxdb/pull/7606): Avoid deadlock when `max-row-limit` is hit.
+
+## v1.0.2 [2016-10-05]
+
+### Bugfixes
+
+- [#7150](https://github.com/influxdata/influxdb/issues/7150): Do not automatically reset the shard duration when using ALTER RETENTION POLICY
+- [#5878](https://github.com/influxdata/influxdb/issues/5878): Ensure correct shard groups created when retention policy has been altered.
+- [#7391](https://github.com/influxdata/influxdb/issues/7391): Fix RLE integer decoding producing negative numbers
+- [#7335](https://github.com/influxdata/influxdb/pull/7335): Avoid stat syscall when planning compactions
+- [#7330](https://github.com/influxdata/influxdb/issues/7330): Subscription data loss under high write load
+
+## v1.0.1 [2016-09-26]
+
+### Bugfixes
+
+- [#7271](https://github.com/influxdata/influxdb/issues/7271): Fixing typo within example configuration file. Thanks @andyfeller!
+- [#7270](https://github.com/influxdata/influxdb/issues/7270): Implement time math for lazy time literals.
+- [#7272](https://github.com/influxdata/influxdb/issues/7272): Report cmdline and memstats in /debug/vars.
+- [#7299](https://github.com/influxdata/influxdb/issues/7299): Ensure fieldsCreated stat available in shard measurement.
+- [#6846](https://github.com/influxdata/influxdb/issues/6846): Read an invalid JSON response as an error in the influx client.
+- [#7110](https://github.com/influxdata/influxdb/issues/7110): Skip past points at the same time in derivative call within a merged series.
+- [#7226](https://github.com/influxdata/influxdb/issues/7226): Fix database locked up when deleting shards
+- [#7315](https://github.com/influxdata/influxdb/issues/7315): Prevent users from manually using system queries since incorrect use would result in a panic.
+
+## v1.0.0 [2016-09-08]
+
+### Release Notes
+
+### Breaking changes
+
+* `max-series-per-database` was added with a default of 1M but can be disabled by setting it to `0`. Existing databases with series that exceed this limit will continue to load but writes that would create new series will fail.
+* Config option `[cluster]` has been replaced with `[coordinator]`
+* Support for config options `[collectd]` and `[opentsdb]` has been removed; use `[[collectd]]` and `[[opentsdb]]` instead.
+* Config option `data-logging-enabled` within the `[data]` section has been renamed to `trace-logging-enabled`, and defaults to `false`.
+* The keywords `IF`, `EXISTS`, and `NOT` were removed for this release. This means you no longer need to specify `IF NOT EXISTS` for `CREATE DATABASE` or `IF EXISTS` for `DROP DATABASE`. If these are specified, a query parse error is returned.
+* The Shard `writePointsFail` stat has been renamed to `writePointsErr` for consistency with other stats.
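(Editor's illustration, not part of the packaged diff: a hedged sketch of how the 1.0.0 configuration renames listed above would read in `influxdb.conf`; section and key names come from the notes, and the values shown are only the documented defaults.)

```toml
# Old names no longer accepted: [cluster], [collectd], [opentsdb],
# and data-logging-enabled inside [data].
[coordinator]                        # replaces [cluster]

[data]
  trace-logging-enabled = false      # renamed from data-logging-enabled
  max-series-per-database = 1000000  # new option; 0 disables the limit

[[collectd]]                         # replaces [collectd]
  enabled = false

[[opentsdb]]                         # replaces [opentsdb]
  enabled = false
```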
+ +With this release the systemd configuration files for InfluxDB will use the system configured default for logging and will no longer write files to `/var/log/influxdb` by default. On most systems, the logs will be directed to the systemd journal and can be accessed by `journalctl -u influxdb.service`. Consult the systemd journald documentation for configuring journald. + +### Features + +- [#3541](https://github.com/influxdata/influxdb/issues/3451): Update SHOW FIELD KEYS to return the field type with the field key. +- [#6609](https://github.com/influxdata/influxdb/pull/6609): Add support for JWT token authentication. +- [#6559](https://github.com/influxdata/influxdb/issues/6559): Teach the http service how to enforce connection limits. +- [#6623](https://github.com/influxdata/influxdb/pull/6623): Speed up drop database +- [#6519](https://github.com/influxdata/influxdb/issues/6519): Support cast syntax for selecting a specific type. +- [#6654](https://github.com/influxdata/influxdb/pull/6654): Add new HTTP statistics to monitoring +- [#6664](https://github.com/influxdata/influxdb/pull/6664): Adds monitoring statistic for on-disk shard size. +- [#2926](https://github.com/influxdata/influxdb/issues/2926): Support bound parameters in the parser. +- [#1310](https://github.com/influxdata/influxdb/issues/1310): Add https-private-key option to httpd config. +- [#6621](https://github.com/influxdata/influxdb/pull/6621): Add Holt-Winter forecasting function. +- [#6655](https://github.com/influxdata/influxdb/issues/6655): Add HTTP(s) based subscriptions. +- [#5906](https://github.com/influxdata/influxdb/issues/5906): Dynamically update the documentation link in the admin UI. +- [#6686](https://github.com/influxdata/influxdb/pull/6686): Optimize timestamp run-length decoding +- [#6713](https://github.com/influxdata/influxdb/pull/6713): Reduce allocations during query parsing. +- [#3733](https://github.com/influxdata/influxdb/issues/3733): Modify the default retention policy name and make it configurable. +- [#6812](https://github.com/influxdata/influxdb/pull/6812): Make httpd logger closer to Common (& combined) Log Format. +- [#5655](https://github.com/influxdata/influxdb/issues/5655): Support specifying a retention policy for the graphite service. +- [#6820](https://github.com/influxdata/influxdb/issues/6820): Add NodeID to execution options +- [#4532](https://github.com/influxdata/influxdb/issues/4532): Support regex selection in SHOW TAG VALUES for the key. +- [#6889](https://github.com/influxdata/influxdb/pull/6889): Update help and remove unused config options from the configuration file. +- [#6900](https://github.com/influxdata/influxdb/pull/6900): Trim BOM from Windows Notepad-saved config files. +- [#6938](https://github.com/influxdata/influxdb/issues/6938): Added favicon +- [#6507](https://github.com/influxdata/influxdb/issues/6507): Refactor monitor service to avoid expvar and write monitor statistics on a truncated time interval. +- [#6805](https://github.com/influxdata/influxdb/issues/6805): Allow any variant of the help option to trigger the help. +- [#5499](https://github.com/influxdata/influxdb/issues/5499): Add stats and diagnostics to the TSM engine. +- [#6959](https://github.com/influxdata/influxdb/issues/6959): Return 403 Forbidden when authentication succeeds but authorization fails. +- [#1110](https://github.com/influxdata/influxdb/issues/1110): Support loading a folder for collectd typesdb files. 
+- [#6928](https://github.com/influxdata/influxdb/issues/6928): Run continuous query for multiple buckets rather than one per bucket. +- [#5500](https://github.com/influxdata/influxdb/issues/5500): Add extra trace logging to tsm engine. +- [#6909](https://github.com/influxdata/influxdb/issues/6909): Log the CQ execution time when continuous query logging is enabled. +- [#7046](https://github.com/influxdata/influxdb/pull/7046): Add tsm file export to influx_inspect tool. +- [#7011](https://github.com/influxdata/influxdb/issues/7011): Create man pages for commands. +- [#7050](https://github.com/influxdata/influxdb/pull/7050): Update go package library dependencies. +- [#5750](https://github.com/influxdata/influxdb/issues/5750): Support wildcards in aggregate functions. +- [#7065](https://github.com/influxdata/influxdb/issues/7065): Remove IF EXISTS/IF NOT EXISTS from influxql language. +- [#7095](https://github.com/influxdata/influxdb/pull/7095): Add MaxSeriesPerDatabase config setting. +- [#7199](https://github.com/influxdata/influxdb/pull/7199): Add mode function. Thanks @agaurav. +- [#7194](https://github.com/influxdata/influxdb/issues/7194): Support negative timestamps for the query engine. +- [#7172](https://github.com/influxdata/influxdb/pull/7172): Write path stats + +### Bugfixes + +- [#6604](https://github.com/influxdata/influxdb/pull/6604): Remove old cluster code +- [#6618](https://github.com/influxdata/influxdb/pull/6618): Optimize shard loading +- [#6629](https://github.com/influxdata/influxdb/issues/6629): query-log-enabled in config not ignored anymore. +- [#6607](https://github.com/influxdata/influxdb/issues/6607): SHOW TAG VALUES accepts != and !~ in WHERE clause. +- [#6649](https://github.com/influxdata/influxdb/issues/6649): Make sure admin exists before authenticating query. +- [#6644](https://github.com/influxdata/influxdb/issues/6644): Print the query executor's stack trace on a panic to the log. +- [#6650](https://github.com/influxdata/influxdb/issues/6650): Data race when dropping a database immediately after writing to it +- [#6235](https://github.com/influxdata/influxdb/issues/6235): Fix measurement field panic in tsm1 engine. +- [#6663](https://github.com/influxdata/influxdb/issues/6663): Fixing panic in SHOW FIELD KEYS. +- [#6624](https://github.com/influxdata/influxdb/issues/6624): Ensure clients requesting gzip encoded bodies don't receive empty body +- [#6652](https://github.com/influxdata/influxdb/issues/6652): Fix panic: interface conversion: tsm1.Value is \*tsm1.StringValue, not \*tsm1.FloatValue +- [#6406](https://github.com/influxdata/influxdb/issues/6406): Max index entries exceeded +- [#6557](https://github.com/influxdata/influxdb/issues/6557): Overwriting points on large series can cause memory spikes during compactions +- [#6611](https://github.com/influxdata/influxdb/issues/6611): Queries slow down hundreds times after overwriting points +- [#6641](https://github.com/influxdata/influxdb/issues/6641): Fix read tombstones: EOF +- [#6661](https://github.com/influxdata/influxdb/issues/6661): Disable limit optimization when using an aggregate. +- [#6676](https://github.com/influxdata/influxdb/issues/6676): Ensures client sends correct precision when inserting points. +- [#2048](https://github.com/influxdata/influxdb/issues/2048): Check that retention policies exist before creating CQ +- [#6702](https://github.com/influxdata/influxdb/issues/6702): Fix SELECT statement required privileges. 
+- [#6701](https://github.com/influxdata/influxdb/issues/6701): Filter out sources that do not match the shard database/retention policy. +- [#6683](https://github.com/influxdata/influxdb/issues/6683): Fix compaction planning re-compacting large TSM files +- [#6693](https://github.com/influxdata/influxdb/pull/6693): Truncate the shard group end time if it exceeds MaxNanoTime. +- [#6672](https://github.com/influxdata/influxdb/issues/6672): Accept points with trailing whitespace. +- [#6599](https://github.com/influxdata/influxdb/issues/6599): Ensure that future points considered in SHOW queries. +- [#6720](https://github.com/influxdata/influxdb/issues/6720): Concurrent map read write panic. Thanks @arussellsaw +- [#6727](https://github.com/influxdata/influxdb/issues/6727): queries with strings that look like dates end up with date types, not string types +- [#6250](https://github.com/influxdata/influxdb/issues/6250): Slow startup time +- [#6753](https://github.com/influxdata/influxdb/issues/6753): Prevent panic if there are no values. +- [#6685](https://github.com/influxdata/influxdb/issues/6685): Batch SELECT INTO / CQ writes +- [#6756](https://github.com/influxdata/influxdb/issues/6756): Set X-Influxdb-Version header on every request (even 404 requests). +- [#6760](https://github.com/influxdata/influxdb/issues/6760): Prevent panic in concurrent auth cache write +- [#6771](https://github.com/influxdata/influxdb/issues/6771): Fix the point validation parser to identify and sort tags correctly. +- [#6835](https://github.com/influxdata/influxdb/pull/6835): Include sysvinit-tools as an rpm dependency. +- [#6834](https://github.com/influxdata/influxdb/pull/6834): Add port to all graphite log output to help with debugging multiple endpoints +- [#6850](https://github.com/influxdata/influxdb/pull/6850): Modify the max nanosecond time to be one nanosecond less. +- [#6824](https://github.com/influxdata/influxdb/issues/6824): Remove systemd output redirection. +- [#6859](https://github.com/influxdata/influxdb/issues/6859): Set the condition cursor instead of aux iterator when creating a nil condition cursor. +- [#6869](https://github.com/influxdata/influxdb/issues/6869): Remove FieldCodec from tsdb package. +- [#6882](https://github.com/influxdata/influxdb/pull/6882): Remove a double lock in the tsm1 index writer. +- [#6883](https://github.com/influxdata/influxdb/pull/6883): Rename dumptsmdev to dumptsm in influx_inspect. +- [#6864](https://github.com/influxdata/influxdb/pull/6864): Allow a non-admin to call "use" for the influx cli. +- [#6855](https://github.com/influxdata/influxdb/pull/6855): Update `stress/v2` to work with clusters, ssl, and username/password auth. Code cleanup +- [#6738](https://github.com/influxdata/influxdb/issues/6738): Time sorting broken with overwritten points +- [#6829](https://github.com/influxdata/influxdb/issues/6829): Fix panic: runtime error: index out of range +- [#6911](https://github.com/influxdata/influxdb/issues/6911): Fix fill(previous) when used with math operators. +- [#6934](https://github.com/influxdata/influxdb/pull/6934): Fix regex binary encoding for a measurement. +- [#6942](https://github.com/influxdata/influxdb/pull/6942): Fix panic: truncate the slice when merging the caches. +- [#6708](https://github.com/influxdata/influxdb/issues/6708): Drop writes from before the retention policy time window. +- [#6968](https://github.com/influxdata/influxdb/issues/6968): Always use the demo config when outputting a new config. 
+- [#6986](https://github.com/influxdata/influxdb/pull/6986): update connection settings when changing hosts in cli. +- [#6965](https://github.com/influxdata/influxdb/pull/6965): Minor improvements to init script. Removes sysvinit-utils as package dependency. +- [#6952](https://github.com/influxdata/influxdb/pull/6952): Fix compaction planning with large TSM files +- [#6819](https://github.com/influxdata/influxdb/issues/6819): Database unresponsive after DROP MEASUREMENT +- [#6796](https://github.com/influxdata/influxdb/issues/6796): Out of Memory Error when Dropping Measurement +- [#6946](https://github.com/influxdata/influxdb/issues/6946): Duplicate data for the same timestamp +- [#7043](https://github.com/influxdata/influxdb/pull/7043): Remove limiter from walkShards +- [#5501](https://github.com/influxdata/influxdb/issues/5501): Queries against files that have just been compacted need to point to new files +- [#6595](https://github.com/influxdata/influxdb/issues/6595): Fix full compactions conflicting with level compactions +- [#7081](https://github.com/influxdata/influxdb/issues/7081): Hardcode auto generated RP names to autogen +- [#7088](https://github.com/influxdata/influxdb/pull/7088): Fix UDP pointsRx being incremented twice. +- [#7080](https://github.com/influxdata/influxdb/pull/7080): Ensure IDs can't clash when managing Continuous Queries. +- [#6990](https://github.com/influxdata/influxdb/issues/6990): Fix panic parsing empty key +- [#7084](https://github.com/influxdata/influxdb/pull/7084): Tombstone memory improvements +- [#6543](https://github.com/influxdata/influxdb/issues/6543): Fix parseFill to check for fill ident before attempting to parse an expression. +- [#7032](https://github.com/influxdata/influxdb/pull/7032): Copy tags in influx_stress to avoid a concurrent write panic on a map. +- [#7028](https://github.com/influxdata/influxdb/pull/7028): Do not run continuous queries that have no time span. +- [#7025](https://github.com/influxdata/influxdb/issues/7025): Move the CQ interval by the group by offset. +- [#7125](https://github.com/influxdata/influxdb/pull/7125): Ensure gzip writer is closed in influx_inspect export +- [#7127](https://github.com/influxdata/influxdb/pull/7127): Concurrent series limit +- [#7119](https://github.com/influxdata/influxdb/pull/7119): Fix CREATE DATABASE when dealing with default values. +- [#7218](https://github.com/influxdata/influxdb/issues/7218): Fix alter retention policy when all options are used. +- [#7225](https://github.com/influxdata/influxdb/issues/7225): runtime: goroutine stack exceeds 1000000000-byte limit +- [#7240](https://github.com/influxdata/influxdb/issues/7240): Allow blank lines in the line protocol input. +- [#7119](https://github.com/influxdata/influxdb/pull/7119): Fix CREATE DATABASE when dealing with default values. +- [#7243](https://github.com/influxdata/influxdb/issues/7243): Optimize queries that compare a tag value to an empty string. +- [#7074](https://github.com/influxdata/influxdb/issues/7074): Continuous full compactions + +## v0.13.0 [2016-05-12] + +### Release Notes + +With this release InfluxDB is moving to Go v1.6. + +### Features + +- [#6213](https://github.com/influxdata/influxdb/pull/6213): Make logging output location more programmatically configurable. +- [#6237](https://github.com/influxdata/influxdb/issues/6237): Enable continuous integration testing on Windows platform via AppVeyor. Thanks @mvadu +- [#6263](https://github.com/influxdata/influxdb/pull/6263): Reduce UDP Service allocation size. 
+- [#6228](https://github.com/influxdata/influxdb/pull/6228): Support for multiple listeners for collectd and OpenTSDB inputs. +- [#6292](https://github.com/influxdata/influxdb/issues/6292): Allow percentile to be used as a selector. +- [#5707](https://github.com/influxdata/influxdb/issues/5707): Return a deprecated message when IF NOT EXISTS is used. +- [#6334](https://github.com/influxdata/influxdb/pull/6334): Allow environment variables to be set per input type. +- [#6394](https://github.com/influxdata/influxdb/pull/6394): Allow time math with integer timestamps. +- [#3247](https://github.com/influxdata/influxdb/issues/3247): Implement derivatives across intervals for aggregate queries. +- [#3166](https://github.com/influxdata/influxdb/issues/3166): Sort the series keys inside of a tag set so output is deterministic. +- [#1856](https://github.com/influxdata/influxdb/issues/1856): Add `elapsed` function that returns the time delta between subsequent points. +- [#5502](https://github.com/influxdata/influxdb/issues/5502): Add checksum verification to TSM inspect tool +- [#6444](https://github.com/influxdata/influxdb/pull/6444): Allow setting the config path through an environment variable and default config path. +- [#3558](https://github.com/influxdata/influxdb/issues/3558): Support field math inside a WHERE clause. +- [#6429](https://github.com/influxdata/influxdb/issues/6429): Log slow queries if they pass a configurable threshold. +- [#4675](https://github.com/influxdata/influxdb/issues/4675): Allow derivative() function to be used with ORDER BY desc. +- [#6483](https://github.com/influxdata/influxdb/pull/6483): Delete series support for TSM +- [#6484](https://github.com/influxdata/influxdb/pull/6484): Query language support for DELETE +- [#6290](https://github.com/influxdata/influxdb/issues/6290): Add POST /query endpoint and warning messages for using GET with write operations. +- [#6494](https://github.com/influxdata/influxdb/issues/6494): Support booleans for min() and max(). +- [#2074](https://github.com/influxdata/influxdb/issues/2074): Support offset argument in the GROUP BY time(...) call. +- [#6533](https://github.com/influxdata/influxdb/issues/6533): Optimize SHOW SERIES +- [#6534](https://github.com/influxdata/influxdb/pull/6534): Move to Go v1.6.2 (over Go v1.4.3) +- [#6522](https://github.com/influxdata/influxdb/pull/6522): Dump TSM files to line protocol +- [#6585](https://github.com/influxdata/influxdb/pull/6585): Parallelize iterators +- [#6502](https://github.com/influxdata/influxdb/pull/6502): Add ability to copy shard via rpc calls. Remove deprecated copier service. +- [#6593](https://github.com/influxdata/influxdb/pull/6593): Add ability to create snapshots of shards. + +### Bugfixes + +- [#6283](https://github.com/influxdata/influxdb/pull/6283): Fix GROUP BY tag to produce consistent results when a series has no tags. +- [#3773](https://github.com/influxdata/influxdb/issues/3773): Support empty tags for all WHERE equality operations. +- [#6270](https://github.com/influxdata/influxdb/issues/6270): tsm1 query engine alloc reduction +- [#6287](https://github.com/influxdata/influxdb/issues/6287): Fix data race in Influx Client. +- [#6252](https://github.com/influxdata/influxdb/pull/6252): Remove TSDB listener accept message @simnv +- [#6202](https://github.com/influxdata/influxdb/pull/6202): Check default SHARD DURATION when recreating the same database. +- [#6296](https://github.com/influxdata/influxdb/issues/6296): Allow the implicit time field to be renamed again. 
+- [#6294](https://github.com/influxdata/influxdb/issues/6294): Fix panic running influx_inspect info. +- [#6382](https://github.com/influxdata/influxdb/pull/6382): Removed dead code from the old query engine. +- [#3369](https://github.com/influxdata/influxdb/issues/3369): Detect when a timer literal will overflow or underflow the query engine. +- [#6398](https://github.com/influxdata/influxdb/issues/6398): Fix CREATE RETENTION POLICY parsing so it doesn't consume tokens it shouldn't. +- [#6425](https://github.com/influxdata/influxdb/pull/6425): Close idle tcp connections in HTTP client to prevent tcp conn leak. +- [#6109](https://github.com/influxdata/influxdb/issues/6109): Cache maximum memory size exceeded on startup +- [#6427](https://github.com/influxdata/influxdb/pull/6427): Fix setting uint config options via env vars +- [#6458](https://github.com/influxdata/influxdb/pull/6458): Make it clear when the CLI version is unknown. +- [#3883](https://github.com/influxdata/influxdb/issues/3883): Improve query sanitization to prevent a password leak in the logs. +- [#6462](https://github.com/influxdata/influxdb/pull/6462): Add safer locking to CreateFieldIfNotExists +- [#6361](https://github.com/influxdata/influxdb/pull/6361): Fix cluster/pool release of connection +- [#6470](https://github.com/influxdata/influxdb/pull/6470): Remove SHOW SERVERS & DROP SERVER support +- [#6477](https://github.com/influxdata/influxdb/pull/6477): Don't catch SIGQUIT or SIGHUP signals. +- [#6468](https://github.com/influxdata/influxdb/issues/6468): Panic with truncated wal segments +- [#6491](https://github.com/influxdata/influxdb/pull/6491): Fix the CLI not to enter an infinite loop when the liner has an error. +- [#6457](https://github.com/influxdata/influxdb/issues/6457): Retention policy cleanup does not remove series +- [#6477](https://github.com/influxdata/influxdb/pull/6477): Don't catch SIGQUIT or SIGHUP signals. +- [#6468](https://github.com/influxdata/influxdb/issues/6468): Panic with truncated wal segments +- [#6480](https://github.com/influxdata/influxdb/issues/6480): Fix SHOW statements' rewriting bug +- [#6505](https://github.com/influxdata/influxdb/issues/6505): Add regex literal to InfluxQL spec for FROM clause. +- [#5890](https://github.com/influxdata/influxdb/issues/5890): Return the time with a selector when there is no group by interval. +- [#6496](https://github.com/influxdata/influxdb/issues/6496): Fix parsing escaped series key when loading database index +- [#6495](https://github.com/influxdata/influxdb/issues/6495): Fix aggregate returns when data is missing from some shards. +- [#6439](https://github.com/influxdata/influxdb/issues/6439): Overwriting points returning old values +- [#6261](https://github.com/influxdata/influxdb/issues/6261): High CPU usage and slow query with DISTINCT + +## v0.12.2 [2016-04-20] + +### Bugfixes + +- [#6271](https://github.com/influxdata/influxdb/issues/6271): Fixed deadlock in tsm1 file store. +- [#6413](https://github.com/influxdata/influxdb/pull/6413): Prevent goroutine leak from persistent http connections. Thanks @aaronknister. +- [#6414](https://github.com/influxdata/influxdb/pull/6414): Send "Connection: close" header for queries. +- [#6419](https://github.com/influxdata/influxdb/issues/6419): Fix panic in transform iterator on division. @thbourlove +- [#6379](https://github.com/influxdata/influxdb/issues/6379): Validate the first argument to percentile() is a variable. 
+- [#6383](https://github.com/influxdata/influxdb/pull/6383): Recover from a panic during query execution.
+
+## v0.12.1 [2016-04-08]
+
+### Bugfixes
+
+- [#6225](https://github.com/influxdata/influxdb/pull/6225): Refresh admin assets.
+- [#6206](https://github.com/influxdata/influxdb/issues/6206): Handle nil values from the tsm1 cursor correctly.
+- [#6190](https://github.com/influxdata/influxdb/pull/6190): Fix race on measurementFields.
+- [#6248](https://github.com/influxdata/influxdb/issues/6248): Panic using incorrectly quoted "queries" field key.
+- [#6257](https://github.com/influxdata/influxdb/issues/6257): CreateShardGroup was incrementing meta data index even when it was idempotent.
+- [#6223](https://github.com/influxdata/influxdb/issues/6223): Failure to start/run on Windows. Thanks @mvadu
+- [#6229](https://github.com/influxdata/influxdb/issues/6229): Fixed aggregate queries with no GROUP BY to include the end time.
+
+
+## v0.12.0 [2016-04-05]
+### Release Notes
+Upgrading to this release requires a little more than just installing the new binary and starting it up. The upgrade process is very quick and should only require a minute of downtime or less. Details on [upgrading to 0.12 are here](https://docs.influxdata.com/influxdb/v0.12/administration/upgrading/).
+
+This release removes all of the old clustering code. It operates as a standalone server. For a free open source HA setup see the [InfluxDB Relay](https://github.com/influxdata/influxdb-relay).
+
+### Features
+
+- [#6012](https://github.com/influxdata/influxdb/pull/6012): Add DROP SHARD support.
+- [#6025](https://github.com/influxdata/influxdb/pull/6025): Remove deprecated JSON write path.
+- [#5744](https://github.com/influxdata/influxdb/issues/5744): Add integer literal support to the query language.
+- [#5939](https://github.com/influxdata/influxdb/issues/5939): Support viewing and killing running queries.
+- [#6073](https://github.com/influxdata/influxdb/pull/6073): Iterator stats
+- [#6079](https://github.com/influxdata/influxdb/issues/6079): Limit the maximum number of concurrent queries.
+- [#6075](https://github.com/influxdata/influxdb/issues/6075): Limit the maximum running time of a query.
+- [#6102](https://github.com/influxdata/influxdb/issues/6102): Limit series count in selection
+- [#6077](https://github.com/influxdata/influxdb/issues/6077): Limit point count in selection.
+- [#6078](https://github.com/influxdata/influxdb/issues/6078): Limit bucket count in selection.
+- [#6060](https://github.com/influxdata/influxdb/pull/6060): Add configurable shard duration to retention policies
+- [#6116](https://github.com/influxdata/influxdb/pull/6116): Allow `httpd` service to be extensible for routes
+- [#6111](https://github.com/influxdata/influxdb/pull/6111): Add ability to build static assets. Improved handling of TAR and ZIP package outputs.
+- [#1825](https://github.com/influxdata/influxdb/issues/1825): Implement difference function.
+- [#6112](https://github.com/influxdata/influxdb/issues/6112): Implement simple moving average function.
+- [#6149](https://github.com/influxdata/influxdb/pull/6149): Kill running queries when server is shut down.
+- [#5372](https://github.com/influxdata/influxdb/pull/5372): Faster shard loading
+- [#6148](https://github.com/influxdata/influxdb/pull/6148): Build script is now compatible with Python 3. Added ability to create detached signatures for packages. Build script now uses Python logging facility for messages.
+- [#6115](https://github.com/influxdata/influxdb/issues/6115): Support chunking query results mid-series. Limit non-chunked output.
+- [#6166](https://github.com/influxdata/influxdb/pull/6166): Teach influxdb client how to use chunked queries and use in the CLI.
+- [#6158](https://github.com/influxdata/influxdb/pull/6158): Update influxd to detect an upgrade from `0.11` to `0.12`. Minor restore bug fixes.
+- [#6193](https://github.com/influxdata/influxdb/pull/6193): Fix TypeError when processing empty results in admin UI. Thanks @jonseymour!
+
+### Bugfixes
+
+- [#5152](https://github.com/influxdata/influxdb/issues/5152): Fix where filters when a tag and a filter are combined with OR.
+- [#5728](https://github.com/influxdata/influxdb/issues/5728): Properly handle semi-colons as part of the main query loop.
+- [#6065](https://github.com/influxdata/influxdb/pull/6065): Wait for a process termination on influxdb restart @simnv
+- [#5252](https://github.com/influxdata/influxdb/issues/5252): Release tarballs contain specific attributes on '.'
+- [#5554](https://github.com/influxdata/influxdb/issues/5554): Can't run in alpine linux
+- [#6094](https://github.com/influxdata/influxdb/issues/6094): Ensure CREATE RETENTION POLICY and CREATE CONTINUOUS QUERY are idempotent in the correct way.
+- [#6061](https://github.com/influxdata/influxdb/issues/6061): [0.12 / master] POST to /write does not write points if request has header 'Content-Type: application/x-www-form-urlencoded'
+- [#6140](https://github.com/influxdata/influxdb/issues/6140): Ensure Shard engine not accessed when closed.
+- [#6110](https://github.com/influxdata/influxdb/issues/6110): Fix for 0.9 upgrade path when using RPM
+- [#6131](https://github.com/influxdata/influxdb/issues/6061): Fix write throughput regression with large number of measurements
+- [#6152](https://github.com/influxdata/influxdb/issues/6152): Allow SHARD DURATION to be specified in isolation when creating a database
+- [#6153](https://github.com/influxdata/influxdb/issues/6153): Check SHARD DURATION when recreating the same database
+- [#6178](https://github.com/influxdata/influxdb/issues/6178): Ensure SHARD DURATION is checked when recreating a retention policy
+
+## v0.11.1 [2016-03-31]
+
+### Bugfixes
+
+- [#6092](https://github.com/influxdata/influxdb/issues/6092): Upgrading directly from 0.9.6.1 to 0.11.0 fails
+- [#6129](https://github.com/influxdata/influxdb/pull/6129): Fix default continuous query lease host
+- [#6121](https://github.com/influxdata/influxdb/issues/6121): Fix panic: slice index out of bounds in TSM index
+- [#6168](https://github.com/influxdata/influxdb/pull/6168): Remove per measurement statistics
+- [#3932](https://github.com/influxdata/influxdb/issues/3932): Invalid timestamp format should throw an error.
+
+## v0.11.0 [2016-03-22]
+
+### Release Notes
+
+There were some important breaking changes in this release. Here's a list of the important things to know before upgrading:
+
+* [SHOW SERIES output has changed](https://github.com/influxdata/influxdb/pull/5937). See [new output in this test diff](https://github.com/influxdata/influxdb/pull/5937/files#diff-0cb24c2b7420b4db507ee3496c371845L263).
+* [SHOW TAG VALUES output has changed](https://github.com/influxdata/influxdb/pull/5853)
+* JSON write endpoint is disabled by default and will be removed in the next release. You can [turn it back on](https://github.com/influxdata/influxdb/pull/5512) in this release.
+* b1/bz1 shards are no longer supported.
You must migrate all old shards to TSM using [the migration tool](https://github.com/influxdata/influxdb/blob/master/cmd/influx_tsm/README.md). +* On queries to create databases, retention policies, and users, the default behavior has changed to create `IF NOT EXISTS`. If they already exist, no error will be returned. +* On queries with a selector like `min`, `max`, `first`, and `last` the time returned will be the time for the bucket of the group by window. [Selectors for the time for the specific point](https://github.com/influxdata/influxdb/issues/5926) will be added later. + +### Features + +- [#5596](https://github.com/influxdata/influxdb/pull/5596): Build improvements for ARM architectures. Also removed `--goarm` and `--pkgarch` build flags. +- [#5541](https://github.com/influxdata/influxdb/pull/5541): Client: Support for adding custom TLS Config for HTTP client. +- [#4299](https://github.com/influxdata/influxdb/pull/4299): Client: Reject uint64 Client.Point.Field values. Thanks @arussellsaw +- [#5550](https://github.com/influxdata/influxdb/pull/5550): Enabled golint for tsdb/engine/wal. @gabelev +- [#5419](https://github.com/influxdata/influxdb/pull/5419): Graphite: Support matching tags multiple times Thanks @m4ce +- [#5598](https://github.com/influxdata/influxdb/pull/5598): Client: Add Ping to v2 client @PSUdaemon +- [#4125](https://github.com/influxdata/influxdb/pull/4125): Admin UI: Fetch and display server version on connect. Thanks @alexiri! +- [#5681](https://github.com/influxdata/influxdb/pull/5681): Stats: Add durations, number currently active to httpd and query executor +- [#5602](https://github.com/influxdata/influxdb/pull/5602): Simplify cluster startup for scripting and deployment +- [#5562](https://github.com/influxdata/influxdb/pull/5562): Graphite: Support matching fields multiple times (@chrusty) +- [#5666](https://github.com/influxdata/influxdb/pull/5666): Manage dependencies with gdm +- [#5512](https://github.com/influxdata/influxdb/pull/5512): HTTP: Add config option to enable HTTP JSON write path which is now disabled by default. +- [#5336](https://github.com/influxdata/influxdb/pull/5366): Enabled golint for influxql. @gabelev +- [#5706](https://github.com/influxdata/influxdb/pull/5706): Cluster setup cleanup +- [#5691](https://github.com/influxdata/influxdb/pull/5691): Remove associated shard data when retention policies are dropped. +- [#5758](https://github.com/influxdata/influxdb/pull/5758): TSM engine stats for cache, WAL, and filestore. Thanks @jonseymour +- [#5844](https://github.com/influxdata/influxdb/pull/5844): Tag TSM engine stats with database and retention policy +- [#5593](https://github.com/influxdata/influxdb/issues/5593): Modify `SHOW TAG VALUES` output for the new query engine to normalize the output. +- [#5862](https://github.com/influxdata/influxdb/pull/5862): Make Admin UI dynamically fetch both client and server versions +- [#2715](https://github.com/influxdata/influxdb/issues/2715): Support using field regex comparisons in the WHERE clause +- [#5994](https://github.com/influxdata/influxdb/issues/5994): Single server +- [#5737](https://github.com/influxdata/influxdb/pull/5737): Admin UI: Display results of multiple queries, not just the first query. Thanks @Vidhuran! 
+- [#5720](https://github.com/influxdata/influxdb/pull/5720): Admin UI: New button to generate permalink to queries + +### Bugfixes + +- [#5182](https://github.com/influxdata/influxdb/pull/5182): Graphite: Fix an issue where the default template would be used instead of a more specific one. Thanks @flisky +- [#5489](https://github.com/influxdata/influxdb/pull/5489): Fixes multiple issues causing tests to fail on windows. Thanks @runner-mei +- [#5594](https://github.com/influxdata/influxdb/pull/5594): Fix missing url params on lease redirect - @oldmantaiter +- [#5376](https://github.com/influxdata/influxdb/pull/5376): Fix golint issues in models package. @nuss-justin +- [#5535](https://github.com/influxdata/influxdb/pull/5535): Update README for referring to Collectd +- [#5590](https://github.com/influxdata/influxdb/pull/5590): Fix panic when dropping subscription for unknown retention policy. +- [#5375](https://github.com/influxdata/influxdb/pull/5375): Lint tsdb and tsdb/engine package @nuss-justin +- [#5624](https://github.com/influxdata/influxdb/pull/5624): Fix golint issues in client v2 package @PSUDaemon +- [#5510](https://github.com/influxdata/influxdb/pull/5510): Optimize ReducePercentile @bsideup +- [#5557](https://github.com/influxdata/influxdb/issues/5630): Fixes panic when surrounding the select statement arguments in brackets +- [#5628](https://github.com/influxdata/influxdb/issues/5628): Crashed the server with a bad derivative query +- [#5532](https://github.com/influxdata/influxdb/issues/5532): user passwords not changeable in cluster +- [#5695](https://github.com/influxdata/influxdb/pull/5695): Remove meta servers from node.json +- [#5606](https://github.com/influxdata/influxdb/issues/5606): TSM conversion reproducibly drops data silently +- [#5656](https://github.com/influxdata/influxdb/issues/5656): influx\_tsm: panic during conversion +- [#5696](https://github.com/influxdata/influxdb/issues/5696): Do not drop the database when creating with a retention policy +- [#5724](https://github.com/influxdata/influxdb/issues/5724): influx\_tsm doesn't close file handles properly +- [#5664](https://github.com/influxdata/influxdb/issues/5664): panic in model.Points.scanTo #5664 +- [#5716](https://github.com/influxdata/influxdb/pull/5716): models: improve handling of points with empty field names or with no fields. +- [#5719](https://github.com/influxdata/influxdb/issues/5719): Fix cache not deduplicating points +- [#5754](https://github.com/influxdata/influxdb/issues/5754): Adding a node as meta only results in a data node also being registered +- [#5787](https://github.com/influxdata/influxdb/pull/5787): HTTP: Add QueryAuthorizer instance to httpd service’s handler. 
@chris-ramon +- [#5753](https://github.com/influxdata/influxdb/pull/5753): Ensures that drop-type commands work correctly in a cluster +- [#5814](https://github.com/influxdata/influxdb/issues/5814): Run CQs with the same name from different databases +- [#5699](https://github.com/influxdata/influxdb/issues/5699): Fix potential thread safety issue in cache @jonseymour +- [#5832](https://github.com/influxdata/influxdb/issues/5832): tsm: cache: need to check that snapshot has been sorted @jonseymour +- [#5841](https://github.com/influxdata/influxdb/pull/5841): Reduce tsm allocations by converting time.Time to int64 +- [#5842](https://github.com/influxdata/influxdb/issues/5842): Add SeriesList binary marshaling +- [#5854](https://github.com/influxdata/influxdb/issues/5854): failures of tests in tsdb/engine/tsm1 when compiled with go master +- [#5610](https://github.com/influxdata/influxdb/issues/5610): Write into fully-replicated cluster is not replicated across all shards +- [#5880](https://github.com/influxdata/influxdb/issues/5880): TCP connection closed after write (regression/change from 0.9.6) +- [#5865](https://github.com/influxdata/influxdb/issues/5865): Conversion to tsm fails with exceeds max index value +- [#5924](https://github.com/influxdata/influxdb/issues/5924): Missing data after using influx\_tsm +- [#5937](https://github.com/influxdata/influxdb/pull/5937): Rewrite SHOW SERIES to use query engine +- [#5949](https://github.com/influxdata/influxdb/issues/5949): Return error message when improper types are used in SELECT +- [#5963](https://github.com/influxdata/influxdb/pull/5963): Fix possible deadlock +- [#4688](https://github.com/influxdata/influxdb/issues/4688): admin UI doesn't display results for some SHOW queries +- [#6006](https://github.com/influxdata/influxdb/pull/6006): Fix deadlock while running backups +- [#5965](https://github.com/influxdata/influxdb/issues/5965): InfluxDB panic crashes while parsing "-" as Float +- [#5835](https://github.com/influxdata/influxdb/issues/5835): Make CREATE USER default to IF NOT EXISTS +- [#6042](https://github.com/influxdata/influxdb/issues/6042): CreateDatabase failure on Windows, regression from v0.11.0 RC @mvadu +- [#5889](https://github.com/influxdata/influxdb/issues/5889): Fix writing partial TSM index when flush file fails + +## v0.10.3 [2016-03-09] + +### Bugfixes + +- [#5924](https://github.com/influxdata/influxdb/issues/5924): Missing data after using influx\_tsm +- [#5594](https://github.com/influxdata/influxdb/pull/5594): Fix missing url params on lease redirect - @oldmantaiter +- [#5716](https://github.com/influxdata/influxdb/pull/5716): models: improve handling of points with empty field names or with no fields. + +## v0.10.2 [2016-03-03] + +### Bugfixes + +- [#5719](https://github.com/influxdata/influxdb/issues/5719): Fix cache not deduplicating points +- [#5699](https://github.com/influxdata/influxdb/issues/5699): Fix potential thread safety issue in cache @jonseymour +- [#5832](https://github.com/influxdata/influxdb/issues/5832): tsm: cache: need to check that snapshot has been sorted @jonseymour +- [#5857](https://github.com/influxdata/influxdb/issues/5857): panic in tsm1.Values.Deduplicate +- [#5861](https://github.com/influxdata/influxdb/pull/5861): Fix panic when dropping subscription for unknown retention policy. 
+- [#5880](https://github.com/influxdata/influxdb/issues/5880): TCP connection closed after write (regression/change from 0.9.6)
+- [#5865](https://github.com/influxdata/influxdb/issues/5865): Conversion to tsm fails with exceeds max index value
+
+## v0.10.1 [2016-02-18]
+
+### Bugfixes
+- [#5696](https://github.com/influxdata/influxdb/issues/5696): Do not drop the database when creating with a retention policy
+- [#5724](https://github.com/influxdata/influxdb/issues/5724): influx\_tsm doesn't close file handles properly
+- [#5606](https://github.com/influxdata/influxdb/issues/5606): TSM conversion reproducibly drops data silently
+- [#5656](https://github.com/influxdata/influxdb/issues/5656): influx\_tsm: panic during conversion
+- [#5303](https://github.com/influxdata/influxdb/issues/5303): Protect against stateful mappers returning nothing in the raw executor
+
+## v0.10.0 [2016-02-04]
+
+### Release Notes
+
+This release now uses the TSM storage engine. Old bz1 and b1 shards can still be read, but in a future release you will be required to migrate old shards to TSM. For new shards getting created, or new installations, the TSM storage engine will be used.
+
+This release also changes how clusters are set up. The config file has changed, so have a look at the new example. Also, upgrading a single node works, but for upgrading clusters, you'll need help from us. Send us a note at contact@influxdb.com if you need assistance upgrading a cluster.
+
+### Features
+- [#5183](https://github.com/influxdata/influxdb/pull/5183): CLI confirms database exists when USE executed. Thanks @pires
+- [#5201](https://github.com/influxdata/influxdb/pull/5201): Allow max UDP buffer size to be configurable. Thanks @sebito91
+- [#5194](https://github.com/influxdata/influxdb/pull/5194): Custom continuous query options per query rather than per node.
+- [#5224](https://github.com/influxdata/influxdb/pull/5224): Online backup/incremental backup. Restore (for TSM).
 - [#5226](https://github.com/influxdata/influxdb/pull/5226): b\*1 to tsm1 shard conversion tool.
 - [#5459](https://github.com/influxdata/influxdb/pull/5459): Create `/status` endpoint for health checks.
 - [#5460](https://github.com/influxdata/influxdb/pull/5460): Prevent exponential growth in CLI history. Thanks @sczk!
+- [#5522](https://github.com/influxdata/influxdb/pull/5522): Optimize tsm1 cache to reduce memory consumption and GC scan time.
+- [#5565](https://github.com/influxdata/influxdb/pull/5565): Add configuration for time precision with UDP services. - @tpitale
+- [#5226](https://github.com/influxdata/influxdb/pull/5226): b*1 to tsm1 shard conversion tool.
 
 ### Bugfixes
 
 - [#5129](https://github.com/influxdata/influxdb/pull/5129): Ensure precision flag is respected by CLI. Thanks @e-dard
-- [#5042](https://github.com/influxdb/influxdb/issues/5042): Count with fill(none) will drop 0 valued intervals.
-- [#4735](https://github.com/influxdb/influxdb/issues/4735): Fix panic when merging empty results.
-- [#5016](https://github.com/influxdb/influxdb/pull/5016): Don't panic if Meta data directory not writable. Thanks @oiooj
-- [#5059](https://github.com/influxdb/influxdb/pull/5059): Fix unmarshal of database error by client code. Thanks @farshidtz
-- [#4940](https://github.com/influxdb/influxdb/pull/4940): Fix distributed aggregate query query error. Thanks @li-ang
-- [#4622](https://github.com/influxdb/influxdb/issues/4622): Fix panic when passing too large of timestamps to OpenTSDB input.
-- [#5064](https://github.com/influxdb/influxdb/pull/5064): Full support for parenthesis in SELECT clause, fixes [#5054](https://github.com/influxdb/influxdb/issues/5054). Thanks @mengjinglei
-- [#5079](https://github.com/influxdb/influxdb/pull/5079): Ensure tsm WAL encoding buffer can handle large batches.
-- [#4303](https://github.com/influxdb/influxdb/issues/4303): Don't drop measurements or series from multiple databases.
-- [#5078](https://github.com/influxdb/influxdb/issues/5078): influx non-interactive mode - INSERT must be handled. Thanks @grange74
-- [#5178](https://github.com/influxdb/influxdb/pull/5178): SHOW FIELD shouldn't consider VALUES to be valid. Thanks @pires
-- [#5158](https://github.com/influxdb/influxdb/pull/5158): Fix panic when writing invalid input to the line protocol.
+- [#5042](https://github.com/influxdata/influxdb/issues/5042): Count with fill(none) will drop 0 valued intervals.
+- [#4735](https://github.com/influxdata/influxdb/issues/4735): Fix panic when merging empty results.
+- [#5016](https://github.com/influxdata/influxdb/pull/5016): Don't panic if Meta data directory not writable. Thanks @oiooj
+- [#5059](https://github.com/influxdata/influxdb/pull/5059): Fix unmarshal of database error by client code. Thanks @farshidtz
+- [#4940](https://github.com/influxdata/influxdb/pull/4940): Fix distributed aggregate query error. Thanks @li-ang
+- [#4622](https://github.com/influxdata/influxdb/issues/4622): Fix panic when passing too large of timestamps to OpenTSDB input.
+- [#5064](https://github.com/influxdata/influxdb/pull/5064): Full support for parenthesis in SELECT clause, fixes [#5054](https://github.com/influxdata/influxdb/issues/5054). Thanks @mengjinglei
+- [#5079](https://github.com/influxdata/influxdb/pull/5079): Ensure tsm WAL encoding buffer can handle large batches.
+- [#4303](https://github.com/influxdata/influxdb/issues/4303): Don't drop measurements or series from multiple databases.
+- [#5078](https://github.com/influxdata/influxdb/issues/5078): influx non-interactive mode - INSERT must be handled. Thanks @grange74
+- [#5178](https://github.com/influxdata/influxdb/pull/5178): SHOW FIELD shouldn't consider VALUES to be valid. Thanks @pires
+- [#5158](https://github.com/influxdata/influxdb/pull/5158): Fix panic when writing invalid input to the line protocol.
 - [#5264](https://github.com/influxdata/influxdb/pull/5264): Fix panic: runtime error: slice bounds out of range
-- [#5186](https://github.com/influxdata/influxdb/pull/5186): Fix database creation with retention statement parsing. Fixes [#5077](https://github.com/influxdb/influxdb/issues/5077). Thanks @pires
+- [#5186](https://github.com/influxdata/influxdb/pull/5186): Fix database creation with retention statement parsing. Fixes [#5077](https://github.com/influxdata/influxdb/issues/5077). Thanks @pires
 - [#5193](https://github.com/influxdata/influxdb/issues/5193): Missing data a minute before current time. Comes back later.
 - [#5350](https://github.com/influxdata/influxdb/issues/5350): 'influxd backup' should create backup directory
 - [#5262](https://github.com/influxdata/influxdb/issues/5262): Fix a panic when a tag value was empty.
@@ -36,49 +577,50 @@
 - [#5478](https://github.com/influxdata/influxdb/issues/5478): panic: interface conversion: interface is float64, not int64
 - [#5475](https://github.com/influxdata/influxdb/issues/5475): Ensure appropriate exit code returned for non-interactive use of CLI.
 - [#5479](https://github.com/influxdata/influxdb/issues/5479): Bringing up a node as a meta only node causes panic
-- [#5504](https://github.com/influxdata/influxdb/issues/5475): create retention policy on unexistant DB crash InfluxDB
+- [#5504](https://github.com/influxdata/influxdb/issues/5504): create retention policy on unexistant DB crash InfluxDB
 - [#5505](https://github.com/influxdata/influxdb/issues/5505): Clear authCache in meta.Client when password changes.
+- [#5244](https://github.com/influxdata/influxdb/issues/5244): panic: ensure it's safe to close engine multiple times.
 
 ## v0.9.6 [2015-12-09]
 
 ### Release Notes
 This release has an updated design and implementation of the TSM storage engine. If you had been using tsm1 as your storage engine prior to this release (either 0.9.5.x or 0.9.6 nightly builds) you will have to start with a fresh database.
 
-If you had TSM configuration options set, those have been updated. See the the updated sample configuration for more details: https://github.com/influxdb/influxdb/blob/master/etc/config.sample.toml#L98-L125
+If you had TSM configuration options set, those have been updated. See the updated sample configuration for more details: https://github.com/influxdata/influxdb/blob/master/etc/config.sample.toml#L98-L125
 
 ### Features
-- [#4790](https://github.com/influxdb/influxdb/pull/4790): Allow openTSDB point-level error logging to be disabled
-- [#4728](https://github.com/influxdb/influxdb/pull/4728): SHOW SHARD GROUPS. By @mateuszdyminski
-- [#4841](https://github.com/influxdb/influxdb/pull/4841): Improve point parsing speed. Lint models pacakge. Thanks @e-dard!
-- [#4889](https://github.com/influxdb/influxdb/pull/4889): Implement close notifier and timeout on executors
-- [#2676](https://github.com/influxdb/influxdb/issues/2676), [#4866](https://github.com/influxdb/influxdb/pull/4866): Add support for specifying default retention policy in database create. Thanks @pires!
-- [#4848](https://github.com/influxdb/influxdb/pull/4848): Added framework for cluster integration testing.
-- [#4872](https://github.com/influxdb/influxdb/pull/4872): Add option to disable logging for meta service.
-- [#4787](https://github.com/influxdb/influxdb/issues/4787): Now builds on Solaris
-
-### Bugfixes
-- [#4849](https://github.com/influxdb/influxdb/issues/4849): Derivative works with count, mean, median, sum, first, last, max, min, and percentile.
-- [#4984](https://github.com/influxdb/influxdb/pull/4984): Allow math on fields, fixes regression. Thanks @mengjinglei
-- [#4666](https://github.com/influxdb/influxdb/issues/4666): Fix panic in derivative with invalid values.
-- [#4404](https://github.com/influxdb/influxdb/issues/4404): Return better error for currently unsupported DELETE queries.
-- [#4858](https://github.com/influxdb/influxdb/pull/4858): Validate nested aggregations in queries. Thanks @viru
-- [#4921](https://github.com/influxdb/influxdb/pull/4921): Error responses should be JSON-formatted. Thanks @pires
-- [#4974](https://github.com/influxdb/influxdb/issues/4974) Fix Data Race in TSDB when setting measurement field name
-- [#4876](https://github.com/influxdb/influxdb/pull/4876): Complete lint for monitor and services packages. Thanks @e-dard!
-- [#4833](https://github.com/influxdb/influxdb/pull/4833), [#4927](https://github.com/influxdb/influxdb/pull/4927): Fix SHOW MEASURMENTS for clusters. Thanks @li-ang!
-- [#4918](https://github.com/influxdb/influxdb/pull/4918): Restore can hang, Fix [issue #4806](https://github.com/influxdb/influxdb/issues/4806). Thanks @oiooj
-- [#4855](https://github.com/influxdb/influxdb/pull/4855): Fix race in TCP proxy shutdown. Thanks @runner-mei!
-- [#4411](https://github.com/influxdb/influxdb/pull/4411): Add Access-Control-Expose-Headers to HTTP responses
-- [#4768](https://github.com/influxdb/influxdb/pull/4768): CLI history skips blank lines. Thanks @pires
-- [#4766](https://github.com/influxdb/influxdb/pull/4766): Update CLI usage output. Thanks @aneshas
-- [#4804](https://github.com/influxdb/influxdb/pull/4804): Complete lint for services/admin. Thanks @nii236
-- [#4796](https://github.com/influxdb/influxdb/pull/4796): Check point without fields. Thanks @CrazyJvm
-- [#4815](https://github.com/influxdb/influxdb/pull/4815): Added `Time` field into aggregate output across the cluster. Thanks @li-ang
-- [#4817](https://github.com/influxdb/influxdb/pull/4817): Fix Min,Max,Top,Bottom function when query distributed node. Thanks @mengjinglei
-- [#4878](https://github.com/influxdb/influxdb/pull/4878): Fix String() function for several InfluxQL statement types
-- [#4913](https://github.com/influxdb/influxdb/pull/4913): Fix b1 flush deadlock
-- [#3170](https://github.com/influxdb/influxdb/issues/3170), [#4921](https://github.com/influxdb/influxdb/pull/4921): Database does not exist error is now JSON. Thanks @pires!
-- [#5029](https://github.com/influxdb/influxdb/pull/5029): Drop UDP point on bad parse.
+- [#4790](https://github.com/influxdata/influxdb/pull/4790): Allow openTSDB point-level error logging to be disabled
+- [#4728](https://github.com/influxdata/influxdb/pull/4728): SHOW SHARD GROUPS. By @mateuszdyminski
+- [#4841](https://github.com/influxdata/influxdb/pull/4841): Improve point parsing speed. Lint models package. Thanks @e-dard!
+- [#4889](https://github.com/influxdata/influxdb/pull/4889): Implement close notifier and timeout on executors
+- [#2676](https://github.com/influxdata/influxdb/issues/2676), [#4866](https://github.com/influxdata/influxdb/pull/4866): Add support for specifying default retention policy in database create. Thanks @pires!
+- [#4848](https://github.com/influxdata/influxdb/pull/4848): Added framework for cluster integration testing.
+- [#4872](https://github.com/influxdata/influxdb/pull/4872): Add option to disable logging for meta service.
+- [#4787](https://github.com/influxdata/influxdb/issues/4787): Now builds on Solaris
+
+### Bugfixes
+- [#4849](https://github.com/influxdata/influxdb/issues/4849): Derivative works with count, mean, median, sum, first, last, max, min, and percentile.
+- [#4984](https://github.com/influxdata/influxdb/pull/4984): Allow math on fields, fixes regression. Thanks @mengjinglei
+- [#4666](https://github.com/influxdata/influxdb/issues/4666): Fix panic in derivative with invalid values.
+- [#4404](https://github.com/influxdata/influxdb/issues/4404): Return better error for currently unsupported DELETE queries.
+- [#4858](https://github.com/influxdata/influxdb/pull/4858): Validate nested aggregations in queries. Thanks @viru
+- [#4921](https://github.com/influxdata/influxdb/pull/4921): Error responses should be JSON-formatted. Thanks @pires
+- [#4974](https://github.com/influxdata/influxdb/issues/4974) Fix Data Race in TSDB when setting measurement field name
+- [#4876](https://github.com/influxdata/influxdb/pull/4876): Complete lint for monitor and services packages. Thanks @e-dard!
+- [#4833](https://github.com/influxdata/influxdb/pull/4833), [#4927](https://github.com/influxdata/influxdb/pull/4927): Fix SHOW MEASUREMENTS for clusters. Thanks @li-ang!
+- [#4918](https://github.com/influxdata/influxdb/pull/4918): Restore can hang; fix [issue #4806](https://github.com/influxdata/influxdb/issues/4806). Thanks @oiooj
+- [#4855](https://github.com/influxdata/influxdb/pull/4855): Fix race in TCP proxy shutdown. Thanks @runner-mei!
+- [#4411](https://github.com/influxdata/influxdb/pull/4411): Add Access-Control-Expose-Headers to HTTP responses
+- [#4768](https://github.com/influxdata/influxdb/pull/4768): CLI history skips blank lines. Thanks @pires
+- [#4766](https://github.com/influxdata/influxdb/pull/4766): Update CLI usage output. Thanks @aneshas
+- [#4804](https://github.com/influxdata/influxdb/pull/4804): Complete lint for services/admin. Thanks @nii236
+- [#4796](https://github.com/influxdata/influxdb/pull/4796): Check point without fields. Thanks @CrazyJvm
+- [#4815](https://github.com/influxdata/influxdb/pull/4815): Added `Time` field into aggregate output across the cluster. Thanks @li-ang
+- [#4817](https://github.com/influxdata/influxdb/pull/4817): Fix Min,Max,Top,Bottom function when query distributed node. Thanks @mengjinglei
+- [#4878](https://github.com/influxdata/influxdb/pull/4878): Fix String() function for several InfluxQL statement types
+- [#4913](https://github.com/influxdata/influxdb/pull/4913): Fix b1 flush deadlock
+- [#3170](https://github.com/influxdata/influxdb/issues/3170), [#4921](https://github.com/influxdata/influxdb/pull/4921): Database does not exist error is now JSON. Thanks @pires!
+- [#5029](https://github.com/influxdata/influxdb/pull/5029): Drop UDP point on bad parse.

## v0.9.5 [2015-11-20]

@@ -95,119 +637,119 @@
 - Scripts are now located in `/usr/lib/influxdb/scripts` (previously `/opt/influxdb`)

### Features
-- [#4098](https://github.com/influxdb/influxdb/pull/4702): Support 'history' command at CLI
-- [#4098](https://github.com/influxdb/influxdb/issues/4098): Enable `golint` on the code base - uuid subpackage
-- [#4141](https://github.com/influxdb/influxdb/pull/4141): Control whether each query should be logged
-- [#4065](https://github.com/influxdb/influxdb/pull/4065): Added precision support in cmd client. Thanks @sbouchex
-- [#4140](https://github.com/influxdb/influxdb/pull/4140): Make storage engine configurable
-- [#4161](https://github.com/influxdb/influxdb/pull/4161): Implement bottom selector function
-- [#4204](https://github.com/influxdb/influxdb/pull/4204): Allow module-level selection for SHOW STATS
-- [#4208](https://github.com/influxdb/influxdb/pull/4208): Allow module-level selection for SHOW DIAGNOSTICS
-- [#4196](https://github.com/influxdb/influxdb/pull/4196): Export tsdb.Iterator
-- [#4198](https://github.com/influxdb/influxdb/pull/4198): Add basic cluster-service stats
-- [#4262](https://github.com/influxdb/influxdb/pull/4262): Allow configuration of UDP retention policy
-- [#4265](https://github.com/influxdb/influxdb/pull/4265): Add statistics for Hinted-Handoff
-- [#4284](https://github.com/influxdb/influxdb/pull/4284): Add exponential backoff for hinted-handoff failures
-- [#4310](https://github.com/influxdb/influxdb/pull/4310): Support dropping non-Raft nodes. Work mostly by @corylanou
-- [#4348](https://github.com/influxdb/influxdb/pull/4348): Public ApplyTemplate function for graphite parser.
-- [#4178](https://github.com/influxdb/influxdb/pull/4178): Support fields in graphite parser. Thanks @roobert!
-- [#4409](https://github.com/influxdb/influxdb/pull/4409): wire up INTO queries. -- [#4379](https://github.com/influxdb/influxdb/pull/4379): Auto-create database for UDP input. -- [#4375](https://github.com/influxdb/influxdb/pull/4375): Add Subscriptions so data can be 'forked' out of InfluxDB to another third party. -- [#4506](https://github.com/influxdb/influxdb/pull/4506): Register with Enterprise service and upload stats, if token is available. -- [#4516](https://github.com/influxdb/influxdb/pull/4516): Hinted-handoff refactor, with new statistics and diagnostics -- [#4501](https://github.com/influxdb/influxdb/pull/4501): Allow filtering SHOW MEASUREMENTS by regex. -- [#4547](https://github.com/influxdb/influxdb/pull/4547): Allow any node to be dropped, even a raft node (even the leader). -- [#4600](https://github.com/influxdb/influxdb/pull/4600): ping endpoint can wait for leader -- [#4648](https://github.com/influxdb/influxdb/pull/4648): UDP Client (v2 client) -- [#4690](https://github.com/influxdb/influxdb/pull/4690): SHOW SHARDS now includes database and policy. Thanks @pires -- [#4676](https://github.com/influxdb/influxdb/pull/4676): UDP service listener performance enhancements -- [#4659](https://github.com/influxdb/influxdb/pull/4659): Support IF EXISTS for DROP DATABASE. Thanks @ch33hau -- [#4721](https://github.com/influxdb/influxdb/pull/4721): Export tsdb.InterfaceValues -- [#4681](https://github.com/influxdb/influxdb/pull/4681): Increase default buffer size for collectd and graphite listeners -- [#4685](https://github.com/influxdb/influxdb/pull/4685): Automatically promote node to raft peer if drop server results in removing a raft peer. -- [#4846](https://github.com/influxdb/influxdb/pull/4846): Allow NaN as a valid value on the graphite service; discard these points silently (graphite compatibility). Thanks @jsternberg! - -### Bugfixes -- [#4193](https://github.com/influxdb/influxdb/issues/4193): Less than or equal to inequality is not inclusive for time in where clause -- [#4235](https://github.com/influxdb/influxdb/issues/4235): "ORDER BY DESC" doesn't properly order -- [#4789](https://github.com/influxdb/influxdb/pull/4789): Decode WHERE fields during aggregates. Fix [issue #4701](https://github.com/influxdb/influxdb/issues/4701). -- [#4778](https://github.com/influxdb/influxdb/pull/4778): If there are no points to count, count is 0. -- [#4715](https://github.com/influxdb/influxdb/pull/4715): Fix panic during Raft-close. Fix [issue #4707](https://github.com/influxdb/influxdb/issues/4707). Thanks @oiooj -- [#4643](https://github.com/influxdb/influxdb/pull/4643): Fix panic during backup restoration. Thanks @oiooj -- [#4632](https://github.com/influxdb/influxdb/pull/4632): Fix parsing of IPv6 hosts in client package. Thanks @miguelxpn -- [#4389](https://github.com/influxdb/influxdb/pull/4389): Don't add a new segment file on each hinted-handoff purge cycle. -- [#4166](https://github.com/influxdb/influxdb/pull/4166): Fix parser error on invalid SHOW -- [#3457](https://github.com/influxdb/influxdb/issues/3457): [0.9.3] cannot select field names with prefix + "." that match the measurement name -- [#4704](https://github.com/influxdb/influxdb/pull/4704). Tighten up command parsing within CLI. 
Thanks @pires -- [#4225](https://github.com/influxdb/influxdb/pull/4225): Always display diags in name-sorted order -- [#4111](https://github.com/influxdb/influxdb/pull/4111): Update pre-commit hook for go vet composites -- [#4136](https://github.com/influxdb/influxdb/pull/4136): Return an error-on-write if target retention policy does not exist. Thanks for the report @ymettier -- [#4228](https://github.com/influxdb/influxdb/pull/4228): Add build timestamp to version information. -- [#4124](https://github.com/influxdb/influxdb/issues/4124): Missing defer/recover/panic idiom in HTTPD service -- [#4238](https://github.com/influxdb/influxdb/pull/4238): Fully disable hinted-handoff service if so requested. -- [#4165](https://github.com/influxdb/influxdb/pull/4165): Tag all Go runtime stats when writing to internal database. -- [#4586](https://github.com/influxdb/influxdb/pull/4586): Exit when invalid engine is selected -- [#4118](https://github.com/influxdb/influxdb/issues/4118): Return consistent, correct result for SHOW MEASUREMENTS with multiple AND conditions -- [#4191](https://github.com/influxdb/influxdb/pull/4191): Correctly marshal remote mapper responses. Fixes [#4170](https://github.com/influxdb/influxdb/issues/4170) -- [#4222](https://github.com/influxdb/influxdb/pull/4222): Graphite TCP connections should not block shutdown -- [#4180](https://github.com/influxdb/influxdb/pull/4180): Cursor & SelectMapper Refactor -- [#1577](https://github.com/influxdb/influxdb/issues/1577): selectors (e.g. min, max, first, last) should have equivalents to return the actual point -- [#4264](https://github.com/influxdb/influxdb/issues/4264): Refactor map functions to use list of values -- [#4278](https://github.com/influxdb/influxdb/pull/4278): Fix error marshalling across the cluster -- [#4149](https://github.com/influxdb/influxdb/pull/4149): Fix derivative unnecessarily requires aggregate function. Thanks @peekeri! -- [#4674](https://github.com/influxdb/influxdb/pull/4674): Fix panic during restore. Thanks @simcap. -- [#4725](https://github.com/influxdb/influxdb/pull/4725): Don't list deleted shards during SHOW SHARDS. -- [#4237](https://github.com/influxdb/influxdb/issues/4237): DERIVATIVE() edge conditions -- [#4263](https://github.com/influxdb/influxdb/issues/4263): derivative does not work when data is missing -- [#4293](https://github.com/influxdb/influxdb/pull/4293): Ensure shell is invoked when touching PID file. Thanks @christopherjdickson -- [#4296](https://github.com/influxdb/influxdb/pull/4296): Reject line protocol ending with '-'. Fixes [#4272](https://github.com/influxdb/influxdb/issues/4272) -- [#4333](https://github.com/influxdb/influxdb/pull/4333): Retry monitor storage creation and storage only on Leader. -- [#4276](https://github.com/influxdb/influxdb/issues/4276): Walk DropSeriesStatement & check for empty sources -- [#4465](https://github.com/influxdb/influxdb/pull/4465): Actually display a message if the CLI can't connect to the database. -- [#4342](https://github.com/influxdb/influxdb/pull/4342): Fix mixing aggregates and math with non-aggregates. Thanks @kostya-sh. -- [#4349](https://github.com/influxdb/influxdb/issues/4349): If HH can't unmarshal a block, skip that block. -- [#4502](https://github.com/influxdb/influxdb/pull/4502): Don't crash on Graphite close, if Graphite not fully open. Thanks for the report @ranjib -- [#4354](https://github.com/influxdb/influxdb/pull/4353): Fully lock node queues during hinted handoff. Fixes one cause of missing data on clusters. 
-- [#4357](https://github.com/influxdb/influxdb/issues/4357): Fix similar float values encoding overflow Thanks @dgryski! -- [#4344](https://github.com/influxdb/influxdb/issues/4344): Make client.Write default to client.precision if none is given. -- [#3429](https://github.com/influxdb/influxdb/issues/3429): Incorrect parsing of regex containing '/' -- [#4374](https://github.com/influxdb/influxdb/issues/4374): Add tsm1 quickcheck tests -- [#4644](https://github.com/influxdb/influxdb/pull/4644): Check for response errors during token check, fixes issue [#4641](https://github.com/influxdb/influxdb/issues/4641) -- [#4377](https://github.com/influxdb/influxdb/pull/4377): Hinted handoff should not process dropped nodes -- [#4365](https://github.com/influxdb/influxdb/issues/4365): Prevent panic in DecodeSameTypeBlock -- [#4280](https://github.com/influxdb/influxdb/issues/4280): Only drop points matching WHERE clause -- [#4443](https://github.com/influxdb/influxdb/pull/4443): Fix race condition while listing store's shards. Fixes [#4442](https://github.com/influxdb/influxdb/issues/4442) -- [#4410](https://github.com/influxdb/influxdb/pull/4410): Fix infinite recursion in statement string(). Thanks @kostya-sh -- [#4360](https://github.com/influxdb/influxdb/issues/4360): Aggregate Selectors overwrite values during post-processing -- [#4421](https://github.com/influxdb/influxdb/issues/4421): Fix line protocol accepting tags with no values -- [#4434](https://github.com/influxdb/influxdb/pull/4434): Allow 'E' for scientific values. Fixes [#4433](https://github.com/influxdb/influxdb/issues/4433) -- [#4431](https://github.com/influxdb/influxdb/issues/4431): Add tsm1 WAL QuickCheck -- [#4438](https://github.com/influxdb/influxdb/pull/4438): openTSDB service shutdown fixes -- [#4447](https://github.com/influxdb/influxdb/pull/4447): Fixes to logrotate file. Thanks @linsomniac. -- [#3820](https://github.com/influxdb/influxdb/issues/3820): Fix js error in admin UI. -- [#4460](https://github.com/influxdb/influxdb/issues/4460): tsm1 meta lint -- [#4415](https://github.com/influxdb/influxdb/issues/4415): Selector (like max, min, first, etc) return a string instead of timestamp -- [#4472](https://github.com/influxdb/influxdb/issues/4472): Fix 'too many points in GROUP BY interval' error -- [#4475](https://github.com/influxdb/influxdb/issues/4475): Fix SHOW TAG VALUES error message. -- [#4486](https://github.com/influxdb/influxdb/pull/4486): Fix missing comments for runner package -- [#4497](https://github.com/influxdb/influxdb/pull/4497): Fix sequence in meta proto -- [#3367](https://github.com/influxdb/influxdb/issues/3367): Negative timestamps are parsed correctly by the line protocol. -- [#4563](https://github.com/influxdb/influxdb/pull/4536): Fix broken subscriptions updates. -- [#4538](https://github.com/influxdb/influxdb/issues/4538): Dropping database under a write load causes panics -- [#4582](https://github.com/influxdb/influxdb/pull/4582): Correct logging tags in cluster and TCP package. 
Thanks @oiooj -- [#4513](https://github.com/influxdb/influxdb/issues/4513): TSM1: panic: runtime error: index out of range -- [#4521](https://github.com/influxdb/influxdb/issues/4521): TSM1: panic: decode of short block: got 1, exp 9 -- [#4587](https://github.com/influxdb/influxdb/pull/4587): Prevent NaN float values from being stored -- [#4596](https://github.com/influxdb/influxdb/pull/4596): Skip empty string for start position when parsing line protocol @Thanks @ch33hau -- [#4610](https://github.com/influxdb/influxdb/pull/4610): Make internal stats names consistent with Go style. -- [#4625](https://github.com/influxdb/influxdb/pull/4625): Correctly handle bad write requests. Thanks @oiooj. -- [#4650](https://github.com/influxdb/influxdb/issues/4650): Importer should skip empty lines -- [#4651](https://github.com/influxdb/influxdb/issues/4651): Importer doesn't flush out last batch -- [#4602](https://github.com/influxdb/influxdb/issues/4602): Fixes data race between PointsWriter and Subscriber services. -- [#4691](https://github.com/influxdb/influxdb/issues/4691): Enable toml test `TestConfig_Encode`. -- [#4283](https://github.com/influxdb/influxdb/pull/4283): Disable HintedHandoff if configuration is not set. -- [#4703](https://github.com/influxdb/influxdb/pull/4703): Complete lint for cmd/influx. Thanks @pablolmiranda +- [#4702](https://github.com/influxdata/influxdb/pull/4702): Support 'history' command at CLI +- [#4098](https://github.com/influxdata/influxdb/issues/4098): Enable `golint` on the code base - uuid subpackage +- [#4141](https://github.com/influxdata/influxdb/pull/4141): Control whether each query should be logged +- [#4065](https://github.com/influxdata/influxdb/pull/4065): Added precision support in cmd client. Thanks @sbouchex +- [#4140](https://github.com/influxdata/influxdb/pull/4140): Make storage engine configurable +- [#4161](https://github.com/influxdata/influxdb/pull/4161): Implement bottom selector function +- [#4204](https://github.com/influxdata/influxdb/pull/4204): Allow module-level selection for SHOW STATS +- [#4208](https://github.com/influxdata/influxdb/pull/4208): Allow module-level selection for SHOW DIAGNOSTICS +- [#4196](https://github.com/influxdata/influxdb/pull/4196): Export tsdb.Iterator +- [#4198](https://github.com/influxdata/influxdb/pull/4198): Add basic cluster-service stats +- [#4262](https://github.com/influxdata/influxdb/pull/4262): Allow configuration of UDP retention policy +- [#4265](https://github.com/influxdata/influxdb/pull/4265): Add statistics for Hinted-Handoff +- [#4284](https://github.com/influxdata/influxdb/pull/4284): Add exponential backoff for hinted-handoff failures +- [#4310](https://github.com/influxdata/influxdb/pull/4310): Support dropping non-Raft nodes. Work mostly by @corylanou +- [#4348](https://github.com/influxdata/influxdb/pull/4348): Public ApplyTemplate function for graphite parser. +- [#4178](https://github.com/influxdata/influxdb/pull/4178): Support fields in graphite parser. Thanks @roobert! +- [#4409](https://github.com/influxdata/influxdb/pull/4409): wire up INTO queries. +- [#4379](https://github.com/influxdata/influxdb/pull/4379): Auto-create database for UDP input. +- [#4375](https://github.com/influxdata/influxdb/pull/4375): Add Subscriptions so data can be 'forked' out of InfluxDB to another third party. +- [#4506](https://github.com/influxdata/influxdb/pull/4506): Register with Enterprise service and upload stats, if token is available. 
+- [#4516](https://github.com/influxdata/influxdb/pull/4516): Hinted-handoff refactor, with new statistics and diagnostics +- [#4501](https://github.com/influxdata/influxdb/pull/4501): Allow filtering SHOW MEASUREMENTS by regex. +- [#4547](https://github.com/influxdata/influxdb/pull/4547): Allow any node to be dropped, even a raft node (even the leader). +- [#4600](https://github.com/influxdata/influxdb/pull/4600): ping endpoint can wait for leader +- [#4648](https://github.com/influxdata/influxdb/pull/4648): UDP Client (v2 client) +- [#4690](https://github.com/influxdata/influxdb/pull/4690): SHOW SHARDS now includes database and policy. Thanks @pires +- [#4676](https://github.com/influxdata/influxdb/pull/4676): UDP service listener performance enhancements +- [#4659](https://github.com/influxdata/influxdb/pull/4659): Support IF EXISTS for DROP DATABASE. Thanks @ch33hau +- [#4721](https://github.com/influxdata/influxdb/pull/4721): Export tsdb.InterfaceValues +- [#4681](https://github.com/influxdata/influxdb/pull/4681): Increase default buffer size for collectd and graphite listeners +- [#4685](https://github.com/influxdata/influxdb/pull/4685): Automatically promote node to raft peer if drop server results in removing a raft peer. +- [#4846](https://github.com/influxdata/influxdb/pull/4846): Allow NaN as a valid value on the graphite service; discard these points silently (graphite compatibility). Thanks @jsternberg! + +### Bugfixes +- [#4193](https://github.com/influxdata/influxdb/issues/4193): Less than or equal to inequality is not inclusive for time in where clause +- [#4235](https://github.com/influxdata/influxdb/issues/4235): "ORDER BY DESC" doesn't properly order +- [#4789](https://github.com/influxdata/influxdb/pull/4789): Decode WHERE fields during aggregates. Fix [issue #4701](https://github.com/influxdata/influxdb/issues/4701). +- [#4778](https://github.com/influxdata/influxdb/pull/4778): If there are no points to count, count is 0. +- [#4715](https://github.com/influxdata/influxdb/pull/4715): Fix panic during Raft-close. Fix [issue #4707](https://github.com/influxdata/influxdb/issues/4707). Thanks @oiooj +- [#4643](https://github.com/influxdata/influxdb/pull/4643): Fix panic during backup restoration. Thanks @oiooj +- [#4632](https://github.com/influxdata/influxdb/pull/4632): Fix parsing of IPv6 hosts in client package. Thanks @miguelxpn +- [#4389](https://github.com/influxdata/influxdb/pull/4389): Don't add a new segment file on each hinted-handoff purge cycle. +- [#4166](https://github.com/influxdata/influxdb/pull/4166): Fix parser error on invalid SHOW +- [#3457](https://github.com/influxdata/influxdb/issues/3457): [0.9.3] cannot select field names with prefix + "." that match the measurement name +- [#4704](https://github.com/influxdata/influxdb/pull/4704). Tighten up command parsing within CLI. Thanks @pires +- [#4225](https://github.com/influxdata/influxdb/pull/4225): Always display diags in name-sorted order +- [#4111](https://github.com/influxdata/influxdb/pull/4111): Update pre-commit hook for go vet composites +- [#4136](https://github.com/influxdata/influxdb/pull/4136): Return an error-on-write if target retention policy does not exist. Thanks for the report @ymettier +- [#4228](https://github.com/influxdata/influxdb/pull/4228): Add build timestamp to version information. 
+- [#4124](https://github.com/influxdata/influxdb/issues/4124): Missing defer/recover/panic idiom in HTTPD service +- [#4238](https://github.com/influxdata/influxdb/pull/4238): Fully disable hinted-handoff service if so requested. +- [#4165](https://github.com/influxdata/influxdb/pull/4165): Tag all Go runtime stats when writing to internal database. +- [#4586](https://github.com/influxdata/influxdb/pull/4586): Exit when invalid engine is selected +- [#4118](https://github.com/influxdata/influxdb/issues/4118): Return consistent, correct result for SHOW MEASUREMENTS with multiple AND conditions +- [#4191](https://github.com/influxdata/influxdb/pull/4191): Correctly marshal remote mapper responses. Fixes [#4170](https://github.com/influxdata/influxdb/issues/4170) +- [#4222](https://github.com/influxdata/influxdb/pull/4222): Graphite TCP connections should not block shutdown +- [#4180](https://github.com/influxdata/influxdb/pull/4180): Cursor & SelectMapper Refactor +- [#1577](https://github.com/influxdata/influxdb/issues/1577): selectors (e.g. min, max, first, last) should have equivalents to return the actual point +- [#4264](https://github.com/influxdata/influxdb/issues/4264): Refactor map functions to use list of values +- [#4278](https://github.com/influxdata/influxdb/pull/4278): Fix error marshalling across the cluster +- [#4149](https://github.com/influxdata/influxdb/pull/4149): Fix derivative unnecessarily requires aggregate function. Thanks @peekeri! +- [#4674](https://github.com/influxdata/influxdb/pull/4674): Fix panic during restore. Thanks @simcap. +- [#4725](https://github.com/influxdata/influxdb/pull/4725): Don't list deleted shards during SHOW SHARDS. +- [#4237](https://github.com/influxdata/influxdb/issues/4237): DERIVATIVE() edge conditions +- [#4263](https://github.com/influxdata/influxdb/issues/4263): derivative does not work when data is missing +- [#4293](https://github.com/influxdata/influxdb/pull/4293): Ensure shell is invoked when touching PID file. Thanks @christopherjdickson +- [#4296](https://github.com/influxdata/influxdb/pull/4296): Reject line protocol ending with '-'. Fixes [#4272](https://github.com/influxdata/influxdb/issues/4272) +- [#4333](https://github.com/influxdata/influxdb/pull/4333): Retry monitor storage creation and storage only on Leader. +- [#4276](https://github.com/influxdata/influxdb/issues/4276): Walk DropSeriesStatement & check for empty sources +- [#4465](https://github.com/influxdata/influxdb/pull/4465): Actually display a message if the CLI can't connect to the database. +- [#4342](https://github.com/influxdata/influxdb/pull/4342): Fix mixing aggregates and math with non-aggregates. Thanks @kostya-sh. +- [#4349](https://github.com/influxdata/influxdb/issues/4349): If HH can't unmarshal a block, skip that block. +- [#4502](https://github.com/influxdata/influxdb/pull/4502): Don't crash on Graphite close, if Graphite not fully open. Thanks for the report @ranjib +- [#4354](https://github.com/influxdata/influxdb/pull/4353): Fully lock node queues during hinted handoff. Fixes one cause of missing data on clusters. +- [#4357](https://github.com/influxdata/influxdb/issues/4357): Fix similar float values encoding overflow Thanks @dgryski! +- [#4344](https://github.com/influxdata/influxdb/issues/4344): Make client.Write default to client.precision if none is given. 
+- [#3429](https://github.com/influxdata/influxdb/issues/3429): Incorrect parsing of regex containing '/'
+- [#4374](https://github.com/influxdata/influxdb/issues/4374): Add tsm1 quickcheck tests
+- [#4644](https://github.com/influxdata/influxdb/pull/4644): Check for response errors during token check, fixes issue [#4641](https://github.com/influxdata/influxdb/issues/4641)
+- [#4377](https://github.com/influxdata/influxdb/pull/4377): Hinted handoff should not process dropped nodes
+- [#4365](https://github.com/influxdata/influxdb/issues/4365): Prevent panic in DecodeSameTypeBlock
+- [#4280](https://github.com/influxdata/influxdb/issues/4280): Only drop points matching WHERE clause
+- [#4443](https://github.com/influxdata/influxdb/pull/4443): Fix race condition while listing store's shards. Fixes [#4442](https://github.com/influxdata/influxdb/issues/4442)
+- [#4410](https://github.com/influxdata/influxdb/pull/4410): Fix infinite recursion in statement string(). Thanks @kostya-sh
+- [#4360](https://github.com/influxdata/influxdb/issues/4360): Aggregate Selectors overwrite values during post-processing
+- [#4421](https://github.com/influxdata/influxdb/issues/4421): Fix line protocol accepting tags with no values
+- [#4434](https://github.com/influxdata/influxdb/pull/4434): Allow 'E' for scientific values. Fixes [#4433](https://github.com/influxdata/influxdb/issues/4433)
+- [#4431](https://github.com/influxdata/influxdb/issues/4431): Add tsm1 WAL QuickCheck
+- [#4438](https://github.com/influxdata/influxdb/pull/4438): openTSDB service shutdown fixes
+- [#4447](https://github.com/influxdata/influxdb/pull/4447): Fixes to logrotate file. Thanks @linsomniac.
+- [#3820](https://github.com/influxdata/influxdb/issues/3820): Fix js error in admin UI.
+- [#4460](https://github.com/influxdata/influxdb/issues/4460): tsm1 meta lint
+- [#4415](https://github.com/influxdata/influxdb/issues/4415): Selector (like max, min, first, etc) return a string instead of timestamp
+- [#4472](https://github.com/influxdata/influxdb/issues/4472): Fix 'too many points in GROUP BY interval' error
+- [#4475](https://github.com/influxdata/influxdb/issues/4475): Fix SHOW TAG VALUES error message.
+- [#4486](https://github.com/influxdata/influxdb/pull/4486): Fix missing comments for runner package
+- [#4497](https://github.com/influxdata/influxdb/pull/4497): Fix sequence in meta proto
+- [#3367](https://github.com/influxdata/influxdb/issues/3367): Negative timestamps are parsed correctly by the line protocol.
+- [#4563](https://github.com/influxdata/influxdb/pull/4536): Fix broken subscriptions updates.
+- [#4538](https://github.com/influxdata/influxdb/issues/4538): Dropping database under a write load causes panics
+- [#4582](https://github.com/influxdata/influxdb/pull/4582): Correct logging tags in cluster and TCP package. Thanks @oiooj
+- [#4513](https://github.com/influxdata/influxdb/issues/4513): TSM1: panic: runtime error: index out of range
+- [#4521](https://github.com/influxdata/influxdb/issues/4521): TSM1: panic: decode of short block: got 1, exp 9
+- [#4587](https://github.com/influxdata/influxdb/pull/4587): Prevent NaN float values from being stored
+- [#4596](https://github.com/influxdata/influxdb/pull/4596): Skip empty string for start position when parsing line protocol. Thanks @ch33hau
+- [#4610](https://github.com/influxdata/influxdb/pull/4610): Make internal stats names consistent with Go style.
+- [#4625](https://github.com/influxdata/influxdb/pull/4625): Correctly handle bad write requests. Thanks @oiooj.
+- [#4650](https://github.com/influxdata/influxdb/issues/4650): Importer should skip empty lines +- [#4651](https://github.com/influxdata/influxdb/issues/4651): Importer doesn't flush out last batch +- [#4602](https://github.com/influxdata/influxdb/issues/4602): Fixes data race between PointsWriter and Subscriber services. +- [#4691](https://github.com/influxdata/influxdb/issues/4691): Enable toml test `TestConfig_Encode`. +- [#4283](https://github.com/influxdata/influxdb/pull/4283): Disable HintedHandoff if configuration is not set. +- [#4703](https://github.com/influxdata/influxdb/pull/4703): Complete lint for cmd/influx. Thanks @pablolmiranda ## v0.9.4 [2015-09-14] @@ -215,59 +757,59 @@ With this release InfluxDB is moving to Go 1.5. ### Features -- [#4050](https://github.com/influxdb/influxdb/pull/4050): Add stats to collectd -- [#3771](https://github.com/influxdb/influxdb/pull/3771): Close idle Graphite TCP connections -- [#3755](https://github.com/influxdb/influxdb/issues/3755): Add option to build script. Thanks @fg2it -- [#3863](https://github.com/influxdb/influxdb/pull/3863): Move to Go 1.5 -- [#3892](https://github.com/influxdb/influxdb/pull/3892): Support IF NOT EXISTS for CREATE DATABASE -- [#3916](https://github.com/influxdb/influxdb/pull/3916): New statistics and diagnostics support. Graphite first to be instrumented. -- [#3901](https://github.com/influxdb/influxdb/pull/3901): Add consistency level option to influx cli Thanks @takayuki -- [#4048](https://github.com/influxdb/influxdb/pull/4048): Add statistics to Continuous Query service -- [#4049](https://github.com/influxdb/influxdb/pull/4049): Add stats to the UDP input -- [#3876](https://github.com/influxdb/influxdb/pull/3876): Allow the following syntax in CQs: INTO "1hPolicy".:MEASUREMENT -- [#3975](https://github.com/influxdb/influxdb/pull/3975): Add shard copy service -- [#3986](https://github.com/influxdb/influxdb/pull/3986): Support sorting by time desc -- [#3930](https://github.com/influxdb/influxdb/pull/3930): Wire up TOP aggregate function - fixes [#1821](https://github.com/influxdb/influxdb/issues/1821) -- [#4045](https://github.com/influxdb/influxdb/pull/4045): Instrument cluster-level points writer -- [#3996](https://github.com/influxdb/influxdb/pull/3996): Add statistics to httpd package -- [#4003](https://github.com/influxdb/influxdb/pull/4033): Add logrotate configuration. -- [#4043](https://github.com/influxdb/influxdb/pull/4043): Add stats and batching to openTSDB input -- [#4042](https://github.com/influxdb/influxdb/pull/4042): Add pending batches control to batcher -- [#4006](https://github.com/influxdb/influxdb/pull/4006): Add basic statistics for shards -- [#4072](https://github.com/influxdb/influxdb/pull/4072): Add statistics for the WAL. - -### Bugfixes -- [#4042](https://github.com/influxdb/influxdb/pull/4042): Set UDP input batching defaults as needed. -- [#3785](https://github.com/influxdb/influxdb/issues/3785): Invalid time stamp in graphite metric causes panic -- [#3804](https://github.com/influxdb/influxdb/pull/3804): init.d script fixes, fixes issue 3803. -- [#3823](https://github.com/influxdb/influxdb/pull/3823): Deterministic ordering for first() and last() -- [#3869](https://github.com/influxdb/influxdb/issues/3869): Seemingly deadlocked when ingesting metrics via graphite plugin -- [#3856](https://github.com/influxdb/influxdb/pull/3856): Minor changes to retention enforcement. 
-- [#3884](https://github.com/influxdb/influxdb/pull/3884): Fix two panics in WAL that can happen at server startup -- [#3868](https://github.com/influxdb/influxdb/pull/3868): Add shell option to start the daemon on CentOS. Thanks @SwannCroiset. -- [#3886](https://github.com/influxdb/influxdb/pull/3886): Prevent write timeouts due to lock contention in WAL -- [#3574](https://github.com/influxdb/influxdb/issues/3574): Querying data node causes panic -- [#3913](https://github.com/influxdb/influxdb/issues/3913): Convert meta shard owners to objects -- [#4026](https://github.com/influxdb/influxdb/pull/4026): Support multiple Graphite inputs. Fixes issue [#3636](https://github.com/influxdb/influxdb/issues/3636) -- [#3927](https://github.com/influxdb/influxdb/issues/3927): Add WAL lock to prevent timing lock contention -- [#3928](https://github.com/influxdb/influxdb/issues/3928): Write fails for multiple points when tag starts with quote -- [#3901](https://github.com/influxdb/influxdb/pull/3901): Unblock relaxed write consistency level Thanks @takayuki! -- [#3950](https://github.com/influxdb/influxdb/pull/3950): Limit bz1 quickcheck tests to 10 iterations on CI -- [#3977](https://github.com/influxdb/influxdb/pull/3977): Silence wal logging during testing -- [#3931](https://github.com/influxdb/influxdb/pull/3931): Don't precreate shard groups entirely in the past -- [#3960](https://github.com/influxdb/influxdb/issues/3960): possible "catch up" bug with nodes down in a cluster -- [#3980](https://github.com/influxdb/influxdb/pull/3980): 'service stop' waits until service actually stops. Fixes issue #3548. -- [#4016](https://github.com/influxdb/influxdb/pull/4016): Shutdown Graphite UDP on SIGTERM. -- [#4034](https://github.com/influxdb/influxdb/pull/4034): Rollback bolt tx on mapper open error -- [#3848](https://github.com/influxdb/influxdb/issues/3848): restart influxdb causing panic -- [#3881](https://github.com/influxdb/influxdb/issues/3881): panic: runtime error: invalid memory address or nil pointer dereference -- [#3926](https://github.com/influxdb/influxdb/issues/3926): First or last value of `GROUP BY time(x)` is often null. Fixed by [#4038](https://github.com/influxdb/influxdb/pull/4038) -- [#4053](https://github.com/influxdb/influxdb/pull/4053): Prohibit dropping default retention policy. -- [#4060](https://github.com/influxdb/influxdb/pull/4060): Don't log EOF error in openTSDB input. -- [#3978](https://github.com/influxdb/influxdb/issues/3978): [0.9.3] (regression) cannot use GROUP BY * with more than a single field in SELECT clause -- [#4058](https://github.com/influxdb/influxdb/pull/4058): Disable bz1 recompression -- [#3902](https://github.com/influxdb/influxdb/issues/3902): [0.9.3] DB should not crash when using invalid expression "GROUP BY time" -- [#3718](https://github.com/influxdb/influxdb/issues/3718): Derivative query with group by time but no aggregate function should fail parse +- [#4050](https://github.com/influxdata/influxdb/pull/4050): Add stats to collectd +- [#3771](https://github.com/influxdata/influxdb/pull/3771): Close idle Graphite TCP connections +- [#3755](https://github.com/influxdata/influxdb/issues/3755): Add option to build script. Thanks @fg2it +- [#3863](https://github.com/influxdata/influxdb/pull/3863): Move to Go 1.5 +- [#3892](https://github.com/influxdata/influxdb/pull/3892): Support IF NOT EXISTS for CREATE DATABASE +- [#3916](https://github.com/influxdata/influxdb/pull/3916): New statistics and diagnostics support. Graphite first to be instrumented. 
+- [#3901](https://github.com/influxdata/influxdb/pull/3901): Add consistency level option to influx cli Thanks @takayuki +- [#4048](https://github.com/influxdata/influxdb/pull/4048): Add statistics to Continuous Query service +- [#4049](https://github.com/influxdata/influxdb/pull/4049): Add stats to the UDP input +- [#3876](https://github.com/influxdata/influxdb/pull/3876): Allow the following syntax in CQs: INTO "1hPolicy".:MEASUREMENT +- [#3975](https://github.com/influxdata/influxdb/pull/3975): Add shard copy service +- [#3986](https://github.com/influxdata/influxdb/pull/3986): Support sorting by time desc +- [#3930](https://github.com/influxdata/influxdb/pull/3930): Wire up TOP aggregate function - fixes [#1821](https://github.com/influxdata/influxdb/issues/1821) +- [#4045](https://github.com/influxdata/influxdb/pull/4045): Instrument cluster-level points writer +- [#3996](https://github.com/influxdata/influxdb/pull/3996): Add statistics to httpd package +- [#4003](https://github.com/influxdata/influxdb/pull/4033): Add logrotate configuration. +- [#4043](https://github.com/influxdata/influxdb/pull/4043): Add stats and batching to openTSDB input +- [#4042](https://github.com/influxdata/influxdb/pull/4042): Add pending batches control to batcher +- [#4006](https://github.com/influxdata/influxdb/pull/4006): Add basic statistics for shards +- [#4072](https://github.com/influxdata/influxdb/pull/4072): Add statistics for the WAL. + +### Bugfixes +- [#4042](https://github.com/influxdata/influxdb/pull/4042): Set UDP input batching defaults as needed. +- [#3785](https://github.com/influxdata/influxdb/issues/3785): Invalid time stamp in graphite metric causes panic +- [#3804](https://github.com/influxdata/influxdb/pull/3804): init.d script fixes, fixes issue 3803. +- [#3823](https://github.com/influxdata/influxdb/pull/3823): Deterministic ordering for first() and last() +- [#3869](https://github.com/influxdata/influxdb/issues/3869): Seemingly deadlocked when ingesting metrics via graphite plugin +- [#3856](https://github.com/influxdata/influxdb/pull/3856): Minor changes to retention enforcement. +- [#3884](https://github.com/influxdata/influxdb/pull/3884): Fix two panics in WAL that can happen at server startup +- [#3868](https://github.com/influxdata/influxdb/pull/3868): Add shell option to start the daemon on CentOS. Thanks @SwannCroiset. +- [#3886](https://github.com/influxdata/influxdb/pull/3886): Prevent write timeouts due to lock contention in WAL +- [#3574](https://github.com/influxdata/influxdb/issues/3574): Querying data node causes panic +- [#3913](https://github.com/influxdata/influxdb/issues/3913): Convert meta shard owners to objects +- [#4026](https://github.com/influxdata/influxdb/pull/4026): Support multiple Graphite inputs. Fixes issue [#3636](https://github.com/influxdata/influxdb/issues/3636) +- [#3927](https://github.com/influxdata/influxdb/issues/3927): Add WAL lock to prevent timing lock contention +- [#3928](https://github.com/influxdata/influxdb/issues/3928): Write fails for multiple points when tag starts with quote +- [#3901](https://github.com/influxdata/influxdb/pull/3901): Unblock relaxed write consistency level Thanks @takayuki! 
+- [#3950](https://github.com/influxdata/influxdb/pull/3950): Limit bz1 quickcheck tests to 10 iterations on CI +- [#3977](https://github.com/influxdata/influxdb/pull/3977): Silence wal logging during testing +- [#3931](https://github.com/influxdata/influxdb/pull/3931): Don't precreate shard groups entirely in the past +- [#3960](https://github.com/influxdata/influxdb/issues/3960): possible "catch up" bug with nodes down in a cluster +- [#3980](https://github.com/influxdata/influxdb/pull/3980): 'service stop' waits until service actually stops. Fixes issue #3548. +- [#4016](https://github.com/influxdata/influxdb/pull/4016): Shutdown Graphite UDP on SIGTERM. +- [#4034](https://github.com/influxdata/influxdb/pull/4034): Rollback bolt tx on mapper open error +- [#3848](https://github.com/influxdata/influxdb/issues/3848): restart influxdb causing panic +- [#3881](https://github.com/influxdata/influxdb/issues/3881): panic: runtime error: invalid memory address or nil pointer dereference +- [#3926](https://github.com/influxdata/influxdb/issues/3926): First or last value of `GROUP BY time(x)` is often null. Fixed by [#4038](https://github.com/influxdata/influxdb/pull/4038) +- [#4053](https://github.com/influxdata/influxdb/pull/4053): Prohibit dropping default retention policy. +- [#4060](https://github.com/influxdata/influxdb/pull/4060): Don't log EOF error in openTSDB input. +- [#3978](https://github.com/influxdata/influxdb/issues/3978): [0.9.3] (regression) cannot use GROUP BY * with more than a single field in SELECT clause +- [#4058](https://github.com/influxdata/influxdb/pull/4058): Disable bz1 recompression +- [#3902](https://github.com/influxdata/influxdb/issues/3902): [0.9.3] DB should not crash when using invalid expression "GROUP BY time" +- [#3718](https://github.com/influxdata/influxdb/issues/3718): Derivative query with group by time but no aggregate function should fail parse ## v0.9.3 [2015-08-26] @@ -275,210 +817,210 @@ There are breaking changes in this release. - To store data points as integers you must now append `i` to the number if using the line protocol. - - If you have a UDP input configured, you should check the UDP section of [the new sample configuration file](https://github.com/influxdb/influxdb/blob/master/etc/config.sample.toml) to learn how to modify existing configuration files, as 0.9.3 now expects multiple UDP inputs. - - Configuration files must now have an entry for `wal-dir` in the `[data]` section. Check [new sample configuration file](https://github.com/influxdb/influxdb/blob/master/etc/config.sample.toml) for more details. + - If you have a UDP input configured, you should check the UDP section of [the new sample configuration file](https://github.com/influxdata/influxdb/blob/master/etc/config.sample.toml) to learn how to modify existing configuration files, as 0.9.3 now expects multiple UDP inputs. + - Configuration files must now have an entry for `wal-dir` in the `[data]` section. Check [new sample configuration file](https://github.com/influxdata/influxdb/blob/master/etc/config.sample.toml) for more details. - The implicit `GROUP BY *` that was added to every `SELECT *` has been removed. Instead any tags in the data are now part of the columns in the returned query. Please see the *Features* section below for full details. 
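
To make the configuration-related breaking changes above concrete, the following is a minimal sketch only: the key names are assumed to match the 0.9-era sample configuration linked in these notes, and the paths, bind addresses, and database names are placeholders to adapt.

```toml
[data]
  dir = "/var/opt/influxdb/data"
  # New in 0.9.3: the write-ahead log location must be set explicitly.
  wal-dir = "/var/opt/influxdb/wal"

# 0.9.3 expects multiple UDP inputs, so each listener gets its own [[udp]] block.
[[udp]]
  enabled = true
  bind-address = ":8089"
  database = "udp_db"

[[udp]]
  enabled = true
  bind-address = ":8090"
  database = "udp_db_two"
```

Likewise, an integer field value must now carry the trailing `i` in line protocol, e.g. `cpu,host=server01 value=42i`; without the suffix the value is parsed as a float.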
### Features -- [#3376](https://github.com/influxdb/influxdb/pull/3376): Support for remote shard query mapping -- [#3372](https://github.com/influxdb/influxdb/pull/3372): Support joining nodes to existing cluster -- [#3426](https://github.com/influxdb/influxdb/pull/3426): Additional logging for continuous queries. Thanks @jhorwit2 -- [#3478](https://github.com/influxdb/influxdb/pull/3478): Support incremental cluster joins -- [#3519](https://github.com/influxdb/influxdb/pull/3519): **--BREAKING CHANGE--** Update line protocol to require trailing i for field values that are integers -- [#3529](https://github.com/influxdb/influxdb/pull/3529): Add TLS support for OpenTSDB plugin. Thanks @nathanielc -- [#3421](https://github.com/influxdb/influxdb/issues/3421): Should update metastore and cluster if IP or hostname changes -- [#3502](https://github.com/influxdb/influxdb/pull/3502): Importer for 0.8.9 data via the CLI -- [#3564](https://github.com/influxdb/influxdb/pull/3564): Fix alias, maintain column sort order -- [#3585](https://github.com/influxdb/influxdb/pull/3585): Additional test coverage for non-existent fields -- [#3246](https://github.com/influxdb/influxdb/issues/3246): Allow overriding of configuration parameters using environment variables -- [#3599](https://github.com/influxdb/influxdb/pull/3599): **--BREAKING CHANGE--** Support multiple UDP inputs. Thanks @tpitale -- [#3636](https://github.com/influxdb/influxdb/pull/3639): Cap auto-created retention policy replica count at 3 -- [#3641](https://github.com/influxdb/influxdb/pull/3641): Logging enhancements and single-node rename -- [#3635](https://github.com/influxdb/influxdb/pull/3635): Add build branch to version output. -- [#3115](https://github.com/influxdb/influxdb/pull/3115): Various init.d script improvements. Thanks @KoeSystems. -- [#3628](https://github.com/influxdb/influxdb/pull/3628): Wildcard expansion of tags and fields for raw queries -- [#3721](https://github.com/influxdb/influxdb/pull/3721): interpret number literals compared against time as nanoseconds from epoch -- [#3514](https://github.com/influxdb/influxdb/issues/3514): Implement WAL outside BoltDB with compaction -- [#3544](https://github.com/influxdb/influxdb/pull/3544): Implement compression on top of BoltDB -- [#3795](https://github.com/influxdb/influxdb/pull/3795): Throttle import -- [#3584](https://github.com/influxdb/influxdb/pull/3584): Import/export documenation - -### Bugfixes -- [#3405](https://github.com/influxdb/influxdb/pull/3405): Prevent database panic when fields are missing. Thanks @jhorwit2 -- [#3411](https://github.com/influxdb/influxdb/issues/3411): 500 timeout on write -- [#3420](https://github.com/influxdb/influxdb/pull/3420): Catch opentsdb malformed tags. Thanks @nathanielc. -- [#3404](https://github.com/influxdb/influxdb/pull/3404): Added support for escaped single quotes in query string. Thanks @jhorwit2 -- [#3414](https://github.com/influxdb/influxdb/issues/3414): Shard mappers perform query re-writing -- [#3525](https://github.com/influxdb/influxdb/pull/3525): check if fields are valid during parse time. 
-- [#3511](https://github.com/influxdb/influxdb/issues/3511): Sending a large number of tag causes panic -- [#3288](https://github.com/influxdb/influxdb/issues/3288): Run go fuzz on the line-protocol input -- [#3545](https://github.com/influxdb/influxdb/issues/3545): Fix parsing string fields with newlines -- [#3579](https://github.com/influxdb/influxdb/issues/3579): Revert breaking change to `client.NewClient` function -- [#3580](https://github.com/influxdb/influxdb/issues/3580): Do not allow wildcards with fields in select statements -- [#3530](https://github.com/influxdb/influxdb/pull/3530): Aliasing a column no longer works -- [#3436](https://github.com/influxdb/influxdb/issues/3436): Fix panic in hinted handoff queue processor -- [#3401](https://github.com/influxdb/influxdb/issues/3401): Derivative on non-numeric fields panics db -- [#3583](https://github.com/influxdb/influxdb/issues/3583): Inserting value in scientific notation with a trailing i causes panic -- [#3611](https://github.com/influxdb/influxdb/pull/3611): Fix query arithmetic with integers -- [#3326](https://github.com/influxdb/influxdb/issues/3326): simple regex query fails with cryptic error -- [#3618](https://github.com/influxdb/influxdb/pull/3618): Fix collectd stats panic on i386. Thanks @richterger -- [#3625](https://github.com/influxdb/influxdb/pull/3625): Don't panic when aggregate and raw queries are in a single statement -- [#3629](https://github.com/influxdb/influxdb/pull/3629): Use sensible batching defaults for Graphite. -- [#3638](https://github.com/influxdb/influxdb/pull/3638): Cluster config fixes and removal of meta.peers config field -- [#3640](https://github.com/influxdb/influxdb/pull/3640): Shutdown Graphite service when signal received. -- [#3632](https://github.com/influxdb/influxdb/issues/3632): Make single-node host renames more seamless -- [#3656](https://github.com/influxdb/influxdb/issues/3656): Silence snapshotter logger for testing -- [#3651](https://github.com/influxdb/influxdb/pull/3651): Fully remove series when dropped. -- [#3517](https://github.com/influxdb/influxdb/pull/3517): Batch CQ writes to avoid timeouts. Thanks @dim. -- [#3522](https://github.com/influxdb/influxdb/pull/3522): Consume CQ results on request timeouts. Thanks @dim. -- [#3646](https://github.com/influxdb/influxdb/pull/3646): Fix nil FieldCodec panic. -- [#3672](https://github.com/influxdb/influxdb/pull/3672): Reduce in-memory index by 20%-30% -- [#3673](https://github.com/influxdb/influxdb/pull/3673): Improve query performance by removing unnecessary tagset sorting. -- [#3676](https://github.com/influxdb/influxdb/pull/3676): Improve query performance by memomizing mapper output keys. -- [#3686](https://github.com/influxdb/influxdb/pull/3686): Ensure 'p' parameter is not logged, even on OPTIONS requests. -- [#3687](https://github.com/influxdb/influxdb/issues/3687): Fix panic: runtime error: makeslice: len out of range in hinted handoff -- [#3697](https://github.com/influxdb/influxdb/issues/3697): Correctly merge non-chunked results for same series. Fix issue #3242. 
-- [#3708](https://github.com/influxdb/influxdb/issues/3708): Fix double escaping measurement name during cluster replication -- [#3704](https://github.com/influxdb/influxdb/issues/3704): cluster replication issue for measurement name containing backslash -- [#3681](https://github.com/influxdb/influxdb/issues/3681): Quoted measurement names fail -- [#3681](https://github.com/influxdb/influxdb/issues/3682): Fix inserting string value with backslashes -- [#3735](https://github.com/influxdb/influxdb/issues/3735): Append to small bz1 blocks -- [#3736](https://github.com/influxdb/influxdb/pull/3736): Update shard group duration with retention policy changes. Thanks for the report @papylhomme -- [#3539](https://github.com/influxdb/influxdb/issues/3539): parser incorrectly accepts NaN as numerical value, but not always -- [#3790](https://github.com/influxdb/influxdb/pull/3790): Fix line protocol parsing equals in measurements and NaN values -- [#3778](https://github.com/influxdb/influxdb/pull/3778): Don't panic if SELECT on time. -- [#3824](https://github.com/influxdb/influxdb/issues/3824): tsdb.Point.MarshalBinary needs to support all number types -- [#3828](https://github.com/influxdb/influxdb/pull/3828): Support all number types when decoding a point -- [#3853](https://github.com/influxdb/influxdb/pull/3853): Use 4KB default block size for bz1 -- [#3607](https://github.com/influxdb/influxdb/issues/3607): Fix unable to query influxdb due to deadlock in metastore. Thanks @ccutrer! +- [#3376](https://github.com/influxdata/influxdb/pull/3376): Support for remote shard query mapping +- [#3372](https://github.com/influxdata/influxdb/pull/3372): Support joining nodes to existing cluster +- [#3426](https://github.com/influxdata/influxdb/pull/3426): Additional logging for continuous queries. Thanks @jhorwit2 +- [#3478](https://github.com/influxdata/influxdb/pull/3478): Support incremental cluster joins +- [#3519](https://github.com/influxdata/influxdb/pull/3519): **--BREAKING CHANGE--** Update line protocol to require trailing i for field values that are integers +- [#3529](https://github.com/influxdata/influxdb/pull/3529): Add TLS support for OpenTSDB plugin. Thanks @nathanielc +- [#3421](https://github.com/influxdata/influxdb/issues/3421): Should update metastore and cluster if IP or hostname changes +- [#3502](https://github.com/influxdata/influxdb/pull/3502): Importer for 0.8.9 data via the CLI +- [#3564](https://github.com/influxdata/influxdb/pull/3564): Fix alias, maintain column sort order +- [#3585](https://github.com/influxdata/influxdb/pull/3585): Additional test coverage for non-existent fields +- [#3246](https://github.com/influxdata/influxdb/issues/3246): Allow overriding of configuration parameters using environment variables +- [#3599](https://github.com/influxdata/influxdb/pull/3599): **--BREAKING CHANGE--** Support multiple UDP inputs. Thanks @tpitale +- [#3636](https://github.com/influxdata/influxdb/pull/3639): Cap auto-created retention policy replica count at 3 +- [#3641](https://github.com/influxdata/influxdb/pull/3641): Logging enhancements and single-node rename +- [#3635](https://github.com/influxdata/influxdb/pull/3635): Add build branch to version output. +- [#3115](https://github.com/influxdata/influxdb/pull/3115): Various init.d script improvements. Thanks @KoeSystems. 
+- [#3628](https://github.com/influxdata/influxdb/pull/3628): Wildcard expansion of tags and fields for raw queries
+- [#3721](https://github.com/influxdata/influxdb/pull/3721): interpret number literals compared against time as nanoseconds from epoch
+- [#3514](https://github.com/influxdata/influxdb/issues/3514): Implement WAL outside BoltDB with compaction
+- [#3544](https://github.com/influxdata/influxdb/pull/3544): Implement compression on top of BoltDB
+- [#3795](https://github.com/influxdata/influxdb/pull/3795): Throttle import
+- [#3584](https://github.com/influxdata/influxdb/pull/3584): Import/export documentation
+
+### Bugfixes
+- [#3405](https://github.com/influxdata/influxdb/pull/3405): Prevent database panic when fields are missing. Thanks @jhorwit2
+- [#3411](https://github.com/influxdata/influxdb/issues/3411): 500 timeout on write
+- [#3420](https://github.com/influxdata/influxdb/pull/3420): Catch opentsdb malformed tags. Thanks @nathanielc.
+- [#3404](https://github.com/influxdata/influxdb/pull/3404): Added support for escaped single quotes in query string. Thanks @jhorwit2
+- [#3414](https://github.com/influxdata/influxdb/issues/3414): Shard mappers perform query re-writing
+- [#3525](https://github.com/influxdata/influxdb/pull/3525): check if fields are valid during parse time.
+- [#3511](https://github.com/influxdata/influxdb/issues/3511): Sending a large number of tags causes panic
+- [#3288](https://github.com/influxdata/influxdb/issues/3288): Run go fuzz on the line-protocol input
+- [#3545](https://github.com/influxdata/influxdb/issues/3545): Fix parsing string fields with newlines
+- [#3579](https://github.com/influxdata/influxdb/issues/3579): Revert breaking change to `client.NewClient` function
+- [#3580](https://github.com/influxdata/influxdb/issues/3580): Do not allow wildcards with fields in select statements
+- [#3530](https://github.com/influxdata/influxdb/pull/3530): Aliasing a column no longer works
+- [#3436](https://github.com/influxdata/influxdb/issues/3436): Fix panic in hinted handoff queue processor
+- [#3401](https://github.com/influxdata/influxdb/issues/3401): Derivative on non-numeric fields panics db
+- [#3583](https://github.com/influxdata/influxdb/issues/3583): Inserting value in scientific notation with a trailing i causes panic
+- [#3611](https://github.com/influxdata/influxdb/pull/3611): Fix query arithmetic with integers
+- [#3326](https://github.com/influxdata/influxdb/issues/3326): simple regex query fails with cryptic error
+- [#3618](https://github.com/influxdata/influxdb/pull/3618): Fix collectd stats panic on i386. Thanks @richterger
+- [#3625](https://github.com/influxdata/influxdb/pull/3625): Don't panic when aggregate and raw queries are in a single statement
+- [#3629](https://github.com/influxdata/influxdb/pull/3629): Use sensible batching defaults for Graphite.
+- [#3638](https://github.com/influxdata/influxdb/pull/3638): Cluster config fixes and removal of meta.peers config field
+- [#3640](https://github.com/influxdata/influxdb/pull/3640): Shutdown Graphite service when signal received.
+- [#3632](https://github.com/influxdata/influxdb/issues/3632): Make single-node host renames more seamless
+- [#3656](https://github.com/influxdata/influxdb/issues/3656): Silence snapshotter logger for testing
+- [#3651](https://github.com/influxdata/influxdb/pull/3651): Fully remove series when dropped.
+- [#3517](https://github.com/influxdata/influxdb/pull/3517): Batch CQ writes to avoid timeouts. Thanks @dim.
+- [#3522](https://github.com/influxdata/influxdb/pull/3522): Consume CQ results on request timeouts. Thanks @dim.
+- [#3646](https://github.com/influxdata/influxdb/pull/3646): Fix nil FieldCodec panic.
+- [#3672](https://github.com/influxdata/influxdb/pull/3672): Reduce in-memory index by 20%-30%
+- [#3673](https://github.com/influxdata/influxdb/pull/3673): Improve query performance by removing unnecessary tagset sorting.
+- [#3676](https://github.com/influxdata/influxdb/pull/3676): Improve query performance by memoizing mapper output keys.
+- [#3686](https://github.com/influxdata/influxdb/pull/3686): Ensure 'p' parameter is not logged, even on OPTIONS requests.
+- [#3687](https://github.com/influxdata/influxdb/issues/3687): Fix panic: runtime error: makeslice: len out of range in hinted handoff
+- [#3697](https://github.com/influxdata/influxdb/issues/3697): Correctly merge non-chunked results for same series. Fix issue #3242.
+- [#3708](https://github.com/influxdata/influxdb/issues/3708): Fix double escaping measurement name during cluster replication
+- [#3704](https://github.com/influxdata/influxdb/issues/3704): cluster replication issue for measurement name containing backslash
+- [#3681](https://github.com/influxdata/influxdb/issues/3681): Quoted measurement names fail
+- [#3681](https://github.com/influxdata/influxdb/issues/3682): Fix inserting string value with backslashes
+- [#3735](https://github.com/influxdata/influxdb/issues/3735): Append to small bz1 blocks
+- [#3736](https://github.com/influxdata/influxdb/pull/3736): Update shard group duration with retention policy changes. Thanks for the report @papylhomme
+- [#3539](https://github.com/influxdata/influxdb/issues/3539): parser incorrectly accepts NaN as numerical value, but not always
+- [#3790](https://github.com/influxdata/influxdb/pull/3790): Fix line protocol parsing equals in measurements and NaN values
+- [#3778](https://github.com/influxdata/influxdb/pull/3778): Don't panic if SELECT on time.
+- [#3824](https://github.com/influxdata/influxdb/issues/3824): tsdb.Point.MarshalBinary needs to support all number types
+- [#3828](https://github.com/influxdata/influxdb/pull/3828): Support all number types when decoding a point
+- [#3853](https://github.com/influxdata/influxdb/pull/3853): Use 4KB default block size for bz1
+- [#3607](https://github.com/influxdata/influxdb/issues/3607): Fix unable to query influxdb due to deadlock in metastore. Thanks @ccutrer!

## v0.9.2 [2015-07-24]

### Features
-- [#3177](https://github.com/influxdb/influxdb/pull/3177): Client supports making HTTPS requests. Thanks @jipperinbham
-- [#3299](https://github.com/influxdb/influxdb/pull/3299): Refactor query engine for distributed query support.
-- [#3334](https://github.com/influxdb/influxdb/pull/3334): Clean shutdown of influxd. Thanks @mcastilho
+- [#3177](https://github.com/influxdata/influxdb/pull/3177): Client supports making HTTPS requests. Thanks @jipperinbham
+- [#3299](https://github.com/influxdata/influxdb/pull/3299): Refactor query engine for distributed query support.
+- [#3334](https://github.com/influxdata/influxdb/pull/3334): Clean shutdown of influxd. Thanks @mcastilho

### Bugfixes
-- [#3180](https://github.com/influxdb/influxdb/pull/3180): Log GOMAXPROCS, version, and commit on startup.
-- [#3218](https://github.com/influxdb/influxdb/pull/3218): Allow write timeouts to be configurable.
-- [#3184](https://github.com/influxdb/influxdb/pull/3184): Support basic auth in admin interface. Thanks @jipperinbham!
-- [#3236](https://github.com/influxdb/influxdb/pull/3236): Fix display issues in admin interface. -- [#3232](https://github.com/influxdb/influxdb/pull/3232): Set logging prefix for metastore. -- [#3230](https://github.com/influxdb/influxdb/issues/3230): panic: unable to parse bool value -- [#3245](https://github.com/influxdb/influxdb/issues/3245): Error using graphite plugin with multiple filters -- [#3223](https://github.com/influxdb/influxdb/issues/323): default graphite template cannot have extra tags -- [#3255](https://github.com/influxdb/influxdb/pull/3255): Flush WAL on start-up as soon as possible. -- [#3289](https://github.com/influxdb/influxdb/issues/3289): InfluxDB crashes on floats without decimal -- [#3298](https://github.com/influxdb/influxdb/pull/3298): Corrected WAL & flush parameters in default config. Thanks @jhorwit2 -- [#3152](https://github.com/influxdb/influxdb/issues/3159): High CPU Usage with unsorted writes -- [#3307](https://github.com/influxdb/influxdb/pull/3307): Fix regression parsing boolean values True/False -- [#3304](https://github.com/influxdb/influxdb/pull/3304): Fixed httpd logger to log user from query params. Thanks @jhorwit2 -- [#3332](https://github.com/influxdb/influxdb/pull/3332): Add SLIMIT and SOFFSET to string version of AST. -- [#3335](https://github.com/influxdb/influxdb/pull/3335): Don't drop all data on DROP DATABASE. Thanks to @PierreF for the report -- [#2761](https://github.com/influxdb/influxdb/issues/2761): Make SHOW RETENTION POLICIES consistent with other queries. -- [#3356](https://github.com/influxdb/influxdb/pull/3356): Disregard semicolons after database name in use command. Thanks @timraymond. -- [#3351](https://github.com/influxdb/influxdb/pull/3351): Handle malformed regex comparisons during parsing. Thanks @rnubel -- [#3244](https://github.com/influxdb/influxdb/pull/3244): Wire up admin privilege grant and revoke. -- [#3259](https://github.com/influxdb/influxdb/issues/3259): Respect privileges for queries. -- [#3256](https://github.com/influxdb/influxdb/pull/3256): Remove unnecessary timeout in WaitForLeader(). Thanks @cannium. -- [#3380](https://github.com/influxdb/influxdb/issue/3380): Parser fix, only allow ORDER BY ASC and ORDER BY time ASC. -- [#3319](https://github.com/influxdb/influxdb/issues/3319): restarting process irrevocably BREAKS measurements with spaces -- [#3453](https://github.com/influxdb/influxdb/issues/3453): Remove outdated `dump` command from CLI. -- [#3463](https://github.com/influxdb/influxdb/issues/3463): Fix aggregate queries and time precision on where clauses. +- [#3180](https://github.com/influxdata/influxdb/pull/3180): Log GOMAXPROCS, version, and commit on startup. +- [#3218](https://github.com/influxdata/influxdb/pull/3218): Allow write timeouts to be configurable. +- [#3184](https://github.com/influxdata/influxdb/pull/3184): Support basic auth in admin interface. Thanks @jipperinbham! +- [#3236](https://github.com/influxdata/influxdb/pull/3236): Fix display issues in admin interface. +- [#3232](https://github.com/influxdata/influxdb/pull/3232): Set logging prefix for metastore. 
+- [#3230](https://github.com/influxdata/influxdb/issues/3230): panic: unable to parse bool value +- [#3245](https://github.com/influxdata/influxdb/issues/3245): Error using graphite plugin with multiple filters +- [#3223](https://github.com/influxdata/influxdb/issues/323): default graphite template cannot have extra tags +- [#3255](https://github.com/influxdata/influxdb/pull/3255): Flush WAL on start-up as soon as possible. +- [#3289](https://github.com/influxdata/influxdb/issues/3289): InfluxDB crashes on floats without decimal +- [#3298](https://github.com/influxdata/influxdb/pull/3298): Corrected WAL & flush parameters in default config. Thanks @jhorwit2 +- [#3152](https://github.com/influxdata/influxdb/issues/3159): High CPU Usage with unsorted writes +- [#3307](https://github.com/influxdata/influxdb/pull/3307): Fix regression parsing boolean values True/False +- [#3304](https://github.com/influxdata/influxdb/pull/3304): Fixed httpd logger to log user from query params. Thanks @jhorwit2 +- [#3332](https://github.com/influxdata/influxdb/pull/3332): Add SLIMIT and SOFFSET to string version of AST. +- [#3335](https://github.com/influxdata/influxdb/pull/3335): Don't drop all data on DROP DATABASE. Thanks to @PierreF for the report +- [#2761](https://github.com/influxdata/influxdb/issues/2761): Make SHOW RETENTION POLICIES consistent with other queries. +- [#3356](https://github.com/influxdata/influxdb/pull/3356): Disregard semicolons after database name in use command. Thanks @timraymond. +- [#3351](https://github.com/influxdata/influxdb/pull/3351): Handle malformed regex comparisons during parsing. Thanks @rnubel +- [#3244](https://github.com/influxdata/influxdb/pull/3244): Wire up admin privilege grant and revoke. +- [#3259](https://github.com/influxdata/influxdb/issues/3259): Respect privileges for queries. +- [#3256](https://github.com/influxdata/influxdb/pull/3256): Remove unnecessary timeout in WaitForLeader(). Thanks @cannium. +- [#3380](https://github.com/influxdata/influxdb/issues/3380): Parser fix, only allow ORDER BY ASC and ORDER BY time ASC. +- [#3319](https://github.com/influxdata/influxdb/issues/3319): restarting process irrevocably BREAKS measurements with spaces +- [#3453](https://github.com/influxdata/influxdb/issues/3453): Remove outdated `dump` command from CLI. +- [#3463](https://github.com/influxdata/influxdb/issues/3463): Fix aggregate queries and time precision on where clauses. ## v0.9.1 [2015-07-02] ### Features -- [2650](https://github.com/influxdb/influxdb/pull/2650): Add SHOW GRANTS FOR USER statement. Thanks @n1tr0g. -- [3125](https://github.com/influxdb/influxdb/pull/3125): Graphite Input Protocol Parsing -- [2746](https://github.com/influxdb/influxdb/pull/2746): New Admin UI/interface -- [3036](https://github.com/influxdb/influxdb/pull/3036): Write Ahead Log (WAL) -- [3014](https://github.com/influxdb/influxdb/issues/3014): Implement Raft snapshots +- [2650](https://github.com/influxdata/influxdb/pull/2650): Add SHOW GRANTS FOR USER statement. Thanks @n1tr0g. 
+- [3125](https://github.com/influxdata/influxdb/pull/3125): Graphite Input Protocol Parsing +- [2746](https://github.com/influxdata/influxdb/pull/2746): New Admin UI/interface +- [3036](https://github.com/influxdata/influxdb/pull/3036): Write Ahead Log (WAL) +- [3014](https://github.com/influxdata/influxdb/issues/3014): Implement Raft snapshots ### Bugfixes -- [3013](https://github.com/influxdb/influxdb/issues/3013): Panic error with inserting values with commas -- [#2956](https://github.com/influxdb/influxdb/issues/2956): Type mismatch in derivative -- [#2908](https://github.com/influxdb/influxdb/issues/2908): Field mismatch error messages need to be updated -- [#2931](https://github.com/influxdb/influxdb/pull/2931): Services and reporting should wait until cluster has leader. -- [#2943](https://github.com/influxdb/influxdb/issues/2943): Ensure default retention policies are fully replicated -- [#2948](https://github.com/influxdb/influxdb/issues/2948): Field mismatch error message to include measurement name -- [#2919](https://github.com/influxdb/influxdb/issues/2919): Unable to insert negative floats -- [#2935](https://github.com/influxdb/influxdb/issues/2935): Hook CPU and memory profiling back up. -- [#2960](https://github.com/influxdb/influxdb/issues/2960): Cluster Write Errors. -- [#2928](https://github.com/influxdb/influxdb/pull/2928): Start work to set InfluxDB version in HTTP response headers. Thanks @neonstalwart. -- [#2969](https://github.com/influxdb/influxdb/pull/2969): Actually set HTTP version in responses. -- [#2993](https://github.com/influxdb/influxdb/pull/2993): Don't log each UDP batch. -- [#2994](https://github.com/influxdb/influxdb/pull/2994): Don't panic during wilcard expansion if no default database specified. -- [#3002](https://github.com/influxdb/influxdb/pull/3002): Remove measurement from shard's index on DROP MEASUREMENT. -- [#3021](https://github.com/influxdb/influxdb/pull/3021): Correct set HTTP write trace logging. Thanks @vladlopes. -- [#3027](https://github.com/influxdb/influxdb/pull/3027): Enforce minimum retention policy duration of 1 hour. -- [#3030](https://github.com/influxdb/influxdb/pull/3030): Fix excessive logging of shard creation. -- [#3038](https://github.com/influxdb/influxdb/pull/3038): Don't check deleted shards for precreation. Thanks @vladlopes. -- [#3033](https://github.com/influxdb/influxdb/pull/3033): Add support for marshaling `uint64` in client. -- [#3090](https://github.com/influxdb/influxdb/pull/3090): Remove database from TSDB index on DROP DATABASE. -- [#2944](https://github.com/influxdb/influxdb/issues/2944): Don't require "WHERE time" when creating continuous queries. -- [#3075](https://github.com/influxdb/influxdb/pull/3075): GROUP BY correctly when different tags have same value. -- [#3078](https://github.com/influxdb/influxdb/pull/3078): Fix CLI panic on malformed INSERT. -- [#2102](https://github.com/influxdb/influxdb/issues/2102): Re-work Graphite input and metric processing -- [#2996](https://github.com/influxdb/influxdb/issues/2996): Graphite Input Parsing -- [#3136](https://github.com/influxdb/influxdb/pull/3136): Fix various issues with init.d script. Thanks @ miguelcnf. 
-- [#2996](https://github.com/influxdb/influxdb/issues/2996): Graphite Input Parsing
-- [#3127](https://github.com/influxdb/influxdb/issues/3127): Trying to insert a number larger than the largest signed 64-bit number kills influxd
-- [#3131](https://github.com/influxdb/influxdb/pull/3131): Copy batch tags to each point before marshalling
-- [#3155](https://github.com/influxdb/influxdb/pull/3155): Instantiate UDP batcher before listening for UDP traffic, otherwise a panic may result.
-- [#2678](https://github.com/influxdb/influxdb/issues/2678): Server allows tags with an empty string for the key and/or value
-- [#3061](https://github.com/influxdb/influxdb/issues/3061): syntactically incorrect line protocol insert panics the database
-- [#2608](https://github.com/influxdb/influxdb/issues/2608): drop measurement while writing points to that measurement has race condition that can panic
-- [#3183](https://github.com/influxdb/influxdb/issues/3183): using line protocol measurement names cannot contain commas
-- [#3193](https://github.com/influxdb/influxdb/pull/3193): Fix panic for SHOW STATS and in collectd
-- [#3102](https://github.com/influxdb/influxdb/issues/3102): Add authentication cache
-- [#3209](https://github.com/influxdb/influxdb/pull/3209): Dump Run() errors to stderr
-- [#3217](https://github.com/influxdb/influxdb/pull/3217): Allow WAL partition flush delay to be configurable.
+- [3013](https://github.com/influxdata/influxdb/issues/3013): Panic error with inserting values with commas
+- [#2956](https://github.com/influxdata/influxdb/issues/2956): Type mismatch in derivative
+- [#2908](https://github.com/influxdata/influxdb/issues/2908): Field mismatch error messages need to be updated
+- [#2931](https://github.com/influxdata/influxdb/pull/2931): Services and reporting should wait until cluster has leader.
+- [#2943](https://github.com/influxdata/influxdb/issues/2943): Ensure default retention policies are fully replicated
+- [#2948](https://github.com/influxdata/influxdb/issues/2948): Field mismatch error message to include measurement name
+- [#2919](https://github.com/influxdata/influxdb/issues/2919): Unable to insert negative floats
+- [#2935](https://github.com/influxdata/influxdb/issues/2935): Hook CPU and memory profiling back up.
+- [#2960](https://github.com/influxdata/influxdb/issues/2960): Cluster Write Errors.
+- [#2928](https://github.com/influxdata/influxdb/pull/2928): Start work to set InfluxDB version in HTTP response headers. Thanks @neonstalwart.
+- [#2969](https://github.com/influxdata/influxdb/pull/2969): Actually set HTTP version in responses.
+- [#2993](https://github.com/influxdata/influxdb/pull/2993): Don't log each UDP batch.
+- [#2994](https://github.com/influxdata/influxdb/pull/2994): Don't panic during wildcard expansion if no default database specified.
+- [#3002](https://github.com/influxdata/influxdb/pull/3002): Remove measurement from shard's index on DROP MEASUREMENT.
+- [#3021](https://github.com/influxdata/influxdb/pull/3021): Correct set HTTP write trace logging. Thanks @vladlopes.
+- [#3027](https://github.com/influxdata/influxdb/pull/3027): Enforce minimum retention policy duration of 1 hour.
+- [#3030](https://github.com/influxdata/influxdb/pull/3030): Fix excessive logging of shard creation.
+- [#3038](https://github.com/influxdata/influxdb/pull/3038): Don't check deleted shards for precreation. Thanks @vladlopes.
+- [#3033](https://github.com/influxdata/influxdb/pull/3033): Add support for marshaling `uint64` in client.
+- [#3090](https://github.com/influxdata/influxdb/pull/3090): Remove database from TSDB index on DROP DATABASE. +- [#2944](https://github.com/influxdata/influxdb/issues/2944): Don't require "WHERE time" when creating continuous queries. +- [#3075](https://github.com/influxdata/influxdb/pull/3075): GROUP BY correctly when different tags have same value. +- [#3078](https://github.com/influxdata/influxdb/pull/3078): Fix CLI panic on malformed INSERT. +- [#2102](https://github.com/influxdata/influxdb/issues/2102): Re-work Graphite input and metric processing +- [#2996](https://github.com/influxdata/influxdb/issues/2996): Graphite Input Parsing +- [#3136](https://github.com/influxdata/influxdb/pull/3136): Fix various issues with init.d script. Thanks @ miguelcnf. +- [#2996](https://github.com/influxdata/influxdb/issues/2996): Graphite Input Parsing +- [#3127](https://github.com/influxdata/influxdb/issues/3127): Trying to insert a number larger than the largest signed 64-bit number kills influxd +- [#3131](https://github.com/influxdata/influxdb/pull/3131): Copy batch tags to each point before marshalling +- [#3155](https://github.com/influxdata/influxdb/pull/3155): Instantiate UDP batcher before listening for UDP traffic, otherwise a panic may result. +- [#2678](https://github.com/influxdata/influxdb/issues/2678): Server allows tags with an empty string for the key and/or value +- [#3061](https://github.com/influxdata/influxdb/issues/3061): syntactically incorrect line protocol insert panics the database +- [#2608](https://github.com/influxdata/influxdb/issues/2608): drop measurement while writing points to that measurement has race condition that can panic +- [#3183](https://github.com/influxdata/influxdb/issues/3183): using line protocol measurement names cannot contain commas +- [#3193](https://github.com/influxdata/influxdb/pull/3193): Fix panic for SHOW STATS and in collectd +- [#3102](https://github.com/influxdata/influxdb/issues/3102): Add authentication cache +- [#3209](https://github.com/influxdata/influxdb/pull/3209): Dump Run() errors to stderr +- [#3217](https://github.com/influxdata/influxdb/pull/3217): Allow WAL partition flush delay to be configurable. ## v0.9.0 [2015-06-11] ### Bugfixes -- [#2869](https://github.com/influxdb/influxdb/issues/2869): Adding field to existing measurement causes panic -- [#2849](https://github.com/influxdb/influxdb/issues/2849): RC32: Frequent write errors -- [#2700](https://github.com/influxdb/influxdb/issues/2700): Incorrect error message in database EncodeFields -- [#2897](https://github.com/influxdb/influxdb/pull/2897): Ensure target Graphite database exists -- [#2898](https://github.com/influxdb/influxdb/pull/2898): Ensure target openTSDB database exists -- [#2895](https://github.com/influxdb/influxdb/pull/2895): Use Graphite input defaults where necessary -- [#2900](https://github.com/influxdb/influxdb/pull/2900): Use openTSDB input defaults where necessary -- [#2886](https://github.com/influxdb/influxdb/issues/2886): Refactor backup & restore -- [#2804](https://github.com/influxdb/influxdb/pull/2804): BREAKING: change time literals to be single quoted in InfluxQL. Thanks @nvcook42! -- [#2906](https://github.com/influxdb/influxdb/pull/2906): Restrict replication factor to the cluster size -- [#2905](https://github.com/influxdb/influxdb/pull/2905): Restrict clusters to 3 peers -- [#2904](https://github.com/influxdb/influxdb/pull/2904): Re-enable server reporting. -- [#2917](https://github.com/influxdb/influxdb/pull/2917): Fix int64 field values. 
-- [#2920](https://github.com/influxdb/influxdb/issues/2920): Ensure collectd database exists +- [#2869](https://github.com/influxdata/influxdb/issues/2869): Adding field to existing measurement causes panic +- [#2849](https://github.com/influxdata/influxdb/issues/2849): RC32: Frequent write errors +- [#2700](https://github.com/influxdata/influxdb/issues/2700): Incorrect error message in database EncodeFields +- [#2897](https://github.com/influxdata/influxdb/pull/2897): Ensure target Graphite database exists +- [#2898](https://github.com/influxdata/influxdb/pull/2898): Ensure target openTSDB database exists +- [#2895](https://github.com/influxdata/influxdb/pull/2895): Use Graphite input defaults where necessary +- [#2900](https://github.com/influxdata/influxdb/pull/2900): Use openTSDB input defaults where necessary +- [#2886](https://github.com/influxdata/influxdb/issues/2886): Refactor backup & restore +- [#2804](https://github.com/influxdata/influxdb/pull/2804): BREAKING: change time literals to be single quoted in InfluxQL. Thanks @nvcook42! +- [#2906](https://github.com/influxdata/influxdb/pull/2906): Restrict replication factor to the cluster size +- [#2905](https://github.com/influxdata/influxdb/pull/2905): Restrict clusters to 3 peers +- [#2904](https://github.com/influxdata/influxdb/pull/2904): Re-enable server reporting. +- [#2917](https://github.com/influxdata/influxdb/pull/2917): Fix int64 field values. +- [#2920](https://github.com/influxdata/influxdb/issues/2920): Ensure collectd database exists ## v0.9.0-rc33 [2015-06-09] ### Bugfixes -- [#2816](https://github.com/influxdb/influxdb/pull/2816): Enable UDP service. Thanks @renan- -- [#2824](https://github.com/influxdb/influxdb/pull/2824): Add missing call to WaitGroup.Done in execConn. Thanks @liyichao -- [#2823](https://github.com/influxdb/influxdb/pull/2823): Convert OpenTSDB to a service. -- [#2838](https://github.com/influxdb/influxdb/pull/2838): Set auto-created retention policy period to infinite. -- [#2829](https://github.com/influxdb/influxdb/pull/2829): Re-enable Graphite support as a new Service-style component. -- [#2814](https://github.com/influxdb/influxdb/issues/2814): Convert collectd to a service. -- [#2852](https://github.com/influxdb/influxdb/pull/2852): Don't panic when altering retention policies. Thanks for the report @huhongbo -- [#2857](https://github.com/influxdb/influxdb/issues/2857): Fix parsing commas in string field values. -- [#2833](https://github.com/influxdb/influxdb/pull/2833): Make the default config valid. -- [#2859](https://github.com/influxdb/influxdb/pull/2859): Fix panic on aggregate functions. -- [#2878](https://github.com/influxdb/influxdb/pull/2878): Re-enable shard precreation. -- [2865](https://github.com/influxdb/influxdb/pull/2865) -- Return an empty set of results if database does not exist in shard metadata. +- [#2816](https://github.com/influxdata/influxdb/pull/2816): Enable UDP service. Thanks @renan- +- [#2824](https://github.com/influxdata/influxdb/pull/2824): Add missing call to WaitGroup.Done in execConn. Thanks @liyichao +- [#2823](https://github.com/influxdata/influxdb/pull/2823): Convert OpenTSDB to a service. +- [#2838](https://github.com/influxdata/influxdb/pull/2838): Set auto-created retention policy period to infinite. +- [#2829](https://github.com/influxdata/influxdb/pull/2829): Re-enable Graphite support as a new Service-style component. +- [#2814](https://github.com/influxdata/influxdb/issues/2814): Convert collectd to a service. 
+- [#2852](https://github.com/influxdata/influxdb/pull/2852): Don't panic when altering retention policies. Thanks for the report @huhongbo +- [#2857](https://github.com/influxdata/influxdb/issues/2857): Fix parsing commas in string field values. +- [#2833](https://github.com/influxdata/influxdb/pull/2833): Make the default config valid. +- [#2859](https://github.com/influxdata/influxdb/pull/2859): Fix panic on aggregate functions. +- [#2878](https://github.com/influxdata/influxdb/pull/2878): Re-enable shard precreation. +- [2865](https://github.com/influxdata/influxdb/pull/2865) -- Return an empty set of results if database does not exist in shard metadata. ### Features -- [2858](https://github.com/influxdb/influxdb/pull/2858): Support setting openTSDB write consistency. +- [2858](https://github.com/influxdata/influxdb/pull/2858): Support setting openTSDB write consistency. ## v0.9.0-rc32 [2015-06-07] @@ -487,48 +1029,48 @@ This released introduced an updated write path and clustering design. The data format has also changed, so you'll need to wipe out your data to upgrade from RC31. There should be no other data changes before v0.9.0 is released. ### Features -- [#1997](https://github.com/influxdb/influxdb/pull/1997): Update SELECT * to return tag values. -- [#2599](https://github.com/influxdb/influxdb/issues/2599): Add "epoch" URL param and return JSON time values as epoch instead of date strings. -- [#2682](https://github.com/influxdb/influxdb/issues/2682): Adding pr checklist to CONTRIBUTING.md -- [#2683](https://github.com/influxdb/influxdb/issues/2683): Add batching support to Graphite inputs. -- [#2687](https://github.com/influxdb/influxdb/issues/2687): Add batching support to Collectd inputs. -- [#2696](https://github.com/influxdb/influxdb/pull/2696): Add line protocol. This is now the preferred way to write data. -- [#2751](https://github.com/influxdb/influxdb/pull/2751): Add UDP input. UDP only supports the line protocol now. -- [#2684](https://github.com/influxdb/influxdb/pull/2684): Include client timeout configuration. Thanks @vladlopes! +- [#1997](https://github.com/influxdata/influxdb/pull/1997): Update SELECT * to return tag values. +- [#2599](https://github.com/influxdata/influxdb/issues/2599): Add "epoch" URL param and return JSON time values as epoch instead of date strings. +- [#2682](https://github.com/influxdata/influxdb/issues/2682): Adding pr checklist to CONTRIBUTING.md +- [#2683](https://github.com/influxdata/influxdb/issues/2683): Add batching support to Graphite inputs. +- [#2687](https://github.com/influxdata/influxdb/issues/2687): Add batching support to Collectd inputs. +- [#2696](https://github.com/influxdata/influxdb/pull/2696): Add line protocol. This is now the preferred way to write data. +- [#2751](https://github.com/influxdata/influxdb/pull/2751): Add UDP input. UDP only supports the line protocol now. +- [#2684](https://github.com/influxdata/influxdb/pull/2684): Include client timeout configuration. Thanks @vladlopes! ### Bugfixes -- [#2776](https://github.com/influxdb/influxdb/issues/2776): Re-implement retention policy enforcement. -- [#2635](https://github.com/influxdb/influxdb/issues/2635): Fix querying against boolean field in WHERE clause. -- [#2644](https://github.com/influxdb/influxdb/issues/2644): Make SHOW queries work with FROM //. -- [#2501](https://github.com/influxdb/influxdb/issues/2501): Name the FlagSet for the shell and add a version flag. 
Thanks @neonstalwart -- [#2647](https://github.com/influxdb/influxdb/issues/2647): Fixes typos in sample config file - thanks @claws! +- [#2776](https://github.com/influxdata/influxdb/issues/2776): Re-implement retention policy enforcement. +- [#2635](https://github.com/influxdata/influxdb/issues/2635): Fix querying against boolean field in WHERE clause. +- [#2644](https://github.com/influxdata/influxdb/issues/2644): Make SHOW queries work with FROM //. +- [#2501](https://github.com/influxdata/influxdb/issues/2501): Name the FlagSet for the shell and add a version flag. Thanks @neonstalwart +- [#2647](https://github.com/influxdata/influxdb/issues/2647): Fixes typos in sample config file - thanks @claws! ## v0.9.0-rc31 [2015-05-21] ### Features -- [#1822](https://github.com/influxdb/influxdb/issues/1822): Wire up DERIVATIVE aggregate -- [#1477](https://github.com/influxdb/influxdb/issues/1477): Wire up non_negative_derivative function -- [#2557](https://github.com/influxdb/influxdb/issues/2557): Fix false positive error with `GROUP BY time` -- [#1891](https://github.com/influxdb/influxdb/issues/1891): Wire up COUNT DISTINCT aggregate -- [#1989](https://github.com/influxdb/influxdb/issues/1989): Implement `SELECT tagName FROM m` - -### Bugfixes -- [#2545](https://github.com/influxdb/influxdb/pull/2545): Use "value" as the field name for graphite input. Thanks @cannium. -- [#2558](https://github.com/influxdb/influxdb/pull/2558): Fix client response check - thanks @vladlopes! -- [#2566](https://github.com/influxdb/influxdb/pull/2566): Wait until each data write has been commited by the Raft cluster. -- [#2602](https://github.com/influxdb/influxdb/pull/2602): CLI execute command exits without cleaning up liner package. -- [#2610](https://github.com/influxdb/influxdb/pull/2610): Fix shard group creation -- [#2596](https://github.com/influxdb/influxdb/pull/2596): RC30: `panic: runtime error: index out of range` when insert data points. -- [#2592](https://github.com/influxdb/influxdb/pull/2592): Should return an error if user attempts to group by a field. -- [#2499](https://github.com/influxdb/influxdb/pull/2499): Issuing a select query with tag as a values causes panic. -- [#2612](https://github.com/influxdb/influxdb/pull/2612): Query planner should validate distinct is passed a field. -- [#2531](https://github.com/influxdb/influxdb/issues/2531): Fix select with 3 or more terms in where clause. -- [#2564](https://github.com/influxdb/influxdb/issues/2564): Change "name" to "measurement" in JSON for writes. +- [#1822](https://github.com/influxdata/influxdb/issues/1822): Wire up DERIVATIVE aggregate +- [#1477](https://github.com/influxdata/influxdb/issues/1477): Wire up non_negative_derivative function +- [#2557](https://github.com/influxdata/influxdb/issues/2557): Fix false positive error with `GROUP BY time` +- [#1891](https://github.com/influxdata/influxdb/issues/1891): Wire up COUNT DISTINCT aggregate +- [#1989](https://github.com/influxdata/influxdb/issues/1989): Implement `SELECT tagName FROM m` + +### Bugfixes +- [#2545](https://github.com/influxdata/influxdb/pull/2545): Use "value" as the field name for graphite input. Thanks @cannium. +- [#2558](https://github.com/influxdata/influxdb/pull/2558): Fix client response check - thanks @vladlopes! +- [#2566](https://github.com/influxdata/influxdb/pull/2566): Wait until each data write has been commited by the Raft cluster. +- [#2602](https://github.com/influxdata/influxdb/pull/2602): CLI execute command exits without cleaning up liner package. 
+- [#2610](https://github.com/influxdata/influxdb/pull/2610): Fix shard group creation +- [#2596](https://github.com/influxdata/influxdb/pull/2596): RC30: `panic: runtime error: index out of range` when insert data points. +- [#2592](https://github.com/influxdata/influxdb/pull/2592): Should return an error if user attempts to group by a field. +- [#2499](https://github.com/influxdata/influxdb/pull/2499): Issuing a select query with tag as a values causes panic. +- [#2612](https://github.com/influxdata/influxdb/pull/2612): Query planner should validate distinct is passed a field. +- [#2531](https://github.com/influxdata/influxdb/issues/2531): Fix select with 3 or more terms in where clause. +- [#2564](https://github.com/influxdata/influxdb/issues/2564): Change "name" to "measurement" in JSON for writes. ## PRs -- [#2569](https://github.com/influxdb/influxdb/pull/2569): Add derivative functions -- [#2598](https://github.com/influxdb/influxdb/pull/2598): Implement tag support in SELECT statements -- [#2624](https://github.com/influxdb/influxdb/pull/2624): Remove references to SeriesID in `DROP SERIES` handlers. +- [#2569](https://github.com/influxdata/influxdb/pull/2569): Add derivative functions +- [#2598](https://github.com/influxdata/influxdb/pull/2598): Implement tag support in SELECT statements +- [#2624](https://github.com/influxdata/influxdb/pull/2624): Remove references to SeriesID in `DROP SERIES` handlers. ## v0.9.0-rc30 [2015-05-12] @@ -537,389 +1079,389 @@ This release has a breaking API change for writes -- the field previously called `timestamp` has been renamed to `time`. ### Features -- [#2254](https://github.com/influxdb/influxdb/pull/2254): Add Support for OpenTSDB HTTP interface. Thanks @tcolgate -- [#2525](https://github.com/influxdb/influxdb/pull/2525): Serve broker diagnostics over HTTP -- [#2186](https://github.com/influxdb/influxdb/pull/2186): The default status code for queries is now `200 OK` -- [#2298](https://github.com/influxdb/influxdb/pull/2298): Successful writes now return a status code of `204 No Content` - thanks @neonstalwart! -- [#2549](https://github.com/influxdb/influxdb/pull/2549): Raft election timeout to 5 seconds, so system is more forgiving of CPU loads. -- [#2568](https://github.com/influxdb/influxdb/pull/2568): Wire up SELECT DISTINCT. - -### Bugfixes -- [#2535](https://github.com/influxdb/influxdb/pull/2535): Return exit status 0 if influxd already running. Thanks @haim0n. -- [#2521](https://github.com/influxdb/influxdb/pull/2521): Don't truncate topic data until fully replicated. -- [#2509](https://github.com/influxdb/influxdb/pull/2509): Parse config file correctly during restore. Thanks @neonstalwart -- [#2536](https://github.com/influxdb/influxdb/issues/2532): Set leader ID on restart of single-node cluster. -- [#2448](https://github.com/influxdb/influxdb/pull/2448): Fix inconsistent data type - thanks @cannium! -- [#2108](https://github.com/influxdb/influxdb/issues/2108): Change `timestamp` to `time` - thanks @neonstalwart! -- [#2539](https://github.com/influxdb/influxdb/issues/2539): Add additional vote request logging. -- [#2541](https://github.com/influxdb/influxdb/issues/2541): Update messaging client connection index with every message. -- [#2542](https://github.com/influxdb/influxdb/issues/2542): Throw parser error for invalid aggregate without where time. -- [#2548](https://github.com/influxdb/influxdb/issues/2548): Return an error when numeric aggregate applied to non-numeric data. 
-- [#2487](https://github.com/influxdb/influxdb/issues/2487): Aggregate query with exact timestamp causes panic. Thanks @neonstalwart! -- [#2552](https://github.com/influxdb/influxdb/issues/2552): Run CQ that is actually passed into go-routine. -- [#2553](https://github.com/influxdb/influxdb/issues/2553): Fix race condition during CQ execution. -- [#2557](https://github.com/influxdb/influxdb/issues/2557): RC30 WHERE time filter Regression. +- [#2254](https://github.com/influxdata/influxdb/pull/2254): Add Support for OpenTSDB HTTP interface. Thanks @tcolgate +- [#2525](https://github.com/influxdata/influxdb/pull/2525): Serve broker diagnostics over HTTP +- [#2186](https://github.com/influxdata/influxdb/pull/2186): The default status code for queries is now `200 OK` +- [#2298](https://github.com/influxdata/influxdb/pull/2298): Successful writes now return a status code of `204 No Content` - thanks @neonstalwart! +- [#2549](https://github.com/influxdata/influxdb/pull/2549): Raft election timeout to 5 seconds, so system is more forgiving of CPU loads. +- [#2568](https://github.com/influxdata/influxdb/pull/2568): Wire up SELECT DISTINCT. + +### Bugfixes +- [#2535](https://github.com/influxdata/influxdb/pull/2535): Return exit status 0 if influxd already running. Thanks @haim0n. +- [#2521](https://github.com/influxdata/influxdb/pull/2521): Don't truncate topic data until fully replicated. +- [#2509](https://github.com/influxdata/influxdb/pull/2509): Parse config file correctly during restore. Thanks @neonstalwart +- [#2536](https://github.com/influxdata/influxdb/issues/2532): Set leader ID on restart of single-node cluster. +- [#2448](https://github.com/influxdata/influxdb/pull/2448): Fix inconsistent data type - thanks @cannium! +- [#2108](https://github.com/influxdata/influxdb/issues/2108): Change `timestamp` to `time` - thanks @neonstalwart! +- [#2539](https://github.com/influxdata/influxdb/issues/2539): Add additional vote request logging. +- [#2541](https://github.com/influxdata/influxdb/issues/2541): Update messaging client connection index with every message. +- [#2542](https://github.com/influxdata/influxdb/issues/2542): Throw parser error for invalid aggregate without where time. +- [#2548](https://github.com/influxdata/influxdb/issues/2548): Return an error when numeric aggregate applied to non-numeric data. +- [#2487](https://github.com/influxdata/influxdb/issues/2487): Aggregate query with exact timestamp causes panic. Thanks @neonstalwart! +- [#2552](https://github.com/influxdata/influxdb/issues/2552): Run CQ that is actually passed into go-routine. +- [#2553](https://github.com/influxdata/influxdb/issues/2553): Fix race condition during CQ execution. +- [#2557](https://github.com/influxdata/influxdb/issues/2557): RC30 WHERE time filter Regression. ## v0.9.0-rc29 [2015-05-05] ### Features -- [#2410](https://github.com/influxdb/influxdb/pull/2410): If needed, brokers respond with data nodes for peer shard replication. -- [#2469](https://github.com/influxdb/influxdb/pull/2469): Reduce default max topic size from 1GB to 50MB. -- [#1824](https://github.com/influxdb/influxdb/pull/1824): Wire up MEDIAN aggregate. Thanks @neonstalwart! +- [#2410](https://github.com/influxdata/influxdb/pull/2410): If needed, brokers respond with data nodes for peer shard replication. +- [#2469](https://github.com/influxdata/influxdb/pull/2469): Reduce default max topic size from 1GB to 50MB. +- [#1824](https://github.com/influxdata/influxdb/pull/1824): Wire up MEDIAN aggregate. Thanks @neonstalwart! 
 ### Bugfixes
-- [#2446](https://github.com/influxdb/influxdb/pull/2446): Correctly count number of queries executed. Thanks @neonstalwart
-- [#2452](https://github.com/influxdb/influxdb/issues/2452): Fix panic with shard stats on multiple clusters
-- [#2453](https://github.com/influxdb/influxdb/pull/2453): Do not require snapshot on Log.WriteEntriesTo().
-- [#2460](https://github.com/influxdb/influxdb/issues/2460): Collectd input should use "value" for fields values. Fixes 2412. Thanks @josh-padnick
-- [#2465](https://github.com/influxdb/influxdb/pull/2465): HTTP response logging paniced with chunked requests. Thanks @Jackkoz
-- [#2475](https://github.com/influxdb/influxdb/pull/2475): RLock server when checking if shards groups are required during write.
-- [#2471](https://github.com/influxdb/influxdb/issues/2471): Function calls normalized to be lower case. Fixes percentile not working when called uppercase. Thanks @neonstalwart
-- [#2281](https://github.com/influxdb/influxdb/issues/2281): Fix Bad Escape error when parsing regex
+- [#2446](https://github.com/influxdata/influxdb/pull/2446): Correctly count number of queries executed. Thanks @neonstalwart
+- [#2452](https://github.com/influxdata/influxdb/issues/2452): Fix panic with shard stats on multiple clusters
+- [#2453](https://github.com/influxdata/influxdb/pull/2453): Do not require snapshot on Log.WriteEntriesTo().
+- [#2460](https://github.com/influxdata/influxdb/issues/2460): Collectd input should use "value" for fields values. Fixes 2412. Thanks @josh-padnick
+- [#2465](https://github.com/influxdata/influxdb/pull/2465): HTTP response logging panicked with chunked requests. Thanks @Jackkoz
+- [#2475](https://github.com/influxdata/influxdb/pull/2475): RLock server when checking if shards groups are required during write.
+- [#2471](https://github.com/influxdata/influxdb/issues/2471): Function calls normalized to be lower case. Fixes percentile not working when called uppercase. Thanks @neonstalwart
+- [#2281](https://github.com/influxdata/influxdb/issues/2281): Fix Bad Escape error when parsing regex
 ## v0.9.0-rc28 [2015-04-27]
 ### Features
-- [#2410](https://github.com/influxdb/influxdb/pull/2410) Allow configuration of Raft timers
-- [#2354](https://github.com/influxdb/influxdb/pull/2354) Wire up STDDEV. Thanks @neonstalwart!
+- [#2410](https://github.com/influxdata/influxdb/pull/2410) Allow configuration of Raft timers
+- [#2354](https://github.com/influxdata/influxdb/pull/2354) Wire up STDDEV. Thanks @neonstalwart!
 ### Bugfixes
-- [#2374](https://github.com/influxdb/influxdb/issues/2374): Two different panics during SELECT percentile
-- [#2404](https://github.com/influxdb/influxdb/pull/2404): Mean and percentile function fixes
-- [#2408](https://github.com/influxdb/influxdb/pull/2408): Fix snapshot 500 error
-- [#1896](https://github.com/influxdb/influxdb/issues/1896): Excessive heartbeater logging of "connection refused" on cluster node stop
-- [#2418](https://github.com/influxdb/influxdb/pull/2418): Fix raft node getting stuck in candidate state
-- [#2415](https://github.com/influxdb/influxdb/pull/2415): Raft leader ID now set on election after failover. Thanks @xiaost
-- [#2426](https://github.com/influxdb/influxdb/pull/2426): Fix race condition around listener address in openTSDB server.
-- [#2426](https://github.com/influxdb/influxdb/pull/2426): Fix race condition around listener address in Graphite server.
-- [#2429](https://github.com/influxdb/influxdb/pull/2429): Ensure no field value is null.
-- [#2431](https://github.com/influxdb/influxdb/pull/2431): Always append shard path in diags. Thanks @marcosnils -- [#2441](https://github.com/influxdb/influxdb/pull/2441): Correctly release server RLock during "drop series". -- [#2445](https://github.com/influxdb/influxdb/pull/2445): Read locks and data race fixes +- [#2374](https://github.com/influxdata/influxdb/issues/2374): Two different panics during SELECT percentile +- [#2404](https://github.com/influxdata/influxdb/pull/2404): Mean and percentile function fixes +- [#2408](https://github.com/influxdata/influxdb/pull/2408): Fix snapshot 500 error +- [#1896](https://github.com/influxdata/influxdb/issues/1896): Excessive heartbeater logging of "connection refused" on cluster node stop +- [#2418](https://github.com/influxdata/influxdb/pull/2418): Fix raft node getting stuck in candidate state +- [#2415](https://github.com/influxdata/influxdb/pull/2415): Raft leader ID now set on election after failover. Thanks @xiaost +- [#2426](https://github.com/influxdata/influxdb/pull/2426): Fix race condition around listener address in openTSDB server. +- [#2426](https://github.com/influxdata/influxdb/pull/2426): Fix race condition around listener address in Graphite server. +- [#2429](https://github.com/influxdata/influxdb/pull/2429): Ensure no field value is null. +- [#2431](https://github.com/influxdata/influxdb/pull/2431): Always append shard path in diags. Thanks @marcosnils +- [#2441](https://github.com/influxdata/influxdb/pull/2441): Correctly release server RLock during "drop series". +- [#2445](https://github.com/influxdata/influxdb/pull/2445): Read locks and data race fixes ## v0.9.0-rc27 [04-23-2015] ### Features -- [#2398](https://github.com/influxdb/influxdb/pull/2398) Track more stats and report errors for shards. +- [#2398](https://github.com/influxdata/influxdb/pull/2398) Track more stats and report errors for shards. ### Bugfixes -- [#2370](https://github.com/influxdb/influxdb/pull/2370): Fix data race in openTSDB endpoint. -- [#2371](https://github.com/influxdb/influxdb/pull/2371): Don't set client to nil when closing broker Fixes #2352 -- [#2372](https://github.com/influxdb/influxdb/pull/2372): Fix data race in graphite endpoint. -- [#2373](https://github.com/influxdb/influxdb/pull/2373): Actually allow HTTP logging to be controlled. -- [#2376](https://github.com/influxdb/influxdb/pull/2376): Encode all types of integers. Thanks @jtakkala. -- [#2376](https://github.com/influxdb/influxdb/pull/2376): Add shard path to existing diags value. Fix issue #2369. -- [#2386](https://github.com/influxdb/influxdb/pull/2386): Fix shard datanodes stats getting appended too many times -- [#2393](https://github.com/influxdb/influxdb/pull/2393): Fix default hostname for connecting to cluster. -- [#2390](https://github.com/influxdb/influxdb/pull/2390): Handle large sums when calculating means - thanks @neonstalwart! -- [#2391](https://github.com/influxdb/influxdb/pull/2391): Unable to write points through Go client when authentication enabled -- [#2400](https://github.com/influxdb/influxdb/pull/2400): Always send auth headers for client requests if present +- [#2370](https://github.com/influxdata/influxdb/pull/2370): Fix data race in openTSDB endpoint. +- [#2371](https://github.com/influxdata/influxdb/pull/2371): Don't set client to nil when closing broker Fixes #2352 +- [#2372](https://github.com/influxdata/influxdb/pull/2372): Fix data race in graphite endpoint. 
+- [#2373](https://github.com/influxdata/influxdb/pull/2373): Actually allow HTTP logging to be controlled. +- [#2376](https://github.com/influxdata/influxdb/pull/2376): Encode all types of integers. Thanks @jtakkala. +- [#2376](https://github.com/influxdata/influxdb/pull/2376): Add shard path to existing diags value. Fix issue #2369. +- [#2386](https://github.com/influxdata/influxdb/pull/2386): Fix shard datanodes stats getting appended too many times +- [#2393](https://github.com/influxdata/influxdb/pull/2393): Fix default hostname for connecting to cluster. +- [#2390](https://github.com/influxdata/influxdb/pull/2390): Handle large sums when calculating means - thanks @neonstalwart! +- [#2391](https://github.com/influxdata/influxdb/pull/2391): Unable to write points through Go client when authentication enabled +- [#2400](https://github.com/influxdata/influxdb/pull/2400): Always send auth headers for client requests if present ## v0.9.0-rc26 [04-21-2015] ### Features -- [#2301](https://github.com/influxdb/influxdb/pull/2301): Distributed query load balancing and failover -- [#2336](https://github.com/influxdb/influxdb/pull/2336): Handle distributed queries when shards != data nodes -- [#2353](https://github.com/influxdb/influxdb/pull/2353): Distributed Query/Clustering Fixes - -### Bugfixes -- [#2297](https://github.com/influxdb/influxdb/pull/2297): create /var/run during startup. Thanks @neonstalwart. -- [#2312](https://github.com/influxdb/influxdb/pull/2312): Re-use httpclient for continuous queries -- [#2318](https://github.com/influxdb/influxdb/pull/2318): Remove pointless use of 'done' channel for collectd. -- [#2242](https://github.com/influxdb/influxdb/pull/2242): Distributed Query should balance requests -- [#2243](https://github.com/influxdb/influxdb/pull/2243): Use Limit Reader instead of fixed 1MB/1GB slice for DQ -- [#2190](https://github.com/influxdb/influxdb/pull/2190): Implement failover to other data nodes for distributed queries -- [#2324](https://github.com/influxdb/influxdb/issues/2324): Race in Broker.Close()/Broker.RunContinousQueryProcessing() -- [#2325](https://github.com/influxdb/influxdb/pull/2325): Cluster open fixes -- [#2326](https://github.com/influxdb/influxdb/pull/2326): Fix parse error in CREATE CONTINUOUS QUERY -- [#2300](https://github.com/influxdb/influxdb/pull/2300): Refactor integration tests. Properly close Graphite/OpenTSDB listeners. -- [#2338](https://github.com/influxdb/influxdb/pull/2338): Fix panic if tag key isn't double quoted when it should have been -- [#2340](https://github.com/influxdb/influxdb/pull/2340): Fix SHOW DIAGNOSTICS panic if any shard was non-local. -- [#2351](https://github.com/influxdb/influxdb/pull/2351): Fix data race by rlocking shard during diagnostics. -- [#2348](https://github.com/influxdb/influxdb/pull/2348): Data node fail to join cluster in 0.9.0rc25 -- [#2343](https://github.com/influxdb/influxdb/pull/2343): Node falls behind Metastore updates -- [#2334](https://github.com/influxdb/influxdb/pull/2334): Test Partial replication is very problematic -- [#2272](https://github.com/influxdb/influxdb/pull/2272): clustering: influxdb 0.9.0-rc23 panics when doing a GET with merge_metrics in a -- [#2350](https://github.com/influxdb/influxdb/pull/2350): Issue fix for :influxd -hostname localhost. -- [#2367](https://github.com/influxdb/influxdb/pull/2367): PR for issue #2350 - Always use localhost, not host name. 
+- [#2301](https://github.com/influxdata/influxdb/pull/2301): Distributed query load balancing and failover +- [#2336](https://github.com/influxdata/influxdb/pull/2336): Handle distributed queries when shards != data nodes +- [#2353](https://github.com/influxdata/influxdb/pull/2353): Distributed Query/Clustering Fixes + +### Bugfixes +- [#2297](https://github.com/influxdata/influxdb/pull/2297): create /var/run during startup. Thanks @neonstalwart. +- [#2312](https://github.com/influxdata/influxdb/pull/2312): Re-use httpclient for continuous queries +- [#2318](https://github.com/influxdata/influxdb/pull/2318): Remove pointless use of 'done' channel for collectd. +- [#2242](https://github.com/influxdata/influxdb/pull/2242): Distributed Query should balance requests +- [#2243](https://github.com/influxdata/influxdb/pull/2243): Use Limit Reader instead of fixed 1MB/1GB slice for DQ +- [#2190](https://github.com/influxdata/influxdb/pull/2190): Implement failover to other data nodes for distributed queries +- [#2324](https://github.com/influxdata/influxdb/issues/2324): Race in Broker.Close()/Broker.RunContinousQueryProcessing() +- [#2325](https://github.com/influxdata/influxdb/pull/2325): Cluster open fixes +- [#2326](https://github.com/influxdata/influxdb/pull/2326): Fix parse error in CREATE CONTINUOUS QUERY +- [#2300](https://github.com/influxdata/influxdb/pull/2300): Refactor integration tests. Properly close Graphite/OpenTSDB listeners. +- [#2338](https://github.com/influxdata/influxdb/pull/2338): Fix panic if tag key isn't double quoted when it should have been +- [#2340](https://github.com/influxdata/influxdb/pull/2340): Fix SHOW DIAGNOSTICS panic if any shard was non-local. +- [#2351](https://github.com/influxdata/influxdb/pull/2351): Fix data race by rlocking shard during diagnostics. +- [#2348](https://github.com/influxdata/influxdb/pull/2348): Data node fail to join cluster in 0.9.0rc25 +- [#2343](https://github.com/influxdata/influxdb/pull/2343): Node falls behind Metastore updates +- [#2334](https://github.com/influxdata/influxdb/pull/2334): Test Partial replication is very problematic +- [#2272](https://github.com/influxdata/influxdb/pull/2272): clustering: influxdb 0.9.0-rc23 panics when doing a GET with merge_metrics in a +- [#2350](https://github.com/influxdata/influxdb/pull/2350): Issue fix for :influxd -hostname localhost. +- [#2367](https://github.com/influxdata/influxdb/pull/2367): PR for issue #2350 - Always use localhost, not host name. ## v0.9.0-rc25 [2015-04-15] ### Bugfixes -- [#2282](https://github.com/influxdb/influxdb/pull/2282): Use "value" as field name for OpenTSDB input. -- [#2283](https://github.com/influxdb/influxdb/pull/2283): Fix bug when restarting an entire existing cluster. -- [#2293](https://github.com/influxdb/influxdb/pull/2293): Open cluster listener before starting broker. -- [#2287](https://github.com/influxdb/influxdb/pull/2287): Fix data race during SHOW RETENTION POLICIES. -- [#2288](https://github.com/influxdb/influxdb/pull/2288): Fix expression parsing bug. -- [#2294](https://github.com/influxdb/influxdb/pull/2294): Fix async response flushing (invalid chunked response error). +- [#2282](https://github.com/influxdata/influxdb/pull/2282): Use "value" as field name for OpenTSDB input. +- [#2283](https://github.com/influxdata/influxdb/pull/2283): Fix bug when restarting an entire existing cluster. +- [#2293](https://github.com/influxdata/influxdb/pull/2293): Open cluster listener before starting broker. 
+- [#2287](https://github.com/influxdata/influxdb/pull/2287): Fix data race during SHOW RETENTION POLICIES. +- [#2288](https://github.com/influxdata/influxdb/pull/2288): Fix expression parsing bug. +- [#2294](https://github.com/influxdata/influxdb/pull/2294): Fix async response flushing (invalid chunked response error). ## Features -- [#2276](https://github.com/influxdb/influxdb/pull/2276): Broker topic truncation. -- [#2292](https://github.com/influxdb/influxdb/pull/2292): Wire up drop CQ statement - thanks @neonstalwart! -- [#2290](https://github.com/influxdb/influxdb/pull/2290): Allow hostname argument to override default config - thanks @neonstalwart! -- [#2295](https://github.com/influxdb/influxdb/pull/2295): Use nil as default return value for MapCount - thanks @neonstalwart! -- [#2246](https://github.com/influxdb/influxdb/pull/2246): Allow HTTP logging to be controlled. +- [#2276](https://github.com/influxdata/influxdb/pull/2276): Broker topic truncation. +- [#2292](https://github.com/influxdata/influxdb/pull/2292): Wire up drop CQ statement - thanks @neonstalwart! +- [#2290](https://github.com/influxdata/influxdb/pull/2290): Allow hostname argument to override default config - thanks @neonstalwart! +- [#2295](https://github.com/influxdata/influxdb/pull/2295): Use nil as default return value for MapCount - thanks @neonstalwart! +- [#2246](https://github.com/influxdata/influxdb/pull/2246): Allow HTTP logging to be controlled. ## v0.9.0-rc24 [2015-04-13] ### Bugfixes -- [#2255](https://github.com/influxdb/influxdb/pull/2255): Fix panic when changing default retention policy. -- [#2257](https://github.com/influxdb/influxdb/pull/2257): Add "snapshotting" pseudo state & log entry cache. -- [#2261](https://github.com/influxdb/influxdb/pull/2261): Support int64 value types. -- [#2191](https://github.com/influxdb/influxdb/pull/2191): Case-insensitive check for "fill" -- [#2274](https://github.com/influxdb/influxdb/pull/2274): Snapshot and HTTP API endpoints -- [#2265](https://github.com/influxdb/influxdb/pull/2265): Fix auth for CLI. +- [#2255](https://github.com/influxdata/influxdb/pull/2255): Fix panic when changing default retention policy. +- [#2257](https://github.com/influxdata/influxdb/pull/2257): Add "snapshotting" pseudo state & log entry cache. +- [#2261](https://github.com/influxdata/influxdb/pull/2261): Support int64 value types. +- [#2191](https://github.com/influxdata/influxdb/pull/2191): Case-insensitive check for "fill" +- [#2274](https://github.com/influxdata/influxdb/pull/2274): Snapshot and HTTP API endpoints +- [#2265](https://github.com/influxdata/influxdb/pull/2265): Fix auth for CLI. ## v0.9.0-rc23 [2015-04-11] ### Features -- [#2202](https://github.com/influxdb/influxdb/pull/2202): Initial implementation of Distributed Queries -- [#2202](https://github.com/influxdb/influxdb/pull/2202): 64-bit Series IDs. INCOMPATIBLE WITH PREVIOUS DATASTORES. +- [#2202](https://github.com/influxdata/influxdb/pull/2202): Initial implementation of Distributed Queries +- [#2202](https://github.com/influxdata/influxdb/pull/2202): 64-bit Series IDs. INCOMPATIBLE WITH PREVIOUS DATASTORES. 
### Bugfixes -- [#2225](https://github.com/influxdb/influxdb/pull/2225): Make keywords completely case insensitive -- [#2228](https://github.com/influxdb/influxdb/pull/2228): Accept keyword default unquoted in ALTER RETENTION POLICY statement -- [#2236](https://github.com/influxdb/influxdb/pull/2236): Immediate term changes, fix stale write issue, net/http/pprof -- [#2213](https://github.com/influxdb/influxdb/pull/2213): Seed random number generator for election timeout. Thanks @cannium. +- [#2225](https://github.com/influxdata/influxdb/pull/2225): Make keywords completely case insensitive +- [#2228](https://github.com/influxdata/influxdb/pull/2228): Accept keyword default unquoted in ALTER RETENTION POLICY statement +- [#2236](https://github.com/influxdata/influxdb/pull/2236): Immediate term changes, fix stale write issue, net/http/pprof +- [#2213](https://github.com/influxdata/influxdb/pull/2213): Seed random number generator for election timeout. Thanks @cannium. ## v0.9.0-rc22 [2015-04-09] ### Features -- [#2214](https://github.com/influxdb/influxdb/pull/2214): Added the option to influx CLI to execute single command and exit. Thanks @n1tr0g +- [#2214](https://github.com/influxdata/influxdb/pull/2214): Added the option to influx CLI to execute single command and exit. Thanks @n1tr0g ### Bugfixes -- [#2223](https://github.com/influxdb/influxdb/pull/2223): Always notify term change on RequestVote +- [#2223](https://github.com/influxdata/influxdb/pull/2223): Always notify term change on RequestVote ## v0.9.0-rc21 [2015-04-09] ### Features -- [#870](https://github.com/influxdb/influxdb/pull/870): Add support for OpenTSDB telnet input protocol. Thanks @tcolgate -- [#2180](https://github.com/influxdb/influxdb/pull/2180): Allow http write handler to decode gzipped body -- [#2175](https://github.com/influxdb/influxdb/pull/2175): Separate broker and data nodes -- [#2158](https://github.com/influxdb/influxdb/pull/2158): Allow user password to be changed. Thanks @n1tr0g -- [#2201](https://github.com/influxdb/influxdb/pull/2201): Bring back config join URLs -- [#2121](https://github.com/influxdb/influxdb/pull/2121): Parser refactor +- [#870](https://github.com/influxdata/influxdb/pull/870): Add support for OpenTSDB telnet input protocol. Thanks @tcolgate +- [#2180](https://github.com/influxdata/influxdb/pull/2180): Allow http write handler to decode gzipped body +- [#2175](https://github.com/influxdata/influxdb/pull/2175): Separate broker and data nodes +- [#2158](https://github.com/influxdata/influxdb/pull/2158): Allow user password to be changed. Thanks @n1tr0g +- [#2201](https://github.com/influxdata/influxdb/pull/2201): Bring back config join URLs +- [#2121](https://github.com/influxdata/influxdb/pull/2121): Parser refactor ### Bugfixes -- [#2181](https://github.com/influxdb/influxdb/pull/2181): Fix panic on "SHOW DIAGNOSTICS". -- [#2170](https://github.com/influxdb/influxdb/pull/2170): Make sure queries on missing tags return 200 status. -- [#2197](https://github.com/influxdb/influxdb/pull/2197): Lock server during Open(). -- [#2200](https://github.com/influxdb/influxdb/pull/2200): Re-enable Continuous Queries. -- [#2203](https://github.com/influxdb/influxdb/pull/2203): Fix race condition on continuous queries. -- [#2217](https://github.com/influxdb/influxdb/pull/2217): Only revert to follower if new term is greater. -- [#2219](https://github.com/influxdb/influxdb/pull/2219): Persist term change to disk when candidate. 
Thanks @cannium +- [#2181](https://github.com/influxdata/influxdb/pull/2181): Fix panic on "SHOW DIAGNOSTICS". +- [#2170](https://github.com/influxdata/influxdb/pull/2170): Make sure queries on missing tags return 200 status. +- [#2197](https://github.com/influxdata/influxdb/pull/2197): Lock server during Open(). +- [#2200](https://github.com/influxdata/influxdb/pull/2200): Re-enable Continuous Queries. +- [#2203](https://github.com/influxdata/influxdb/pull/2203): Fix race condition on continuous queries. +- [#2217](https://github.com/influxdata/influxdb/pull/2217): Only revert to follower if new term is greater. +- [#2219](https://github.com/influxdata/influxdb/pull/2219): Persist term change to disk when candidate. Thanks @cannium ## v0.9.0-rc20 [2015-04-04] ### Features -- [#2128](https://github.com/influxdb/influxdb/pull/2128): Data node discovery from brokers -- [#2142](https://github.com/influxdb/influxdb/pull/2142): Support chunked queries -- [#2154](https://github.com/influxdb/influxdb/pull/2154): Node redirection -- [#2168](https://github.com/influxdb/influxdb/pull/2168): Return raft term from vote, add term logging - -### Bugfixes -- [#2147](https://github.com/influxdb/influxdb/pull/2147): Set Go Max procs in a better location -- [#2137](https://github.com/influxdb/influxdb/pull/2137): Refactor `results` to `response`. Breaking Go Client change. -- [#2151](https://github.com/influxdb/influxdb/pull/2151): Ignore replay commands on the metastore. -- [#2152](https://github.com/influxdb/influxdb/issues/2152): Influxd process with stats enabled crashing with 'Unsuported protocol scheme for ""' -- [#2156](https://github.com/influxdb/influxdb/pull/2156): Propagate error when resolving UDP address in Graphite UDP server. -- [#2163](https://github.com/influxdb/influxdb/pull/2163): Fix up paths for default data and run storage. -- [#2164](https://github.com/influxdb/influxdb/pull/2164): Append STDOUT/STDERR in initscript. -- [#2165](https://github.com/influxdb/influxdb/pull/2165): Better name for config section for stats and diags. -- [#2165](https://github.com/influxdb/influxdb/pull/2165): Monitoring database and retention policy are not configurable. -- [#2167](https://github.com/influxdb/influxdb/pull/2167): Add broker log recovery. -- [#2166](https://github.com/influxdb/influxdb/pull/2166): Don't panic if presented with a field of unknown type. -- [#2149](https://github.com/influxdb/influxdb/pull/2149): Fix unit tests for win32 when directory doesn't exist. -- [#2150](https://github.com/influxdb/influxdb/pull/2150): Fix unit tests for win32 when a connection is refused. +- [#2128](https://github.com/influxdata/influxdb/pull/2128): Data node discovery from brokers +- [#2142](https://github.com/influxdata/influxdb/pull/2142): Support chunked queries +- [#2154](https://github.com/influxdata/influxdb/pull/2154): Node redirection +- [#2168](https://github.com/influxdata/influxdb/pull/2168): Return raft term from vote, add term logging + +### Bugfixes +- [#2147](https://github.com/influxdata/influxdb/pull/2147): Set Go Max procs in a better location +- [#2137](https://github.com/influxdata/influxdb/pull/2137): Refactor `results` to `response`. Breaking Go Client change. +- [#2151](https://github.com/influxdata/influxdb/pull/2151): Ignore replay commands on the metastore. 
+- [#2152](https://github.com/influxdata/influxdb/issues/2152): Influxd process with stats enabled crashing with 'Unsuported protocol scheme for ""' +- [#2156](https://github.com/influxdata/influxdb/pull/2156): Propagate error when resolving UDP address in Graphite UDP server. +- [#2163](https://github.com/influxdata/influxdb/pull/2163): Fix up paths for default data and run storage. +- [#2164](https://github.com/influxdata/influxdb/pull/2164): Append STDOUT/STDERR in initscript. +- [#2165](https://github.com/influxdata/influxdb/pull/2165): Better name for config section for stats and diags. +- [#2165](https://github.com/influxdata/influxdb/pull/2165): Monitoring database and retention policy are not configurable. +- [#2167](https://github.com/influxdata/influxdb/pull/2167): Add broker log recovery. +- [#2166](https://github.com/influxdata/influxdb/pull/2166): Don't panic if presented with a field of unknown type. +- [#2149](https://github.com/influxdata/influxdb/pull/2149): Fix unit tests for win32 when directory doesn't exist. +- [#2150](https://github.com/influxdata/influxdb/pull/2150): Fix unit tests for win32 when a connection is refused. ## v0.9.0-rc19 [2015-04-01] ### Features -- [#2143](https://github.com/influxdb/influxdb/pull/2143): Add raft term logging. +- [#2143](https://github.com/influxdata/influxdb/pull/2143): Add raft term logging. ### Bugfixes -- [#2145](https://github.com/influxdb/influxdb/pull/2145): Encode toml durations correctly which fixes default configuration generation `influxd config`. +- [#2145](https://github.com/influxdata/influxdb/pull/2145): Encode toml durations correctly which fixes default configuration generation `influxd config`. ## v0.9.0-rc18 [2015-03-31] ### Bugfixes -- [#2100](https://github.com/influxdb/influxdb/pull/2100): Use channel to synchronize collectd shutdown. -- [#2100](https://github.com/influxdb/influxdb/pull/2100): Synchronize access to shard index. -- [#2131](https://github.com/influxdb/influxdb/pull/2131): Optimize marshalTags(). -- [#2130](https://github.com/influxdb/influxdb/pull/2130): Make fewer calls to marshalTags(). -- [#2105](https://github.com/influxdb/influxdb/pull/2105): Support != for tag values. Fix issue #2097, thanks to @smonkewitz for bug report. -- [#2105](https://github.com/influxdb/influxdb/pull/2105): Support !~ tags values. -- [#2138](https://github.com/influxdb/influxdb/pull/2136): Use map for marshaledTags cache. +- [#2100](https://github.com/influxdata/influxdb/pull/2100): Use channel to synchronize collectd shutdown. +- [#2100](https://github.com/influxdata/influxdb/pull/2100): Synchronize access to shard index. +- [#2131](https://github.com/influxdata/influxdb/pull/2131): Optimize marshalTags(). +- [#2130](https://github.com/influxdata/influxdb/pull/2130): Make fewer calls to marshalTags(). +- [#2105](https://github.com/influxdata/influxdb/pull/2105): Support != for tag values. Fix issue #2097, thanks to @smonkewitz for bug report. +- [#2105](https://github.com/influxdata/influxdb/pull/2105): Support !~ tags values. +- [#2138](https://github.com/influxdata/influxdb/pull/2136): Use map for marshaledTags cache. ## v0.9.0-rc17 [2015-03-29] ### Features -- [#2076](https://github.com/influxdb/influxdb/pull/2076): Separate stdout and stderr output in init.d script -- [#2091](https://github.com/influxdb/influxdb/pull/2091): Support disabling snapshot endpoint. -- [#2081](https://github.com/influxdb/influxdb/pull/2081): Support writing diagnostic data into the internal database. 
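The `!=` and `!~` tag comparisons from #2105 above read as "tag value is not X" and "tag value does not match this regex"; a sketch with hypothetical measurement and tag names:

```sql
-- Exclude one tag value (#2105)
SELECT * FROM cpu WHERE host != 'server01'
-- Exclude tag values matching a regular expression (#2105)
SELECT * FROM cpu WHERE host !~ /^web.*/
```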
-- [#2095](https://github.com/influxdb/influxdb/pull/2095): Improved InfluxDB client docs. Thanks @derailed +- [#2076](https://github.com/influxdata/influxdb/pull/2076): Separate stdout and stderr output in init.d script +- [#2091](https://github.com/influxdata/influxdb/pull/2091): Support disabling snapshot endpoint. +- [#2081](https://github.com/influxdata/influxdb/pull/2081): Support writing diagnostic data into the internal database. +- [#2095](https://github.com/influxdata/influxdb/pull/2095): Improved InfluxDB client docs. Thanks @derailed ### Bugfixes -- [#2093](https://github.com/influxdb/influxdb/pull/2093): Point precision not marshalled correctly. Thanks @derailed -- [#2084](https://github.com/influxdb/influxdb/pull/2084): Allowing leading underscores in identifiers. -- [#2080](https://github.com/influxdb/influxdb/pull/2080): Graphite logs in seconds, not milliseconds. -- [#2101](https://github.com/influxdb/influxdb/pull/2101): SHOW DATABASES should name returned series "databases". -- [#2104](https://github.com/influxdb/influxdb/pull/2104): Include NEQ when calculating field filters. -- [#2112](https://github.com/influxdb/influxdb/pull/2112): Set GOMAXPROCS on startup. This may have been causing extra leader elections, which would cause a number of other bugs or instability. -- [#2111](https://github.com/influxdb/influxdb/pull/2111) and [#2025](https://github.com/influxdb/influxdb/issues/2025): Raft stability fixes. Non-contiguous log error and others. -- [#2114](https://github.com/influxdb/influxdb/pull/2114): Correctly start influxd on platforms without start-stop-daemon. +- [#2093](https://github.com/influxdata/influxdb/pull/2093): Point precision not marshalled correctly. Thanks @derailed +- [#2084](https://github.com/influxdata/influxdb/pull/2084): Allowing leading underscores in identifiers. +- [#2080](https://github.com/influxdata/influxdb/pull/2080): Graphite logs in seconds, not milliseconds. +- [#2101](https://github.com/influxdata/influxdb/pull/2101): SHOW DATABASES should name returned series "databases". +- [#2104](https://github.com/influxdata/influxdb/pull/2104): Include NEQ when calculating field filters. +- [#2112](https://github.com/influxdata/influxdb/pull/2112): Set GOMAXPROCS on startup. This may have been causing extra leader elections, which would cause a number of other bugs or instability. +- [#2111](https://github.com/influxdata/influxdb/pull/2111) and [#2025](https://github.com/influxdata/influxdb/issues/2025): Raft stability fixes. Non-contiguous log error and others. +- [#2114](https://github.com/influxdata/influxdb/pull/2114): Correctly start influxd on platforms without start-stop-daemon. ## v0.9.0-rc16 [2015-03-24] ### Features -- [#2058](https://github.com/influxdb/influxdb/pull/2058): Track number of queries executed in stats. -- [#2059](https://github.com/influxdb/influxdb/pull/2059): Retention policies sorted by name on return to client. -- [#2061](https://github.com/influxdb/influxdb/pull/2061): Implement SHOW DIAGNOSTICS. -- [#2064](https://github.com/influxdb/influxdb/pull/2064): Allow init.d script to return influxd version. -- [#2053](https://github.com/influxdb/influxdb/pull/2053): Implment backup and restore. -- [#1631](https://github.com/influxdb/influxdb/pull/1631): Wire up DROP CONTINUOUS QUERY. - -### Bugfixes -- [#2037](https://github.com/influxdb/influxdb/pull/2037): Don't check 'configExists' at Run() level. -- [#2039](https://github.com/influxdb/influxdb/pull/2039): Don't panic if getting current user fails. 
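For reference, the introspection statements touched in the entries above (#2101, #2061) are plain InfluxQL; the exact columns returned have varied between releases:

```sql
-- Per #2101 the returned series is named "databases"
SHOW DATABASES
-- Added in v0.9.0-rc16 (#2061)
SHOW DIAGNOSTICS
```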
-- [#2034](https://github.com/influxdb/influxdb/pull/2034): GROUP BY should require an aggregate. -- [#2040](https://github.com/influxdb/influxdb/pull/2040): Add missing top-level help for config command. -- [#2057](https://github.com/influxdb/influxdb/pull/2057): Move racy "in order" test to integration test suite. -- [#2060](https://github.com/influxdb/influxdb/pull/2060): Reload server shard map on restart. -- [#2068](https://github.com/influxdb/influxdb/pull/2068): Fix misspelled JSON field. -- [#2067](https://github.com/influxdb/influxdb/pull/2067): Fixed issue where some queries didn't properly pull back data (introduced in RC15). Fixing intervals for GROUP BY. +- [#2058](https://github.com/influxdata/influxdb/pull/2058): Track number of queries executed in stats. +- [#2059](https://github.com/influxdata/influxdb/pull/2059): Retention policies sorted by name on return to client. +- [#2061](https://github.com/influxdata/influxdb/pull/2061): Implement SHOW DIAGNOSTICS. +- [#2064](https://github.com/influxdata/influxdb/pull/2064): Allow init.d script to return influxd version. +- [#2053](https://github.com/influxdata/influxdb/pull/2053): Implement backup and restore. +- [#1631](https://github.com/influxdata/influxdb/pull/1631): Wire up DROP CONTINUOUS QUERY. + +### Bugfixes +- [#2037](https://github.com/influxdata/influxdb/pull/2037): Don't check 'configExists' at Run() level. +- [#2039](https://github.com/influxdata/influxdb/pull/2039): Don't panic if getting current user fails. +- [#2034](https://github.com/influxdata/influxdb/pull/2034): GROUP BY should require an aggregate. +- [#2040](https://github.com/influxdata/influxdb/pull/2040): Add missing top-level help for config command. +- [#2057](https://github.com/influxdata/influxdb/pull/2057): Move racy "in order" test to integration test suite. +- [#2060](https://github.com/influxdata/influxdb/pull/2060): Reload server shard map on restart. +- [#2068](https://github.com/influxdata/influxdb/pull/2068): Fix misspelled JSON field. +- [#2067](https://github.com/influxdata/influxdb/pull/2067): Fixed issue where some queries didn't properly pull back data (introduced in RC15). Fixing intervals for GROUP BY. ## v0.9.0-rc15 [2015-03-19] ### Features -- [#2000](https://github.com/influxdb/influxdb/pull/2000): Log broker path when broker fails to start. Thanks @gst. -- [#2007](https://github.com/influxdb/influxdb/pull/2007): Track shard-level stats. +- [#2000](https://github.com/influxdata/influxdb/pull/2000): Log broker path when broker fails to start. Thanks @gst. +- [#2007](https://github.com/influxdata/influxdb/pull/2007): Track shard-level stats. ### Bugfixes -- [#2001](https://github.com/influxdb/influxdb/pull/2001): Ensure measurement not found returns status code 200. -- [#1985](https://github.com/influxdb/influxdb/pull/1985): Set content-type JSON header before actually writing header. Thanks @dstrek. -- [#2003](https://github.com/influxdb/influxdb/pull/2003): Set timestamp when writing monitoring stats. -- [#2004](https://github.com/influxdb/influxdb/pull/2004): Limit group by to MaxGroupByPoints (currently 100,000). -- [#2016](https://github.com/influxdb/influxdb/pull/2016): Fixing bucket alignment for group by. Thanks @jnutzmann -- [#2021](https://github.com/influxdb/influxdb/pull/2021): Remove unnecessary formatting from log message. Thanks @simonkern +- [#2001](https://github.com/influxdata/influxdb/pull/2001): Ensure measurement not found returns status code 200.
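To make the `GROUP BY` rule from #2034 above concrete: grouping by time without an aggregate is rejected, so `GROUP BY time()` must be paired with a function such as `mean()`. Measurement and field names below are hypothetical:

```sql
-- Rejected after #2034: GROUP BY time() with no aggregate
-- SELECT value FROM cpu GROUP BY time(10m)

-- Accepted: aggregate paired with GROUP BY time()
SELECT mean(value) FROM cpu WHERE time > now() - 1h GROUP BY time(10m)
```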
+- [#1985](https://github.com/influxdata/influxdb/pull/1985): Set content-type JSON header before actually writing header. Thanks @dstrek. +- [#2003](https://github.com/influxdata/influxdb/pull/2003): Set timestamp when writing monitoring stats. +- [#2004](https://github.com/influxdata/influxdb/pull/2004): Limit group by to MaxGroupByPoints (currently 100,000). +- [#2016](https://github.com/influxdata/influxdb/pull/2016): Fixing bucket alignment for group by. Thanks @jnutzmann +- [#2021](https://github.com/influxdata/influxdb/pull/2021): Remove unnecessary formatting from log message. Thanks @simonkern ## v0.9.0-rc14 [2015-03-18] ### Bugfixes -- [#1999](https://github.com/influxdb/influxdb/pull/1999): Return status code 200 for measurement not found errors on show series. +- [#1999](https://github.com/influxdata/influxdb/pull/1999): Return status code 200 for measurement not found errors on show series. ## v0.9.0-rc13 [2015-03-17] ### Features -- [#1974](https://github.com/influxdb/influxdb/pull/1974): Add time taken for request to the http server logs. +- [#1974](https://github.com/influxdata/influxdb/pull/1974): Add time taken for request to the http server logs. ### Bugfixes -- [#1971](https://github.com/influxdb/influxdb/pull/1971): Fix leader id initialization. -- [#1975](https://github.com/influxdb/influxdb/pull/1975): Require `q` parameter for query endpoint. -- [#1969](https://github.com/influxdb/influxdb/pull/1969): Print loaded config. -- [#1987](https://github.com/influxdb/influxdb/pull/1987): Fix config print startup statement for when no config is provided. -- [#1990](https://github.com/influxdb/influxdb/pull/1990): Drop measurement was taking too long due to transactions. +- [#1971](https://github.com/influxdata/influxdb/pull/1971): Fix leader id initialization. +- [#1975](https://github.com/influxdata/influxdb/pull/1975): Require `q` parameter for query endpoint. +- [#1969](https://github.com/influxdata/influxdb/pull/1969): Print loaded config. +- [#1987](https://github.com/influxdata/influxdb/pull/1987): Fix config print startup statement for when no config is provided. +- [#1990](https://github.com/influxdata/influxdb/pull/1990): Drop measurement was taking too long due to transactions. ## v0.9.0-rc12 [2015-03-15] ### Bugfixes -- [#1942](https://github.com/influxdb/influxdb/pull/1942): Sort wildcard names. -- [#1957](https://github.com/influxdb/influxdb/pull/1957): Graphite numbers are always float64. -- [#1955](https://github.com/influxdb/influxdb/pull/1955): Prohibit creation of databases with no name. Thanks @dullgiulio -- [#1952](https://github.com/influxdb/influxdb/pull/1952): Handle delete statement with an error. Thanks again to @dullgiulio +- [#1942](https://github.com/influxdata/influxdb/pull/1942): Sort wildcard names. +- [#1957](https://github.com/influxdata/influxdb/pull/1957): Graphite numbers are always float64. +- [#1955](https://github.com/influxdata/influxdb/pull/1955): Prohibit creation of databases with no name. Thanks @dullgiulio +- [#1952](https://github.com/influxdata/influxdb/pull/1952): Handle delete statement with an error. Thanks again to @dullgiulio ### Features -- [#1935](https://github.com/influxdb/influxdb/pull/1935): Implement stateless broker for Raft. -- [#1936](https://github.com/influxdb/influxdb/pull/1936): Implement "SHOW STATS" and self-monitoring +- [#1935](https://github.com/influxdata/influxdb/pull/1935): Implement stateless broker for Raft. 
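The self-monitoring statement from #1936 above is issued like any other query; a minimal sketch (the database name is hypothetical):

```sql
-- Internal runtime statistics (#1936)
SHOW STATS
-- Database names must be non-empty after #1955; "mydb" is a placeholder
CREATE DATABASE mydb
```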
+- [#1936](https://github.com/influxdata/influxdb/pull/1936): Implement "SHOW STATS" and self-monitoring ### Features -- [#1909](https://github.com/influxdb/influxdb/pull/1909): Implement a dump command. +- [#1909](https://github.com/influxdata/influxdb/pull/1909): Implement a dump command. ## v0.9.0-rc11 [2015-03-13] ### Bugfixes -- [#1917](https://github.com/influxdb/influxdb/pull/1902): Creating Infinite Retention Policy Failed. -- [#1758](https://github.com/influxdb/influxdb/pull/1758): Add Graphite Integration Test. -- [#1929](https://github.com/influxdb/influxdb/pull/1929): Default Retention Policy incorrectly auto created. -- [#1930](https://github.com/influxdb/influxdb/pull/1930): Auto create database for graphite if not specified. -- [#1908](https://github.com/influxdb/influxdb/pull/1908): Cosmetic CLI output fixes. -- [#1931](https://github.com/influxdb/influxdb/pull/1931): Add default column to SHOW RETENTION POLICIES. -- [#1937](https://github.com/influxdb/influxdb/pull/1937): OFFSET should be allowed to be 0. +- [#1917](https://github.com/influxdata/influxdb/pull/1902): Creating Infinite Retention Policy Failed. +- [#1758](https://github.com/influxdata/influxdb/pull/1758): Add Graphite Integration Test. +- [#1929](https://github.com/influxdata/influxdb/pull/1929): Default Retention Policy incorrectly auto created. +- [#1930](https://github.com/influxdata/influxdb/pull/1930): Auto create database for graphite if not specified. +- [#1908](https://github.com/influxdata/influxdb/pull/1908): Cosmetic CLI output fixes. +- [#1931](https://github.com/influxdata/influxdb/pull/1931): Add default column to SHOW RETENTION POLICIES. +- [#1937](https://github.com/influxdata/influxdb/pull/1937): OFFSET should be allowed to be 0. ### Features -- [#1902](https://github.com/influxdb/influxdb/pull/1902): Enforce retention policies to have a minimum duration. -- [#1906](https://github.com/influxdb/influxdb/pull/1906): Add show servers to query language. -- [#1925](https://github.com/influxdb/influxdb/pull/1925): Add `fill(none)`, `fill(previous)`, and `fill()` to queries. +- [#1902](https://github.com/influxdata/influxdb/pull/1902): Enforce retention policies to have a minimum duration. +- [#1906](https://github.com/influxdata/influxdb/pull/1906): Add show servers to query language. +- [#1925](https://github.com/influxdata/influxdb/pull/1925): Add `fill(none)`, `fill(previous)`, and `fill()` to queries. ## v0.9.0-rc10 [2015-03-09] ### Bugfixes -- [#1867](https://github.com/influxdb/influxdb/pull/1867): Fix race accessing topic replicas map -- [#1864](https://github.com/influxdb/influxdb/pull/1864): fix race in startStateLoop -- [#1753](https://github.com/influxdb/influxdb/pull/1874): Do Not Panic on Missing Dirs -- [#1877](https://github.com/influxdb/influxdb/pull/1877): Broker clients track broker leader -- [#1862](https://github.com/influxdb/influxdb/pull/1862): Fix memory leak in `httpd.serveWait`. Thanks @mountkin -- [#1883](https://github.com/influxdb/influxdb/pull/1883): RLock server during retention policy enforcement. Thanks @grisha -- [#1868](https://github.com/influxdb/influxdb/pull/1868): Use `BatchPoints` for `client.Write` method. Thanks @vladlopes, @georgmu, @d2g, @evanphx, @akolosov. -- [#1881](https://github.com/influxdb/influxdb/pull/1881): Update documentation for `client` package. Misc library tweaks. 
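The `fill()` variants added in #1925 above control how empty `GROUP BY time()` intervals are reported; a sketch with hypothetical measurement and field names:

```sql
-- Omit empty intervals entirely
SELECT mean(value) FROM cpu WHERE time > now() - 1h GROUP BY time(10m) fill(none)
-- Carry the previous interval's value forward
SELECT mean(value) FROM cpu WHERE time > now() - 1h GROUP BY time(10m) fill(previous)
-- Report empty intervals as a constant, here 0
SELECT mean(value) FROM cpu WHERE time > now() - 1h GROUP BY time(10m) fill(0)
```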
+- [#1867](https://github.com/influxdata/influxdb/pull/1867): Fix race accessing topic replicas map +- [#1864](https://github.com/influxdata/influxdb/pull/1864): fix race in startStateLoop +- [#1753](https://github.com/influxdata/influxdb/pull/1874): Do Not Panic on Missing Dirs +- [#1877](https://github.com/influxdata/influxdb/pull/1877): Broker clients track broker leader +- [#1862](https://github.com/influxdata/influxdb/pull/1862): Fix memory leak in `httpd.serveWait`. Thanks @mountkin +- [#1883](https://github.com/influxdata/influxdb/pull/1883): RLock server during retention policy enforcement. Thanks @grisha +- [#1868](https://github.com/influxdata/influxdb/pull/1868): Use `BatchPoints` for `client.Write` method. Thanks @vladlopes, @georgmu, @d2g, @evanphx, @akolosov. +- [#1881](https://github.com/influxdata/influxdb/pull/1881): Update documentation for `client` package. Misc library tweaks. - Fix queries with multiple where clauses on tags, times and fields. Fix queries that have where clauses on fields not in the select ### Features -- [#1875](https://github.com/influxdb/influxdb/pull/1875): Support trace logging of Raft. -- [#1895](https://github.com/influxdb/influxdb/pull/1895): Auto-create a retention policy when a database is created. -- [#1897](https://github.com/influxdb/influxdb/pull/1897): Pre-create shard groups. -- [#1900](https://github.com/influxdb/influxdb/pull/1900): Change `LIMIT` to `SLIMIT` and implement `LIMIT` and `OFFSET` +- [#1875](https://github.com/influxdata/influxdb/pull/1875): Support trace logging of Raft. +- [#1895](https://github.com/influxdata/influxdb/pull/1895): Auto-create a retention policy when a database is created. +- [#1897](https://github.com/influxdata/influxdb/pull/1897): Pre-create shard groups. +- [#1900](https://github.com/influxdata/influxdb/pull/1900): Change `LIMIT` to `SLIMIT` and implement `LIMIT` and `OFFSET` ## v0.9.0-rc9 [2015-03-06] ### Bugfixes -- [#1872](https://github.com/influxdb/influxdb/pull/1872): Fix "stale term" errors with raft +- [#1872](https://github.com/influxdata/influxdb/pull/1872): Fix "stale term" errors with raft ## v0.9.0-rc8 [2015-03-05] ### Bugfixes -- [#1836](https://github.com/influxdb/influxdb/pull/1836): Store each parsed shell command in history file. -- [#1789](https://github.com/influxdb/influxdb/pull/1789): add --config-files option to fpm command. Thanks @kylezh -- [#1859](https://github.com/influxdb/influxdb/pull/1859): Queries with a `GROUP BY *` clause were returning a 500 if done against a measurement that didn't exist +- [#1836](https://github.com/influxdata/influxdb/pull/1836): Store each parsed shell command in history file. +- [#1789](https://github.com/influxdata/influxdb/pull/1789): add --config-files option to fpm command. 
Thanks @kylezh +- [#1859](https://github.com/influxdata/influxdb/pull/1859): Queries with a `GROUP BY *` clause were returning a 500 if done against a measurement that didn't exist ### Features -- [#1755](https://github.com/influxdb/influxdb/pull/1848): Support JSON data ingest over UDP -- [#1857](https://github.com/influxdb/influxdb/pull/1857): Support retention policies with infinite duration -- [#1858](https://github.com/influxdb/influxdb/pull/1858): Enable detailed tracing of write path +- [#1755](https://github.com/influxdata/influxdb/pull/1848): Support JSON data ingest over UDP +- [#1857](https://github.com/influxdata/influxdb/pull/1857): Support retention policies with infinite duration +- [#1858](https://github.com/influxdata/influxdb/pull/1858): Enable detailed tracing of write path ## v0.9.0-rc7 [2015-03-02] ### Features -- [#1813](https://github.com/influxdb/influxdb/pull/1813): Queries for missing measurements or fields now return a 200 with an error message in the series JSON. -- [#1826](https://github.com/influxdb/influxdb/pull/1826), [#1827](https://github.com/influxdb/influxdb/pull/1827): Fixed queries with `WHERE` clauses against fields. +- [#1813](https://github.com/influxdata/influxdb/pull/1813): Queries for missing measurements or fields now return a 200 with an error message in the series JSON. +- [#1826](https://github.com/influxdata/influxdb/pull/1826), [#1827](https://github.com/influxdata/influxdb/pull/1827): Fixed queries with `WHERE` clauses against fields. ### Bugfixes -- [#1744](https://github.com/influxdb/influxdb/pull/1744): Allow retention policies to be modified without specifying replication factor. Thanks @kylezh -- [#1809](https://github.com/influxdb/influxdb/pull/1809): Packaging post-install script unconditionally removes init.d symlink. Thanks @sineos +- [#1744](https://github.com/influxdata/influxdb/pull/1744): Allow retention policies to be modified without specifying replication factor. Thanks @kylezh +- [#1809](https://github.com/influxdata/influxdb/pull/1809): Packaging post-install script unconditionally removes init.d symlink. Thanks @sineos ## v0.9.0-rc6 [2015-02-27] ### Bugfixes -- [#1780](https://github.com/influxdb/influxdb/pull/1780): Malformed identifiers get through the parser -- [#1775](https://github.com/influxdb/influxdb/pull/1775): Panic "index out of range" on some queries -- [#1744](https://github.com/influxdb/influxdb/pull/1744): Select shard groups which completely encompass time range. Thanks @kylezh. +- [#1780](https://github.com/influxdata/influxdb/pull/1780): Malformed identifiers get through the parser +- [#1775](https://github.com/influxdata/influxdb/pull/1775): Panic "index out of range" on some queries +- [#1744](https://github.com/influxdata/influxdb/pull/1744): Select shard groups which completely encompass time range. Thanks @kylezh. ## v0.9.0-rc5 [2015-02-27] ### Bugfixes -- [#1752](https://github.com/influxdb/influxdb/pull/1752): remove debug log output from collectd. -- [#1720](https://github.com/influxdb/influxdb/pull/1720): Parse Series IDs as unsigned 32-bits. -- [#1767](https://github.com/influxdb/influxdb/pull/1767): Drop Series was failing across shards. Issue #1761. 
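A sketch of the retention-policy statements behind #1857 and #1744 above; the database and policy names are hypothetical:

```sql
-- DURATION INF keeps data indefinitely (#1857)
CREATE RETENTION POLICY "keep_forever" ON "mydb" DURATION INF REPLICATION 1
-- After #1744 a policy can be altered without restating the replication factor
ALTER RETENTION POLICY "keep_forever" ON "mydb" DURATION 52w
```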
-- [#1773](https://github.com/influxdb/influxdb/pull/1773): Fix bug when merging series together that have unequal number of points in a group by interval -- [#1771](https://github.com/influxdb/influxdb/pull/1771): Make `SHOW SERIES` return IDs and support `LIMIT` and `OFFSET` +- [#1752](https://github.com/influxdata/influxdb/pull/1752): remove debug log output from collectd. +- [#1720](https://github.com/influxdata/influxdb/pull/1720): Parse Series IDs as unsigned 32-bits. +- [#1767](https://github.com/influxdata/influxdb/pull/1767): Drop Series was failing across shards. Issue #1761. +- [#1773](https://github.com/influxdata/influxdb/pull/1773): Fix bug when merging series together that have unequal number of points in a group by interval +- [#1771](https://github.com/influxdata/influxdb/pull/1771): Make `SHOW SERIES` return IDs and support `LIMIT` and `OFFSET` ### Features -- [#1698](https://github.com/influxdb/influxdb/pull/1698): Wire up DROP MEASUREMENT +- [#1698](https://github.com/influxdata/influxdb/pull/1698): Wire up DROP MEASUREMENT ## v0.9.0-rc4 [2015-02-24] @@ -932,30 +1474,30 @@ ### Features -- [#1659](https://github.com/influxdb/influxdb/pull/1659): WHERE against regexes: `WHERE =~ '.*asdf' -- [#1580](https://github.com/influxdb/influxdb/pull/1580): Add support for fields with bool, int, or string data types -- [#1687](https://github.com/influxdb/influxdb/pull/1687): Change `Rows` to `Series` in results output. BREAKING API CHANGE -- [#1629](https://github.com/influxdb/influxdb/pull/1629): Add support for `DROP SERIES` queries -- [#1632](https://github.com/influxdb/influxdb/pull/1632): Add support for `GROUP BY *` to return all series within a measurement -- [#1689](https://github.com/influxdb/influxdb/pull/1689): Change `SHOW TAG VALUES WITH KEY="foo"` to use the key name in the result. BREAKING API CHANGE -- [#1699](https://github.com/influxdb/influxdb/pull/1699): Add CPU and memory profiling options to daemon -- [#1672](https://github.com/influxdb/influxdb/pull/1672): Add index tracking to metastore. Makes downed node recovery actually work -- [#1591](https://github.com/influxdb/influxdb/pull/1591): Add `spread` aggregate function -- [#1576](https://github.com/influxdb/influxdb/pull/1576): Add `first` and `last` aggregate functions -- [#1573](https://github.com/influxdb/influxdb/pull/1573): Add `stddev` aggregate function -- [#1565](https://github.com/influxdb/influxdb/pull/1565): Add the admin interface back into the server and update for new API -- [#1562](https://github.com/influxdb/influxdb/pull/1562): Enforce retention policies -- [#1700](https://github.com/influxdb/influxdb/pull/1700): Change `Values` to `Fields` on writes. BREAKING API CHANGE -- [#1706](https://github.com/influxdb/influxdb/pull/1706): Add support for `LIMIT` and `OFFSET`, which work on the number of series returned in a query. To limit the number of data points use a `WHERE time` clause +- [#1659](https://github.com/influxdata/influxdb/pull/1659): WHERE against regexes: `WHERE =~ '.*asdf' +- [#1580](https://github.com/influxdata/influxdb/pull/1580): Add support for fields with bool, int, or string data types +- [#1687](https://github.com/influxdata/influxdb/pull/1687): Change `Rows` to `Series` in results output. 
BREAKING API CHANGE +- [#1629](https://github.com/influxdata/influxdb/pull/1629): Add support for `DROP SERIES` queries +- [#1632](https://github.com/influxdata/influxdb/pull/1632): Add support for `GROUP BY *` to return all series within a measurement +- [#1689](https://github.com/influxdata/influxdb/pull/1689): Change `SHOW TAG VALUES WITH KEY="foo"` to use the key name in the result. BREAKING API CHANGE +- [#1699](https://github.com/influxdata/influxdb/pull/1699): Add CPU and memory profiling options to daemon +- [#1672](https://github.com/influxdata/influxdb/pull/1672): Add index tracking to metastore. Makes downed node recovery actually work +- [#1591](https://github.com/influxdata/influxdb/pull/1591): Add `spread` aggregate function +- [#1576](https://github.com/influxdata/influxdb/pull/1576): Add `first` and `last` aggregate functions +- [#1573](https://github.com/influxdata/influxdb/pull/1573): Add `stddev` aggregate function +- [#1565](https://github.com/influxdata/influxdb/pull/1565): Add the admin interface back into the server and update for new API +- [#1562](https://github.com/influxdata/influxdb/pull/1562): Enforce retention policies +- [#1700](https://github.com/influxdata/influxdb/pull/1700): Change `Values` to `Fields` on writes. BREAKING API CHANGE +- [#1706](https://github.com/influxdata/influxdb/pull/1706): Add support for `LIMIT` and `OFFSET`, which work on the number of series returned in a query. To limit the number of data points use a `WHERE time` clause ### Bugfixes -- [#1636](https://github.com/influxdb/influxdb/issues/1636): Don't store number of fields in raw data. THIS IS A BREAKING DATA CHANGE. YOU MUST START WITH A FRESH DATABASE -- [#1701](https://github.com/influxdb/influxdb/pull/1701), [#1667](https://github.com/influxdb/influxdb/pull/1667), [#1663](https://github.com/influxdb/influxdb/pull/1663), [#1615](https://github.com/influxdb/influxdb/pull/1615): Raft fixes -- [#1644](https://github.com/influxdb/influxdb/pull/1644): Add batching support for significantly improved write performance -- [#1704](https://github.com/influxdb/influxdb/pull/1704): Fix queries that pull back raw data (i.e. ones without aggregate functions) -- [#1718](https://github.com/influxdb/influxdb/pull/1718): Return an error on write if any of the points are don't have at least one field -- [#1806](https://github.com/influxdb/influxdb/pull/1806): Fix regex parsing. Change regex syntax to use / delimiters. +- [#1636](https://github.com/influxdata/influxdb/issues/1636): Don't store number of fields in raw data. THIS IS A BREAKING DATA CHANGE. YOU MUST START WITH A FRESH DATABASE +- [#1701](https://github.com/influxdata/influxdb/pull/1701), [#1667](https://github.com/influxdata/influxdb/pull/1667), [#1663](https://github.com/influxdata/influxdb/pull/1663), [#1615](https://github.com/influxdata/influxdb/pull/1615): Raft fixes +- [#1644](https://github.com/influxdata/influxdb/pull/1644): Add batching support for significantly improved write performance +- [#1704](https://github.com/influxdata/influxdb/pull/1704): Fix queries that pull back raw data (i.e. ones without aggregate functions) +- [#1718](https://github.com/influxdata/influxdb/pull/1718): Return an error on write if any of the points are don't have at least one field +- [#1806](https://github.com/influxdata/influxdb/pull/1806): Fix regex parsing. Change regex syntax to use / delimiters. ## v0.9.0-rc1,2 [no public release] @@ -980,72 +1522,72 @@ ### Features -- [Issue #973](https://github.com/influxdb/influxdb/issues/973). 
Support +- [Issue #973](https://github.com/influxdata/influxdb/issues/973). Support joining using a regex or list of time series -- [Issue #1068](https://github.com/influxdb/influxdb/issues/1068). Print +- [Issue #1068](https://github.com/influxdata/influxdb/issues/1068). Print the processor chain when the query is started ### Bugfixes -- [Issue #584](https://github.com/influxdb/influxdb/issues/584). Don't +- [Issue #584](https://github.com/influxdata/influxdb/issues/584). Don't panic if the process died while initializing -- [Issue #663](https://github.com/influxdb/influxdb/issues/663). Make +- [Issue #663](https://github.com/influxdata/influxdb/issues/663). Make sure all sub services are closed when stopping InfluxDB -- [Issue #671](https://github.com/influxdb/influxdb/issues/671). Fix +- [Issue #671](https://github.com/influxdata/influxdb/issues/671). Fix the Makefile package target for Mac OSX -- [Issue #800](https://github.com/influxdb/influxdb/issues/800). Use +- [Issue #800](https://github.com/influxdata/influxdb/issues/800). Use su instead of sudo in the init script. This fixes the startup problem on RHEL 6. -- [Issue #925](https://github.com/influxdb/influxdb/issues/925). Don't +- [Issue #925](https://github.com/influxdata/influxdb/issues/925). Don't generate invalid query strings for single point queries -- [Issue #943](https://github.com/influxdb/influxdb/issues/943). Don't +- [Issue #943](https://github.com/influxdata/influxdb/issues/943). Don't take two snapshots at the same time -- [Issue #947](https://github.com/influxdb/influxdb/issues/947). Exit +- [Issue #947](https://github.com/influxdata/influxdb/issues/947). Exit nicely if the daemon doesn't have permission to write to the log. -- [Issue #959](https://github.com/influxdb/influxdb/issues/959). Stop using +- [Issue #959](https://github.com/influxdata/influxdb/issues/959). Stop using closed connections in the protobuf client. -- [Issue #978](https://github.com/influxdb/influxdb/issues/978). Check +- [Issue #978](https://github.com/influxdata/influxdb/issues/978). Check for valgrind and mercurial in the configure script -- [Issue #996](https://github.com/influxdb/influxdb/issues/996). Fill should +- [Issue #996](https://github.com/influxdata/influxdb/issues/996). Fill should fill the time range even if no points exist in the given time range -- [Issue #1008](https://github.com/influxdb/influxdb/issues/1008). Return +- [Issue #1008](https://github.com/influxdata/influxdb/issues/1008). Return an appropriate exit status code depending on whether the process exits due to an error or exits gracefully. -- [Issue #1024](https://github.com/influxdb/influxdb/issues/1024). Hitting +- [Issue #1024](https://github.com/influxdata/influxdb/issues/1024). Hitting open files limit causes influxdb to create shards in a loop. -- [Issue #1069](https://github.com/influxdb/influxdb/issues/1069). Fix +- [Issue #1069](https://github.com/influxdata/influxdb/issues/1069). Fix deprecated interface endpoint in Admin UI. -- [Issue #1076](https://github.com/influxdb/influxdb/issues/1076). Fix +- [Issue #1076](https://github.com/influxdata/influxdb/issues/1076). Fix the timestamps of data points written by the collectd plugin. (Thanks, @renchap for reporting this bug) -- [Issue #1078](https://github.com/influxdb/influxdb/issues/1078). Make sure +- [Issue #1078](https://github.com/influxdata/influxdb/issues/1078).
Make sure we don't resurrect shard directories for shards that have already expired -- [Issue #1085](https://github.com/influxdb/influxdb/issues/1085). Set +- [Issue #1085](https://github.com/influxdata/influxdb/issues/1085). Set the connection string of the local raft node -- [Issue #1092](https://github.com/influxdb/influxdb/issues/1093). Set +- [Issue #1092](https://github.com/influxdata/influxdb/issues/1093). Set the connection string of the local node in the raft snapshot. -- [Issue #1100](https://github.com/influxdb/influxdb/issues/1100). Removing +- [Issue #1100](https://github.com/influxdata/influxdb/issues/1100). Removing a non-existent shard space causes the cluster to panic. -- [Issue #1113](https://github.com/influxdb/influxdb/issues/1113). A nil +- [Issue #1113](https://github.com/influxdata/influxdb/issues/1113). A nil engine.ProcessorChain causes a panic. ## v0.8.5 [2014-10-27] ### Features -- [Issue #1055](https://github.com/influxdb/influxdb/issues/1055). Allow +- [Issue #1055](https://github.com/influxdata/influxdb/issues/1055). Allow graphite and collectd input plugins to have separate binding address ### Bugfixes -- [Issue #1058](https://github.com/influxdb/influxdb/issues/1058). Use +- [Issue #1058](https://github.com/influxdata/influxdb/issues/1058). Use the query language instead of the continuous query endpoints that were removed in 0.8.4 -- [Issue #1022](https://github.com/influxdb/influxdb/issues/1022). Return +- [Issue #1022](https://github.com/influxdata/influxdb/issues/1022). Return an +Inf or NaN instead of panicing when we encounter a divide by zero -- [Issue #821](https://github.com/influxdb/influxdb/issues/821). Don't +- [Issue #821](https://github.com/influxdata/influxdb/issues/821). Don't scan through points when we hit the limit -- [Issue #1051](https://github.com/influxdb/influxdb/issues/1051). Fix +- [Issue #1051](https://github.com/influxdata/influxdb/issues/1051). Fix timestamps when the collectd is used and low resolution timestamps is set. @@ -1055,113 +1597,113 @@ - Remove the continuous query api endpoints since the query language has all the features needed to list and delete continuous queries. -- [Issue #778](https://github.com/influxdb/influxdb/issues/778). Selecting +- [Issue #778](https://github.com/influxdata/influxdb/issues/778). Selecting from a non-existent series should give a better error message indicating that the series doesn't exist -- [Issue #988](https://github.com/influxdb/influxdb/issues/988). Check +- [Issue #988](https://github.com/influxdata/influxdb/issues/988). Check the arguments of `top()` and `bottom()` -- [Issue #1021](https://github.com/influxdb/influxdb/issues/1021). Make +- [Issue #1021](https://github.com/influxdata/influxdb/issues/1021). Make redirecting to standard output and standard error optional instead of going to `/dev/null`. This can now be configured by setting `$STDOUT` in `/etc/default/influxdb` -- [Issue #985](https://github.com/influxdb/influxdb/issues/985). Make +- [Issue #985](https://github.com/influxdata/influxdb/issues/985). Make sure we drop a shard only when there's no one using it. Otherwise, the shard can be closed when another goroutine is writing to it which will cause random errors and possibly corruption of the database. ### Features -- [Issue #1047](https://github.com/influxdb/influxdb/issues/1047). Allow +- [Issue #1047](https://github.com/influxdata/influxdb/issues/1047). 
Allow merge() to take a list of series (as opposed to a regex in #72) ## v0.8.4-rc.1 [2014-10-21] ### Bugfixes -- [Issue #1040](https://github.com/influxdb/influxdb/issues/1040). Revert +- [Issue #1040](https://github.com/influxdata/influxdb/issues/1040). Revert to older raft snapshot if the latest one is corrupted -- [Issue #1004](https://github.com/influxdb/influxdb/issues/1004). Querying +- [Issue #1004](https://github.com/influxdata/influxdb/issues/1004). Querying for data outside of existing shards returns an empty response instead of throwing a `Couldn't lookup columns` error -- [Issue #1020](https://github.com/influxdb/influxdb/issues/1020). Change +- [Issue #1020](https://github.com/influxdata/influxdb/issues/1020). Change init script exit codes to conform to the lsb standards. (Thanks, @spuder) -- [Issue #1011](https://github.com/influxdb/influxdb/issues/1011). Fix +- [Issue #1011](https://github.com/influxdata/influxdb/issues/1011). Fix the tarball for homebrew so that rocksdb is included and the directory structure is clean -- [Issue #1007](https://github.com/influxdb/influxdb/issues/1007). Fix +- [Issue #1007](https://github.com/influxdata/influxdb/issues/1007). Fix the content type when an error occurs and the client requests compression. -- [Issue #916](https://github.com/influxdb/influxdb/issues/916). Set +- [Issue #916](https://github.com/influxdata/influxdb/issues/916). Set the ulimit in the init script with a way to override the limit -- [Issue #742](https://github.com/influxdb/influxdb/issues/742). Fix +- [Issue #742](https://github.com/influxdata/influxdb/issues/742). Fix rocksdb for Mac OSX -- [Issue #387](https://github.com/influxdb/influxdb/issues/387). Aggregations +- [Issue #387](https://github.com/influxdata/influxdb/issues/387). Aggregations with group by time(1w), time(1m) and time(1y) (for week, month and year respectively) will cause the start time and end time of the bucket to fall on the logical boundaries of the week, month or year. -- [Issue #334](https://github.com/influxdb/influxdb/issues/334). Derivative +- [Issue #334](https://github.com/influxdata/influxdb/issues/334). Derivative for queries with group by time() and fill(), will take the difference between the first value in the bucket and the first value of the next bucket. -- [Issue #972](https://github.com/influxdb/influxdb/issues/972). Don't +- [Issue #972](https://github.com/influxdata/influxdb/issues/972). Don't assign duplicate server ids ### Features -- [Issue #722](https://github.com/influxdb/influxdb/issues/722). Add +- [Issue #722](https://github.com/influxdata/influxdb/issues/722). Add an install target to the Makefile -- [Issue #1032](https://github.com/influxdb/influxdb/issues/1032). Include +- [Issue #1032](https://github.com/influxdata/influxdb/issues/1032). Include the admin ui static assets in the binary -- [Issue #1019](https://github.com/influxdb/influxdb/issues/1019). Upgrade +- [Issue #1019](https://github.com/influxdata/influxdb/issues/1019). Upgrade to rocksdb 3.5.1 -- [Issue #992](https://github.com/influxdb/influxdb/issues/992). Add +- [Issue #992](https://github.com/influxdata/influxdb/issues/992). Add an input plugin for collectd. (Thanks, @kimor79) -- [Issue #72](https://github.com/influxdb/influxdb/issues/72). Support merge +- [Issue #72](https://github.com/influxdata/influxdb/issues/72). Support merge for multiple series using regex syntax ## v0.8.3 [2014-09-24] ### Bugfixes -- [Issue #885](https://github.com/influxdb/influxdb/issues/885). 
Multiple +- [Issue #885](https://github.com/influxdata/influxdb/issues/885). Multiple queries separated by semicolons work as expected. Queries are process sequentially -- [Issue #652](https://github.com/influxdb/influxdb/issues/652). Return an +- [Issue #652](https://github.com/influxdata/influxdb/issues/652). Return an error if an invalid column is used in the where clause -- [Issue #794](https://github.com/influxdb/influxdb/issues/794). Fix case +- [Issue #794](https://github.com/influxdata/influxdb/issues/794). Fix case insensitive regex matching -- [Issue #853](https://github.com/influxdb/influxdb/issues/853). Move +- [Issue #853](https://github.com/influxdata/influxdb/issues/853). Move cluster config from raft to API. -- [Issue #714](https://github.com/influxdb/influxdb/issues/714). Don't +- [Issue #714](https://github.com/influxdata/influxdb/issues/714). Don't panic on invalid boolean operators. -- [Issue #843](https://github.com/influxdb/influxdb/issues/843). Prevent blank database names -- [Issue #780](https://github.com/influxdb/influxdb/issues/780). Fix +- [Issue #843](https://github.com/influxdata/influxdb/issues/843). Prevent blank database names +- [Issue #780](https://github.com/influxdata/influxdb/issues/780). Fix fill() for all aggregators -- [Issue #923](https://github.com/influxdb/influxdb/issues/923). Enclose +- [Issue #923](https://github.com/influxdata/influxdb/issues/923). Enclose table names in double quotes in the result of GetQueryString() -- [Issue #923](https://github.com/influxdb/influxdb/issues/923). Enclose +- [Issue #923](https://github.com/influxdata/influxdb/issues/923). Enclose table names in double quotes in the result of GetQueryString() -- [Issue #967](https://github.com/influxdb/influxdb/issues/967). Return an +- [Issue #967](https://github.com/influxdata/influxdb/issues/967). Return an error if the storage engine can't be created -- [Issue #954](https://github.com/influxdb/influxdb/issues/954). Don't automatically +- [Issue #954](https://github.com/influxdata/influxdb/issues/954). Don't automatically create shards which was causing too many shards to be created when used with grafana -- [Issue #939](https://github.com/influxdb/influxdb/issues/939). Aggregation should +- [Issue #939](https://github.com/influxdata/influxdb/issues/939). Aggregation should ignore null values and invalid values, e.g. strings with mean(). -- [Issue #964](https://github.com/influxdb/influxdb/issues/964). Parse +- [Issue #964](https://github.com/influxdata/influxdb/issues/964). Parse big int in queries properly. ## v0.8.2 [2014-09-05] ### Bugfixes -- [Issue #886](https://github.com/influxdb/influxdb/issues/886). Update shard space to not set defaults +- [Issue #886](https://github.com/influxdata/influxdb/issues/886). Update shard space to not set defaults -- [Issue #867](https://github.com/influxdb/influxdb/issues/867). Add option to return shard space mappings in list series +- [Issue #867](https://github.com/influxdata/influxdb/issues/867). Add option to return shard space mappings in list series ### Bugfixes -- [Issue #652](https://github.com/influxdb/influxdb/issues/652). Return +- [Issue #652](https://github.com/influxdata/influxdb/issues/652). Return a meaningful error if an invalid column is used in where clause after joining multiple series @@ -1173,75 +1715,75 @@ ### Bugfixes -- [Issue #886](https://github.com/influxdb/influxdb/issues/886). Shard space regexes reset after restart of InfluxDB +- [Issue #886](https://github.com/influxdata/influxdb/issues/886). 
Shard space regexes reset after restart of InfluxDB ## v0.8.1 [2014-09-03] -- [Issue #896](https://github.com/influxdb/influxdb/issues/896). Allow logging to syslog. Thanks @malthe +- [Issue #896](https://github.com/influxdata/influxdb/issues/896). Allow logging to syslog. Thanks @malthe ### Bugfixes -- [Issue #868](https://github.com/influxdb/influxdb/issues/868). Don't panic when upgrading a snapshot from 0.7.x -- [Issue #887](https://github.com/influxdb/influxdb/issues/887). The first continuous query shouldn't trigger backfill if it had backfill disabled -- [Issue #674](https://github.com/influxdb/influxdb/issues/674). Graceful exit when config file is invalid. (Thanks, @DavidBord) -- [Issue #857](https://github.com/influxdb/influxdb/issues/857). More informative list servers api. (Thanks, @oliveagle) +- [Issue #868](https://github.com/influxdata/influxdb/issues/868). Don't panic when upgrading a snapshot from 0.7.x +- [Issue #887](https://github.com/influxdata/influxdb/issues/887). The first continuous query shouldn't trigger backfill if it had backfill disabled +- [Issue #674](https://github.com/influxdata/influxdb/issues/674). Graceful exit when config file is invalid. (Thanks, @DavidBord) +- [Issue #857](https://github.com/influxdata/influxdb/issues/857). More informative list servers api. (Thanks, @oliveagle) ## v0.8.0 [2014-08-22] ### Features -- [Issue #850](https://github.com/influxdb/influxdb/issues/850). Makes the server listing more informative +- [Issue #850](https://github.com/influxdata/influxdb/issues/850). Makes the server listing more informative ### Bugfixes -- [Issue #779](https://github.com/influxdb/influxdb/issues/779). Deleting expired shards isn't thread safe. -- [Issue #860](https://github.com/influxdb/influxdb/issues/860). Load database config should validate shard spaces. -- [Issue #862](https://github.com/influxdb/influxdb/issues/862). Data migrator should have option to set delay time. +- [Issue #779](https://github.com/influxdata/influxdb/issues/779). Deleting expired shards isn't thread safe. +- [Issue #860](https://github.com/influxdata/influxdb/issues/860). Load database config should validate shard spaces. +- [Issue #862](https://github.com/influxdata/influxdb/issues/862). Data migrator should have option to set delay time. ## v0.8.0-rc.5 [2014-08-15] ### Features -- [Issue #376](https://github.com/influxdb/influxdb/issues/376). List series should support regex filtering -- [Issue #745](https://github.com/influxdb/influxdb/issues/745). Add continuous queries to the database config -- [Issue #746](https://github.com/influxdb/influxdb/issues/746). Add data migration tool for 0.8.0 +- [Issue #376](https://github.com/influxdata/influxdb/issues/376). List series should support regex filtering +- [Issue #745](https://github.com/influxdata/influxdb/issues/745). Add continuous queries to the database config +- [Issue #746](https://github.com/influxdata/influxdb/issues/746). Add data migration tool for 0.8.0 ### Bugfixes -- [Issue #426](https://github.com/influxdb/influxdb/issues/426). Fill should fill the entire time range that is requested -- [Issue #740](https://github.com/influxdb/influxdb/issues/740). Don't emit non existent fields when joining series with different fields -- [Issue #744](https://github.com/influxdb/influxdb/issues/744). Admin site should have all assets locally -- [Issue #767](https://github.com/influxdb/influxdb/issues/768). Remove shards whenever they expire -- [Issue #781](https://github.com/influxdb/influxdb/issues/781). 
Don't emit non existent fields when joining series with different fields -- [Issue #791](https://github.com/influxdb/influxdb/issues/791). Move database config loader to be an API endpoint -- [Issue #809](https://github.com/influxdb/influxdb/issues/809). Migration path from 0.7 -> 0.8 -- [Issue #811](https://github.com/influxdb/influxdb/issues/811). Gogoprotobuf removed `ErrWrongType`, which is depended on by Raft -- [Issue #820](https://github.com/influxdb/influxdb/issues/820). Query non-local shard with time range to avoid getting back points not in time range -- [Issue #827](https://github.com/influxdb/influxdb/issues/827). Don't leak file descriptors in the WAL -- [Issue #830](https://github.com/influxdb/influxdb/issues/830). List series should return series in lexicographic sorted order -- [Issue #831](https://github.com/influxdb/influxdb/issues/831). Move create shard space to be db specific +- [Issue #426](https://github.com/influxdata/influxdb/issues/426). Fill should fill the entire time range that is requested +- [Issue #740](https://github.com/influxdata/influxdb/issues/740). Don't emit non existent fields when joining series with different fields +- [Issue #744](https://github.com/influxdata/influxdb/issues/744). Admin site should have all assets locally +- [Issue #767](https://github.com/influxdata/influxdb/issues/768). Remove shards whenever they expire +- [Issue #781](https://github.com/influxdata/influxdb/issues/781). Don't emit non existent fields when joining series with different fields +- [Issue #791](https://github.com/influxdata/influxdb/issues/791). Move database config loader to be an API endpoint +- [Issue #809](https://github.com/influxdata/influxdb/issues/809). Migration path from 0.7 -> 0.8 +- [Issue #811](https://github.com/influxdata/influxdb/issues/811). Gogoprotobuf removed `ErrWrongType`, which is depended on by Raft +- [Issue #820](https://github.com/influxdata/influxdb/issues/820). Query non-local shard with time range to avoid getting back points not in time range +- [Issue #827](https://github.com/influxdata/influxdb/issues/827). Don't leak file descriptors in the WAL +- [Issue #830](https://github.com/influxdata/influxdb/issues/830). List series should return series in lexicographic sorted order +- [Issue #831](https://github.com/influxdata/influxdb/issues/831). Move create shard space to be db specific ## v0.8.0-rc.4 [2014-07-29] ### Bugfixes -- [Issue #774](https://github.com/influxdb/influxdb/issues/774). Don't try to parse "inf" shard retention policy -- [Issue #769](https://github.com/influxdb/influxdb/issues/769). Use retention duration when determining expired shards. (Thanks, @shugo) -- [Issue #736](https://github.com/influxdb/influxdb/issues/736). Only db admins should be able to drop a series -- [Issue #713](https://github.com/influxdb/influxdb/issues/713). Null should be a valid fill value -- [Issue #644](https://github.com/influxdb/influxdb/issues/644). Graphite api should write data in batches to the coordinator -- [Issue #740](https://github.com/influxdb/influxdb/issues/740). Panic when distinct fields are selected from an inner join -- [Issue #781](https://github.com/influxdb/influxdb/issues/781). Panic when distinct fields are added after an inner join +- [Issue #774](https://github.com/influxdata/influxdb/issues/774). Don't try to parse "inf" shard retention policy +- [Issue #769](https://github.com/influxdata/influxdb/issues/769). Use retention duration when determining expired shards. 
(Thanks, @shugo) +- [Issue #736](https://github.com/influxdata/influxdb/issues/736). Only db admins should be able to drop a series +- [Issue #713](https://github.com/influxdata/influxdb/issues/713). Null should be a valid fill value +- [Issue #644](https://github.com/influxdata/influxdb/issues/644). Graphite api should write data in batches to the coordinator +- [Issue #740](https://github.com/influxdata/influxdb/issues/740). Panic when distinct fields are selected from an inner join +- [Issue #781](https://github.com/influxdata/influxdb/issues/781). Panic when distinct fields are added after an inner join ## v0.8.0-rc.3 [2014-07-21] ### Bugfixes -- [Issue #752](https://github.com/influxdb/influxdb/issues/752). `./configure` should use goroot to find gofmt -- [Issue #758](https://github.com/influxdb/influxdb/issues/758). Clarify the reason behind graphite input plugin not starting. (Thanks, @otoolep) -- [Issue #759](https://github.com/influxdb/influxdb/issues/759). Don't revert the regex in the shard space. (Thanks, @shugo) -- [Issue #760](https://github.com/influxdb/influxdb/issues/760). Removing a server should remove it from the shard server ids. (Thanks, @shugo) -- [Issue #772](https://github.com/influxdb/influxdb/issues/772). Add sentinel values to all db. This caused the last key in the db to not be fetched properly. +- [Issue #752](https://github.com/influxdata/influxdb/issues/752). `./configure` should use goroot to find gofmt +- [Issue #758](https://github.com/influxdata/influxdb/issues/758). Clarify the reason behind graphite input plugin not starting. (Thanks, @otoolep) +- [Issue #759](https://github.com/influxdata/influxdb/issues/759). Don't revert the regex in the shard space. (Thanks, @shugo) +- [Issue #760](https://github.com/influxdata/influxdb/issues/760). Removing a server should remove it from the shard server ids. (Thanks, @shugo) +- [Issue #772](https://github.com/influxdata/influxdb/issues/772). Add sentinel values to all db. This caused the last key in the db to not be fetched properly. ## v0.8.0-rc.2 [2014-07-15] @@ -1254,52 +1796,52 @@ ### Features -- [Issue #643](https://github.com/influxdb/influxdb/issues/643). Support pretty print json. (Thanks, @otoolep) -- [Issue #641](https://github.com/influxdb/influxdb/issues/641). Support multiple storage engines -- [Issue #665](https://github.com/influxdb/influxdb/issues/665). Make build tmp directory configurable in the make file. (Thanks, @dgnorton) -- [Issue #667](https://github.com/influxdb/influxdb/issues/667). Enable compression on all GET requests and when writing data -- [Issue #648](https://github.com/influxdb/influxdb/issues/648). Return permissions when listing db users. (Thanks, @nicolai86) -- [Issue #682](https://github.com/influxdb/influxdb/issues/682). Allow continuous queries to run without backfill (Thanks, @dhammika) -- [Issue #689](https://github.com/influxdb/influxdb/issues/689). **REQUIRES DATA MIGRATION** Move metadata into raft -- [Issue #255](https://github.com/influxdb/influxdb/issues/255). Support millisecond precision using `ms` suffix -- [Issue #95](https://github.com/influxdb/influxdb/issues/95). Drop database should not be synchronous -- [Issue #571](https://github.com/influxdb/influxdb/issues/571). Add support for arbitrary number of shard spaces and retention policies +- [Issue #643](https://github.com/influxdata/influxdb/issues/643). Support pretty print json. (Thanks, @otoolep) +- [Issue #641](https://github.com/influxdata/influxdb/issues/641). 
Support multiple storage engines +- [Issue #665](https://github.com/influxdata/influxdb/issues/665). Make build tmp directory configurable in the make file. (Thanks, @dgnorton) +- [Issue #667](https://github.com/influxdata/influxdb/issues/667). Enable compression on all GET requests and when writing data +- [Issue #648](https://github.com/influxdata/influxdb/issues/648). Return permissions when listing db users. (Thanks, @nicolai86) +- [Issue #682](https://github.com/influxdata/influxdb/issues/682). Allow continuous queries to run without backfill (Thanks, @dhammika) +- [Issue #689](https://github.com/influxdata/influxdb/issues/689). **REQUIRES DATA MIGRATION** Move metadata into raft +- [Issue #255](https://github.com/influxdata/influxdb/issues/255). Support millisecond precision using `ms` suffix +- [Issue #95](https://github.com/influxdata/influxdb/issues/95). Drop database should not be synchronous +- [Issue #571](https://github.com/influxdata/influxdb/issues/571). Add support for arbitrary number of shard spaces and retention policies - Default storage engine changed to RocksDB ### Bugfixes -- [Issue #651](https://github.com/influxdb/influxdb/issues/651). Change permissions of symlink which fix some installation issues. (Thanks, @Dieterbe) -- [Issue #670](https://github.com/influxdb/influxdb/issues/670). Don't warn on missing influxdb user on fresh installs -- [Issue #676](https://github.com/influxdb/influxdb/issues/676). Allow storing high precision integer values without losing any information -- [Issue #695](https://github.com/influxdb/influxdb/issues/695). Prevent having duplicate field names in the write payload. (Thanks, @seunglee150) -- [Issue #731](https://github.com/influxdb/influxdb/issues/731). Don't enable the udp plugin if the `enabled` option is set to false -- [Issue #733](https://github.com/influxdb/influxdb/issues/733). Print an `INFO` message when the input plugin is disabled -- [Issue #707](https://github.com/influxdb/influxdb/issues/707). Graphite input plugin should work payload delimited by any whitespace character -- [Issue #734](https://github.com/influxdb/influxdb/issues/734). Don't buffer non replicated writes -- [Issue #465](https://github.com/influxdb/influxdb/issues/465). Recreating a currently deleting db or series doesn't bring back the old data anymore -- [Issue #358](https://github.com/influxdb/influxdb/issues/358). **BREAKING** List series should return as a single series -- [Issue #499](https://github.com/influxdb/influxdb/issues/499). **BREAKING** Querying non-existent database or series will return an error -- [Issue #570](https://github.com/influxdb/influxdb/issues/570). InfluxDB crashes during delete/drop of database -- [Issue #592](https://github.com/influxdb/influxdb/issues/592). Drop series is inefficient +- [Issue #651](https://github.com/influxdata/influxdb/issues/651). Change permissions of symlink which fix some installation issues. (Thanks, @Dieterbe) +- [Issue #670](https://github.com/influxdata/influxdb/issues/670). Don't warn on missing influxdb user on fresh installs +- [Issue #676](https://github.com/influxdata/influxdb/issues/676). Allow storing high precision integer values without losing any information +- [Issue #695](https://github.com/influxdata/influxdb/issues/695). Prevent having duplicate field names in the write payload. (Thanks, @seunglee150) +- [Issue #731](https://github.com/influxdata/influxdb/issues/731). 
Don't enable the udp plugin if the `enabled` option is set to false +- [Issue #733](https://github.com/influxdata/influxdb/issues/733). Print an `INFO` message when the input plugin is disabled +- [Issue #707](https://github.com/influxdata/influxdb/issues/707). Graphite input plugin should work payload delimited by any whitespace character +- [Issue #734](https://github.com/influxdata/influxdb/issues/734). Don't buffer non replicated writes +- [Issue #465](https://github.com/influxdata/influxdb/issues/465). Recreating a currently deleting db or series doesn't bring back the old data anymore +- [Issue #358](https://github.com/influxdata/influxdb/issues/358). **BREAKING** List series should return as a single series +- [Issue #499](https://github.com/influxdata/influxdb/issues/499). **BREAKING** Querying non-existent database or series will return an error +- [Issue #570](https://github.com/influxdata/influxdb/issues/570). InfluxDB crashes during delete/drop of database +- [Issue #592](https://github.com/influxdata/influxdb/issues/592). Drop series is inefficient ## v0.7.3 [2014-06-13] ### Bugfixes -- [Issue #637](https://github.com/influxdb/influxdb/issues/637). Truncate log files if the last request wasn't written properly -- [Issue #646](https://github.com/influxdb/influxdb/issues/646). CRITICAL: Duplicate shard ids for new shards if old shards are deleted. +- [Issue #637](https://github.com/influxdata/influxdb/issues/637). Truncate log files if the last request wasn't written properly +- [Issue #646](https://github.com/influxdata/influxdb/issues/646). CRITICAL: Duplicate shard ids for new shards if old shards are deleted. ## v0.7.2 [2014-05-30] ### Features -- [Issue #521](https://github.com/influxdb/influxdb/issues/521). MODE works on all datatypes (Thanks, @richthegeek) +- [Issue #521](https://github.com/influxdata/influxdb/issues/521). MODE works on all datatypes (Thanks, @richthegeek) ### Bugfixes -- [Issue #418](https://github.com/influxdb/influxdb/pull/418). Requests or responses larger than MAX_REQUEST_SIZE break things. -- [Issue #606](https://github.com/influxdb/influxdb/issues/606). InfluxDB will fail to start with invalid permission if log.txt didn't exist -- [Issue #602](https://github.com/influxdb/influxdb/issues/602). Merge will fail to work across shards +- [Issue #418](https://github.com/influxdata/influxdb/pull/418). Requests or responses larger than MAX_REQUEST_SIZE break things. +- [Issue #606](https://github.com/influxdata/influxdb/issues/606). InfluxDB will fail to start with invalid permission if log.txt didn't exist +- [Issue #602](https://github.com/influxdata/influxdb/issues/602). Merge will fail to work across shards ### Features @@ -1307,55 +1849,55 @@ ### Bugfixes -- [Issue #579](https://github.com/influxdb/influxdb/issues/579). Reject writes to nonexistent databases -- [Issue #597](https://github.com/influxdb/influxdb/issues/597). Force compaction after deleting data +- [Issue #579](https://github.com/influxdata/influxdb/issues/579). Reject writes to nonexistent databases +- [Issue #597](https://github.com/influxdata/influxdb/issues/597). Force compaction after deleting data ### Features -- [Issue #476](https://github.com/influxdb/influxdb/issues/476). Support ARM architecture -- [Issue #578](https://github.com/influxdb/influxdb/issues/578). Support aliasing for expressions in parenthesis -- [Issue #544](https://github.com/influxdb/influxdb/pull/544). Support forcing node removal from a cluster -- [Issue #591](https://github.com/influxdb/influxdb/pull/591). 
Support multiple udp input plugins (Thanks, @tpitale) -- [Issue #600](https://github.com/influxdb/influxdb/pull/600). Report version, os, arch, and raftName once per day. +- [Issue #476](https://github.com/influxdata/influxdb/issues/476). Support ARM architecture +- [Issue #578](https://github.com/influxdata/influxdb/issues/578). Support aliasing for expressions in parenthesis +- [Issue #544](https://github.com/influxdata/influxdb/pull/544). Support forcing node removal from a cluster +- [Issue #591](https://github.com/influxdata/influxdb/pull/591). Support multiple udp input plugins (Thanks, @tpitale) +- [Issue #600](https://github.com/influxdata/influxdb/pull/600). Report version, os, arch, and raftName once per day. ## v0.7.0 [2014-05-23] ### Bugfixes -- [Issue #557](https://github.com/influxdb/influxdb/issues/557). Group by time(1y) doesn't work while time(365d) works -- [Issue #547](https://github.com/influxdb/influxdb/issues/547). Add difference function (Thanks, @mboelstra) -- [Issue #550](https://github.com/influxdb/influxdb/issues/550). Fix tests on 32-bit ARM -- [Issue #524](https://github.com/influxdb/influxdb/issues/524). Arithmetic operators and where conditions don't play nice together -- [Issue #561](https://github.com/influxdb/influxdb/issues/561). Fix missing query in parsing errors -- [Issue #563](https://github.com/influxdb/influxdb/issues/563). Add sample config for graphite over udp -- [Issue #537](https://github.com/influxdb/influxdb/issues/537). Incorrect query syntax causes internal error -- [Issue #565](https://github.com/influxdb/influxdb/issues/565). Empty series names shouldn't cause a panic -- [Issue #575](https://github.com/influxdb/influxdb/issues/575). Single point select doesn't interpret timestamps correctly -- [Issue #576](https://github.com/influxdb/influxdb/issues/576). We shouldn't set timestamps and sequence numbers when listing cq -- [Issue #560](https://github.com/influxdb/influxdb/issues/560). Use /dev/urandom instead of /dev/random -- [Issue #502](https://github.com/influxdb/influxdb/issues/502). Fix a +- [Issue #557](https://github.com/influxdata/influxdb/issues/557). Group by time(1y) doesn't work while time(365d) works +- [Issue #547](https://github.com/influxdata/influxdb/issues/547). Add difference function (Thanks, @mboelstra) +- [Issue #550](https://github.com/influxdata/influxdb/issues/550). Fix tests on 32-bit ARM +- [Issue #524](https://github.com/influxdata/influxdb/issues/524). Arithmetic operators and where conditions don't play nice together +- [Issue #561](https://github.com/influxdata/influxdb/issues/561). Fix missing query in parsing errors +- [Issue #563](https://github.com/influxdata/influxdb/issues/563). Add sample config for graphite over udp +- [Issue #537](https://github.com/influxdata/influxdb/issues/537). Incorrect query syntax causes internal error +- [Issue #565](https://github.com/influxdata/influxdb/issues/565). Empty series names shouldn't cause a panic +- [Issue #575](https://github.com/influxdata/influxdb/issues/575). Single point select doesn't interpret timestamps correctly +- [Issue #576](https://github.com/influxdata/influxdb/issues/576). We shouldn't set timestamps and sequence numbers when listing cq +- [Issue #560](https://github.com/influxdata/influxdb/issues/560). Use /dev/urandom instead of /dev/random +- [Issue #502](https://github.com/influxdata/influxdb/issues/502). 
Fix a race condition in assigning id to db+series+field (Thanks @ohurvitz for reporting this bug and providing a script to repro) ### Features -- [Issue #567](https://github.com/influxdb/influxdb/issues/567). Allow selecting from multiple series names by separating them with commas (Thanks, @peekeri) +- [Issue #567](https://github.com/influxdata/influxdb/issues/567). Allow selecting from multiple series names by separating them with commas (Thanks, @peekeri) ### Deprecated -- [Issue #460](https://github.com/influxdb/influxdb/issues/460). Don't start automatically after installing -- [Issue #529](https://github.com/influxdb/influxdb/issues/529). Don't run influxdb as root -- [Issue #443](https://github.com/influxdb/influxdb/issues/443). Use `name` instead of `username` when returning cluster admins +- [Issue #460](https://github.com/influxdata/influxdb/issues/460). Don't start automatically after installing +- [Issue #529](https://github.com/influxdata/influxdb/issues/529). Don't run influxdb as root +- [Issue #443](https://github.com/influxdata/influxdb/issues/443). Use `name` instead of `username` when returning cluster admins ## v0.6.5 [2014-05-19] ### Features -- [Issue #551](https://github.com/influxdb/influxdb/issues/551). Add TOP and BOTTOM aggregate functions (Thanks, @chobie) +- [Issue #551](https://github.com/influxdata/influxdb/issues/551). Add TOP and BOTTOM aggregate functions (Thanks, @chobie) ### Bugfixes -- [Issue #555](https://github.com/influxdb/influxdb/issues/555). Fix a regression introduced in the raft snapshot format +- [Issue #555](https://github.com/influxdata/influxdb/issues/555). Fix a regression introduced in the raft snapshot format ## v0.6.4 [2014-05-16] @@ -1363,104 +1905,104 @@ - Make the write batch size configurable (also applies to deletes) - Optimize writing to multiple series -- [Issue #546](https://github.com/influxdb/influxdb/issues/546). Add UDP support for Graphite API (Thanks, @peekeri) +- [Issue #546](https://github.com/influxdata/influxdb/issues/546). Add UDP support for Graphite API (Thanks, @peekeri) ### Bugfixes - Fix a bug in shard logic that caused short term shards to be clobbered with long term shards -- [Issue #489](https://github.com/influxdb/influxdb/issues/489). Remove replication factor from CreateDatabase command +- [Issue #489](https://github.com/influxdata/influxdb/issues/489). Remove replication factor from CreateDatabase command ## v0.6.3 [2014-05-13] ### Features -- [Issue #505](https://github.com/influxdb/influxdb/issues/505). Return a version header with http the response (Thanks, @majst01) -- [Issue #520](https://github.com/influxdb/influxdb/issues/520). Print the version to the log file +- [Issue #505](https://github.com/influxdata/influxdb/issues/505). Return a version header with http the response (Thanks, @majst01) +- [Issue #520](https://github.com/influxdata/influxdb/issues/520). Print the version to the log file ### Bugfixes -- [Issue #516](https://github.com/influxdb/influxdb/issues/516). Close WAL log/index files when they aren't being used -- [Issue #532](https://github.com/influxdb/influxdb/issues/532). Don't log graphite connection EOF as an error -- [Issue #535](https://github.com/influxdb/influxdb/issues/535). WAL Replay hangs if response isn't received -- [Issue #538](https://github.com/influxdb/influxdb/issues/538). Don't panic if the same series existed twice in the request with different columns -- [Issue #536](https://github.com/influxdb/influxdb/issues/536). 
Joining the cluster after shards are creating shouldn't cause new nodes to panic -- [Issue #539](https://github.com/influxdb/influxdb/issues/539). count(distinct()) with fill shouldn't panic on empty groups -- [Issue #534](https://github.com/influxdb/influxdb/issues/534). Create a new series when interpolating +- [Issue #516](https://github.com/influxdata/influxdb/issues/516). Close WAL log/index files when they aren't being used +- [Issue #532](https://github.com/influxdata/influxdb/issues/532). Don't log graphite connection EOF as an error +- [Issue #535](https://github.com/influxdata/influxdb/issues/535). WAL Replay hangs if response isn't received +- [Issue #538](https://github.com/influxdata/influxdb/issues/538). Don't panic if the same series existed twice in the request with different columns +- [Issue #536](https://github.com/influxdata/influxdb/issues/536). Joining the cluster after shards are creating shouldn't cause new nodes to panic +- [Issue #539](https://github.com/influxdata/influxdb/issues/539). count(distinct()) with fill shouldn't panic on empty groups +- [Issue #534](https://github.com/influxdata/influxdb/issues/534). Create a new series when interpolating ## v0.6.2 [2014-05-09] ### Bugfixes -- [Issue #511](https://github.com/influxdb/influxdb/issues/511). Don't automatically create the database when a db user is created -- [Issue #512](https://github.com/influxdb/influxdb/issues/512). Group by should respect null values -- [Issue #518](https://github.com/influxdb/influxdb/issues/518). Filter Infinities and NaNs from the returned json -- [Issue #522](https://github.com/influxdb/influxdb/issues/522). Committing requests while replaying caused the WAL to skip some log files -- [Issue #369](https://github.com/influxdb/influxdb/issues/369). Fix some edge cases with WAL recovery +- [Issue #511](https://github.com/influxdata/influxdb/issues/511). Don't automatically create the database when a db user is created +- [Issue #512](https://github.com/influxdata/influxdb/issues/512). Group by should respect null values +- [Issue #518](https://github.com/influxdata/influxdb/issues/518). Filter Infinities and NaNs from the returned json +- [Issue #522](https://github.com/influxdata/influxdb/issues/522). Committing requests while replaying caused the WAL to skip some log files +- [Issue #369](https://github.com/influxdata/influxdb/issues/369). Fix some edge cases with WAL recovery ## v0.6.1 [2014-05-06] ### Bugfixes -- [Issue #500](https://github.com/influxdb/influxdb/issues/500). Support `y` suffix in time durations -- [Issue #501](https://github.com/influxdb/influxdb/issues/501). Writes with invalid payload should be rejected -- [Issue #507](https://github.com/influxdb/influxdb/issues/507). New cluster admin passwords don't propagate properly to other nodes in a cluster -- [Issue #508](https://github.com/influxdb/influxdb/issues/508). Don't replay WAL entries for servers with no shards -- [Issue #464](https://github.com/influxdb/influxdb/issues/464). Admin UI shouldn't draw graphs for string columns -- [Issue #480](https://github.com/influxdb/influxdb/issues/480). Large values on the y-axis get cut off +- [Issue #500](https://github.com/influxdata/influxdb/issues/500). Support `y` suffix in time durations +- [Issue #501](https://github.com/influxdata/influxdb/issues/501). Writes with invalid payload should be rejected +- [Issue #507](https://github.com/influxdata/influxdb/issues/507). 
New cluster admin passwords don't propagate properly to other nodes in a cluster +- [Issue #508](https://github.com/influxdata/influxdb/issues/508). Don't replay WAL entries for servers with no shards +- [Issue #464](https://github.com/influxdata/influxdb/issues/464). Admin UI shouldn't draw graphs for string columns +- [Issue #480](https://github.com/influxdata/influxdb/issues/480). Large values on the y-axis get cut off ## v0.6.0 [2014-05-02] ### Feature -- [Issue #477](https://github.com/influxdb/influxdb/issues/477). Add a udp json interface (Thanks, Julien Ammous) -- [Issue #491](https://github.com/influxdb/influxdb/issues/491). Make initial root password settable through env variable (Thanks, Edward Muller) +- [Issue #477](https://github.com/influxdata/influxdb/issues/477). Add a udp json interface (Thanks, Julien Ammous) +- [Issue #491](https://github.com/influxdata/influxdb/issues/491). Make initial root password settable through env variable (Thanks, Edward Muller) ### Bugfixes -- [Issue #469](https://github.com/influxdb/influxdb/issues/469). Drop continuous queries when a database is dropped -- [Issue #431](https://github.com/influxdb/influxdb/issues/431). Don't log to standard output if a log file is specified in the config file -- [Issue #483](https://github.com/influxdb/influxdb/issues/483). Return 409 if a database already exist (Thanks, Edward Muller) -- [Issue #486](https://github.com/influxdb/influxdb/issues/486). Columns used in the target of continuous query shouldn't be inserted in the time series -- [Issue #490](https://github.com/influxdb/influxdb/issues/490). Database user password's cannot be changed (Thanks, Edward Muller) -- [Issue #495](https://github.com/influxdb/influxdb/issues/495). Enforce write permissions properly +- [Issue #469](https://github.com/influxdata/influxdb/issues/469). Drop continuous queries when a database is dropped +- [Issue #431](https://github.com/influxdata/influxdb/issues/431). Don't log to standard output if a log file is specified in the config file +- [Issue #483](https://github.com/influxdata/influxdb/issues/483). Return 409 if a database already exist (Thanks, Edward Muller) +- [Issue #486](https://github.com/influxdata/influxdb/issues/486). Columns used in the target of continuous query shouldn't be inserted in the time series +- [Issue #490](https://github.com/influxdata/influxdb/issues/490). Database user password's cannot be changed (Thanks, Edward Muller) +- [Issue #495](https://github.com/influxdata/influxdb/issues/495). Enforce write permissions properly ## v0.5.12 [2014-04-29] ### Bugfixes -- [Issue #419](https://github.com/influxdb/influxdb/issues/419),[Issue #478](https://github.com/influxdb/influxdb/issues/478). Allow hostname, raft and protobuf ports to be changed, without requiring manual intervention from the user +- [Issue #419](https://github.com/influxdata/influxdb/issues/419),[Issue #478](https://github.com/influxdata/influxdb/issues/478). Allow hostname, raft and protobuf ports to be changed, without requiring manual intervention from the user ## v0.5.11 [2014-04-25] ### Features -- [Issue #471](https://github.com/influxdb/influxdb/issues/471). Read and write permissions should be settable through the http api +- [Issue #471](https://github.com/influxdata/influxdb/issues/471). Read and write permissions should be settable through the http api ### Bugfixes -- [Issue #323](https://github.com/influxdb/influxdb/issues/323). 
Continuous queries should guard against data loops -- [Issue #473](https://github.com/influxdb/influxdb/issues/473). Engine memory optimization +- [Issue #323](https://github.com/influxdata/influxdb/issues/323). Continuous queries should guard against data loops +- [Issue #473](https://github.com/influxdata/influxdb/issues/473). Engine memory optimization ## v0.5.10 [2014-04-22] ### Features -- [Issue #463](https://github.com/influxdb/influxdb/issues/463). Allow series names to use any character (escape by wrapping in double quotes) -- [Issue #447](https://github.com/influxdb/influxdb/issues/447). Allow @ in usernames -- [Issue #466](https://github.com/influxdb/influxdb/issues/466). Allow column names to use any character (escape by wrapping in double quotes) +- [Issue #463](https://github.com/influxdata/influxdb/issues/463). Allow series names to use any character (escape by wrapping in double quotes) +- [Issue #447](https://github.com/influxdata/influxdb/issues/447). Allow @ in usernames +- [Issue #466](https://github.com/influxdata/influxdb/issues/466). Allow column names to use any character (escape by wrapping in double quotes) ### Bugfixes -- [Issue #458](https://github.com/influxdb/influxdb/issues/458). Continuous queries with group by time() and a column should insert sequence numbers of 1 -- [Issue #457](https://github.com/influxdb/influxdb/issues/457). Deleting series that start with capital letters should work +- [Issue #458](https://github.com/influxdata/influxdb/issues/458). Continuous queries with group by time() and a column should insert sequence numbers of 1 +- [Issue #457](https://github.com/influxdata/influxdb/issues/457). Deleting series that start with capital letters should work ## v0.5.9 [2014-04-18] ### Bugfixes -- [Issue #446](https://github.com/influxdb/influxdb/issues/446). Check for (de)serialization errors -- [Issue #456](https://github.com/influxdb/influxdb/issues/456). Continuous queries failed if one of the group by columns had a null value -- [Issue #455](https://github.com/influxdb/influxdb/issues/455). Comparison operators should ignore null values +- [Issue #446](https://github.com/influxdata/influxdb/issues/446). Check for (de)serialization errors +- [Issue #456](https://github.com/influxdata/influxdb/issues/456). Continuous queries failed if one of the group by columns had a null value +- [Issue #455](https://github.com/influxdata/influxdb/issues/455). Comparison operators should ignore null values ## v0.5.8 [2014-04-17] @@ -1468,9 +2010,9 @@ ### Bugfixes -- [Issue #244](https://github.com/influxdb/influxdb/issues/244). Reconstruct the query from the ast -- [Issue #449](https://github.com/influxdb/influxdb/issues/449). Heartbeat timeouts can cause reading from connection to lock up -- [Issue #451](https://github.com/influxdb/influxdb/issues/451). Reduce the aggregation state that is kept in memory so that +- [Issue #244](https://github.com/influxdata/influxdb/issues/244). Reconstruct the query from the ast +- [Issue #449](https://github.com/influxdata/influxdb/issues/449). Heartbeat timeouts can cause reading from connection to lock up +- [Issue #451](https://github.com/influxdata/influxdb/issues/451). Reduce the aggregation state that is kept in memory so that aggregation queries over large periods of time don't take an insane amount of memory ## v0.5.7 [2014-04-15] @@ -1481,29 +2023,29 @@ ### Bugfixes -- [Issue #328](https://github.com/influxdb/influxdb/issues/328).
Join queries with math expressions don't work -- [Issue #440](https://github.com/influxdb/influxdb/issues/440). Heartbeat timeouts in logs -- [Issue #442](https://github.com/influxdb/influxdb/issues/442). shouldQuerySequentially didn't work as expected +- [Issue #328](https://github.com/influxdata/influxdb/issues/328). Join queries with math expressions don't work +- [Issue #440](https://github.com/influxdata/influxdb/issues/440). Heartbeat timeouts in logs +- [Issue #442](https://github.com/influxdata/influxdb/issues/442). shouldQuerySequentially didn't work as expected causing count(*) queries on large time series to use lots of memory -- [Issue #437](https://github.com/influxdb/influxdb/issues/437). Queries with negative constants don't parse properly -- [Issue #432](https://github.com/influxdb/influxdb/issues/432). Deleted data using a delete query is resurrected after a server restart -- [Issue #439](https://github.com/influxdb/influxdb/issues/439). Report the right location of the error in the query +- [Issue #437](https://github.com/influxdata/influxdb/issues/437). Queries with negative constants don't parse properly +- [Issue #432](https://github.com/influxdata/influxdb/issues/432). Deleted data using a delete query is resurrected after a server restart +- [Issue #439](https://github.com/influxdata/influxdb/issues/439). Report the right location of the error in the query - Fix some bugs with the WAL recovery on startup ## v0.5.6 [2014-04-08] ### Features -- [Issue #310](https://github.com/influxdb/influxdb/issues/310). Request should support multiple timeseries -- [Issue #416](https://github.com/influxdb/influxdb/issues/416). Improve the time it takes to drop database +- [Issue #310](https://github.com/influxdata/influxdb/issues/310). Request should support multiple timeseries +- [Issue #416](https://github.com/influxdata/influxdb/issues/416). Improve the time it takes to drop database ### Bugfixes -- [Issue #413](https://github.com/influxdb/influxdb/issues/413). Don't assume that group by interval is greater than a second -- [Issue #415](https://github.com/influxdb/influxdb/issues/415). Include the database when sending an auth error back to the user -- [Issue #421](https://github.com/influxdb/influxdb/issues/421). Make read timeout a config option -- [Issue #392](https://github.com/influxdb/influxdb/issues/392). Different columns in different shards returns invalid results when a query spans those shards +- [Issue #413](https://github.com/influxdata/influxdb/issues/413). Don't assume that group by interval is greater than a second +- [Issue #415](https://github.com/influxdata/influxdb/issues/415). Include the database when sending an auth error back to the user +- [Issue #421](https://github.com/influxdata/influxdb/issues/421). Make read timeout a config option +- [Issue #392](https://github.com/influxdata/influxdb/issues/392). Different columns in different shards returns invalid results when a query spans those shards ### Bugfixes @@ -1516,48 +2058,48 @@ ### Feature - Add a command line option to repair corrupted leveldb databases on startup -- [Issue #401](https://github.com/influxdb/influxdb/issues/401). No limit on the number of columns in the group by clause +- [Issue #401](https://github.com/influxdata/influxdb/issues/401). No limit on the number of columns in the group by clause ### Bugfixes -- [Issue #398](https://github.com/influxdb/influxdb/issues/398). Support now() and NOW() in the query lang -- [Issue #403](https://github.com/influxdb/influxdb/issues/403). 
Filtering should work with join queries -- [Issue #404](https://github.com/influxdb/influxdb/issues/404). Filtering with invalid condition shouldn't crash the server -- [Issue #405](https://github.com/influxdb/influxdb/issues/405). Percentile shouldn't crash for small number of values -- [Issue #408](https://github.com/influxdb/influxdb/issues/408). Make InfluxDB recover from internal bugs and panics -- [Issue #390](https://github.com/influxdb/influxdb/issues/390). Multiple response.WriteHeader when querying as admin -- [Issue #407](https://github.com/influxdb/influxdb/issues/407). Start processing continuous queries only after the WAL is initialized +- [Issue #398](https://github.com/influxdata/influxdb/issues/398). Support now() and NOW() in the query lang +- [Issue #403](https://github.com/influxdata/influxdb/issues/403). Filtering should work with join queries +- [Issue #404](https://github.com/influxdata/influxdb/issues/404). Filtering with invalid condition shouldn't crash the server +- [Issue #405](https://github.com/influxdata/influxdb/issues/405). Percentile shouldn't crash for small number of values +- [Issue #408](https://github.com/influxdata/influxdb/issues/408). Make InfluxDB recover from internal bugs and panics +- [Issue #390](https://github.com/influxdata/influxdb/issues/390). Multiple response.WriteHeader when querying as admin +- [Issue #407](https://github.com/influxdata/influxdb/issues/407). Start processing continuous queries only after the WAL is initialized - Close leveldb databases properly if we couldn't create a new Shard. See leveldb\_shard\_datastore\_test:131 ## v0.5.4 [2014-04-02] ### Bugfixes -- [Issue #386](https://github.com/influxdb/influxdb/issues/386). Drop series should work with series containing dots -- [Issue #389](https://github.com/influxdb/influxdb/issues/389). Filtering shouldn't stop prematurely -- [Issue #341](https://github.com/influxdb/influxdb/issues/341). Make the number of shards that are queried in parallel configurable -- [Issue #394](https://github.com/influxdb/influxdb/issues/394). Support count(distinct) and count(DISTINCT) -- [Issue #362](https://github.com/influxdb/influxdb/issues/362). Limit should be enforced after aggregation +- [Issue #386](https://github.com/influxdata/influxdb/issues/386). Drop series should work with series containing dots +- [Issue #389](https://github.com/influxdata/influxdb/issues/389). Filtering shouldn't stop prematurely +- [Issue #341](https://github.com/influxdata/influxdb/issues/341). Make the number of shards that are queried in parallel configurable +- [Issue #394](https://github.com/influxdata/influxdb/issues/394). Support count(distinct) and count(DISTINCT) +- [Issue #362](https://github.com/influxdata/influxdb/issues/362). Limit should be enforced after aggregation ## v0.5.3 [2014-03-31] ### Bugfixes -- [Issue #378](https://github.com/influxdb/influxdb/issues/378). Indexing should return if there are no requests added since the last index -- [Issue #370](https://github.com/influxdb/influxdb/issues/370). Filtering and limit should be enforced on the shards -- [Issue #379](https://github.com/influxdb/influxdb/issues/379). Boolean columns should be usable in where clauses -- [Issue #381](https://github.com/influxdb/influxdb/issues/381). Should be able to do deletes as a cluster admin +- [Issue #378](https://github.com/influxdata/influxdb/issues/378). Indexing should return if there are no requests added since the last index +- [Issue #370](https://github.com/influxdata/influxdb/issues/370). 
Filtering and limit should be enforced on the shards +- [Issue #379](https://github.com/influxdata/influxdb/issues/379). Boolean columns should be usable in where clauses +- [Issue #381](https://github.com/influxdata/influxdb/issues/381). Should be able to do deletes as a cluster admin ## v0.5.2 [2014-03-28] ### Bugfixes -- [Issue #342](https://github.com/influxdb/influxdb/issues/342). Data resurrected after a server restart -- [Issue #367](https://github.com/influxdb/influxdb/issues/367). Influxdb won't start if the api port is commented out -- [Issue #355](https://github.com/influxdb/influxdb/issues/355). Return an error on wrong time strings -- [Issue #331](https://github.com/influxdb/influxdb/issues/331). Allow negative time values in the where clause -- [Issue #371](https://github.com/influxdb/influxdb/issues/371). Series index isn't deleted when the series is dropped -- [Issue #360](https://github.com/influxdb/influxdb/issues/360). Store and recover continuous queries +- [Issue #342](https://github.com/influxdata/influxdb/issues/342). Data resurrected after a server restart +- [Issue #367](https://github.com/influxdata/influxdb/issues/367). Influxdb won't start if the api port is commented out +- [Issue #355](https://github.com/influxdata/influxdb/issues/355). Return an error on wrong time strings +- [Issue #331](https://github.com/influxdata/influxdb/issues/331). Allow negative time values in the where clause +- [Issue #371](https://github.com/influxdata/influxdb/issues/371). Series index isn't deleted when the series is dropped +- [Issue #360](https://github.com/influxdata/influxdb/issues/360). Store and recover continuous queries ## v0.5.1 [2014-03-24] @@ -1569,11 +2111,11 @@ ### Features -- [Issue #293](https://github.com/influxdb/influxdb/pull/293). Implement a Graphite listener +- [Issue #293](https://github.com/influxdata/influxdb/pull/293). Implement a Graphite listener ### Bugfixes -- [Issue #340](https://github.com/influxdb/influxdb/issues/340). Writing many requests while replaying seems to cause commits out of order +- [Issue #340](https://github.com/influxdata/influxdb/issues/340). Writing many requests while replaying seems to cause commits out of order ## v0.5.0-rc.6 [2014-03-20] @@ -1582,9 +2124,9 @@ - Increase raft election timeout to avoid unnecessary re-elections - Sort points before writing them to avoid an explosion in the request number when the points are written randomly -- [Issue #335](https://github.com/influxdb/influxdb/issues/335). Fixes regexp for interpolating more than one column value in continuous queries -- [Issue #318](https://github.com/influxdb/influxdb/pull/318). Support EXPLAIN queries -- [Issue #333](https://github.com/influxdb/influxdb/pull/333). Fail +- [Issue #335](https://github.com/influxdata/influxdb/issues/335). Fixes regexp for interpolating more than one column value in continuous queries +- [Issue #318](https://github.com/influxdata/influxdb/pull/318). Support EXPLAIN queries +- [Issue #333](https://github.com/influxdata/influxdb/pull/333). Fail when the password is too short or too long instead of passing it to the crypto library @@ -1592,65 +2134,65 @@ ### Bugfixes -- [Issue #312](https://github.com/influxdb/influxdb/issues/312). WAL should wait for server id to be set before recovering -- [Issue #301](https://github.com/influxdb/influxdb/issues/301). Use ref counting to guard against race conditions in the shard cache -- [Issue #319](https://github.com/influxdb/influxdb/issues/319).
Propagate engine creation error correctly to the user -- [Issue #316](https://github.com/influxdb/influxdb/issues/316). Make +- [Issue #312](https://github.com/influxdata/influxdb/issues/312). WAL should wait for server id to be set before recovering +- [Issue #301](https://github.com/influxdata/influxdb/issues/301). Use ref counting to guard against race conditions in the shard cache +- [Issue #319](https://github.com/influxdata/influxdb/issues/319). Propagate engine creation error correctly to the user +- [Issue #316](https://github.com/influxdata/influxdb/issues/316). Make sure we don't starve goroutines if we get an access denied error from one of the shards -- [Issue #306](https://github.com/influxdb/influxdb/issues/306). Deleting/Dropping database takes a lot of memory -- [Issue #302](https://github.com/influxdb/influxdb/issues/302). Should be able to set negative timestamps on points -- [Issue #327](https://github.com/influxdb/influxdb/issues/327). Make delete queries not use WAL. This addresses #315, #317 and #314 -- [Issue #321](https://github.com/influxdb/influxdb/issues/321). Make sure we split points on shards properly +- [Issue #306](https://github.com/influxdata/influxdb/issues/306). Deleting/Dropping database takes a lot of memory +- [Issue #302](https://github.com/influxdata/influxdb/issues/302). Should be able to set negative timestamps on points +- [Issue #327](https://github.com/influxdata/influxdb/issues/327). Make delete queries not use WAL. This addresses #315, #317 and #314 +- [Issue #321](https://github.com/influxdata/influxdb/issues/321). Make sure we split points on shards properly ## v0.5.0-rc.4 [2014-03-07] ### Bugfixes -- [Issue #298](https://github.com/influxdb/influxdb/issues/298). Fix limit when querying multiple shards -- [Issue #305](https://github.com/influxdb/influxdb/issues/305). Shard ids not unique after restart -- [Issue #309](https://github.com/influxdb/influxdb/issues/309). Don't relog the requests on the remote server +- [Issue #298](https://github.com/influxdata/influxdb/issues/298). Fix limit when querying multiple shards +- [Issue #305](https://github.com/influxdata/influxdb/issues/305). Shard ids not unique after restart +- [Issue #309](https://github.com/influxdata/influxdb/issues/309). Don't relog the requests on the remote server - Fix few bugs in the WAL and refactor the way it works (this requires purging the WAL from previous rc) ## v0.5.0-rc.3 [2014-03-03] ### Bugfixes -- [Issue #69](https://github.com/influxdb/influxdb/issues/69). Support column aliases -- [Issue #287](https://github.com/influxdb/influxdb/issues/287). Make the lru cache size configurable -- [Issue #38](https://github.com/influxdb/influxdb/issues/38). Fix a memory leak discussed in this story -- [Issue #286](https://github.com/influxdb/influxdb/issues/286). Make the number of open shards configurable +- [Issue #69](https://github.com/influxdata/influxdb/issues/69). Support column aliases +- [Issue #287](https://github.com/influxdata/influxdb/issues/287). Make the lru cache size configurable +- [Issue #38](https://github.com/influxdata/influxdb/issues/38). Fix a memory leak discussed in this story +- [Issue #286](https://github.com/influxdata/influxdb/issues/286). Make the number of open shards configurable - Make LevelDB use the max open files configuration option. ## v0.5.0-rc.2 [2014-02-27] ### Bugfixes -- [Issue #274](https://github.com/influxdb/influxdb/issues/274). Crash after restart -- [Issue #277](https://github.com/influxdb/influxdb/issues/277). 
Ensure duplicate shards won't be created -- [Issue #279](https://github.com/influxdb/influxdb/issues/279). Limits not working on regex queries -- [Issue #281](https://github.com/influxdb/influxdb/issues/281). `./influxdb -v` should print the sha when building from source -- [Issue #283](https://github.com/influxdb/influxdb/issues/283). Dropping shard and restart in cluster causes panic. -- [Issue #288](https://github.com/influxdb/influxdb/issues/288). Sequence numbers should be unique per server id +- [Issue #274](https://github.com/influxdata/influxdb/issues/274). Crash after restart +- [Issue #277](https://github.com/influxdata/influxdb/issues/277). Ensure duplicate shards won't be created +- [Issue #279](https://github.com/influxdata/influxdb/issues/279). Limits not working on regex queries +- [Issue #281](https://github.com/influxdata/influxdb/issues/281). `./influxdb -v` should print the sha when building from source +- [Issue #283](https://github.com/influxdata/influxdb/issues/283). Dropping shard and restart in cluster causes panic. +- [Issue #288](https://github.com/influxdata/influxdb/issues/288). Sequence numbers should be unique per server id ## v0.5.0-rc.1 [2014-02-25] ### Bugfixes - Ensure large deletes don't take too much memory -- [Issue #240](https://github.com/influxdb/influxdb/pull/240). Unable to query against columns with `.` in the name. -- [Issue #250](https://github.com/influxdb/influxdb/pull/250). different result between normal and continuous query with "group by" clause -- [Issue #216](https://github.com/influxdb/influxdb/pull/216). Results with no points should exclude columns and points +- [Issue #240](https://github.com/influxdata/influxdb/pull/240). Unable to query against columns with `.` in the name. +- [Issue #250](https://github.com/influxdata/influxdb/pull/250). different result between normal and continuous query with "group by" clause +- [Issue #216](https://github.com/influxdata/influxdb/pull/216). Results with no points should exclude columns and points ### Features -- [Issue #243](https://github.com/influxdb/influxdb/issues/243). Should have endpoint to GET a user's attributes. -- [Issue #269](https://github.com/influxdb/influxdb/pull/269), [Issue #65](https://github.com/influxdb/influxdb/issues/65) New clustering architecture (see docs), with the side effect that queries can be distributed between multiple shards -- [Issue #164](https://github.com/influxdb/influxdb/pull/269),[Issue #103](https://github.com/influxdb/influxdb/pull/269),[Issue #166](https://github.com/influxdb/influxdb/pull/269),[Issue #165](https://github.com/influxdb/influxdb/pull/269),[Issue #132](https://github.com/influxdb/influxdb/pull/269) Make request log a log file instead of leveldb with recovery on startup +- [Issue #243](https://github.com/influxdata/influxdb/issues/243). Should have endpoint to GET a user's attributes. 
+- [Issue #269](https://github.com/influxdata/influxdb/pull/269), [Issue #65](https://github.com/influxdata/influxdb/issues/65) New clustering architecture (see docs), with the side effect that queries can be distributed between multiple shards +- [Issue #164](https://github.com/influxdata/influxdb/pull/269),[Issue #103](https://github.com/influxdata/influxdb/pull/269),[Issue #166](https://github.com/influxdata/influxdb/pull/269),[Issue #165](https://github.com/influxdata/influxdb/pull/269),[Issue #132](https://github.com/influxdata/influxdb/pull/269) Make request log a log file instead of leveldb with recovery on startup ### Deprecated -- [Issue #189](https://github.com/influxdb/influxdb/issues/189). `/cluster_admins` and `/db/:db/users` return usernames in a `name` key instead of `username` key. -- [Issue #216](https://github.com/influxdb/influxdb/pull/216). Results with no points should exclude columns and points +- [Issue #189](https://github.com/influxdata/influxdb/issues/189). `/cluster_admins` and `/db/:db/users` return usernames in a `name` key instead of `username` key. +- [Issue #216](https://github.com/influxdata/influxdb/pull/216). Results with no points should exclude columns and points ## v0.4.4 [2014-02-05] @@ -1662,69 +2204,69 @@ ### Bugfixes -- [Issue #225](https://github.com/influxdb/influxdb/issues/225). Remove a hard limit on the points returned by the datastore -- [Issue #223](https://github.com/influxdb/influxdb/issues/223). Null values caused count(distinct()) to panic -- [Issue #224](https://github.com/influxdb/influxdb/issues/224). Null values broke replication due to protobuf limitation +- [Issue #225](https://github.com/influxdata/influxdb/issues/225). Remove a hard limit on the points returned by the datastore +- [Issue #223](https://github.com/influxdata/influxdb/issues/223). Null values caused count(distinct()) to panic +- [Issue #224](https://github.com/influxdata/influxdb/issues/224). Null values broke replication due to protobuf limitation ## v0.4.1 [2014-01-30] ### Features -- [Issue #193](https://github.com/influxdb/influxdb/issues/193). Allow logging to stdout. Thanks @schmurfy -- [Issue #190](https://github.com/influxdb/influxdb/pull/190). Add support for SSL. -- [Issue #194](https://github.com/influxdb/influxdb/pull/194). Should be able to disable Admin interface. +- [Issue #193](https://github.com/influxdata/influxdb/issues/193). Allow logging to stdout. Thanks @schmurfy +- [Issue #190](https://github.com/influxdata/influxdb/pull/190). Add support for SSL. +- [Issue #194](https://github.com/influxdata/influxdb/pull/194). Should be able to disable Admin interface. ### Bugfixes -- [Issue #33](https://github.com/influxdb/influxdb/issues/33). Don't call WriteHeader more than once per request -- [Issue #195](https://github.com/influxdb/influxdb/issues/195). Allow the bind address to be configurable, Thanks @schmurfy. -- [Issue #199](https://github.com/influxdb/influxdb/issues/199). Make the test timeout configurable -- [Issue #200](https://github.com/influxdb/influxdb/issues/200). Selecting `time` or `sequence_number` silently fail -- [Issue #215](https://github.com/influxdb/influxdb/pull/215). Server fails to start up after Raft log compaction and restart. +- [Issue #33](https://github.com/influxdata/influxdb/issues/33). Don't call WriteHeader more than once per request +- [Issue #195](https://github.com/influxdata/influxdb/issues/195). Allow the bind address to be configurable, Thanks @schmurfy. 
+- [Issue #199](https://github.com/influxdata/influxdb/issues/199). Make the test timeout configurable +- [Issue #200](https://github.com/influxdata/influxdb/issues/200). Selecting `time` or `sequence_number` silently fail +- [Issue #215](https://github.com/influxdata/influxdb/pull/215). Server fails to start up after Raft log compaction and restart. ## v0.4.0 [2014-01-17] ## Features -- [Issue #86](https://github.com/influxdb/influxdb/issues/86). Support arithmetic expressions in select clause -- [Issue #92](https://github.com/influxdb/influxdb/issues/92). Change '==' to '=' and '!=' to '<>' -- [Issue #88](https://github.com/influxdb/influxdb/issues/88). Support datetime strings -- [Issue #64](https://github.com/influxdb/influxdb/issues/64). Shard writes and queries across cluster with replay for briefly downed nodes (< 24 hrs) -- [Issue #78](https://github.com/influxdb/influxdb/issues/78). Sequence numbers persist across restarts so they're not reused -- [Issue #102](https://github.com/influxdb/influxdb/issues/102). Support expressions in where condition -- [Issue #101](https://github.com/influxdb/influxdb/issues/101). Support expressions in aggregates -- [Issue #62](https://github.com/influxdb/influxdb/issues/62). Support updating and deleting column values -- [Issue #96](https://github.com/influxdb/influxdb/issues/96). Replicate deletes in a cluster -- [Issue #94](https://github.com/influxdb/influxdb/issues/94). delete queries -- [Issue #116](https://github.com/influxdb/influxdb/issues/116). Use proper logging -- [Issue #40](https://github.com/influxdb/influxdb/issues/40). Use TOML instead of JSON in the config file -- [Issue #99](https://github.com/influxdb/influxdb/issues/99). Support list series in the query language -- [Issue #149](https://github.com/influxdb/influxdb/issues/149). Cluster admins should be able to perform reads and writes. -- [Issue #108](https://github.com/influxdb/influxdb/issues/108). Querying one point using `time =` -- [Issue #114](https://github.com/influxdb/influxdb/issues/114). Servers should periodically check that they're consistent. -- [Issue #93](https://github.com/influxdb/influxdb/issues/93). Should be able to drop a time series -- [Issue #177](https://github.com/influxdb/influxdb/issues/177). Support drop series in the query language. -- [Issue #184](https://github.com/influxdb/influxdb/issues/184). Implement Raft log compaction. -- [Issue #153](https://github.com/influxdb/influxdb/issues/153). Implement continuous queries +- [Issue #86](https://github.com/influxdata/influxdb/issues/86). Support arithmetic expressions in select clause +- [Issue #92](https://github.com/influxdata/influxdb/issues/92). Change '==' to '=' and '!=' to '<>' +- [Issue #88](https://github.com/influxdata/influxdb/issues/88). Support datetime strings +- [Issue #64](https://github.com/influxdata/influxdb/issues/64). Shard writes and queries across cluster with replay for briefly downed nodes (< 24 hrs) +- [Issue #78](https://github.com/influxdata/influxdb/issues/78). Sequence numbers persist across restarts so they're not reused +- [Issue #102](https://github.com/influxdata/influxdb/issues/102). Support expressions in where condition +- [Issue #101](https://github.com/influxdata/influxdb/issues/101). Support expressions in aggregates +- [Issue #62](https://github.com/influxdata/influxdb/issues/62). Support updating and deleting column values +- [Issue #96](https://github.com/influxdata/influxdb/issues/96). 
Replicate deletes in a cluster +- [Issue #94](https://github.com/influxdata/influxdb/issues/94). delete queries +- [Issue #116](https://github.com/influxdata/influxdb/issues/116). Use proper logging +- [Issue #40](https://github.com/influxdata/influxdb/issues/40). Use TOML instead of JSON in the config file +- [Issue #99](https://github.com/influxdata/influxdb/issues/99). Support list series in the query language +- [Issue #149](https://github.com/influxdata/influxdb/issues/149). Cluster admins should be able to perform reads and writes. +- [Issue #108](https://github.com/influxdata/influxdb/issues/108). Querying one point using `time =` +- [Issue #114](https://github.com/influxdata/influxdb/issues/114). Servers should periodically check that they're consistent. +- [Issue #93](https://github.com/influxdata/influxdb/issues/93). Should be able to drop a time series +- [Issue #177](https://github.com/influxdata/influxdb/issues/177). Support drop series in the query language. +- [Issue #184](https://github.com/influxdata/influxdb/issues/184). Implement Raft log compaction. +- [Issue #153](https://github.com/influxdata/influxdb/issues/153). Implement continuous queries ### Bugfixes -- [Issue #90](https://github.com/influxdb/influxdb/issues/90). Group by multiple columns panic -- [Issue #89](https://github.com/influxdb/influxdb/issues/89). 'Group by' combined with 'where' not working -- [Issue #106](https://github.com/influxdb/influxdb/issues/106). Don't panic if we only see one point and can't calculate derivative -- [Issue #105](https://github.com/influxdb/influxdb/issues/105). Panic when using a where clause that reference columns with null values -- [Issue #61](https://github.com/influxdb/influxdb/issues/61). Remove default limits from queries -- [Issue #118](https://github.com/influxdb/influxdb/issues/118). Make column names starting with '_' legal -- [Issue #121](https://github.com/influxdb/influxdb/issues/121). Don't fall back to the cluster admin auth if the db user auth fails -- [Issue #127](https://github.com/influxdb/influxdb/issues/127). Return error on delete queries with where condition that don't have time -- [Issue #117](https://github.com/influxdb/influxdb/issues/117). Fill empty groups with default values -- [Issue #150](https://github.com/influxdb/influxdb/pull/150). Fix parser for when multiple divisions look like a regex. -- [Issue #158](https://github.com/influxdb/influxdb/issues/158). Logged deletes should be stored with the time range if missing. -- [Issue #136](https://github.com/influxdb/influxdb/issues/136). Make sure writes are replicated in order to avoid triggering replays -- [Issue #145](https://github.com/influxdb/influxdb/issues/145). Server fails to join cluster if all starting at same time. -- [Issue #176](https://github.com/influxdb/influxdb/issues/176). Drop database should take effect on all nodes -- [Issue #180](https://github.com/influxdb/influxdb/issues/180). Column names not returned when running multi-node cluster and writing more than one point. -- [Issue #182](https://github.com/influxdb/influxdb/issues/182). Queries with invalid limit clause crash the server +- [Issue #90](https://github.com/influxdata/influxdb/issues/90). Group by multiple columns panic +- [Issue #89](https://github.com/influxdata/influxdb/issues/89). 'Group by' combined with 'where' not working +- [Issue #106](https://github.com/influxdata/influxdb/issues/106). 
Don't panic if we only see one point and can't calculate derivative +- [Issue #105](https://github.com/influxdata/influxdb/issues/105). Panic when using a where clause that reference columns with null values +- [Issue #61](https://github.com/influxdata/influxdb/issues/61). Remove default limits from queries +- [Issue #118](https://github.com/influxdata/influxdb/issues/118). Make column names starting with '_' legal +- [Issue #121](https://github.com/influxdata/influxdb/issues/121). Don't fall back to the cluster admin auth if the db user auth fails +- [Issue #127](https://github.com/influxdata/influxdb/issues/127). Return error on delete queries with where condition that don't have time +- [Issue #117](https://github.com/influxdata/influxdb/issues/117). Fill empty groups with default values +- [Issue #150](https://github.com/influxdata/influxdb/pull/150). Fix parser for when multiple divisions look like a regex. +- [Issue #158](https://github.com/influxdata/influxdb/issues/158). Logged deletes should be stored with the time range if missing. +- [Issue #136](https://github.com/influxdata/influxdb/issues/136). Make sure writes are replicated in order to avoid triggering replays +- [Issue #145](https://github.com/influxdata/influxdb/issues/145). Server fails to join cluster if all starting at same time. +- [Issue #176](https://github.com/influxdata/influxdb/issues/176). Drop database should take effect on all nodes +- [Issue #180](https://github.com/influxdata/influxdb/issues/180). Column names not returned when running multi-node cluster and writing more than one point. +- [Issue #182](https://github.com/influxdata/influxdb/issues/182). Queries with invalid limit clause crash the server ### Deprecated @@ -1739,49 +2281,49 @@ ## Features -- [Issue #82](https://github.com/influxdb/influxdb/issues/82). Add endpoint for listing available admin interfaces. -- [Issue #80](https://github.com/influxdb/influxdb/issues/80). Support durations when specifying start and end time -- [Issue #81](https://github.com/influxdb/influxdb/issues/81). Add support for IN +- [Issue #82](https://github.com/influxdata/influxdb/issues/82). Add endpoint for listing available admin interfaces. +- [Issue #80](https://github.com/influxdata/influxdb/issues/80). Support durations when specifying start and end time +- [Issue #81](https://github.com/influxdata/influxdb/issues/81). Add support for IN ## Bugfixes -- [Issue #75](https://github.com/influxdb/influxdb/issues/75). Don't allow time series names that start with underscore -- [Issue #85](https://github.com/influxdb/influxdb/issues/85). Non-existing columns exist after they have been queried before +- [Issue #75](https://github.com/influxdata/influxdb/issues/75). Don't allow time series names that start with underscore +- [Issue #85](https://github.com/influxdata/influxdb/issues/85). Non-existing columns exist after they have been queried before ## v0.3.0 ## Features -- [Issue #51](https://github.com/influxdb/influxdb/issues/51). Implement first and last aggregates -- [Issue #35](https://github.com/influxdb/influxdb/issues/35). Support table aliases in Join Queries -- [Issue #71](https://github.com/influxdb/influxdb/issues/71). Add WillReturnSingleSeries to the Query -- [Issue #61](https://github.com/influxdb/influxdb/issues/61). Limit should default to 10k -- [Issue #59](https://github.com/influxdb/influxdb/issues/59). Add histogram aggregate function +- [Issue #51](https://github.com/influxdata/influxdb/issues/51). 
Implement first and last aggregates +- [Issue #35](https://github.com/influxdata/influxdb/issues/35). Support table aliases in Join Queries +- [Issue #71](https://github.com/influxdata/influxdb/issues/71). Add WillReturnSingleSeries to the Query +- [Issue #61](https://github.com/influxdata/influxdb/issues/61). Limit should default to 10k +- [Issue #59](https://github.com/influxdata/influxdb/issues/59). Add histogram aggregate function ## Bugfixes - Fix join and merges when the query is a descending order query -- [Issue #57](https://github.com/influxdb/influxdb/issues/57). Don't panic when type of time != float -- [Issue #63](https://github.com/influxdb/influxdb/issues/63). Aggregate queries should not have a sequence_number column +- [Issue #57](https://github.com/influxdata/influxdb/issues/57). Don't panic when type of time != float +- [Issue #63](https://github.com/influxdata/influxdb/issues/63). Aggregate queries should not have a sequence_number column ## v0.2.0 ### Features -- [Issue #37](https://github.com/influxdb/influxdb/issues/37). Support the negation of the regex matcher !~ -- [Issue #47](https://github.com/influxdb/influxdb/issues/47). Spill out query and database detail at the time of bug report +- [Issue #37](https://github.com/influxdata/influxdb/issues/37). Support the negation of the regex matcher !~ +- [Issue #47](https://github.com/influxdata/influxdb/issues/47). Spill out query and database detail at the time of bug report ### Bugfixes -- [Issue #36](https://github.com/influxdb/influxdb/issues/36). The regex operator should be =~ not ~= -- [Issue #39](https://github.com/influxdb/influxdb/issues/39). Return proper content types from the http api -- [Issue #42](https://github.com/influxdb/influxdb/issues/42). Make the api consistent with the docs -- [Issue #41](https://github.com/influxdb/influxdb/issues/41). Table/Points not deleted when database is dropped -- [Issue #45](https://github.com/influxdb/influxdb/issues/45). Aggregation shouldn't mess up the order of the points -- [Issue #44](https://github.com/influxdb/influxdb/issues/44). Fix crashes on RHEL 5.9 -- [Issue #34](https://github.com/influxdb/influxdb/issues/34). Ascending order always return null for columns that have a null value -- [Issue #55](https://github.com/influxdb/influxdb/issues/55). Limit should limit the points that match the Where clause -- [Issue #53](https://github.com/influxdb/influxdb/issues/53). Writing null values via HTTP API fails +- [Issue #36](https://github.com/influxdata/influxdb/issues/36). The regex operator should be =~ not ~= +- [Issue #39](https://github.com/influxdata/influxdb/issues/39). Return proper content types from the http api +- [Issue #42](https://github.com/influxdata/influxdb/issues/42). Make the api consistent with the docs +- [Issue #41](https://github.com/influxdata/influxdb/issues/41). Table/Points not deleted when database is dropped +- [Issue #45](https://github.com/influxdata/influxdb/issues/45). Aggregation shouldn't mess up the order of the points +- [Issue #44](https://github.com/influxdata/influxdb/issues/44). Fix crashes on RHEL 5.9 +- [Issue #34](https://github.com/influxdata/influxdb/issues/34). Ascending order always return null for columns that have a null value +- [Issue #55](https://github.com/influxdata/influxdb/issues/55). Limit should limit the points that match the Where clause +- [Issue #53](https://github.com/influxdata/influxdb/issues/53). 
Writing null values via HTTP API fails ### Deprecated @@ -1794,18 +2336,18 @@ ### Features -- [Issue #29](https://github.com/influxdb/influxdb/issues/29). Semicolon is now optional in queries -- [Issue #31](https://github.com/influxdb/influxdb/issues/31). Support Basic Auth as well as query params for authentication. +- [Issue #29](https://github.com/influxdata/influxdb/issues/29). Semicolon is now optional in queries +- [Issue #31](https://github.com/influxdata/influxdb/issues/31). Support Basic Auth as well as query params for authentication. ### Bugfixes - Don't allow creating users with empty username -- [Issue #22](https://github.com/influxdb/influxdb/issues/22). Don't set goroot if it was set -- [Issue #25](https://github.com/influxdb/influxdb/issues/25). Fix queries that use the median aggregator -- [Issue #26](https://github.com/influxdb/influxdb/issues/26). Default log and db directories should be in /opt/influxdb/shared/data -- [Issue #27](https://github.com/influxdb/influxdb/issues/27). Group by should not blow up if the one of the columns in group by has null values -- [Issue #30](https://github.com/influxdb/influxdb/issues/30). Column indexes/names getting off somehow -- [Issue #32](https://github.com/influxdb/influxdb/issues/32). Fix many typos in the codebase. Thanks @pborreli +- [Issue #22](https://github.com/influxdata/influxdb/issues/22). Don't set goroot if it was set +- [Issue #25](https://github.com/influxdata/influxdb/issues/25). Fix queries that use the median aggregator +- [Issue #26](https://github.com/influxdata/influxdb/issues/26). Default log and db directories should be in /opt/influxdb/shared/data +- [Issue #27](https://github.com/influxdata/influxdb/issues/27). Group by should not blow up if the one of the columns in group by has null values +- [Issue #30](https://github.com/influxdata/influxdb/issues/30). Column indexes/names getting off somehow +- [Issue #32](https://github.com/influxdata/influxdb/issues/32). Fix many typos in the codebase. Thanks @pborreli ## v0.0.9 @@ -1817,8 +2359,8 @@ #### Bugfixes - Set PYTHONPATH and CC appropriately on mac os x. -- [Issue #18](https://github.com/influxdb/influxdb/issues/18). Fix 386 debian and redhat packages -- [Issue #23](https://github.com/influxdb/influxdb/issues/23). Fix the init scripts on redhat +- [Issue #18](https://github.com/influxdata/influxdb/issues/18). Fix 386 debian and redhat packages +- [Issue #23](https://github.com/influxdata/influxdb/issues/23). 
Fix the init scripts on redhat ## v0.0.8 diff -Nru influxdb-0.10.0+dfsg1/circle.yml influxdb-1.1.1+dfsg1/circle.yml --- influxdb-0.10.0+dfsg1/circle.yml 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/circle.yml 2016-12-06 21:36:15.000000000 +0000 @@ -1,10 +1,18 @@ machine: services: - docker + environment: + GODIST: "go1.7.4.linux-amd64.tar.gz" + post: + - mkdir -p download + - test -e download/$GODIST || curl -o download/$GODIST https://storage.googleapis.com/golang/$GODIST + - sudo rm -rf /usr/local/go + - sudo tar -C /usr/local -xzf download/$GODIST dependencies: cache_directories: - "~/docker" + - ~/download override: - ./test.sh save: # building the docker images can take a long time, hence caching @@ -14,3 +22,20 @@ override: - bash circle-test.sh: parallel: true + +deployment: + release: + tag: /^v[0-9]+(\.[0-9]+)*(\S*)$/ + commands: + - > + docker run + -e "AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID" + -e "AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY" + -v $(pwd):/root/go/src/github.com/influxdata/influxdb + influxdb_build_ubuntu64 + --release + --package + --platform all + --arch all + --upload + --bucket dl.influxdata.com/influxdb/releases diff -Nru influxdb-0.10.0+dfsg1/client/example_test.go influxdb-1.1.1+dfsg1/client/example_test.go --- influxdb-0.10.0+dfsg1/client/example_test.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/client/example_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -9,7 +9,7 @@ "strconv" "time" - "github.com/influxdb/influxdb/client" + "github.com/influxdata/influxdb/client" ) func ExampleNewClient() { diff -Nru influxdb-0.10.0+dfsg1/client/influxdb.go influxdb-1.1.1+dfsg1/client/influxdb.go --- influxdb-0.10.0+dfsg1/client/influxdb.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/client/influxdb.go 2016-12-06 21:36:15.000000000 +0000 @@ -1,10 +1,12 @@ -package client +package client // import "github.com/influxdata/influxdb/client" import ( "bytes" + "crypto/tls" "encoding/json" "errors" "fmt" + "io" "io/ioutil" "net" "net/http" @@ -13,7 +15,7 @@ "strings" "time" - "github.com/influxdb/influxdb/models" + "github.com/influxdata/influxdb/models" ) const ( @@ -31,6 +33,18 @@ type Query struct { Command string Database string + + // Chunked tells the server to send back chunked responses. This places + // less load on the server by sending back chunks of the response rather + // than waiting for the entire response all at once. + Chunked bool + + // ChunkSize sets the maximum number of rows that will be returned per + // chunk. Chunks are either divided based on their series or if they hit + // the chunk size limit. + // + // Chunked must be set to true for this option to be used. + ChunkSize int } // ParseConnectionString will parse a string to create a valid connection URL @@ -79,6 +93,7 @@ UserAgent string Timeout time.Duration Precision string + UnsafeSsl bool } // NewConfig will create a config to be used in connecting to the client @@ -114,11 +129,19 @@ // NewClient will instantiate and return a connected client to issue commands to the server. 
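The `Chunked` and `ChunkSize` options added to `Query` above let the server stream a result back in pieces instead of buffering the whole response. A minimal sketch of how a caller might set them on this v1 `client` package follows; the address, database name, measurement and chunk size are placeholder values, everything else uses only the API shown in the patch.

```go
package main

import (
	"fmt"
	"log"
	"net/url"

	"github.com/influxdata/influxdb/client"
)

func main() {
	// Placeholder address; point this at your own InfluxDB instance.
	u, err := url.Parse("http://localhost:8086")
	if err != nil {
		log.Fatal(err)
	}

	c, err := client.NewClient(client.Config{URL: *u})
	if err != nil {
		log.Fatal(err)
	}

	// Ask the server to return the result in chunks of up to 10,000 rows.
	q := client.Query{
		Command:   "SELECT * FROM cpu_usage",
		Database:  "mydb",
		Chunked:   true,
		ChunkSize: 10000,
	}

	resp, err := c.Query(q)
	if err != nil {
		log.Fatal(err)
	}
	if resp.Error() != nil {
		log.Fatal(resp.Error())
	}
	fmt.Println("result sets:", len(resp.Results))
}
```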
func NewClient(c Config) (*Client, error) { + tlsConfig := &tls.Config{ + InsecureSkipVerify: c.UnsafeSsl, + } + + tr := &http.Transport{ + TLSClientConfig: tlsConfig, + } + client := Client{ url: c.URL, username: c.Username, password: c.Password, - httpClient: &http.Client{Timeout: c.Timeout}, + httpClient: &http.Client{Timeout: c.Timeout, Transport: tr}, userAgent: c.UserAgent, precision: c.Precision, } @@ -147,12 +170,18 @@ values := u.Query() values.Set("q", q.Command) values.Set("db", q.Database) + if q.Chunked { + values.Set("chunked", "true") + if q.ChunkSize > 0 { + values.Set("chunk_size", strconv.Itoa(q.ChunkSize)) + } + } if c.precision != "" { values.Set("epoch", c.precision) } u.RawQuery = values.Encode() - req, err := http.NewRequest("GET", u.String(), nil) + req, err := http.NewRequest("POST", u.String(), nil) if err != nil { return nil, err } @@ -168,19 +197,38 @@ defer resp.Body.Close() var response Response - dec := json.NewDecoder(resp.Body) - dec.UseNumber() - decErr := dec.Decode(&response) + if q.Chunked { + cr := NewChunkedResponse(resp.Body) + for { + r, err := cr.NextResponse() + if err != nil { + // If we got an error while decoding the response, send that back. + return nil, err + } - // ignore this error if we got an invalid status code - if decErr != nil && decErr.Error() == "EOF" && resp.StatusCode != http.StatusOK { - decErr = nil - } - // If we got a valid decode error, send that back - if decErr != nil { - return nil, decErr + if r == nil { + break + } + + response.Results = append(response.Results, r.Results...) + if r.Err != nil { + response.Err = r.Err + break + } + } + } else { + dec := json.NewDecoder(resp.Body) + dec.UseNumber() + if err := dec.Decode(&response); err != nil { + // Ignore EOF errors if we got an invalid status code. + if !(err == io.EOF && resp.StatusCode != http.StatusOK) { + return nil, err + } + } } - // If we don't have an error in our json response, and didn't get StatusOK, then send back an error + + // If we don't have an error in our json response, and didn't get StatusOK, + // then send back an error. if resp.StatusCode != http.StatusOK && response.Error() == nil { return &response, fmt.Errorf("received status code %d from server", resp.StatusCode) } @@ -196,6 +244,10 @@ var b bytes.Buffer for _, p := range bp.Points { + err := checkPointTypes(p) + if err != nil { + return nil, err + } if p.Raw != "" { if _, err := b.WriteString(p.Raw); err != nil { return nil, err @@ -335,22 +387,31 @@ // Structs +// Message represents a user message. +type Message struct { + Level string `json:"level,omitempty"` + Text string `json:"text,omitempty"` +} + // Result represents a resultset returned from a single statement. type Result struct { - Series []models.Row - Err error + Series []models.Row + Messages []*Message + Err error } // MarshalJSON encodes the result into JSON. func (r *Result) MarshalJSON() ([]byte, error) { // Define a struct that outputs "error" as a string. var o struct { - Series []models.Row `json:"series,omitempty"` - Err string `json:"error,omitempty"` + Series []models.Row `json:"series,omitempty"` + Messages []*Message `json:"messages,omitempty"` + Err string `json:"error,omitempty"` } // Copy fields to output struct. 
o.Series = r.Series + o.Messages = r.Messages if r.Err != nil { o.Err = r.Err.Error() } @@ -361,8 +422,9 @@ // UnmarshalJSON decodes the data into the Result struct func (r *Result) UnmarshalJSON(b []byte) error { var o struct { - Series []models.Row `json:"series,omitempty"` - Err string `json:"error,omitempty"` + Series []models.Row `json:"series,omitempty"` + Messages []*Message `json:"messages,omitempty"` + Err string `json:"error,omitempty"` } dec := json.NewDecoder(bytes.NewBuffer(b)) @@ -372,6 +434,7 @@ return err } r.Series = o.Series + r.Messages = o.Messages if o.Err != "" { r.Err = errors.New(o.Err) } @@ -423,7 +486,7 @@ // Error returns the first error from any statement. // Returns nil if no errors occurred on any statements. -func (r Response) Error() error { +func (r *Response) Error() error { if r.Err != nil { return r.Err } @@ -435,6 +498,55 @@ return nil } +// duplexReader reads responses and writes it to another writer while +// satisfying the reader interface. +type duplexReader struct { + r io.Reader + w io.Writer +} + +func (r *duplexReader) Read(p []byte) (n int, err error) { + n, err = r.r.Read(p) + if err == nil { + r.w.Write(p[:n]) + } + return n, err +} + +// ChunkedResponse represents a response from the server that +// uses chunking to stream the output. +type ChunkedResponse struct { + dec *json.Decoder + duplex *duplexReader + buf bytes.Buffer +} + +// NewChunkedResponse reads a stream and produces responses from the stream. +func NewChunkedResponse(r io.Reader) *ChunkedResponse { + resp := &ChunkedResponse{} + resp.duplex = &duplexReader{r: r, w: &resp.buf} + resp.dec = json.NewDecoder(resp.duplex) + resp.dec.UseNumber() + return resp +} + +// NextResponse reads the next line of the stream and returns a response. +func (r *ChunkedResponse) NextResponse() (*Response, error) { + var response Response + if err := r.dec.Decode(&response); err != nil { + if err == io.EOF { + return nil, nil + } + // A decoding error happened. This probably means the server crashed + // and sent a last-ditch error message to us. Ensure we have read the + // entirety of the connection to get any remaining error text. + io.Copy(ioutil.Discard, r.duplex) + return nil, errors.New(strings.TrimSpace(r.buf.String())) + } + r.buf.Reset() + return &response, nil +} + // Point defines the fields that will be written to the database // Measurement, Time, and Fields are required // Precision can be specified if the time is in epoch format (integer). @@ -474,7 +586,7 @@ // MarshalString renders string representation of a Point with specified // precision. The default precision is nanoseconds. func (p *Point) MarshalString() string { - pt, err := models.NewPoint(p.Measurement, p.Tags, p.Fields, p.Time) + pt, err := models.NewPoint(p.Measurement, models.NewTags(p.Tags), p.Fields, p.Time) if err != nil { return "# ERROR: " + err.Error() + " " + p.Measurement } @@ -642,6 +754,19 @@ return c.url.String() } +// checkPointTypes ensures no unsupported types are submitted to influxdb, returning error if they are found. 
+func checkPointTypes(p Point) error { + for _, v := range p.Fields { + switch v.(type) { + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, float32, float64, bool, string, nil: + return nil + default: + return fmt.Errorf("unsupported point type: %T", v) + } + } + return nil +} + // helper functions // EpochToTime takes a unix epoch time and uses precision to return back a time.Time diff -Nru influxdb-0.10.0+dfsg1/client/influxdb_test.go influxdb-1.1.1+dfsg1/client/influxdb_test.go --- influxdb-0.10.0+dfsg1/client/influxdb_test.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/client/influxdb_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -1,18 +1,52 @@ package client_test import ( + "crypto/tls" "encoding/json" + "errors" "fmt" + "io/ioutil" "net/http" "net/http/httptest" "net/url" + "os" "strings" "testing" "time" - "github.com/influxdb/influxdb/client" + "github.com/influxdata/influxdb/client" ) +func BenchmarkWrite(b *testing.B) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data client.Response + w.WriteHeader(http.StatusNoContent) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + b.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + bp := client.BatchPoints{ + Points: []client.Point{ + {Fields: map[string]interface{}{"value": 101}}}, + } + for i := 0; i < b.N; i++ { + r, err := c.Write(bp) + if err != nil { + b.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + if r != nil { + b.Fatalf("unexpected response. expected %v, actual %v", nil, r) + } + } +} + func BenchmarkUnmarshalJSON2Tags(b *testing.B) { var bp client.BatchPoints data := []byte(` @@ -105,8 +139,8 @@ if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } - if d == 0 { - t.Fatalf("expected a duration greater than zero. actual %v", d) + if d.Nanoseconds() == 0 { + t.Fatalf("expected a duration greater than zero. actual %v", d.Nanoseconds()) } if version != "x.x" { t.Fatalf("unexpected version. expected %s, actual %v", "x.x", version) @@ -135,6 +169,30 @@ } } +func TestClient_ChunkedQuery(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data client.Response + w.WriteHeader(http.StatusOK) + enc := json.NewEncoder(w) + _ = enc.Encode(data) + _ = enc.Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + query := client.Query{Chunked: true} + _, err = c.Query(query) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } +} + func TestClient_BasicAuth(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { u, p, ok := r.BasicAuth() @@ -263,6 +321,44 @@ } } +func TestClient_Messages(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"results":[{"messages":[{"level":"warning","text":"deprecation test"}]}]}`)) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. 
expected %v, actual %v", nil, err) + } + + query := client.Query{} + resp, err := c.Query(query) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + if got, exp := len(resp.Results), 1; got != exp { + t.Fatalf("unexpected number of results. expected %v, actual %v", exp, got) + } + + r := resp.Results[0] + if got, exp := len(r.Messages), 1; got != exp { + t.Fatalf("unexpected number of messages. expected %v, actual %v", exp, got) + } + + m := r.Messages[0] + if got, exp := m.Level, "warning"; got != exp { + t.Errorf("unexpected message level. expected %v, actual %v", exp, got) + } + if got, exp := m.Text, "deprecation test"; got != exp { + t.Errorf("unexpected message text. expected %v, actual %v", exp, got) + } +} + func TestPoint_UnmarshalEpoch(t *testing.T) { now := time.Now() tests := []struct { @@ -453,6 +549,7 @@ func emptyTestServer() *httptest.Server { return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(50 * time.Millisecond) w.Header().Set("X-Influxdb-Version", "x.x") return })) @@ -546,6 +643,36 @@ } } +func TestClient_WriteUint64(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data client.Response + w.WriteHeader(http.StatusNoContent) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + bp := client.BatchPoints{ + Points: []client.Point{ + { + Fields: map[string]interface{}{"value": uint64(10)}, + }, + }, + } + r, err := c.Write(bp) + if err == nil { + t.Fatalf("unexpected error. expected err, actual %v", err) + } + if r != nil { + t.Fatalf("unexpected response. 
expected %v, actual %v", nil, r) + } +} + func TestClient_ParseConnectionString_IPv6(t *testing.T) { path := "[fdf5:9ede:1875:0:a9ee:a600:8fe3:d495]:8086" u, err := client.ParseConnectionString(path, false) @@ -556,3 +683,149 @@ t.Fatalf("ipv6 parse failed, expected %s, actual %s", path, u.Host) } } + +func TestClient_CustomCertificates(t *testing.T) { + // generated with: + // openssl req -x509 -newkey rsa:2048 -keyout key.pem -out cert.pem -days 3650 -nodes -config influx.cnf + // influx.cnf: + // [req] + // distinguished_name = req_distinguished_name + // x509_extensions = v3_req + // prompt = no + // [req_distinguished_name] + // C = US + // ST = CA + // L = San Francisco + // O = InfluxDB + // CN = github.com/influxdata + // [v3_req] + // keyUsage = keyEncipherment, dataEncipherment + // extendedKeyUsage = serverAuth + // subjectAltName = @alt_names + // [alt_names] + // IP.1 = 127.0.0.1 + // + key := ` +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDLswqKJLxfhBRi +4qdj7+jpBxTAi4MewrcMPp+9YlbLke3F7w2DPrZVkYVeWmg8LyTPAigrXeadK6hv +qjRr05a7sMc5+ynivGbWUySWT+u17V85x6VR5TMIkJEOqpiIU8aYk0l+3UcrzVjS +1QZCUBoxVwAVaSR6AXTA8YrVXdk/AI3f22dYiBjFmV4LJJkGjTaCnlDKu54hMU1t +WTyFcoY9TBzZ1XA+ng5RQ/QADeL2PYrTW4s/mLI3jfKKD53EI4uM2FjW37ZfuxTa +mhCR7/lxM4COg9K70y5uebfqJvuoXAwXLOzVbdfF5b9fJFbL67kaK2tiMT3Wt39m +hXzclLTDAgMBAAECggEAK8mpElkjRUUXPMqMQSdpYe5rv5g973bb8n3jyMpC7i/I +dSwWM4hfmbVWfhnhHk7kErvb9raQxGiGJLrp2eP6Gw69RPGA54SodpoY21cCzHDi +b4FDQH+MoOKyy/xQHb4kitfejK70ha320huI5OhjOQgCtJeNh8yYVIGX3pX2BVyu +36UB9tfX1S5pbiHeih3vZGd322Muj/joNzIelnYRBnoO0xqvQ0S1Dk+dLCTHO0/m +u9AZN8c2TsRWZpJPMWwBv8LuABbE0e66/TSsrfklAn86ELCo44lZURDE7uPZ4pIH +FWtmf+nW5Hy6aPhy60E40MqotlejhWwB3ktY/m3JAQKBgQDuB4nhxzJA9lH9EaCt +byvJ9wGVvI3k79hOwc/Z2R3dNe+Ma+TJy+aBppvsLF4qz83aWC+canyasbHcPNR/ +vXQGlsgKfucrmd1PfMV7uvOIkfOjK0E6mRC+jMuKtNTQrdtM1BU/Z7LY0iy0fNJ6 +aNqhFdlJmmk0g+4bR4SAWB6FkwKBgQDbE/7r1u+GdJk/mhdjTi1aegr9lXb0l7L6 +BCvOYhs/Z/pXfsaYPSXhgk2w+LiGk6BaEA2/4Sr0YS2MAAaIhBVeFBIXVpNrXB3K +Yg1jOEeLQ3qoVBeJFhJNrN9ZQx33HANC1W/Y1apMwaYqCRUGVQkrdcsN2KNea1z0 +3qeYeCCSEQKBgCKZKeuNfrp+k1BLnaVYAW9r3ekb7SwXyMM53LJ3oqWiz10D2c+T +OcAirYtYr59dcTiJlPIRcGcz6PxwQxsGOLU0eYM9CvEFfmutYS8o73ksbdOL2AFi +elKYOIXC3yQuATBbq3L56b8mXaUmd5mfYBgGCv1t2ljtzFBext248UbNAoGBAIv1 +2V24YiwnH6THf/ucfVMZNx5Mt8OJivk5YvcmLDw05HWzc5LdNe89PP871z963u3K +5c3ZP4UC9INFnOboY3JIJkqsr9/d6NZcECt8UBDDmoAhwSt+Y1EmiUZQn7s4NUkk +bKE919/Ts6GVTc5O013lkkUVS0HOG4QBH1dEH6LRAoGAStl11WA9tuKXiBl5XG/C +cq9mFPNJK3pEgd6YH874vEnYEEqENR4MFK3uWXus9Nm+VYxbUbPEzFF4kpsfukDg +/JAVqY4lUam7g6fyyaoIIPQEp7jGjbsUf46IjnUjFcaojOugA3EAfn9awREUDuJZ +cvh4WzEegcExTppINW1NB5E= +-----END PRIVATE KEY----- +` + cert := ` +-----BEGIN CERTIFICATE----- +MIIDdjCCAl6gAwIBAgIJAMYGAwkxUV51MA0GCSqGSIb3DQEBCwUAMFgxCzAJBgNV +BAYTAlVTMQswCQYDVQQIDAJDQTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzERMA8G +A1UECgwISW5mbHV4REIxETAPBgNVBAMMCGluZmx1eGRiMB4XDTE1MTIyOTAxNTg1 +NloXDTI1MTIyNjAxNTg1NlowWDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRYw +FAYDVQQHDA1TYW4gRnJhbmNpc2NvMREwDwYDVQQKDAhJbmZsdXhEQjERMA8GA1UE +AwwIaW5mbHV4ZGIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDLswqK +JLxfhBRi4qdj7+jpBxTAi4MewrcMPp+9YlbLke3F7w2DPrZVkYVeWmg8LyTPAigr +XeadK6hvqjRr05a7sMc5+ynivGbWUySWT+u17V85x6VR5TMIkJEOqpiIU8aYk0l+ +3UcrzVjS1QZCUBoxVwAVaSR6AXTA8YrVXdk/AI3f22dYiBjFmV4LJJkGjTaCnlDK +u54hMU1tWTyFcoY9TBzZ1XA+ng5RQ/QADeL2PYrTW4s/mLI3jfKKD53EI4uM2FjW +37ZfuxTamhCR7/lxM4COg9K70y5uebfqJvuoXAwXLOzVbdfF5b9fJFbL67kaK2ti +MT3Wt39mhXzclLTDAgMBAAGjQzBBMAwGA1UdEwQFMAMBAf8wCwYDVR0PBAQDAgQw 
+MBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1UdEQQIMAaHBH8AAAEwDQYJKoZIhvcN +AQELBQADggEBAJxgHeduV9q2BuKnrt+sjXLGn/HwbMbgGbgFK6kUKJBWtv6Pa7JJ +m4teDmTMWiaeB2g4N2bmaWTuEZzzShNKG5roFeWm1ilFMAyzkb+VifN4YuDKH62F +3e259qsytiGbbJF3F//4sjfMw8qZVEPvspG1zKsASo0PpSOOUFmxcj0oMAXhnMrk +rRcbk6fufhyq0iZGl8ZLKTCrkjk0b3qlNs6UaRD9/XBB59VlQ8I338sfjV06edwY +jn5Amab0uyoFNEp70Y4WGxrxUTS1GAC1LCA13S7EnidD440UrnWALTarjmHAK6aW +war3JNM1mGB3o2iAtuOJlFIKLpI1x+1e8pI= +-----END CERTIFICATE----- +` + cer, err := tls.X509KeyPair([]byte(cert), []byte(key)) + + if err != nil { + t.Fatalf("Received error: %v", err) + } + + server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data client.Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + server.TLS = &tls.Config{Certificates: []tls.Certificate{cer}} + server.TLS.BuildNameToCertificate() + server.StartTLS() + defer server.Close() + + certFile, _ := ioutil.TempFile("", "influx-cert-") + certFile.WriteString(cert) + certFile.Close() + defer os.Remove(certFile.Name()) + + u, _ := url.Parse(server.URL) + + tests := []struct { + name string + unsafeSsl bool + expected error + }{ + {name: "validate certificates", unsafeSsl: false, expected: errors.New("error")}, + {name: "not validate certificates", unsafeSsl: true, expected: nil}, + } + + for _, test := range tests { + config := client.Config{URL: *u, UnsafeSsl: test.unsafeSsl} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + query := client.Query{} + _, err = c.Query(query) + + if (test.expected == nil) != (err == nil) { + t.Fatalf("%s: expected %v. got %v. unsafeSsl: %v", test.name, test.expected, err, test.unsafeSsl) + } + } +} + +func TestChunkedResponse(t *testing.T) { + s := `{"results":[{},{}]}{"results":[{}]}` + r := client.NewChunkedResponse(strings.NewReader(s)) + resp, err := r.NextResponse() + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } else if actual := len(resp.Results); actual != 2 { + t.Fatalf("unexpected number of results. expected %v, actual %v", 2, actual) + } + + resp, err = r.NextResponse() + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } else if actual := len(resp.Results); actual != 1 { + t.Fatalf("unexpected number of results. expected %v, actual %v", 1, actual) + } + + resp, err = r.NextResponse() + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } else if resp != nil { + t.Fatalf("unexpected response. expected %v, actual %v", nil, resp) + } +} diff -Nru influxdb-0.10.0+dfsg1/client/README.md influxdb-1.1.1+dfsg1/client/README.md --- influxdb-0.10.0+dfsg1/client/README.md 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/client/README.md 2016-12-06 21:36:15.000000000 +0000 @@ -1,12 +1,12 @@ # InfluxDB Client -[![GoDoc](https://godoc.org/github.com/influxdb/influxdb?status.svg)](http://godoc.org/github.com/influxdb/influxdb/client/v2) +[![GoDoc](https://godoc.org/github.com/influxdata/influxdb?status.svg)](http://godoc.org/github.com/influxdata/influxdb/client/v2) ## Description **NOTE:** The Go client library now has a "v2" version, with the old version being deprecated. The new version can be imported at -`import "github.com/influxdb/influxdb/client/v2"`. It is not backwards-compatible. +`import "github.com/influxdata/influxdb/client/v2"`. It is not backwards-compatible. 
A Go client library written and maintained by the **InfluxDB** team. This package provides convenience functions to read and write time series data. @@ -22,14 +22,14 @@ 8086. You can customize these settings to your specific installation via the **InfluxDB** configuration file. -Thought not necessary for experimentation, you may want to create a new user +Though not necessary for experimentation, you may want to create a new user and authenticate the connection to your database. For more information please check out the -[Cluster Admin Docs](http://influxdb.com/docs/v0.9/query_language/database_administration.html). +[Admin Docs](https://docs.influxdata.com/influxdb/latest/administration/). For the impatient, you can create a new admin user _bubba_ by firing off the -[InfluxDB CLI](https://github.com/influxdb/influxdb/blob/master/cmd/influx/main.go). +[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go). ```shell influx @@ -48,14 +48,11 @@ ```go package main -import import ( - "net/url" - "fmt" "log" - "os" + "time" - "github.com/influxdb/influxdb/client/v2" + "github.com/influxdata/influxdb/client/v2" ) const ( @@ -66,18 +63,26 @@ func main() { // Make client - c := client.NewHTTPClient(client.HTTPConfig{ + c, err := client.NewHTTPClient(client.HTTPConfig{ Addr: "http://localhost:8086", Username: username, Password: password, }) + if err != nil { + log.Fatalln("Error: ", err) + } + // Create a new point batch - bp := client.NewBatchPoints(client.BatchPointsConfig{ + bp, err := client.NewBatchPoints(client.BatchPointsConfig{ Database: MyDB, Precision: "s", }) + if err != nil { + log.Fatalln("Error: ", err) + } + // Create a point and add to batch tags := map[string]string{"cpu": "cpu-total"} fields := map[string]interface{}{ @@ -85,7 +90,12 @@ "system": 53.3, "user": 46.6, } - pt := client.NewPoint("cpu_usage", tags, fields, time.Now()) + pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) + + if err != nil { + log.Fatalln("Error: ", err) + } + bp.AddPoint(pt) // Write the batch @@ -166,6 +176,8 @@ return res, response.Error() } res = response.Results + } else { + return res, err } return res, nil } @@ -243,13 +255,35 @@ } ``` +### Point Splitting + +The UDP client now supports splitting single points that exceed the configured +payload size. The logic for processing each point is listed here, starting with +an empty payload. + +1. If adding the point to the current (non-empty) payload would exceed the + configured size, send the current payload. Otherwise, add it to the current + payload. +1. If the point is smaller than the configured size, add it to the payload. +1. If the point has no timestamp, just try to send the entire point as a single + UDP payload, and process the next point. +1. Since the point has a timestamp, re-use the existing measurement name, + tagset, and timestamp and create multiple new points by splitting up the + fields. The per-point length will be kept close to the configured size, + staying under it if possible. This does mean that one large field, maybe a + long string, could be sent as a larger-than-configured payload. + +The above logic attempts to respect configured payload sizes, but not sacrifice +any data integrity. Points without a timestamp can't be split, as that may +cause fields to have differing timestamps when processed by the server. 
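As a rough sketch of the splitting rules above (assuming a UDP listener on `localhost:8089`; the measurement, tag and field names are placeholders), an oversized point is handed to the UDP client like any other point and split transparently on write:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/influxdata/influxdb/client/v2"
)

func main() {
	// PayloadSize here is the package default of 512 bytes; tune it for your network.
	c, err := client.NewUDPClient(client.UDPConfig{
		Addr:        "localhost:8089",
		PayloadSize: 512,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	bp, err := client.NewBatchPoints(client.BatchPointsConfig{Precision: "s"})
	if err != nil {
		log.Fatal(err)
	}

	// Build one point whose line-protocol encoding exceeds the payload size.
	// Because it carries a timestamp, it will be split into several smaller
	// points that re-use the same measurement, tags and timestamp.
	fields := map[string]interface{}{}
	for i := 0; i < 64; i++ {
		fields[fmt.Sprintf("field%02d", i)] = i
	}
	pt, err := client.NewPoint("cpu_usage", map[string]string{"host": "server01"}, fields, time.Now())
	if err != nil {
		log.Fatal(err)
	}
	bp.AddPoint(pt)

	if err := c.Write(bp); err != nil {
		log.Fatal(err)
	}
}
```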
+ ## Go Docs Please refer to -[http://godoc.org/github.com/influxdb/influxdb/client/v2](http://godoc.org/github.com/influxdb/influxdb/client/v2) +[http://godoc.org/github.com/influxdata/influxdb/client/v2](http://godoc.org/github.com/influxdata/influxdb/client/v2) for documentation. ## See Also You can also examine how the client library is used by the -[InfluxDB CLI](https://github.com/influxdb/influxdb/blob/master/cmd/influx/main.go). +[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go). diff -Nru influxdb-0.10.0+dfsg1/client/v2/client.go influxdb-1.1.1+dfsg1/client/v2/client.go --- influxdb-0.10.0+dfsg1/client/v2/client.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/client/v2/client.go 2016-12-06 21:36:15.000000000 +0000 @@ -1,4 +1,4 @@ -package client +package client // import "github.com/influxdata/influxdb/client/v2" import ( "bytes" @@ -7,20 +7,14 @@ "errors" "fmt" "io/ioutil" - "net" "net/http" "net/url" "time" - "github.com/influxdb/influxdb/models" -) - -// UDPPayloadSize is a reasonable default payload size for UDP packets that -// could be travelling over the internet. -const ( - UDPPayloadSize = 512 + "github.com/influxdata/influxdb/models" ) +// HTTPConfig is the config data needed to create an HTTP Client type HTTPConfig struct { // Addr should be of the form "http://host:port" // or "http://[ipv6-host%zone]:port". @@ -41,18 +35,13 @@ // InsecureSkipVerify gets passed to the http client, if true, it will // skip https certificate verification. Defaults to false InsecureSkipVerify bool -} -type UDPConfig struct { - // Addr should be of the form "host:port" - // or "[ipv6-host%zone]:port". - Addr string - - // PayloadSize is the maximum size of a UDP client message, optional - // Tune this based on your network. Defaults to UDPBufferSize. - PayloadSize int + // TLSConfig allows the user to set their own TLS config for the HTTP + // Client. If set, this option overrides InsecureSkipVerify. + TLSConfig *tls.Config } +// BatchPointsConfig is the config data needed to create an instance of the BatchPoints struct type BatchPointsConfig struct { // Precision is the write precision of the points, defaults to "ns" Precision string @@ -69,6 +58,10 @@ // Client is a client interface for writing & querying the database type Client interface { + // Ping checks that status of cluster, and will always return 0 time and no + // error for UDP clients + Ping(timeout time.Duration) (time.Duration, string, error) + // Write takes a BatchPoints object and writes all Points to InfluxDB. Write(bp BatchPoints) error @@ -80,7 +73,8 @@ Close() error } -// NewClient creates a client interface from the given config. +// NewHTTPClient returns a new Client from the provided config. +// Client is safe for concurrent use by multiple goroutines. func NewHTTPClient(conf HTTPConfig) (Client, error) { if conf.UserAgent == "" { conf.UserAgent = "InfluxDBClient" @@ -100,8 +94,11 @@ InsecureSkipVerify: conf.InsecureSkipVerify, }, } + if conf.TLSConfig != nil { + tr.TLSClientConfig = conf.TLSConfig + } return &client{ - url: u, + url: *u, username: conf.Username, password: conf.Password, useragent: conf.UserAgent, @@ -109,55 +106,71 @@ Timeout: conf.Timeout, Transport: tr, }, + transport: tr, }, nil } -// Close releases the client's resources. -func (c *client) Close() error { - return nil -} +// Ping will check to see if the server is up with an optional timeout on waiting for leader. 
+// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred. +func (c *client) Ping(timeout time.Duration) (time.Duration, string, error) { + now := time.Now() + u := c.url + u.Path = "ping" -// NewUDPClient returns a client interface for writing to an InfluxDB UDP -// service from the given config. -func NewUDPClient(conf UDPConfig) (Client, error) { - var udpAddr *net.UDPAddr - udpAddr, err := net.ResolveUDPAddr("udp", conf.Addr) + req, err := http.NewRequest("GET", u.String(), nil) if err != nil { - return nil, err + return 0, "", err } - conn, err := net.DialUDP("udp", nil, udpAddr) + req.Header.Set("User-Agent", c.useragent) + + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + if timeout > 0 { + params := req.URL.Query() + params.Set("wait_for_leader", fmt.Sprintf("%.0fs", timeout.Seconds())) + req.URL.RawQuery = params.Encode() + } + + resp, err := c.httpClient.Do(req) if err != nil { - return nil, err + return 0, "", err } + defer resp.Body.Close() - payloadSize := conf.PayloadSize - if payloadSize == 0 { - payloadSize = UDPPayloadSize + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return 0, "", err } - return &udpclient{ - conn: conn, - payloadSize: payloadSize, - }, nil + if resp.StatusCode != http.StatusNoContent { + var err = fmt.Errorf(string(body)) + return 0, "", err + } + + version := resp.Header.Get("X-Influxdb-Version") + return time.Since(now), version, nil } -// Close releases the udpclient's resources. -func (uc *udpclient) Close() error { - return uc.conn.Close() +// Close releases the client's resources. +func (c *client) Close() error { + c.transport.CloseIdleConnections() + return nil } +// client is safe for concurrent use as the fields are all read-only +// once the client is instantiated. type client struct { - url *url.URL + // N.B - if url.UserInfo is accessed in future modifications to the + // methods on client, you will need to syncronise access to url. + url url.URL username string password string useragent string httpClient *http.Client -} - -type udpclient struct { - conn *net.UDPConn - payloadSize int + transport *http.Transport } // BatchPoints is an interface into a batched grouping of points to write into @@ -166,6 +179,8 @@ type BatchPoints interface { // AddPoint adds the given point to the Batch of points AddPoint(p *Point) + // AddPoints adds the given points to the Batch of points + AddPoints(ps []*Point) // Points lists the points in the Batch Points() []*Point @@ -219,6 +234,10 @@ bp.points = append(bp.points, p) } +func (bp *batchpoints) AddPoints(ps []*Point) { + bp.points = append(bp.points, ps...) 
+} + func (bp *batchpoints) Points() []*Point { return bp.points } @@ -259,6 +278,7 @@ bp.retentionPolicy = rp } +// Point represents a single data point type Point struct { pt models.Point } @@ -278,7 +298,7 @@ T = t[0] } - pt, err := models.NewPoint(name, tags, fields, T) + pt, err := models.NewPoint(name, models.NewTags(tags), fields, T) if err != nil { return nil, err } @@ -302,9 +322,9 @@ return p.pt.Name() } -// Name returns the tags associated with the point +// Tags returns the tags associated with the point func (p *Point) Tags() map[string]string { - return p.pt.Tags() + return p.pt.Tags().Map() } // Time return the timestamp for the point @@ -322,29 +342,9 @@ return p.pt.Fields() } -func (uc *udpclient) Write(bp BatchPoints) error { - var b bytes.Buffer - var d time.Duration - d, _ = time.ParseDuration("1" + bp.Precision()) - - for _, p := range bp.Points() { - pointstring := p.pt.RoundedString(d) + "\n" - - // Write and reset the buffer if we reach the max size - if b.Len()+len(pointstring) >= uc.payloadSize { - if _, err := uc.conn.Write(b.Bytes()); err != nil { - return err - } - b.Reset() - } - - if _, err := b.WriteString(pointstring); err != nil { - return err - } - } - - _, err := uc.conn.Write(b.Bytes()) - return err +// NewPointFrom returns a point from the provided models.Point. +func NewPointFrom(pt models.Point) *Point { + return &Point{pt: pt} } func (c *client) Write(bp BatchPoints) error { @@ -436,14 +436,17 @@ return nil } -// Result represents a resultset returned from a single statement. -type Result struct { - Series []models.Row - Err string `json:"error,omitempty"` +// Message represents a user message. +type Message struct { + Level string + Text string } -func (uc *udpclient) Query(q Query) (*Response, error) { - return nil, fmt.Errorf("Querying via UDP is not supported") +// Result represents a resultset returned from a single statement. +type Result struct { + Series []models.Row + Messages []*Message + Err string `json:"error,omitempty"` } // Query sends a command to the server and returns the Response @@ -451,7 +454,7 @@ u := c.url u.Path = "query" - req, err := http.NewRequest("GET", u.String(), nil) + req, err := http.NewRequest("POST", u.String(), nil) if err != nil { return nil, err } @@ -486,7 +489,7 @@ } // If we got a valid decode error, send that back if decErr != nil { - return nil, decErr + return nil, fmt.Errorf("unable to decode json: received status code %d err: %s", resp.StatusCode, decErr) } // If we don't have an error in our json response, and didn't get statusOK // then send back an error diff -Nru influxdb-0.10.0+dfsg1/client/v2/client_test.go influxdb-1.1.1+dfsg1/client/v2/client_test.go --- influxdb-0.10.0+dfsg1/client/v2/client_test.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/client/v2/client_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -6,6 +6,7 @@ "net/http/httptest" "reflect" "strings" + "sync" "testing" "time" ) @@ -24,6 +25,20 @@ } } +func TestUDPClient_Ping(t *testing.T) { + config := UDPConfig{Addr: "localhost:8089"} + c, err := NewUDPClient(config) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + defer c.Close() + + rtt, version, err := c.Ping(0) + if rtt != 0 || version != "" || err != nil { + t.Errorf("unexpected error. 
expected (%v, '%v', %v), actual (%v, '%v', %v)", 0, "", nil, rtt, version, err) + } +} + func TestUDPClient_Write(t *testing.T) { config := UDPConfig{Addr: "localhost:8089"} c, err := NewUDPClient(config) @@ -57,6 +72,68 @@ } } +func TestUDPClient_Batches(t *testing.T) { + var logger writeLogger + var cl udpclient + + cl.conn = &logger + cl.payloadSize = 20 // should allow for two points per batch + + // expected point should look like this: "cpu a=1i" + fields := map[string]interface{}{"a": 1} + + p, _ := NewPoint("cpu", nil, fields, time.Time{}) + + bp, _ := NewBatchPoints(BatchPointsConfig{}) + + for i := 0; i < 9; i++ { + bp.AddPoint(p) + } + + if err := cl.Write(bp); err != nil { + t.Fatalf("Unexpected error during Write: %v", err) + } + + if len(logger.writes) != 5 { + t.Errorf("Mismatched write count: got %v, exp %v", len(logger.writes), 5) + } +} + +func TestUDPClient_Split(t *testing.T) { + var logger writeLogger + var cl udpclient + + cl.conn = &logger + cl.payloadSize = 1 // force one field per point + + fields := map[string]interface{}{"a": 1, "b": 2, "c": 3, "d": 4} + + p, _ := NewPoint("cpu", nil, fields, time.Unix(1, 0)) + + bp, _ := NewBatchPoints(BatchPointsConfig{}) + + bp.AddPoint(p) + + if err := cl.Write(bp); err != nil { + t.Fatalf("Unexpected error during Write: %v", err) + } + + if len(logger.writes) != len(fields) { + t.Errorf("Mismatched write count: got %v, exp %v", len(logger.writes), len(fields)) + } +} + +type writeLogger struct { + writes [][]byte +} + +func (w *writeLogger) Write(b []byte) (int, error) { + w.writes = append(w.writes, append([]byte(nil), b...)) + return len(b), nil +} + +func (w *writeLogger) Close() error { return nil } + func TestClient_Query(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var data Response @@ -106,6 +183,72 @@ } } +func TestClient_Ping(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data Response + w.WriteHeader(http.StatusNoContent) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL} + c, _ := NewHTTPClient(config) + defer c.Close() + + _, _, err := c.Ping(0) + if err != nil { + t.Errorf("unexpected error. 
expected %v, actual %v", nil, err) + } +} + +func TestClient_Concurrent_Use(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{}`)) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL} + c, _ := NewHTTPClient(config) + defer c.Close() + + var wg sync.WaitGroup + wg.Add(3) + n := 1000 + + go func() { + defer wg.Done() + bp, err := NewBatchPoints(BatchPointsConfig{}) + if err != nil { + t.Errorf("got error %v", err) + } + + for i := 0; i < n; i++ { + if err = c.Write(bp); err != nil { + t.Fatalf("got error %v", err) + } + } + }() + + go func() { + defer wg.Done() + var q Query + for i := 0; i < n; i++ { + if _, err := c.Query(q); err != nil { + t.Fatalf("got error %v", err) + } + } + }() + + go func() { + defer wg.Done() + for i := 0; i < n; i++ { + c.Ping(time.Second) + } + }() + wg.Wait() +} + func TestClient_Write(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var data Response diff -Nru influxdb-0.10.0+dfsg1/client/v2/example_test.go influxdb-1.1.1+dfsg1/client/v2/example_test.go --- influxdb-0.10.0+dfsg1/client/v2/example_test.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/client/v2/example_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -6,7 +6,7 @@ "os" "time" - "github.com/influxdb/influxdb/client/v2" + "github.com/influxdata/influxdb/client/v2" ) // Create a new client @@ -55,6 +55,23 @@ c.Write(bp) } +// Ping the cluster using the HTTP client +func ExampleClient_Ping() { + // Make client + c, err := client.NewHTTPClient(client.HTTPConfig{ + Addr: "http://localhost:8086", + }) + if err != nil { + fmt.Println("Error creating InfluxDB Client: ", err.Error()) + } + defer c.Close() + + _, _, err = c.Ping(0) + if err != nil { + fmt.Println("Error pinging InfluxDB Cluster: ", err.Error()) + } +} + // Write a point using the HTTP client func ExampleClient_write() { // Make client diff -Nru influxdb-0.10.0+dfsg1/client/v2/udp.go influxdb-1.1.1+dfsg1/client/v2/udp.go --- influxdb-0.10.0+dfsg1/client/v2/udp.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/client/v2/udp.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,112 @@ +package client + +import ( + "fmt" + "io" + "net" + "time" +) + +const ( + // UDPPayloadSize is a reasonable default payload size for UDP packets that + // could be travelling over the internet. + UDPPayloadSize = 512 +) + +// UDPConfig is the config data needed to create a UDP Client +type UDPConfig struct { + // Addr should be of the form "host:port" + // or "[ipv6-host%zone]:port". + Addr string + + // PayloadSize is the maximum size of a UDP client message, optional + // Tune this based on your network. Defaults to UDPPayloadSize. + PayloadSize int +} + +// NewUDPClient returns a client interface for writing to an InfluxDB UDP +// service from the given config. +func NewUDPClient(conf UDPConfig) (Client, error) { + var udpAddr *net.UDPAddr + udpAddr, err := net.ResolveUDPAddr("udp", conf.Addr) + if err != nil { + return nil, err + } + + conn, err := net.DialUDP("udp", nil, udpAddr) + if err != nil { + return nil, err + } + + payloadSize := conf.PayloadSize + if payloadSize == 0 { + payloadSize = UDPPayloadSize + } + + return &udpclient{ + conn: conn, + payloadSize: payloadSize, + }, nil +} + +// Close releases the udpclient's resources. 
+func (uc *udpclient) Close() error { + return uc.conn.Close() +} + +type udpclient struct { + conn io.WriteCloser + payloadSize int +} + +func (uc *udpclient) Write(bp BatchPoints) error { + var b = make([]byte, 0, uc.payloadSize) // initial buffer size, it will grow as needed + var d, _ = time.ParseDuration("1" + bp.Precision()) + + var delayedError error + + var checkBuffer = func(n int) { + if len(b) > 0 && len(b)+n > uc.payloadSize { + if _, err := uc.conn.Write(b); err != nil { + delayedError = err + } + b = b[:0] + } + } + + for _, p := range bp.Points() { + p.pt.Round(d) + pointSize := p.pt.StringSize() + 1 // include newline in size + //point := p.pt.RoundedString(d) + "\n" + + checkBuffer(pointSize) + + if p.Time().IsZero() || pointSize <= uc.payloadSize { + b = p.pt.AppendString(b) + b = append(b, '\n') + continue + } + + points := p.pt.Split(uc.payloadSize - 1) // account for newline character + for _, sp := range points { + checkBuffer(sp.StringSize() + 1) + b = sp.AppendString(b) + b = append(b, '\n') + } + } + + if len(b) > 0 { + if _, err := uc.conn.Write(b); err != nil { + return err + } + } + return delayedError +} + +func (uc *udpclient) Query(q Query) (*Response, error) { + return nil, fmt.Errorf("Querying via UDP is not supported") +} + +func (uc *udpclient) Ping(timeout time.Duration) (time.Duration, string, error) { + return 0, "", nil +} diff -Nru influxdb-0.10.0+dfsg1/cluster/balancer.go influxdb-1.1.1+dfsg1/cluster/balancer.go --- influxdb-0.10.0+dfsg1/cluster/balancer.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cluster/balancer.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,78 +0,0 @@ -package cluster - -import ( - "math/rand" - - "github.com/influxdb/influxdb/services/meta" -) - -// Balancer represents a load-balancing algorithm for a set of nodes -type Balancer interface { - // Next returns the next Node according to the balancing method - // or nil if there are no nodes available - Next() *meta.NodeInfo -} - -type nodeBalancer struct { - nodes []meta.NodeInfo // data nodes to balance between - p int // current node index -} - -// NewNodeBalancer create a shuffled, round-robin balancer so that -// multiple instances will return nodes in randomized order and each -// each returned node will be repeated in a cycle -func NewNodeBalancer(nodes []meta.NodeInfo) Balancer { - // make a copy of the node slice so we can randomize it - // without affecting the original instance as well as ensure - // that each Balancer returns nodes in a different order - b := &nodeBalancer{} - - b.nodes = make([]meta.NodeInfo, len(nodes)) - copy(b.nodes, nodes) - - b.shuffle() - return b -} - -// shuffle randomizes the ordering the balancers available nodes -func (b *nodeBalancer) shuffle() { - for i := range b.nodes { - j := rand.Intn(i + 1) - b.nodes[i], b.nodes[j] = b.nodes[j], b.nodes[i] - } -} - -// online returns a slice of the nodes that are online -func (b *nodeBalancer) online() []meta.NodeInfo { - return b.nodes - // now := time.Now().UTC() - // up := []meta.NodeInfo{} - // for _, n := range b.nodes { - // if n.OfflineUntil.After(now) { - // continue - // } - // up = append(up, n) - // } - // return up -} - -// Next returns the next available nodes -func (b *nodeBalancer) Next() *meta.NodeInfo { - // only use online nodes - up := b.online() - - // no nodes online - if len(up) == 0 { - return nil - } - - // rollover back to the beginning - if b.p >= len(up) { - b.p = 0 - } - - d := &up[b.p] - b.p++ - - return d -} diff -Nru 
influxdb-0.10.0+dfsg1/cluster/balancer_test.go influxdb-1.1.1+dfsg1/cluster/balancer_test.go --- influxdb-0.10.0+dfsg1/cluster/balancer_test.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cluster/balancer_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,115 +0,0 @@ -package cluster_test - -import ( - "fmt" - "testing" - - "github.com/influxdb/influxdb/cluster" - "github.com/influxdb/influxdb/services/meta" -) - -func NewNodes() []meta.NodeInfo { - var nodes []meta.NodeInfo - for i := 1; i <= 2; i++ { - nodes = append(nodes, meta.NodeInfo{ - ID: uint64(i), - Host: fmt.Sprintf("localhost:999%d", i), - }) - } - return nodes -} - -func TestBalancerEmptyNodes(t *testing.T) { - b := cluster.NewNodeBalancer([]meta.NodeInfo{}) - got := b.Next() - if got != nil { - t.Errorf("expected nil, got %v", got) - } -} - -func TestBalancerUp(t *testing.T) { - nodes := NewNodes() - b := cluster.NewNodeBalancer(nodes) - - // First node in randomized round-robin order - first := b.Next() - if first == nil { - t.Errorf("expected datanode, got %v", first) - } - - // Second node in randomized round-robin order - second := b.Next() - if second == nil { - t.Errorf("expected datanode, got %v", second) - } - - // Should never get the same node in order twice - if first.ID == second.ID { - t.Errorf("expected first != second. got %v = %v", first.ID, second.ID) - } -} - -/* -func TestBalancerDown(t *testing.T) { - nodes := NewNodes() - b := cluster.NewNodeBalancer(nodes) - - nodes[0].Down() - - // First node in randomized round-robin order - first := b.Next() - if first == nil { - t.Errorf("expected datanode, got %v", first) - } - - // Second node should rollover to the first up node - second := b.Next() - if second == nil { - t.Errorf("expected datanode, got %v", second) - } - - // Health node should be returned each time - if first.ID != 2 && first.ID != second.ID { - t.Errorf("expected first != second. got %v = %v", first.ID, second.ID) - } -} -*/ - -/* -func TestBalancerBackUp(t *testing.T) { - nodes := newDataNodes() - b := cluster.NewNodeBalancer(nodes) - - nodes[0].Down() - - for i := 0; i < 3; i++ { - got := b.Next() - if got == nil { - t.Errorf("expected datanode, got %v", got) - } - - if exp := uint64(2); got.ID != exp { - t.Errorf("wrong node id: exp %v, got %v", exp, got.ID) - } - } - - nodes[0].Up() - - // First node in randomized round-robin order - first := b.Next() - if first == nil { - t.Errorf("expected datanode, got %v", first) - } - - // Second node should rollover to the first up node - second := b.Next() - if second == nil { - t.Errorf("expected datanode, got %v", second) - } - - // Should get both nodes returned - if first.ID == second.ID { - t.Errorf("expected first != second. 
got %v = %v", first.ID, second.ID) - } -} -*/ diff -Nru influxdb-0.10.0+dfsg1/cluster/client_pool.go influxdb-1.1.1+dfsg1/cluster/client_pool.go --- influxdb-0.10.0+dfsg1/cluster/client_pool.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cluster/client_pool.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,57 +0,0 @@ -package cluster - -import ( - "net" - "sync" - - "gopkg.in/fatih/pool.v2" -) - -type clientPool struct { - mu sync.RWMutex - pool map[uint64]pool.Pool -} - -func newClientPool() *clientPool { - return &clientPool{ - pool: make(map[uint64]pool.Pool), - } -} - -func (c *clientPool) setPool(nodeID uint64, p pool.Pool) { - c.mu.Lock() - c.pool[nodeID] = p - c.mu.Unlock() -} - -func (c *clientPool) getPool(nodeID uint64) (pool.Pool, bool) { - c.mu.RLock() - p, ok := c.pool[nodeID] - c.mu.RUnlock() - return p, ok -} - -func (c *clientPool) size() int { - c.mu.RLock() - var size int - for _, p := range c.pool { - size += p.Len() - } - c.mu.RUnlock() - return size -} - -func (c *clientPool) conn(nodeID uint64) (net.Conn, error) { - c.mu.RLock() - conn, err := c.pool[nodeID].Get() - c.mu.RUnlock() - return conn, err -} - -func (c *clientPool) close() { - c.mu.Lock() - for _, p := range c.pool { - p.Close() - } - c.mu.Unlock() -} diff -Nru influxdb-0.10.0+dfsg1/cluster/config.go influxdb-1.1.1+dfsg1/cluster/config.go --- influxdb-0.10.0+dfsg1/cluster/config.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cluster/config.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,41 +0,0 @@ -package cluster - -import ( - "time" - - "github.com/influxdb/influxdb/toml" -) - -const ( - // DefaultWriteTimeout is the default timeout for a complete write to succeed. - DefaultWriteTimeout = 5 * time.Second - - // DefaultShardWriterTimeout is the default timeout set on shard writers. - DefaultShardWriterTimeout = 5 * time.Second - - // DefaultShardMapperTimeout is the default timeout set on shard mappers. - DefaultShardMapperTimeout = 5 * time.Second - - // DefaultMaxRemoteWriteConnections is the maximum number of open connections - // that will be available for remote writes to another host. - DefaultMaxRemoteWriteConnections = 3 -) - -// Config represents the configuration for the clustering service. -type Config struct { - ForceRemoteShardMapping bool `toml:"force-remote-mapping"` - WriteTimeout toml.Duration `toml:"write-timeout"` - ShardWriterTimeout toml.Duration `toml:"shard-writer-timeout"` - MaxRemoteWriteConnections int `toml:"max-remote-write-connections"` - ShardMapperTimeout toml.Duration `toml:"shard-mapper-timeout"` -} - -// NewConfig returns an instance of Config with defaults. -func NewConfig() Config { - return Config{ - WriteTimeout: toml.Duration(DefaultWriteTimeout), - ShardWriterTimeout: toml.Duration(DefaultShardWriterTimeout), - ShardMapperTimeout: toml.Duration(DefaultShardMapperTimeout), - MaxRemoteWriteConnections: DefaultMaxRemoteWriteConnections, - } -} diff -Nru influxdb-0.10.0+dfsg1/cluster/config_test.go influxdb-1.1.1+dfsg1/cluster/config_test.go --- influxdb-0.10.0+dfsg1/cluster/config_test.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cluster/config_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -package cluster_test - -import ( - "testing" - "time" - - "github.com/BurntSushi/toml" - "github.com/influxdb/influxdb/cluster" -) - -func TestConfig_Parse(t *testing.T) { - // Parse configuration. 
- var c cluster.Config - if _, err := toml.Decode(` -shard-writer-timeout = "10s" -write-timeout = "20s" -`, &c); err != nil { - t.Fatal(err) - } - - // Validate configuration. - if time.Duration(c.ShardWriterTimeout) != 10*time.Second { - t.Fatalf("unexpected shard-writer timeout: %s", c.ShardWriterTimeout) - } else if time.Duration(c.WriteTimeout) != 20*time.Second { - t.Fatalf("unexpected write timeout s: %s", c.WriteTimeout) - } -} diff -Nru influxdb-0.10.0+dfsg1/cluster/internal/data.pb.go influxdb-1.1.1+dfsg1/cluster/internal/data.pb.go --- influxdb-0.10.0+dfsg1/cluster/internal/data.pb.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cluster/internal/data.pb.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,154 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: internal/data.proto -// DO NOT EDIT! - -/* -Package internal is a generated protocol buffer package. - -It is generated from these files: - internal/data.proto - -It has these top-level messages: - WriteShardRequest - WriteShardResponse - MapShardRequest - MapShardResponse -*/ -package internal - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type WriteShardRequest struct { - ShardID *uint64 `protobuf:"varint,1,req,name=ShardID" json:"ShardID,omitempty"` - Points [][]byte `protobuf:"bytes,2,rep,name=Points" json:"Points,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *WriteShardRequest) Reset() { *m = WriteShardRequest{} } -func (m *WriteShardRequest) String() string { return proto.CompactTextString(m) } -func (*WriteShardRequest) ProtoMessage() {} - -func (m *WriteShardRequest) GetShardID() uint64 { - if m != nil && m.ShardID != nil { - return *m.ShardID - } - return 0 -} - -func (m *WriteShardRequest) GetPoints() [][]byte { - if m != nil { - return m.Points - } - return nil -} - -type WriteShardResponse struct { - Code *int32 `protobuf:"varint,1,req,name=Code" json:"Code,omitempty"` - Message *string `protobuf:"bytes,2,opt,name=Message" json:"Message,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *WriteShardResponse) Reset() { *m = WriteShardResponse{} } -func (m *WriteShardResponse) String() string { return proto.CompactTextString(m) } -func (*WriteShardResponse) ProtoMessage() {} - -func (m *WriteShardResponse) GetCode() int32 { - if m != nil && m.Code != nil { - return *m.Code - } - return 0 -} - -func (m *WriteShardResponse) GetMessage() string { - if m != nil && m.Message != nil { - return *m.Message - } - return "" -} - -type MapShardRequest struct { - ShardID *uint64 `protobuf:"varint,1,req,name=ShardID" json:"ShardID,omitempty"` - Query *string `protobuf:"bytes,2,req,name=Query" json:"Query,omitempty"` - ChunkSize *int32 `protobuf:"varint,3,req,name=ChunkSize" json:"ChunkSize,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MapShardRequest) Reset() { *m = MapShardRequest{} } -func (m *MapShardRequest) String() string { return proto.CompactTextString(m) } -func (*MapShardRequest) ProtoMessage() {} - -func (m *MapShardRequest) GetShardID() uint64 { - if m != nil && m.ShardID != nil { - return *m.ShardID - } - return 0 -} - -func (m *MapShardRequest) GetQuery() string { - if m != nil && m.Query != nil { - return *m.Query - } - return "" -} - -func (m *MapShardRequest) GetChunkSize() int32 { - if m != nil && m.ChunkSize != nil { - return *m.ChunkSize - } - return 0 -} - -type 
MapShardResponse struct { - Code *int32 `protobuf:"varint,1,req,name=Code" json:"Code,omitempty"` - Message *string `protobuf:"bytes,2,opt,name=Message" json:"Message,omitempty"` - Data []byte `protobuf:"bytes,3,opt,name=Data" json:"Data,omitempty"` - TagSets []string `protobuf:"bytes,4,rep,name=TagSets" json:"TagSets,omitempty"` - Fields []string `protobuf:"bytes,5,rep,name=Fields" json:"Fields,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MapShardResponse) Reset() { *m = MapShardResponse{} } -func (m *MapShardResponse) String() string { return proto.CompactTextString(m) } -func (*MapShardResponse) ProtoMessage() {} - -func (m *MapShardResponse) GetCode() int32 { - if m != nil && m.Code != nil { - return *m.Code - } - return 0 -} - -func (m *MapShardResponse) GetMessage() string { - if m != nil && m.Message != nil { - return *m.Message - } - return "" -} - -func (m *MapShardResponse) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func (m *MapShardResponse) GetTagSets() []string { - if m != nil { - return m.TagSets - } - return nil -} - -func (m *MapShardResponse) GetFields() []string { - if m != nil { - return m.Fields - } - return nil -} diff -Nru influxdb-0.10.0+dfsg1/cluster/internal/data.proto influxdb-1.1.1+dfsg1/cluster/internal/data.proto --- influxdb-0.10.0+dfsg1/cluster/internal/data.proto 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cluster/internal/data.proto 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -package internal; - -message WriteShardRequest { - required uint64 ShardID = 1; - repeated bytes Points = 2; -} - -message WriteShardResponse { - required int32 Code = 1; - optional string Message = 2; -} - -message MapShardRequest { - required uint64 ShardID = 1; - required string Query = 2; - required int32 ChunkSize = 3; -} - -message MapShardResponse { - required int32 Code = 1; - optional string Message = 2; - optional bytes Data = 3; - repeated string TagSets = 4; - repeated string Fields = 5; -} diff -Nru influxdb-0.10.0+dfsg1/cluster/points_writer.go influxdb-1.1.1+dfsg1/cluster/points_writer.go --- influxdb-0.10.0+dfsg1/cluster/points_writer.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cluster/points_writer.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,399 +0,0 @@ -package cluster - -import ( - "errors" - "expvar" - "fmt" - "log" - "os" - "strings" - "sync" - "time" - - "github.com/influxdb/influxdb" - "github.com/influxdb/influxdb/models" - "github.com/influxdb/influxdb/services/meta" - "github.com/influxdb/influxdb/tsdb" -) - -// ConsistencyLevel represent a required replication criteria before a write can -// be returned as successful -type ConsistencyLevel int - -// The statistics generated by the "write" mdoule -const ( - statWriteReq = "req" - statPointWriteReq = "pointReq" - statPointWriteReqLocal = "pointReqLocal" - statPointWriteReqRemote = "pointReqRemote" - statWriteOK = "writeOk" - statWritePartial = "writePartial" - statWriteTimeout = "writeTimeout" - statWriteErr = "writeError" - statWritePointReqHH = "pointReqHH" - statSubWriteOK = "subWriteOk" - statSubWriteDrop = "subWriteDrop" -) - -const ( - // ConsistencyLevelAny allows for hinted hand off, potentially no write happened yet - ConsistencyLevelAny ConsistencyLevel = iota - - // ConsistencyLevelOne requires at least one data node acknowledged a write - ConsistencyLevelOne - - // ConsistencyLevelQuorum requires a quorum of data nodes to acknowledge a write - ConsistencyLevelQuorum - - // ConsistencyLevelAll requires 
all data nodes to acknowledge a write - ConsistencyLevelAll -) - -var ( - // ErrTimeout is returned when a write times out. - ErrTimeout = errors.New("timeout") - - // ErrPartialWrite is returned when a write partially succeeds but does - // not meet the requested consistency level. - ErrPartialWrite = errors.New("partial write") - - // ErrWriteFailed is returned when no writes succeeded. - ErrWriteFailed = errors.New("write failed") - - // ErrInvalidConsistencyLevel is returned when parsing the string version - // of a consistency level. - ErrInvalidConsistencyLevel = errors.New("invalid consistency level") -) - -// ParseConsistencyLevel converts a consistency level string to the corresponding ConsistencyLevel const -func ParseConsistencyLevel(level string) (ConsistencyLevel, error) { - switch strings.ToLower(level) { - case "any": - return ConsistencyLevelAny, nil - case "one": - return ConsistencyLevelOne, nil - case "quorum": - return ConsistencyLevelQuorum, nil - case "all": - return ConsistencyLevelAll, nil - default: - return 0, ErrInvalidConsistencyLevel - } -} - -// PointsWriter handles writes across multiple local and remote data nodes. -type PointsWriter struct { - mu sync.RWMutex - closing chan struct{} - WriteTimeout time.Duration - Logger *log.Logger - - Node *influxdb.Node - - MetaClient interface { - Database(name string) (di *meta.DatabaseInfo, err error) - RetentionPolicy(database, policy string) (*meta.RetentionPolicyInfo, error) - CreateShardGroup(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) - ShardOwner(shardID uint64) (string, string, *meta.ShardGroupInfo) - } - - TSDBStore interface { - CreateShard(database, retentionPolicy string, shardID uint64) error - WriteToShard(shardID uint64, points []models.Point) error - } - - ShardWriter interface { - WriteShard(shardID, ownerID uint64, points []models.Point) error - } - - HintedHandoff interface { - WriteShard(shardID, ownerID uint64, points []models.Point) error - } - - Subscriber interface { - Points() chan<- *WritePointsRequest - } - subPoints chan<- *WritePointsRequest - - statMap *expvar.Map -} - -// NewPointsWriter returns a new instance of PointsWriter for a node. -func NewPointsWriter() *PointsWriter { - return &PointsWriter{ - closing: make(chan struct{}), - WriteTimeout: DefaultWriteTimeout, - Logger: log.New(os.Stderr, "[write] ", log.LstdFlags), - statMap: influxdb.NewStatistics("write", "write", nil), - } -} - -// ShardMapping contains a mapping of a shards to a points. 
-type ShardMapping struct { - Points map[uint64][]models.Point // The points associated with a shard ID - Shards map[uint64]*meta.ShardInfo // The shards that have been mapped, keyed by shard ID -} - -// NewShardMapping creates an empty ShardMapping -func NewShardMapping() *ShardMapping { - return &ShardMapping{ - Points: map[uint64][]models.Point{}, - Shards: map[uint64]*meta.ShardInfo{}, - } -} - -// MapPoint maps a point to shard -func (s *ShardMapping) MapPoint(shardInfo *meta.ShardInfo, p models.Point) { - points, ok := s.Points[shardInfo.ID] - if !ok { - s.Points[shardInfo.ID] = []models.Point{p} - } else { - s.Points[shardInfo.ID] = append(points, p) - } - s.Shards[shardInfo.ID] = shardInfo -} - -// Open opens the communication channel with the point writer -func (w *PointsWriter) Open() error { - w.mu.Lock() - defer w.mu.Unlock() - w.closing = make(chan struct{}) - if w.Subscriber != nil { - w.subPoints = w.Subscriber.Points() - } - return nil -} - -// Close closes the communication channel with the point writer -func (w *PointsWriter) Close() error { - w.mu.Lock() - defer w.mu.Unlock() - if w.closing != nil { - close(w.closing) - } - if w.subPoints != nil { - // 'nil' channels always block so this makes the - // select statement in WritePoints hit its default case - // dropping any in-flight writes. - w.subPoints = nil - } - return nil -} - -// MapShards maps the points contained in wp to a ShardMapping. If a point -// maps to a shard group or shard that does not currently exist, it will be -// created before returning the mapping. -func (w *PointsWriter) MapShards(wp *WritePointsRequest) (*ShardMapping, error) { - - // holds the start time ranges for required shard groups - timeRanges := map[time.Time]*meta.ShardGroupInfo{} - - rp, err := w.MetaClient.RetentionPolicy(wp.Database, wp.RetentionPolicy) - if err != nil { - return nil, err - } - if rp == nil { - return nil, influxdb.ErrRetentionPolicyNotFound(wp.RetentionPolicy) - } - - for _, p := range wp.Points { - timeRanges[p.Time().Truncate(rp.ShardGroupDuration)] = nil - } - - // holds all the shard groups and shards that are required for writes - for t := range timeRanges { - sg, err := w.MetaClient.CreateShardGroup(wp.Database, wp.RetentionPolicy, t) - if err != nil { - return nil, err - } - timeRanges[t] = sg - } - - mapping := NewShardMapping() - for _, p := range wp.Points { - sg := timeRanges[p.Time().Truncate(rp.ShardGroupDuration)] - sh := sg.ShardFor(p.HashID()) - mapping.MapPoint(&sh, p) - } - return mapping, nil -} - -// WritePointsInto is a copy of WritePoints that uses a tsdb structure instead of -// a cluster structure for information. This is to avoid a circular dependency -func (w *PointsWriter) WritePointsInto(p *tsdb.IntoWriteRequest) error { - req := WritePointsRequest{ - Database: p.Database, - RetentionPolicy: p.RetentionPolicy, - ConsistencyLevel: ConsistencyLevelAny, - Points: p.Points, - } - return w.WritePoints(&req) -} - -// WritePoints writes across multiple local and remote data nodes according the consistency level. 
-func (w *PointsWriter) WritePoints(p *WritePointsRequest) error { - w.statMap.Add(statWriteReq, 1) - w.statMap.Add(statPointWriteReq, int64(len(p.Points))) - - if p.RetentionPolicy == "" { - db, err := w.MetaClient.Database(p.Database) - if err != nil { - return err - } else if db == nil { - return influxdb.ErrDatabaseNotFound(p.Database) - } - p.RetentionPolicy = db.DefaultRetentionPolicy - } - - shardMappings, err := w.MapShards(p) - if err != nil { - return err - } - - // Write each shard in it's own goroutine and return as soon - // as one fails. - ch := make(chan error, len(shardMappings.Points)) - for shardID, points := range shardMappings.Points { - go func(shard *meta.ShardInfo, database, retentionPolicy string, points []models.Point) { - ch <- w.writeToShard(shard, p.Database, p.RetentionPolicy, p.ConsistencyLevel, points) - }(shardMappings.Shards[shardID], p.Database, p.RetentionPolicy, points) - } - - // Send points to subscriptions if possible. - ok := false - // We need to lock just in case the channel is about to be nil'ed - w.mu.RLock() - select { - case w.subPoints <- p: - ok = true - default: - } - w.mu.RUnlock() - if ok { - w.statMap.Add(statSubWriteOK, 1) - } else { - w.statMap.Add(statSubWriteDrop, 1) - } - - for range shardMappings.Points { - select { - case <-w.closing: - return ErrWriteFailed - case err := <-ch: - if err != nil { - return err - } - } - } - return nil -} - -// writeToShards writes points to a shard and ensures a write consistency level has been met. If the write -// partially succeeds, ErrPartialWrite is returned. -func (w *PointsWriter) writeToShard(shard *meta.ShardInfo, database, retentionPolicy string, - consistency ConsistencyLevel, points []models.Point) error { - // The required number of writes to achieve the requested consistency level - required := len(shard.Owners) - switch consistency { - case ConsistencyLevelAny, ConsistencyLevelOne: - required = 1 - case ConsistencyLevelQuorum: - required = required/2 + 1 - } - - // response channel for each shard writer go routine - type AsyncWriteResult struct { - Owner meta.ShardOwner - Err error - } - ch := make(chan *AsyncWriteResult, len(shard.Owners)) - - for _, owner := range shard.Owners { - go func(shardID uint64, owner meta.ShardOwner, points []models.Point) { - if w.Node.ID == owner.NodeID { - w.statMap.Add(statPointWriteReqLocal, int64(len(points))) - - err := w.TSDBStore.WriteToShard(shardID, points) - // If we've written to shard that should exist on the current node, but the store has - // not actually created this shard, tell it to create it and retry the write - if err == tsdb.ErrShardNotFound { - err = w.TSDBStore.CreateShard(database, retentionPolicy, shardID) - if err != nil { - ch <- &AsyncWriteResult{owner, err} - return - } - err = w.TSDBStore.WriteToShard(shardID, points) - } - ch <- &AsyncWriteResult{owner, err} - return - } - - w.statMap.Add(statPointWriteReqRemote, int64(len(points))) - err := w.ShardWriter.WriteShard(shardID, owner.NodeID, points) - if err != nil && tsdb.IsRetryable(err) { - // The remote write failed so queue it via hinted handoff - w.statMap.Add(statWritePointReqHH, int64(len(points))) - hherr := w.HintedHandoff.WriteShard(shardID, owner.NodeID, points) - if hherr != nil { - ch <- &AsyncWriteResult{owner, hherr} - return - } - - // If the write consistency level is ANY, then a successful hinted handoff can - // be considered a successful write so send nil to the response channel - // otherwise, let the original error propagate to the response channel - if 
hherr == nil && consistency == ConsistencyLevelAny { - ch <- &AsyncWriteResult{owner, nil} - return - } - } - ch <- &AsyncWriteResult{owner, err} - - }(shard.ID, owner, points) - } - - var wrote int - timeout := time.After(w.WriteTimeout) - var writeError error - for range shard.Owners { - select { - case <-w.closing: - return ErrWriteFailed - case <-timeout: - w.statMap.Add(statWriteTimeout, 1) - // return timeout error to caller - return ErrTimeout - case result := <-ch: - // If the write returned an error, continue to the next response - if result.Err != nil { - w.statMap.Add(statWriteErr, 1) - w.Logger.Printf("write failed for shard %d on node %d: %v", shard.ID, result.Owner.NodeID, result.Err) - - // Keep track of the first error we see to return back to the client - if writeError == nil { - writeError = result.Err - } - continue - } - - wrote++ - - // We wrote the required consistency level - if wrote >= required { - w.statMap.Add(statWriteOK, 1) - return nil - } - } - } - - if wrote > 0 { - w.statMap.Add(statWritePartial, 1) - return ErrPartialWrite - } - - if writeError != nil { - return fmt.Errorf("write failed: %v", writeError) - } - - return ErrWriteFailed -} diff -Nru influxdb-0.10.0+dfsg1/cluster/points_writer_test.go influxdb-1.1.1+dfsg1/cluster/points_writer_test.go --- influxdb-0.10.0+dfsg1/cluster/points_writer_test.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cluster/points_writer_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,495 +0,0 @@ -package cluster_test - -import ( - "fmt" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/influxdb/influxdb" - "github.com/influxdb/influxdb/cluster" - "github.com/influxdb/influxdb/models" - "github.com/influxdb/influxdb/services/meta" -) - -// Ensures the points writer maps a single point to a single shard. -func TestPointsWriter_MapShards_One(t *testing.T) { - ms := MetaClient{} - rp := NewRetentionPolicy("myp", time.Hour, 3) - - ms.NodeIDFn = func() uint64 { return 1 } - ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { - return rp, nil - } - - ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { - return &rp.ShardGroups[0], nil - } - - c := cluster.PointsWriter{MetaClient: ms} - pr := &cluster.WritePointsRequest{ - Database: "mydb", - RetentionPolicy: "myrp", - ConsistencyLevel: cluster.ConsistencyLevelOne, - } - pr.AddPoint("cpu", 1.0, time.Now(), nil) - - var ( - shardMappings *cluster.ShardMapping - err error - ) - if shardMappings, err = c.MapShards(pr); err != nil { - t.Fatalf("unexpected an error: %v", err) - } - - if exp := 1; len(shardMappings.Points) != exp { - t.Errorf("MapShards() len mismatch. got %v, exp %v", len(shardMappings.Points), exp) - } -} - -// Ensures the points writer maps a multiple points across shard group boundaries. 
-func TestPointsWriter_MapShards_Multiple(t *testing.T) { - ms := MetaClient{} - rp := NewRetentionPolicy("myp", time.Hour, 3) - AttachShardGroupInfo(rp, []meta.ShardOwner{ - {NodeID: 1}, - {NodeID: 2}, - {NodeID: 3}, - }) - AttachShardGroupInfo(rp, []meta.ShardOwner{ - {NodeID: 1}, - {NodeID: 2}, - {NodeID: 3}, - }) - - ms.NodeIDFn = func() uint64 { return 1 } - ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { - return rp, nil - } - - ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { - for i, sg := range rp.ShardGroups { - if timestamp.Equal(sg.StartTime) || timestamp.After(sg.StartTime) && timestamp.Before(sg.EndTime) { - return &rp.ShardGroups[i], nil - } - } - panic("should not get here") - } - - c := cluster.PointsWriter{MetaClient: ms} - pr := &cluster.WritePointsRequest{ - Database: "mydb", - RetentionPolicy: "myrp", - ConsistencyLevel: cluster.ConsistencyLevelOne, - } - - // Three points that range over the shardGroup duration (1h) and should map to two - // distinct shards - pr.AddPoint("cpu", 1.0, time.Unix(0, 0), nil) - pr.AddPoint("cpu", 2.0, time.Unix(0, 0).Add(time.Hour), nil) - pr.AddPoint("cpu", 3.0, time.Unix(0, 0).Add(time.Hour+time.Second), nil) - - var ( - shardMappings *cluster.ShardMapping - err error - ) - if shardMappings, err = c.MapShards(pr); err != nil { - t.Fatalf("unexpected an error: %v", err) - } - - if exp := 2; len(shardMappings.Points) != exp { - t.Errorf("MapShards() len mismatch. got %v, exp %v", len(shardMappings.Points), exp) - } - - for _, points := range shardMappings.Points { - // First shard shoud have 1 point w/ first point added - if len(points) == 1 && points[0].Time() != pr.Points[0].Time() { - t.Fatalf("MapShards() value mismatch. got %v, exp %v", points[0].Time(), pr.Points[0].Time()) - } - - // Second shard shoud have the last two points added - if len(points) == 2 && points[0].Time() != pr.Points[1].Time() { - t.Fatalf("MapShards() value mismatch. got %v, exp %v", points[0].Time(), pr.Points[1].Time()) - } - - if len(points) == 2 && points[1].Time() != pr.Points[2].Time() { - t.Fatalf("MapShards() value mismatch. got %v, exp %v", points[1].Time(), pr.Points[2].Time()) - } - } -} - -func TestPointsWriter_WritePoints(t *testing.T) { - tests := []struct { - name string - database string - retentionPolicy string - consistency cluster.ConsistencyLevel - - // the responses returned by each shard write call. 
node ID 1 = pos 0 - err []error - expErr error - }{ - // Consistency one - { - name: "write one success", - database: "mydb", - retentionPolicy: "myrp", - consistency: cluster.ConsistencyLevelOne, - err: []error{nil, nil, nil}, - expErr: nil, - }, - { - name: "write one error", - database: "mydb", - retentionPolicy: "myrp", - consistency: cluster.ConsistencyLevelOne, - err: []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), fmt.Errorf("a failure")}, - expErr: fmt.Errorf("write failed: a failure"), - }, - - // Consistency any - { - name: "write any success", - database: "mydb", - retentionPolicy: "myrp", - consistency: cluster.ConsistencyLevelAny, - err: []error{fmt.Errorf("a failure"), nil, fmt.Errorf("a failure")}, - expErr: nil, - }, - // Consistency all - { - name: "write all success", - database: "mydb", - retentionPolicy: "myrp", - consistency: cluster.ConsistencyLevelAll, - err: []error{nil, nil, nil}, - expErr: nil, - }, - { - name: "write all, 2/3, partial write", - database: "mydb", - retentionPolicy: "myrp", - consistency: cluster.ConsistencyLevelAll, - err: []error{nil, fmt.Errorf("a failure"), nil}, - expErr: cluster.ErrPartialWrite, - }, - { - name: "write all, 1/3 (failure)", - database: "mydb", - retentionPolicy: "myrp", - consistency: cluster.ConsistencyLevelAll, - err: []error{nil, fmt.Errorf("a failure"), fmt.Errorf("a failure")}, - expErr: cluster.ErrPartialWrite, - }, - - // Consistency quorum - { - name: "write quorum, 1/3 failure", - consistency: cluster.ConsistencyLevelQuorum, - database: "mydb", - retentionPolicy: "myrp", - err: []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), nil}, - expErr: cluster.ErrPartialWrite, - }, - { - name: "write quorum, 2/3 success", - database: "mydb", - retentionPolicy: "myrp", - consistency: cluster.ConsistencyLevelQuorum, - err: []error{nil, nil, fmt.Errorf("a failure")}, - expErr: nil, - }, - { - name: "write quorum, 3/3 success", - database: "mydb", - retentionPolicy: "myrp", - consistency: cluster.ConsistencyLevelQuorum, - err: []error{nil, nil, nil}, - expErr: nil, - }, - - // Error write error - { - name: "no writes succeed", - database: "mydb", - retentionPolicy: "myrp", - consistency: cluster.ConsistencyLevelOne, - err: []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), fmt.Errorf("a failure")}, - expErr: fmt.Errorf("write failed: a failure"), - }, - - // Hinted handoff w/ ANY - { - name: "hinted handoff write succeed", - database: "mydb", - retentionPolicy: "myrp", - consistency: cluster.ConsistencyLevelAny, - err: []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), fmt.Errorf("a failure")}, - expErr: nil, - }, - - // Write to non-existent database - { - name: "write to non-existent database", - database: "doesnt_exist", - retentionPolicy: "", - consistency: cluster.ConsistencyLevelAny, - err: []error{nil, nil, nil}, - expErr: fmt.Errorf("database not found: doesnt_exist"), - }, - } - - for _, test := range tests { - - pr := &cluster.WritePointsRequest{ - Database: test.database, - RetentionPolicy: test.retentionPolicy, - ConsistencyLevel: test.consistency, - } - - // Three points that range over the shardGroup duration (1h) and should map to two - // distinct shards - pr.AddPoint("cpu", 1.0, time.Unix(0, 0), nil) - pr.AddPoint("cpu", 2.0, time.Unix(0, 0).Add(time.Hour), nil) - pr.AddPoint("cpu", 3.0, time.Unix(0, 0).Add(time.Hour+time.Second), nil) - - // copy to prevent data race - theTest := test - sm := cluster.NewShardMapping() - sm.MapPoint( - &meta.ShardInfo{ID: uint64(1), Owners: 
[]meta.ShardOwner{ - {NodeID: 1}, - {NodeID: 2}, - {NodeID: 3}, - }}, - pr.Points[0]) - sm.MapPoint( - &meta.ShardInfo{ID: uint64(2), Owners: []meta.ShardOwner{ - {NodeID: 1}, - {NodeID: 2}, - {NodeID: 3}, - }}, - pr.Points[1]) - sm.MapPoint( - &meta.ShardInfo{ID: uint64(2), Owners: []meta.ShardOwner{ - {NodeID: 1}, - {NodeID: 2}, - {NodeID: 3}, - }}, - pr.Points[2]) - - // Local cluster.Node ShardWriter - // lock on the write increment since these functions get called in parallel - var mu sync.Mutex - sw := &fakeShardWriter{ - ShardWriteFn: func(shardID, nodeID uint64, points []models.Point) error { - mu.Lock() - defer mu.Unlock() - return theTest.err[int(nodeID)-1] - }, - } - - store := &fakeStore{ - WriteFn: func(shardID uint64, points []models.Point) error { - mu.Lock() - defer mu.Unlock() - return theTest.err[0] - }, - } - - hh := &fakeShardWriter{ - ShardWriteFn: func(shardID, nodeID uint64, points []models.Point) error { - return nil - }, - } - - ms := NewMetaClient() - ms.DatabaseFn = func(database string) (*meta.DatabaseInfo, error) { - return nil, nil - } - ms.NodeIDFn = func() uint64 { return 1 } - - subPoints := make(chan *cluster.WritePointsRequest, 1) - sub := Subscriber{} - sub.PointsFn = func() chan<- *cluster.WritePointsRequest { - return subPoints - } - - c := cluster.NewPointsWriter() - c.MetaClient = ms - c.ShardWriter = sw - c.TSDBStore = store - c.HintedHandoff = hh - c.Subscriber = sub - c.Node = &influxdb.Node{ID: 1} - - c.Open() - defer c.Close() - - err := c.WritePoints(pr) - if err == nil && test.expErr != nil { - t.Errorf("PointsWriter.WritePoints(): '%s' error: got %v, exp %v", test.name, err, test.expErr) - } - - if err != nil && test.expErr == nil { - t.Errorf("PointsWriter.WritePoints(): '%s' error: got %v, exp %v", test.name, err, test.expErr) - } - if err != nil && test.expErr != nil && err.Error() != test.expErr.Error() { - t.Errorf("PointsWriter.WritePoints(): '%s' error: got %v, exp %v", test.name, err, test.expErr) - } - if test.expErr == nil { - select { - case p := <-subPoints: - if p != pr { - t.Errorf("PointsWriter.WritePoints(): '%s' error: unexpected WritePointsRequest got %v, exp %v", test.name, p, pr) - } - default: - t.Errorf("PointsWriter.WritePoints(): '%s' error: Subscriber.Points not called", test.name) - } - } - } -} - -var shardID uint64 - -type fakeShardWriter struct { - ShardWriteFn func(shardID, nodeID uint64, points []models.Point) error -} - -func (f *fakeShardWriter) WriteShard(shardID, nodeID uint64, points []models.Point) error { - return f.ShardWriteFn(shardID, nodeID, points) -} - -type fakeStore struct { - WriteFn func(shardID uint64, points []models.Point) error - CreateShardfn func(database, retentionPolicy string, shardID uint64) error -} - -func (f *fakeStore) WriteToShard(shardID uint64, points []models.Point) error { - return f.WriteFn(shardID, points) -} - -func (f *fakeStore) CreateShard(database, retentionPolicy string, shardID uint64) error { - return f.CreateShardfn(database, retentionPolicy, shardID) -} - -func NewMetaClient() *MetaClient { - ms := &MetaClient{} - rp := NewRetentionPolicy("myp", time.Hour, 3) - AttachShardGroupInfo(rp, []meta.ShardOwner{ - {NodeID: 1}, - {NodeID: 2}, - {NodeID: 3}, - }) - AttachShardGroupInfo(rp, []meta.ShardOwner{ - {NodeID: 1}, - {NodeID: 2}, - {NodeID: 3}, - }) - - ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { - return rp, nil - } - - ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) 
(*meta.ShardGroupInfo, error) { - for i, sg := range rp.ShardGroups { - if timestamp.Equal(sg.StartTime) || timestamp.After(sg.StartTime) && timestamp.Before(sg.EndTime) { - return &rp.ShardGroups[i], nil - } - } - panic("should not get here") - } - return ms -} - -type MetaClient struct { - NodeIDFn func() uint64 - RetentionPolicyFn func(database, name string) (*meta.RetentionPolicyInfo, error) - CreateShardGroupIfNotExistsFn func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) - DatabaseFn func(database string) (*meta.DatabaseInfo, error) - ShardOwnerFn func(shardID uint64) (string, string, *meta.ShardGroupInfo) -} - -func (m MetaClient) NodeID() uint64 { return m.NodeIDFn() } - -func (m MetaClient) RetentionPolicy(database, name string) (*meta.RetentionPolicyInfo, error) { - return m.RetentionPolicyFn(database, name) -} - -func (m MetaClient) CreateShardGroup(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { - return m.CreateShardGroupIfNotExistsFn(database, policy, timestamp) -} - -func (m MetaClient) Database(database string) (*meta.DatabaseInfo, error) { - return m.DatabaseFn(database) -} - -func (m MetaClient) ShardOwner(shardID uint64) (string, string, *meta.ShardGroupInfo) { - return m.ShardOwnerFn(shardID) -} - -type Subscriber struct { - PointsFn func() chan<- *cluster.WritePointsRequest -} - -func (s Subscriber) Points() chan<- *cluster.WritePointsRequest { - return s.PointsFn() -} - -func NewRetentionPolicy(name string, duration time.Duration, nodeCount int) *meta.RetentionPolicyInfo { - shards := []meta.ShardInfo{} - owners := []meta.ShardOwner{} - for i := 1; i <= nodeCount; i++ { - owners = append(owners, meta.ShardOwner{NodeID: uint64(i)}) - } - - // each node is fully replicated with each other - shards = append(shards, meta.ShardInfo{ - ID: nextShardID(), - Owners: owners, - }) - - rp := &meta.RetentionPolicyInfo{ - Name: "myrp", - ReplicaN: nodeCount, - Duration: duration, - ShardGroupDuration: duration, - ShardGroups: []meta.ShardGroupInfo{ - meta.ShardGroupInfo{ - ID: nextShardID(), - StartTime: time.Unix(0, 0), - EndTime: time.Unix(0, 0).Add(duration).Add(-1), - Shards: shards, - }, - }, - } - return rp -} - -func AttachShardGroupInfo(rp *meta.RetentionPolicyInfo, owners []meta.ShardOwner) { - var startTime, endTime time.Time - if len(rp.ShardGroups) == 0 { - startTime = time.Unix(0, 0) - } else { - startTime = rp.ShardGroups[len(rp.ShardGroups)-1].StartTime.Add(rp.ShardGroupDuration) - } - endTime = startTime.Add(rp.ShardGroupDuration).Add(-1) - - sh := meta.ShardGroupInfo{ - ID: uint64(len(rp.ShardGroups) + 1), - StartTime: startTime, - EndTime: endTime, - Shards: []meta.ShardInfo{ - meta.ShardInfo{ - ID: nextShardID(), - Owners: owners, - }, - }, - } - rp.ShardGroups = append(rp.ShardGroups, sh) -} - -func nextShardID() uint64 { - return atomic.AddUint64(&shardID, 1) -} diff -Nru influxdb-0.10.0+dfsg1/cluster/pool.go influxdb-1.1.1+dfsg1/cluster/pool.go --- influxdb-0.10.0+dfsg1/cluster/pool.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cluster/pool.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,188 +0,0 @@ -package cluster - -import ( - "errors" - "fmt" - "net" - "sync" - "sync/atomic" - "time" - - "gopkg.in/fatih/pool.v2" -) - -// boundedPool implements the Pool interface based on buffered channels. 
-type boundedPool struct { - // storage for our net.Conn connections - mu sync.Mutex - conns chan net.Conn - - timeout time.Duration - total int32 - // net.Conn generator - factory Factory -} - -// Factory is a function to create new connections. -type Factory func() (net.Conn, error) - -// NewBoundedPool returns a new pool based on buffered channels with an initial -// capacity, maximum capacity and timeout to wait for a connection from the pool. -// Factory is used when initial capacity is -// greater than zero to fill the pool. A zero initialCap doesn't fill the Pool -// until a new Get() is called. During a Get(), If there is no new connection -// available in the pool and total connections is less than the max, a new connection -// will be created via the Factory() method. Othewise, the call will block until -// a connection is available or the timeout is reached. -func NewBoundedPool(initialCap, maxCap int, timeout time.Duration, factory Factory) (pool.Pool, error) { - if initialCap < 0 || maxCap <= 0 || initialCap > maxCap { - return nil, errors.New("invalid capacity settings") - } - - c := &boundedPool{ - conns: make(chan net.Conn, maxCap), - factory: factory, - timeout: timeout, - } - - // create initial connections, if something goes wrong, - // just close the pool error out. - for i := 0; i < initialCap; i++ { - conn, err := factory() - if err != nil { - c.Close() - return nil, fmt.Errorf("factory is not able to fill the pool: %s", err) - } - c.conns <- conn - atomic.AddInt32(&c.total, 1) - } - - return c, nil -} - -func (c *boundedPool) getConns() chan net.Conn { - c.mu.Lock() - conns := c.conns - c.mu.Unlock() - return conns -} - -// Get implements the Pool interfaces Get() method. If there is no new -// connection available in the pool, a new connection will be created via the -// Factory() method. -func (c *boundedPool) Get() (net.Conn, error) { - conns := c.getConns() - if conns == nil { - return nil, pool.ErrClosed - } - - // Try and grab a connection from the pool - select { - case conn := <-conns: - if conn == nil { - return nil, pool.ErrClosed - } - return c.wrapConn(conn), nil - default: - // Could not get connection, can we create a new one? - if atomic.LoadInt32(&c.total) < int32(cap(conns)) { - conn, err := c.factory() - if err != nil { - return nil, err - } - atomic.AddInt32(&c.total, 1) - - return c.wrapConn(conn), nil - } - } - - // The pool was empty and we couldn't create a new one to - // retry until one is free or we timeout - select { - case conn := <-conns: - if conn == nil { - return nil, pool.ErrClosed - } - return c.wrapConn(conn), nil - case <-time.After(c.timeout): - return nil, fmt.Errorf("timed out waiting for free connection") - } - -} - -// put puts the connection back to the pool. If the pool is full or closed, -// conn is simply closed. A nil conn will be rejected. -func (c *boundedPool) put(conn net.Conn) error { - if conn == nil { - return errors.New("connection is nil. rejecting") - } - - c.mu.Lock() - defer c.mu.Unlock() - - if c.conns == nil { - // pool is closed, close passed connection - return conn.Close() - } - - // put the resource back into the pool. If the pool is full, this will - // block and the default case will be executed. 
- select { - case c.conns <- conn: - return nil - default: - // pool is full, close passed connection - return conn.Close() - } -} - -func (c *boundedPool) Close() { - c.mu.Lock() - conns := c.conns - c.conns = nil - c.factory = nil - c.mu.Unlock() - - if conns == nil { - return - } - - close(conns) - for conn := range conns { - conn.Close() - } -} - -func (c *boundedPool) Len() int { return len(c.getConns()) } - -// newConn wraps a standard net.Conn to a poolConn net.Conn. -func (c *boundedPool) wrapConn(conn net.Conn) net.Conn { - p := &pooledConn{c: c} - p.Conn = conn - return p -} - -// pooledConn is a wrapper around net.Conn to modify the the behavior of -// net.Conn's Close() method. -type pooledConn struct { - net.Conn - c *boundedPool - unusable bool -} - -// Close() puts the given connects back to the pool instead of closing it. -func (p pooledConn) Close() error { - if p.unusable { - if p.Conn != nil { - return p.Conn.Close() - } - return nil - } - return p.c.put(p.Conn) -} - -// MarkUnusable() marks the connection not usable any more, to let the pool close it instead of returning it to pool. -func (p *pooledConn) MarkUnusable() { - p.unusable = true - atomic.AddInt32(&p.c.total, -1) -} diff -Nru influxdb-0.10.0+dfsg1/cluster/rpc.go influxdb-1.1.1+dfsg1/cluster/rpc.go --- influxdb-0.10.0+dfsg1/cluster/rpc.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cluster/rpc.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,221 +0,0 @@ -package cluster - -import ( - "fmt" - "time" - - "github.com/gogo/protobuf/proto" - "github.com/influxdb/influxdb/cluster/internal" - "github.com/influxdb/influxdb/models" -) - -//go:generate protoc --gogo_out=. internal/data.proto - -// MapShardRequest represents the request to map a remote shard for a query. -type MapShardRequest struct { - pb internal.MapShardRequest -} - -// ShardID of the map request -func (m *MapShardRequest) ShardID() uint64 { return m.pb.GetShardID() } - -// Query returns the Shard map request's query -func (m *MapShardRequest) Query() string { return m.pb.GetQuery() } - -// ChunkSize returns Shard map request's chunk size -func (m *MapShardRequest) ChunkSize() int32 { return m.pb.GetChunkSize() } - -// SetShardID sets the map request's shard id -func (m *MapShardRequest) SetShardID(id uint64) { m.pb.ShardID = &id } - -// SetQuery sets the Shard map request's Query -func (m *MapShardRequest) SetQuery(query string) { m.pb.Query = &query } - -// SetChunkSize sets the Shard map request's chunk size -func (m *MapShardRequest) SetChunkSize(chunkSize int32) { m.pb.ChunkSize = &chunkSize } - -// MarshalBinary encodes the object to a binary format. -func (m *MapShardRequest) MarshalBinary() ([]byte, error) { - return proto.Marshal(&m.pb) -} - -// UnmarshalBinary populates MapShardRequest from a binary format. 
-func (m *MapShardRequest) UnmarshalBinary(buf []byte) error { - if err := proto.Unmarshal(buf, &m.pb); err != nil { - return err - } - return nil -} - -// MapShardResponse represents the response returned from a remote MapShardRequest call -type MapShardResponse struct { - pb internal.MapShardResponse -} - -// NewMapShardResponse returns the response returned from a remote MapShardRequest call -func NewMapShardResponse(code int, message string) *MapShardResponse { - m := &MapShardResponse{} - m.SetCode(code) - m.SetMessage(message) - return m -} - -// Code returns the Shard map response's code -func (r *MapShardResponse) Code() int { return int(r.pb.GetCode()) } - -// Message returns the the Shard map response's Message -func (r *MapShardResponse) Message() string { return r.pb.GetMessage() } - -// TagSets returns Shard map response's tag sets -func (r *MapShardResponse) TagSets() []string { return r.pb.GetTagSets() } - -// Fields returns the Shard map response's Fields -func (r *MapShardResponse) Fields() []string { return r.pb.GetFields() } - -// Data returns the Shard map response's Data -func (r *MapShardResponse) Data() []byte { return r.pb.GetData() } - -// SetCode sets the Shard map response's code -func (r *MapShardResponse) SetCode(code int) { r.pb.Code = proto.Int32(int32(code)) } - -// SetMessage sets Shard map response's message -func (r *MapShardResponse) SetMessage(message string) { r.pb.Message = &message } - -// SetTagSets sets Shard map response's tagsets -func (r *MapShardResponse) SetTagSets(tagsets []string) { r.pb.TagSets = tagsets } - -// SetFields sets the Shard map response's Fields -func (r *MapShardResponse) SetFields(fields []string) { r.pb.Fields = fields } - -// SetData sets the Shard map response's Data -func (r *MapShardResponse) SetData(data []byte) { r.pb.Data = data } - -// MarshalBinary encodes the object to a binary format. -func (r *MapShardResponse) MarshalBinary() ([]byte, error) { - return proto.Marshal(&r.pb) -} - -// UnmarshalBinary populates WritePointRequest from a binary format. 
-func (r *MapShardResponse) UnmarshalBinary(buf []byte) error { - if err := proto.Unmarshal(buf, &r.pb); err != nil { - return err - } - return nil -} - -// WritePointsRequest represents a request to write point data to the cluster -type WritePointsRequest struct { - Database string - RetentionPolicy string - ConsistencyLevel ConsistencyLevel - Points []models.Point -} - -// AddPoint adds a point to the WritePointRequest with field key 'value' -func (w *WritePointsRequest) AddPoint(name string, value interface{}, timestamp time.Time, tags map[string]string) { - pt, err := models.NewPoint( - name, tags, map[string]interface{}{"value": value}, timestamp, - ) - if err != nil { - return - } - w.Points = append(w.Points, pt) -} - -// WriteShardRequest represents the a request to write a slice of points to a shard -type WriteShardRequest struct { - pb internal.WriteShardRequest -} - -// WriteShardResponse represents the response returned from a remote WriteShardRequest call -type WriteShardResponse struct { - pb internal.WriteShardResponse -} - -// SetShardID sets the ShardID -func (w *WriteShardRequest) SetShardID(id uint64) { w.pb.ShardID = &id } - -// ShardID gets the ShardID -func (w *WriteShardRequest) ShardID() uint64 { return w.pb.GetShardID() } - -// Points returns the time series Points -func (w *WriteShardRequest) Points() []models.Point { return w.unmarshalPoints() } - -// AddPoint adds a new time series point -func (w *WriteShardRequest) AddPoint(name string, value interface{}, timestamp time.Time, tags map[string]string) { - pt, err := models.NewPoint( - name, tags, map[string]interface{}{"value": value}, timestamp, - ) - if err != nil { - return - } - w.AddPoints([]models.Point{pt}) -} - -// AddPoints adds a new time series point -func (w *WriteShardRequest) AddPoints(points []models.Point) { - for _, p := range points { - b, err := p.MarshalBinary() - if err != nil { - // A error here means that we create a point higher in the stack that we could - // not marshal to a byte slice. If that happens, the endpoint that created that - // point needs to be fixed. - panic(fmt.Sprintf("failed to marshal point: `%v`: %v", p, err)) - } - w.pb.Points = append(w.pb.Points, b) - } -} - -// MarshalBinary encodes the object to a binary format. -func (w *WriteShardRequest) MarshalBinary() ([]byte, error) { - return proto.Marshal(&w.pb) -} - -// UnmarshalBinary populates WritePointRequest from a binary format. -func (w *WriteShardRequest) UnmarshalBinary(buf []byte) error { - if err := proto.Unmarshal(buf, &w.pb); err != nil { - return err - } - return nil -} - -func (w *WriteShardRequest) unmarshalPoints() []models.Point { - points := make([]models.Point, len(w.pb.GetPoints())) - for i, p := range w.pb.GetPoints() { - pt, err := models.NewPointFromBytes(p) - if err != nil { - // A error here means that one node created a valid point and sent us an - // unparseable version. We could log and drop the point and allow - // anti-entropy to resolve the discrepancy, but this shouldn't ever happen. 
- panic(fmt.Sprintf("failed to parse point: `%v`: %v", string(p), err)) - } - - points[i] = pt - } - return points -} - -// SetCode sets the Code -func (w *WriteShardResponse) SetCode(code int) { w.pb.Code = proto.Int32(int32(code)) } - -// SetMessage sets the Message -func (w *WriteShardResponse) SetMessage(message string) { w.pb.Message = &message } - -// Code returns the Code -func (w *WriteShardResponse) Code() int { return int(w.pb.GetCode()) } - -// Message returns the Message -func (w *WriteShardResponse) Message() string { return w.pb.GetMessage() } - -// MarshalBinary encodes the object to a binary format. -func (w *WriteShardResponse) MarshalBinary() ([]byte, error) { - return proto.Marshal(&w.pb) -} - -// UnmarshalBinary populates WritePointRequest from a binary format. -func (w *WriteShardResponse) UnmarshalBinary(buf []byte) error { - if err := proto.Unmarshal(buf, &w.pb); err != nil { - return err - } - return nil -} diff -Nru influxdb-0.10.0+dfsg1/cluster/rpc_test.go influxdb-1.1.1+dfsg1/cluster/rpc_test.go --- influxdb-0.10.0+dfsg1/cluster/rpc_test.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cluster/rpc_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,110 +0,0 @@ -package cluster - -import ( - "testing" - "time" -) - -func TestWriteShardRequestBinary(t *testing.T) { - sr := &WriteShardRequest{} - - sr.SetShardID(uint64(1)) - if exp := uint64(1); sr.ShardID() != exp { - t.Fatalf("ShardID mismatch: got %v, exp %v", sr.ShardID(), exp) - } - - sr.AddPoint("cpu", 1.0, time.Unix(0, 0), map[string]string{"host": "serverA"}) - sr.AddPoint("cpu", 2.0, time.Unix(0, 0).Add(time.Hour), nil) - sr.AddPoint("cpu_load", 3.0, time.Unix(0, 0).Add(time.Hour+time.Second), nil) - - b, err := sr.MarshalBinary() - if err != nil { - t.Fatalf("WritePointsRequest.MarshalBinary() failed: %v", err) - } - if len(b) == 0 { - t.Fatalf("WritePointsRequest.MarshalBinary() returned 0 bytes") - } - - got := &WriteShardRequest{} - if err := got.UnmarshalBinary(b); err != nil { - t.Fatalf("WritePointsRequest.UnmarshalMarshalBinary() failed: %v", err) - } - - if got.ShardID() != sr.ShardID() { - t.Errorf("ShardID mismatch: got %v, exp %v", got.ShardID(), sr.ShardID()) - } - - if len(got.Points()) != len(sr.Points()) { - t.Errorf("Points count mismatch: got %v, exp %v", len(got.Points()), len(sr.Points())) - } - - srPoints := sr.Points() - gotPoints := got.Points() - for i, p := range srPoints { - g := gotPoints[i] - - if g.Name() != p.Name() { - t.Errorf("Point %d name mismatch: got %v, exp %v", i, g.Name(), p.Name()) - } - - if !g.Time().Equal(p.Time()) { - t.Errorf("Point %d time mismatch: got %v, exp %v", i, g.Time(), p.Time()) - } - - if g.HashID() != p.HashID() { - t.Errorf("Point #%d HashID() mismatch: got %v, exp %v", i, g.HashID(), p.HashID()) - } - - for k, v := range p.Tags() { - if g.Tags()[k] != v { - t.Errorf("Point #%d tag mismatch: got %v, exp %v", i, k, v) - } - } - - if len(p.Fields()) != len(g.Fields()) { - t.Errorf("Point %d field count mismatch: got %v, exp %v", i, len(g.Fields()), len(p.Fields())) - } - - for j, f := range p.Fields() { - if g.Fields()[j] != f { - t.Errorf("Point %d field mismatch: got %v, exp %v", i, g.Fields()[j], f) - } - } - } -} - -func TestWriteShardResponseBinary(t *testing.T) { - sr := &WriteShardResponse{} - sr.SetCode(10) - sr.SetMessage("foo") - b, err := sr.MarshalBinary() - - if exp := 10; sr.Code() != exp { - t.Fatalf("Code mismatch: got %v, exp %v", sr.Code(), exp) - } - - if exp := "foo"; sr.Message() != exp { - t.Fatalf("Message 
mismatch: got %v, exp %v", sr.Message(), exp) - } - - if err != nil { - t.Fatalf("WritePointsResponse.MarshalBinary() failed: %v", err) - } - if len(b) == 0 { - t.Fatalf("WritePointsResponse.MarshalBinary() returned 0 bytes") - } - - got := &WriteShardResponse{} - if err := got.UnmarshalBinary(b); err != nil { - t.Fatalf("WritePointsResponse.UnmarshalMarshalBinary() failed: %v", err) - } - - if got.Code() != sr.Code() { - t.Errorf("Code mismatch: got %v, exp %v", got.Code(), sr.Code()) - } - - if got.Message() != sr.Message() { - t.Errorf("Message mismatch: got %v, exp %v", got.Message(), sr.Message()) - } - -} diff -Nru influxdb-0.10.0+dfsg1/cluster/service.go influxdb-1.1.1+dfsg1/cluster/service.go --- influxdb-0.10.0+dfsg1/cluster/service.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cluster/service.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,371 +0,0 @@ -package cluster - -import ( - "encoding/binary" - "encoding/json" - "expvar" - "fmt" - "io" - "log" - "net" - "os" - "strings" - "sync" - - "github.com/influxdb/influxdb" - "github.com/influxdb/influxdb/influxql" - "github.com/influxdb/influxdb/models" - "github.com/influxdb/influxdb/services/meta" - "github.com/influxdb/influxdb/tsdb" -) - -// MaxMessageSize defines how large a message can be before we reject it -const MaxMessageSize = 1024 * 1024 * 1024 // 1GB - -// MuxHeader is the header byte used in the TCP mux. -const MuxHeader = 2 - -// Statistics maintained by the cluster package -const ( - writeShardReq = "writeShardReq" - writeShardPointsReq = "writeShardPointsReq" - writeShardFail = "writeShardFail" - mapShardReq = "mapShardReq" - mapShardResp = "mapShardResp" -) - -// Service processes data received over raw TCP connections. -type Service struct { - mu sync.RWMutex - - wg sync.WaitGroup - closing chan struct{} - - Listener net.Listener - - MetaClient interface { - ShardOwner(shardID uint64) (string, string, *meta.ShardGroupInfo) - } - - TSDBStore interface { - CreateShard(database, policy string, shardID uint64) error - WriteToShard(shardID uint64, points []models.Point) error - CreateMapper(shardID uint64, stmt influxql.Statement, chunkSize int) (tsdb.Mapper, error) - } - - Logger *log.Logger - statMap *expvar.Map -} - -// NewService returns a new instance of Service. -func NewService(c Config) *Service { - return &Service{ - closing: make(chan struct{}), - Logger: log.New(os.Stderr, "[cluster] ", log.LstdFlags), - statMap: influxdb.NewStatistics("cluster", "cluster", nil), - } -} - -// Open opens the network listener and begins serving requests. -func (s *Service) Open() error { - - s.Logger.Println("Starting cluster service") - // Begin serving conections. - s.wg.Add(1) - go s.serve() - - return nil -} - -// SetLogger sets the internal logger to the logger passed in. -func (s *Service) SetLogger(l *log.Logger) { - s.Logger = l -} - -// serve accepts connections from the listener and handles them. -func (s *Service) serve() { - defer s.wg.Done() - - for { - // Check if the service is shutting down. - select { - case <-s.closing: - return - default: - } - - // Accept the next connection. - conn, err := s.Listener.Accept() - if err != nil { - if strings.Contains(err.Error(), "connection closed") { - s.Logger.Printf("cluster service accept error: %s", err) - return - } - s.Logger.Printf("accept error: %s", err) - continue - } - - // Delegate connection handling to a separate goroutine. 
- s.wg.Add(1) - go func() { - defer s.wg.Done() - s.handleConn(conn) - }() - } -} - -// Close shuts down the listener and waits for all connections to finish. -func (s *Service) Close() error { - if s.Listener != nil { - s.Listener.Close() - } - - // Shut down all handlers. - close(s.closing) - s.wg.Wait() - - return nil -} - -// handleConn services an individual TCP connection. -func (s *Service) handleConn(conn net.Conn) { - // Ensure connection is closed when service is closed. - closing := make(chan struct{}) - defer close(closing) - go func() { - select { - case <-closing: - case <-s.closing: - } - conn.Close() - }() - - s.Logger.Printf("accept remote connection from %v\n", conn.RemoteAddr()) - defer func() { - s.Logger.Printf("close remote connection from %v\n", conn.RemoteAddr()) - }() - for { - // Read type-length-value. - typ, buf, err := ReadTLV(conn) - if err != nil { - if strings.HasSuffix(err.Error(), "EOF") { - return - } - s.Logger.Printf("unable to read type-length-value %s", err) - return - } - - // Delegate message processing by type. - switch typ { - case writeShardRequestMessage: - s.statMap.Add(writeShardReq, 1) - err := s.processWriteShardRequest(buf) - if err != nil { - s.Logger.Printf("process write shard error: %s", err) - } - s.writeShardResponse(conn, err) - case mapShardRequestMessage: - s.statMap.Add(mapShardReq, 1) - err := s.processMapShardRequest(conn, buf) - if err != nil { - s.Logger.Printf("process map shard error: %s", err) - if err := writeMapShardResponseMessage(conn, NewMapShardResponse(1, err.Error())); err != nil { - s.Logger.Printf("process map shard error writing response: %s", err.Error()) - } - } - default: - s.Logger.Printf("cluster service message type not found: %d", typ) - } - } -} - -func (s *Service) processWriteShardRequest(buf []byte) error { - // Build request - var req WriteShardRequest - if err := req.UnmarshalBinary(buf); err != nil { - return err - } - - points := req.Points() - s.statMap.Add(writeShardPointsReq, int64(len(points))) - err := s.TSDBStore.WriteToShard(req.ShardID(), points) - - // We may have received a write for a shard that we don't have locally because the - // sending node may have just created the shard (via the metastore) and the write - // arrived before the local store could create the shard. In this case, we need - // to check the metastore to determine what database and retention policy this - // shard should reside within. - if err == tsdb.ErrShardNotFound { - - // Query the metastore for the owner of this shard - database, retentionPolicy, sgi := s.MetaClient.ShardOwner(req.ShardID()) - if sgi == nil { - // If we can't find it, then we need to drop this request - // as it is no longer valid. This could happen if writes were queued via - // hinted handoff and delivered after a shard group was deleted. - s.Logger.Printf("drop write request: shard=%d. shard group does not exist or was deleted", req.ShardID()) - return nil - } - - err = s.TSDBStore.CreateShard(database, retentionPolicy, req.ShardID()) - if err != nil { - return err - } - return s.TSDBStore.WriteToShard(req.ShardID(), points) - } - - if err != nil { - s.statMap.Add(writeShardFail, 1) - return fmt.Errorf("write shard %d: %s", req.ShardID(), err) - } - - return nil -} - -func (s *Service) writeShardResponse(w io.Writer, e error) { - // Build response. - var resp WriteShardResponse - if e != nil { - resp.SetCode(1) - resp.SetMessage(e.Error()) - } else { - resp.SetCode(0) - } - - // Marshal response to binary. 
- buf, err := resp.MarshalBinary() - if err != nil { - s.Logger.Printf("error marshalling shard response: %s", err) - return - } - - // Write to connection. - if err := WriteTLV(w, writeShardResponseMessage, buf); err != nil { - s.Logger.Printf("write shard response error: %s", err) - } -} - -func (s *Service) processMapShardRequest(w io.Writer, buf []byte) error { - // Decode request - var req MapShardRequest - if err := req.UnmarshalBinary(buf); err != nil { - return err - } - - // Parse the statement. - q, err := influxql.ParseQuery(req.Query()) - if err != nil { - return fmt.Errorf("processing map shard: %s", err) - } else if len(q.Statements) != 1 { - return fmt.Errorf("processing map shard: expected 1 statement but got %d", len(q.Statements)) - } - - m, err := s.TSDBStore.CreateMapper(req.ShardID(), q.Statements[0], int(req.ChunkSize())) - if err != nil { - return fmt.Errorf("create mapper: %s", err) - } - if m == nil { - return writeMapShardResponseMessage(w, NewMapShardResponse(0, "")) - } - - if err := m.Open(); err != nil { - return fmt.Errorf("mapper open: %s", err) - } - defer m.Close() - - var metaSent bool - for { - var resp MapShardResponse - - if !metaSent { - resp.SetTagSets(m.TagSets()) - resp.SetFields(m.Fields()) - metaSent = true - } - - chunk, err := m.NextChunk() - if err != nil { - return fmt.Errorf("next chunk: %s", err) - } - - // NOTE: Even if the chunk is nil, we still need to send one - // empty response to let the other side know we're out of data. - - if chunk != nil { - b, err := json.Marshal(chunk) - if err != nil { - return fmt.Errorf("encoding: %s", err) - } - resp.SetData(b) - } - - // Write to connection. - resp.SetCode(0) - if err := writeMapShardResponseMessage(w, &resp); err != nil { - return err - } - s.statMap.Add(mapShardResp, 1) - - if chunk == nil { - // All mapper data sent. - return nil - } - } -} - -func writeMapShardResponseMessage(w io.Writer, msg *MapShardResponse) error { - buf, err := msg.MarshalBinary() - if err != nil { - return err - } - return WriteTLV(w, mapShardResponseMessage, buf) -} - -// ReadTLV reads a type-length-value record from r. -func ReadTLV(r io.Reader) (byte, []byte, error) { - var typ [1]byte - if _, err := io.ReadFull(r, typ[:]); err != nil { - return 0, nil, fmt.Errorf("read message type: %s", err) - } - - // Read the size of the message. - var sz int64 - if err := binary.Read(r, binary.BigEndian, &sz); err != nil { - return 0, nil, fmt.Errorf("read message size: %s", err) - } - - if sz == 0 { - return 0, nil, fmt.Errorf("invalid message size: %d", sz) - } - - if sz >= MaxMessageSize { - return 0, nil, fmt.Errorf("max message size of %d exceeded: %d", MaxMessageSize, sz) - } - - // Read the value. - buf := make([]byte, sz) - if _, err := io.ReadFull(r, buf); err != nil { - return 0, nil, fmt.Errorf("read message value: %s", err) - } - - return typ[0], buf, nil -} - -// WriteTLV writes a type-length-value record to w. -func WriteTLV(w io.Writer, typ byte, buf []byte) error { - if _, err := w.Write([]byte{typ}); err != nil { - return fmt.Errorf("write message type: %s", err) - } - - // Write the size of the message. - if err := binary.Write(w, binary.BigEndian, int64(len(buf))); err != nil { - return fmt.Errorf("write message size: %s", err) - } - - // Write the value. 
- if _, err := w.Write(buf); err != nil { - return fmt.Errorf("write message value: %s", err) - } - - return nil -} diff -Nru influxdb-0.10.0+dfsg1/cluster/service_test.go influxdb-1.1.1+dfsg1/cluster/service_test.go --- influxdb-0.10.0+dfsg1/cluster/service_test.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cluster/service_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,110 +0,0 @@ -package cluster_test - -import ( - "fmt" - "net" - "time" - - "github.com/influxdb/influxdb/cluster" - "github.com/influxdb/influxdb/influxql" - "github.com/influxdb/influxdb/models" - "github.com/influxdb/influxdb/services/meta" - "github.com/influxdb/influxdb/tcp" - "github.com/influxdb/influxdb/tsdb" -) - -type metaClient struct { - host string -} - -func (m *metaClient) DataNode(nodeID uint64) (*meta.NodeInfo, error) { - return &meta.NodeInfo{ - ID: nodeID, - TCPHost: m.host, - }, nil -} - -type testService struct { - nodeID uint64 - ln net.Listener - muxln net.Listener - writeShardFunc func(shardID uint64, points []models.Point) error - createShardFunc func(database, policy string, shardID uint64) error - createMapperFunc func(shardID uint64, stmt influxql.Statement, chunkSize int) (tsdb.Mapper, error) -} - -func newTestWriteService(f func(shardID uint64, points []models.Point) error) testService { - ln, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - panic(err) - } - - mux := tcp.NewMux() - muxln := mux.Listen(cluster.MuxHeader) - go mux.Serve(ln) - - return testService{ - writeShardFunc: f, - ln: ln, - muxln: muxln, - } -} - -func (ts *testService) Close() { - if ts.ln != nil { - ts.ln.Close() - } -} - -type serviceResponses []serviceResponse -type serviceResponse struct { - shardID uint64 - ownerID uint64 - points []models.Point -} - -func (t testService) WriteToShard(shardID uint64, points []models.Point) error { - return t.writeShardFunc(shardID, points) -} - -func (t testService) CreateShard(database, policy string, shardID uint64) error { - return t.createShardFunc(database, policy, shardID) -} - -func (t testService) CreateMapper(shardID uint64, stmt influxql.Statement, chunkSize int) (tsdb.Mapper, error) { - return t.createMapperFunc(shardID, stmt, chunkSize) -} - -func writeShardSuccess(shardID uint64, points []models.Point) error { - responses <- &serviceResponse{ - shardID: shardID, - points: points, - } - return nil -} - -func writeShardFail(shardID uint64, points []models.Point) error { - return fmt.Errorf("failed to write") -} - -func writeShardSlow(shardID uint64, points []models.Point) error { - time.Sleep(1 * time.Second) - return nil -} - -var responses = make(chan *serviceResponse, 1024) - -func (testService) ResponseN(n int) ([]*serviceResponse, error) { - var a []*serviceResponse - for { - select { - case r := <-responses: - a = append(a, r) - if len(a) == n { - return a, nil - } - case <-time.After(time.Second): - return a, fmt.Errorf("unexpected response count: expected: %d, actual: %d", n, len(a)) - } - } -} diff -Nru influxdb-0.10.0+dfsg1/cluster/shard_mapper.go influxdb-1.1.1+dfsg1/cluster/shard_mapper.go --- influxdb-0.10.0+dfsg1/cluster/shard_mapper.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cluster/shard_mapper.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,263 +0,0 @@ -package cluster - -import ( - "encoding/json" - "fmt" - "math/rand" - "net" - "time" - - "github.com/influxdb/influxdb" - "github.com/influxdb/influxdb/influxql" - "github.com/influxdb/influxdb/services/meta" - "github.com/influxdb/influxdb/tsdb" -) - -// 
ShardMapper is responsible for providing mappers for requested shards. It is -// responsible for creating those mappers from the local store, or reaching -// out to another node on the cluster. -type ShardMapper struct { - ForceRemoteMapping bool // All shards treated as remote. Useful for testing. - - Node *influxdb.Node - - MetaClient interface { - DataNode(id uint64) (ni *meta.NodeInfo, err error) - } - - TSDBStore interface { - CreateMapper(shardID uint64, stmt influxql.Statement, chunkSize int) (tsdb.Mapper, error) - } - - timeout time.Duration - pool *clientPool -} - -// NewShardMapper returns a mapper of local and remote shards. -func NewShardMapper(timeout time.Duration) *ShardMapper { - return &ShardMapper{ - pool: newClientPool(), - timeout: timeout, - } -} - -// CreateMapper returns a Mapper for the given shard ID. -func (s *ShardMapper) CreateMapper(sh meta.ShardInfo, stmt influxql.Statement, chunkSize int) (tsdb.Mapper, error) { - // Create a remote mapper if the local node doesn't own the shard. - if !sh.OwnedBy(s.Node.ID) || s.ForceRemoteMapping { - // Pick a node in a pseudo-random manner. - conn, err := s.dial(sh.Owners[rand.Intn(len(sh.Owners))].NodeID) - if err != nil { - return nil, err - } - conn.SetDeadline(time.Now().Add(s.timeout)) - - return NewRemoteMapper(conn, sh.ID, stmt, chunkSize), nil - } - - // If it is local then return the mapper from the store. - m, err := s.TSDBStore.CreateMapper(sh.ID, stmt, chunkSize) - if err != nil { - return nil, err - } - - return m, nil -} - -func (s *ShardMapper) dial(nodeID uint64) (net.Conn, error) { - ni, err := s.MetaClient.DataNode(nodeID) - if err != nil { - return nil, err - } - conn, err := net.Dial("tcp", ni.TCPHost) - if err != nil { - return nil, err - } - - // Write the cluster multiplexing header byte - conn.Write([]byte{MuxHeader}) - - return conn, nil -} - -// RemoteMapper implements the tsdb.Mapper interface. It connects to a remote node, -// sends a query, and interprets the stream of data that comes back. -type RemoteMapper struct { - shardID uint64 - stmt influxql.Statement - chunkSize int - - tagsets []string - fields []string - - conn net.Conn - bufferedResponse *MapShardResponse - - unmarshallers []tsdb.UnmarshalFunc // Mapping-specific unmarshal functions. -} - -// NewRemoteMapper returns a new remote mapper using the given connection. -func NewRemoteMapper(c net.Conn, shardID uint64, stmt influxql.Statement, chunkSize int) *RemoteMapper { - return &RemoteMapper{ - conn: c, - shardID: shardID, - stmt: stmt, - chunkSize: chunkSize, - } -} - -// Open connects to the remote node and starts receiving data. -func (r *RemoteMapper) Open() (err error) { - defer func() { - if err != nil { - r.conn.Close() - } - }() - - // Build Map request. - var request MapShardRequest - request.SetShardID(r.shardID) - request.SetQuery(r.stmt.String()) - request.SetChunkSize(int32(r.chunkSize)) - - // Marshal into protocol buffers. - buf, err := request.MarshalBinary() - if err != nil { - return err - } - - // Write request. - if err := WriteTLV(r.conn, mapShardRequestMessage, buf); err != nil { - return err - } - - // Read the response. - _, buf, err = ReadTLV(r.conn) - if err != nil { - return err - } - - // Unmarshal response. 
- r.bufferedResponse = &MapShardResponse{} - if err := r.bufferedResponse.UnmarshalBinary(buf); err != nil { - return err - } - - if r.bufferedResponse.Code() != 0 { - return fmt.Errorf("error code %d: %s", r.bufferedResponse.Code(), r.bufferedResponse.Message()) - } - - // Decode the first response to get the TagSets. - r.tagsets = r.bufferedResponse.TagSets() - r.fields = r.bufferedResponse.Fields() - - // Set up each mapping function for this statement. - if stmt, ok := r.stmt.(*influxql.SelectStatement); ok { - for _, c := range stmt.FunctionCalls() { - fn, err := tsdb.InitializeUnmarshaller(c) - if err != nil { - return err - } - r.unmarshallers = append(r.unmarshallers, fn) - } - } - - return nil -} - -// TagSets returns the TagSets -func (r *RemoteMapper) TagSets() []string { - return r.tagsets -} - -// Fields returns RemoteMapper's Fields -func (r *RemoteMapper) Fields() []string { - return r.fields -} - -// NextChunk returns the next chunk read from the remote node to the client. -func (r *RemoteMapper) NextChunk() (chunk interface{}, err error) { - var response *MapShardResponse - if r.bufferedResponse != nil { - response = r.bufferedResponse - r.bufferedResponse = nil - } else { - response = &MapShardResponse{} - - // Read the response. - _, buf, err := ReadTLV(r.conn) - if err != nil { - return nil, err - } - - // Unmarshal response. - if err := response.UnmarshalBinary(buf); err != nil { - return nil, err - } - - if response.Code() != 0 { - return nil, fmt.Errorf("error code %d: %s", response.Code(), response.Message()) - } - } - - if response.Data() == nil { - return nil, nil - } - - moj := &tsdb.MapperOutputJSON{} - if err := json.Unmarshal(response.Data(), moj); err != nil { - return nil, err - } - mvj := []*tsdb.MapperValueJSON{} - if err := json.Unmarshal(moj.Values, &mvj); err != nil { - return nil, err - } - - // Prep the non-JSON version of Mapper output. - mo := &tsdb.MapperOutput{ - Name: moj.Name, - Tags: moj.Tags, - Fields: moj.Fields, - CursorKey: moj.CursorKey, - } - - if len(mvj) == 1 && len(mvj[0].AggData) > 0 { - // The MapperValue is carrying aggregate data, so run it through the - // custom unmarshallers for the map functions through which the data - // was mapped. - aggValues := []interface{}{} - for i, b := range mvj[0].AggData { - v, err := r.unmarshallers[i](b) - if err != nil { - return nil, err - } - aggValues = append(aggValues, v) - } - mo.Values = []*tsdb.MapperValue{&tsdb.MapperValue{ - Time: mvj[0].Time, - Value: aggValues, - Tags: mvj[0].Tags, - }} - } else { - // Must be raw data instead. - for _, v := range mvj { - var rawValue interface{} - if err := json.Unmarshal(v.RawData, &rawValue); err != nil { - return nil, err - } - - mo.Values = append(mo.Values, &tsdb.MapperValue{ - Time: v.Time, - Value: rawValue, - Tags: v.Tags, - }) - } - } - - return mo, nil -} - -// Close the Mapper -func (r *RemoteMapper) Close() { - r.conn.Close() -} diff -Nru influxdb-0.10.0+dfsg1/cluster/shard_mapper_test.go influxdb-1.1.1+dfsg1/cluster/shard_mapper_test.go --- influxdb-0.10.0+dfsg1/cluster/shard_mapper_test.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cluster/shard_mapper_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,110 +0,0 @@ -package cluster - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net" - "testing" - - "github.com/influxdb/influxdb/influxql" - "github.com/influxdb/influxdb/tsdb" -) - -// remoteShardResponder implements the remoteShardConn interface. 
-type remoteShardResponder struct { - net.Conn - t *testing.T - rxBytes []byte - - buffer *bytes.Buffer -} - -func newRemoteShardResponder(outputs []*tsdb.MapperOutput, tagsets []string) *remoteShardResponder { - r := &remoteShardResponder{} - a := make([]byte, 0, 1024) - r.buffer = bytes.NewBuffer(a) - - // Pump the outputs in the buffer for later reading. - for _, o := range outputs { - resp := &MapShardResponse{} - resp.SetCode(0) - if o != nil { - d, _ := json.Marshal(o) - resp.SetData(d) - resp.SetTagSets(tagsets) - } - - g, _ := resp.MarshalBinary() - WriteTLV(r.buffer, mapShardResponseMessage, g) - } - - return r -} - -func (r remoteShardResponder) Close() error { return nil } -func (r remoteShardResponder) Read(p []byte) (n int, err error) { - return io.ReadFull(r.buffer, p) -} - -func (r remoteShardResponder) Write(p []byte) (n int, err error) { - if r.rxBytes == nil { - r.rxBytes = make([]byte, 0) - } - r.rxBytes = append(r.rxBytes, p...) - return len(p), nil -} - -// Ensure a RemoteMapper can process valid responses from a remote shard. -func TestShardWriter_RemoteMapper_Success(t *testing.T) { - expTagSets := []string{"tagsetA"} - expOutput := &tsdb.MapperOutput{ - Name: "cpu", - Tags: map[string]string{"host": "serverA"}, - } - - c := newRemoteShardResponder([]*tsdb.MapperOutput{expOutput, nil}, expTagSets) - - r := NewRemoteMapper(c, 1234, mustParseStmt("SELECT * FROM CPU"), 10) - if err := r.Open(); err != nil { - t.Fatalf("failed to open remote mapper: %s", err.Error()) - } - - if r.TagSets()[0] != expTagSets[0] { - t.Fatalf("incorrect tagsets received, exp %v, got %v", expTagSets, r.TagSets()) - } - - // Get first chunk from mapper. - chunk, err := r.NextChunk() - if err != nil { - t.Fatalf("failed to get next chunk from mapper: %s", err.Error()) - } - output, ok := chunk.(*tsdb.MapperOutput) - if !ok { - t.Fatal("chunk is not of expected type") - } - if output.Name != "cpu" { - t.Fatalf("received output incorrect, exp: %v, got %v", expOutput, output) - } - - // Next chunk should be nil, indicating no more data. - chunk, err = r.NextChunk() - if err != nil { - t.Fatalf("failed to get next chunk from mapper: %s", err.Error()) - } - if chunk != nil { - t.Fatal("received more chunks when none expected") - } -} - -// mustParseStmt parses a single statement or panics. -func mustParseStmt(stmt string) influxql.Statement { - q, err := influxql.ParseQuery(stmt) - if err != nil { - panic(err) - } else if len(q.Statements) != 1 { - panic(fmt.Sprintf("expected 1 statement but got %d", len(q.Statements))) - } - return q.Statements[0] -} diff -Nru influxdb-0.10.0+dfsg1/cluster/shard_writer.go influxdb-1.1.1+dfsg1/cluster/shard_writer.go --- influxdb-0.10.0+dfsg1/cluster/shard_writer.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cluster/shard_writer.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,166 +0,0 @@ -package cluster - -import ( - "fmt" - "net" - "time" - - "github.com/influxdb/influxdb/models" - "github.com/influxdb/influxdb/services/meta" -) - -const ( - writeShardRequestMessage byte = iota + 1 - writeShardResponseMessage - mapShardRequestMessage - mapShardResponseMessage -) - -// ShardWriter writes a set of points to a shard. -type ShardWriter struct { - pool *clientPool - timeout time.Duration - maxConnections int - - MetaClient interface { - DataNode(id uint64) (ni *meta.NodeInfo, err error) - } -} - -// NewShardWriter returns a new instance of ShardWriter. 
-func NewShardWriter(timeout time.Duration, maxConnections int) *ShardWriter { - return &ShardWriter{ - pool: newClientPool(), - timeout: timeout, - maxConnections: maxConnections, - } -} - -// WriteShard writes time series points to a shard -func (w *ShardWriter) WriteShard(shardID, ownerID uint64, points []models.Point) error { - c, err := w.dial(ownerID) - if err != nil { - return err - } - - conn, ok := c.(*pooledConn) - if !ok { - panic("wrong connection type") - } - defer func(conn net.Conn) { - conn.Close() // return to pool - }(conn) - - // Build write request. - var request WriteShardRequest - request.SetShardID(shardID) - request.AddPoints(points) - - // Marshal into protocol buffers. - buf, err := request.MarshalBinary() - if err != nil { - return err - } - - // Write request. - conn.SetWriteDeadline(time.Now().Add(w.timeout)) - if err := WriteTLV(conn, writeShardRequestMessage, buf); err != nil { - conn.MarkUnusable() - return err - } - - // Read the response. - conn.SetReadDeadline(time.Now().Add(w.timeout)) - _, buf, err = ReadTLV(conn) - if err != nil { - conn.MarkUnusable() - return err - } - - // Unmarshal response. - var response WriteShardResponse - if err := response.UnmarshalBinary(buf); err != nil { - return err - } - - if response.Code() != 0 { - return fmt.Errorf("error code %d: %s", response.Code(), response.Message()) - } - - return nil -} - -func (w *ShardWriter) dial(nodeID uint64) (net.Conn, error) { - // If we don't have a connection pool for that addr yet, create one - _, ok := w.pool.getPool(nodeID) - if !ok { - factory := &connFactory{nodeID: nodeID, clientPool: w.pool, timeout: w.timeout} - factory.metaClient = w.MetaClient - - p, err := NewBoundedPool(1, w.maxConnections, w.timeout, factory.dial) - if err != nil { - return nil, err - } - w.pool.setPool(nodeID, p) - } - return w.pool.conn(nodeID) -} - -// Close closes ShardWriter's pool -func (w *ShardWriter) Close() error { - if w.pool == nil { - return fmt.Errorf("client already closed") - } - w.pool.close() - w.pool = nil - return nil -} - -const ( - maxConnections = 500 - maxRetries = 3 -) - -var errMaxConnectionsExceeded = fmt.Errorf("can not exceed max connections of %d", maxConnections) - -type connFactory struct { - nodeID uint64 - timeout time.Duration - - clientPool interface { - size() int - } - - metaClient interface { - DataNode(id uint64) (ni *meta.NodeInfo, err error) - } -} - -func (c *connFactory) dial() (net.Conn, error) { - if c.clientPool.size() > maxConnections { - return nil, errMaxConnectionsExceeded - } - - ni, err := c.metaClient.DataNode(c.nodeID) - if err != nil { - return nil, err - } - - if ni == nil { - return nil, fmt.Errorf("node %d does not exist", c.nodeID) - } - - conn, err := net.DialTimeout("tcp", ni.TCPHost, c.timeout) - if err != nil { - return nil, err - } - - // Write a marker byte for cluster messages. 
- _, err = conn.Write([]byte{MuxHeader}) - if err != nil { - conn.Close() - return nil, err - } - - return conn, nil -} diff -Nru influxdb-0.10.0+dfsg1/cluster/shard_writer_test.go influxdb-1.1.1+dfsg1/cluster/shard_writer_test.go --- influxdb-0.10.0+dfsg1/cluster/shard_writer_test.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cluster/shard_writer_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,219 +0,0 @@ -package cluster_test - -import ( - "net" - "strings" - "testing" - "time" - - "github.com/influxdb/influxdb/cluster" - "github.com/influxdb/influxdb/models" - "github.com/influxdb/influxdb/toml" -) - -// Ensure the shard writer can successful write a single request. -func TestShardWriter_WriteShard_Success(t *testing.T) { - ts := newTestWriteService(writeShardSuccess) - s := cluster.NewService(cluster.Config{}) - s.Listener = ts.muxln - s.TSDBStore = ts - if err := s.Open(); err != nil { - t.Fatal(err) - } - defer s.Close() - defer ts.Close() - - w := cluster.NewShardWriter(time.Minute, 1) - w.MetaClient = &metaClient{host: ts.ln.Addr().String()} - - // Build a single point. - now := time.Now() - var points []models.Point - points = append(points, models.MustNewPoint("cpu", models.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now)) - - // Write to shard and close. - if err := w.WriteShard(1, 2, points); err != nil { - t.Fatal(err) - } else if err := w.Close(); err != nil { - t.Fatal(err) - } - - // Validate response. - responses, err := ts.ResponseN(1) - if err != nil { - t.Fatal(err) - } else if responses[0].shardID != 1 { - t.Fatalf("unexpected shard id: %d", responses[0].shardID) - } - - // Validate point. - if p := responses[0].points[0]; p.Name() != "cpu" { - t.Fatalf("unexpected name: %s", p.Name()) - } else if p.Fields()["value"] != int64(100) { - t.Fatalf("unexpected 'value' field: %d", p.Fields()["value"]) - } else if p.Tags()["host"] != "server01" { - t.Fatalf("unexpected 'host' tag: %s", p.Tags()["host"]) - } else if p.Time().UnixNano() != now.UnixNano() { - t.Fatalf("unexpected time: %s", p.Time()) - } -} - -// Ensure the shard writer can successful write a multiple requests. -func TestShardWriter_WriteShard_Multiple(t *testing.T) { - ts := newTestWriteService(writeShardSuccess) - s := cluster.NewService(cluster.Config{}) - s.Listener = ts.muxln - s.TSDBStore = ts - if err := s.Open(); err != nil { - t.Fatal(err) - } - defer s.Close() - defer ts.Close() - - w := cluster.NewShardWriter(time.Minute, 1) - w.MetaClient = &metaClient{host: ts.ln.Addr().String()} - - // Build a single point. - now := time.Now() - var points []models.Point - points = append(points, models.MustNewPoint("cpu", models.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now)) - - // Write to shard twice and close. - if err := w.WriteShard(1, 2, points); err != nil { - t.Fatal(err) - } else if err := w.WriteShard(1, 2, points); err != nil { - t.Fatal(err) - } else if err := w.Close(); err != nil { - t.Fatal(err) - } - - // Validate response. - responses, err := ts.ResponseN(1) - if err != nil { - t.Fatal(err) - } else if responses[0].shardID != 1 { - t.Fatalf("unexpected shard id: %d", responses[0].shardID) - } - - // Validate point. 
- if p := responses[0].points[0]; p.Name() != "cpu" { - t.Fatalf("unexpected name: %s", p.Name()) - } else if p.Fields()["value"] != int64(100) { - t.Fatalf("unexpected 'value' field: %d", p.Fields()["value"]) - } else if p.Tags()["host"] != "server01" { - t.Fatalf("unexpected 'host' tag: %s", p.Tags()["host"]) - } else if p.Time().UnixNano() != now.UnixNano() { - t.Fatalf("unexpected time: %s", p.Time()) - } -} - -// Ensure the shard writer returns an error when the server fails to accept the write. -func TestShardWriter_WriteShard_Error(t *testing.T) { - ts := newTestWriteService(writeShardFail) - s := cluster.NewService(cluster.Config{}) - s.Listener = ts.muxln - s.TSDBStore = ts - if err := s.Open(); err != nil { - t.Fatal(err) - } - defer s.Close() - defer ts.Close() - - w := cluster.NewShardWriter(time.Minute, 1) - w.MetaClient = &metaClient{host: ts.ln.Addr().String()} - now := time.Now() - - shardID := uint64(1) - ownerID := uint64(2) - var points []models.Point - points = append(points, models.MustNewPoint( - "cpu", models.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now, - )) - - if err := w.WriteShard(shardID, ownerID, points); err == nil || err.Error() != "error code 1: write shard 1: failed to write" { - t.Fatalf("unexpected error: %v", err) - } -} - -// Ensure the shard writer returns an error when dialing times out. -func TestShardWriter_Write_ErrDialTimeout(t *testing.T) { - ts := newTestWriteService(writeShardSuccess) - s := cluster.NewService(cluster.Config{}) - s.Listener = ts.muxln - s.TSDBStore = ts - if err := s.Open(); err != nil { - t.Fatal(err) - } - defer s.Close() - defer ts.Close() - - w := cluster.NewShardWriter(time.Nanosecond, 1) - w.MetaClient = &metaClient{host: ts.ln.Addr().String()} - now := time.Now() - - shardID := uint64(1) - ownerID := uint64(2) - var points []models.Point - points = append(points, models.MustNewPoint( - "cpu", models.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now, - )) - - if err, exp := w.WriteShard(shardID, ownerID, points), "i/o timeout"; err == nil || !strings.Contains(err.Error(), exp) { - t.Fatalf("expected error %v, to contain %s", err, exp) - } -} - -// Ensure the shard writer returns an error when reading times out. -func TestShardWriter_Write_ErrReadTimeout(t *testing.T) { - ln, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatal(err) - } - - w := cluster.NewShardWriter(time.Millisecond, 1) - w.MetaClient = &metaClient{host: ln.Addr().String()} - now := time.Now() - - shardID := uint64(1) - ownerID := uint64(2) - var points []models.Point - points = append(points, models.MustNewPoint( - "cpu", models.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now, - )) - - if err := w.WriteShard(shardID, ownerID, points); err == nil || !strings.Contains(err.Error(), "i/o timeout") { - t.Fatalf("unexpected error: %s", err) - } -} - -// Ensure the shard writer returns an error when we can't get a connection. 
-func TestShardWriter_Write_PoolMax(t *testing.T) { - ts := newTestWriteService(writeShardSlow) - s := cluster.NewService(cluster.Config{ - ShardWriterTimeout: toml.Duration(100 * time.Millisecond), - }) - s.Listener = ts.muxln - s.TSDBStore = ts - if err := s.Open(); err != nil { - t.Fatal(err) - } - defer s.Close() - defer ts.Close() - - w := cluster.NewShardWriter(100*time.Millisecond, 1) - w.MetaClient = &metaClient{host: ts.ln.Addr().String()} - now := time.Now() - - shardID := uint64(1) - ownerID := uint64(2) - var points []models.Point - points = append(points, models.MustNewPoint( - "cpu", models.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now, - )) - - go w.WriteShard(shardID, ownerID, points) - time.Sleep(time.Millisecond) - if err := w.WriteShard(shardID, ownerID, points); err == nil || err.Error() != "timed out waiting for free connection" { - t.Fatalf("unexpected error: %v", err) - } -} diff -Nru influxdb-0.10.0+dfsg1/cmd/influx/cli/cli.go influxdb-1.1.1+dfsg1/cmd/influx/cli/cli.go --- influxdb-0.10.0+dfsg1/cmd/influx/cli/cli.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influx/cli/cli.go 2016-12-06 21:36:15.000000000 +0000 @@ -1,4 +1,4 @@ -package cli +package cli // import "github.com/influxdata/influxdb/cmd/influx/cli" import ( "bytes" @@ -7,11 +7,11 @@ "errors" "fmt" "io" + "io/ioutil" "net" "net/url" "os" "os/signal" - "os/user" "path/filepath" "sort" "strconv" @@ -19,9 +19,11 @@ "syscall" "text/tabwriter" - "github.com/influxdb/influxdb/client" - "github.com/influxdb/influxdb/cluster" - "github.com/influxdb/influxdb/importer/v8" + "golang.org/x/crypto/ssh/terminal" + + "github.com/influxdata/influxdb/client" + "github.com/influxdata/influxdb/importer/v8" + "github.com/influxdata/influxdb/models" "github.com/peterh/liner" ) @@ -42,6 +44,7 @@ Password string Database string Ssl bool + UnsafeSsl bool RetentionPolicy string ClientVersion string ServerVersion string @@ -55,8 +58,10 @@ PPS int // Controls how many points per second the import will allow via throttling Path string Compressed bool + Chunked bool Quit chan struct{} IgnoreSignals bool // Ignore signals normally caught by this process (used primarily for testing) + ForceTTY bool // Force the CLI to act as if it were connected to a TTY osSignals chan os.Signal historyFilePath string } @@ -72,10 +77,7 @@ // Run executes the CLI func (c *CommandLine) Run() error { - // register OS signals for graceful termination - if !c.IgnoreSignals { - signal.Notify(c.osSignals, os.Kill, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGHUP) - } + hasTTY := c.ForceTTY || terminal.IsTerminal(int(os.Stdin.Fd())) var promptForPassword bool // determine if they set the password flag but provided no value @@ -87,18 +89,29 @@ } } - c.Line = liner.NewLiner() - defer c.Line.Close() - - c.Line.SetMultiLineMode(true) + // Check if we will be able to prompt for the password later. + if promptForPassword && !hasTTY { + return errors.New("Unable to prompt for a password with no TTY.") + } + // Read environment variables for username/password. + if c.Username == "" { + c.Username = os.Getenv("INFLUX_USERNAME") + } + // If we are going to be prompted for a password, always use the entered password. if promptForPassword { - p, e := c.Line.PasswordPrompt("password: ") + // Open the liner (temporarily) and prompt for the password. 
+ p, e := func() (string, error) { + l := liner.NewLiner() + defer l.Close() + return l.PasswordPrompt("password: ") + }() if e != nil { - fmt.Println("Unable to parse password.") - } else { - c.Password = p + return errors.New("Unable to parse password") } + c.Password = p + } else if c.Password == "" { + c.Password = os.Getenv("INFLUX_PASSWORD") } if err := c.Connect(""); err != nil { @@ -110,17 +123,6 @@ // Modify precision. c.SetPrecision(c.Precision) - if c.Execute == "" && !c.Import { - token, err := c.DatabaseToken() - if err != nil { - return fmt.Errorf("Failed to check token: %s", err.Error()) - } - if token == "" { - fmt.Printf(noTokenMsg) - } - fmt.Printf("Connected to %s version %s\n", c.Client.Addr(), c.ServerVersion) - } - if c.Execute != "" { // Make the non-interactive mode send everything through the CLI's parser // the same way the interactive mode works @@ -130,8 +132,6 @@ return err } } - - c.Line.Close() return nil } @@ -157,19 +157,44 @@ i := v8.NewImporter(config) if err := i.Import(); err != nil { err = fmt.Errorf("ERROR: %s\n", err) - c.Line.Close() return err } - c.Line.Close() return nil } + if !hasTTY { + cmd, err := ioutil.ReadAll(os.Stdin) + if err != nil { + return err + } + return c.ExecuteQuery(string(cmd)) + } + + if !c.IgnoreSignals { + // register OS signals for graceful termination + signal.Notify(c.osSignals, syscall.SIGINT, syscall.SIGTERM) + } + + c.Line = liner.NewLiner() + defer c.Line.Close() + + c.Line.SetMultiLineMode(true) + + token, err := c.DatabaseToken() + if err != nil { + return fmt.Errorf("Failed to check token: %s", err.Error()) + } + if token == "" { + fmt.Printf(noTokenMsg) + } + fmt.Printf("Connected to %s version %s\n", c.Client.Addr(), c.ServerVersion) + c.Version() - usr, err := user.Current() - // Only load/write history if we can get the user - if err == nil { - c.historyFilePath = filepath.Join(usr.HomeDir, ".influx_history") + // Only load/write history if HOME environment variable is set. + if homeDir := os.Getenv("HOME"); homeDir != "" { + // Attempt to load the history file. + c.historyFilePath = filepath.Join(homeDir, ".influx_history") if historyFile, err := os.Open(c.historyFilePath); err == nil { c.Line.ReadHistory(historyFile) historyFile.Close() @@ -177,10 +202,16 @@ } // read from prompt until exit is run + return c.mainLoop() +} + +// mainLoop runs the main prompt loop for the CLI. 
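The rewritten Run method above resolves credentials in a fixed order: an explicit flag value wins, otherwise the INFLUX_USERNAME and INFLUX_PASSWORD environment variables are consulted, and the password is prompted for interactively only when a TTY is available. A minimal sketch of that fallback order (the interactive liner prompt is omitted here):

package main

import (
	"fmt"
	"os"
)

// resolveCredential mirrors the flag-then-environment fallback used by the CLI above.
func resolveCredential(flagValue, envVar string) string {
	if flagValue != "" {
		return flagValue
	}
	return os.Getenv(envVar)
}

func main() {
	os.Setenv("INFLUX_USERNAME", "admin")
	os.Setenv("INFLUX_PASSWORD", "secret")
	fmt.Println(resolveCredential("", "INFLUX_USERNAME"))         // admin
	fmt.Println(resolveCredential("cli-user", "INFLUX_USERNAME")) // cli-user: explicit flag wins
	fmt.Println(resolveCredential("", "INFLUX_PASSWORD"))         // secret
}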
+func (c *CommandLine) mainLoop() error { for { select { case <-c.osSignals: - close(c.Quit) + c.exit() + return nil case <-c.Quit: c.exit() return nil @@ -190,9 +221,10 @@ // Instead of die, register that someone exited the program gracefully l = "exit" } else if e != nil { - break + c.exit() + return e } - if err := c.ParseCommand(l); err != ErrBlankCommand { + if err := c.ParseCommand(l); err != ErrBlankCommand && !strings.HasPrefix(strings.TrimSpace(l), "auth") { c.Line.AppendHistory(l) c.saveHistory() } @@ -208,7 +240,6 @@ if len(tokens) > 0 { switch tokens[0] { case "exit", "quit": - // signal the program to exit close(c.Quit) case "gopher": c.gopher() @@ -273,6 +304,7 @@ config.Password = c.Password config.UserAgent = "InfluxDBShell/" + c.ClientVersion config.Precision = c.Precision + config.UnsafeSsl = c.UnsafeSsl cl, err := client.NewClient(config) if err != nil { return fmt.Errorf("Could not create client %s", err) @@ -284,6 +316,13 @@ return fmt.Errorf("Failed to connect to %s\n", c.Client.Addr()) } c.ServerVersion = v + // Update the command with the current connection information + if h, p, err := net.SplitHostPort(config.URL.Host); err == nil { + c.Host = h + if i, err := strconv.Atoi(p); err == nil { + c.Port = i + } + } return nil } @@ -329,41 +368,46 @@ } d := args[1] - // validate if specified database exists + // Validate if specified database exists response, err := c.Client.Query(client.Query{Command: "SHOW DATABASES"}) if err != nil { fmt.Printf("ERR: %s\n", err) return - } - - if err := response.Error(); err != nil { - fmt.Printf("ERR: %s\n", err) - return - } - - // verify the provided database exists - databaseExists := func() bool { - for _, result := range response.Results { - for _, row := range result.Series { - if row.Name == "databases" { - for _, values := range row.Values { - for _, database := range values { - if database == d { - return true + } else if err := response.Error(); err != nil { + if c.Username == "" { + fmt.Printf("ERR: %s\n", err) + return + } + // TODO(jsternberg): Fix SHOW DATABASES to be user-aware #6397. + // If we are unable to run SHOW DATABASES, display a warning and use the + // database anyway in case the person doesn't have permission to run the + // command, but does have permission to use the database. + fmt.Printf("WARN: %s\n", err) + } else { + // Verify the provided database exists + if databaseExists := func() bool { + for _, result := range response.Results { + for _, row := range result.Series { + if row.Name == "databases" { + for _, values := range row.Values { + for _, database := range values { + if database == d { + return true + } } } } } } + return false + }(); !databaseExists { + fmt.Printf("ERR: Database %s doesn't exist. Run SHOW DATABASES for a list of existing databases.\n", d) + return } - return false - }() - if databaseExists { - c.Database = d - fmt.Printf("Using database %s\n", d) - } else { - fmt.Printf("ERR: Database %s doesn't exist. Run SHOW DATABASES for a list of existing databases.\n", d) } + + c.Database = d + fmt.Printf("Using database %s\n", d) } // SetPrecision sets client precision @@ -407,7 +451,7 @@ // normalize cmd cmd = strings.ToLower(cmd) - _, err := cluster.ParseConsistencyLevel(cmd) + _, err := models.ParseConsistencyLevel(cmd) if err != nil { fmt.Printf("Unknown consistency level %q. 
Please use any, one, quorum, or all.\n", cmd) return @@ -501,7 +545,7 @@ }, Database: c.Database, RetentionPolicy: c.RetentionPolicy, - Precision: "n", + Precision: c.Precision, WriteConsistency: c.WriteConsistency, }) if err != nil { @@ -516,9 +560,18 @@ return nil } +// query creates a query struct to be used with the client. +func (c *CommandLine) query(query string, database string) client.Query { + return client.Query{ + Command: query, + Database: database, + Chunked: true, + } +} + // ExecuteQuery runs any query statement func (c *CommandLine) ExecuteQuery(query string) error { - response, err := c.Client.Query(client.Query{Command: query, Database: c.Database}) + response, err := c.Client.Query(c.query(query, c.Database)) if err != nil { fmt.Printf("ERR: %s\n", err) return err @@ -537,11 +590,12 @@ // DatabaseToken retrieves database token func (c *CommandLine) DatabaseToken() (string, error) { - response, err := c.Client.Query(client.Query{Command: "SHOW DIAGNOSTICS for 'registration'"}) + response, err := c.Client.Query(c.query("SHOW DIAGNOSTICS for 'registration'", "")) if err != nil { return "", err } - if response.Error() != nil || len((*response).Results[0].Series) == 0 { + + if response.Error() != nil || len(response.Results) == 0 || len(response.Results[0].Series) == 0 { return "", nil } @@ -596,22 +650,27 @@ } func (c *CommandLine) writeColumns(response *client.Response, w io.Writer) { + // Create a tabbed writer for each result as they won't always line up + writer := new(tabwriter.Writer) + writer.Init(w, 0, 8, 1, '\t', 0) + for _, result := range response.Results { - // Create a tabbed writer for each result a they won't always line up - w := new(tabwriter.Writer) - w.Init(os.Stdout, 0, 8, 1, '\t', 0) + // Print out all messages first + for _, m := range result.Messages { + fmt.Fprintf(w, "%s: %s.\n", m.Level, m.Text) + } csv := c.formatResults(result, "\t") for _, r := range csv { - fmt.Fprintln(w, r) + fmt.Fprintln(writer, r) } - w.Flush() + writer.Flush() } } // formatResults will behave differently if you are formatting for columns or csv func (c *CommandLine) formatResults(result client.Result, separator string) []string { rows := []string{} - // Create a tabbed writer for each result a they won't always line up + // Create a tabbed writer for each result as they won't always line up for i, row := range result.Series { // gather tags tags := []string{} @@ -642,15 +701,11 @@ rows = append(rows, "") } - // If we are column format, we break out the name/tag to seperate lines + // If we are column format, we break out the name/tag to separate lines if c.Format == "column" { if row.Name != "" { n := fmt.Sprintf("name: %s", row.Name) rows = append(rows, n) - if len(tags) == 0 { - l := strings.Repeat("-", len(n)) - rows = append(rows, l) - } } if len(tags) > 0 { t := fmt.Sprintf("tags: %s", (strings.Join(tags, ", "))) @@ -660,8 +715,8 @@ rows = append(rows, strings.Join(columnNames, separator)) - // if format is column, break tags to their own line/format - if c.Format == "column" && len(tags) > 0 { + // if format is column, write dashes under each column + if c.Format == "column" { lines := []string{} for _, columnName := range columnNames { lines = append(lines, strings.Repeat("-", len(columnName))) @@ -685,7 +740,7 @@ } rows = append(rows, strings.Join(values, separator)) } - // Outout a line separator if in column format + // Output a line separator if in column format if c.Format == "column" { rows = append(rows, "") } @@ -711,7 +766,7 @@ // Settings prints current 
settings func (c *CommandLine) Settings() { w := new(tabwriter.Writer) - w.Init(os.Stdout, 0, 8, 1, '\t', 0) + w.Init(os.Stdout, 0, 1, 1, '\t', 0) if c.Port > 0 { fmt.Fprintf(w, "Host\t%s:%d\n", c.Host, c.Port) } else { @@ -746,7 +801,7 @@ show field keys show field key information A full list of influxql commands can be found at: - https://influxdb.com/docs/v0.9/query_language/spec.html + https://docs.influxdata.com/influxdb/latest/query_language/spec/ `) } @@ -824,7 +879,7 @@ // Version prints CLI version func (c *CommandLine) Version() { - fmt.Println("InfluxDB shell " + c.ClientVersion) + fmt.Println("InfluxDB shell version:", c.ClientVersion) } func (c *CommandLine) exit() { diff -Nru influxdb-0.10.0+dfsg1/cmd/influx/cli/cli_test.go influxdb-1.1.1+dfsg1/cmd/influx/cli/cli_test.go --- influxdb-0.10.0+dfsg1/cmd/influx/cli/cli_test.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influx/cli/cli_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -3,6 +3,7 @@ import ( "bufio" "bytes" + "fmt" "io" "net" "net/http" @@ -12,9 +13,9 @@ "strings" "testing" - "github.com/influxdb/influxdb/client" - "github.com/influxdb/influxdb/cmd/influx/cli" - "github.com/influxdb/influxdb/influxql" + "github.com/influxdata/influxdb/client" + "github.com/influxdata/influxdb/cmd/influx/cli" + "github.com/influxdata/influxdb/influxql" "github.com/peterh/liner" ) @@ -47,6 +48,7 @@ c.Host = h c.Port, _ = strconv.Atoi(p) c.IgnoreSignals = true + c.ForceTTY = true go func() { close(c.Quit) }() @@ -68,6 +70,7 @@ c.Precision = "ms" c.Execute = "INSERT sensor,floor=1 value=2" c.IgnoreSignals = true + c.ForceTTY = true if err := c.Run(); err != nil { t.Fatalf("Run failed with error: %s", err) } @@ -194,7 +197,7 @@ // assert connection is established if err := c.ParseCommand(cmd); err != nil { - t.Fatalf("There was an error while connecting to %s: %s", u.Path, err) + t.Fatalf("There was an error while connecting to %v: %v", u.Path, err) } // assert server version is populated @@ -272,7 +275,6 @@ if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } - m := cli.CommandLine{Client: c} tests := []struct { cmd string @@ -286,6 +288,7 @@ } for _, test := range tests { + m := cli.CommandLine{Client: c} if err := m.ParseCommand(test.cmd); err != nil { t.Fatalf(`Got error %v for command %q, expected nil.`, err, test.cmd) } @@ -296,6 +299,59 @@ } } +func TestParseCommand_UseAuth(t *testing.T) { + t.Parallel() + ts := emptyTestServer() + defer ts.Close() + + u, _ := url.Parse(ts.URL) + tests := []struct { + cmd string + user string + database string + }{ + { + cmd: "use db", + user: "admin", + database: "db", + }, + { + cmd: "use blank", + user: "admin", + database: "", + }, + { + cmd: "use db", + user: "anonymous", + database: "db", + }, + { + cmd: "use blank", + user: "anonymous", + database: "blank", + }, + } + + for i, tt := range tests { + config := client.Config{URL: *u, Username: tt.user} + fmt.Println("using auth:", tt.user) + c, err := client.NewClient(config) + if err != nil { + t.Errorf("%d. unexpected error. expected %v, actual %v", i, nil, err) + continue + } + m := cli.CommandLine{Client: c, Username: tt.user} + + if err := m.ParseCommand(tt.cmd); err != nil { + t.Fatalf(`%d. Got error %v for command %q, expected nil.`, i, err, tt.cmd) + } + + if m.Database != tt.database { + t.Fatalf(`%d. Command "use" changed database to %q. 
Expected %q`, i, m.Database, tt.database) + } + } +} + func TestParseCommand_Consistency(t *testing.T) { t.Parallel() c := cli.CommandLine{} @@ -490,6 +546,14 @@ return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("X-Influxdb-Version", SERVER_VERSION) + // Fake authorization entirely based on the username. + authorized := false + user, _, _ := r.BasicAuth() + switch user { + case "", "admin": + authorized = true + } + switch r.URL.Path { case "/query": values := r.URL.Query() @@ -503,7 +567,12 @@ switch stmt.(type) { case *influxql.ShowDatabasesStatement: - io.WriteString(w, `{"results":[{"series":[{"name":"databases","columns":["name"],"values":[["db"]]}]}]}`) + if authorized { + io.WriteString(w, `{"results":[{"series":[{"name":"databases","columns":["name"],"values":[["db"]]}]}]}`) + } else { + w.WriteHeader(http.StatusUnauthorized) + io.WriteString(w, fmt.Sprintf(`{"error":"error authorizing query: %s not authorized to execute statement 'SHOW DATABASES', requires admin privilege"}`, user)) + } case *influxql.ShowDiagnosticsStatement: io.WriteString(w, `{"results":[{}]}`) } diff -Nru influxdb-0.10.0+dfsg1/cmd/influx/main.go influxdb-1.1.1+dfsg1/cmd/influx/main.go --- influxdb-0.10.0+dfsg1/cmd/influx/main.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influx/main.go 2016-12-06 21:36:15.000000000 +0000 @@ -5,13 +5,13 @@ "fmt" "os" - "github.com/influxdb/influxdb/client" - "github.com/influxdb/influxdb/cmd/influx/cli" + "github.com/influxdata/influxdb/client" + "github.com/influxdata/influxdb/cmd/influx/cli" ) // These variables are populated via the Go linker. var ( - version = "0.9" + version string ) const ( @@ -26,6 +26,13 @@ defaultPPS = 0 ) +func init() { + // If version is not set, make that clear. + if version == "" { + version = "unknown" + } +} + func main() { c := cli.New(version) @@ -36,9 +43,10 @@ fs.StringVar(&c.Password, "password", c.Password, `Password to connect to the server. Leaving blank will prompt for password (--password="").`) fs.StringVar(&c.Database, "database", c.Database, "Database to connect to the server.") fs.BoolVar(&c.Ssl, "ssl", false, "Use https for connecting to cluster.") + fs.BoolVar(&c.UnsafeSsl, "unsafeSsl", false, "Set this when connecting to the cluster using https and not use SSL verification.") fs.StringVar(&c.Format, "format", defaultFormat, "Format specifies the format of the server responses: json, csv, or column.") fs.StringVar(&c.Precision, "precision", defaultPrecision, "Precision specifies the format of the timestamp: rfc3339,h,m,s,ms,u or ns.") - fs.StringVar(&c.WriteConsistency, "consistency", "any", "Set write consistency level: any, one, quorum, or all.") + fs.StringVar(&c.WriteConsistency, "consistency", "all", "Set write consistency level: any, one, quorum, or all.") fs.BoolVar(&c.Pretty, "pretty", false, "Turns on pretty print for the json format.") fs.StringVar(&c.Execute, "execute", c.Execute, "Execute command and quit.") fs.BoolVar(&c.ShowVersion, "version", false, "Displays the InfluxDB version.") @@ -64,6 +72,8 @@ Username to connect to the server. -ssl Use https for requests. + -unsafeSsl + Set this when connecting to the cluster using https and not use SSL verification. -execute 'command' Execute command and quit. 
-format 'json|csv|column' diff -Nru influxdb-0.10.0+dfsg1/cmd/influxd/backup/backup.go influxdb-1.1.1+dfsg1/cmd/influxd/backup/backup.go --- influxdb-0.10.0+dfsg1/cmd/influxd/backup/backup.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influxd/backup/backup.go 2016-12-06 21:36:15.000000000 +0000 @@ -15,8 +15,8 @@ "strings" "time" - "github.com/influxdb/influxdb/services/snapshotter" - "github.com/influxdb/influxdb/tcp" + "github.com/influxdata/influxdb/services/snapshotter" + "github.com/influxdata/influxdb/tcp" ) const ( @@ -233,7 +233,7 @@ return err } - magic := btou64(binData[:8]) + magic := binary.BigEndian.Uint64(binData[:8]) if magic != snapshotter.BackupMagicHeader { cmd.Logger.Println("Invalid metadata blob, ensure the metadata service is running (default port 8088)") return errors.New("invalid metadata received") @@ -273,6 +273,16 @@ } } + f, err := os.Stat(tmppath) + if err != nil { + return err + } + + // There was nothing downloaded, don't create an empty backup file. + if f.Size() == 0 { + return os.Remove(tmppath) + } + // Rename temporary file to final path. if err := os.Rename(tmppath, path); err != nil { return fmt.Errorf("rename: %s", err) @@ -335,22 +345,21 @@ // printUsage prints the usage message to STDERR. func (cmd *Command) printUsage() { - fmt.Fprintf(cmd.Stdout, `usage: influxd backup [flags] PATH + fmt.Fprintf(cmd.Stdout, `Downloads a snapshot of a data node and saves it to disk. -Backup downloads a snapshot of a data node and saves it to disk. +Usage: influxd backup [flags] PATH -Options: - -host - The host to connect to snapshot. Defaults to 127.0.0.1:8088. - -database - The database to backup. - -retention - Optional. The retention policy to backup. - -shard - Optional. The shard id to backup. If specified, retention is required. - -since <2015-12-24T08:12:23> - Optional. Do an incremental backup since the passed in RFC3339 - formatted time. + -host + The host to connect to snapshot. Defaults to 127.0.0.1:8088. + -database + The database to backup. + -retention + Optional. The retention policy to backup. + -shard + Optional. The shard id to backup. If specified, retention is required. + -since <2015-12-24T08:12:23> + Optional. Do an incremental backup since the passed in RFC3339 + formatted time. `) } @@ -358,14 +367,10 @@ // retentionAndShardFromPath will take the shard relative path and split it into the // retention policy name and shard ID. The first part of the path should be the database name. func retentionAndShardFromPath(path string) (retention, shard string, err error) { - a := strings.Split(path, "/") + a := strings.Split(path, string(filepath.Separator)) if len(a) != 3 { return "", "", fmt.Errorf("expected database, retention policy, and shard id in path: %s", path) } return a[1], a[2], nil } - -func btou64(b []byte) uint64 { - return binary.BigEndian.Uint64(b) -} diff -Nru influxdb-0.10.0+dfsg1/cmd/influxd/help/help.go influxdb-1.1.1+dfsg1/cmd/influxd/help/help.go --- influxdb-0.10.0+dfsg1/cmd/influxd/help/help.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influxd/help/help.go 2016-12-06 21:36:15.000000000 +0000 @@ -28,19 +28,18 @@ const usage = ` Configure and start an InfluxDB server. 
-Usage: - - influxd [[command] [arguments]] +Usage: influxd [[command] [arguments]] The commands are: backup downloads a snapshot of a data node and saves it to disk config display the default configuration + help display this help message restore uses a snapshot of a data node to rebuild a cluster run run node with existing configuration version displays the InfluxDB version "run" is the default command. -Use "influxd help [command]" for more information about a command. +Use "influxd [command] -help" for more information about a command. ` diff -Nru influxdb-0.10.0+dfsg1/cmd/influxd/main.go influxdb-1.1.1+dfsg1/cmd/influxd/main.go --- influxdb-0.10.0+dfsg1/cmd/influxd/main.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influxd/main.go 2016-12-06 21:36:15.000000000 +0000 @@ -8,35 +8,34 @@ "math/rand" "os" "os/signal" - "strings" "syscall" "time" - "github.com/influxdb/influxdb/cmd/influxd/backup" - "github.com/influxdb/influxdb/cmd/influxd/help" - "github.com/influxdb/influxdb/cmd/influxd/restore" - "github.com/influxdb/influxdb/cmd/influxd/run" + "github.com/influxdata/influxdb/cmd" + "github.com/influxdata/influxdb/cmd/influxd/backup" + "github.com/influxdata/influxdb/cmd/influxd/help" + "github.com/influxdata/influxdb/cmd/influxd/restore" + "github.com/influxdata/influxdb/cmd/influxd/run" ) // These variables are populated via the Go linker. var ( - version = "0.9" - commit string - branch string - buildTime string + version string + commit string + branch string ) func init() { // If commit, branch, or build time are not set, make that clear. + if version == "" { + version = "unknown" + } if commit == "" { commit = "unknown" } if branch == "" { branch = "unknown" } - if buildTime == "" { - buildTime = "unknown" - } } func main() { @@ -70,7 +69,7 @@ // Run determines and runs the command specified by the CLI args. func (m *Main) Run(args ...string) error { - name, args := ParseCommandName(args) + name, args := cmd.ParseCommandName(args) // Extract name from args. switch name { @@ -81,7 +80,6 @@ cmd.Version = version cmd.Commit = commit cmd.Branch = branch - cmd.BuildTime = buildTime if err := cmd.Run(args...); err != nil { return fmt.Errorf("run: %s", err) @@ -143,32 +141,6 @@ return nil } -// ParseCommandName extracts the command name and args from the args list. -func ParseCommandName(args []string) (string, []string) { - // Retrieve command name as first argument. - var name string - if len(args) > 0 && !strings.HasPrefix(args[0], "-") { - name = args[0] - } - - // Special case -h immediately following binary name - if len(args) > 0 && args[0] == "-h" { - name = "help" - } - - // If command is "help" and has an argument then rewrite args to use "-h". - if name == "help" && len(args) > 1 { - args[0], args[1] = args[1], "-h" - name = args[0] - } - - // If a named command is specified then return it with its arguments. - if name != "" { - return name, args[1:] - } - return "", args -} - // VersionCommand represents the command executed by "influxd version". type VersionCommand struct { Stdout io.Writer @@ -187,19 +159,18 @@ func (cmd *VersionCommand) Run(args ...string) error { // Parse flags in case -h is specified. fs := flag.NewFlagSet("", flag.ContinueOnError) - fs.Usage = func() { fmt.Fprintln(cmd.Stderr, strings.TrimSpace(versionUsage)) } + fs.Usage = func() { fmt.Fprintln(cmd.Stderr, versionUsage) } if err := fs.Parse(args); err != nil { return err } // Print version info. 
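Both the influx and influxd main packages above note that version, commit, and branch are "populated via the Go linker" and now fall back to "unknown" when left empty. A minimal sketch of that pattern follows; the build command in the comment is illustrative, since the InfluxDB build scripts are not shown in this section.

package main

import "fmt"

// These are overridden at build time, for example:
//   go build -ldflags "-X main.version=1.1.1 -X main.commit=abc123 -X main.branch=master"
var (
	version string
	commit  string
	branch  string
)

func init() {
	// If the linker did not set them, make that clear (same idea as the diff above).
	if version == "" {
		version = "unknown"
	}
	if commit == "" {
		commit = "unknown"
	}
	if branch == "" {
		branch = "unknown"
	}
}

func main() {
	fmt.Printf("InfluxDB v%s (git: %s %s)\n", version, branch, commit)
}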
- fmt.Fprintf(cmd.Stdout, "InfluxDB v%s (git: %s %s, built %s)\n", version, branch, commit, buildTime) + fmt.Fprintf(cmd.Stdout, "InfluxDB v%s (git: %s %s)\n", version, branch, commit) return nil } -var versionUsage = ` -usage: version +var versionUsage = `Displays the InfluxDB version, build branch and git commit hash. - version displays the InfluxDB version, build branch and git commit hash +Usage: influxd version ` diff -Nru influxdb-0.10.0+dfsg1/cmd/influxd/restore/restore.go influxdb-1.1.1+dfsg1/cmd/influxd/restore/restore.go --- influxdb-0.10.0+dfsg1/cmd/influxd/restore/restore.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influxd/restore/restore.go 2016-12-06 21:36:15.000000000 +0000 @@ -9,16 +9,15 @@ "fmt" "io" "io/ioutil" - "log" "net" "os" "path/filepath" "strconv" "sync" - "github.com/influxdb/influxdb/cmd/influxd/backup" - "github.com/influxdb/influxdb/services/meta" - "github.com/influxdb/influxdb/services/snapshotter" + "github.com/influxdata/influxdb/cmd/influxd/backup" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/services/snapshotter" ) // Command represents the program execution for "influxd restore". @@ -52,11 +51,6 @@ return err } - if err := cmd.ensureStopped(); err != nil { - fmt.Fprintln(cmd.Stderr, "influxd cannot be running during a restore. Please stop any running instances and try again.") - return err - } - if cmd.metadir != "" { if err := cmd.unpackMeta(); err != nil { return err @@ -119,15 +113,6 @@ return nil } -func (cmd *Command) ensureStopped() error { - ln, err := net.Listen("tcp", cmd.MetaConfig.BindAddress) - if err != nil { - return fmt.Errorf("influxd running on %s: aborting.", cmd.MetaConfig.BindAddress) - } - defer ln.Close() - return nil -} - // unpackMeta reads the metadata from the backup directory and initializes a raft // cluster and replaces the root metadata. func (cmd *Command) unpackMeta() error { @@ -159,20 +144,20 @@ var i int // Make sure the file is actually a meta store backup file - magic := btou64(b[:8]) + magic := binary.BigEndian.Uint64(b[:8]) if magic != snapshotter.BackupMagicHeader { return fmt.Errorf("invalid metadata file") } i += 8 // Size of the meta store bytes - length := int(btou64(b[i : i+8])) + length := int(binary.BigEndian.Uint64(b[i : i+8])) i += 8 metaBytes := b[i : i+length] i += int(length) // Size of the node.json bytes - length = int(btou64(b[i : i+8])) + length = int(binary.BigEndian.Uint64(b[i : i+8])) i += 8 nodeBytes := b[i:] @@ -184,8 +169,7 @@ // Copy meta config and remove peers so it starts in single mode. c := cmd.MetaConfig - c.JoinPeers = nil - c.LoggingEnabled = false + c.Dir = cmd.metadir // Create the meta dir if os.MkdirAll(c.Dir, 0700); err != nil { @@ -197,25 +181,8 @@ return err } - // Initialize meta store. - store := meta.NewService(c) - store.RaftListener = newNopListener() - - // Open the meta store. - if err := store.Open(); err != nil { - return fmt.Errorf("open store: %s", err) - } - defer store.Close() - - // Wait for the store to be ready or error. 
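The backup and restore commands above drop the btou64 helper and call binary.BigEndian.Uint64 directly when validating the metadata blob: read the blob, interpret its first 8 bytes as a big-endian uint64, and compare against the backup magic number. A small self-contained sketch of that check; the constant below is a placeholder, since the real value is snapshotter.BackupMagicHeader and does not appear in this diff.

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// backupMagicHeader is an illustrative placeholder, not the real constant.
const backupMagicHeader uint64 = 0x59590101

// checkHeader verifies the 8-byte big-endian magic at the start of a metadata blob.
func checkHeader(blob []byte) error {
	if len(blob) < 8 {
		return errors.New("metadata blob too short")
	}
	if binary.BigEndian.Uint64(blob[:8]) != backupMagicHeader {
		return errors.New("invalid metadata received")
	}
	return nil
}

func main() {
	blob := make([]byte, 16)
	binary.BigEndian.PutUint64(blob[:8], backupMagicHeader)
	fmt.Println(checkHeader(blob)) // <nil>
}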
- select { - case err := <-store.Err(): - return err - default: - } - - client := meta.NewClient([]string{store.HTTPAddr()}, false) - client.SetLogger(log.New(ioutil.Discard, "", 0)) + client := meta.NewClient(c) + client.SetLogOutput(ioutil.Discard) if err := client.Open(); err != nil { return err } @@ -225,6 +192,25 @@ if err := client.SetData(&data); err != nil { return fmt.Errorf("set data: %s", err) } + + // remove the raft.db file if it exists + err = os.Remove(filepath.Join(cmd.metadir, "raft.db")) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + // remove the node.json file if it exists + err = os.Remove(filepath.Join(cmd.metadir, "node.json")) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + return nil } @@ -345,27 +331,26 @@ // printUsage prints the usage message to STDERR. func (cmd *Command) printUsage() { - fmt.Fprintf(cmd.Stdout, `usage: influxd restore [flags] PATH - -Restore uses backups from the PATH to restore the metastore, databases, + fmt.Fprintf(cmd.Stdout, `Uses backups from the PATH to restore the metastore, databases, retention policies, or specific shards. The InfluxDB process must not be -running during restore. +running during a restore. + +Usage: influxd restore [flags] PATH -Options: - -metadir - Optional. If set the metastore will be recovered to the given path. - -datadir - Optional. If set the restore process will recover the specified - database, retention policy or shard to the given directory. - -database - Optional. Required if no metadir given. Will restore the database - TSM files. - -retention - Optional. If given, database is required. Will restore the retention policy's - TSM files. - -shard - Optional. If given, database and retention are required. Will restore the shard's - TSM files. + -metadir + Optional. If set the metastore will be recovered to the given path. + -datadir + Optional. If set the restore process will recover the specified + database, retention policy or shard to the given directory. + -database + Optional. Required if no metadir given. Will restore the database + TSM files. + -retention + Optional. If given, database is required. Will restore the retention policy's + TSM files. + -shard + Optional. If given, database and retention are required. Will restore the shard's + TSM files. `) } @@ -399,14 +384,3 @@ } func (ln *nopListener) Addr() net.Addr { return &net.TCPAddr{} } - -// u64tob converts a uint64 into an 8-byte slice. -func u64tob(v uint64) []byte { - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, v) - return b -} - -func btou64(b []byte) uint64 { - return binary.BigEndian.Uint64(b) -} diff -Nru influxdb-0.10.0+dfsg1/cmd/influxd/run/backup_restore_test.go influxdb-1.1.1+dfsg1/cmd/influxd/run/backup_restore_test.go --- influxdb-0.10.0+dfsg1/cmd/influxd/run/backup_restore_test.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influxd/run/backup_restore_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -8,17 +8,17 @@ "testing" "time" - "github.com/influxdb/influxdb/cmd/influxd/backup" - "github.com/influxdb/influxdb/cmd/influxd/restore" + "github.com/influxdata/influxdb/cmd/influxd/backup" + "github.com/influxdata/influxdb/cmd/influxd/restore" ) func TestServer_BackupAndRestore(t *testing.T) { + t.Skip("currently fails intermittently. 
See issue https://github.com/influxdata/influxdb/issues/6590") config := NewConfig() config.Data.Engine = "tsm1" config.Data.Dir, _ = ioutil.TempDir("", "data_backup") config.Meta.Dir, _ = ioutil.TempDir("", "meta_backup") - config.Meta.BindAddress = freePort() - config.Meta.HTTPBindAddress = freePort() + config.BindAddress = freePort() backupDir, _ := ioutil.TempDir("", "backup") defer os.RemoveAll(backupDir) @@ -31,17 +31,19 @@ config.Data.CacheSnapshotMemorySize = 1 func() { - s := OpenServer(config, "") + s := OpenServer(config) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy(db, newRetentionPolicyInfo(rp, 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy(db, newRetentionPolicySpec(rp, 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy(db, rp); err != nil { t.Fatal(err) } - s.MustWrite(db, rp, "myseries,host=A value=23 1000000", nil) + if _, err := s.Write(db, rp, "myseries,host=A value=23 1000000", nil); err != nil { + t.Fatalf("failed to write: %s", err) + } // wait for the snapshot to write time.Sleep(time.Second) @@ -56,8 +58,13 @@ // now backup cmd := backup.NewCommand() - if err := cmd.Run("-host", config.Meta.BindAddress, "-database", "mydb", backupDir); err != nil { - t.Fatalf("error backing up: %s", err.Error()) + _, port, err := net.SplitHostPort(config.BindAddress) + if err != nil { + t.Fatal(err) + } + hostAddress := net.JoinHostPort("localhost", port) + if err := cmd.Run("-host", hostAddress, "-database", "mydb", backupDir); err != nil { + t.Fatalf("error backing up: %s, hostAddress: %s", err.Error(), hostAddress) } }() @@ -71,7 +78,6 @@ // restore cmd := restore.NewCommand() - cmd.MetaConfig.BindAddress = config.Meta.BindAddress if err := cmd.Run("-metadir", config.Meta.Dir, "-datadir", config.Data.Dir, "-database", "mydb", backupDir); err != nil { t.Fatalf("error restoring: %s", err.Error()) @@ -84,7 +90,7 @@ } // now open it up and verify we're good - s := OpenServer(config, "") + s := OpenServer(config) defer s.Close() res, err := s.Query(`select * from "mydb"."forever"."myseries"`) diff -Nru influxdb-0.10.0+dfsg1/cmd/influxd/run/command.go influxdb-1.1.1+dfsg1/cmd/influxd/run/command.go --- influxdb-0.10.0+dfsg1/cmd/influxd/run/command.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influxd/run/command.go 2016-12-06 21:36:15.000000000 +0000 @@ -10,11 +10,7 @@ "path/filepath" "runtime" "strconv" - "strings" "time" - - "github.com/BurntSushi/toml" - "github.com/influxdb/influxdb" ) const logo = ` @@ -68,12 +64,16 @@ // Print sweet InfluxDB logo. fmt.Print(logo) + // Configure default logging. + log.SetPrefix("[run] ") + log.SetFlags(log.LstdFlags) + // Set parallelism. runtime.GOMAXPROCS(runtime.NumCPU()) // Mark start-up in log. - log.Printf("InfluxDB starting, version %s, branch %s, commit %s, built %s", - cmd.Version, cmd.Branch, cmd.Commit, cmd.BuildTime) + log.Printf("InfluxDB starting, version %s, branch %s, commit %s", + cmd.Version, cmd.Branch, cmd.Commit) log.Printf("Go version %s, GOMAXPROCS set to %d", runtime.Version(), runtime.GOMAXPROCS(0)) // Write the PID file. 
@@ -81,11 +81,8 @@ return fmt.Errorf("write pid file: %s", err) } - // Turn on block profiling to debug stuck databases - runtime.SetBlockProfileRate(int(1 * time.Second)) - // Parse config - config, err := cmd.ParseConfig(options.ConfigPath) + config, err := cmd.ParseConfig(options.GetConfigPath()) if err != nil { return fmt.Errorf("parse config: %s", err) } @@ -95,20 +92,16 @@ return fmt.Errorf("apply env config: %v", err) } - // If we have a node ID, ignore the join argument - // We are not using the reference to this node var, just checking - // to see if we have a node ID on disk - if node, _ := influxdb.LoadNode(config.Meta.Dir, []string{config.Meta.HTTPBindAddress}); node == nil || node.ID == 0 { - if options.Join != "" { - config.Meta.JoinPeers = strings.Split(options.Join, ",") - } - } - // Validate the configuration. if err := config.Validate(); err != nil { return fmt.Errorf("%s. To generate a valid configuration file run `influxd config > influxdb.generated.conf`", err) } + if config.HTTPD.PprofEnabled { + // Turn on block profiling to debug stuck databases + runtime.SetBlockProfileRate(int(1 * time.Second)) + } + // Create server from config and start it. buildInfo := &BuildInfo{ Version: cmd.Version, @@ -161,7 +154,8 @@ fs := flag.NewFlagSet("", flag.ContinueOnError) fs.StringVar(&options.ConfigPath, "config", "", "") fs.StringVar(&options.PIDFile, "pidfile", "", "") - fs.StringVar(&options.Join, "join", "", "") + // Ignore hostname option. + _ = fs.String("hostname", "", "") fs.StringVar(&options.CPUProfile, "cpuprofile", "", "") fs.StringVar(&options.MemProfile, "memprofile", "", "") fs.Usage = func() { fmt.Fprintln(cmd.Stderr, usage) } @@ -205,39 +199,64 @@ log.Printf("Using configuration at: %s\n", path) config := NewConfig() - if _, err := toml.DecodeFile(path, &config); err != nil { + if err := config.FromTomlFile(path); err != nil { return nil, err } return config, nil } -var usage = `usage: run [flags] - -run starts the InfluxDB server. If this is the first time running the command -then a new cluster will be initialized unless the -join argument is used. - - -config - Set the path to the configuration file. +var usage = `Runs the InfluxDB server. - -join - Joins the server to an existing cluster. Should be the HTTP bind address of an existing meta server +Usage: influxd run [flags] - -pidfile - Write process ID to a file. - - -cpuprofile - Write CPU profiling information to a file. - - -memprofile - Write memory usage information to a file. + -config + Set the path to the configuration file. + This defaults to the environment variable INFLUXDB_CONFIG_PATH, + ~/.influxdb/influxdb.conf, or /etc/influxdb/influxdb.conf if a file + is present at any of these locations. + Disable the automatic loading of a configuration file using + the null device (such as /dev/null). + -pidfile + Write process ID to a file. + -cpuprofile + Write CPU profiling information to a file. + -memprofile + Write memory usage information to a file. ` // Options represents the command line options that can be parsed. type Options struct { ConfigPath string PIDFile string - Join string CPUProfile string MemProfile string } + +// GetConfigPath returns the config path from the options. +// It will return a path by searching in this order: +// 1. The CLI option in ConfigPath +// 2. The environment variable INFLUXDB_CONFIG_PATH +// 3. 
The first influxdb.conf file on the path: +// - ~/.influxdb +// - /etc/influxdb +func (opt *Options) GetConfigPath() string { + if opt.ConfigPath != "" { + if opt.ConfigPath == os.DevNull { + return "" + } + return opt.ConfigPath + } else if envVar := os.Getenv("INFLUXDB_CONFIG_PATH"); envVar != "" { + return envVar + } + + for _, path := range []string{ + os.ExpandEnv("${HOME}/.influxdb/influxdb.conf"), + "/etc/influxdb/influxdb.conf", + } { + if _, err := os.Stat(path); err == nil { + return path + } + } + return "" +} diff -Nru influxdb-0.10.0+dfsg1/cmd/influxd/run/config_command.go influxdb-1.1.1+dfsg1/cmd/influxd/run/config_command.go --- influxdb-0.10.0+dfsg1/cmd/influxd/run/config_command.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influxd/run/config_command.go 2016-12-06 21:36:15.000000000 +0000 @@ -36,7 +36,8 @@ } // Parse config from path. - config, err := cmd.parseConfig(*configPath) + opt := Options{ConfigPath: *configPath} + config, err := cmd.parseConfig(opt.GetConfigPath()) if err != nil { return fmt.Errorf("parse config: %s", err) } @@ -60,18 +61,32 @@ // ParseConfig parses the config at path. // Returns a demo configuration if path is blank. func (cmd *PrintConfigCommand) parseConfig(path string) (*Config, error) { + config, err := NewDemoConfig() + if err != nil { + config = NewConfig() + } + if path == "" { - return NewDemoConfig() + return config, nil } - config := NewConfig() - if _, err := toml.DecodeFile(path, &config); err != nil { + fmt.Fprintf(os.Stderr, "Merging with configuration at: %s\n", path) + + if err := config.FromTomlFile(path); err != nil { return nil, err } return config, nil } -var printConfigUsage = `usage: config +var printConfigUsage = `Displays the default configuration. + +Usage: influxd config [flags] - config displays the default configuration + -config + Set the path to the initial configuration file. + This defaults to the environment variable INFLUXDB_CONFIG_PATH, + ~/.influxdb/influxdb.conf, or /etc/influxdb/influxdb.conf if a file + is present at any of these locations. + Disable the automatic loading of a configuration file using + the null device (such as /dev/null). 
` diff -Nru influxdb-0.10.0+dfsg1/cmd/influxd/run/config.go influxdb-1.1.1+dfsg1/cmd/influxd/run/config.go --- influxdb-0.10.0+dfsg1/cmd/influxd/run/config.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influxd/run/config.go 2016-12-06 21:36:15.000000000 +0000 @@ -1,61 +1,59 @@ package run import ( - "errors" + "bytes" "fmt" + "io/ioutil" + "log" "os" "os/user" "path/filepath" "reflect" + "regexp" "strconv" "strings" "time" - "github.com/influxdb/influxdb/cluster" - "github.com/influxdb/influxdb/monitor" - "github.com/influxdb/influxdb/services/admin" - "github.com/influxdb/influxdb/services/collectd" - "github.com/influxdb/influxdb/services/continuous_querier" - "github.com/influxdb/influxdb/services/graphite" - "github.com/influxdb/influxdb/services/hh" - "github.com/influxdb/influxdb/services/httpd" - "github.com/influxdb/influxdb/services/meta" - "github.com/influxdb/influxdb/services/opentsdb" - "github.com/influxdb/influxdb/services/precreator" - "github.com/influxdb/influxdb/services/retention" - "github.com/influxdb/influxdb/services/subscriber" - "github.com/influxdb/influxdb/services/udp" - "github.com/influxdb/influxdb/tsdb" + "github.com/BurntSushi/toml" + "github.com/influxdata/influxdb/coordinator" + "github.com/influxdata/influxdb/monitor" + "github.com/influxdata/influxdb/services/admin" + "github.com/influxdata/influxdb/services/collectd" + "github.com/influxdata/influxdb/services/continuous_querier" + "github.com/influxdata/influxdb/services/graphite" + "github.com/influxdata/influxdb/services/httpd" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/services/opentsdb" + "github.com/influxdata/influxdb/services/precreator" + "github.com/influxdata/influxdb/services/retention" + "github.com/influxdata/influxdb/services/subscriber" + "github.com/influxdata/influxdb/services/udp" + "github.com/influxdata/influxdb/tsdb" ) const ( - // DefaultBindAddress is the default address for raft, cluster, snapshot, etc.. + // DefaultBindAddress is the default address for various RPC services. DefaultBindAddress = ":8088" - - // DefaultHostname is the default hostname used if we are unable to determine - // the hostname from the system - DefaultHostname = "localhost" ) // Config represents the configuration format for the influxd binary. 
type Config struct { - Meta *meta.Config `toml:"meta"` - Data tsdb.Config `toml:"data"` - Cluster cluster.Config `toml:"cluster"` - Retention retention.Config `toml:"retention"` - Precreator precreator.Config `toml:"shard-precreation"` - - Admin admin.Config `toml:"admin"` - Monitor monitor.Config `toml:"monitor"` - Subscriber subscriber.Config `toml:"subscriber"` - HTTPD httpd.Config `toml:"http"` - Graphites []graphite.Config `toml:"graphite"` - Collectd collectd.Config `toml:"collectd"` - OpenTSDB opentsdb.Config `toml:"opentsdb"` - UDPs []udp.Config `toml:"udp"` + Meta *meta.Config `toml:"meta"` + Data tsdb.Config `toml:"data"` + Coordinator coordinator.Config `toml:"coordinator"` + Retention retention.Config `toml:"retention"` + Precreator precreator.Config `toml:"shard-precreation"` + + Admin admin.Config `toml:"admin"` + Monitor monitor.Config `toml:"monitor"` + Subscriber subscriber.Config `toml:"subscriber"` + HTTPD httpd.Config `toml:"http"` + GraphiteInputs []graphite.Config `toml:"graphite"` + CollectdInputs []collectd.Config `toml:"collectd"` + OpenTSDBInputs []opentsdb.Config `toml:"opentsdb"` + UDPInputs []udp.Config `toml:"udp"` ContinuousQuery continuous_querier.Config `toml:"continuous_queries"` - HintedHandoff hh.Config `toml:"hinted-handoff"` // Server reporting ReportingDisabled bool `toml:"reporting-disabled"` @@ -69,19 +67,21 @@ c := &Config{} c.Meta = meta.NewConfig() c.Data = tsdb.NewConfig() - c.Cluster = cluster.NewConfig() + c.Coordinator = coordinator.NewConfig() c.Precreator = precreator.NewConfig() c.Admin = admin.NewConfig() c.Monitor = monitor.NewConfig() c.Subscriber = subscriber.NewConfig() c.HTTPD = httpd.NewConfig() - c.Collectd = collectd.NewConfig() - c.OpenTSDB = opentsdb.NewConfig() + + c.GraphiteInputs = []graphite.Config{graphite.NewConfig()} + c.CollectdInputs = []collectd.Config{collectd.NewConfig()} + c.OpenTSDBInputs = []opentsdb.Config{opentsdb.NewConfig()} + c.UDPInputs = []udp.Config{udp.NewConfig()} c.ContinuousQuery = continuous_querier.NewConfig() c.Retention = retention.NewConfig() - c.HintedHandoff = hh.NewConfig() c.BindAddress = DefaultBindAddress return c @@ -104,34 +104,64 @@ c.Meta.Dir = filepath.Join(homeDir, ".influxdb/meta") c.Data.Dir = filepath.Join(homeDir, ".influxdb/data") - c.HintedHandoff.Dir = filepath.Join(homeDir, ".influxdb/hh") c.Data.WALDir = filepath.Join(homeDir, ".influxdb/wal") - c.HintedHandoff.Enabled = true - c.Admin.Enabled = true - return c, nil } +// trimBOM trims the Byte-Order-Marks from the beginning of the file. +// this is for Windows compatability only. +// see https://github.com/influxdata/telegraf/issues/1378 +func trimBOM(f []byte) []byte { + return bytes.TrimPrefix(f, []byte("\xef\xbb\xbf")) +} + +// FromTomlFile loads the config from a TOML file. +func (c *Config) FromTomlFile(fpath string) error { + bs, err := ioutil.ReadFile(fpath) + if err != nil { + return err + } + bs = trimBOM(bs) + return c.FromToml(string(bs)) +} + +// FromToml loads the config from TOML. +func (c *Config) FromToml(input string) error { + // Replace deprecated [cluster] with [coordinator] + re := regexp.MustCompile(`(?m)^\s*\[cluster\]`) + input = re.ReplaceAllStringFunc(input, func(in string) string { + in = strings.TrimSpace(in) + out := "[coordinator]" + log.Printf("deprecated config option %s replaced with %s; %s will not be supported in a future release\n", in, out, in) + return out + }) + + _, err := toml.Decode(input, c) + return err +} + // Validate returns an error if the config is invalid. 
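Config.FromToml above rewrites a deprecated [cluster] section header to [coordinator] with a multi-line regular expression before handing the document to the TOML decoder, logging a deprecation warning as it does so. The self-contained sketch below exercises just that rewrite on a small sample config (the option key shown is only an example):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	input := "[meta]\n  dir = \"/tmp/meta\"\n\n[cluster]\n  write-timeout = \"10s\"\n"

	// Same pattern as Config.FromToml: match a [cluster] header at the start of a line.
	re := regexp.MustCompile(`(?m)^\s*\[cluster\]`)
	output := re.ReplaceAllString(input, "[coordinator]")

	// The [cluster] header now reads [coordinator]; the rest of the document is untouched.
	fmt.Print(output)
}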
func (c *Config) Validate() error { - if !c.Meta.Enabled && !c.Data.Enabled { - return errors.New("either Meta, Data, or both must be enabled") - } if err := c.Meta.Validate(); err != nil { return err } + if err := c.Data.Validate(); err != nil { return err } - if c.Data.Enabled { - if err := c.HintedHandoff.Validate(); err != nil { - return err - } - for _, g := range c.Graphites { - if err := g.Validate(); err != nil { - return fmt.Errorf("invalid graphite config: %v", err) - } + + if err := c.Monitor.Validate(); err != nil { + return err + } + + if err := c.Subscriber.Validate(); err != nil { + return err + } + + for _, g := range c.GraphiteInputs { + if err := g.Validate(); err != nil { + return fmt.Errorf("invalid graphite config: %v", err) } } @@ -178,6 +208,9 @@ // e.g. GRAPHITE_0 if f.Kind() == reflect.Slice || f.Kind() == reflect.Array { for i := 0; i < f.Len(); i++ { + if err := c.applyEnvOverrides(key, f.Index(i)); err != nil { + return err + } if err := c.applyEnvOverrides(fmt.Sprintf("%s_%d", key, i), f.Index(i)); err != nil { return err } @@ -221,6 +254,15 @@ } f.SetInt(intValue) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + var intValue uint64 + var err error + intValue, err = strconv.ParseUint(value, 0, f.Type().Bits()) + if err != nil { + return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", key, fieldKey, f.Type().String(), value) + } + + f.SetUint(intValue) case reflect.Bool: boolValue, err := strconv.ParseBool(value) if err != nil { diff -Nru influxdb-0.10.0+dfsg1/cmd/influxd/run/config_test.go influxdb-1.1.1+dfsg1/cmd/influxd/run/config_test.go --- influxdb-0.10.0+dfsg1/cmd/influxd/run/config_test.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influxd/run/config_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -5,21 +5,21 @@ "testing" "github.com/BurntSushi/toml" - "github.com/influxdb/influxdb/cmd/influxd/run" + "github.com/influxdata/influxdb/cmd/influxd/run" ) // Ensure the configuration can be parsed. func TestConfig_Parse(t *testing.T) { // Parse configuration. 
var c run.Config - if _, err := toml.Decode(` + if err := c.FromToml(` [meta] dir = "/tmp/meta" [data] dir = "/tmp/data" -[cluster] +[coordinator] [admin] bind-address = ":8083" @@ -33,12 +33,21 @@ [[graphite]] protocol = "tcp" -[collectd] +[[collectd]] bind-address = ":1000" -[opentsdb] +[[collectd]] +bind-address = ":1010" + +[[opentsdb]] bind-address = ":2000" +[[opentsdb]] +bind-address = ":2010" + +[[opentsdb]] +bind-address = ":2020" + [[udp]] bind-address = ":4444" @@ -50,7 +59,7 @@ [continuous_queries] enabled = true -`, &c); err != nil { +`); err != nil { t.Fatal(err) } @@ -63,18 +72,24 @@ t.Fatalf("unexpected admin bind address: %s", c.Admin.BindAddress) } else if c.HTTPD.BindAddress != ":8087" { t.Fatalf("unexpected api bind address: %s", c.HTTPD.BindAddress) - } else if len(c.Graphites) != 2 { - t.Fatalf("unexpected graphites count: %d", len(c.Graphites)) - } else if c.Graphites[0].Protocol != "udp" { - t.Fatalf("unexpected graphite protocol(0): %s", c.Graphites[0].Protocol) - } else if c.Graphites[1].Protocol != "tcp" { - t.Fatalf("unexpected graphite protocol(1): %s", c.Graphites[1].Protocol) - } else if c.Collectd.BindAddress != ":1000" { - t.Fatalf("unexpected collectd bind address: %s", c.Collectd.BindAddress) - } else if c.OpenTSDB.BindAddress != ":2000" { - t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDB.BindAddress) - } else if c.UDPs[0].BindAddress != ":4444" { - t.Fatalf("unexpected udp bind address: %s", c.UDPs[0].BindAddress) + } else if len(c.GraphiteInputs) != 2 { + t.Fatalf("unexpected graphiteInputs count: %d", len(c.GraphiteInputs)) + } else if c.GraphiteInputs[0].Protocol != "udp" { + t.Fatalf("unexpected graphite protocol(0): %s", c.GraphiteInputs[0].Protocol) + } else if c.GraphiteInputs[1].Protocol != "tcp" { + t.Fatalf("unexpected graphite protocol(1): %s", c.GraphiteInputs[1].Protocol) + } else if c.CollectdInputs[0].BindAddress != ":1000" { + t.Fatalf("unexpected collectd bind address: %s", c.CollectdInputs[0].BindAddress) + } else if c.CollectdInputs[1].BindAddress != ":1010" { + t.Fatalf("unexpected collectd bind address: %s", c.CollectdInputs[1].BindAddress) + } else if c.OpenTSDBInputs[0].BindAddress != ":2000" { + t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDBInputs[0].BindAddress) + } else if c.OpenTSDBInputs[1].BindAddress != ":2010" { + t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDBInputs[1].BindAddress) + } else if c.OpenTSDBInputs[2].BindAddress != ":2020" { + t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDBInputs[2].BindAddress) + } else if c.UDPInputs[0].BindAddress != ":4444" { + t.Fatalf("unexpected udp bind address: %s", c.UDPInputs[0].BindAddress) } else if c.Subscriber.Enabled != true { t.Fatalf("unexpected subscriber enabled: %v", c.Subscriber.Enabled) } else if c.ContinuousQuery.Enabled != true { @@ -93,7 +108,7 @@ [data] dir = "/tmp/data" -[cluster] +[coordinator] [admin] bind-address = ":8083" @@ -107,15 +122,23 @@ [[graphite]] protocol = "tcp" -[collectd] +[[collectd]] bind-address = ":1000" -[opentsdb] +[[collectd]] +bind-address = ":1010" + +[[opentsdb]] bind-address = ":2000" +[[opentsdb]] +bind-address = ":2010" + [[udp]] bind-address = ":4444" +[[udp]] + [monitoring] enabled = true @@ -129,20 +152,53 @@ t.Fatalf("failed to set env var: %v", err) } + if err := os.Setenv("INFLUXDB_UDP_0_BIND_ADDRESS", ":5555"); err != nil { + t.Fatalf("failed to set env var: %v", err) + } + if err := os.Setenv("INFLUXDB_GRAPHITE_1_PROTOCOL", "udp"); err != nil { t.Fatalf("failed to set env var: 
%v", err) } + if err := os.Setenv("INFLUXDB_COLLECTD_1_BIND_ADDRESS", ":1020"); err != nil { + t.Fatalf("failed to set env var: %v", err) + } + + if err := os.Setenv("INFLUXDB_OPENTSDB_0_BIND_ADDRESS", ":2020"); err != nil { + t.Fatalf("failed to set env var: %v", err) + } + + // uint64 type + if err := os.Setenv("INFLUXDB_DATA_CACHE_MAX_MEMORY_SIZE", "1000"); err != nil { + t.Fatalf("failed to set env var: %v", err) + } + if err := c.ApplyEnvOverrides(); err != nil { t.Fatalf("failed to apply env overrides: %v", err) } - if c.UDPs[0].BindAddress != ":4444" { - t.Fatalf("unexpected udp bind address: %s", c.UDPs[0].BindAddress) + if c.UDPInputs[0].BindAddress != ":5555" { + t.Fatalf("unexpected udp bind address: %s", c.UDPInputs[0].BindAddress) + } + + if c.UDPInputs[1].BindAddress != ":1234" { + t.Fatalf("unexpected udp bind address: %s", c.UDPInputs[1].BindAddress) } - if c.Graphites[1].Protocol != "udp" { - t.Fatalf("unexpected graphite protocol(0): %s", c.Graphites[0].Protocol) + if c.GraphiteInputs[1].Protocol != "udp" { + t.Fatalf("unexpected graphite protocol: %s", c.GraphiteInputs[1].Protocol) + } + + if c.CollectdInputs[1].BindAddress != ":1020" { + t.Fatalf("unexpected collectd bind address: %s", c.CollectdInputs[1].BindAddress) + } + + if c.OpenTSDBInputs[0].BindAddress != ":2020" { + t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDBInputs[0].BindAddress) + } + + if c.Data.CacheMaxMemorySize != 1000 { + t.Fatalf("unexpected cache max memory size: %v", c.Data.CacheMaxMemorySize) } } @@ -159,6 +215,43 @@ } if e := c.Validate(); e == nil { - t.Fatalf("expected error, got nil") + t.Fatalf("got nil, expected error") + } +} + +func TestConfig_ValidateMonitorStore_MetaOnly(t *testing.T) { + c := run.NewConfig() + if _, err := toml.Decode(` +[monitor] +store-enabled = true + +[meta] +dir = "foo" + +[data] +enabled = false +`, &c); err != nil { + t.Fatal(err) + } + + if err := c.Validate(); err == nil { + t.Fatalf("got nil, expected error") + } +} + +func TestConfig_DeprecatedOptions(t *testing.T) { + // Parse configuration. + var c run.Config + if err := c.FromToml(` +[cluster] +max-select-point = 100 +`); err != nil { + t.Fatal(err) + } + + // Validate configuration. + if c.Coordinator.MaxSelectPointN != 100 { + t.Fatalf("unexpected coordinator max select points: %d", c.Coordinator.MaxSelectPointN) + } } diff -Nru influxdb-0.10.0+dfsg1/cmd/influxd/run/server_bench_test.go influxdb-1.1.1+dfsg1/cmd/influxd/run/server_bench_test.go --- influxdb-0.10.0+dfsg1/cmd/influxd/run/server_bench_test.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influxd/run/server_bench_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,136 @@ +package run_test + +import ( + "bytes" + "fmt" + "net/url" + "testing" +) + +var strResult string + +func BenchmarkServer_Query_Count_1(b *testing.B) { benchmarkServerQueryCount(b, 1) } +func BenchmarkServer_Query_Count_1K(b *testing.B) { benchmarkServerQueryCount(b, 1000) } +func BenchmarkServer_Query_Count_100K(b *testing.B) { benchmarkServerQueryCount(b, 100000) } +func BenchmarkServer_Query_Count_1M(b *testing.B) { benchmarkServerQueryCount(b, 1000000) } + +func benchmarkServerQueryCount(b *testing.B, pointN int) { + if _, err := benchServer.Query(`DROP MEASUREMENT cpu`); err != nil { + b.Fatal(err) + } + + // Write data into server. 
+ var buf bytes.Buffer + for i := 0; i < pointN; i++ { + fmt.Fprintf(&buf, `cpu value=100 %d`, i+1) + if i != pointN-1 { + fmt.Fprint(&buf, "\n") + } + } + benchServer.MustWrite("db0", "rp0", buf.String(), nil) + + // Query simple count from server. + b.ResetTimer() + b.ReportAllocs() + var err error + for i := 0; i < b.N; i++ { + if strResult, err = benchServer.Query(`SELECT count(value) FROM db0.rp0.cpu`); err != nil { + b.Fatal(err) + } else if strResult != fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",%d]]}]}]}`, pointN) { + b.Fatalf("unexpected result: %s", strResult) + } + } +} + +func BenchmarkServer_Query_Count_Where_500(b *testing.B) { + benchmarkServerQueryCountWhere(b, false, 500) +} +func BenchmarkServer_Query_Count_Where_1K(b *testing.B) { + benchmarkServerQueryCountWhere(b, false, 1000) +} +func BenchmarkServer_Query_Count_Where_10K(b *testing.B) { + benchmarkServerQueryCountWhere(b, false, 10000) +} +func BenchmarkServer_Query_Count_Where_100K(b *testing.B) { + benchmarkServerQueryCountWhere(b, false, 100000) +} + +func BenchmarkServer_Query_Count_Where_Regex_500(b *testing.B) { + benchmarkServerQueryCountWhere(b, true, 500) +} +func BenchmarkServer_Query_Count_Where_Regex_1K(b *testing.B) { + benchmarkServerQueryCountWhere(b, true, 1000) +} +func BenchmarkServer_Query_Count_Where_Regex_10K(b *testing.B) { + benchmarkServerQueryCountWhere(b, true, 10000) +} +func BenchmarkServer_Query_Count_Where_Regex_100K(b *testing.B) { + benchmarkServerQueryCountWhere(b, true, 100000) +} + +func benchmarkServerQueryCountWhere(b *testing.B, useRegex bool, pointN int) { + if _, err := benchServer.Query(`DROP MEASUREMENT cpu`); err != nil { + b.Fatal(err) + } + + // Write data into server. + var buf bytes.Buffer + for i := 0; i < pointN; i++ { + fmt.Fprintf(&buf, `cpu,host=server-%d value=100 %d`, i, i) + if i != pointN-1 { + fmt.Fprint(&buf, "\n") + } + } + benchServer.MustWrite("db0", "rp0", buf.String(), nil) + + // Query count from server with WHERE + var ( + err error + condition = `host = 'server-487'` + ) + + if useRegex { + condition = `host =~ /^server-487$/` + } + + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + if strResult, err = benchServer.Query(fmt.Sprintf(`SELECT count(value) FROM db0.rp0.cpu WHERE %s`, condition)); err != nil { + b.Fatal(err) + } else if strResult == `{"results":[{}]}` { + b.Fatal("no results") + } + } +} + +func BenchmarkServer_ShowSeries_1(b *testing.B) { benchmarkServerShowSeries(b, 1) } +func BenchmarkServer_ShowSeries_1K(b *testing.B) { benchmarkServerShowSeries(b, 1000) } +func BenchmarkServer_ShowSeries_100K(b *testing.B) { benchmarkServerShowSeries(b, 100000) } +func BenchmarkServer_ShowSeries_1M(b *testing.B) { benchmarkServerShowSeries(b, 1000000) } + +func benchmarkServerShowSeries(b *testing.B, pointN int) { + if _, err := benchServer.Query(`DROP MEASUREMENT cpu`); err != nil { + b.Fatal(err) + } + + // Write data into server. + var buf bytes.Buffer + for i := 0; i < pointN; i++ { + fmt.Fprintf(&buf, `cpu,host=server%d value=100 %d`, i, i+1) + if i != pointN-1 { + fmt.Fprint(&buf, "\n") + } + } + benchServer.MustWrite("db0", "rp0", buf.String(), nil) + + // Query simple count from server. 
+ b.ResetTimer() + b.ReportAllocs() + var err error + for i := 0; i < b.N; i++ { + if strResult, err = benchServer.QueryWithParams(`SHOW SERIES`, url.Values{"db": {"db0"}}); err != nil { + b.Fatal(err) + } + } +} diff -Nru influxdb-0.10.0+dfsg1/cmd/influxd/run/server_cluster_test.go influxdb-1.1.1+dfsg1/cmd/influxd/run/server_cluster_test.go --- influxdb-0.10.0+dfsg1/cmd/influxd/run/server_cluster_test.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influxd/run/server_cluster_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,357 +0,0 @@ -package run_test - -import ( - "fmt" - "strings" - "testing" - "time" - - "github.com/influxdb/influxdb/cmd/influxd/run" -) - -func TestCluster_CreateDatabase(t *testing.T) { - t.Skip() - t.Parallel() - - c, err := NewClusterWithDefaults(5) - defer c.Close() - if err != nil { - t.Fatalf("error creating cluster: %s", err) - } -} - -func TestCluster_Write(t *testing.T) { - t.Skip() - t.Parallel() - - c, err := NewClusterWithDefaults(5) - if err != nil { - t.Fatalf("error creating cluster: %s", err) - } - defer c.Close() - - writes := []string{ - fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - } - - _, err = c.Servers[0].Write("db0", "default", strings.Join(writes, "\n"), nil) - if err != nil { - t.Fatal(err) - } - - q := &Query{ - name: "write", - command: `SELECT * FROM db0."default".cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","host","region","val"],"values":[["2000-01-01T00:00:00Z","serverA","uswest",23.2]]}]}]}`, - } - err = c.QueryAll(q) - if err != nil { - t.Fatal(err) - } -} - -func TestCluster_DatabaseCommands(t *testing.T) { - t.Skip() - t.Parallel() - c, err := NewCluster(5) - if err != nil { - t.Fatalf("error creating cluster: %s", err) - } - - defer c.Close() - - test := tests.load(t, "database_commands") - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - t.Logf("Running %s", query.name) - if query.once { - if _, err := c.Query(query); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - continue - } - if err := c.QueryAll(query); err != nil { - t.Error(query.Error(err)) - } - } -} - -func TestCluster_Query_DropAndRecreateDatabase(t *testing.T) { - t.Skip() - t.Parallel() - c, err := NewCluster(5) - if err != nil { - t.Fatalf("error creating cluster: %s", err) - } - defer c.Close() - - test := tests.load(t, "drop_and_recreate_database") - - s := c.Servers[0] - if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicyInfo(test.retentionPolicy(), 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaClient.SetDefaultRetentionPolicy(test.database(), test.retentionPolicy()); err != nil { - t.Fatal(err) - } - - if err = writeTestData(c.Servers[0], &test); err != nil { - t.Fatal(err) - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - t.Logf("Running %s", query.name) - if query.once { - if _, err := c.Query(query); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - continue - } - if err := c.QueryAll(query); err != nil { - t.Error(query.Error(err)) - } - } -} - -func TestCluster_Query_DropDatabaseIsolated(t *testing.T) { - t.Skip() - t.Parallel() - c, err := NewCluster(5) - if err != nil { - t.Fatalf("error creating cluster: %s", err) - } - defer c.Close() - - test 
:= tests.load(t, "drop_database_isolated") - - s := c.Servers[0] - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - if err := s.CreateDatabaseAndRetentionPolicy("db1", newRetentionPolicyInfo("rp1", 1, 0)); err != nil { - t.Fatal(err) - } - - if err = writeTestData(s, &test); err != nil { - t.Fatal(err) - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - t.Logf("Running %s", query.name) - if query.once { - if _, err := c.Query(query); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - continue - } - if err := c.QueryAll(query); err != nil { - t.Error(query.Error(err)) - } - } -} - -func TestCluster_Query_DropAndRecreateSeries(t *testing.T) { - t.Parallel() - t.Skip() - c, err := NewCluster(5) - if err != nil { - t.Fatalf("error creating cluster: %s", err) - } - defer c.Close() - - test := tests.load(t, "drop_and_recreate_series") - - s := c.Servers[0] - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - if err = writeTestData(s, &test); err != nil { - t.Fatal(err) - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - t.Logf("Running %s", query.name) - if query.once { - if _, err := c.Query(query); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - continue - } - if err := c.QueryAll(query); err != nil { - t.Fatal(query.Error(err)) - } - } - - // Re-write data and test again. 
- retest := tests.load(t, "drop_and_recreate_series_retest") - - if err = writeTestData(s, &test); err != nil { - t.Fatal(err) - } - - for _, query := range retest.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - t.Logf("Running %s", query.name) - if query.once { - if _, err := c.Query(query); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - continue - } - if err := c.QueryAll(query); err != nil { - t.Error(query.Error(err)) - } - } -} - -func TestCluster_Query_DropSeriesFromRegex(t *testing.T) { - t.Parallel() - t.Skip() - c, err := NewCluster(5) - if err != nil { - t.Fatalf("error creating cluster: %s", err) - } - defer c.Close() - - test := tests.load(t, "drop_series_from_regex") - - s := c.Servers[0] - if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicyInfo(test.retentionPolicy(), 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaClient.SetDefaultRetentionPolicy(test.database(), test.retentionPolicy()); err != nil { - t.Fatal(err) - } - - if err = writeTestData(s, &test); err != nil { - t.Fatal(err) - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - t.Logf("Running %s", query.name) - if query.once { - if _, err := c.Query(query); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - continue - } - if err := c.QueryAll(query); err != nil { - t.Error(query.Error(err)) - } - } -} - -func TestCluster_RetentionPolicyCommands(t *testing.T) { - t.Skip() - t.Parallel() - - configFunc := func(index int, config *run.Config) { - config.Meta.RetentionAutoCreate = false - } - - c, err := NewClusterCustom(5, configFunc) - - if err != nil { - t.Fatalf("error creating cluster: %s", err) - } - defer c.Close() - - test := tests.load(t, "retention_policy_commands") - - s := c.Servers[0] - if _, err := s.MetaClient.CreateDatabase(test.database()); err != nil { - t.Fatal(err) - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - t.Logf("Running %s", query.name) - if query.once { - if _, err := c.Query(query); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - continue - } - if err := c.QueryAll(query); err != nil { - t.Error(query.Error(err)) - } - } -} - -func TestCluster_DatabaseRetentionPolicyAutoCreate(t *testing.T) { - t.Parallel() - t.Skip() - c, err := NewCluster(5) - if err != nil { - t.Fatalf("error creating cluster: %s", err) - } - defer c.Close() - - test := tests.load(t, "retention_policy_auto_create") - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - t.Logf("Running %s", query.name) - if query.once { - if _, err := c.Query(query); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - continue - } - if err := c.QueryAll(query); err != nil { - t.Error(query.Error(err)) - } - } -} diff -Nru influxdb-0.10.0+dfsg1/cmd/influxd/run/server.go influxdb-1.1.1+dfsg1/cmd/influxd/run/server.go --- influxdb-0.10.0+dfsg1/cmd/influxd/run/server.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influxd/run/server.go 2016-12-06 21:36:15.000000000 +0000 @@ -2,39 +2,45 @@ import ( "fmt" + "io" "log" "net" "os" "path/filepath" - "reflect" "runtime" "runtime/pprof" "time" - "github.com/influxdb/influxdb" - 
"github.com/influxdb/influxdb/cluster" - "github.com/influxdb/influxdb/monitor" - "github.com/influxdb/influxdb/services/admin" - "github.com/influxdb/influxdb/services/collectd" - "github.com/influxdb/influxdb/services/continuous_querier" - "github.com/influxdb/influxdb/services/copier" - "github.com/influxdb/influxdb/services/graphite" - "github.com/influxdb/influxdb/services/hh" - "github.com/influxdb/influxdb/services/httpd" - "github.com/influxdb/influxdb/services/meta" - "github.com/influxdb/influxdb/services/opentsdb" - "github.com/influxdb/influxdb/services/precreator" - "github.com/influxdb/influxdb/services/retention" - "github.com/influxdb/influxdb/services/snapshotter" - "github.com/influxdb/influxdb/services/subscriber" - "github.com/influxdb/influxdb/services/udp" - "github.com/influxdb/influxdb/tcp" - "github.com/influxdb/influxdb/tsdb" - "github.com/influxdb/usage-client/v1" + "github.com/influxdata/influxdb" + "github.com/influxdata/influxdb/coordinator" + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/monitor" + "github.com/influxdata/influxdb/services/admin" + "github.com/influxdata/influxdb/services/collectd" + "github.com/influxdata/influxdb/services/continuous_querier" + "github.com/influxdata/influxdb/services/graphite" + "github.com/influxdata/influxdb/services/httpd" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/services/opentsdb" + "github.com/influxdata/influxdb/services/precreator" + "github.com/influxdata/influxdb/services/retention" + "github.com/influxdata/influxdb/services/snapshotter" + "github.com/influxdata/influxdb/services/subscriber" + "github.com/influxdata/influxdb/services/udp" + "github.com/influxdata/influxdb/tcp" + "github.com/influxdata/influxdb/tsdb" + client "github.com/influxdata/usage-client/v1" // Initialize the engine packages - _ "github.com/influxdb/influxdb/tsdb/engine" + _ "github.com/influxdata/influxdb/tsdb/engine" ) +var startTime time.Time + +func init() { + startTime = time.Now().UTC() +} + // BuildInfo represents the build details for the server code. type BuildInfo struct { Version string @@ -55,25 +61,19 @@ BindAddress string Listener net.Listener - Node *influxdb.Node + Logger *log.Logger - MetaClient *meta.Client - MetaService *meta.Service + MetaClient *meta.Client TSDBStore *tsdb.Store - QueryExecutor *tsdb.QueryExecutor - PointsWriter *cluster.PointsWriter - ShardWriter *cluster.ShardWriter - ShardMapper *cluster.ShardMapper - HintedHandoff *hh.Service + QueryExecutor *influxql.QueryExecutor + PointsWriter *coordinator.PointsWriter Subscriber *subscriber.Service Services []Service // These references are required for the tcp muxer. - ClusterService *cluster.Service SnapshotterService *snapshotter.Service - CopierService *copier.Service Monitor *monitor.Monitor @@ -84,12 +84,6 @@ CPUProfile string MemProfile string - // joinPeers are the metaservers specified at run time to join this server to - joinPeers []string - - // metaUseTLS specifies if we should use a TLS connection to the meta servers - metaUseTLS bool - // httpAPIAddr is the host:port combination for the main HTTP API for querying and writing data httpAPIAddr string @@ -100,6 +94,10 @@ tcpAddr string config *Config + + // logOutput is the writer to which all services should be configured to + // write logs to after appension. + logOutput io.Writer } // NewServer returns a new instance of Server built from a config. 
@@ -123,40 +121,20 @@ } } - // load the node information - metaAddresses := []string{c.Meta.HTTPBindAddress} - if !c.Meta.Enabled { - metaAddresses = c.Meta.JoinPeers - } - - node, err := influxdb.LoadNode(c.Meta.Dir, metaAddresses) + _, err := influxdb.LoadNode(c.Meta.Dir) if err != nil { if !os.IsNotExist(err) { return nil, err - } else { - node = influxdb.NewNode(c.Meta.Dir, metaAddresses) } } + if err := raftDBExists(c.Meta.Dir); err != nil { + return nil, err + } + // In 0.10.0 bind-address got moved to the top level. Check // The old location to keep things backwards compatible bind := c.BindAddress - if c.Meta.BindAddress != "" { - bind = c.Meta.BindAddress - } - - if !c.Data.Enabled && !c.Meta.Enabled { - return nil, fmt.Errorf("must run as either meta node or data node or both") - } - - httpBindAddress, err := defaultHost(DefaultHostname, c.HTTPD.BindAddress) - if err != nil { - return nil, err - } - tcpBindAddress, err := defaultHost(DefaultHostname, bind) - if err != nil { - return nil, err - } s := &Server{ buildInfo: *buildInfo, @@ -165,103 +143,96 @@ BindAddress: bind, - Node: node, + Logger: log.New(os.Stderr, "", log.LstdFlags), - Monitor: monitor.New(c.Monitor), + MetaClient: meta.NewClient(c.Meta), reportingDisabled: c.ReportingDisabled, - joinPeers: c.Meta.JoinPeers, - metaUseTLS: c.Meta.HTTPSEnabled, - httpAPIAddr: httpBindAddress, + httpAPIAddr: c.HTTPD.BindAddress, httpUseTLS: c.HTTPD.HTTPSEnabled, - tcpAddr: tcpBindAddress, + tcpAddr: bind, - config: c, + config: c, + logOutput: os.Stderr, } + s.Monitor = monitor.New(s, c.Monitor) - if c.Meta.Enabled { - s.MetaService = meta.NewService(c.Meta) + if err := s.MetaClient.Open(); err != nil { + return nil, err } - if c.Data.Enabled { - s.TSDBStore = tsdb.NewStore(c.Data.Dir) - s.TSDBStore.EngineOptions.Config = c.Data - - // Copy TSDB configuration. - s.TSDBStore.EngineOptions.EngineVersion = c.Data.Engine - s.TSDBStore.EngineOptions.MaxWALSize = c.Data.MaxWALSize - s.TSDBStore.EngineOptions.WALFlushInterval = time.Duration(c.Data.WALFlushInterval) - s.TSDBStore.EngineOptions.WALPartitionFlushDelay = time.Duration(c.Data.WALPartitionFlushDelay) - - // Set the shard mapper - s.ShardMapper = cluster.NewShardMapper(time.Duration(c.Cluster.ShardMapperTimeout)) - s.ShardMapper.ForceRemoteMapping = c.Cluster.ForceRemoteShardMapping - s.ShardMapper.TSDBStore = s.TSDBStore - s.ShardMapper.Node = node - - // Initialize query executor. - s.QueryExecutor = tsdb.NewQueryExecutor(s.TSDBStore) - s.QueryExecutor.MonitorStatementExecutor = &monitor.StatementExecutor{Monitor: s.Monitor} - s.QueryExecutor.ShardMapper = s.ShardMapper - s.QueryExecutor.QueryLogEnabled = c.Data.QueryLogEnabled - - // Set the shard writer - s.ShardWriter = cluster.NewShardWriter(time.Duration(c.Cluster.ShardWriterTimeout), - c.Cluster.MaxRemoteWriteConnections) - - // Create the hinted handoff service - s.HintedHandoff = hh.NewService(c.HintedHandoff, s.ShardWriter, s.MetaClient) - s.HintedHandoff.Monitor = s.Monitor - - // Create the Subscriber service - s.Subscriber = subscriber.NewService(c.Subscriber) - - // Initialize points writer. - s.PointsWriter = cluster.NewPointsWriter() - s.PointsWriter.WriteTimeout = time.Duration(c.Cluster.WriteTimeout) - s.PointsWriter.TSDBStore = s.TSDBStore - s.PointsWriter.ShardWriter = s.ShardWriter - s.PointsWriter.HintedHandoff = s.HintedHandoff - s.PointsWriter.Subscriber = s.Subscriber - s.PointsWriter.Node = s.Node - - // needed for executing INTO queries. 
- s.QueryExecutor.IntoWriter = s.PointsWriter - - // Initialize the monitor - s.Monitor.Version = s.buildInfo.Version - s.Monitor.Commit = s.buildInfo.Commit - s.Monitor.Branch = s.buildInfo.Branch - s.Monitor.BuildTime = s.buildInfo.Time - s.Monitor.PointsWriter = s.PointsWriter - } + s.TSDBStore = tsdb.NewStore(c.Data.Dir) + s.TSDBStore.EngineOptions.Config = c.Data + // Copy TSDB configuration. + s.TSDBStore.EngineOptions.EngineVersion = c.Data.Engine + + // Create the Subscriber service + s.Subscriber = subscriber.NewService(c.Subscriber) + + // Initialize points writer. + s.PointsWriter = coordinator.NewPointsWriter() + s.PointsWriter.WriteTimeout = time.Duration(c.Coordinator.WriteTimeout) + s.PointsWriter.TSDBStore = s.TSDBStore + s.PointsWriter.Subscriber = s.Subscriber + + // Initialize query executor. + s.QueryExecutor = influxql.NewQueryExecutor() + s.QueryExecutor.StatementExecutor = &coordinator.StatementExecutor{ + MetaClient: s.MetaClient, + TaskManager: s.QueryExecutor.TaskManager, + TSDBStore: coordinator.LocalTSDBStore{Store: s.TSDBStore}, + Monitor: s.Monitor, + PointsWriter: s.PointsWriter, + MaxSelectPointN: c.Coordinator.MaxSelectPointN, + MaxSelectSeriesN: c.Coordinator.MaxSelectSeriesN, + MaxSelectBucketsN: c.Coordinator.MaxSelectBucketsN, + } + s.QueryExecutor.TaskManager.QueryTimeout = time.Duration(c.Coordinator.QueryTimeout) + s.QueryExecutor.TaskManager.LogQueriesAfter = time.Duration(c.Coordinator.LogQueriesAfter) + s.QueryExecutor.TaskManager.MaxConcurrentQueries = c.Coordinator.MaxConcurrentQueries + + // Initialize the monitor + s.Monitor.Version = s.buildInfo.Version + s.Monitor.Commit = s.buildInfo.Commit + s.Monitor.Branch = s.buildInfo.Branch + s.Monitor.BuildTime = s.buildInfo.Time + s.Monitor.PointsWriter = (*monitorPointsWriter)(s.PointsWriter) return s, nil } -func (s *Server) appendClusterService(c cluster.Config) { - srv := cluster.NewService(c) - srv.TSDBStore = s.TSDBStore - srv.MetaClient = s.MetaClient - s.Services = append(s.Services, srv) - s.ClusterService = srv +func (s *Server) Statistics(tags map[string]string) []models.Statistic { + var statistics []models.Statistic + statistics = append(statistics, s.QueryExecutor.Statistics(tags)...) + statistics = append(statistics, s.TSDBStore.Statistics(tags)...) + statistics = append(statistics, s.PointsWriter.Statistics(tags)...) + statistics = append(statistics, s.Subscriber.Statistics(tags)...) + for _, srv := range s.Services { + if m, ok := srv.(monitor.Reporter); ok { + statistics = append(statistics, m.Statistics(tags)...) + } + } + return statistics } func (s *Server) appendSnapshotterService() { srv := snapshotter.NewService() srv.TSDBStore = s.TSDBStore srv.MetaClient = s.MetaClient - srv.Node = s.Node s.Services = append(s.Services, srv) s.SnapshotterService = srv } -func (s *Server) appendCopierService() { - srv := copier.NewService() - srv.TSDBStore = s.TSDBStore - s.Services = append(s.Services, srv) - s.CopierService = srv +// SetLogOutput sets the logger used for all messages. It must not be called +// after the Open method has been called. 
+func (s *Server) SetLogOutput(w io.Writer) { + s.Logger = log.New(os.Stderr, "", log.LstdFlags) + s.logOutput = w +} + +func (s *Server) appendMonitorService() { + s.Services = append(s.Services, s.Monitor) } func (s *Server) appendRetentionPolicyService(c retention.Config) { @@ -278,6 +249,7 @@ if !c.Enabled { return } + c.Version = s.buildInfo.Version srv := admin.NewService(c) s.Services = append(s.Services, srv) } @@ -288,17 +260,13 @@ } srv := httpd.NewService(c) srv.Handler.MetaClient = s.MetaClient + srv.Handler.QueryAuthorizer = meta.NewQueryAuthorizer(s.MetaClient) + srv.Handler.WriteAuthorizer = meta.NewWriteAuthorizer(s.MetaClient) srv.Handler.QueryExecutor = s.QueryExecutor + srv.Handler.Monitor = s.Monitor srv.Handler.PointsWriter = s.PointsWriter srv.Handler.Version = s.buildInfo.Version - // If a ContinuousQuerier service has been started, attach it. - for _, srvc := range s.Services { - if cqsrvc, ok := srvc.(continuous_querier.ContinuousQuerier); ok { - srv.Handler.ContinuousQuerier = cqsrvc - } - } - s.Services = append(s.Services, srv) } @@ -395,85 +363,73 @@ mux := tcp.NewMux() go mux.Serve(ln) - if s.MetaService != nil { - s.MetaService.RaftListener = mux.Listen(meta.MuxHeader) - // Open meta service. - if err := s.MetaService.Open(); err != nil { - return fmt.Errorf("open meta service: %s", err) + // Append services. + s.appendMonitorService() + s.appendPrecreatorService(s.config.Precreator) + s.appendSnapshotterService() + s.appendAdminService(s.config.Admin) + s.appendContinuousQueryService(s.config.ContinuousQuery) + s.appendHTTPDService(s.config.HTTPD) + s.appendRetentionPolicyService(s.config.Retention) + for _, i := range s.config.GraphiteInputs { + if err := s.appendGraphiteService(i); err != nil { + return err } - go s.monitorErrorChan(s.MetaService.Err()) } - - // initialize MetaClient. - if err = s.initializeMetaClient(); err != nil { - return err + for _, i := range s.config.CollectdInputs { + s.appendCollectdService(i) } - - if s.TSDBStore != nil { - // Append services. 
- s.appendClusterService(s.config.Cluster) - s.appendPrecreatorService(s.config.Precreator) - s.appendSnapshotterService() - s.appendCopierService() - s.appendAdminService(s.config.Admin) - s.appendContinuousQueryService(s.config.ContinuousQuery) - s.appendHTTPDService(s.config.HTTPD) - s.appendCollectdService(s.config.Collectd) - if err := s.appendOpenTSDBService(s.config.OpenTSDB); err != nil { + for _, i := range s.config.OpenTSDBInputs { + if err := s.appendOpenTSDBService(i); err != nil { return err } - for _, g := range s.config.UDPs { - s.appendUDPService(g) - } - s.appendRetentionPolicyService(s.config.Retention) - for _, g := range s.config.Graphites { - if err := s.appendGraphiteService(g); err != nil { - return err - } - } - - s.Subscriber.MetaClient = s.MetaClient - s.ShardMapper.MetaClient = s.MetaClient - s.QueryExecutor.MetaClient = s.MetaClient - s.ShardWriter.MetaClient = s.MetaClient - s.HintedHandoff.MetaClient = s.MetaClient - s.Subscriber.MetaClient = s.MetaClient - s.PointsWriter.MetaClient = s.MetaClient - s.Monitor.MetaClient = s.MetaClient + } + for _, i := range s.config.UDPInputs { + s.appendUDPService(i) + } - s.ClusterService.Listener = mux.Listen(cluster.MuxHeader) - s.SnapshotterService.Listener = mux.Listen(snapshotter.MuxHeader) - s.CopierService.Listener = mux.Listen(copier.MuxHeader) + s.Subscriber.MetaClient = s.MetaClient + s.Subscriber.MetaClient = s.MetaClient + s.PointsWriter.MetaClient = s.MetaClient + s.Monitor.MetaClient = s.MetaClient - // Open TSDB store. - if err := s.TSDBStore.Open(); err != nil { - return fmt.Errorf("open tsdb store: %s", err) - } + s.SnapshotterService.Listener = mux.Listen(snapshotter.MuxHeader) - // Open the hinted handoff service - if err := s.HintedHandoff.Open(); err != nil { - return fmt.Errorf("open hinted handoff: %s", err) - } + // Configure logging for all services and clients. + w := s.logOutput + if s.config.Meta.LoggingEnabled { + s.MetaClient.SetLogOutput(w) + } + s.TSDBStore.SetLogOutput(w) + if s.config.Data.QueryLogEnabled { + s.QueryExecutor.SetLogOutput(w) + } + s.PointsWriter.SetLogOutput(w) + s.Subscriber.SetLogOutput(w) + for _, svc := range s.Services { + svc.SetLogOutput(w) + } + s.SnapshotterService.SetLogOutput(w) + s.Monitor.SetLogOutput(w) - // Open the subcriber service - if err := s.Subscriber.Open(); err != nil { - return fmt.Errorf("open subscriber: %s", err) - } + // Open TSDB store. 
+ if err := s.TSDBStore.Open(); err != nil { + return fmt.Errorf("open tsdb store: %s", err) + } - // Open the points writer service - if err := s.PointsWriter.Open(); err != nil { - return fmt.Errorf("open points writer: %s", err) - } + // Open the subcriber service + if err := s.Subscriber.Open(); err != nil { + return fmt.Errorf("open subscriber: %s", err) + } - // Open the monitor service - if err := s.Monitor.Open(); err != nil { - return fmt.Errorf("open monitor: %v", err) - } + // Open the points writer service + if err := s.PointsWriter.Open(); err != nil { + return fmt.Errorf("open points writer: %s", err) + } - for _, service := range s.Services { - if err := service.Open(); err != nil { - return fmt.Errorf("open service: %s", err) - } + for _, service := range s.Services { + if err := service.Open(); err != nil { + return fmt.Errorf("open service: %s", err) } } @@ -500,16 +456,12 @@ service.Close() } - if s.Monitor != nil { - s.Monitor.Close() - } - if s.PointsWriter != nil { s.PointsWriter.Close() } - if s.HintedHandoff != nil { - s.HintedHandoff.Close() + if s.QueryExecutor != nil { + s.QueryExecutor.Close() } // Close the TSDBStore, no more reads or writes at this point @@ -521,11 +473,6 @@ s.Subscriber.Close() } - // Finally close the meta-store since everything else depends on it - if s.MetaService != nil { - s.MetaService.Close() - } - if s.MetaClient != nil { s.MetaClient.Close() } @@ -536,24 +483,23 @@ // startServerReporting starts periodic server reporting. func (s *Server) startServerReporting() { + s.reportServer() + + ticker := time.NewTicker(24 * time.Hour) + defer ticker.Stop() for { select { case <-s.closing: return - default: + case <-ticker.C: + s.reportServer() } - s.reportServer() - <-time.After(24 * time.Hour) } } -// reportServer reports anonymous statistics about the system. +// reportServer reports usage statistics about the system. 
func (s *Server) reportServer() { - dis, err := s.MetaClient.Databases() - if err != nil { - log.Printf("failed to retrieve databases for reporting: %s", err.Error()) - return - } + dis := s.MetaClient.Databases() numDatabases := len(dis) numMeasurements := 0 @@ -574,11 +520,6 @@ } clusterID := s.MetaClient.ClusterID() - if err != nil { - log.Printf("failed to retrieve cluster ID for reporting: %s", err.Error()) - return - } - cl := client.New("") usage := client.Usage{ Product: "influxdb", @@ -588,17 +529,17 @@ "os": runtime.GOOS, "arch": runtime.GOARCH, "version": s.buildInfo.Version, - "server_id": fmt.Sprintf("%v", s.Node.ID), "cluster_id": fmt.Sprintf("%v", clusterID), "num_series": numSeries, "num_measurements": numMeasurements, "num_databases": numDatabases, + "uptime": time.Since(startTime).Seconds(), }, }, }, } - log.Printf("Sending anonymous usage statistics to m.influxdb.com") + s.Logger.Printf("Sending usage statistics to usage.influxdata.com") go cl.Save(usage) } @@ -618,88 +559,9 @@ } } -// initializeMetaClient will set the MetaClient and join the node to the cluster if needed -func (s *Server) initializeMetaClient() error { - // if the node ID is > 0 then we just need to initialize the metaclient - if s.Node.ID > 0 { - s.MetaClient = meta.NewClient(s.Node.MetaServers, s.metaUseTLS) - if err := s.MetaClient.Open(); err != nil { - return err - } - - go s.updateMetaNodeInformation() - - return nil - } - - // It's the first time starting up and we need to either join - // the cluster or initialize this node as the first member - if len(s.joinPeers) == 0 { - // start up a new single node cluster - if s.MetaService == nil { - return fmt.Errorf("server not set to join existing cluster must run also as a meta node") - } - s.MetaClient = meta.NewClient([]string{s.MetaService.HTTPAddr()}, s.metaUseTLS) - } else { - // join this node to the cluster - s.MetaClient = meta.NewClient(s.joinPeers, s.metaUseTLS) - } - if err := s.MetaClient.Open(); err != nil { - return err - } - - if s.TSDBStore != nil { - n, err := s.MetaClient.CreateDataNode(s.httpAPIAddr, s.tcpAddr) - if err != nil { - return err - } - s.Node.ID = n.ID - } - metaNodes, err := s.MetaClient.MetaNodes() - if err != nil { - return err - } - for _, n := range metaNodes { - s.Node.AddMetaServers([]string{n.Host}) - } - - if err := s.Node.Save(); err != nil { - return err - } - - go s.updateMetaNodeInformation() - - return nil -} - -// updateMetaNodeInformation will continuously run and save the node.json file -// if the list of metaservers in the cluster changes -func (s *Server) updateMetaNodeInformation() { - for { - c := s.MetaClient.WaitForDataChanged() - select { - case <-c: - nodes, _ := s.MetaClient.MetaNodes() - var nodeAddrs []string - for _, n := range nodes { - nodeAddrs = append(nodeAddrs, n.Host) - } - if !reflect.DeepEqual(nodeAddrs, s.Node.MetaServers) { - s.Node.MetaServers = nodeAddrs - if err := s.Node.Save(); err != nil { - log.Printf("error saving node information: %s\n", err.Error()) - } else { - log.Printf("updated node metaservers with: %v\n", s.Node.MetaServers) - } - } - case <-s.closing: - return - } - } -} - // Service represents a service attached to the server. 
type Service interface { + SetLogOutput(w io.Writer) Open() error Close() error } @@ -748,19 +610,25 @@ } } -func defaultHost(hostname, addr string) (string, error) { - host, port, err := net.SplitHostPort(addr) - if err != nil { - return "", err - } - - if host == "" { - return net.JoinHostPort(hostname, port), nil - } - return addr, nil -} - type tcpaddr struct{ host string } func (a *tcpaddr) Network() string { return "tcp" } func (a *tcpaddr) String() string { return a.host } + +// monitorPointsWriter is a wrapper around `coordinator.PointsWriter` that helps +// to prevent a circular dependency between the `cluster` and `monitor` packages. +type monitorPointsWriter coordinator.PointsWriter + +func (pw *monitorPointsWriter) WritePoints(database, retentionPolicy string, points models.Points) error { + return (*coordinator.PointsWriter)(pw).WritePoints(database, retentionPolicy, models.ConsistencyLevelAny, points) +} + +func raftDBExists(dir string) error { + // Check to see if there is a raft db, if so, error out with a message + // to downgrade, export, and then import the meta data + raftFile := filepath.Join(dir, "raft.db") + if _, err := os.Stat(raftFile); err == nil { + return fmt.Errorf("detected %s. To proceed, you'll need to either 1) downgrade to v0.11.x, export your metadata, upgrade to the current version again, and then import the metadata or 2) delete the file, which will effectively reset your database. For more assistance with the upgrade, see: https://docs.influxdata.com/influxdb/v0.12/administration/upgrading/", raftFile) + } + return nil +} diff -Nru influxdb-0.10.0+dfsg1/cmd/influxd/run/server_helpers_test.go influxdb-1.1.1+dfsg1/cmd/influxd/run/server_helpers_test.go --- influxdb-0.10.0+dfsg1/cmd/influxd/run/server_helpers_test.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influxd/run/server_helpers_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -7,22 +7,19 @@ "fmt" "io" "io/ioutil" - "log" "math" "net/http" "net/url" "os" "regexp" "strings" - "sync" "testing" "time" - "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/influxdb/cmd/influxd/run" - "github.com/influxdb/influxdb/services/httpd" - "github.com/influxdb/influxdb/services/meta" - "github.com/influxdb/influxdb/toml" + "github.com/influxdata/influxdb/cmd/influxd/run" + "github.com/influxdata/influxdb/services/httpd" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/toml" ) const emptyResults = `{"results":[{}]}` @@ -49,10 +46,7 @@ } // OpenServer opens a test server. -func OpenServer(c *run.Config, joinURLs string) *Server { - if len(joinURLs) > 0 { - c.Meta.JoinPeers = strings.Split(joinURLs, ",") - } +func OpenServer(c *run.Config) *Server { s := NewServer(c) configureLogging(s) if err := s.Open(); err != nil { @@ -68,7 +62,6 @@ Commit: "", Branch: "", } - fmt.Println(">>> ", c.Data.Enabled) srv, _ := run.NewServer(c, buildInfo) s := Server{ Server: srv, @@ -83,9 +76,9 @@ } // OpenDefaultServer opens a test server with a default database & retention policy. 
-func OpenDefaultServer(c *run.Config, joinURLs string) *Server { - s := OpenServer(c, joinURLs) - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { +func OpenDefaultServer(c *run.Config) *Server { + s := OpenServer(c) + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { panic(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { @@ -99,9 +92,12 @@ if err := s.Server.Close(); err != nil { panic(err.Error()) } - os.RemoveAll(s.Config.Meta.Dir) - os.RemoveAll(s.Config.Data.Dir) - os.RemoveAll(s.Config.HintedHandoff.Dir) + if err := os.RemoveAll(s.Config.Meta.Dir); err != nil { + panic(err.Error()) + } + if err := os.RemoveAll(s.Config.Data.Dir); err != nil { + panic(err.Error()) + } } // URL returns the base URL for the httpd endpoint. @@ -115,7 +111,7 @@ } // CreateDatabaseAndRetentionPolicy will create the database and retention policy. -func (s *Server) CreateDatabaseAndRetentionPolicy(db string, rp *meta.RetentionPolicyInfo) error { +func (s *Server) CreateDatabaseAndRetentionPolicy(db string, rp *meta.RetentionPolicySpec) error { if _, err := s.MetaClient.CreateDatabase(db); err != nil { return err } else if _, err := s.MetaClient.CreateRetentionPolicy(db, rp); err != nil { @@ -129,6 +125,15 @@ return s.QueryWithParams(query, nil) } +// MustQuery executes a query against the server and returns the results. +func (s *Server) MustQuery(query string) string { + results, err := s.Query(query) + if err != nil { + panic(err) + } + return results +} + // Query executes a query against the server and returns the results. func (s *Server) QueryWithParams(query string, values url.Values) (results string, err error) { var v url.Values @@ -138,7 +143,16 @@ v, _ = url.ParseQuery(values.Encode()) } v.Set("q", query) - return s.HTTPGet(s.URL() + "/query?" + v.Encode()) + return s.HTTPPost(s.URL()+"/query?"+v.Encode(), nil) +} + +// MustQueryWithParams executes a query against the server and returns the results. +func (s *Server) MustQueryWithParams(query string, values url.Values) string { + results, err := s.QueryWithParams(query, values) + if err != nil { + panic(err) + } + return results } // HTTPGet makes an HTTP GET request to the server and returns the response. @@ -147,7 +161,7 @@ if err != nil { return "", err } - body := string(MustReadAll(resp.Body)) + body := strings.TrimSpace(string(MustReadAll(resp.Body))) switch resp.StatusCode { case http.StatusBadRequest: if !expectPattern(".*error parsing query*.", body) { @@ -168,7 +182,7 @@ if err != nil { return "", err } - body := string(MustReadAll(resp.Body)) + body := strings.TrimSpace(string(MustReadAll(resp.Body))) switch resp.StatusCode { case http.StatusBadRequest: if !expectPattern(".*error parsing query*.", body) { @@ -214,16 +228,10 @@ // NewConfig returns the default config with temporary paths. 
func NewConfig() *run.Config { c := run.NewConfig() + c.BindAddress = "127.0.0.1:0" c.ReportingDisabled = true - c.Cluster.ShardWriterTimeout = toml.Duration(30 * time.Second) - c.Cluster.WriteTimeout = toml.Duration(30 * time.Second) + c.Coordinator.WriteTimeout = toml.Duration(30 * time.Second) c.Meta.Dir = MustTempFile() - c.Meta.BindAddress = "127.0.0.1:0" - c.Meta.HTTPBindAddress = "127.0.0.1:0" - c.Meta.HeartbeatTimeout = toml.Duration(50 * time.Millisecond) - c.Meta.ElectionTimeout = toml.Duration(50 * time.Millisecond) - c.Meta.LeaderLeaseTimeout = toml.Duration(50 * time.Millisecond) - c.Meta.CommitTimeout = toml.Duration(5 * time.Millisecond) if !testing.Verbose() { c.Meta.LoggingEnabled = false @@ -231,9 +239,6 @@ c.Data.Dir = MustTempFile() c.Data.WALDir = MustTempFile() - c.Data.WALLoggingEnabled = false - - c.HintedHandoff.Dir = MustTempFile() c.HTTPD.Enabled = true c.HTTPD.BindAddress = "127.0.0.1:0" @@ -244,8 +249,8 @@ return c } -func newRetentionPolicyInfo(name string, rf int, duration time.Duration) *meta.RetentionPolicyInfo { - return &meta.RetentionPolicyInfo{Name: name, ReplicaN: rf, Duration: duration} +func newRetentionPolicySpec(name string, rf int, duration time.Duration) *meta.RetentionPolicySpec { + return &meta.RetentionPolicySpec{Name: name, ReplicaN: &rf, Duration: &duration} } func maxFloat64() string { @@ -450,7 +455,7 @@ w.rp = t.retentionPolicy() } - if err := s.CreateDatabaseAndRetentionPolicy(w.db, newRetentionPolicyInfo(w.rp, 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy(w.db, newRetentionPolicySpec(w.rp, 1, 0)); err != nil { return err } if err := s.MetaClient.SetDefaultRetentionPolicy(w.db, w.rp); err != nil { @@ -470,212 +475,6 @@ func configureLogging(s *Server) { // Set the logger to discard unless verbose is on if !testing.Verbose() { - type logSetter interface { - SetLogger(*log.Logger) - } - nullLogger := log.New(ioutil.Discard, "", 0) - s.TSDBStore.Logger = nullLogger - s.HintedHandoff.SetLogger(nullLogger) - s.Monitor.SetLogger(nullLogger) - s.QueryExecutor.SetLogger(nullLogger) - s.Subscriber.SetLogger(nullLogger) - for _, service := range s.Services { - if service, ok := service.(logSetter); ok { - service.SetLogger(nullLogger) - } - } - } -} - -type Cluster struct { - Servers []*Server -} - -func NewCluster(size int) (*Cluster, error) { - c := Cluster{} - c.Servers = append(c.Servers, OpenServer(NewConfig(), "")) - metaServiceAddr := c.Servers[0].Node.MetaServers[0] - - for i := 1; i < size; i++ { - c.Servers = append(c.Servers, OpenServer(NewConfig(), metaServiceAddr)) - } - - for _, s := range c.Servers { - configureLogging(s) - } - - if err := verifyCluster(&c, size); err != nil { - return nil, err - } - - return &c, nil -} - -func verifyCluster(c *Cluster, size int) error { - r, err := c.Servers[0].Query("SHOW SERVERS") - if err != nil { - return err - } - var cl client.Response - if e := json.Unmarshal([]byte(r), &cl); e != nil { - return e - } - - // grab only the meta nodes series - series := cl.Results[0].Series[0] - for i, value := range series.Values { - addr := c.Servers[i].Node.MetaServers[i] - if value[0].(float64) != float64(i+1) { - return fmt.Errorf("expected nodeID %d, got %v", i, value[0]) - } - if value[1].(string) != addr { - return fmt.Errorf("expected addr %s, got %v", addr, value[1]) - } - } - - return nil -} - -func NewClusterWithDefaults(size int) (*Cluster, error) { - c, err := NewCluster(size) - if err != nil { - return nil, err - } - - r, err := c.Query(&Query{command: "CREATE DATABASE 
db0"}) - if err != nil { - return nil, err - } - if r != emptyResults { - return nil, fmt.Errorf("%s", r) - } - - for i, s := range c.Servers { - got, err := s.Query("SHOW DATABASES") - if err != nil { - return nil, fmt.Errorf("failed to query databases on node %d for show databases", i+1) - } - if exp := `{"results":[{"series":[{"name":"databases","columns":["name"],"values":[["db0"]]}]}]}`; got != exp { - return nil, fmt.Errorf("unexpected result node %d\nexp: %s\ngot: %s\n", i+1, exp, got) - } - } - - return c, nil -} - -func NewClusterCustom(size int, cb func(index int, config *run.Config)) (*Cluster, error) { - c := Cluster{} - - config := NewConfig() - cb(0, config) - - c.Servers = append(c.Servers, OpenServer(config, "")) - metaServiceAddr := c.Servers[0].Node.MetaServers[0] - - for i := 1; i < size; i++ { - config := NewConfig() - cb(i, config) - c.Servers = append(c.Servers, OpenServer(config, metaServiceAddr)) - } - - for _, s := range c.Servers { - configureLogging(s) - } - - if err := verifyCluster(&c, size); err != nil { - return nil, err - } - - return &c, nil -} - -// Close shuts down all servers. -func (c *Cluster) Close() { - var wg sync.WaitGroup - wg.Add(len(c.Servers)) - - for _, s := range c.Servers { - go func(s *Server) { - defer wg.Done() - s.Close() - }(s) - } - wg.Wait() -} - -func (c *Cluster) Query(q *Query) (string, error) { - r, e := c.Servers[0].Query(q.command) - q.act = r - return r, e -} - -func (c *Cluster) QueryIndex(index int, q string) (string, error) { - return c.Servers[index].Query(q) -} - -func (c *Cluster) QueryAll(q *Query) error { - type Response struct { - Val string - Err error - } - - timeoutErr := fmt.Errorf("timed out waiting for response") - - queryAll := func() error { - // if a server doesn't return in 5 seconds, fail the response - timeout := time.After(5 * time.Second) - ch := make(chan Response, 0) - - for _, s := range c.Servers { - go func(s *Server) { - r, err := s.QueryWithParams(q.command, q.params) - ch <- Response{Val: r, Err: err} - }(s) - } - - resps := []Response{} - for i := 0; i < len(c.Servers); i++ { - select { - case r := <-ch: - resps = append(resps, r) - case <-timeout: - return timeoutErr - } - } - - for _, r := range resps { - if r.Err != nil { - return r.Err - } - if q.pattern { - if !expectPattern(q.exp, r.Val) { - return fmt.Errorf("unexpected pattern: \n\texp: %s\n\tgot: %s\n", q.exp, r.Val) - } - } else { - if r.Val != q.exp { - return fmt.Errorf("unexpected value:\n\texp: %s\n\tgot: %s\n", q.exp, r.Val) - } - } - } - - return nil - } - - tick := time.Tick(100 * time.Millisecond) - // if we don't reach consensus in 20 seconds, fail the query - timeout := time.After(20 * time.Second) - - if err := queryAll(); err == nil { - return nil - } - for { - select { - case <-tick: - if err := queryAll(); err == nil { - return nil - } - case <-timeout: - return fmt.Errorf("timed out waiting for response") - } + s.SetLogOutput(ioutil.Discard) } } diff -Nru influxdb-0.10.0+dfsg1/cmd/influxd/run/server_suite_test.go influxdb-1.1.1+dfsg1/cmd/influxd/run/server_suite_test.go --- influxdb-0.10.0+dfsg1/cmd/influxd/run/server_suite_test.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influxd/run/server_suite_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -31,7 +31,7 @@ &Query{ name: "create database should error with bad name", command: `CREATE DATABASE 0xdb0`, - exp: `{"error":"error parsing query: found 0, expected identifier at line 1, char 17"}`, + exp: `{"error":"error parsing query: found 0xdb0, expected 
identifier at line 1, char 17"}`, }, &Query{ name: "create database with retention duration should error with bad retention duration", @@ -41,7 +41,7 @@ &Query{ name: "create database with retention replication should error with bad retention replication number", command: `CREATE DATABASE db0 WITH REPLICATION xyz`, - exp: `{"error":"error parsing query: found xyz, expected number at line 1, char 38"}`, + exp: `{"error":"error parsing query: found xyz, expected integer at line 1, char 38"}`, }, &Query{ name: "create database with retention name should error with missing retention name", @@ -54,24 +54,24 @@ exp: `{"results":[{"series":[{"name":"databases","columns":["name"],"values":[["db0"],["db0_r"]]}]}]}`, }, &Query{ - name: "create database should not error with existing database with IF NOT EXISTS", - command: `CREATE DATABASE IF NOT EXISTS db0`, + name: "create database should not error with existing database", + command: `CREATE DATABASE db0`, exp: `{"results":[{}]}`, }, &Query{ - name: "create database should create non-existing database with IF NOT EXISTS", - command: `CREATE DATABASE IF NOT EXISTS db1`, + name: "create database should create non-existing database", + command: `CREATE DATABASE db1`, exp: `{"results":[{}]}`, }, &Query{ - name: "create database with retention duration should not error with existing database with IF NOT EXISTS", - command: `CREATE DATABASE IF NOT EXISTS db1 WITH DURATION 24h`, - exp: `{"results":[{}]}`, + name: "create database with retention duration should error if retention policy is different", + command: `CREATE DATABASE db1 WITH DURATION 24h`, + exp: `{"results":[{"error":"retention policy conflicts with an existing policy"}]}`, }, &Query{ - name: "create database should error IF NOT EXISTS with bad retention duration", - command: `CREATE DATABASE IF NOT EXISTS db1 WITH DURATION xyz`, - exp: `{"error":"error parsing query: found xyz, expected duration at line 1, char 49"}`, + name: "create database should error with bad retention duration", + command: `CREATE DATABASE db1 WITH DURATION xyz`, + exp: `{"error":"error parsing query: found xyz, expected duration at line 1, char 35"}`, }, &Query{ name: "show database should succeed", @@ -97,13 +97,13 @@ once: true, }, &Query{ - name: "drop database should error if it does not exists", + name: "drop database should not error if it does not exists", command: `DROP DATABASE db1`, - exp: `{"results":[{"error":"database not found: db1"}]}`, + exp: `{"results":[{}]}`, }, &Query{ - name: "drop database should not error with non-existing database db1 WITH IF EXISTS", - command: `DROP DATABASE IF EXISTS db1`, + name: "drop database should not error with non-existing database db1", + command: `DROP DATABASE db1`, exp: `{"results":[{}]}`, }, &Query{ @@ -112,9 +112,14 @@ exp: `{"results":[{"series":[{"name":"databases","columns":["name"]}]}]}`, }, &Query{ - name: "drop database should error if it doesn't exist", - command: `DROP DATABASE db0`, - exp: `{"results":[{"error":"database not found: db0"}]}`, + name: "create database with shard group duration should succeed", + command: `CREATE DATABASE db0 WITH SHARD DURATION 61m`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "create database with shard group duration and duration should succeed", + command: `CREATE DATABASE db1 WITH DURATION 60m SHARD DURATION 30m`, + exp: `{"results":[{}]}`, }, }, } @@ -199,6 +204,50 @@ }, } + tests["delete_series"] = Test{ + db: "db0", + rp: "rp0", + writes: Writes{ + &Write{data: fmt.Sprintf(`cpu,host=serverA,region=uswest 
val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano())}, + &Write{data: fmt.Sprintf(`cpu,host=serverA,region=uswest val=100 %d`, mustParseTime(time.RFC3339Nano, "2000-01-02T00:00:00Z").UnixNano())}, + &Write{data: fmt.Sprintf(`cpu,host=serverA,region=uswest val=200 %d`, mustParseTime(time.RFC3339Nano, "2000-01-03T00:00:00Z").UnixNano())}, + &Write{db: "db1", data: fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano())}, + }, + queries: []*Query{ + &Query{ + name: "Show series is present", + command: `SHOW SERIES`, + exp: `{"results":[{"series":[{"columns":["key"],"values":[["cpu,host=serverA,region=uswest"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "Delete series", + command: `DELETE FROM cpu WHERE time < '2000-01-03T00:00:00Z'`, + exp: `{"results":[{}]}`, + params: url.Values{"db": []string{"db0"}}, + once: true, + }, + &Query{ + name: "Show series still exists", + command: `SHOW SERIES`, + exp: `{"results":[{"series":[{"columns":["key"],"values":[["cpu,host=serverA,region=uswest"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "Make sure last point still exists", + command: `SELECT * FROM cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","host","region","val"],"values":[["2000-01-03T00:00:00Z","serverA","uswest",200]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "Make sure data wasn't deleted from other database.", + command: `SELECT * FROM cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","host","region","val"],"values":[["2000-01-01T00:00:00Z","serverA","uswest",23.2]]}]}]}`, + params: url.Values{"db": []string{"db1"}}, + }, + }, + } + tests["drop_and_recreate_series"] = Test{ db: "db0", rp: "rp0", @@ -210,7 +259,7 @@ &Query{ name: "Show series is present", command: `SHOW SERIES`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=serverA,region=uswest","serverA","uswest"]]}]}]}`, + exp: `{"results":[{"series":[{"columns":["key"],"values":[["cpu,host=serverA,region=uswest"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ @@ -244,7 +293,7 @@ &Query{ name: "Show series is present again after re-write", command: `SHOW SERIES`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=serverA,region=uswest","serverA","uswest"]]}]}]}`, + exp: `{"results":[{"series":[{"columns":["key"],"values":[["cpu,host=serverA,region=uswest"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, }, @@ -265,7 +314,7 @@ &Query{ name: "Show series is present", command: `SHOW SERIES`, - exp: `{"results":[{"series":[{"name":"a","columns":["_key","host","region"],"values":[["a,host=serverA,region=uswest","serverA","uswest"]]},{"name":"aa","columns":["_key","host","region"],"values":[["aa,host=serverA,region=uswest","serverA","uswest"]]},{"name":"b","columns":["_key","host","region"],"values":[["b,host=serverA,region=uswest","serverA","uswest"]]},{"name":"c","columns":["_key","host","region"],"values":[["c,host=serverA,region=uswest","serverA","uswest"]]}]}]}`, + exp: `{"results":[{"series":[{"columns":["key"],"values":[["a,host=serverA,region=uswest"],["aa,host=serverA,region=uswest"],["b,host=serverA,region=uswest"],["c,host=serverA,region=uswest"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ @@ -278,7 +327,7 @@ &Query{ name: "Show series is gone", command: `SHOW 
SERIES`, - exp: `{"results":[{"series":[{"name":"b","columns":["_key","host","region"],"values":[["b,host=serverA,region=uswest","serverA","uswest"]]},{"name":"c","columns":["_key","host","region"],"values":[["c,host=serverA,region=uswest","serverA","uswest"]]}]}]}`, + exp: `{"results":[{"series":[{"columns":["key"],"values":[["b,host=serverA,region=uswest"],["c,host=serverA,region=uswest"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ @@ -291,19 +340,19 @@ &Query{ name: "make sure DROP SERIES doesn't delete anything when regex doesn't match", command: `SHOW SERIES`, - exp: `{"results":[{"series":[{"name":"b","columns":["_key","host","region"],"values":[["b,host=serverA,region=uswest","serverA","uswest"]]},{"name":"c","columns":["_key","host","region"],"values":[["c,host=serverA,region=uswest","serverA","uswest"]]}]}]}`, + exp: `{"results":[{"series":[{"columns":["key"],"values":[["b,host=serverA,region=uswest"],["c,host=serverA,region=uswest"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: "Drop series with WHERE field should error", command: `DROP SERIES FROM c WHERE val > 50.0`, - exp: `{"results":[{"error":"DROP SERIES doesn't support fields in WHERE clause"}]}`, + exp: `{"results":[{"error":"fields not supported in WHERE clause during deletion"}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: "make sure DROP SERIES with field in WHERE didn't delete data", command: `SHOW SERIES`, - exp: `{"results":[{"series":[{"name":"b","columns":["_key","host","region"],"values":[["b,host=serverA,region=uswest","serverA","uswest"]]},{"name":"c","columns":["_key","host","region"],"values":[["c,host=serverA,region=uswest","serverA","uswest"]]}]}]}`, + exp: `{"results":[{"series":[{"columns":["key"],"values":[["b,host=serverA,region=uswest"],["c,host=serverA,region=uswest"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ @@ -327,7 +376,7 @@ &Query{ name: "show retention policy should succeed", command: `SHOW RETENTION POLICIES ON db0`, - exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["rp0","1h0m0s",1,false]]}]}]}`, + exp: `{"results":[{"series":[{"columns":["name","duration","shardGroupDuration","replicaN","default"],"values":[["rp0","1h0m0s","1h0m0s",1,false]]}]}]}`, }, &Query{ name: "alter retention policy should succeed", @@ -338,12 +387,12 @@ &Query{ name: "show retention policy should have new altered information", command: `SHOW RETENTION POLICIES ON db0`, - exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["rp0","2h0m0s",3,true]]}]}]}`, + exp: `{"results":[{"series":[{"columns":["name","duration","shardGroupDuration","replicaN","default"],"values":[["rp0","2h0m0s","1h0m0s",3,true]]}]}]}`, }, &Query{ name: "show retention policy should still show policy", command: `SHOW RETENTION POLICIES ON db0`, - exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["rp0","2h0m0s",3,true]]}]}]}`, + exp: `{"results":[{"series":[{"columns":["name","duration","shardGroupDuration","replicaN","default"],"values":[["rp0","2h0m0s","1h0m0s",3,true]]}]}]}`, }, &Query{ name: "create a second non-default retention policy", @@ -354,7 +403,7 @@ &Query{ name: "show retention policy should show both", command: `SHOW RETENTION POLICIES ON db0`, - exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["rp0","2h0m0s",3,true],["rp2","1h0m0s",1,false]]}]}]}`, + exp: 
`{"results":[{"series":[{"columns":["name","duration","shardGroupDuration","replicaN","default"],"values":[["rp0","2h0m0s","1h0m0s",3,true],["rp2","1h0m0s","1h0m0s",1,false]]}]}]}`, }, &Query{ name: "dropping non-default retention policy succeed", @@ -363,20 +412,37 @@ once: true, }, &Query{ + name: "create a third non-default retention policy", + command: `CREATE RETENTION POLICY rp3 ON db0 DURATION 1h REPLICATION 1 SHARD DURATION 30m`, + exp: `{"results":[{}]}`, + once: true, + }, + &Query{ + name: "show retention policy should show both with custom shard", + command: `SHOW RETENTION POLICIES ON db0`, + exp: `{"results":[{"series":[{"columns":["name","duration","shardGroupDuration","replicaN","default"],"values":[["rp0","2h0m0s","1h0m0s",3,true],["rp3","1h0m0s","30m0s",1,false]]}]}]}`, + }, + &Query{ + name: "dropping non-default custom shard retention policy succeed", + command: `DROP RETENTION POLICY rp3 ON db0`, + exp: `{"results":[{}]}`, + once: true, + }, + &Query{ name: "show retention policy should show just default", command: `SHOW RETENTION POLICIES ON db0`, - exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["rp0","2h0m0s",3,true]]}]}]}`, + exp: `{"results":[{"series":[{"columns":["name","duration","shardGroupDuration","replicaN","default"],"values":[["rp0","2h0m0s","1h0m0s",3,true]]}]}]}`, }, &Query{ name: "Ensure retention policy with unacceptable retention cannot be created", - command: `CREATE RETENTION POLICY rp3 ON db0 DURATION 1s REPLICATION 1`, + command: `CREATE RETENTION POLICY rp4 ON db0 DURATION 1s REPLICATION 1`, exp: `{"results":[{"error":"retention policy duration must be at least 1h0m0s"}]}`, once: true, }, &Query{ name: "Check error when deleting retention policy on non-existent database", command: `DROP RETENTION POLICY rp1 ON mydatabase`, - exp: `{"results":[{"error":"database not found: mydatabase"}]}`, + exp: `{"results":[{}]}`, }, &Query{ name: "Ensure retention policy for non existing db is not created", @@ -398,7 +464,7 @@ &Query{ name: "show retention policies should return auto-created policy", command: `SHOW RETENTION POLICIES ON db0`, - exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["default","0",1,true]]}]}]}`, + exp: `{"results":[{"series":[{"columns":["name","duration","shardGroupDuration","replicaN","default"],"values":[["autogen","0s","168h0m0s",1,true]]}]}]}`, }, }, } diff -Nru influxdb-0.10.0+dfsg1/cmd/influxd/run/server_test.go influxdb-1.1.1+dfsg1/cmd/influxd/run/server_test.go --- influxdb-0.10.0+dfsg1/cmd/influxd/run/server_test.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influxd/run/server_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -1,17 +1,47 @@ package run_test import ( + "flag" "fmt" "net/http" "net/url" + "os" "strconv" "strings" "testing" "time" - "github.com/influxdb/influxdb/cluster" + "github.com/influxdata/influxdb/coordinator" + "github.com/influxdata/influxdb/models" ) +// Global server used by benchmarks +var benchServer *Server + +func TestMain(m *testing.M) { + flag.Parse() + + // Setup + c := NewConfig() + c.Retention.Enabled = false + c.Monitor.StoreEnabled = false + c.Meta.LoggingEnabled = false + c.Admin.Enabled = false + c.Subscriber.Enabled = false + c.ContinuousQuery.Enabled = false + c.Data.MaxSeriesPerDatabase = 10000000 // 10M + c.Data.MaxValuesPerTag = 1000000 // 1M + benchServer = OpenDefaultServer(c) + + // Run suite. 
+ r := m.Run() + + // Cleanup + benchServer.Close() + + os.Exit(r) +} + // Ensure that HTTP responses include the InfluxDB version. func TestServer_HTTPResponseVersion(t *testing.T) { version := "v1234" @@ -28,7 +58,7 @@ // Ensure the database commands work. func TestServer_DatabaseCommands(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() test := tests.load(t, "database_commands") @@ -48,12 +78,12 @@ func TestServer_Query_DropAndRecreateDatabase(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() test := tests.load(t, "drop_and_recreate_database") - if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicyInfo(test.retentionPolicy(), 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicySpec(test.retentionPolicy(), 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy(test.database(), test.retentionPolicy()); err != nil { @@ -80,18 +110,50 @@ func TestServer_Query_DropDatabaseIsolated(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() test := tests.load(t, "drop_database_isolated") - if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicyInfo(test.retentionPolicy(), 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicySpec(test.retentionPolicy(), 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy(test.database(), test.retentionPolicy()); err != nil { t.Fatal(err) } - if err := s.CreateDatabaseAndRetentionPolicy("db1", newRetentionPolicyInfo("rp1", 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db1", newRetentionPolicySpec("rp1", 1, 0)); err != nil { + t.Fatal(err) + } + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_DeleteSeries(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig()) + defer s.Close() + + test := tests.load(t, "delete_series") + + if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicySpec(test.retentionPolicy(), 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy(test.database(), test.retentionPolicy()); err != nil { t.Fatal(err) } @@ -115,20 +177,18 @@ func TestServer_Query_DropAndRecreateSeries(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") - fmt.Println("1") + s := OpenServer(NewConfig()) defer s.Close() test := tests.load(t, "drop_and_recreate_series") - if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicyInfo(test.retentionPolicy(), 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicySpec(test.retentionPolicy(), 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy(test.database(), test.retentionPolicy()); err != nil { t.Fatal(err) } - fmt.Println("2") for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { @@ -146,7 +206,6 @@ } } - fmt.Println("3") // Re-write data and test again. 
retest := tests.load(t, "drop_and_recreate_series_retest") @@ -166,19 +225,16 @@ t.Error(query.failureMessage()) } } - - fmt.Println("4") - } func TestServer_Query_DropSeriesFromRegex(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() test := tests.load(t, "drop_series_from_regex") - if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicyInfo(test.retentionPolicy(), 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicySpec(test.retentionPolicy(), 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy(test.database(), test.retentionPolicy()); err != nil { @@ -208,7 +264,7 @@ t.Parallel() c := NewConfig() c.Meta.RetentionAutoCreate = false - s := OpenServer(c, "") + s := OpenServer(c) defer s.Close() test := tests.load(t, "retention_policy_commands") @@ -234,7 +290,7 @@ // Ensure the autocreation of retention policy works. func TestServer_DatabaseRetentionPolicyAutoCreate(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() test := tests.load(t, "retention_policy_auto_create") @@ -255,7 +311,7 @@ // Ensure user commands work. func TestServer_UserCommands(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() // Create a database. @@ -303,7 +359,7 @@ &Query{ name: "bad create user request", command: `CREATE USER 0xBAD WITH PASSWORD pwd1337`, - exp: `{"error":"error parsing query: found 0, expected identifier at line 1, char 13"}`, + exp: `{"error":"error parsing query: found 0xBAD, expected identifier at line 1, char 13"}`, }, &Query{ name: "bad create user request, no name", @@ -346,38 +402,13 @@ } } -// Ensure the server can create a single point via json protocol and read it back. -func TestServer_Write_JSON(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - - now := now() - if res, err := s.Write("", "", fmt.Sprintf(`{"database" : "db0", "retentionPolicy" : "rp0", "points": [{"measurement": "cpu", "tags": {"host": "server02"},"fields": {"value": 1.0}}],"time":"%s"} `, now.Format(time.RFC3339Nano)), nil); err != nil { - t.Fatal(err) - } else if exp := ``; exp != res { - t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) - } - - // Verify the data was written. - if res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil { - t.Fatal(err) - } else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server02"},"columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res { - t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) - } -} - // Ensure the server can create a single point via line protocol with float type and read it back. func TestServer_Write_LineProtocol_Float(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 1*time.Hour)); err != nil { t.Fatal(err) } @@ -399,10 +430,10 @@ // Ensure the server can create a single point via line protocol with bool type and read it back. 
func TestServer_Write_LineProtocol_Bool(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 1*time.Hour)); err != nil { t.Fatal(err) } @@ -424,10 +455,10 @@ // Ensure the server can create a single point via line protocol with string type and read it back. func TestServer_Write_LineProtocol_String(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 1*time.Hour)); err != nil { t.Fatal(err) } @@ -449,10 +480,10 @@ // Ensure the server can create a single point via line protocol with integer type and read it back. func TestServer_Write_LineProtocol_Integer(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 1*time.Hour)); err != nil { t.Fatal(err) } @@ -475,10 +506,10 @@ // the successfully parsed points can be queried. func TestServer_Write_LineProtocol_Partial(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 1*time.Hour)); err != nil { t.Fatal(err) } @@ -507,17 +538,9 @@ // Ensure the server can query with default databases (via param) and default retention policy func TestServer_Query_DefaultDBAndRP(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - - if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: fmt.Sprintf(`cpu value=1.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano())}, @@ -533,7 +556,7 @@ &Query{ name: "default rp exists", command: `show retention policies ON db0`, - exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["default","0",1,false],["rp0","1h0m0s",1,true]]}]}]}`, + exp: `{"results":[{"series":[{"columns":["name","duration","shardGroupDuration","replicaN","default"],"values":[["autogen","0s","168h0m0s",1,false],["rp0","0s","168h0m0s",1,true]]}]}]}`, }, &Query{ name: "default rp", @@ -568,13 +591,9 @@ // Ensure the server can have a database with multiple measurements. 
func TestServer_Query_Multiple_Measurements(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - // Make sure we do writes for measurements that will span across shards writes := []string{ fmt.Sprintf("cpu,host=server01 value=100,core=4 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), @@ -594,7 +613,7 @@ &Query{ name: "measurement in one shard but not another shouldn't panic server", command: `SELECT host,value FROM db0.rp0.cpu GROUP BY host`, - exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["2000-01-01T00:00:00Z",100]]}]}]}`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","host","value"],"values":[["2000-01-01T00:00:00Z","server01",100]]}]}]}`, }, }...) @@ -618,13 +637,9 @@ // Ensure the server correctly supports data with identical tag values. func TestServer_Query_IdenticalTagValues(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - writes := []string{ fmt.Sprintf("cpu,t1=val1 value=1 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf("cpu,t2=val2 value=2 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), @@ -639,12 +654,12 @@ &Query{ name: "measurements with identical tag values - SELECT *, no GROUP BY", command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, - exp: `{"results":[{"series":[{"name":"cpu","tags":{"t1":"val1","t2":""},"columns":["time","value"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"t1":"val2","t2":""},"columns":["time","value"],"values":[["2000-01-01T00:02:00Z",3]]},{"name":"cpu","tags":{"t1":"","t2":"val2"},"columns":["time","value"],"values":[["2000-01-01T00:01:00Z",2]]}]}]}`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"t1":"","t2":"val2"},"columns":["time","value"],"values":[["2000-01-01T00:01:00Z",2]]},{"name":"cpu","tags":{"t1":"val1","t2":""},"columns":["time","value"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"t1":"val2","t2":""},"columns":["time","value"],"values":[["2000-01-01T00:02:00Z",3]]}]}]}`, }, &Query{ name: "measurements with identical tag values - SELECT *, with GROUP BY", command: `SELECT value FROM db0.rp0.cpu GROUP BY t1,t2`, - exp: `{"results":[{"series":[{"name":"cpu","tags":{"t1":"val1","t2":""},"columns":["time","value"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"t1":"val2","t2":""},"columns":["time","value"],"values":[["2000-01-01T00:02:00Z",3]]},{"name":"cpu","tags":{"t1":"","t2":"val2"},"columns":["time","value"],"values":[["2000-01-01T00:01:00Z",2]]}]}]}`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"t1":"","t2":"val2"},"columns":["time","value"],"values":[["2000-01-01T00:01:00Z",2]]},{"name":"cpu","tags":{"t1":"val1","t2":""},"columns":["time","value"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"t1":"val2","t2":""},"columns":["time","value"],"values":[["2000-01-01T00:02:00Z",3]]}]}]}`, }, &Query{ name: "measurements with identical tag values - SELECT value no GROUP BY", @@ -673,13 +688,9 @@ // Ensure the server can handle a query that involves accessing no shards. 
func TestServer_Query_NoShards(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - now := now() test := NewTest("db0", "rp0") @@ -715,13 +726,9 @@ // Ensure the server can query a non-existent field func TestServer_Query_NonExistent(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - now := now() test := NewTest("db0", "rp0") @@ -762,13 +769,9 @@ // Ensure the server can perform basic math func TestServer_Query_Math(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db", newRetentionPolicyInfo("rp", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - now := now() writes := []string{ "float value=42 " + strconv.FormatInt(now.UnixNano(), 10), @@ -784,37 +787,37 @@ &Query{ name: "SELECT multiple of float value", command: `SELECT value * 2 from db.rp.float`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"float","columns":["time",""],"values":[["%s",84]]}]}]}`, now.Format(time.RFC3339Nano)), + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"float","columns":["time","value"],"values":[["%s",84]]}]}]}`, now.Format(time.RFC3339Nano)), }, &Query{ name: "SELECT multiple of float value", command: `SELECT 2 * value from db.rp.float`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"float","columns":["time",""],"values":[["%s",84]]}]}]}`, now.Format(time.RFC3339Nano)), + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"float","columns":["time","value"],"values":[["%s",84]]}]}]}`, now.Format(time.RFC3339Nano)), }, &Query{ name: "SELECT multiple of integer value", command: `SELECT value * 2 from db.rp.integer`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time",""],"values":[["%s",84]]}]}]}`, now.Format(time.RFC3339Nano)), + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time","value"],"values":[["%s",84]]}]}]}`, now.Format(time.RFC3339Nano)), }, &Query{ name: "SELECT float multiple of integer value", command: `SELECT value * 2.0 from db.rp.integer`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time",""],"values":[["%s",84]]}]}]}`, now.Format(time.RFC3339Nano)), + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time","value"],"values":[["%s",84]]}]}]}`, now.Format(time.RFC3339Nano)), }, &Query{ name: "SELECT square of float value", command: `SELECT value * value from db.rp.float`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"float","columns":["time",""],"values":[["%s",1764]]}]}]}`, now.Format(time.RFC3339Nano)), + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"float","columns":["time","value_value"],"values":[["%s",1764]]}]}]}`, now.Format(time.RFC3339Nano)), }, &Query{ name: "SELECT square of integer value", command: `SELECT value * value from db.rp.integer`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time",""],"values":[["%s",1764]]}]}]}`, now.Format(time.RFC3339Nano)), + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time","value_value"],"values":[["%s",1764]]}]}]}`, now.Format(time.RFC3339Nano)), }, &Query{ name: "SELECT square of 
integer, float value", command: `SELECT value * value,float from db.rp.integer`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time","","float"],"values":[["%s",1764,null]]}]}]}`, now.Format(time.RFC3339Nano)), + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time","value_value","float"],"values":[["%s",1764,null]]}]}]}`, now.Format(time.RFC3339Nano)), }, &Query{ name: "SELECT square of integer value with alias", @@ -824,17 +827,17 @@ &Query{ name: "SELECT sum of aggregates", command: `SELECT max(value) + min(value) from db.rp.integer`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time",""],"values":[["1970-01-01T00:00:00Z",84]]}]}]}`), + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time","max_min"],"values":[["1970-01-01T00:00:00Z",84]]}]}]}`), }, &Query{ name: "SELECT square of enclosed integer value", command: `SELECT ((value) * (value)) from db.rp.integer`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time",""],"values":[["%s",1764]]}]}]}`, now.Format(time.RFC3339Nano)), + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time","value_value"],"values":[["%s",1764]]}]}]}`, now.Format(time.RFC3339Nano)), }, &Query{ name: "SELECT square of enclosed integer value", command: `SELECT (value * value) from db.rp.integer`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time",""],"values":[["%s",1764]]}]}]}`, now.Format(time.RFC3339Nano)), + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time","value_value"],"values":[["%s",1764]]}]}]}`, now.Format(time.RFC3339Nano)), }, }...) @@ -858,13 +861,9 @@ // Ensure the server can query with the count aggregate function func TestServer_Query_Count(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - now := now() test := NewTest("db0", "rp0") @@ -893,7 +892,7 @@ &Query{ name: "selecting count(value) with filter that excludes all results should return 0", command: fmt.Sprintf(`SELECT count(value) FROM db0.rp0.cpu WHERE value=100 AND time >= '%s'`, hour_ago.Format(time.RFC3339Nano)), - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","count"],"values":[["%s",0]]}]}]}`, hour_ago.Format(time.RFC3339Nano)), + exp: `{"results":[{}]}`, }, &Query{ name: "selecting count(value1) with matching filter against value2 should return correct result", @@ -903,11 +902,16 @@ &Query{ name: "selecting count(value1) with non-matching filter against value2 should return correct result", command: fmt.Sprintf(`SELECT count(value1) FROM db0.rp0.ram WHERE value2=3 AND time >= '%s'`, hour_ago.Format(time.RFC3339Nano)), - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"ram","columns":["time","count"],"values":[["%s",0]]}]}]}`, hour_ago.Format(time.RFC3339Nano)), + exp: `{"results":[{}]}`, }, &Query{ - name: "selecting count(*) should error", + name: "selecting count(*) should expand the wildcard", command: `SELECT count(*) FROM db0.rp0.cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","count_value"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, + }, + &Query{ + name: "selecting count(2) should error", + command: `SELECT count(2) FROM db0.rp0.cpu`, exp: `{"error":"error parsing query: expected field argument in count()"}`, }, }...) 
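
Editor's note: the hunk above records a behavioural change in the count() aggregate between 0.10 and 1.1 — count(*) now expands the wildcard into per-field count_<field> columns (the test expects a "count_value" column) instead of erroring, while a literal argument such as count(2) is still rejected at parse time with "expected field argument in count()". Below is a minimal sketch, outside the patch itself, of exercising that behaviour against a running 1.1 server over the HTTP /query endpoint; the host/port (localhost:8086) and database name (db0) are assumptions for illustration, not taken from the diff.

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
)

// query issues a single InfluxQL statement against the /query endpoint
// and returns the raw JSON response body, similar in spirit to the
// Query helpers used by the test harness in this diff.
func query(base, db, q string) (string, error) {
	v := url.Values{}
	v.Set("db", db)
	v.Set("q", q)
	resp, err := http.Get(base + "/query?" + v.Encode())
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	b, err := ioutil.ReadAll(resp.Body)
	return string(b), err
}

func main() {
	// Assumed local server and database; adjust to your setup.
	const base, db = "http://localhost:8086", "db0"

	// In 1.1, count(*) expands to one count_<field> column per field,
	// e.g. "count_value" for a measurement with a "value" field.
	out, err := query(base, db, `SELECT count(*) FROM cpu`)
	fmt.Println(out, err)

	// A literal argument is still a parse error, per the test expectation:
	// {"error":"error parsing query: expected field argument in count()"}
	out, err = query(base, db, `SELECT count(2) FROM cpu`)
	fmt.Println(out, err)
}
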
@@ -929,15 +933,52 @@ } } -// Ensure the server can query with Now(). -func TestServer_Query_Now(t *testing.T) { +// Ensure the server can limit concurrent series. +func TestServer_Query_MaxSelectSeriesN(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + config := NewConfig() + config.Coordinator.MaxSelectSeriesN = 3 + s := OpenServer(config) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: `cpu,host=server01 value=1.0 0`}, + &Write{data: `cpu,host=server02 value=1.0 0`}, + &Write{data: `cpu,host=server03 value=1.0 0`}, + &Write{data: `cpu,host=server04 value=1.0 0`}, + } + + test.addQueries([]*Query{ + &Query{ + name: "exceeed max series", + command: `SELECT COUNT(value) FROM db0.rp0.cpu`, + exp: `{"results":[{"error":"max-select-series limit exceeded: (4/3)"}]}`, + }, + }...) + + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } } +} + +// Ensure the server can query with Now(). +func TestServer_Query_Now(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig()) + defer s.Close() now := now() @@ -989,13 +1030,9 @@ // Ensure the server can query with epoch precisions. func TestServer_Query_EpochPrecision(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - now := now() test := NewTest("db0", "rp0") @@ -1062,13 +1099,9 @@ // Ensure the server works with tag queries. 
func TestServer_Query_Tags(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - now := now() writes := []string{ @@ -1099,6 +1132,7 @@ name: "tag without field should return error", command: `SELECT host FROM db0.rp0.cpu`, exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`, + skip: true, // FIXME(benbjohnson): tags should stream as values }, &Query{ name: "field with tag should succeed", @@ -1108,7 +1142,7 @@ &Query{ name: "field with tag and GROUP BY should succeed", command: `SELECT host, value FROM db0.rp0.cpu GROUP BY host`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",100]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","value"],"values":[["%s",50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","host","value"],"values":[["%s","server01",100]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","host","value"],"values":[["%s","server02",50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), }, &Query{ name: "field with two tags should succeed", @@ -1118,7 +1152,7 @@ &Query{ name: "field with two tags and GROUP BY should succeed", command: `SELECT host, value, core FROM db0.rp0.cpu GROUP BY host`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value","core"],"values":[["%s",100,4]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","value","core"],"values":[["%s",50,2]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","host","value","core"],"values":[["%s","server01",100,4]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","host","value","core"],"values":[["%s","server02",50,2]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), }, &Query{ name: "select * with tags should succeed", @@ -1242,13 +1276,9 @@ // Ensure the server correctly queries with an alias. 
func TestServer_Query_Alias(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - writes := []string{ fmt.Sprintf("cpu value=1i,steps=3i %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf("cpu value=2i,steps=4i %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), @@ -1272,12 +1302,12 @@ &Query{ name: "double aggregate sum - SELECT sum(value), sum(steps) FROM db0.rp0.cpu", command: `SELECT sum(value), sum(steps) FROM db0.rp0.cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum","sum"],"values":[["1970-01-01T00:00:00Z",3,7]]}]}]}`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum","sum_1"],"values":[["1970-01-01T00:00:00Z",3,7]]}]}]}`, }, &Query{ name: "double aggregate sum reverse order - SELECT sum(steps), sum(value) FROM db0.rp0.cpu", command: `SELECT sum(steps), sum(value) FROM db0.rp0.cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum","sum"],"values":[["1970-01-01T00:00:00Z",7,3]]}]}]}`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum","sum_1"],"values":[["1970-01-01T00:00:00Z",7,3]]}]}]}`, }, &Query{ name: "double aggregate sum with alias - SELECT sum(value) as sumv, sum(steps) as sums FROM db0.rp0.cpu", @@ -1321,13 +1351,9 @@ // Ensure the server will succeed and error for common scenarios. func TestServer_Query_Common(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - now := now() test := NewTest("db0", "rp0") @@ -1398,13 +1424,9 @@ // Ensure the server can query two points. func TestServer_Query_SelectTwoPoints(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - now := now() test := NewTest("db0", "rp0") @@ -1446,13 +1468,9 @@ // Ensure the server can query two negative points. func TestServer_Query_SelectTwoNegativePoints(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - now := now() test := NewTest("db0", "rp0") @@ -1487,13 +1505,9 @@ // Ensure the server can query with relative time. func TestServer_Query_SelectRelativeTime(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - now := now() yesterday := yesterday() @@ -1536,13 +1550,9 @@ // Ensure the server can handle various simple derivative queries. 
func TestServer_Query_SelectRawDerivative(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: fmt.Sprintf("cpu value=210 1278010021000000000\ncpu value=10 1278010022000000000")}, @@ -1582,13 +1592,9 @@ // Ensure the server can handle various simple non_negative_derivative queries. func TestServer_Query_SelectRawNonNegativeDerivative(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: fmt.Sprintf(`cpu value=10 1278010021000000000 @@ -1632,13 +1638,9 @@ // Ensure the server can handle various group by time derivative queries. func TestServer_Query_SelectGroupByTimeDerivative(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: fmt.Sprintf(`cpu value=10 1278010020000000000 @@ -1652,12 +1654,12 @@ &Query{ name: "calculate derivative of count with unit default (2s) group by time", command: `SELECT derivative(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",2],["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ name: "calculate derivative of count with unit 4s group by time", command: `SELECT derivative(count(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",4],["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ name: "calculate derivative of mean with unit default (2s) group by time", @@ -1680,6 +1682,17 @@ exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",20]]}]}]}`, }, &Query{ + name: "calculate derivative of mode with unit default (2s) group by time", + command: `SELECT derivative(mode(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, + }, + &Query{ + name: "calculate derivative of mode with unit 4s group by time", + command: `SELECT derivative(mode(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",20]]}]}]}`, + }, + + &Query{ name: "calculate derivative of sum with unit default (2s) group by time", command: `SELECT 
derivative(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",20]]}]}]}`, @@ -1762,13 +1775,9 @@ // Ensure the server can handle various group by time derivative queries. func TestServer_Query_SelectGroupByTimeDerivativeWithFill(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: fmt.Sprintf(`cpu value=10 1278010020000000000 @@ -1780,12 +1789,12 @@ &Query{ name: "calculate derivative of count with unit default (2s) group by time with fill 0", command: `SELECT derivative(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-2]]}]}]}`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",2],["2010-07-01T18:47:02Z",-2]]}]}]}`, }, &Query{ name: "calculate derivative of count with unit 4s group by time with fill 0", command: `SELECT derivative(count(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-4]]}]}]}`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",4],["2010-07-01T18:47:02Z",-4]]}]}]}`, }, &Query{ name: "calculate derivative of count with unit default (2s) group by time with fill previous", @@ -1798,19 +1807,14 @@ exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ - name: "calculate derivative of count of distinct with unit default (4s) group by time with fill previous", - command: `SELECT derivative(count(distinct(value))) from db0.rp0.position where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:07' group by time(4s) fill(previous)`, - exp: `{"results":[{"error":"aggregate call didn't contain a field derivative(count(distinct(value)))"}]}`, - }, - &Query{ name: "calculate derivative of mean with unit default (2s) group by time with fill 0", command: `SELECT derivative(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-15]]}]}]}`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",15],["2010-07-01T18:47:02Z",-15]]}]}]}`, }, &Query{ name: "calculate derivative of mean with unit 4s group by time with fill 0", command: `SELECT derivative(mean(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-30]]}]}]}`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",30],["2010-07-01T18:47:02Z",-30]]}]}]}`, 
}, &Query{ name: "calculate derivative of mean with unit default (2s) group by time with fill previous", @@ -1825,12 +1829,12 @@ &Query{ name: "calculate derivative of median with unit default (2s) group by time with fill 0", command: `SELECT derivative(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-15]]}]}]}`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",15],["2010-07-01T18:47:02Z",-15]]}]}]}`, }, &Query{ name: "calculate derivative of median with unit 4s group by time with fill 0", command: `SELECT derivative(median(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-30]]}]}]}`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",30],["2010-07-01T18:47:02Z",-30]]}]}]}`, }, &Query{ name: "calculate derivative of median with unit default (2s) group by time with fill previous", @@ -1843,14 +1847,34 @@ exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ + name: "calculate derivative of mode with unit default (2s) group by time with fill 0", + command: `SELECT derivative(mode(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",-10]]}]}]}`, + }, + &Query{ + name: "calculate derivative of mode with unit 4s group by time with fill 0", + command: `SELECT derivative(mode(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",20],["2010-07-01T18:47:02Z",-20]]}]}]}`, + }, + &Query{ + name: "calculate derivative of mode with unit default (2s) group by time with fill previous", + command: `SELECT derivative(mode(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + }, + &Query{ + name: "calculate derivative of mode with unit 4s group by time with fill previous", + command: `SELECT derivative(mode(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + }, + &Query{ name: "calculate derivative of sum with unit default (2s) group by time with fill 0", command: `SELECT derivative(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-30]]}]}]}`, + exp: 
`{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",30],["2010-07-01T18:47:02Z",-30]]}]}]}`, }, &Query{ name: "calculate derivative of sum with unit 4s group by time with fill 0", command: `SELECT derivative(sum(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-60]]}]}]}`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",60],["2010-07-01T18:47:02Z",-60]]}]}]}`, }, &Query{ name: "calculate derivative of sum with unit default (2s) group by time with fill previous", @@ -1865,12 +1889,12 @@ &Query{ name: "calculate derivative of first with unit default (2s) group by time with fill 0", command: `SELECT derivative(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-10]]}]}]}`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",-10]]}]}]}`, }, &Query{ name: "calculate derivative of first with unit 4s group by time with fill 0", command: `SELECT derivative(first(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-20]]}]}]}`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",20],["2010-07-01T18:47:02Z",-20]]}]}]}`, }, &Query{ name: "calculate derivative of first with unit default (2s) group by time with fill previous", @@ -1885,12 +1909,12 @@ &Query{ name: "calculate derivative of last with unit default (2s) group by time with fill 0", command: `SELECT derivative(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-20]]}]}]}`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",20],["2010-07-01T18:47:02Z",-20]]}]}]}`, }, &Query{ name: "calculate derivative of last with unit 4s group by time with fill 0", command: `SELECT derivative(last(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-40]]}]}]}`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",40],["2010-07-01T18:47:02Z",-40]]}]}]}`, }, &Query{ name: "calculate derivative of last with unit default (2s) group by time with fill previous", @@ -1905,12 +1929,12 @@ &Query{ name: "calculate derivative of min with unit default (2s) group by time with fill 0", command: `SELECT derivative(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-10]]}]}]}`, + exp: 
`{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",-10]]}]}]}`, }, &Query{ name: "calculate derivative of min with unit 4s group by time with fill 0", command: `SELECT derivative(min(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-20]]}]}]}`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",20],["2010-07-01T18:47:02Z",-20]]}]}]}`, }, &Query{ name: "calculate derivative of min with unit default (2s) group by time with fill previous", @@ -1925,12 +1949,12 @@ &Query{ name: "calculate derivative of max with unit default (2s) group by time with fill 0", command: `SELECT derivative(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-20]]}]}]}`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",20],["2010-07-01T18:47:02Z",-20]]}]}]}`, }, &Query{ name: "calculate derivative of max with unit 4s group by time with fill 0", command: `SELECT derivative(max(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-40]]}]}]}`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",40],["2010-07-01T18:47:02Z",-40]]}]}]}`, }, &Query{ name: "calculate derivative of max with unit default (2s) group by time with fill previous", @@ -1945,12 +1969,12 @@ &Query{ name: "calculate derivative of percentile with unit default (2s) group by time with fill 0", command: `SELECT derivative(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-10]]}]}]}`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",-10]]}]}]}`, }, &Query{ name: "calculate derivative of percentile with unit 4s group by time with fill 0", command: `SELECT derivative(percentile(value, 50), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-20]]}]}]}`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",20],["2010-07-01T18:47:02Z",-20]]}]}]}`, }, &Query{ name: "calculate derivative of percentile with unit default (2s) group by time with fill previous", @@ -1982,47 +2006,71 @@ } } -// mergeMany ensures that when merging many series together and some of them have a different number -// of points than others in a group by interval the results are correct -func TestServer_Query_MergeMany(t *testing.T) { +// Ensure the server can handle various group by time difference queries. 
+func TestServer_Query_SelectGroupByTimeDifference(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - // set infinite retention policy as we are inserting data in the past and don't want retention policy enforcement to make this test racy - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - test := NewTest("db0", "rp0") - - writes := []string{} - for i := 1; i < 11; i++ { - for j := 1; j < 5+i%3; j++ { - data := fmt.Sprintf(`cpu,host=server_%d value=22 %d`, i, time.Unix(int64(j), int64(0)).UTC().UnixNano()) - writes = append(writes, data) - } - } test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, + &Write{data: fmt.Sprintf(`cpu value=10 1278010020000000000 +cpu value=15 1278010021000000000 +cpu value=20 1278010022000000000 +cpu value=25 1278010023000000000 +`)}, } test.addQueries([]*Query{ &Query{ - name: "GROUP by time", - command: `SELECT count(value) FROM db0.rp0.cpu WHERE time >= '1970-01-01T00:00:01Z' AND time <= '1970-01-01T00:00:06Z' GROUP BY time(1s)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","count"],"values":[["1970-01-01T00:00:01Z",10],["1970-01-01T00:00:02Z",10],["1970-01-01T00:00:03Z",10],["1970-01-01T00:00:04Z",10],["1970-01-01T00:00:05Z",7],["1970-01-01T00:00:06Z",3]]}]}]}`, + name: "calculate difference of count", + command: `SELECT difference(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:00Z",2],["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ - skip: true, - name: "GROUP by tag - FIXME issue #2875", - command: `SELECT count(value) FROM db0.rp0.cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:00Z' group by host`, - exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","count"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","count"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server03"},"columns":["time","count"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`, + name: "calculate difference of mean", + command: `SELECT difference(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, }, &Query{ - name: "GROUP by field", - command: `SELECT count(value) FROM db0.rp0.cpu group by value`, - exp: `{"results":[{"error":"can not use field in GROUP BY clause: value"}]}`, + name: "calculate difference of median", + command: `SELECT difference(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, + }, + &Query{ + name: "calculate difference of mode", + command: `SELECT difference(mode(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, + }, + &Query{ + name: "calculate difference of sum", + command: `SELECT difference(sum(value)) from db0.rp0.cpu where time >= 
'2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",20]]}]}]}`, + }, + &Query{ + name: "calculate difference of first", + command: `SELECT difference(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, + }, + &Query{ + name: "calculate difference of last", + command: `SELECT difference(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, + }, + &Query{ + name: "calculate difference of min", + command: `SELECT difference(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, + }, + &Query{ + name: "calculate difference of max", + command: `SELECT difference(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, + }, + &Query{ + name: "calculate difference of percentile", + command: `SELECT difference(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, }, }...) @@ -2044,58 +2092,782 @@ } } -func TestServer_Query_SLimitAndSOffset(t *testing.T) { +// Ensure the server can handle various group by time difference queries with fill. 
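Editor's aside, not part of the upstream diff: before the fill variant introduced above, here is a minimal standalone Go sketch of what these tests assert, under the assumption that difference() simply subtracts each GROUP BY time(2s) window aggregate from the next one. The point values and offsets mirror the four writes in the preceding test; everything else (names, layout) is illustrative only.

package main

import "fmt"

func main() {
	// The four sample points from the test: value=10,15,20,25 at t=0s..3s
	// (seconds relative to 2010-07-01T18:47:00Z).
	points := []struct {
		t int64
		v float64
	}{{0, 10}, {1, 15}, {2, 20}, {3, 25}}

	// GROUP BY time(2s): aggregate mean(value) per 2-second window.
	sums := map[int64]float64{}
	counts := map[int64]float64{}
	for _, p := range points {
		w := p.t / 2 * 2 // window start
		sums[w] += p.v
		counts[w]++
	}
	windows := []int64{0, 2}
	means := make([]float64, len(windows))
	for i, w := range windows {
		means[i] = sums[w] / counts[w] // 12.5 and 22.5
	}

	// difference(mean(value)): each window's aggregate minus the previous window's.
	for i := 1; i < len(means); i++ {
		fmt.Printf("t=+%ds difference=%v\n", windows[i], means[i]-means[i-1])
	}
	// Prints "t=+2s difference=10", matching the expected value at 2010-07-01T18:47:02Z
	// in the "calculate difference of mean" case above.
}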
+func TestServer_Query_SelectGroupByTimeDifferenceWithFill(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - // set infinite retention policy as we are inserting data in the past and don't want retention policy enforcement to make this test racy - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - test := NewTest("db0", "rp0") - - writes := []string{} - for i := 1; i < 10; i++ { - data := fmt.Sprintf(`cpu,region=us-east,host=server-%d value=%d %d`, i, i, time.Unix(int64(i), int64(0)).UnixNano()) - writes = append(writes, data) - } test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, + &Write{data: fmt.Sprintf(`cpu value=10 1278010020000000000 +cpu value=20 1278010021000000000 +`)}, } test.addQueries([]*Query{ &Query{ - name: "SLIMIT 2 SOFFSET 1", - command: `SELECT count(value) FROM db0.rp0.cpu GROUP BY * SLIMIT 2 SOFFSET 1`, - exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server-2","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server-3","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, + name: "calculate difference of count with fill 0", + command: `SELECT difference(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:00Z",2],["2010-07-01T18:47:02Z",-2]]}]}]}`, }, &Query{ - name: "SLIMIT 2 SOFFSET 3", - command: `SELECT count(value) FROM db0.rp0.cpu GROUP BY * SLIMIT 2 SOFFSET 3`, - exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server-4","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server-5","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, + name: "calculate difference of count with fill previous", + command: `SELECT difference(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ - name: "SLIMIT 3 SOFFSET 8", - command: `SELECT count(value) FROM db0.rp0.cpu GROUP BY * SLIMIT 3 SOFFSET 8`, - exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server-9","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, + name: "calculate difference of mean with fill 0", + command: `SELECT difference(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:00Z",15],["2010-07-01T18:47:02Z",-15]]}]}]}`, }, - }...) 
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { + &Query{ + name: "calculate difference of mean with fill previous", + command: `SELECT difference(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + }, + &Query{ + name: "calculate difference of median with fill 0", + command: `SELECT difference(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:00Z",15],["2010-07-01T18:47:02Z",-15]]}]}]}`, + }, + &Query{ + name: "calculate difference of median with fill previous", + command: `SELECT difference(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + }, + &Query{ + name: "calculate difference of mode with fill 0", + command: `SELECT difference(mode(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",-10]]}]}]}`, + }, + &Query{ + name: "calculate difference of mode with fill previous", + command: `SELECT difference(mode(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + }, + &Query{ + name: "calculate difference of sum with fill 0", + command: `SELECT difference(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:00Z",30],["2010-07-01T18:47:02Z",-30]]}]}]}`, + }, + &Query{ + name: "calculate difference of sum with fill previous", + command: `SELECT difference(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + }, + &Query{ + name: "calculate difference of first with fill 0", + command: `SELECT difference(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",-10]]}]}]}`, + }, + &Query{ + name: "calculate difference of first with fill previous", + command: `SELECT difference(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: 
`{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + }, + &Query{ + name: "calculate difference of last with fill 0", + command: `SELECT difference(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:00Z",20],["2010-07-01T18:47:02Z",-20]]}]}]}`, + }, + &Query{ + name: "calculate difference of last with fill previous", + command: `SELECT difference(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + }, + &Query{ + name: "calculate difference of min with fill 0", + command: `SELECT difference(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",-10]]}]}]}`, + }, + &Query{ + name: "calculate difference of min with fill previous", + command: `SELECT difference(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + }, + &Query{ + name: "calculate difference of max with fill 0", + command: `SELECT difference(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:00Z",20],["2010-07-01T18:47:02Z",-20]]}]}]}`, + }, + &Query{ + name: "calculate difference of max with fill previous", + command: `SELECT difference(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + }, + &Query{ + name: "calculate difference of percentile with fill 0", + command: `SELECT difference(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",-10]]}]}]}`, + }, + &Query{ + name: "calculate difference of percentile with fill previous", + command: `SELECT difference(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server can handle various group by time moving average queries. 
+func TestServer_Query_SelectGroupByTimeMovingAverage(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig()) + defer s.Close() + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: fmt.Sprintf(`cpu value=10 1278010020000000000 +cpu value=15 1278010021000000000 +cpu value=20 1278010022000000000 +cpu value=25 1278010023000000000 +cpu value=30 1278010024000000000 +cpu value=35 1278010025000000000 +`)}, + } + + test.addQueries([]*Query{ + &Query{ + name: "calculate moving average of count", + command: `SELECT moving_average(count(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:00Z",1],["2010-07-01T18:47:02Z",2],["2010-07-01T18:47:04Z",2]]}]}]}`, + }, + &Query{ + name: "calculate moving average of mean", + command: `SELECT moving_average(mean(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",17.5],["2010-07-01T18:47:04Z",27.5]]}]}]}`, + }, + &Query{ + name: "calculate moving average of median", + command: `SELECT moving_average(median(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",17.5],["2010-07-01T18:47:04Z",27.5]]}]}]}`, + }, + &Query{ + name: "calculate moving average of mode", + command: `SELECT moving_average(mode(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",15],["2010-07-01T18:47:04Z",25]]}]}]}`, + }, + &Query{ + name: "calculate moving average of sum", + command: `SELECT moving_average(sum(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",35],["2010-07-01T18:47:04Z",55]]}]}]}`, + }, + &Query{ + name: "calculate moving average of first", + command: `SELECT moving_average(first(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",15],["2010-07-01T18:47:04Z",25]]}]}]}`, + }, + &Query{ + name: "calculate moving average of last", + command: `SELECT moving_average(last(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",20],["2010-07-01T18:47:04Z",30]]}]}]}`, + }, + &Query{ + name: "calculate moving average of min", + command: `SELECT moving_average(min(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",15],["2010-07-01T18:47:04Z",25]]}]}]}`, + }, + &Query{ + name: "calculate moving average of max", + command: `SELECT 
moving_average(max(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",20],["2010-07-01T18:47:04Z",30]]}]}]}`, + }, + &Query{ + name: "calculate moving average of percentile", + command: `SELECT moving_average(percentile(value, 50), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",15],["2010-07-01T18:47:04Z",25]]}]}]}`, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server can handle various group by time moving average queries. +func TestServer_Query_SelectGroupByTimeMovingAverageWithFill(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig()) + defer s.Close() + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: fmt.Sprintf(`cpu value=10 1278010020000000000 +cpu value=15 1278010021000000000 +cpu value=30 1278010024000000000 +cpu value=35 1278010025000000000 +`)}, + } + + test.addQueries([]*Query{ + &Query{ + name: "calculate moving average of count with fill 0", + command: `SELECT moving_average(count(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:00Z",1],["2010-07-01T18:47:02Z",1],["2010-07-01T18:47:04Z",1]]}]}]}`, + }, + &Query{ + name: "calculate moving average of count with fill previous", + command: `SELECT moving_average(count(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",2],["2010-07-01T18:47:04Z",2]]}]}]}`, + }, + &Query{ + name: "calculate moving average of mean with fill 0", + command: `SELECT moving_average(mean(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:00Z",6.25],["2010-07-01T18:47:02Z",6.25],["2010-07-01T18:47:04Z",16.25]]}]}]}`, + }, + &Query{ + name: "calculate moving average of mean with fill previous", + command: `SELECT moving_average(mean(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",12.5],["2010-07-01T18:47:04Z",22.5]]}]}]}`, + }, + &Query{ + name: "calculate moving average of median with fill 0", + command: `SELECT moving_average(median(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`, + exp: 
`{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:00Z",6.25],["2010-07-01T18:47:02Z",6.25],["2010-07-01T18:47:04Z",16.25]]}]}]}`, + }, + &Query{ + name: "calculate moving average of median with fill previous", + command: `SELECT moving_average(median(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",12.5],["2010-07-01T18:47:04Z",22.5]]}]}]}`, + }, + &Query{ + name: "calculate moving average of mode with fill 0", + command: `SELECT moving_average(mode(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:00Z",5],["2010-07-01T18:47:02Z",5],["2010-07-01T18:47:04Z",15]]}]}]}`, + }, + &Query{ + name: "calculate moving average of mode with fill previous", + command: `SELECT moving_average(mode(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",10],["2010-07-01T18:47:04Z",20]]}]}]}`, + }, + &Query{ + name: "calculate moving average of sum with fill 0", + command: `SELECT moving_average(sum(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:00Z",12.5],["2010-07-01T18:47:02Z",12.5],["2010-07-01T18:47:04Z",32.5]]}]}]}`, + }, + &Query{ + name: "calculate moving average of sum with fill previous", + command: `SELECT moving_average(sum(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",25],["2010-07-01T18:47:04Z",45]]}]}]}`, + }, + &Query{ + name: "calculate moving average of first with fill 0", + command: `SELECT moving_average(first(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:00Z",5],["2010-07-01T18:47:02Z",5],["2010-07-01T18:47:04Z",15]]}]}]}`, + }, + &Query{ + name: "calculate moving average of first with fill previous", + command: `SELECT moving_average(first(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",10],["2010-07-01T18:47:04Z",20]]}]}]}`, + }, + &Query{ + name: "calculate moving average of last with fill 0", + command: `SELECT moving_average(last(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:00Z",7.5],["2010-07-01T18:47:02Z",7.5],["2010-07-01T18:47:04Z",17.5]]}]}]}`, + }, + &Query{ + name: "calculate moving average of last with 
fill previous", + command: `SELECT moving_average(last(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",15],["2010-07-01T18:47:04Z",25]]}]}]}`, + }, + &Query{ + name: "calculate moving average of min with fill 0", + command: `SELECT moving_average(min(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:00Z",5],["2010-07-01T18:47:02Z",5],["2010-07-01T18:47:04Z",15]]}]}]}`, + }, + &Query{ + name: "calculate moving average of min with fill previous", + command: `SELECT moving_average(min(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",10],["2010-07-01T18:47:04Z",20]]}]}]}`, + }, + &Query{ + name: "calculate moving average of max with fill 0", + command: `SELECT moving_average(max(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:00Z",7.5],["2010-07-01T18:47:02Z",7.5],["2010-07-01T18:47:04Z",17.5]]}]}]}`, + }, + &Query{ + name: "calculate moving average of max with fill previous", + command: `SELECT moving_average(max(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",15],["2010-07-01T18:47:04Z",25]]}]}]}`, + }, + &Query{ + name: "calculate moving average of percentile with fill 0", + command: `SELECT moving_average(percentile(value, 50), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:00Z",5],["2010-07-01T18:47:02Z",5],["2010-07-01T18:47:04Z",15]]}]}]}`, + }, + &Query{ + name: "calculate moving average of percentile with fill previous", + command: `SELECT moving_average(percentile(value, 50), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",10],["2010-07-01T18:47:04Z",20]]}]}]}`, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server can handle various group by time cumulative sum queries. 
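Editor's aside, not part of the upstream diff: a minimal sketch of cumulative_sum() over GROUP BY time(2s) aggregates as the next test exercises it, assuming cumulative_sum keeps a running total of the window aggregates and emits one row per window. The values mirror the four writes in that test.

package main

import "fmt"

func main() {
	// The four sample points from the test: value=10,15,20,25 at t=0s..3s (relative to 18:47:00Z).
	values := []float64{10, 15, 20, 25}

	// GROUP BY time(2s): mean per window -> 12.5 at +0s, 22.5 at +2s.
	means := []float64{(values[0] + values[1]) / 2, (values[2] + values[3]) / 2}

	// cumulative_sum(): running total of the window aggregates, one row per window.
	total := 0.0
	for i, m := range means {
		total += m
		fmt.Printf("t=+%ds cumulative_sum=%v\n", i*2, total)
	}
	// Prints 12.5 at +0s and 35 at +2s, matching the expected
	// 18:47:00Z / 18:47:02Z rows in "calculate cumulative sum of mean".
}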
+func TestServer_Query_SelectGroupByTimeCumulativeSum(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig()) + defer s.Close() + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: fmt.Sprintf(`cpu value=10 1278010020000000000 +cpu value=15 1278010021000000000 +cpu value=20 1278010022000000000 +cpu value=25 1278010023000000000 +`)}, + } + + test.addQueries([]*Query{ + &Query{ + name: "calculate cumulative sum of count", + command: `SELECT cumulative_sum(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",2],["2010-07-01T18:47:02Z",4]]}]}]}`, + }, + &Query{ + name: "calculate cumulative sum of mean", + command: `SELECT cumulative_sum(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",12.5],["2010-07-01T18:47:02Z",35]]}]}]}`, + }, + &Query{ + name: "calculate cumulative sum of median", + command: `SELECT cumulative_sum(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",12.5],["2010-07-01T18:47:02Z",35]]}]}]}`, + }, + &Query{ + name: "calculate cumulative sum of mode", + command: `SELECT cumulative_sum(mode(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",30]]}]}]}`, + }, + &Query{ + name: "calculate cumulative sum of sum", + command: `SELECT cumulative_sum(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",25],["2010-07-01T18:47:02Z",70]]}]}]}`, + }, + &Query{ + name: "calculate cumulative sum of first", + command: `SELECT cumulative_sum(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",30]]}]}]}`, + }, + &Query{ + name: "calculate cumulative sum of last", + command: `SELECT cumulative_sum(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",15],["2010-07-01T18:47:02Z",40]]}]}]}`, + }, + &Query{ + name: "calculate cumulative sum of min", + command: `SELECT cumulative_sum(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",30]]}]}]}`, + }, + &Query{ + name: "calculate cumulative sum of max", + command: `SELECT cumulative_sum(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + 
exp: `{"results":[{"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",15],["2010-07-01T18:47:02Z",40]]}]}]}`, + }, + &Query{ + name: "calculate cumulative sum of percentile", + command: `SELECT cumulative_sum(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",30]]}]}]}`, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server can handle various group by time cumulative sum queries with fill. +func TestServer_Query_SelectGroupByTimeCumulativeSumWithFill(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig()) + defer s.Close() + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: fmt.Sprintf(`cpu value=10 1278010020000000000 +cpu value=20 1278010021000000000 +`)}, + } + + test.addQueries([]*Query{ + &Query{ + name: "calculate cumulative sum of count with fill 0", + command: `SELECT cumulative_sum(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",2],["2010-07-01T18:47:02Z",2]]}]}]}`, + }, + &Query{ + name: "calculate cumulative sum of count with fill previous", + command: `SELECT cumulative_sum(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",2],["2010-07-01T18:47:02Z",4]]}]}]}`, + }, + &Query{ + name: "calculate cumulative sum of mean with fill 0", + command: `SELECT cumulative_sum(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",15],["2010-07-01T18:47:02Z",15]]}]}]}`, + }, + &Query{ + name: "calculate cumulative sum of mean with fill previous", + command: `SELECT cumulative_sum(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",15],["2010-07-01T18:47:02Z",30]]}]}]}`, + }, + &Query{ + name: "calculate cumulative sum of median with fill 0", + command: `SELECT cumulative_sum(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",15],["2010-07-01T18:47:02Z",15]]}]}]}`, + }, + &Query{ + name: "calculate cumulative sum of median with fill previous", + command: `SELECT cumulative_sum(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + 
exp: `{"results":[{"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",15],["2010-07-01T18:47:02Z",30]]}]}]}`, + }, + &Query{ + name: "calculate cumulative sum of mode with fill 0", + command: `SELECT cumulative_sum(mode(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",10]]}]}]}`, + }, + &Query{ + name: "calculate cumulative sum of mode with fill previous", + command: `SELECT cumulative_sum(mode(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",20]]}]}]}`, + }, + &Query{ + name: "calculate cumulative sum of sum with fill 0", + command: `SELECT cumulative_sum(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",30],["2010-07-01T18:47:02Z",30]]}]}]}`, + }, + &Query{ + name: "calculate cumulative sum of sum with fill previous", + command: `SELECT cumulative_sum(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",30],["2010-07-01T18:47:02Z",60]]}]}]}`, + }, + &Query{ + name: "calculate cumulative sum of first with fill 0", + command: `SELECT cumulative_sum(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",10]]}]}]}`, + }, + &Query{ + name: "calculate cumulative sum of first with fill previous", + command: `SELECT cumulative_sum(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",20]]}]}]}`, + }, + &Query{ + name: "calculate cumulative sum of last with fill 0", + command: `SELECT cumulative_sum(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",20],["2010-07-01T18:47:02Z",20]]}]}]}`, + }, + &Query{ + name: "calculate cumulative sum of last with fill previous", + command: `SELECT cumulative_sum(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",20],["2010-07-01T18:47:02Z",40]]}]}]}`, + }, + &Query{ + name: "calculate cumulative sum of min with fill 0", + command: `SELECT cumulative_sum(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: 
`{"results":[{"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",10]]}]}]}`, + }, + &Query{ + name: "calculate cumulative sum of min with fill previous", + command: `SELECT cumulative_sum(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",20]]}]}]}`, + }, + &Query{ + name: "calculate cumulative sum of max with fill 0", + command: `SELECT cumulative_sum(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",20],["2010-07-01T18:47:02Z",20]]}]}]}`, + }, + &Query{ + name: "calculate cumulative sum of max with fill previous", + command: `SELECT cumulative_sum(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",20],["2010-07-01T18:47:02Z",40]]}]}]}`, + }, + &Query{ + name: "calculate cumulative sum of percentile with fill 0", + command: `SELECT cumulative_sum(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",10]]}]}]}`, + }, + &Query{ + name: "calculate cumulative sum of percentile with fill previous", + command: `SELECT cumulative_sum(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",20]]}]}]}`, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_SelectGroupByTime_MultipleAggregates(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig()) + defer s.Close() + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: fmt.Sprintf(`test,t=a x=1i 1000000000 +test,t=b y=1i 1000000000 +test,t=a x=2i 2000000000 +test,t=b y=2i 2000000000 +test,t=a x=3i 3000000000 +test,t=b y=3i 3000000000 +`)}, + } + + test.addQueries([]*Query{ + &Query{ + name: "two aggregates with a group by host", + command: `SELECT mean(x) as x, mean(y) as y from db0.rp0.test where time >= 1s and time < 4s group by t, time(1s)`, + exp: `{"results":[{"series":[{"name":"test","tags":{"t":"a"},"columns":["time","x","y"],"values":[["1970-01-01T00:00:01Z",1,null],["1970-01-01T00:00:02Z",2,null],["1970-01-01T00:00:03Z",3,null]]},{"name":"test","tags":{"t":"b"},"columns":["time","x","y"],"values":[["1970-01-01T00:00:01Z",null,1],["1970-01-01T00:00:02Z",null,2],["1970-01-01T00:00:03Z",null,3]]}]}]}`, + }, + }...) 
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_MathWithFill(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig()) + defer s.Close() + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: fmt.Sprintf(`cpu value=15 1278010020000000000 +`)}, + } + + test.addQueries([]*Query{ + &Query{ + name: "multiplication with fill previous", + command: `SELECT 4*mean(value) FROM db0.rp0.cpu WHERE time >= '2010-07-01 18:47:00' AND time < '2010-07-01 18:48:30' GROUP BY time(30s) FILL(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","mean"],"values":[["2010-07-01T18:47:00Z",60],["2010-07-01T18:47:30Z",60],["2010-07-01T18:48:00Z",60]]}]}]}`, + }, + &Query{ + name: "multiplication of mode value with fill previous", + command: `SELECT 4*mode(value) FROM db0.rp0.cpu WHERE time >= '2010-07-01 18:47:00' AND time < '2010-07-01 18:48:30' GROUP BY time(30s) FILL(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","mode"],"values":[["2010-07-01T18:47:00Z",60],["2010-07-01T18:47:30Z",60],["2010-07-01T18:48:00Z",60]]}]}]}`, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// mergeMany ensures that when merging many series together and some of them have a different number +// of points than others in a group by interval the results are correct +func TestServer_Query_MergeMany(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig()) + defer s.Close() + + // set infinite retention policy as we are inserting data in the past and don't want retention policy enforcement to make this test racy + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + + test := NewTest("db0", "rp0") + + writes := []string{} + for i := 1; i < 11; i++ { + for j := 1; j < 5+i%3; j++ { + data := fmt.Sprintf(`cpu,host=server_%d value=22 %d`, i, time.Unix(int64(j), int64(0)).UTC().UnixNano()) + writes = append(writes, data) + } + } + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "GROUP by time", + command: `SELECT count(value) FROM db0.rp0.cpu WHERE time >= '1970-01-01T00:00:01Z' AND time <= '1970-01-01T00:00:06Z' GROUP BY time(1s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","count"],"values":[["1970-01-01T00:00:01Z",10],["1970-01-01T00:00:02Z",10],["1970-01-01T00:00:03Z",10],["1970-01-01T00:00:04Z",10],["1970-01-01T00:00:05Z",7],["1970-01-01T00:00:06Z",3]]}]}]}`, + }, + &Query{ + skip: true, + name: "GROUP by tag - FIXME issue #2875", + command: `SELECT count(value) FROM db0.rp0.cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:00Z' group by host`, + exp: 
`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","count"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","count"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server03"},"columns":["time","count"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`, + }, + &Query{ + name: "GROUP by field", + command: `SELECT count(value) FROM db0.rp0.cpu group by value`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"value":""},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",50]]}]}]}`, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_SLimitAndSOffset(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig()) + defer s.Close() + + // set infinite retention policy as we are inserting data in the past and don't want retention policy enforcement to make this test racy + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + + test := NewTest("db0", "rp0") + + writes := []string{} + for i := 1; i < 10; i++ { + data := fmt.Sprintf(`cpu,region=us-east,host=server-%d value=%d %d`, i, i, time.Unix(int64(i), int64(0)).UnixNano()) + writes = append(writes, data) + } + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "SLIMIT 2 SOFFSET 1", + command: `SELECT count(value) FROM db0.rp0.cpu GROUP BY * SLIMIT 2 SOFFSET 1`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server-2","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server-3","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, + }, + &Query{ + name: "SLIMIT 2 SOFFSET 3", + command: `SELECT count(value) FROM db0.rp0.cpu GROUP BY * SLIMIT 2 SOFFSET 3`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server-4","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server-5","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, + }, + &Query{ + name: "SLIMIT 3 SOFFSET 8", + command: `SELECT count(value) FROM db0.rp0.cpu GROUP BY * SLIMIT 3 SOFFSET 8`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server-9","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, + }, + }...) 
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { t.Error(query.failureMessage()) } } @@ -2103,10 +2875,10 @@ func TestServer_Query_Regex(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { @@ -2175,7 +2947,7 @@ func TestServer_Query_Aggregates_Int(t *testing.T) { t.Parallel() - s := OpenDefaultServer(NewConfig(), "") + s := OpenDefaultServer(NewConfig()) defer s.Close() test := NewTest("db0", "rp0") @@ -2215,7 +2987,7 @@ func TestServer_Query_Aggregates_IntMax(t *testing.T) { t.Parallel() - s := OpenDefaultServer(NewConfig(), "") + s := OpenDefaultServer(NewConfig()) defer s.Close() test := NewTest("db0", "rp0") @@ -2255,7 +3027,7 @@ func TestServer_Query_Aggregates_IntMany(t *testing.T) { t.Parallel() - s := OpenDefaultServer(NewConfig(), "") + s := OpenDefaultServer(NewConfig()) defer s.Close() test := NewTest("db0", "rp0") @@ -2316,28 +3088,42 @@ exp: `{"results":[{"series":[{"name":"intmany","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",4]]}]}]}`, }, &Query{ + name: "mode - single - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT MODE(value) FROM intmany`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","mode"],"values":[["1970-01-01T00:00:00Z",4]]}]}]}`, + }, + &Query{ + name: "mode - multiple - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT MODE(value) FROM intmany where time < '2000-01-01T00:01:10Z'`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","mode"],"values":[["1970-01-01T00:00:00Z",4]]}]}]}`, + }, + &Query{ name: "distinct as call - int", params: url.Values{"db": []string{"db0"}}, command: `SELECT DISTINCT(value) FROM intmany`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","distinct"],"values":[["1970-01-01T00:00:00Z",[2,4,5,7,9]]]}]}]}`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","distinct"],"values":[["1970-01-01T00:00:00Z",2],["1970-01-01T00:00:00Z",4],["1970-01-01T00:00:00Z",5],["1970-01-01T00:00:00Z",7],["1970-01-01T00:00:00Z",9]]}]}]}`, }, &Query{ name: "distinct alt syntax - int", params: url.Values{"db": []string{"db0"}}, command: `SELECT DISTINCT value FROM intmany`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","distinct"],"values":[["1970-01-01T00:00:00Z",[2,4,5,7,9]]]}]}]}`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","distinct"],"values":[["1970-01-01T00:00:00Z",2],["1970-01-01T00:00:00Z",4],["1970-01-01T00:00:00Z",5],["1970-01-01T00:00:00Z",7],["1970-01-01T00:00:00Z",9]]}]}]}`, }, &Query{ name: "distinct select tag - int", params: url.Values{"db": []string{"db0"}}, command: `SELECT DISTINCT(host) FROM intmany`, exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`, + skip: true, // FIXME(benbjohnson): should be allowed, need to stream tag values }, &Query{ name: "distinct alt select tag - int", params: url.Values{"db": 
[]string{"db0"}}, command: `SELECT DISTINCT host FROM intmany`, exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`, + skip: true, // FIXME(benbjohnson): should be allowed, need to stream tag values }, &Query{ name: "count distinct - int", @@ -2356,12 +3142,14 @@ params: url.Values{"db": []string{"db0"}}, command: `SELECT COUNT(DISTINCT host) FROM intmany`, exp: `{"results":[{"series":[{"name":"intmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, + skip: true, // FIXME(benbjohnson): stream tag values }, &Query{ name: "count distinct as call select tag - int", params: url.Values{"db": []string{"db0"}}, command: `SELECT COUNT(DISTINCT host) FROM intmany`, exp: `{"results":[{"series":[{"name":"intmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, + skip: true, // FIXME(benbjohnson): stream tag values }, }...) @@ -2385,7 +3173,7 @@ func TestServer_Query_Aggregates_IntMany_GroupBy(t *testing.T) { t.Parallel() - s := OpenDefaultServer(NewConfig(), "") + s := OpenDefaultServer(NewConfig()) defer s.Close() test := NewTest("db0", "rp0") @@ -2419,7 +3207,7 @@ name: "max order by time with time specified group by 30s", params: url.Values{"db": []string{"db0"}}, command: `SELECT time, max(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(30s)`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","max"],"values":[["2000-01-01T00:00:10Z",4],["2000-01-01T00:00:40Z",5],["2000-01-01T00:01:10Z",9]]}]}]}`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","max"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:30Z",5],["2000-01-01T00:01:00Z",9]]}]}]}`, }, &Query{ name: "min order by time without time specified group by 15s", @@ -2431,7 +3219,7 @@ name: "min order by time with time specified group by 15s", params: url.Values{"db": []string{"db0"}}, command: `SELECT time, min(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","min"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:20Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:50Z",5],["2000-01-01T00:01:00Z",7]]}]}]}`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","min"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:15Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:45Z",5],["2000-01-01T00:01:00Z",7]]}]}]}`, }, &Query{ name: "first order by time without time specified group by 15s", @@ -2443,7 +3231,7 @@ name: "first order by time with time specified group by 15s", params: url.Values{"db": []string{"db0"}}, command: `SELECT time, first(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:20Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:50Z",5],["2000-01-01T00:01:00Z",7]]}]}]}`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:15Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:45Z",5],["2000-01-01T00:01:00Z",7]]}]}]}`, }, &Query{ name: "last order by time without time specified group by 15s", @@ -2455,7 +3243,7 @@ name: "last order by time with time specified group by 15s", params: url.Values{"db": []string{"db0"}}, command: 
`SELECT time, last(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","last"],"values":[["2000-01-01T00:00:10Z",4],["2000-01-01T00:00:20Z",4],["2000-01-01T00:00:40Z",5],["2000-01-01T00:00:50Z",5],["2000-01-01T00:01:10Z",9]]}]}]}`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","last"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:15Z",4],["2000-01-01T00:00:30Z",5],["2000-01-01T00:00:45Z",5],["2000-01-01T00:01:00Z",9]]}]}]}`, }, }...) @@ -2479,7 +3267,7 @@ func TestServer_Query_Aggregates_IntMany_OrderByDesc(t *testing.T) { t.Parallel() - s := OpenDefaultServer(NewConfig(), "") + s := OpenDefaultServer(NewConfig()) defer s.Close() test := NewTest("db0", "rp0") @@ -2525,7 +3313,7 @@ func TestServer_Query_Aggregates_IntOverlap(t *testing.T) { t.Parallel() - s := OpenDefaultServer(NewConfig(), "") + s := OpenDefaultServer(NewConfig()) defer s.Close() test := NewTest("db0", "rp0") @@ -2539,19 +3327,19 @@ } test.addQueries([]*Query{ - &Query{ - name: "aggregation with no interval - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT count(value) FROM intoverlap WHERE time = '2000-01-01 00:00:00'`, - exp: `{"results":[{"series":[{"name":"intoverlap","columns":["time","count"],"values":[["2000-01-01T00:00:00Z",2]]}]}]}`, - }, - &Query{ - name: "sum - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SUM(value) FROM intoverlap WHERE time >= '2000-01-01 00:00:05' AND time <= '2000-01-01T00:00:10Z' GROUP BY time(10s), region`, - exp: `{"results":[{"series":[{"name":"intoverlap","tags":{"region":"us-east"},"columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",null],["2000-01-01T00:00:10Z",30]]}]}]}`, - }, - &Query{ + /* &Query{ + name: "aggregation with no interval - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT count(value) FROM intoverlap WHERE time = '2000-01-01 00:00:00'`, + exp: `{"results":[{"series":[{"name":"intoverlap","columns":["time","count"],"values":[["2000-01-01T00:00:00Z",2]]}]}]}`, + }, + &Query{ + name: "sum - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT SUM(value) FROM intoverlap WHERE time >= '2000-01-01 00:00:05' AND time <= '2000-01-01T00:00:10Z' GROUP BY time(10s), region`, + exp: `{"results":[{"series":[{"name":"intoverlap","tags":{"region":"us-east"},"columns":["time","sum"],"values":[["2000-01-01T00:00:10Z",30]]}]}]}`, + }, + */&Query{ name: "aggregation with a null field value - int", params: url.Values{"db": []string{"db0"}}, command: `SELECT SUM(value) FROM intoverlap GROUP BY region`, @@ -2592,7 +3380,7 @@ func TestServer_Query_Aggregates_FloatSingle(t *testing.T) { t.Parallel() - s := OpenDefaultServer(NewConfig(), "") + s := OpenDefaultServer(NewConfig()) defer s.Close() test := NewTest("db0", "rp0") @@ -2631,7 +3419,7 @@ func TestServer_Query_Aggregates_FloatMany(t *testing.T) { t.Parallel() - s := OpenDefaultServer(NewConfig(), "") + s := OpenDefaultServer(NewConfig()) defer s.Close() test := NewTest("db0", "rp0") @@ -2686,28 +3474,42 @@ exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",4]]}]}]}`, }, &Query{ + name: "mode - single - float", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT MODE(value) FROM floatmany`, + exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","mode"],"values":[["1970-01-01T00:00:00Z",4]]}]}]}`, + }, + 
&Query{ + name: "mode - multiple - float", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT MODE(value) FROM floatmany where time < '2000-01-01T00:00:10Z'`, + exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","mode"],"values":[["1970-01-01T00:00:00Z",2]]}]}]}`, + }, + &Query{ name: "distinct as call - float", params: url.Values{"db": []string{"db0"}}, command: `SELECT DISTINCT(value) FROM floatmany`, - exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","distinct"],"values":[["1970-01-01T00:00:00Z",[2,4,5,7,9]]]}]}]}`, + exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","distinct"],"values":[["1970-01-01T00:00:00Z",2],["1970-01-01T00:00:00Z",4],["1970-01-01T00:00:00Z",5],["1970-01-01T00:00:00Z",7],["1970-01-01T00:00:00Z",9]]}]}]}`, }, &Query{ name: "distinct alt syntax - float", params: url.Values{"db": []string{"db0"}}, command: `SELECT DISTINCT value FROM floatmany`, - exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","distinct"],"values":[["1970-01-01T00:00:00Z",[2,4,5,7,9]]]}]}]}`, + exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","distinct"],"values":[["1970-01-01T00:00:00Z",2],["1970-01-01T00:00:00Z",4],["1970-01-01T00:00:00Z",5],["1970-01-01T00:00:00Z",7],["1970-01-01T00:00:00Z",9]]}]}]}`, }, &Query{ name: "distinct select tag - float", params: url.Values{"db": []string{"db0"}}, command: `SELECT DISTINCT(host) FROM floatmany`, exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`, + skip: true, // FIXME(benbjohnson): should be allowed, stream tag values }, &Query{ name: "distinct alt select tag - float", params: url.Values{"db": []string{"db0"}}, command: `SELECT DISTINCT host FROM floatmany`, exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`, + skip: true, // FIXME(benbjohnson): should be allowed, stream tag values }, &Query{ name: "count distinct - float", @@ -2726,12 +3528,14 @@ params: url.Values{"db": []string{"db0"}}, command: `SELECT COUNT(DISTINCT host) FROM floatmany`, exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, + skip: true, // FIXME(benbjohnson): stream tag values }, &Query{ name: "count distinct as call select tag - float", params: url.Values{"db": []string{"db0"}}, command: `SELECT COUNT(DISTINCT host) FROM floatmany`, exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, + skip: true, // FIXME(benbjohnson): stream tag values }, }...)
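The hunks above add coverage for the new MODE() aggregate and change the expected DISTINCT output from a single array-valued row to one row per distinct value. As an illustrative sketch only (not code from this diff, and with tie-breaking simplified), the semantics these MODE() tests exercise amount to returning the most frequently occurring field value:

package main

import "fmt"

// mode returns the most frequently occurring value. Ties are broken by the
// first value to reach the highest count, which is a simplification of the
// server's behavior.
func mode(values []float64) float64 {
	counts := make(map[float64]int)
	var best float64
	bestN := 0
	for _, v := range values {
		counts[v]++
		if counts[v] > bestN {
			best, bestN = v, counts[v]
		}
	}
	return best
}

func main() {
	fmt.Println(mode([]float64{2, 2, 4, 4, 4, 5, 7, 9})) // 4
}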
@@ -2755,7 +3559,7 @@ func TestServer_Query_Aggregates_FloatOverlap(t *testing.T) { t.Parallel() - s := OpenDefaultServer(NewConfig(), "") + s := OpenDefaultServer(NewConfig()) defer s.Close() test := NewTest("db0", "rp0") @@ -2819,9 +3623,68 @@ } } +func TestServer_Query_Aggregates_GroupByOffset(t *testing.T) { + t.Parallel() + s := OpenDefaultServer(NewConfig()) + defer s.Close() + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join([]string{ + fmt.Sprintf(`offset,region=us-east,host=serverA value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`offset,region=us-east,host=serverB value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), + fmt.Sprintf(`offset,region=us-west,host=serverC value=100.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + }, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "group by offset - standard", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT sum(value) FROM "offset" WHERE time >= '1999-12-31T23:59:55Z' AND time < '2000-01-01T00:00:15Z' GROUP BY time(10s, 5s) FILL(0)`, + exp: `{"results":[{"series":[{"name":"offset","columns":["time","sum"],"values":[["1999-12-31T23:59:55Z",120],["2000-01-01T00:00:05Z",30]]}]}]}`, + }, + &Query{ + name: "group by offset - misaligned time", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT sum(value) FROM "offset" WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:20Z' GROUP BY time(10s, 5s) FILL(0)`, + exp: `{"results":[{"series":[{"name":"offset","columns":["time","sum"],"values":[["1999-12-31T23:59:55Z",120],["2000-01-01T00:00:05Z",30],["2000-01-01T00:00:15Z",0]]}]}]}`, + }, + &Query{ + name: "group by offset - negative time", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT sum(value) FROM "offset" WHERE time >= '1999-12-31T23:59:55Z' AND time < '2000-01-01T00:00:15Z' GROUP BY time(10s, -5s) FILL(0)`, + exp: `{"results":[{"series":[{"name":"offset","columns":["time","sum"],"values":[["1999-12-31T23:59:55Z",120],["2000-01-01T00:00:05Z",30]]}]}]}`, + }, + &Query{ + name: "group by offset - modulo", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT sum(value) FROM "offset" WHERE time >= '1999-12-31T23:59:55Z' AND time < '2000-01-01T00:00:15Z' GROUP BY time(10s, 35s) FILL(0)`, + exp: `{"results":[{"series":[{"name":"offset","columns":["time","sum"],"values":[["1999-12-31T23:59:55Z",120],["2000-01-01T00:00:05Z",30]]}]}]}`, + }, + }...) 
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + func TestServer_Query_Aggregates_Load(t *testing.T) { t.Parallel() - s := OpenDefaultServer(NewConfig(), "") + s := OpenDefaultServer(NewConfig()) defer s.Close() test := NewTest("db0", "rp0") @@ -2844,13 +3707,13 @@ name: "group by multiple dimensions", params: url.Values{"db": []string{"db0"}}, command: `SELECT sum(value)*2 FROM load`, - exp: `{"results":[{"series":[{"name":"load","columns":["time",""],"values":[["1970-01-01T00:00:00Z",300]]}]}]}`, + exp: `{"results":[{"series":[{"name":"load","columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",300]]}]}]}`, }, &Query{ name: "group by multiple dimensions", params: url.Values{"db": []string{"db0"}}, command: `SELECT sum(value)/2 FROM load`, - exp: `{"results":[{"series":[{"name":"load","columns":["time",""],"values":[["1970-01-01T00:00:00Z",75]]}]}]}`, + exp: `{"results":[{"series":[{"name":"load","columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",75]]}]}]}`, }, }...) @@ -2874,7 +3737,7 @@ func TestServer_Query_Aggregates_CPU(t *testing.T) { t.Parallel() - s := OpenDefaultServer(NewConfig(), "") + s := OpenDefaultServer(NewConfig()) defer s.Close() test := NewTest("db0", "rp0") @@ -2914,7 +3777,7 @@ func TestServer_Query_Aggregates_String(t *testing.T) { t.Parallel() - s := OpenDefaultServer(NewConfig(), "") + s := OpenDefaultServer(NewConfig()) defer s.Close() test := NewTest("db0", "rp0") @@ -2932,36 +3795,42 @@ params: url.Values{"db": []string{"db0"}}, command: `SELECT STDDEV(value) FROM stringdata`, exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","stddev"],"values":[["1970-01-01T00:00:00Z",null]]}]}]}`, + skip: true, // FIXME(benbjohnson): allow non-float var ref expr in cursor iterator }, &Query{ name: "MEAN on string data - string", params: url.Values{"db": []string{"db0"}}, command: `SELECT MEAN(value) FROM stringdata`, exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","mean"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, + skip: true, // FIXME(benbjohnson): allow non-float var ref expr in cursor iterator }, &Query{ name: "MEDIAN on string data - string", params: url.Values{"db": []string{"db0"}}, command: `SELECT MEDIAN(value) FROM stringdata`, exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",null]]}]}]}`, + skip: true, // FIXME(benbjohnson): allow non-float var ref expr in cursor iterator }, &Query{ name: "COUNT on string data - string", params: url.Values{"db": []string{"db0"}}, command: `SELECT COUNT(value) FROM stringdata`, exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",2]]}]}]}`, + skip: true, // FIXME(benbjohnson): allow non-float var ref expr in cursor iterator }, &Query{ name: "FIRST on string data - string", params: url.Values{"db": []string{"db0"}}, command: `SELECT FIRST(value) FROM stringdata`, exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","first"],"values":[["2000-01-01T00:00:03Z","first"]]}]}]}`, + skip: true, // FIXME(benbjohnson): allow non-float var ref expr in cursor iterator }, &Query{ name: "LAST on string data - string", params: url.Values{"db": 
[]string{"db0"}}, command: `SELECT LAST(value) FROM stringdata`, exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","last"],"values":[["2000-01-01T00:00:04Z","last"]]}]}]}`, + skip: true, // FIXME(benbjohnson): allow non-float var ref expr in cursor iterator }, }...) @@ -2985,10 +3854,10 @@ func TestServer_Query_AggregateSelectors(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { @@ -3017,7 +3886,7 @@ name: "baseline", params: url.Values{"db": []string{"db0"}}, command: `SELECT * FROM network`, - exp: `{"results":[{"series":[{"name":"network","columns":["time","core","host","region","rx","tx"],"values":[["2000-01-01T00:00:00Z",2,"server01","west",10,20],["2000-01-01T00:00:10Z",3,"server02","west",40,50],["2000-01-01T00:00:20Z",4,"server03","east",40,55],["2000-01-01T00:00:30Z",1,"server04","east",40,60],["2000-01-01T00:00:40Z",2,"server05","west",50,70],["2000-01-01T00:00:50Z",3,"server06","east",50,40],["2000-01-01T00:01:00Z",4,"server07","west",70,30],["2000-01-01T00:01:10Z",1,"server08","east",90,10],["2000-01-01T00:01:20Z",2,"server09","east",5,4]]}]}]}`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","core","core_1","host","region","rx","tx"],"values":[["2000-01-01T00:00:00Z",2,"1","server01","west",10,20],["2000-01-01T00:00:10Z",3,"2","server02","west",40,50],["2000-01-01T00:00:20Z",4,"3","server03","east",40,55],["2000-01-01T00:00:30Z",1,"4","server04","east",40,60],["2000-01-01T00:00:40Z",2,"1","server05","west",50,70],["2000-01-01T00:00:50Z",3,"2","server06","east",50,40],["2000-01-01T00:01:00Z",4,"3","server07","west",70,30],["2000-01-01T00:01:10Z",1,"4","server08","east",90,10],["2000-01-01T00:01:20Z",2,"1","server09","east",5,4]]}]}]}`, }, &Query{ name: "max - baseline 30s", @@ -3046,13 +3915,13 @@ name: "max - time", params: url.Values{"db": []string{"db0"}}, command: `SELECT time, max(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"series":[{"name":"network","columns":["time","max"],"values":[["2000-01-01T00:00:10Z",40],["2000-01-01T00:00:40Z",50],["2000-01-01T00:01:10Z",90]]}]}]}`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","max"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",90]]}]}]}`, }, &Query{ name: "max - time and tx", params: url.Values{"db": []string{"db0"}}, command: `SELECT time, tx, max(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"series":[{"name":"network","columns":["time","tx","max"],"values":[["2000-01-01T00:00:10Z",50,40],["2000-01-01T00:00:40Z",70,50],["2000-01-01T00:01:10Z",10,90]]}]}]}`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","tx","max"],"values":[["2000-01-01T00:00:00Z",50,40],["2000-01-01T00:00:30Z",70,50],["2000-01-01T00:01:00Z",10,90]]}]}]}`, }, &Query{ name: "min - baseline 30s", @@ -3070,13 +3939,13 @@ name: "min - time", params: url.Values{"db": []string{"db0"}}, command: `SELECT time, min(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by 
time(30s)`, - exp: `{"results":[{"series":[{"name":"network","columns":["time","min"],"values":[["2000-01-01T00:00:00Z",10],["2000-01-01T00:00:30Z",40],["2000-01-01T00:01:20Z",5]]}]}]}`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","min"],"values":[["2000-01-01T00:00:00Z",10],["2000-01-01T00:00:30Z",40],["2000-01-01T00:01:00Z",5]]}]}]}`, }, &Query{ name: "min - time and tx", params: url.Values{"db": []string{"db0"}}, command: `SELECT time, tx, min(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"series":[{"name":"network","columns":["time","tx","min"],"values":[["2000-01-01T00:00:00Z",20,10],["2000-01-01T00:00:30Z",60,40],["2000-01-01T00:01:20Z",4,5]]}]}]}`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","tx","min"],"values":[["2000-01-01T00:00:00Z",20,10],["2000-01-01T00:00:30Z",60,40],["2000-01-01T00:01:00Z",4,5]]}]}]}`, }, &Query{ name: "max,min - baseline 30s", @@ -3124,13 +3993,13 @@ name: "last - time", params: url.Values{"db": []string{"db0"}}, command: `SELECT time, last(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"series":[{"name":"network","columns":["time","last"],"values":[["2000-01-01T00:00:20Z",40],["2000-01-01T00:00:50Z",50],["2000-01-01T00:01:20Z",5]]}]}]}`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","last"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",5]]}]}]}`, }, &Query{ name: "last - time and tx", params: url.Values{"db": []string{"db0"}}, command: `SELECT time, tx, last(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"series":[{"name":"network","columns":["time","tx","last"],"values":[["2000-01-01T00:00:20Z",55,40],["2000-01-01T00:00:50Z",40,50],["2000-01-01T00:01:20Z",4,5]]}]}]}`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","tx","last"],"values":[["2000-01-01T00:00:00Z",55,40],["2000-01-01T00:00:30Z",40,50],["2000-01-01T00:01:00Z",4,5]]}]}]}`, }, &Query{ name: "count - baseline 30s", @@ -3154,7 +4023,7 @@ name: "distinct - baseline 30s", params: url.Values{"db": []string{"db0"}}, command: `SELECT distinct(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"series":[{"name":"network","columns":["time","distinct"],"values":[["2000-01-01T00:00:00Z",[10,40]],["2000-01-01T00:00:30Z",[40,50]],["2000-01-01T00:01:00Z",[5,70,90]]]}]}]}`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","distinct"],"values":[["2000-01-01T00:00:00Z",10],["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",70],["2000-01-01T00:01:00Z",90],["2000-01-01T00:01:00Z",5]]}]}]}`, }, &Query{ name: "distinct - time", @@ -3205,6 +4074,42 @@ exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, }, &Query{ + name: "mode - baseline 30s", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT mode(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","mode"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",5]]}]}]}`, + }, + &Query{ + name: "mode - time", + params: 
url.Values{"db": []string{"db0"}}, + command: `SELECT time, mode(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, + }, + &Query{ + name: "mode - tx", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT tx, mode(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, + }, + &Query{ + name: "mode - baseline 30s", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT mode(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","mode"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",5]]}]}]}`, + }, + &Query{ + name: "mode - time", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT time, mode(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, + }, + &Query{ + name: "mode - tx", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT tx, mode(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, + }, + &Query{ name: "spread - baseline 30s", params: url.Values{"db": []string{"db0"}}, command: `SELECT spread(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, @@ -3250,13 +4155,13 @@ name: "percentile - time", params: url.Values{"db": []string{"db0"}}, command: `SELECT time, percentile(rx, 75) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","percentile"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",70]]}]}]}`, }, &Query{ name: "percentile - tx", params: url.Values{"db": []string{"db0"}}, command: `SELECT tx, percentile(rx, 75) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","tx","percentile"],"values":[["2000-01-01T00:00:00Z",50,40],["2000-01-01T00:00:30Z",70,50],["2000-01-01T00:01:00Z",30,70]]}]}]}`, }, }...) 
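A recurring change in the expectations above: selector results under GROUP BY time(30s) are now reported at the start of each interval rather than at the timestamp of the selected point (for example 00:00:00, 00:00:30, 00:01:00 instead of 00:00:10, 00:00:40, 00:01:10). A minimal sketch of that bucket alignment, offered as an illustration rather than the server's implementation:

package main

import (
	"fmt"
	"time"
)

// bucketStart truncates a point's timestamp down to the start of its
// GROUP BY time(interval) bucket.
func bucketStart(t time.Time, interval time.Duration) time.Time {
	return t.Truncate(interval)
}

func main() {
	t, _ := time.Parse(time.RFC3339, "2000-01-01T00:00:40Z")
	fmt.Println(bucketStart(t, 30*time.Second)) // 2000-01-01 00:00:30 +0000 UTC
}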
@@ -3281,10 +4186,10 @@ func TestServer_Query_TopInt(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { @@ -3363,53 +4268,18 @@ exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T01:00:00Z",7],["2000-01-01T02:00:00Z",9]]}]}]}`, }, &Query{ - name: "top - cpu - time specified - hourly", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, TOP(value, 1) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:20Z",4],["2000-01-01T01:00:10Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`, - }, - &Query{ - name: "top - cpu - time specified - hourly - epoch ms", - params: url.Values{"db": []string{"db0"}, "epoch": []string{"ms"}}, - command: `SELECT time, TOP(value, 1) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, - exp: fmt.Sprintf( - `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[[%d,4],[%d,7],[%d,9]]}]}]}`, - mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()/int64(time.Millisecond), - mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:10Z").UnixNano()/int64(time.Millisecond), - mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:10Z").UnixNano()/int64(time.Millisecond), - ), - }, - &Query{ - name: "top - cpu - time specified (not first) - hourly", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, 1), time FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:20Z",4],["2000-01-01T01:00:10Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`, - }, - &Query{ name: "top - cpu - 2 values hourly", params: url.Values{"db": []string{"db0"}}, command: `SELECT TOP(value, 2) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:00Z",3],["2000-01-01T01:00:00Z",7],["2000-01-01T01:00:00Z",6],["2000-01-01T02:00:00Z",9],["2000-01-01T02:00:00Z",7]]}]}]}`, }, &Query{ - name: "top - cpu - time specified - 2 values hourly", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, 2), time FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:10Z",3],["2000-01-01T00:00:20Z",4],["2000-01-01T01:00:10Z",7],["2000-01-01T01:00:20Z",6],["2000-01-01T02:00:00Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`, - }, - &Query{ name: "top - cpu - 3 values hourly - validates that a bucket can have less than limit if no values exist in that time bucket", params: url.Values{"db": []string{"db0"}}, command: `SELECT TOP(value, 3) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, exp: 
`{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:00Z",3],["2000-01-01T00:00:00Z",2],["2000-01-01T01:00:00Z",7],["2000-01-01T01:00:00Z",6],["2000-01-01T01:00:00Z",5],["2000-01-01T02:00:00Z",9],["2000-01-01T02:00:00Z",7]]}]}]}`, }, &Query{ - name: "top - cpu - time specified - 3 values hourly - validates that a bucket can have less than limit if no values exist in that time bucket", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, 3), time FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:10Z",3],["2000-01-01T00:00:20Z",4],["2000-01-01T01:00:00Z",5],["2000-01-01T01:00:10Z",7],["2000-01-01T01:00:20Z",6],["2000-01-01T02:00:00Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`, - }, - &Query{ name: "top - memory - 2 values, two tags", params: url.Values{"db": []string{"db0"}}, command: `SELECT TOP(value, 2), host, service FROM memory`, @@ -3483,10 +4353,10 @@ // Test various aggregates when different series only have data for the same timestamp. func TestServer_Query_Aggregates_IdenticalTime(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { @@ -3551,10 +4421,10 @@ // but will only put the values in the bucket that match the time range func TestServer_Query_GroupByTimeCutoffs(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { @@ -3633,10 +4503,10 @@ func TestServer_Write_Precision(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { @@ -3750,10 +4620,10 @@ func TestServer_Query_Wildcards(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { @@ -3807,13 +4677,13 @@ name: "wildcard and field in select", params: url.Values{"db": []string{"db0"}}, command: `SELECT value, * FROM wildcard`, - exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","region","value","valx"],"values":[["2000-01-01T00:00:00Z","us-east",10,null],["2000-01-01T00:00:10Z","us-east",null,20],["2000-01-01T00:00:20Z","us-east",30,40]]}]}]}`, + 
exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","value","region","value_1","valx"],"values":[["2000-01-01T00:00:00Z",10,"us-east",10,null],["2000-01-01T00:00:10Z",null,"us-east",null,20],["2000-01-01T00:00:20Z",30,"us-east",30,40]]}]}]}`, }, &Query{ name: "field and wildcard in select", params: url.Values{"db": []string{"db0"}}, command: `SELECT value, * FROM wildcard`, - exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","region","value","valx"],"values":[["2000-01-01T00:00:00Z","us-east",10,null],["2000-01-01T00:00:10Z","us-east",null,20],["2000-01-01T00:00:20Z","us-east",30,40]]}]}]}`, + exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","value","region","value_1","valx"],"values":[["2000-01-01T00:00:00Z",10,"us-east",10,null],["2000-01-01T00:00:10Z",null,"us-east",null,20],["2000-01-01T00:00:20Z",30,"us-east",30,40]]}]}]}`, }, &Query{ name: "field and wildcard in group by", @@ -3868,10 +4738,10 @@ func TestServer_Query_WildcardExpansion(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { @@ -3915,23 +4785,16 @@ }, &Query{ - name: "only tags, no fields", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT host, region FROM wildcard`, - exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`, - }, - - &Query{ name: "no wildcard with alias", params: url.Values{"db": []string{"db0"}}, command: `SELECT cpu as c, host as h, region, value FROM wildcard`, exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","c","h","region","value"],"values":[["2000-01-01T00:00:00Z",80,"A","us-east",10],["2000-01-01T00:00:10Z",90,"B","us-east",20],["2000-01-01T00:00:20Z",70,"B","us-west",30],["2000-01-01T00:00:30Z",60,"A","us-east",40]]}]}]}`, }, &Query{ - name: "duplicate tag and field key, always favor field over tag", + name: "duplicate tag and field key", command: `SELECT * FROM dupnames`, params: url.Values{"db": []string{"db0"}}, - exp: `{"results":[{"series":[{"name":"dupnames","columns":["time","day","region","value"],"values":[["2000-01-01T00:00:00Z",3,"us-east",10],["2000-01-01T00:00:10Z",2,"us-east",20],["2000-01-01T00:00:20Z",1,"us-west",30]]}]}]}`, + exp: `{"results":[{"series":[{"name":"dupnames","columns":["time","day","day_1","region","value"],"values":[["2000-01-01T00:00:00Z",3,"1","us-east",10],["2000-01-01T00:00:10Z",2,"2","us-east",20],["2000-01-01T00:00:20Z",1,"3","us-west",30]]}]}]}`, }, }...) 
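The wildcard expectations above change so that a tag sharing a name with a field is kept and reported under a suffixed column (for example day and day_1) instead of the field silently winning. A hedged sketch of that column de-duplication, an assumption about the behavior rather than code from this diff:

package main

import "fmt"

// dedupeColumns keeps every column but renames repeats with a numeric suffix,
// e.g. ["time", "day", "day"] becomes ["time", "day", "day_1"].
func dedupeColumns(names []string) []string {
	seen := make(map[string]int)
	out := make([]string, 0, len(names))
	for _, n := range names {
		if c, ok := seen[n]; ok {
			out = append(out, fmt.Sprintf("%s_%d", n, c))
			seen[n] = c + 1
			continue
		}
		seen[n] = 1
		out = append(out, n)
	}
	return out
}

func main() {
	fmt.Println(dedupeColumns([]string{"time", "day", "day", "region", "value"}))
	// [time day day_1 region value]
}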
@@ -3955,10 +4818,10 @@ func TestServer_Query_AcrossShardsAndFields(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { @@ -4029,10 +4892,10 @@ func TestServer_Query_Where_Fields(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { @@ -4251,10 +5114,10 @@ func TestServer_Query_Where_With_Tags(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { @@ -4282,10 +5145,137 @@ exp: `{"results":[{"series":[{"name":"where_events","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z","bar"],["2009-11-10T23:00:03Z","baz"],["2009-11-10T23:00:06Z","bap"]]}]}]}`, }, &Query{ + name: "tag or field", + params: url.Values{"db": []string{"db0"}}, + command: `select foo from where_events where tennant = 'paul' OR foo = 'bar'`, + exp: `{"results":[{"series":[{"name":"where_events","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z","bar"],["2009-11-10T23:00:03Z","baz"],["2009-11-10T23:00:04Z","bat"],["2009-11-10T23:00:05Z","bar"]]}]}]}`, + }, + &Query{ + name: "non-existent tag and field", + params: url.Values{"db": []string{"db0"}}, + command: `select foo from where_events where tenant != 'paul' AND foo = 'bar'`, + exp: `{"results":[{"series":[{"name":"where_events","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z","bar"],["2009-11-10T23:00:05Z","bar"]]}]}]}`, + }, + &Query{ + name: "non-existent tag or field", + params: url.Values{"db": []string{"db0"}}, + command: `select foo from where_events where tenant != 'paul' OR foo = 'bar'`, + exp: `{"results":[{"series":[{"name":"where_events","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z","bar"],["2009-11-10T23:00:03Z","baz"],["2009-11-10T23:00:04Z","bat"],["2009-11-10T23:00:05Z","bar"],["2009-11-10T23:00:06Z","bap"]]}]}]}`, + }, + &Query{ name: "where on tag that should be double quoted but isn't", params: url.Values{"db": []string{"db0"}}, command: `show series where data-center = 'foo'`, - exp: `{"error":"error parsing query: found DATA, expected identifier, string, number, bool at line 1, char 19"}`, + exp: `{"results":[{"error":"invalid tag comparison operator"}]}`, + }, + &Query{ + name: "where comparing tag and field", + params: url.Values{"db": []string{"db0"}}, + command: `select foo from where_events where tennant != foo`, + exp: 
`{"results":[{"series":[{"name":"where_events","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z","bar"],["2009-11-10T23:00:03Z","baz"],["2009-11-10T23:00:04Z","bat"],["2009-11-10T23:00:05Z","bar"],["2009-11-10T23:00:06Z","bap"]]}]}]}`, + }, + &Query{ + name: "where comparing tag and tag", + params: url.Values{"db": []string{"db0"}}, + command: `select foo from where_events where tennant = tennant`, + exp: `{"results":[{"series":[{"name":"where_events","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z","bar"],["2009-11-10T23:00:03Z","baz"],["2009-11-10T23:00:04Z","bat"],["2009-11-10T23:00:05Z","bar"],["2009-11-10T23:00:06Z","bap"]]}]}]}`, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_With_EmptyTags(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig()) + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`cpu value=1 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()), + fmt.Sprintf(`cpu,host=server01 value=2 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:03Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "where empty tag", + params: url.Values{"db": []string{"db0"}}, + command: `select value from cpu where host = ''`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2009-11-10T23:00:02Z",1]]}]}]}`, + }, + &Query{ + name: "where not empty tag", + params: url.Values{"db": []string{"db0"}}, + command: `select value from cpu where host != ''`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2009-11-10T23:00:03Z",2]]}]}]}`, + }, + &Query{ + name: "where regex none", + params: url.Values{"db": []string{"db0"}}, + command: `select value from cpu where host !~ /.*/`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "where regex exact", + params: url.Values{"db": []string{"db0"}}, + command: `select value from cpu where host =~ /^server01$/`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2009-11-10T23:00:03Z",2]]}]}]}`, + }, + &Query{ + name: "where regex exact (not)", + params: url.Values{"db": []string{"db0"}}, + command: `select value from cpu where host !~ /^server01$/`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2009-11-10T23:00:02Z",1]]}]}]}`, + }, + &Query{ + name: "where regex at least one char", + params: url.Values{"db": []string{"db0"}}, + command: `select value from cpu where host =~ /.+/`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2009-11-10T23:00:03Z",2]]}]}]}`, + }, + &Query{ + name: "where regex not at least one char", + params: url.Values{"db": []string{"db0"}}, + command: `select value from cpu where host !~ /.+/`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2009-11-10T23:00:02Z",1]]}]}]}`, + }, + &Query{ + name: 
"group by empty tag", + params: url.Values{"db": []string{"db0"}}, + command: `select value from cpu group by host`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":""},"columns":["time","value"],"values":[["2009-11-10T23:00:02Z",1]]},{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["2009-11-10T23:00:03Z",2]]}]}]}`, + }, + &Query{ + name: "group by missing tag", + params: url.Values{"db": []string{"db0"}}, + command: `select value from cpu group by region`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"region":""},"columns":["time","value"],"values":[["2009-11-10T23:00:02Z",1],["2009-11-10T23:00:03Z",2]]}]}]}`, }, }...) @@ -4309,10 +5299,10 @@ func TestServer_Query_LimitAndOffset(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { @@ -4359,7 +5349,7 @@ &Query{ name: "limit - offset higher than number of points", command: `select foo from "limited" LIMIT 2 OFFSET 20`, - exp: `{"results":[{"series":[{"name":"limited","columns":["time","foo"]}]}]}`, + exp: `{"results":[{}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ @@ -4381,7 +5371,7 @@ params: url.Values{"db": []string{"db0"}}, }, &Query{ - name: "limit + offset equal to the number of points with group by time", + name: "limit + offset equal to the number of points with group by time", command: `select mean(foo) from "limited" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 3 OFFSET 3`, exp: `{"results":[{"series":[{"name":"limited","columns":["time","mean"],"values":[["2009-11-10T23:00:05Z",5]]}]}]}`, params: url.Values{"db": []string{"db0"}}, @@ -4393,15 +5383,15 @@ params: url.Values{"db": []string{"db0"}}, }, &Query{ - name: "limit higher than the number of data points should error", - command: `select mean(foo) from "limited" where time > '2000-01-01T00:00:00Z' group by time(1s), * fill(0) limit 2147483647`, - exp: `{"results":[{"error":"too many points in the group by interval. 
maybe you forgot to specify a where time clause?"}]}`, + name: "limit - group by tennant", + command: `select foo from "limited" group by tennant limit 1`, + exp: `{"results":[{"series":[{"name":"limited","tags":{"tennant":"paul"},"columns":["time","foo"],"values":[["2009-11-10T23:00:02Z",2]]},{"name":"limited","tags":{"tennant":"todd"},"columns":["time","foo"],"values":[["2009-11-10T23:00:05Z",5]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ - name: "limit1 higher than MaxGroupBy but the number of data points is less than MaxGroupBy", - command: `select mean(foo) from "limited" where time >= '2009-11-10T23:00:02Z' and time < '2009-11-10T23:00:03Z' group by time(1s), * fill(0) limit 2147483647`, - exp: `{"results":[{"series":[{"name":"limited","tags":{"tennant":"paul"},"columns":["time","mean"],"values":[["2009-11-10T23:00:02Z",2]]},{"name":"limited","tags":{"tennant":"todd"},"columns":["time","mean"],"values":[["2009-11-10T23:00:02Z",0]]}]}]}`, + name: "limit and offset - group by tennant", + command: `select foo from "limited" group by tennant limit 1 offset 1`, + exp: `{"results":[{"series":[{"name":"limited","tags":{"tennant":"paul"},"columns":["time","foo"],"values":[["2009-11-10T23:00:03Z",3]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, }...) @@ -4426,10 +5416,10 @@ func TestServer_Query_Fill(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { @@ -4464,7 +5454,7 @@ &Query{ name: "fill with value, WHERE no values match condition", command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' and val > 50 group by time(5s) FILL(1)`, - exp: `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",1],["2009-11-10T23:00:05Z",1],["2009-11-10T23:00:10Z",1],["2009-11-10T23:00:15Z",1]]}]}]}`, + exp: `{"results":[{}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ @@ -4525,10 +5515,10 @@ func TestServer_Query_Chunk(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { @@ -4577,16 +5567,16 @@ func TestServer_Query_DropAndRecreateMeasurement(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { t.Fatal(err) } - if err := s.CreateDatabaseAndRetentionPolicy("db1", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db1", newRetentionPolicySpec("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db1", "rp0"); err != nil { @@ -4606,6 
+5596,12 @@ test.addQueries([]*Query{ &Query{ + name: "verify cpu measurement exists in db1", + command: `SELECT * FROM cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","host","region","val"],"values":[["2000-01-01T00:00:00Z","serverA","uswest",23.2]]}]}]}`, + params: url.Values{"db": []string{"db1"}}, + }, + &Query{ name: "Drop Measurement, series tags preserved tests", command: `SHOW MEASUREMENTS`, exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"],["memory"]]}]}]}`, @@ -4614,7 +5610,7 @@ &Query{ name: "show series", command: `SHOW SERIES`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=serverA,region=uswest","serverA","uswest"]]},{"name":"memory","columns":["_key","host","region"],"values":[["memory,host=serverB,region=uswest","serverB","uswest"]]}]}]}`, + exp: `{"results":[{"series":[{"columns":["key"],"values":[["cpu,host=serverA,region=uswest"],["memory,host=serverB,region=uswest"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ @@ -4638,7 +5634,7 @@ &Query{ name: "verify series", command: `SHOW SERIES`, - exp: `{"results":[{"series":[{"name":"memory","columns":["_key","host","region"],"values":[["memory,host=serverB,region=uswest","serverB","uswest"]]}]}]}`, + exp: `{"results":[{"series":[{"columns":["key"],"values":[["memory,host=serverB,region=uswest"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ @@ -4697,22 +5693,94 @@ } } - test = NewTest("db0", "rp0") + test = NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: writes}, + } + + test.addQueries([]*Query{ + &Query{ + name: "verify measurements after recreation", + command: `SHOW MEASUREMENTS`, + exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"],["memory"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "verify cpu measurement has been re-inserted", + command: `SELECT * FROM cpu GROUP BY *`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"serverA","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:00Z",23.2]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + }...) 
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_ShowQueries_Future(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig()) + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`cpu,host=server01 value=100 %d`, models.MaxNanoTime), + } + + test := NewTest("db0", "rp0") test.writes = Writes{ - &Write{data: writes}, + &Write{data: strings.Join(writes, "\n")}, } test.addQueries([]*Query{ &Query{ - name: "verify measurements after recreation", - command: `SHOW MEASUREMENTS`, - exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"],["memory"]]}]}]}`, + name: `show measurements`, + command: "SHOW MEASUREMENTS", + exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ - name: "verify cpu measurement has been re-inserted", - command: `SELECT * FROM cpu GROUP BY *`, - exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"serverA","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:00Z",23.2]]}]}]}`, + name: `show series`, + command: "SHOW SERIES", + exp: `{"results":[{"series":[{"columns":["key"],"values":[["cpu,host=server01"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show tag keys`, + command: "SHOW TAG KEYS FROM cpu", + exp: `{"results":[{"series":[{"name":"cpu","columns":["tagKey"],"values":[["host"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show tag values`, + command: "SHOW TAG VALUES WITH KEY = \"host\"", + exp: `{"results":[{"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show field keys`, + command: "SHOW FIELD KEYS", + exp: `{"results":[{"series":[{"name":"cpu","columns":["fieldKey","fieldType"],"values":[["value","float"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, }...) 
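The SHOW SERIES expectations in the surrounding hunks now return a single key column whose values are series keys in line-protocol style: the measurement name followed by its tag pairs, sorted by tag key. A small sketch of how such a key can be composed, as an illustration of the format rather than the server's actual implementation:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// seriesKey joins a measurement with its tag pairs, sorted by tag key,
// producing keys like "cpu,host=server01,region=uswest".
func seriesKey(measurement string, tags map[string]string) string {
	keys := make([]string, 0, len(tags))
	for k := range tags {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	parts := []string{measurement}
	for _, k := range keys {
		parts = append(parts, k+"="+tags[k])
	}
	return strings.Join(parts, ",")
}

func main() {
	fmt.Println(seriesKey("cpu", map[string]string{"region": "uswest", "host": "server01"}))
	// cpu,host=server01,region=uswest
}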
@@ -4737,10 +5805,10 @@ func TestServer_Query_ShowSeries(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { @@ -4766,43 +5834,43 @@ &Query{ name: `show series`, command: "SHOW SERIES", - exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=server01","server01",""],["cpu,host=server01,region=uswest","server01","uswest"],["cpu,host=server01,region=useast","server01","useast"],["cpu,host=server02,region=useast","server02","useast"]]},{"name":"disk","columns":["_key","host","region"],"values":[["disk,host=server03,region=caeast","server03","caeast"]]},{"name":"gpu","columns":["_key","host","region"],"values":[["gpu,host=server02,region=useast","server02","useast"],["gpu,host=server03,region=caeast","server03","caeast"]]}]}]}`, + exp: `{"results":[{"series":[{"columns":["key"],"values":[["cpu,host=server01"],["cpu,host=server01,region=useast"],["cpu,host=server01,region=uswest"],["cpu,host=server02,region=useast"],["disk,host=server03,region=caeast"],["gpu,host=server02,region=useast"],["gpu,host=server03,region=caeast"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: `show series from measurement`, command: "SHOW SERIES FROM cpu", - exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=server01","server01",""],["cpu,host=server01,region=uswest","server01","uswest"],["cpu,host=server01,region=useast","server01","useast"],["cpu,host=server02,region=useast","server02","useast"]]}]}]}`, + exp: `{"results":[{"series":[{"columns":["key"],"values":[["cpu,host=server01"],["cpu,host=server01,region=useast"],["cpu,host=server01,region=uswest"],["cpu,host=server02,region=useast"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: `show series from regular expression`, command: "SHOW SERIES FROM /[cg]pu/", - exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=server01","server01",""],["cpu,host=server01,region=uswest","server01","uswest"],["cpu,host=server01,region=useast","server01","useast"],["cpu,host=server02,region=useast","server02","useast"]]},{"name":"gpu","columns":["_key","host","region"],"values":[["gpu,host=server02,region=useast","server02","useast"],["gpu,host=server03,region=caeast","server03","caeast"]]}]}]}`, + exp: `{"results":[{"series":[{"columns":["key"],"values":[["cpu,host=server01"],["cpu,host=server01,region=useast"],["cpu,host=server01,region=uswest"],["cpu,host=server02,region=useast"],["gpu,host=server02,region=useast"],["gpu,host=server03,region=caeast"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: `show series with where tag`, command: "SHOW SERIES WHERE region = 'uswest'", - exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=server01,region=uswest","server01","uswest"]]}]}]}`, + exp: `{"results":[{"series":[{"columns":["key"],"values":[["cpu,host=server01,region=uswest"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: `show series where tag matches regular expression`, command: "SHOW SERIES WHERE region =~ /ca.*/", - exp: 
`{"results":[{"series":[{"name":"disk","columns":["_key","host","region"],"values":[["disk,host=server03,region=caeast","server03","caeast"]]},{"name":"gpu","columns":["_key","host","region"],"values":[["gpu,host=server03,region=caeast","server03","caeast"]]}]}]}`, + exp: `{"results":[{"series":[{"columns":["key"],"values":[["disk,host=server03,region=caeast"],["gpu,host=server03,region=caeast"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: `show series`, command: "SHOW SERIES WHERE host !~ /server0[12]/", - exp: `{"results":[{"series":[{"name":"disk","columns":["_key","host","region"],"values":[["disk,host=server03,region=caeast","server03","caeast"]]},{"name":"gpu","columns":["_key","host","region"],"values":[["gpu,host=server03,region=caeast","server03","caeast"]]}]}]}`, + exp: `{"results":[{"series":[{"columns":["key"],"values":[["disk,host=server03,region=caeast"],["gpu,host=server03,region=caeast"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: `show series with from and where`, command: "SHOW SERIES FROM cpu WHERE region = 'useast'", - exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=server01,region=useast","server01","useast"],["cpu,host=server02,region=useast","server02","useast"]]}]}]}`, + exp: `{"results":[{"series":[{"columns":["key"],"values":[["cpu,host=server01,region=useast"],["cpu,host=server02,region=useast"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ @@ -4814,7 +5882,7 @@ &Query{ name: `show series with WHERE field should fail`, command: "SHOW SERIES WHERE value > 10.0", - exp: `{"results":[{"error":"SHOW SERIES doesn't support fields in WHERE clause"}]}`, + exp: `{"results":[{"error":"invalid tag comparison operator"}]}`, params: url.Values{"db": []string{"db0"}}, }, }...) @@ -4837,12 +5905,56 @@ } } +func TestServer_Query_ShowStats(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig()) + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + if err := s.MetaClient.CreateSubscription("db0", "rp0", "foo", "ALL", []string{"udp://localhost:9000"}); err != nil { + t.Fatal(err) + } + + test := NewTest("db0", "rp0") + test.addQueries([]*Query{ + &Query{ + name: `show stats`, + command: "SHOW STATS", + exp: "subscriber", // Should see a subscriber stat in the json + pattern: true, + }, + }...)
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + func TestServer_Query_ShowMeasurements(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { @@ -4929,10 +6041,10 @@ func TestServer_Query_ShowTagKeys(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { @@ -4988,37 +6100,67 @@ &Query{ name: "show tag values with key", command: "SHOW TAG VALUES WITH KEY = host", - exp: `{"results":[{"series":[{"name":"hostTagValues","columns":["host"],"values":[["server01"],["server02"],["server03"]]}]}]}`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"],["host","server02"]]},{"name":"disk","columns":["key","value"],"values":[["host","server03"]]},{"name":"gpu","columns":["key","value"],"values":[["host","server02"],["host","server03"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "show tag values with key regex", + command: "SHOW TAG VALUES WITH KEY =~ /ho/", + exp: `{"results":[{"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"],["host","server02"]]},{"name":"disk","columns":["key","value"],"values":[["host","server03"]]},{"name":"gpu","columns":["key","value"],"values":[["host","server02"],["host","server03"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: `show tag values with key and where`, command: `SHOW TAG VALUES FROM cpu WITH KEY = host WHERE region = 'uswest'`, - exp: `{"results":[{"series":[{"name":"hostTagValues","columns":["host"],"values":[["server01"]]}]}]}`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ - name: `show tag values with key and where matches regular expression`, + name: `show tag values with key regex and where`, + command: `SHOW TAG VALUES FROM cpu WITH KEY =~ /ho/ WHERE region = 'uswest'`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show tag values with key and where matches the regular expression`, command: `SHOW TAG VALUES WITH KEY = host WHERE region =~ /ca.*/`, - exp: `{"results":[{"series":[{"name":"hostTagValues","columns":["host"],"values":[["server03"]]}]}]}`, + exp: `{"results":[{"series":[{"name":"disk","columns":["key","value"],"values":[["host","server03"]]},{"name":"gpu","columns":["key","value"],"values":[["host","server03"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, 
&Query{ - name: `show tag values with key and where does not matche regular expression`, + name: `show tag values with key and where does not match the regular expression`, command: `SHOW TAG VALUES WITH KEY = region WHERE host !~ /server0[12]/`, - exp: `{"results":[{"series":[{"name":"regionTagValues","columns":["region"],"values":[["caeast"]]}]}]}`, + exp: `{"results":[{"series":[{"name":"disk","columns":["key","value"],"values":[["region","caeast"]]},{"name":"gpu","columns":["key","value"],"values":[["region","caeast"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show tag values with key and where partially matches the regular expression`, + command: `SHOW TAG VALUES WITH KEY = host WHERE region =~ /us/`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"],["host","server02"]]},{"name":"gpu","columns":["key","value"],"values":[["host","server02"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show tag values with key and where partially does not match the regular expression`, + command: `SHOW TAG VALUES WITH KEY = host WHERE region !~ /us/`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"]]},{"name":"disk","columns":["key","value"],"values":[["host","server03"]]},{"name":"gpu","columns":["key","value"],"values":[["host","server03"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ - name: `show tag values with key in and where does not matche regular expression`, + name: `show tag values with key in and where does not match the regular expression`, command: `SHOW TAG VALUES FROM cpu WITH KEY IN (host, region) WHERE region = 'uswest'`, - exp: `{"results":[{"series":[{"name":"hostTagValues","columns":["host"],"values":[["server01"]]},{"name":"regionTagValues","columns":["region"],"values":[["uswest"]]}]}]}`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"],["region","uswest"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show tag values with key regex and where does not match the regular expression`, + command: `SHOW TAG VALUES FROM cpu WITH KEY =~ /(host|region)/ WHERE region = 'uswest'`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"],["region","uswest"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: `show tag values with key and measurement matches regular expression`, command: `SHOW TAG VALUES FROM /[cg]pu/ WITH KEY = host`, - exp: `{"results":[{"series":[{"name":"hostTagValues","columns":["host"],"values":[["server01"],["server02"],["server03"]]}]}]}`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"],["host","server02"]]},{"name":"gpu","columns":["key","value"],"values":[["host","server02"],["host","server03"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ @@ -5049,10 +6191,10 @@ func TestServer_Query_ShowFieldKeys(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { @@ -5078,19 +6220,19 @@ &Query{ name: `show field keys`, command: `SHOW 
FIELD KEYS`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["fieldKey"],"values":[["field1"],["field2"],["field3"]]},{"name":"disk","columns":["fieldKey"],"values":[["field8"],["field9"]]},{"name":"gpu","columns":["fieldKey"],"values":[["field4"],["field5"],["field6"],["field7"]]}]}]}`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["fieldKey","fieldType"],"values":[["field1","float"],["field2","float"],["field3","float"]]},{"name":"disk","columns":["fieldKey","fieldType"],"values":[["field8","float"],["field9","float"]]},{"name":"gpu","columns":["fieldKey","fieldType"],"values":[["field4","float"],["field5","float"],["field6","float"],["field7","float"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: `show field keys from measurement`, command: `SHOW FIELD KEYS FROM cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["fieldKey"],"values":[["field1"],["field2"],["field3"]]}]}]}`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["fieldKey","fieldType"],"values":[["field1","float"],["field2","float"],["field3","float"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: `show field keys measurement with regex`, command: `SHOW FIELD KEYS FROM /[cg]pu/`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["fieldKey"],"values":[["field1"],["field2"],["field3"]]},{"name":"gpu","columns":["fieldKey"],"values":[["field4"],["field5"],["field6"],["field7"]]}]}]}`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["fieldKey","fieldType"],"values":[["field1","float"],["field2","float"],["field3","float"]]},{"name":"gpu","columns":["fieldKey","fieldType"],"values":[["field4","float"],["field5","float"],["field6","float"],["field7","float"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, }...) @@ -5114,12 +6256,11 @@ } func TestServer_ContinuousQuery(t *testing.T) { - t.Skip() t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { @@ -5197,32 +6338,24 @@ &Query{ name: `show continuous queries`, command: `SHOW CONTINUOUS QUERIES`, - exp: `{"results":[{"series":[{"name":"db0","columns":["name","query"],"values":[["cq1","CREATE CONTINUOUS QUERY cq1 ON db0 BEGIN SELECT count(value) INTO \"db0\".\"rp1\".:MEASUREMENT FROM \"db0\".\"rp0\"./[cg]pu/ GROUP BY time(5s) END"],["cq2","CREATE CONTINUOUS QUERY cq2 ON db0 BEGIN SELECT count(value) INTO \"db0\".\"rp2\".:MEASUREMENT FROM \"db0\".\"rp0\"./[cg]pu/ GROUP BY time(5s), * END"]]}]}]}`, + exp: `{"results":[{"series":[{"name":"db0","columns":["name","query"],"values":[["cq1","CREATE CONTINUOUS QUERY cq1 ON db0 BEGIN SELECT count(value) INTO db0.rp1.:MEASUREMENT FROM db0.rp0./[cg]pu/ GROUP BY time(5s) END"],["cq2","CREATE CONTINUOUS QUERY cq2 ON db0 BEGIN SELECT count(value) INTO db0.rp2.:MEASUREMENT FROM db0.rp0./[cg]pu/ GROUP BY time(5s), * END"]]}]}]}`, }, }...) // Run first test to create CQs. runTest(&test, t) - // Trigger CQs to run. - u := fmt.Sprintf("%s/data/process_continuous_queries?time=%d", s.URL(), interval0.UnixNano()) - if _, err := s.HTTPPost(u, nil); err != nil { - t.Fatal(err) - } - - // Wait for CQs to run. TODO: fix this ugly hack - time.Sleep(time.Second * 5) - // Setup tests to check the CQ results. 
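The statements in these tests are driven through the test server's HTTP interface, with the target database passed as the `db` parameter. A rough sketch of issuing one of the same statements against a stand-alone InfluxDB 1.x instance (assuming the default `/query` endpoint on port 8086; error handling kept minimal):

```
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"net/url"
)

func main() {
	// Assumes a local InfluxDB 1.x instance on the default port; adjust as needed.
	params := url.Values{}
	params.Set("db", "db0")
	params.Set("q", "SHOW FIELD KEYS FROM cpu")

	resp, err := http.Get("http://localhost:8086/query?" + params.Encode())
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	// Each series now carries both a fieldKey and a fieldType column.
	fmt.Println(string(body))
}
```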
test2 := NewTest("db0", "rp1") test2.addQueries([]*Query{ &Query{ + skip: true, name: "check results of cq1", command: `SELECT * FROM "rp1"./[cg]pu/`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","count","host","region","value"],"values":[["` + interval2.UTC().Format(time.RFC3339Nano) + `",3,null,null,null]]},{"name":"gpu","columns":["time","count","host","region","value"],"values":[["` + interval1.UTC().Format(time.RFC3339Nano) + `",2,null,null,null],["` + interval0.UTC().Format(time.RFC3339Nano) + `",1,null,null,null]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, - // TODO: restore this test once this is fixed: https://github.com/influxdb/influxdb/issues/3968 + // TODO: restore this test once this is fixed: https://github.com/influxdata/influxdb/issues/3968 &Query{ skip: true, name: "check results of cq2", @@ -5245,7 +6378,7 @@ t.Skip("skipping CQ deadlock test") } t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer func() { s.Close() // Nil the server so our deadlock detector goroutine can determine if we completed writes @@ -5253,7 +6386,7 @@ s.Server = nil }() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { @@ -5323,10 +6456,10 @@ func TestServer_Query_EvilIdentifiers(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { @@ -5367,10 +6500,10 @@ func TestServer_Query_OrderByTime(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { @@ -5429,10 +6562,10 @@ func TestServer_Query_FieldWithMultiplePeriods(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { @@ -5483,10 +6616,10 @@ func TestServer_Query_FieldWithMultiplePeriodsMeasurementPrefixMatch(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { @@ -5537,10 +6670,10 @@ func TestServer_Query_IntoTarget(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err 
:= s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { @@ -5592,11 +6725,170 @@ } } +// Ensure that binary operators of aggregates of separate fields, when a field is sometimes missing and sometimes present, +// result in values that are still properly time-aligned. +func TestServer_Query_IntoTarget_Sparse(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig()) + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + // All points have fields n and a. Field b is not present in all intervals. + // First 10s interval is missing field b. Result a_n should be (2+5)*(3+7) = 70, b_n is null. + fmt.Sprintf(`foo a=2,n=3 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), + fmt.Sprintf(`foo a=5,n=7 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:02Z").UnixNano()), + // Second 10s interval has field b. Result a_n = 11*17 = 187, b_n = 13*17 = 221. + fmt.Sprintf(`foo a=11,b=13,n=17 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:11Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "into", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT sum(a) * sum(n) as a_n, sum(b) * sum(n) as b_n INTO baz FROM foo WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:01:00Z' GROUP BY time(10s)`, + exp: `{"results":[{"series":[{"name":"result","columns":["time","written"],"values":[["1970-01-01T00:00:00Z",2]]}]}]}`, + }, + &Query{ + name: "confirm results", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * FROM baz`, + exp: `{"results":[{"series":[{"name":"baz","columns":["time","a_n","b_n"],"values":[["2000-01-01T00:00:00Z",70,null],["2000-01-01T00:00:10Z",187,221]]}]}]}`, + }, + }...) + + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// This test ensures that data is not duplicated with measurements +// of the same name. +func TestServer_Query_DuplicateMeasurements(t *testing.T) { + t.Parallel() + s := OpenDefaultServer(NewConfig()) + defer s.Close() + + // Create a second database. 
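The writes in these tests are plain line protocol strings with explicit nanosecond timestamps, built with `fmt.Sprintf` around a parsed RFC3339 time. A standalone sketch of the same pattern, using `time.Parse` in place of the `mustParseTime` test helper:

```
package main

import (
	"fmt"
	"time"
)

func main() {
	// Build a line protocol write the way the tests above do, using
	// time.Parse in place of the mustParseTime test helper.
	ts, err := time.Parse(time.RFC3339Nano, "2000-01-01T00:00:00Z")
	if err != nil {
		panic(err)
	}
	line := fmt.Sprintf(`cpu,host=server01 value=1 %d`, ts.UnixNano())
	fmt.Println(line) // cpu,host=server01 value=1 946684800000000000
}
```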
+ if err := s.CreateDatabaseAndRetentionPolicy("db1", newRetentionPolicySpec("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db1", "rp0"); err != nil { + t.Fatal(err) + } + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: fmt.Sprintf(`cpu value=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano())}, + } + + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + test = NewTest("db1", "rp0") + test.writes = Writes{ + &Write{data: fmt.Sprintf(`cpu value=2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano())}, + } + + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + test.addQueries([]*Query{ + &Query{ + name: "select from both databases", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT value FROM db0.rp0.cpu, db1.rp0.cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:00Z",1],["2000-01-01T00:00:10Z",2]]}]}]}`, + }, + }...) + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_LargeTimestamp(t *testing.T) { + t.Parallel() + s := OpenDefaultServer(NewConfig()) + defer s.Close() + + writes := []string{ + fmt.Sprintf(`cpu value=100 %d`, models.MaxNanoTime), + } + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + test.addQueries([]*Query{ + &Query{ + name: `select value at max nano time`, + params: url.Values{"db": []string{"db0"}}, + command: fmt.Sprintf(`SELECT value FROM cpu WHERE time <= %d`, models.MaxNanoTime), + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["` + time.Unix(0, models.MaxNanoTime).UTC().Format(time.RFC3339Nano) + `",100]]}]}]}`, + }, + }...) + + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + // Open a new server with the same configuration file. + // This is to ensure the meta data was marshaled correctly. + s2 := OpenServer(s.Config) + defer s2.Close() + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + // This test reproduced a data race with closing the // Subscriber points channel while writes were in-flight in the PointsWriter. 
func TestServer_ConcurrentPointsWriter_Subscriber(t *testing.T) { t.Parallel() - s := OpenDefaultServer(NewConfig(), "") + s := OpenDefaultServer(NewConfig()) defer s.Close() // goroutine to write points @@ -5607,11 +6899,11 @@ case <-done: return default: - wpr := &cluster.WritePointsRequest{ + wpr := &coordinator.WritePointsRequest{ Database: "db0", RetentionPolicy: "rp0", } - s.PointsWriter.WritePoints(wpr) + s.PointsWriter.WritePoints(wpr.Database, wpr.RetentionPolicy, models.ConsistencyLevelAny, wpr.Points) } } }() @@ -5625,10 +6917,10 @@ // Ensure time in where clause is inclusive func TestServer_WhereTimeInclusive(t *testing.T) { t.Parallel() - s := OpenServer(NewConfig(), "") + s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { @@ -5715,6 +7007,64 @@ }, }...) + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_ImplicitEndTime(t *testing.T) { + t.Skip("flaky test") + t.Parallel() + s := OpenServer(NewConfig()) + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + now := time.Now().UTC().Truncate(time.Second) + past := now.Add(-10 * time.Second) + future := now.Add(10 * time.Minute) + writes := []string{ + fmt.Sprintf(`cpu value=1 %d`, past.UnixNano()), + fmt.Sprintf(`cpu value=2 %d`, future.UnixNano()), + } + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "raw query", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * FROM cpu`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",1],["%s",2]]}]}]}`, past.Format(time.RFC3339Nano), future.Format(time.RFC3339Nano)), + }, + &Query{ + name: "aggregate query", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT mean(value) FROM cpu WHERE time > now() - 1m GROUP BY time(1m) FILL(none)`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","mean"],"values":[["%s",1]]}]}]}`, now.Truncate(time.Minute).Format(time.RFC3339Nano)), + }, + }...) + if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } diff -Nru influxdb-0.10.0+dfsg1/cmd/influx_inspect/dumptsm/dumptsm.go influxdb-1.1.1+dfsg1/cmd/influx_inspect/dumptsm/dumptsm.go --- influxdb-0.10.0+dfsg1/cmd/influx_inspect/dumptsm/dumptsm.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influx_inspect/dumptsm/dumptsm.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,334 @@ +package dumptsm + +import ( + "encoding/binary" + "flag" + "fmt" + "io" + "os" + "strconv" + "strings" + "text/tabwriter" + "time" + + "github.com/influxdata/influxdb/tsdb/engine/tsm1" +) + +// Command represents the program execution for "influxd dumptsm". 
+type Command struct {
+	// Standard input/output, overridden for testing.
+	Stderr io.Writer
+	Stdout io.Writer
+
+	dumpIndex  bool
+	dumpBlocks bool
+	dumpAll    bool
+	filterKey  string
+	path       string
+}
+
+// NewCommand returns a new instance of Command.
+func NewCommand() *Command {
+	return &Command{
+		Stderr: os.Stderr,
+		Stdout: os.Stdout,
+	}
+}
+
+// Run executes the command.
+func (cmd *Command) Run(args ...string) error {
+	fs := flag.NewFlagSet("file", flag.ExitOnError)
+	fs.BoolVar(&cmd.dumpIndex, "index", false, "Dump raw index data")
+	fs.BoolVar(&cmd.dumpBlocks, "blocks", false, "Dump raw block data")
+	fs.BoolVar(&cmd.dumpAll, "all", false, "Dump all data. Caution: This may print a lot of information")
+	fs.StringVar(&cmd.filterKey, "filter-key", "", "Only display index and block data matching this key substring")
+
+	fs.SetOutput(cmd.Stdout)
+	fs.Usage = cmd.printUsage
+
+	if err := fs.Parse(args); err != nil {
+		return err
+	}
+
+	if fs.Arg(0) == "" {
+		fmt.Printf("TSM file not specified\n\n")
+		fs.Usage()
+		return nil
+	}
+	cmd.path = fs.Args()[0]
+	cmd.dumpBlocks = cmd.dumpBlocks || cmd.dumpAll || cmd.filterKey != ""
+	cmd.dumpIndex = cmd.dumpIndex || cmd.dumpAll || cmd.filterKey != ""
+	return cmd.dump()
+}
+
+func (cmd *Command) dump() error {
+	var errors []error
+
+	f, err := os.Open(cmd.path)
+	if err != nil {
+		return err
+	}
+
+	// Get the file size
+	stat, err := f.Stat()
+	if err != nil {
+		return err
+	}
+	b := make([]byte, 8)
+
+	r, err := tsm1.NewTSMReader(f)
+	if err != nil {
+		return fmt.Errorf("Error opening TSM files: %s", err.Error())
+	}
+	defer r.Close()
+
+	minTime, maxTime := r.TimeRange()
+	keyCount := r.KeyCount()
+
+	blockStats := &blockStats{}
+
+	println("Summary:")
+	fmt.Printf("  File: %s\n", cmd.path)
+	fmt.Printf("  Time Range: %s - %s\n",
+		time.Unix(0, minTime).UTC().Format(time.RFC3339Nano),
+		time.Unix(0, maxTime).UTC().Format(time.RFC3339Nano),
+	)
+	fmt.Printf("  Duration: %s ", time.Unix(0, maxTime).Sub(time.Unix(0, minTime)))
+	fmt.Printf("  Series: %d ", keyCount)
+	fmt.Printf("  File Size: %d\n", stat.Size())
+	println()
+
+	tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0)
+
+	if cmd.dumpIndex {
+		println("Index:")
+		tw.Flush()
+		println()
+
+		fmt.Fprintln(tw, "  "+strings.Join([]string{"Pos", "Min Time", "Max Time", "Ofs", "Size", "Key", "Field"}, "\t"))
+		var pos int
+		for i := 0; i < keyCount; i++ {
+			key, _ := r.KeyAt(i)
+			for _, e := range r.Entries(string(key)) {
+				pos++
+				split := strings.Split(string(key), "#!~#")
+
+				// We don't know if we have fields so use an informative default
+				var measurement, field string = "UNKNOWN", "UNKNOWN"
+
+				// Possible corruption? Try to read as much as we can and point to the problem.
+ measurement = split[0] + field = split[1] + + if cmd.filterKey != "" && !strings.Contains(string(key), cmd.filterKey) { + continue + } + fmt.Fprintln(tw, " "+strings.Join([]string{ + strconv.FormatInt(int64(pos), 10), + time.Unix(0, e.MinTime).UTC().Format(time.RFC3339Nano), + time.Unix(0, e.MaxTime).UTC().Format(time.RFC3339Nano), + strconv.FormatInt(int64(e.Offset), 10), + strconv.FormatInt(int64(e.Size), 10), + measurement, + field, + }, "\t")) + tw.Flush() + } + } + } + + tw = tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0) + fmt.Fprintln(tw, " "+strings.Join([]string{"Blk", "Chk", "Ofs", "Len", "Type", "Min Time", "Points", "Enc [T/V]", "Len [T/V]"}, "\t")) + + // Starting at 5 because the magic number is 4 bytes + 1 byte version + i := int64(5) + var blockCount, pointCount, blockSize int64 + indexSize := r.IndexSize() + + // Start at the beginning and read every block + for j := 0; j < keyCount; j++ { + key, _ := r.KeyAt(j) + for _, e := range r.Entries(string(key)) { + + f.Seek(int64(e.Offset), 0) + f.Read(b[:4]) + + chksum := binary.BigEndian.Uint32(b[:4]) + + buf := make([]byte, e.Size-4) + f.Read(buf) + + blockSize += int64(e.Size) + + if cmd.filterKey != "" && !strings.Contains(string(key), cmd.filterKey) { + i += blockSize + blockCount++ + continue + } + + blockType := buf[0] + + encoded := buf[1:] + + var v []tsm1.Value + v, err := tsm1.DecodeBlock(buf, v) + if err != nil { + return err + } + startTime := time.Unix(0, v[0].UnixNano()) + + pointCount += int64(len(v)) + + // Length of the timestamp block + tsLen, j := binary.Uvarint(encoded) + + // Unpack the timestamp bytes + ts := encoded[int(j) : int(j)+int(tsLen)] + + // Unpack the value bytes + values := encoded[int(j)+int(tsLen):] + + tsEncoding := timeEnc[int(ts[0]>>4)] + vEncoding := encDescs[int(blockType+1)][values[0]>>4] + + typeDesc := blockTypes[blockType] + + blockStats.inc(0, ts[0]>>4) + blockStats.inc(int(blockType+1), values[0]>>4) + blockStats.size(len(buf)) + + if cmd.dumpBlocks { + fmt.Fprintln(tw, " "+strings.Join([]string{ + strconv.FormatInt(blockCount, 10), + strconv.FormatUint(uint64(chksum), 10), + strconv.FormatInt(i, 10), + strconv.FormatInt(int64(len(buf)), 10), + typeDesc, + startTime.UTC().Format(time.RFC3339Nano), + strconv.FormatInt(int64(len(v)), 10), + fmt.Sprintf("%s/%s", tsEncoding, vEncoding), + fmt.Sprintf("%d/%d", len(ts), len(values)), + }, "\t")) + } + + i += blockSize + blockCount++ + } + } + + if cmd.dumpBlocks { + println("Blocks:") + tw.Flush() + println() + } + + var blockSizeAvg int64 + if blockCount > 0 { + blockSizeAvg = blockSize / blockCount + } + fmt.Printf("Statistics\n") + fmt.Printf(" Blocks:\n") + fmt.Printf(" Total: %d Size: %d Min: %d Max: %d Avg: %d\n", + blockCount, blockSize, blockStats.min, blockStats.max, blockSizeAvg) + fmt.Printf(" Index:\n") + fmt.Printf(" Total: %d Size: %d\n", blockCount, indexSize) + fmt.Printf(" Points:\n") + fmt.Printf(" Total: %d", pointCount) + println() + + println(" Encoding:") + for i, counts := range blockStats.counts { + if len(counts) == 0 { + continue + } + fmt.Printf(" %s: ", strings.Title(fieldType[i])) + for j, v := range counts { + fmt.Printf("\t%s: %d (%d%%) ", encDescs[i][j], v, int(float64(v)/float64(blockCount)*100)) + } + println() + } + fmt.Printf(" Compression:\n") + fmt.Printf(" Per block: %0.2f bytes/point\n", float64(blockSize)/float64(pointCount)) + fmt.Printf(" Total: %0.2f bytes/point\n", float64(stat.Size())/float64(pointCount)) + + if len(errors) > 0 { + println() + fmt.Printf("Errors (%d):\n", len(errors)) + 
for _, err := range errors { + fmt.Printf(" * %v\n", err) + } + println() + return fmt.Errorf("error count %d", len(errors)) + } + return nil +} + +// printUsage prints the usage message to STDERR. +func (cmd *Command) printUsage() { + usage := `Dumps low-level details about tsm1 files. + +Usage: influx_inspect dumptsm [flags] + Only display index and block data match this key substring +` + + fmt.Fprintf(cmd.Stdout, usage) +} + +var ( + fieldType = []string{ + "timestamp", "float", "int", "bool", "string", + } + blockTypes = []string{ + "float64", "int64", "bool", "string", + } + timeEnc = []string{ + "none", "s8b", "rle", + } + floatEnc = []string{ + "none", "gor", + } + intEnc = []string{ + "none", "s8b", "rle", + } + boolEnc = []string{ + "none", "bp", + } + stringEnc = []string{ + "none", "snpy", + } + encDescs = [][]string{ + timeEnc, floatEnc, intEnc, boolEnc, stringEnc, + } +) + +type blockStats struct { + min, max int + counts [][]int +} + +func (b *blockStats) inc(typ int, enc byte) { + for len(b.counts) <= typ { + b.counts = append(b.counts, []int{}) + } + for len(b.counts[typ]) <= int(enc) { + b.counts[typ] = append(b.counts[typ], 0) + } + b.counts[typ][enc]++ +} + +func (b *blockStats) size(sz int) { + if b.min == 0 || sz < b.min { + b.min = sz + } + if b.min == 0 || sz > b.max { + b.max = sz + } +} diff -Nru influxdb-0.10.0+dfsg1/cmd/influx_inspect/dumptsm/dumptsm_test.go influxdb-1.1.1+dfsg1/cmd/influx_inspect/dumptsm/dumptsm_test.go --- influxdb-0.10.0+dfsg1/cmd/influx_inspect/dumptsm/dumptsm_test.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influx_inspect/dumptsm/dumptsm_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,3 @@ +package dumptsm_test + +// TODO: write some tests diff -Nru influxdb-0.10.0+dfsg1/cmd/influx_inspect/export/export.go influxdb-1.1.1+dfsg1/cmd/influx_inspect/export/export.go --- influxdb-0.10.0+dfsg1/cmd/influx_inspect/export/export.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influx_inspect/export/export.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,396 @@ +package export + +import ( + "compress/gzip" + "flag" + "fmt" + "io" + "log" + "math" + "os" + "path" + "path/filepath" + "sort" + "strings" + "sync" + "time" + + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/pkg/escape" + "github.com/influxdata/influxdb/tsdb/engine/tsm1" +) + +// Command represents the program execution for "influx_inspect export". +type Command struct { + // Standard input/output, overridden for testing. + Stderr io.Writer + Stdout io.Writer + + dataDir string + walDir string + out string + database string + retentionPolicy string + startTime int64 + endTime int64 + compress bool + + manifest map[string]struct{} + tsmFiles map[string][]string + walFiles map[string][]string +} + +// NewCommand returns a new instance of Command. +func NewCommand() *Command { + return &Command{ + Stderr: os.Stderr, + Stdout: os.Stdout, + + manifest: make(map[string]struct{}), + tsmFiles: make(map[string][]string), + walFiles: make(map[string][]string), + } +} + +// Run executes the command. 
+func (cmd *Command) Run(args ...string) error { + var start, end string + fs := flag.NewFlagSet("export", flag.ExitOnError) + fs.StringVar(&cmd.dataDir, "datadir", os.Getenv("HOME")+"/.influxdb/data", "Data storage path") + fs.StringVar(&cmd.walDir, "waldir", os.Getenv("HOME")+"/.influxdb/wal", "WAL storage path") + fs.StringVar(&cmd.out, "out", os.Getenv("HOME")+"/.influxdb/export", "Destination file to export to") + fs.StringVar(&cmd.database, "database", "", "Optional: the database to export") + fs.StringVar(&cmd.retentionPolicy, "retention", "", "Optional: the retention policy to export (requires -database)") + fs.StringVar(&start, "start", "", "Optional: the start time to export") + fs.StringVar(&end, "end", "", "Optional: the end time to export") + fs.BoolVar(&cmd.compress, "compress", false, "Compress the output") + + fs.SetOutput(cmd.Stdout) + fs.Usage = func() { + fmt.Fprintf(cmd.Stdout, "Exports TSM files into InfluxDB line protocol format.\n\n") + fmt.Fprintf(cmd.Stdout, "Usage: %s export [flags]\n\n", filepath.Base(os.Args[0])) + fs.PrintDefaults() + } + + if err := fs.Parse(args); err != nil { + return err + } + + // set defaults + if start != "" { + s, err := time.Parse(time.RFC3339, start) + if err != nil { + return err + } + cmd.startTime = s.UnixNano() + } else { + cmd.startTime = math.MinInt64 + } + if end != "" { + e, err := time.Parse(time.RFC3339, end) + if err != nil { + return err + } + cmd.endTime = e.UnixNano() + } else { + // set end time to max if it is not set. + cmd.endTime = math.MaxInt64 + } + + if err := cmd.validate(); err != nil { + return err + } + + return cmd.export() +} + +func (cmd *Command) validate() error { + // validate args + if cmd.retentionPolicy != "" && cmd.database == "" { + return fmt.Errorf("must specify a db") + } + if cmd.startTime != 0 && cmd.endTime != 0 && cmd.endTime < cmd.startTime { + return fmt.Errorf("end time before start time") + } + return nil +} + +func (cmd *Command) export() error { + if err := cmd.walkTSMFiles(); err != nil { + return err + } + if err := cmd.walkWALFiles(); err != nil { + return err + } + return cmd.writeFiles() +} + +func (cmd *Command) walkTSMFiles() error { + err := filepath.Walk(cmd.dataDir, func(dir string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // check to see if this is a tsm file + ext := fmt.Sprintf(".%s", tsm1.TSMFileExtension) + if filepath.Ext(dir) != ext { + return nil + } + + relPath, _ := filepath.Rel(cmd.dataDir, dir) + dirs := strings.Split(relPath, string(byte(os.PathSeparator))) + if len(dirs) < 2 { + return fmt.Errorf("invalid directory structure for %s", dir) + } + if dirs[0] == cmd.database || cmd.database == "" { + if dirs[1] == cmd.retentionPolicy || cmd.retentionPolicy == "" { + key := filepath.Join(dirs[0], dirs[1]) + files := cmd.tsmFiles[key] + if files == nil { + files = []string{} + } + cmd.manifest[key] = struct{}{} + cmd.tsmFiles[key] = append(files, dir) + } + } + return nil + }) + if err != nil { + return err + } + return nil +} + +func (cmd *Command) walkWALFiles() error { + err := filepath.Walk(cmd.walDir, func(dir string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // check to see if this is a wal file + prefix := tsm1.WALFilePrefix + ext := fmt.Sprintf(".%s", tsm1.WALFileExtension) + _, fileName := path.Split(dir) + if filepath.Ext(dir) != ext || !strings.HasPrefix(fileName, prefix) { + return nil + } + + relPath, _ := filepath.Rel(cmd.walDir, dir) + dirs := strings.Split(relPath, 
string(byte(os.PathSeparator))) + if len(dirs) < 2 { + return fmt.Errorf("invalid directory structure for %s", dir) + } + if dirs[0] == cmd.database || cmd.database == "" { + if dirs[1] == cmd.retentionPolicy || cmd.retentionPolicy == "" { + key := filepath.Join(dirs[0], dirs[1]) + files := cmd.walFiles[key] + if files == nil { + files = []string{} + } + cmd.manifest[key] = struct{}{} + cmd.walFiles[key] = append(files, dir) + } + } + return nil + }) + if err != nil { + return err + } + return nil +} + +func (cmd *Command) writeFiles() error { + // open our output file and create an output buffer + var w io.WriteCloser + w, err := os.Create(cmd.out) + if err != nil { + return err + } + defer w.Close() + if cmd.compress { + w = gzip.NewWriter(w) + defer w.Close() + } + + s, e := time.Unix(0, cmd.startTime).Format(time.RFC3339), time.Unix(0, cmd.endTime).Format(time.RFC3339) + fmt.Fprintf(w, "# INFLUXDB EXPORT: %s - %s\n", s, e) + + // Write out all the DDL + fmt.Fprintln(w, "# DDL") + for key := range cmd.manifest { + keys := strings.Split(key, string(byte(os.PathSeparator))) + db, rp := influxql.QuoteIdent(keys[0]), influxql.QuoteIdent(keys[1]) + fmt.Fprintf(w, "CREATE DATABASE %s WITH NAME %s\n", db, rp) + } + + fmt.Fprintln(w, "# DML") + for key := range cmd.manifest { + keys := strings.Split(key, string(byte(os.PathSeparator))) + fmt.Fprintf(w, "# CONTEXT-DATABASE:%s\n", keys[0]) + fmt.Fprintf(w, "# CONTEXT-RETENTION-POLICY:%s\n", keys[1]) + if files, ok := cmd.tsmFiles[key]; ok { + fmt.Printf("writing out tsm file data for %s...", key) + if err := cmd.writeTsmFiles(w, files); err != nil { + return err + } + fmt.Println("complete.") + } + if _, ok := cmd.walFiles[key]; ok { + fmt.Printf("writing out wal file data for %s...", key) + if err := cmd.writeWALFiles(w, cmd.walFiles[key], key); err != nil { + return err + } + fmt.Println("complete.") + } + } + return nil +} + +func (cmd *Command) writeTsmFiles(w io.WriteCloser, files []string) error { + fmt.Fprintln(w, "# writing tsm data") + + // we need to make sure we write the same order that the files were written + sort.Strings(files) + + // use a function here to close the files in the defers and not let them accumulate in the loop + write := func(f string) error { + file, err := os.OpenFile(f, os.O_RDONLY, 0600) + if err != nil { + return fmt.Errorf("%v", err) + } + defer file.Close() + reader, err := tsm1.NewTSMReader(file) + if err != nil { + log.Printf("unable to read %s, skipping\n", f) + return nil + } + defer reader.Close() + + if sgStart, sgEnd := reader.TimeRange(); sgStart > cmd.endTime || sgEnd < cmd.startTime { + return nil + } + + for i := 0; i < reader.KeyCount(); i++ { + var pairs string + key, typ := reader.KeyAt(i) + values, _ := reader.ReadAll(string(key)) + measurement, field := tsm1.SeriesAndFieldFromCompositeKey(key) + // measurements are stored escaped, field names are not + field = escape.String(field) + + for _, value := range values { + if (value.UnixNano() < cmd.startTime) || (value.UnixNano() > cmd.endTime) { + continue + } + + switch typ { + case tsm1.BlockFloat64: + pairs = field + "=" + fmt.Sprintf("%v", value.Value()) + case tsm1.BlockInteger: + pairs = field + "=" + fmt.Sprintf("%vi", value.Value()) + case tsm1.BlockBoolean: + pairs = field + "=" + fmt.Sprintf("%v", value.Value()) + case tsm1.BlockString: + pairs = field + "=" + fmt.Sprintf("%q", models.EscapeStringField(fmt.Sprintf("%s", value.Value()))) + default: + pairs = field + "=" + fmt.Sprintf("%v", value.Value()) + } + + fmt.Fprintln(w, 
string(measurement), pairs, value.UnixNano()) + } + } + return nil + } + + for _, f := range files { + if err := write(f); err != nil { + return err + } + } + + return nil +} + +func (cmd *Command) writeWALFiles(w io.WriteCloser, files []string, key string) error { + fmt.Fprintln(w, "# writing wal data") + + // we need to make sure we write the same order that the wal received the data + sort.Strings(files) + + var once sync.Once + warn := func() { + msg := fmt.Sprintf(`WARNING: detected deletes in wal file. + Some series for %q may be brought back by replaying this data. + To resolve, you can either let the shard snapshot prior to exporting the data + or manually editing the exported file. + `, key) + fmt.Fprintln(cmd.Stderr, msg) + } + + // use a function here to close the files in the defers and not let them accumulate in the loop + write := func(f string) error { + file, err := os.OpenFile(f, os.O_RDONLY, 0600) + if err != nil { + return fmt.Errorf("%v", err) + } + defer file.Close() + + reader := tsm1.NewWALSegmentReader(file) + defer reader.Close() + for reader.Next() { + entry, err := reader.Read() + if err != nil { + n := reader.Count() + fmt.Fprintf(os.Stderr, "file %s corrupt at position %d", file.Name(), n) + break + } + + switch t := entry.(type) { + case *tsm1.DeleteWALEntry: + once.Do(warn) + continue + case *tsm1.DeleteRangeWALEntry: + once.Do(warn) + continue + case *tsm1.WriteWALEntry: + var pairs string + + for key, values := range t.Values { + measurement, field := tsm1.SeriesAndFieldFromCompositeKey([]byte(key)) + // measurements are stored escaped, field names are not + field = escape.String(field) + + for _, value := range values { + if (value.UnixNano() < cmd.startTime) || (value.UnixNano() > cmd.endTime) { + continue + } + + switch value.Value().(type) { + case float64: + pairs = field + "=" + fmt.Sprintf("%v", value.Value()) + case int64: + pairs = field + "=" + fmt.Sprintf("%vi", value.Value()) + case bool: + pairs = field + "=" + fmt.Sprintf("%v", value.Value()) + case string: + pairs = field + "=" + fmt.Sprintf("%q", models.EscapeStringField(fmt.Sprintf("%s", value.Value()))) + default: + pairs = field + "=" + fmt.Sprintf("%v", value.Value()) + } + fmt.Fprintln(w, string(measurement), pairs, value.UnixNano()) + } + } + } + } + return nil + } + + for _, f := range files { + if err := write(f); err != nil { + return err + } + } + + return nil +} diff -Nru influxdb-0.10.0+dfsg1/cmd/influx_inspect/export/export_test.go influxdb-1.1.1+dfsg1/cmd/influx_inspect/export/export_test.go --- influxdb-0.10.0+dfsg1/cmd/influx_inspect/export/export_test.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influx_inspect/export/export_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,3 @@ +package export_test + +// #TODO: write some tests diff -Nru influxdb-0.10.0+dfsg1/cmd/influx_inspect/help/help.go influxdb-1.1.1+dfsg1/cmd/influx_inspect/help/help.go --- influxdb-0.10.0+dfsg1/cmd/influx_inspect/help/help.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influx_inspect/help/help.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,41 @@ +package help + +import ( + "fmt" + "io" + "os" + "strings" +) + +// Command displays help for command-line sub-commands. +type Command struct { + Stdout io.Writer +} + +// NewCommand returns a new instance of Command. +func NewCommand() *Command { + return &Command{ + Stdout: os.Stdout, + } +} + +// Run executes the command. 
+func (cmd *Command) Run(args ...string) error { + fmt.Fprintln(cmd.Stdout, strings.TrimSpace(usage)) + return nil +} + +const usage = ` +Usage: influx_inspect [[command] [arguments]] + +The commands are: + + dumptsm dumps low-level details about tsm1 files. + export exports raw data from a shard to line protocol + help display this help message + report displays a shard level report + +"help" is the default command. + +Use "influx_inspect [command] -help" for more information about a command. +` diff -Nru influxdb-0.10.0+dfsg1/cmd/influx_inspect/help/help_test.go influxdb-1.1.1+dfsg1/cmd/influx_inspect/help/help_test.go --- influxdb-0.10.0+dfsg1/cmd/influx_inspect/help/help_test.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influx_inspect/help/help_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,3 @@ +package help_test + +// TODO: write some tests diff -Nru influxdb-0.10.0+dfsg1/cmd/influx_inspect/info.go influxdb-1.1.1+dfsg1/cmd/influx_inspect/info.go --- influxdb-0.10.0+dfsg1/cmd/influx_inspect/info.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influx_inspect/info.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,120 +0,0 @@ -package main - -import ( - "encoding/binary" - "fmt" - "io/ioutil" - "log" - "os" - "path/filepath" - "sort" - "strings" - "text/tabwriter" - - "github.com/influxdb/influxdb/tsdb" -) - -func cmdInfo(path string) { - tstore := tsdb.NewStore(filepath.Join(path, "data")) - tstore.Logger = log.New(ioutil.Discard, "", log.LstdFlags) - tstore.EngineOptions.Config.Dir = filepath.Join(path, "data") - tstore.EngineOptions.Config.WALLoggingEnabled = false - tstore.EngineOptions.Config.WALDir = filepath.Join(path, "wal") - if err := tstore.Open(); err != nil { - fmt.Printf("Failed to open dir: %v\n", err) - os.Exit(1) - } - - size, err := tstore.DiskSize() - if err != nil { - fmt.Printf("Failed to determine disk usage: %v\n", err) - } - - // Summary stats - fmt.Printf("Shards: %d, Indexes: %d, Databases: %d, Disk Size: %d, Series: %d\n\n", - tstore.ShardN(), tstore.DatabaseIndexN(), len(tstore.Databases()), size, countSeries(tstore)) - - tw := tabwriter.NewWriter(os.Stdout, 16, 8, 0, '\t', 0) - - fmt.Fprintln(tw, strings.Join([]string{"Shard", "DB", "Measurement", "Tags [#K/#V]", "Fields [Name:Type]", "Series"}, "\t")) - - shardIDs := tstore.ShardIDs() - - databases := tstore.Databases() - sort.Strings(databases) - - for _, db := range databases { - index := tstore.DatabaseIndex(db) - measurements := index.Measurements() - sort.Sort(measurements) - for _, m := range measurements { - tags := m.TagKeys() - tagValues := 0 - for _, tag := range tags { - tagValues += len(m.TagValues(tag)) - } - fields := m.FieldNames() - sort.Strings(fields) - series := m.SeriesKeys() - sort.Strings(series) - sort.Sort(ShardIDs(shardIDs)) - - // Sample a point from each measurement to determine the field types - for _, shardID := range shardIDs { - shard := tstore.Shard(shardID) - codec := shard.FieldCodec(m.Name) - for _, field := range codec.Fields() { - ft := fmt.Sprintf("%s:%s", field.Name, field.Type) - fmt.Fprintf(tw, "%d\t%s\t%s\t%d/%d\t%d [%s]\t%d\n", shardID, db, m.Name, len(tags), tagValues, - len(fields), ft, len(series)) - - } - - } - } - } - tw.Flush() -} - -func countSeries(tstore *tsdb.Store) int { - var count int - for _, shardID := range tstore.ShardIDs() { - shard := tstore.Shard(shardID) - cnt, err := shard.SeriesCount() - if err != nil { - fmt.Printf("series count failed: %v\n", err) - continue - } - count += cnt - } - return count 
-} - -func btou64(b []byte) uint64 { - return binary.BigEndian.Uint64(b) -} - -// u64tob converts a uint64 into an 8-byte slice. -func u64tob(v uint64) []byte { - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, v) - return b -} - -func btou32(b []byte) uint32 { - return binary.BigEndian.Uint32(b) -} - -// u32tob converts a uint32 into an 4-byte slice. -func u32tob(v uint32) []byte { - b := make([]byte, 4) - binary.BigEndian.PutUint32(b, v) - return b -} - -// ShardIDs is a collection of UINT 64 that represent shard ids. -type ShardIDs []uint64 - -func (a ShardIDs) Len() int { return len(a) } -func (a ShardIDs) Less(i, j int) bool { return a[i] < a[j] } -func (a ShardIDs) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff -Nru influxdb-0.10.0+dfsg1/cmd/influx_inspect/main.go influxdb-1.1.1+dfsg1/cmd/influx_inspect/main.go --- influxdb-0.10.0+dfsg1/cmd/influx_inspect/main.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influx_inspect/main.go 2016-12-06 21:36:15.000000000 +0000 @@ -1,120 +1,84 @@ package main import ( - "flag" "fmt" + "io" + "log" "os" - _ "github.com/influxdb/influxdb/tsdb/engine" + "github.com/influxdata/influxdb/cmd" + "github.com/influxdata/influxdb/cmd/influx_inspect/dumptsm" + "github.com/influxdata/influxdb/cmd/influx_inspect/export" + "github.com/influxdata/influxdb/cmd/influx_inspect/help" + "github.com/influxdata/influxdb/cmd/influx_inspect/report" + "github.com/influxdata/influxdb/cmd/influx_inspect/verify" + _ "github.com/influxdata/influxdb/tsdb/engine" ) -func usage() { - println(`Usage: influx_inspect [options] - -Displays detailed information about InfluxDB data files. -`) - - println(`Commands: - info - displays series meta-data for all shards. Default location [$HOME/.influxdb] - dumptsm - dumps low-level details about tsm1 files. - dumptsmdev - dumps low-level details about tsm1dev files.`) - println() -} - func main() { - flag.Usage = usage - flag.Parse() - - if len(flag.Args()) == 0 { - flag.Usage() - os.Exit(0) + m := NewMain() + if err := m.Run(os.Args[1:]...); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) } +} - switch flag.Args()[0] { - case "info": - var path string - fs := flag.NewFlagSet("info", flag.ExitOnError) - fs.StringVar(&path, "dir", os.Getenv("HOME")+"/.influxdb", "Root storage path. [$HOME/.influxdb]") - - fs.Usage = func() { - println("Usage: influx_inspect info [options]\n\n Displays series meta-data for all shards..") - println() - println("Options:") - fs.PrintDefaults() - } - - if err := fs.Parse(flag.Args()[1:]); err != nil { - fmt.Printf("%v", err) - os.Exit(1) - } - cmdInfo(path) - case "dumptsm": - var dumpAll bool - opts := &tsdmDumpOpts{} - fs := flag.NewFlagSet("file", flag.ExitOnError) - fs.BoolVar(&opts.dumpIndex, "index", false, "Dump raw index data") - fs.BoolVar(&opts.dumpBlocks, "blocks", false, "Dump raw block data") - fs.BoolVar(&dumpAll, "all", false, "Dump all data. Caution: This may print a lot of information") - fs.StringVar(&opts.filterKey, "filter-key", "", "Only display index and block data match this key substring") - - fs.Usage = func() { - println("Usage: influx_inspect dumptsm [options] \n\n Dumps low-level details about tsm1 files.") - println() - println("Options:") - fs.PrintDefaults() - os.Exit(0) - } +// Main represents the program execution. 
+type Main struct { + Logger *log.Logger + + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} - if err := fs.Parse(flag.Args()[1:]); err != nil { - fmt.Printf("%v", err) - os.Exit(1) - } +// NewMain return a new instance of Main. +func NewMain() *Main { + return &Main{ + Logger: log.New(os.Stderr, "[influx_inspect] ", log.LstdFlags), + Stdin: os.Stdin, + Stdout: os.Stdout, + Stderr: os.Stderr, + } +} - if len(fs.Args()) == 0 || fs.Args()[0] == "" { - fmt.Printf("TSM file not specified\n\n") - fs.Usage() - fs.PrintDefaults() - os.Exit(1) +// Run determines and runs the command specified by the CLI args. +func (m *Main) Run(args ...string) error { + name, args := cmd.ParseCommandName(args) + + // Extract name from args. + switch name { + case "", "help": + if err := help.NewCommand().Run(args...); err != nil { + return fmt.Errorf("help: %s", err) } - opts.path = fs.Args()[0] - opts.dumpBlocks = opts.dumpBlocks || dumpAll || opts.filterKey != "" - opts.dumpIndex = opts.dumpIndex || dumpAll || opts.filterKey != "" - cmdDumpTsm1(opts) case "dumptsmdev": - var dumpAll bool - opts := &tsdmDumpOpts{} - fs := flag.NewFlagSet("file", flag.ExitOnError) - fs.BoolVar(&opts.dumpIndex, "index", false, "Dump raw index data") - fs.BoolVar(&opts.dumpBlocks, "blocks", false, "Dump raw block data") - fs.BoolVar(&dumpAll, "all", false, "Dump all data. Caution: This may print a lot of information") - fs.StringVar(&opts.filterKey, "filter-key", "", "Only display index and block data match this key substring") - - fs.Usage = func() { - println("Usage: influx_inspect dumptsm [options] \n\n Dumps low-level details about tsm1 files.") - println() - println("Options:") - fs.PrintDefaults() - os.Exit(0) - } - - if err := fs.Parse(flag.Args()[1:]); err != nil { - fmt.Printf("%v", err) - os.Exit(1) - } - - if len(fs.Args()) == 0 || fs.Args()[0] == "" { - fmt.Printf("TSM file not specified\n\n") - fs.Usage() - fs.PrintDefaults() - os.Exit(1) + fmt.Fprintf(m.Stderr, "warning: dumptsmdev is deprecated, use dumptsm instead.\n") + fallthrough + case "dumptsm": + name := dumptsm.NewCommand() + if err := name.Run(args...); err != nil { + return fmt.Errorf("dumptsm: %s", err) + } + case "export": + name := export.NewCommand() + if err := name.Run(args...); err != nil { + return fmt.Errorf("export: %s", err) + } + case "report": + name := report.NewCommand() + if err := name.Run(args...); err != nil { + return fmt.Errorf("report: %s", err) + } + case "verify": + name := verify.NewCommand() + if err := name.Run(args...); err != nil { + return fmt.Errorf("verify: %s", err) } - opts.path = fs.Args()[0] - opts.dumpBlocks = opts.dumpBlocks || dumpAll || opts.filterKey != "" - opts.dumpIndex = opts.dumpIndex || dumpAll || opts.filterKey != "" - cmdDumpTsm1dev(opts) default: - flag.Usage() - os.Exit(1) + return fmt.Errorf(`unknown command "%s"`+"\n"+`Run 'influx_inspect help' for usage`+"\n\n", name) } + + return nil } diff -Nru influxdb-0.10.0+dfsg1/cmd/influx_inspect/README.md influxdb-1.1.1+dfsg1/cmd/influx_inspect/README.md --- influxdb-0.10.0+dfsg1/cmd/influx_inspect/README.md 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influx_inspect/README.md 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,107 @@ +# `influx_inspect` + +## Ways to run + +### `influx_inspect` +Will print usage for the tool. + +### `influx_inspect report` +Displays series meta-data for all shards. 
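As the dispatcher above shows, each `influx_inspect` subcommand package exposes a `NewCommand`/`Run` pair, so the tools can also be driven in-process rather than through the CLI. A minimal sketch (the TSM file path is purely illustrative):

```
package main

import (
	"log"

	"github.com/influxdata/influxdb/cmd/influx_inspect/dumptsm"
)

func main() {
	// The TSM file path below is illustrative only; point it at a real shard file.
	cmd := dumptsm.NewCommand()
	if err := cmd.Run("-all", "/var/lib/influxdb/data/db0/rp0/1/000000001-000000001.tsm"); err != nil {
		log.Fatal(err)
	}
}
```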
Default location [$HOME/.influxdb]
+
+### `influx_inspect dumptsm`
+Dumps low-level details about tsm1 files.
+
+#### Flags
+
+#### `-index` bool
+Dump raw index data.
+
+`default` = false
+
+#### `-blocks` bool
+Dump raw block data.
+
+`default` = false
+
+#### `-all` bool
+Dump all data. Caution: This may print a lot of information.
+
+`default` = false
+
+#### `-filter-key` string
+Only display index and block data matching this key substring.
+
+`default` = ""
+
+
+### `influx_inspect export`
+Exports all tsm files to line protocol. This output file can be imported via the [influx](https://github.com/influxdata/influxdb/tree/master/importer#running-the-import-command) command.
+
+
+#### `-datadir` string
+Data storage path.
+
+`default` = "$HOME/.influxdb/data"
+
+#### `-waldir` string
+WAL storage path.
+
+`default` = "$HOME/.influxdb/wal"
+
+#### `-out` string
+Destination file to export to.
+
+`default` = "$HOME/.influxdb/export"
+
+#### `-database` string (optional)
+Database to export.
+
+`default` = ""
+
+#### `-retention` string (optional)
+Retention policy to export.
+
+`default` = ""
+
+#### `-start` string (optional)
+The start of the time range to export.
+
+#### `-end` string (optional)
+The end of the time range to export.
+
+#### `-compress` bool (optional)
+Compress the output.
+
+`default` = false
+
+#### Sample Commands
+
+Export entire database and compress output:
+```
+influx_inspect export --compress
+```
+
+Export specific retention policy:
+```
+influx_inspect export --database mydb --retention autogen
+```
+
+##### Sample Data
+This is a sample of what the output will look like.
+
+```
+# DDL
+CREATE DATABASE MY_DB_NAME
+CREATE RETENTION POLICY autogen ON MY_DB_NAME DURATION inf REPLICATION 1
+
+# DML
+# CONTEXT-DATABASE:MY_DB_NAME
+# CONTEXT-RETENTION-POLICY:autogen
+randset value=97.9296104805 1439856000000000000
+randset value=25.3849066842 1439856100000000000
+```
+
+# Caveats
+
+The system does not have access to the meta store when exporting TSM shards. As such, it always creates the retention policy with infinite duration and replication factor of 1.
+End users may want to change this prior to re-importing if they are importing to a cluster or want a different duration for retention.
diff -Nru influxdb-0.10.0+dfsg1/cmd/influx_inspect/report/report.go influxdb-1.1.1+dfsg1/cmd/influx_inspect/report/report.go
--- influxdb-0.10.0+dfsg1/cmd/influx_inspect/report/report.go 1970-01-01 00:00:00.000000000 +0000
+++ influxdb-1.1.1+dfsg1/cmd/influx_inspect/report/report.go 2016-12-06 21:36:15.000000000 +0000
@@ -0,0 +1,184 @@
+package report
+
+import (
+	"flag"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"text/tabwriter"
+	"time"
+
+	"github.com/influxdata/influxdb/models"
+	"github.com/influxdata/influxdb/tsdb/engine/tsm1"
+	"github.com/retailnext/hllpp"
+)
+
+// Command represents the program execution for "influx_inspect report".
+type Command struct {
+	Stderr io.Writer
+	Stdout io.Writer
+
+	dir      string
+	pattern  string
+	detailed bool
+}
+
+// NewCommand returns a new instance of Command.
+func NewCommand() *Command {
+	return &Command{
+		Stderr: os.Stderr,
+		Stdout: os.Stdout,
+	}
+}
+
+// Run executes the command.
+func (cmd *Command) Run(args ...string) error { + fs := flag.NewFlagSet("report", flag.ExitOnError) + fs.StringVar(&cmd.pattern, "pattern", "", "Include only files matching a pattern") + fs.BoolVar(&cmd.detailed, "detailed", false, "Report detailed cardinality estimates") + + fs.SetOutput(cmd.Stdout) + fs.Usage = cmd.printUsage + + if err := fs.Parse(args); err != nil { + return err + } + cmd.dir = fs.Arg(0) + + start := time.Now() + + files, err := filepath.Glob(filepath.Join(cmd.dir, fmt.Sprintf("*.%s", tsm1.TSMFileExtension))) + if err != nil { + return err + } + + var filtered []string + if cmd.pattern != "" { + for _, f := range files { + if strings.Contains(f, cmd.pattern) { + filtered = append(filtered, f) + } + } + files = filtered + } + + if len(files) == 0 { + return fmt.Errorf("no tsm files at %v\n", cmd.dir) + } + + tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0) + fmt.Fprintln(tw, strings.Join([]string{"File", "Series", "Load Time"}, "\t")) + + totalSeries := hllpp.New() + tagCardialities := map[string]*hllpp.HLLPP{} + measCardinalities := map[string]*hllpp.HLLPP{} + fieldCardinalities := map[string]*hllpp.HLLPP{} + + ordering := make([]chan struct{}, 0, len(files)) + for range files { + ordering = append(ordering, make(chan struct{})) + } + + for _, f := range files { + file, err := os.OpenFile(f, os.O_RDONLY, 0600) + if err != nil { + fmt.Fprintf(cmd.Stderr, "error: %s: %v. Skipping.\n", f, err) + continue + } + + loadStart := time.Now() + reader, err := tsm1.NewTSMReader(file) + if err != nil { + fmt.Fprintf(cmd.Stderr, "error: %s: %v. Skipping.\n", file.Name(), err) + continue + } + loadTime := time.Since(loadStart) + + seriesCount := reader.KeyCount() + for i := 0; i < seriesCount; i++ { + key, _ := reader.KeyAt(i) + totalSeries.Add([]byte(key)) + + if cmd.detailed { + sep := strings.Index(string(key), "#!~#") + seriesKey, field := key[:sep], key[sep+4:] + measurement, tags, _ := models.ParseKey(seriesKey) + + measCount, ok := measCardinalities[measurement] + if !ok { + measCount = hllpp.New() + measCardinalities[measurement] = measCount + } + measCount.Add([]byte(key)) + + fieldCount, ok := fieldCardinalities[measurement] + if !ok { + fieldCount = hllpp.New() + fieldCardinalities[measurement] = fieldCount + } + fieldCount.Add([]byte(field)) + + for _, t := range tags { + tagCount, ok := tagCardialities[string(t.Key)] + if !ok { + tagCount = hllpp.New() + tagCardialities[string(t.Key)] = tagCount + } + tagCount.Add(t.Value) + } + } + } + reader.Close() + + fmt.Fprintln(tw, strings.Join([]string{ + filepath.Base(file.Name()), + strconv.FormatInt(int64(seriesCount), 10), + loadTime.String(), + }, "\t")) + tw.Flush() + } + + tw.Flush() + println() + fmt.Printf("Statistics\n") + fmt.Printf(" Series:\n") + fmt.Printf(" Total (est): %d\n", totalSeries.Count()) + if cmd.detailed { + fmt.Printf(" Measurements (est):\n") + for t, card := range measCardinalities { + fmt.Printf(" %v: %d (%d%%)\n", t, card.Count(), int((float64(card.Count())/float64(totalSeries.Count()))*100)) + } + + fmt.Printf(" Fields (est):\n") + for t, card := range fieldCardinalities { + fmt.Printf(" %v: %d\n", t, card.Count()) + } + + fmt.Printf(" Tags (est):\n") + for t, card := range tagCardialities { + fmt.Printf(" %v: %d\n", t, card.Count()) + } + } + + fmt.Printf("Completed in %s\n", time.Since(start)) + return nil +} + +// printUsage prints the usage message to STDERR. +func (cmd *Command) printUsage() { + usage := `Displays shard level report. 
+ +Usage: influx_inspect report [flags] + + -pattern + Include only files matching a pattern. + -detailed + Report detailed cardinality estimates. + Defaults to "false". +` + + fmt.Fprintf(cmd.Stdout, usage) +} diff -Nru influxdb-0.10.0+dfsg1/cmd/influx_inspect/report/report_test.go influxdb-1.1.1+dfsg1/cmd/influx_inspect/report/report_test.go --- influxdb-0.10.0+dfsg1/cmd/influx_inspect/report/report_test.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influx_inspect/report/report_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,3 @@ +package report_test + +// TODO: write some tests diff -Nru influxdb-0.10.0+dfsg1/cmd/influx_inspect/tsm.go influxdb-1.1.1+dfsg1/cmd/influx_inspect/tsm.go --- influxdb-0.10.0+dfsg1/cmd/influx_inspect/tsm.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influx_inspect/tsm.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,652 +0,0 @@ -package main - -import ( - "encoding/binary" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - "text/tabwriter" - "time" - - "github.com/golang/snappy" - "github.com/influxdb/influxdb/tsdb" - "github.com/influxdb/influxdb/tsdb/engine/tsm1" -) - -// these consts are for the old tsm format. They can be removed once we remove -// the inspection for the original tsm1 files. -const ( - //IDsFileExtension is the extension for the file that keeps the compressed map - // of keys to uint64 IDs. - IDsFileExtension = "ids" - - // FieldsFileExtension is the extension for the file that stores compressed field - // encoding data for this db - FieldsFileExtension = "fields" - - // SeriesFileExtension is the extension for the file that stores the compressed - // series metadata for series in this db - SeriesFileExtension = "series" -) - -type tsdmDumpOpts struct { - dumpIndex bool - dumpBlocks bool - filterKey string - path string -} - -type tsmIndex struct { - series int - offset int64 - minTime time.Time - maxTime time.Time - blocks []*block -} - -type block struct { - id uint64 - offset int64 -} - -type blockStats struct { - min, max int - counts [][]int -} - -func (b *blockStats) inc(typ int, enc byte) { - for len(b.counts) <= typ { - b.counts = append(b.counts, []int{}) - } - for len(b.counts[typ]) <= int(enc) { - b.counts[typ] = append(b.counts[typ], 0) - } - b.counts[typ][enc]++ -} - -func (b *blockStats) size(sz int) { - if b.min == 0 || sz < b.min { - b.min = sz - } - if b.min == 0 || sz > b.max { - b.max = sz - } -} - -var ( - fieldType = []string{ - "timestamp", "float", "int", "bool", "string", - } - blockTypes = []string{ - "float64", "int64", "bool", "string", - } - timeEnc = []string{ - "none", "s8b", "rle", - } - floatEnc = []string{ - "none", "gor", - } - intEnc = []string{ - "none", "s8b", "rle", - } - boolEnc = []string{ - "none", "bp", - } - stringEnc = []string{ - "none", "snpy", - } - encDescs = [][]string{ - timeEnc, floatEnc, intEnc, boolEnc, stringEnc, - } -) - -func readFields(path string) (map[string]*tsdb.MeasurementFields, error) { - fields := make(map[string]*tsdb.MeasurementFields) - - f, err := os.OpenFile(filepath.Join(path, FieldsFileExtension), os.O_RDONLY, 0666) - if os.IsNotExist(err) { - return fields, nil - } else if err != nil { - return nil, err - } - b, err := ioutil.ReadAll(f) - if err != nil { - return nil, err - } - - data, err := snappy.Decode(nil, b) - if err != nil { - return nil, err - } - - if err := json.Unmarshal(data, &fields); err != nil { - return nil, err - } - return fields, nil -} - -func readSeries(path 
string) (map[string]*tsdb.Series, error) { - series := make(map[string]*tsdb.Series) - - f, err := os.OpenFile(filepath.Join(path, SeriesFileExtension), os.O_RDONLY, 0666) - if os.IsNotExist(err) { - return series, nil - } else if err != nil { - return nil, err - } - defer f.Close() - b, err := ioutil.ReadAll(f) - if err != nil { - return nil, err - } - - data, err := snappy.Decode(nil, b) - if err != nil { - return nil, err - } - - if err := json.Unmarshal(data, &series); err != nil { - return nil, err - } - - return series, nil -} - -func readIds(path string) (map[string]uint64, error) { - f, err := os.OpenFile(filepath.Join(path, IDsFileExtension), os.O_RDONLY, 0666) - if os.IsNotExist(err) { - return nil, nil - } else if err != nil { - return nil, err - } - b, err := ioutil.ReadAll(f) - if err != nil { - return nil, err - } - - b, err = snappy.Decode(nil, b) - if err != nil { - return nil, err - } - - ids := make(map[string]uint64) - if b != nil { - if err := json.Unmarshal(b, &ids); err != nil { - return nil, err - } - } - return ids, err -} -func readIndex(f *os.File) (*tsmIndex, error) { - // Get the file size - stat, err := f.Stat() - if err != nil { - return nil, err - } - - // Seek to the series count - f.Seek(-4, os.SEEK_END) - b := make([]byte, 8) - _, err = f.Read(b[:4]) - if err != nil { - return nil, err - } - - seriesCount := binary.BigEndian.Uint32(b) - - // Get the min time - f.Seek(-20, os.SEEK_END) - f.Read(b) - minTime := time.Unix(0, int64(btou64(b))) - - // Get max time - f.Seek(-12, os.SEEK_END) - f.Read(b) - maxTime := time.Unix(0, int64(btou64(b))) - - // Figure out where the index starts - indexStart := stat.Size() - int64(seriesCount*12+20) - - // Seek to the start of the index - f.Seek(indexStart, os.SEEK_SET) - count := int(seriesCount) - index := &tsmIndex{ - offset: indexStart, - minTime: minTime, - maxTime: maxTime, - series: count, - } - - if indexStart < 0 { - return nil, fmt.Errorf("index corrupt: offset=%d", indexStart) - } - - // Read the index entries - for i := 0; i < count; i++ { - f.Read(b) - id := binary.BigEndian.Uint64(b) - f.Read(b[:4]) - pos := binary.BigEndian.Uint32(b[:4]) - index.blocks = append(index.blocks, &block{id: id, offset: int64(pos)}) - } - - return index, nil -} - -func cmdDumpTsm1(opts *tsdmDumpOpts) { - var errors []error - - f, err := os.Open(opts.path) - if err != nil { - println(err.Error()) - os.Exit(1) - } - - // Get the file size - stat, err := f.Stat() - if err != nil { - println(err.Error()) - os.Exit(1) - } - - b := make([]byte, 8) - f.Read(b[:4]) - - // Verify magic number - if binary.BigEndian.Uint32(b[:4]) != 0x16D116D1 { - println("Not a tsm1 file.") - os.Exit(1) - } - - ids, err := readIds(filepath.Dir(opts.path)) - if err != nil { - println("Failed to read series:", err.Error()) - os.Exit(1) - } - - invIds := map[uint64]string{} - for k, v := range ids { - invIds[v] = k - } - - index, err := readIndex(f) - if err != nil { - println("Failed to readIndex:", err.Error()) - - // Create a stubbed out index so we can still try and read the block data directly - // w/o panicing ourselves. 
- index = &tsmIndex{ - minTime: time.Unix(0, 0), - maxTime: time.Unix(0, 0), - offset: stat.Size(), - } - } - - blockStats := &blockStats{} - - println("Summary:") - fmt.Printf(" File: %s\n", opts.path) - fmt.Printf(" Time Range: %s - %s\n", - index.minTime.UTC().Format(time.RFC3339Nano), - index.maxTime.UTC().Format(time.RFC3339Nano), - ) - fmt.Printf(" Duration: %s ", index.maxTime.Sub(index.minTime)) - fmt.Printf(" Series: %d ", index.series) - fmt.Printf(" File Size: %d\n", stat.Size()) - println() - - tw := tabwriter.NewWriter(os.Stdout, 8, 8, 1, '\t', 0) - fmt.Fprintln(tw, " "+strings.Join([]string{"Pos", "ID", "Ofs", "Key", "Field"}, "\t")) - for i, block := range index.blocks { - key := invIds[block.id] - split := strings.Split(key, "#!~#") - - // We dont' know know if we have fields so use an informative default - var measurement, field string = "UNKNOWN", "UNKNOWN" - - // We read some IDs from the ids file - if len(invIds) > 0 { - // Change the default to error until we know we have a valid key - measurement = "ERR" - field = "ERR" - - // Possible corruption? Try to read as much as we can and point to the problem. - if key == "" { - errors = append(errors, fmt.Errorf("index pos %d, field id: %d, missing key for id", i, block.id)) - } else if len(split) < 2 { - errors = append(errors, fmt.Errorf("index pos %d, field id: %d, key corrupt: got '%v'", i, block.id, key)) - } else { - measurement = split[0] - field = split[1] - } - } - - if opts.filterKey != "" && !strings.Contains(key, opts.filterKey) { - continue - } - fmt.Fprintln(tw, " "+strings.Join([]string{ - strconv.FormatInt(int64(i), 10), - strconv.FormatUint(block.id, 10), - strconv.FormatInt(int64(block.offset), 10), - measurement, - field, - }, "\t")) - } - - if opts.dumpIndex { - println("Index:") - tw.Flush() - println() - } - - tw = tabwriter.NewWriter(os.Stdout, 8, 8, 1, '\t', 0) - fmt.Fprintln(tw, " "+strings.Join([]string{"Blk", "Ofs", "Len", "ID", "Type", "Min Time", "Points", "Enc [T/V]", "Len [T/V]"}, "\t")) - - // Staring at 4 because the magic number is 4 bytes - i := int64(4) - var blockCount, pointCount, blockSize int64 - indexSize := stat.Size() - index.offset - - // Start at the beginning and read every block - for i < index.offset { - f.Seek(int64(i), 0) - - f.Read(b) - id := btou64(b) - f.Read(b[:4]) - length := binary.BigEndian.Uint32(b[:4]) - buf := make([]byte, length) - f.Read(buf) - - blockSize += int64(len(buf)) + 12 - - startTime := time.Unix(0, int64(btou64(buf[:8]))) - blockType := buf[8] - - encoded := buf[9:] - - cnt := tsm1.BlockCount(buf) - pointCount += int64(cnt) - - // Length of the timestamp block - tsLen, j := binary.Uvarint(encoded) - - // Unpack the timestamp bytes - ts := encoded[int(j) : int(j)+int(tsLen)] - - // Unpack the value bytes - values := encoded[int(j)+int(tsLen):] - - tsEncoding := timeEnc[int(ts[0]>>4)] - vEncoding := encDescs[int(blockType+1)][values[0]>>4] - - typeDesc := blockTypes[blockType] - - blockStats.inc(0, ts[0]>>4) - blockStats.inc(int(blockType+1), values[0]>>4) - blockStats.size(len(buf)) - - if opts.filterKey != "" && !strings.Contains(invIds[id], opts.filterKey) { - i += (12 + int64(length)) - blockCount++ - continue - } - - fmt.Fprintln(tw, " "+strings.Join([]string{ - strconv.FormatInt(blockCount, 10), - strconv.FormatInt(i, 10), - strconv.FormatInt(int64(len(buf)), 10), - strconv.FormatUint(id, 10), - typeDesc, - startTime.UTC().Format(time.RFC3339Nano), - strconv.FormatInt(int64(cnt), 10), - fmt.Sprintf("%s/%s", tsEncoding, vEncoding), - 
fmt.Sprintf("%d/%d", len(ts), len(values)), - }, "\t")) - - i += (12 + int64(length)) - blockCount++ - } - if opts.dumpBlocks { - println("Blocks:") - tw.Flush() - println() - } - - fmt.Printf("Statistics\n") - fmt.Printf(" Blocks:\n") - fmt.Printf(" Total: %d Size: %d Min: %d Max: %d Avg: %d\n", - blockCount, blockSize, blockStats.min, blockStats.max, blockSize/blockCount) - fmt.Printf(" Index:\n") - fmt.Printf(" Total: %d Size: %d\n", len(index.blocks), indexSize) - fmt.Printf(" Points:\n") - fmt.Printf(" Total: %d", pointCount) - println() - - println(" Encoding:") - for i, counts := range blockStats.counts { - if len(counts) == 0 { - continue - } - fmt.Printf(" %s: ", strings.Title(fieldType[i])) - for j, v := range counts { - fmt.Printf("\t%s: %d (%d%%) ", encDescs[i][j], v, int(float64(v)/float64(blockCount)*100)) - } - println() - } - fmt.Printf(" Compression:\n") - fmt.Printf(" Per block: %0.2f bytes/point\n", float64(blockSize)/float64(pointCount)) - fmt.Printf(" Total: %0.2f bytes/point\n", float64(stat.Size())/float64(pointCount)) - - if len(errors) > 0 { - println() - fmt.Printf("Errors (%d):\n", len(errors)) - for _, err := range errors { - fmt.Printf(" * %v\n", err) - } - println() - } -} - -func cmdDumpTsm1dev(opts *tsdmDumpOpts) { - var errors []error - - f, err := os.Open(opts.path) - if err != nil { - println(err.Error()) - os.Exit(1) - } - - // Get the file size - stat, err := f.Stat() - if err != nil { - println(err.Error()) - os.Exit(1) - } - b := make([]byte, 8) - - r, err := tsm1.NewTSMReaderWithOptions(tsm1.TSMReaderOptions{ - MMAPFile: f, - }) - if err != nil { - println("Error opening TSM files: ", err.Error()) - } - defer r.Close() - - minTime, maxTime := r.TimeRange() - keys := r.Keys() - - blockStats := &blockStats{} - - println("Summary:") - fmt.Printf(" File: %s\n", opts.path) - fmt.Printf(" Time Range: %s - %s\n", - minTime.UTC().Format(time.RFC3339Nano), - maxTime.UTC().Format(time.RFC3339Nano), - ) - fmt.Printf(" Duration: %s ", maxTime.Sub(minTime)) - fmt.Printf(" Series: %d ", len(keys)) - fmt.Printf(" File Size: %d\n", stat.Size()) - println() - - tw := tabwriter.NewWriter(os.Stdout, 8, 8, 1, '\t', 0) - fmt.Fprintln(tw, " "+strings.Join([]string{"Pos", "Min Time", "Max Time", "Ofs", "Size", "Key", "Field"}, "\t")) - var pos int - for _, key := range keys { - for _, e := range r.Entries(key) { - pos++ - split := strings.Split(key, "#!~#") - - // We dont' know know if we have fields so use an informative default - var measurement, field string = "UNKNOWN", "UNKNOWN" - - // Possible corruption? Try to read as much as we can and point to the problem. 
- measurement = split[0] - field = split[1] - - if opts.filterKey != "" && !strings.Contains(key, opts.filterKey) { - continue - } - fmt.Fprintln(tw, " "+strings.Join([]string{ - strconv.FormatInt(int64(pos), 10), - e.MinTime.UTC().Format(time.RFC3339Nano), - e.MaxTime.UTC().Format(time.RFC3339Nano), - strconv.FormatInt(int64(e.Offset), 10), - strconv.FormatInt(int64(e.Size), 10), - measurement, - field, - }, "\t")) - } - } - - if opts.dumpIndex { - println("Index:") - tw.Flush() - println() - } - - tw = tabwriter.NewWriter(os.Stdout, 8, 8, 1, '\t', 0) - fmt.Fprintln(tw, " "+strings.Join([]string{"Blk", "Chk", "Ofs", "Len", "Type", "Min Time", "Points", "Enc [T/V]", "Len [T/V]"}, "\t")) - - // Starting at 5 because the magic number is 4 bytes + 1 byte version - i := int64(5) - var blockCount, pointCount, blockSize int64 - indexSize := r.IndexSize() - - // Start at the beginning and read every block - for _, key := range keys { - for _, e := range r.Entries(key) { - - f.Seek(int64(e.Offset), 0) - f.Read(b[:4]) - - chksum := btou32(b[:4]) - - buf := make([]byte, e.Size-4) - f.Read(buf) - - blockSize += int64(e.Size) - - blockType := buf[0] - - encoded := buf[1:] - - var v []tsm1.Value - v, err := tsm1.DecodeBlock(buf, v) - if err != nil { - fmt.Printf("error: %v\n", err.Error()) - os.Exit(1) - } - startTime := v[0].Time() - - pointCount += int64(len(v)) - - // Length of the timestamp block - tsLen, j := binary.Uvarint(encoded) - - // Unpack the timestamp bytes - ts := encoded[int(j) : int(j)+int(tsLen)] - - // Unpack the value bytes - values := encoded[int(j)+int(tsLen):] - - tsEncoding := timeEnc[int(ts[0]>>4)] - vEncoding := encDescs[int(blockType+1)][values[0]>>4] - - typeDesc := blockTypes[blockType] - - blockStats.inc(0, ts[0]>>4) - blockStats.inc(int(blockType+1), values[0]>>4) - blockStats.size(len(buf)) - - if opts.filterKey != "" && !strings.Contains(key, opts.filterKey) { - i += blockSize - blockCount++ - continue - } - - fmt.Fprintln(tw, " "+strings.Join([]string{ - strconv.FormatInt(blockCount, 10), - strconv.FormatUint(uint64(chksum), 10), - strconv.FormatInt(i, 10), - strconv.FormatInt(int64(len(buf)), 10), - typeDesc, - startTime.UTC().Format(time.RFC3339Nano), - strconv.FormatInt(int64(len(v)), 10), - fmt.Sprintf("%s/%s", tsEncoding, vEncoding), - fmt.Sprintf("%d/%d", len(ts), len(values)), - }, "\t")) - - i += blockSize - blockCount++ - } - } - - if opts.dumpBlocks { - println("Blocks:") - tw.Flush() - println() - } - - var blockSizeAvg int64 - if blockCount > 0 { - blockSizeAvg = blockSize / blockCount - } - fmt.Printf("Statistics\n") - fmt.Printf(" Blocks:\n") - fmt.Printf(" Total: %d Size: %d Min: %d Max: %d Avg: %d\n", - blockCount, blockSize, blockStats.min, blockStats.max, blockSizeAvg) - fmt.Printf(" Index:\n") - fmt.Printf(" Total: %d Size: %d\n", blockCount, indexSize) - fmt.Printf(" Points:\n") - fmt.Printf(" Total: %d", pointCount) - println() - - println(" Encoding:") - for i, counts := range blockStats.counts { - if len(counts) == 0 { - continue - } - fmt.Printf(" %s: ", strings.Title(fieldType[i])) - for j, v := range counts { - fmt.Printf("\t%s: %d (%d%%) ", encDescs[i][j], v, int(float64(v)/float64(blockCount)*100)) - } - println() - } - fmt.Printf(" Compression:\n") - fmt.Printf(" Per block: %0.2f bytes/point\n", float64(blockSize)/float64(pointCount)) - fmt.Printf(" Total: %0.2f bytes/point\n", float64(stat.Size())/float64(pointCount)) - - if len(errors) > 0 { - println() - fmt.Printf("Errors (%d):\n", len(errors)) - for _, err := range errors { - 
fmt.Printf(" * %v\n", err) - } - println() - } -} diff -Nru influxdb-0.10.0+dfsg1/cmd/influx_inspect/verify/verify.go influxdb-1.1.1+dfsg1/cmd/influx_inspect/verify/verify.go --- influxdb-0.10.0+dfsg1/cmd/influx_inspect/verify/verify.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influx_inspect/verify/verify.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,119 @@ +package verify + +import ( + "flag" + "fmt" + "hash/crc32" + "io" + "os" + "path/filepath" + "text/tabwriter" + "time" + + "github.com/influxdata/influxdb/tsdb/engine/tsm1" +) + +// Command represents the program execution for "influx_inspect verify". +type Command struct { + Stderr io.Writer + Stdout io.Writer +} + +// NewCommand returns a new instance of Command. +func NewCommand() *Command { + return &Command{ + Stderr: os.Stderr, + Stdout: os.Stdout, + } +} + +// Run executes the command. +func (cmd *Command) Run(args ...string) error { + var path string + fs := flag.NewFlagSet("verify", flag.ExitOnError) + fs.StringVar(&path, "dir", os.Getenv("HOME")+"/.influxdb", "Root storage path. [$HOME/.influxdb]") + + fs.SetOutput(cmd.Stdout) + fs.Usage = cmd.printUsage + + if err := fs.Parse(args); err != nil { + return err + } + + start := time.Now() + dataPath := filepath.Join(path, "data") + + brokenBlocks := 0 + totalBlocks := 0 + + // No need to do this in a loop + ext := fmt.Sprintf(".%s", tsm1.TSMFileExtension) + + // Get all TSM files by walking through the data dir + files := []string{} + err := filepath.Walk(dataPath, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + if filepath.Ext(path) == ext { + files = append(files, path) + } + return nil + }) + if err != nil { + panic(err) + } + + tw := tabwriter.NewWriter(cmd.Stdout, 16, 8, 0, '\t', 0) + + // Verify the checksums of every block in every file + for _, f := range files { + file, err := os.OpenFile(f, os.O_RDONLY, 0600) + if err != nil { + return err + } + + reader, err := tsm1.NewTSMReader(file) + if err != nil { + return err + } + + blockItr := reader.BlockIterator() + brokenFileBlocks := 0 + count := 0 + for blockItr.Next() { + totalBlocks++ + key, _, _, checksum, buf, err := blockItr.Read() + if err != nil { + brokenBlocks++ + fmt.Fprintf(tw, "%s: could not get checksum for key %v block %d due to error: %q\n", f, key, count, err) + } else if expected := crc32.ChecksumIEEE(buf); checksum != expected { + brokenBlocks++ + fmt.Fprintf(tw, "%s: got %d but expected %d for key %v, block %d\n", f, checksum, expected, key, count) + } + count++ + } + if brokenFileBlocks == 0 { + fmt.Fprintf(tw, "%s: healthy\n", f) + } + reader.Close() + } + + fmt.Fprintf(tw, "Broken Blocks: %d / %d, in %vs\n", brokenBlocks, totalBlocks, time.Since(start).Seconds()) + tw.Flush() + return nil +} + +// printUsage prints the usage message to STDERR. +func (cmd *Command) printUsage() { + usage := fmt.Sprintf(`Verifies the the checksum of shards. + +Usage: influx_inspect verify [flags] + + -dir + Root storage path + Defaults to "%[1]s/.influxdb". 
+ `, os.Getenv("HOME")) + + fmt.Fprintf(cmd.Stdout, usage) +} diff -Nru influxdb-0.10.0+dfsg1/cmd/influx_inspect/verify/verify_test.go influxdb-1.1.1+dfsg1/cmd/influx_inspect/verify/verify_test.go --- influxdb-0.10.0+dfsg1/cmd/influx_inspect/verify/verify_test.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influx_inspect/verify/verify_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,3 @@ +package verify_test + +// TODO: write some tests diff -Nru influxdb-0.10.0+dfsg1/cmd/influx_stress/influx_stress.go influxdb-1.1.1+dfsg1/cmd/influx_stress/influx_stress.go --- influxdb-0.10.0+dfsg1/cmd/influx_stress/influx_stress.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influx_stress/influx_stress.go 2016-12-06 21:36:15.000000000 +0000 @@ -3,13 +3,16 @@ import ( "flag" "fmt" + "log" "os" "runtime/pprof" - "github.com/influxdb/influxdb/stress" + "github.com/influxdata/influxdb/stress" + v2 "github.com/influxdata/influxdb/stress/v2" ) var ( + useV2 = flag.Bool("v2", false, "Use version 2 of stress tool") config = flag.String("config", "", "The stress test file") cpuprofile = flag.String("cpuprofile", "", "Write the cpu profile to `filename`") db = flag.String("db", "", "target database within test system for write and query load") @@ -29,32 +32,39 @@ defer pprof.StopCPUProfile() } - c, err := stress.NewConfig(*config) - if err != nil { - fmt.Println(err) - return - } - - if *db != "" { - c.Provision.Basic.Database = *db - c.Write.InfluxClients.Basic.Database = *db - c.Read.QueryClients.Basic.Database = *db - } - - w := stress.NewWriter(&c.Write.PointGenerators.Basic, &c.Write.InfluxClients.Basic) - r := stress.NewQuerier(&c.Read.QueryGenerators.Basic, &c.Read.QueryClients.Basic) - s := stress.NewStressTest(&c.Provision.Basic, w, r) + if *useV2 { + if *config != "" { + v2.RunStress(*config) + } else { + v2.RunStress("stress/v2/iql/file.iql") + } + } else { - bw := stress.NewBroadcastChannel() - bw.Register(c.Write.InfluxClients.Basic.BasicWriteHandler) - bw.Register(o.HTTPHandler("write")) + c, err := stress.NewConfig(*config) + if err != nil { + log.Fatal(err) + return + } - br := stress.NewBroadcastChannel() - br.Register(c.Read.QueryClients.Basic.BasicReadHandler) - br.Register(o.HTTPHandler("read")) + if *db != "" { + c.Provision.Basic.Database = *db + c.Write.InfluxClients.Basic.Database = *db + c.Read.QueryClients.Basic.Database = *db + } - s.Start(bw.Handle, br.Handle) + w := stress.NewWriter(c.Write.PointGenerators.Basic, &c.Write.InfluxClients.Basic) + r := stress.NewQuerier(&c.Read.QueryGenerators.Basic, &c.Read.QueryClients.Basic) + s := stress.NewStressTest(&c.Provision.Basic, w, r) + + bw := stress.NewBroadcastChannel() + bw.Register(c.Write.InfluxClients.Basic.BasicWriteHandler) + bw.Register(o.HTTPHandler("write")) + + br := stress.NewBroadcastChannel() + br.Register(c.Read.QueryClients.Basic.BasicReadHandler) + br.Register(o.HTTPHandler("read")) - return + s.Start(bw.Handle, br.Handle) + } } diff -Nru influxdb-0.10.0+dfsg1/cmd/influx_stress/README.md influxdb-1.1.1+dfsg1/cmd/influx_stress/README.md --- influxdb-0.10.0+dfsg1/cmd/influx_stress/README.md 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influx_stress/README.md 2016-12-06 21:36:15.000000000 +0000 @@ -1,13 +1,18 @@ # `influx_stress` +If you run into any issues with this tool please mention @jackzampolin when you create an issue. 
+ ## Ways to run ### `influx_stress` -This runs a basic stress test with the [default config](https://github.com/influxdb/influxdb/blob/master/stress/stress.toml) For more information on the configuration file please see the default. For additional questions please contact @mjdesa +This runs a basic stress test with the [default config](https://github.com/influxdata/influxdb/blob/master/stress/stress.toml) For more information on the configuration file please see the default. ### `influx_stress -config someConfig.toml` This runs the stress test with a valid configuration file located at `someConfig.tom` +### `influx_stress -v2 -config someConfig.iql` +This runs the stress test with a valid `v2` configuration file. For more information about the `v2` stress test see the [v2 stress README](https://github.com/influxdata/influxdb/blob/master/stress/v2/README.md). + ## Flags If flags are defined they overwrite the config from any file passed in. @@ -20,7 +25,7 @@ ### `-config` string The relative path to the stress test configuration file. -`default` = [config](https://github.com/influxdb/influxdb/blob/master/stress/stress.toml) +`default` = [config](https://github.com/influxdata/influxdb/blob/master/stress/stress.toml) ### `-cpuprofile` filename Writes the result of Go's cpu profile to filename @@ -35,4 +40,4 @@ ### `-tags` value A comma separated list of tags to add to write and query response times. -`default` = "" \ No newline at end of file +`default` = "" diff -Nru influxdb-0.10.0+dfsg1/cmd/influx_tsm/b1/reader.go influxdb-1.1.1+dfsg1/cmd/influx_tsm/b1/reader.go --- influxdb-0.10.0+dfsg1/cmd/influx_tsm/b1/reader.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influx_tsm/b1/reader.go 2016-12-06 21:36:15.000000000 +0000 @@ -1,19 +1,26 @@ -package b1 +package b1 // import "github.com/influxdata/influxdb/cmd/influx_tsm/b1" import ( "encoding/binary" + "math" "sort" - "sync/atomic" "time" "github.com/boltdb/bolt" - "github.com/influxdb/influxdb/cmd/influx_tsm/tsdb" - "github.com/influxdb/influxdb/tsdb/engine/tsm1" + "github.com/influxdata/influxdb/cmd/influx_tsm/stats" + "github.com/influxdata/influxdb/cmd/influx_tsm/tsdb" + "github.com/influxdata/influxdb/tsdb/engine/tsm1" ) -const DefaultChunkSize = 1000 +// DefaultChunkSize is the size of chunks read from the b1 shard +const DefaultChunkSize int = 1000 -var NoFieldsFiltered uint64 +var excludedBuckets = map[string]bool{ + "fields": true, + "meta": true, + "series": true, + "wal": true, +} // Reader is used to read all data from a b1 shard. type Reader struct { @@ -24,24 +31,32 @@ cursors []*cursor currCursor int - keyBuf string - valuesBuf []tsm1.Value + keyBuf string + values []tsm1.Value + valuePos int - series map[string]*tsdb.Series fields map[string]*tsdb.MeasurementFields codecs map[string]*tsdb.FieldCodec - ChunkSize int + stats *stats.Stats } // NewReader returns a reader for the b1 shard at path. -func NewReader(path string) *Reader { - return &Reader{ +func NewReader(path string, stats *stats.Stats, chunkSize int) *Reader { + r := &Reader{ path: path, - series: make(map[string]*tsdb.Series), fields: make(map[string]*tsdb.MeasurementFields), codecs: make(map[string]*tsdb.FieldCodec), + stats: stats, + } + + if chunkSize <= 0 { + chunkSize = DefaultChunkSize } + + r.values = make([]tsm1.Value, chunkSize) + + return r } // Open opens the reader. 
@@ -71,37 +86,33 @@ return err } - // Load series - if err := r.db.View(func(tx *bolt.Tx) error { - meta := tx.Bucket([]byte("series")) - c := meta.Cursor() + seriesSet := make(map[string]bool) - for k, v := c.First(); k != nil; k, v = c.Next() { - series := &tsdb.Series{} - if err := series.UnmarshalBinary(v); err != nil { - return err + // ignore series index and find all series in this shard + if err := r.db.View(func(tx *bolt.Tx) error { + tx.ForEach(func(name []byte, _ *bolt.Bucket) error { + key := string(name) + if !excludedBuckets[key] { + seriesSet[key] = true } - r.series[string(k)] = series - } + return nil + }) return nil }); err != nil { return err } - // Create cursor for each field of each series. r.tx, err = r.db.Begin(false) if err != nil { return err } - for s, _ := range r.series { - if err != nil { - return err - } + // Create cursor for each field of each series. + for s := range seriesSet { measurement := tsdb.MeasurementFromSeriesKey(s) - fields := r.fields[tsdb.MeasurementFromSeriesKey(s)] + fields := r.fields[measurement] if fields == nil { - atomic.AddUint64(&NoFieldsFiltered, 1) + r.stats.IncrFiltered() continue } for _, f := range fields.Fields { @@ -118,49 +129,66 @@ // Next returns whether any data remains to be read. It must be called before // the next call to Read(). func (r *Reader) Next() bool { + r.valuePos = 0 +OUTER: for { - if r.currCursor == len(r.cursors) { + if r.currCursor >= len(r.cursors) { // All cursors drained. No more data remains. return false } cc := r.cursors[r.currCursor] - k, v := cc.Next() - if k == -1 { - // Go to next cursor and try again. - r.currCursor++ - if len(r.valuesBuf) == 0 { - // The previous cursor had no data. Instead of returning - // just go immediately to the next cursor. - continue + r.keyBuf = tsm1.SeriesFieldKey(cc.series, cc.field) + + for { + k, v := cc.Next() + if k == -1 { + // Go to next cursor and try again. + r.currCursor++ + if r.valuePos == 0 { + // The previous cursor had no data. Instead of returning + // just go immediately to the next cursor. + continue OUTER + } + // There is some data available. Indicate that it should be read. + return true } - // There is some data available. Indicate that it should be read. - return true - } - r.keyBuf = tsm1.SeriesFieldKey(cc.series, cc.field) - r.valuesBuf = append(r.valuesBuf, tsdb.ConvertToValue(k, v)) - if len(r.valuesBuf) == r.ChunkSize { - return true + if f, ok := v.(float64); ok { + if math.IsInf(f, 0) { + r.stats.AddPointsRead(1) + r.stats.IncrInf() + continue + } + + if math.IsNaN(f) { + r.stats.AddPointsRead(1) + r.stats.IncrNaN() + continue + } + } + + r.values[r.valuePos] = tsm1.NewValue(k, v) + r.valuePos++ + + if r.valuePos >= len(r.values) { + return true + } } } - } // Read returns the next chunk of data in the shard, converted to tsm1 values. Data is // emitted completely for every field, in every series, before the next field is processed. // Data from Read() adheres to the requirements for writing to tsm1 shards func (r *Reader) Read() (string, []tsm1.Value, error) { - defer func() { - r.valuesBuf = nil - }() - - return r.keyBuf, r.valuesBuf, nil + return r.keyBuf, r.values[:r.valuePos], nil } // Close closes the reader. func (r *Reader) Close() error { - return r.tx.Rollback() + r.tx.Rollback() + return r.db.Close() } // cursor provides ordered iteration across a series. @@ -194,8 +222,10 @@ } // Seek moves the cursor to a position. 
-func (c cursor) SeekTo(seek int64) { - k, v := c.cursor.Seek(u64tob(uint64(seek))) +func (c *cursor) SeekTo(seek int64) { + var seekBytes [8]byte + binary.BigEndian.PutUint64(seekBytes[:], uint64(seek)) + k, v := c.cursor.Seek(seekBytes[:]) c.keyBuf, c.valBuf = tsdb.DecodeKeyValue(c.field, c.dec, k, v) } @@ -232,15 +262,8 @@ func (a cursors) Len() int { return len(a) } func (a cursors) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a cursors) Less(i, j int) bool { - return tsm1.SeriesFieldKey(a[i].series, a[i].field) < tsm1.SeriesFieldKey(a[j].series, a[j].field) -} - -// u64tob converts a uint64 into an 8-byte slice. -func u64tob(v uint64) []byte { - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, v) - return b + if a[i].series == a[j].series { + return a[i].field < a[j].field + } + return a[i].series < a[j].series } - -// btou64 converts an 8-byte slice to a uint64. -func btou64(b []byte) uint64 { return binary.BigEndian.Uint64(b) } diff -Nru influxdb-0.10.0+dfsg1/cmd/influx_tsm/bz1/reader.go influxdb-1.1.1+dfsg1/cmd/influx_tsm/bz1/reader.go --- influxdb-0.10.0+dfsg1/cmd/influx_tsm/bz1/reader.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influx_tsm/bz1/reader.go 2016-12-06 21:36:15.000000000 +0000 @@ -1,24 +1,24 @@ -package bz1 +package bz1 // import "github.com/influxdata/influxdb/cmd/influx_tsm/bz1" import ( "bytes" "encoding/binary" "encoding/json" "fmt" + "math" "sort" - "sync/atomic" "time" "github.com/boltdb/bolt" "github.com/golang/snappy" - "github.com/influxdb/influxdb/cmd/influx_tsm/tsdb" - tsm "github.com/influxdb/influxdb/tsdb/engine/tsm1" + "github.com/influxdata/influxdb/cmd/influx_tsm/stats" + "github.com/influxdata/influxdb/cmd/influx_tsm/tsdb" + "github.com/influxdata/influxdb/tsdb/engine/tsm1" ) +// DefaultChunkSize is the size of chunks read from the bz1 shard const DefaultChunkSize = 1000 -var NoFieldsFiltered uint64 - // Reader is used to read all data from a bz1 shard. type Reader struct { path string @@ -28,25 +28,32 @@ cursors []*cursor currCursor int - keyBuf string - valuesBuf []tsm.Value + keyBuf string + values []tsm1.Value + valuePos int - series map[string]*tsdb.Series fields map[string]*tsdb.MeasurementFields codecs map[string]*tsdb.FieldCodec - ChunkSize int + stats *stats.Stats } // NewReader returns a reader for the bz1 shard at path. -func NewReader(path string) *Reader { - return &Reader{ - path: path, - series: make(map[string]*tsdb.Series), - fields: make(map[string]*tsdb.MeasurementFields), - codecs: make(map[string]*tsdb.FieldCodec), - ChunkSize: DefaultChunkSize, +func NewReader(path string, stats *stats.Stats, chunkSize int) *Reader { + r := &Reader{ + path: path, + fields: make(map[string]*tsdb.MeasurementFields), + codecs: make(map[string]*tsdb.FieldCodec), + stats: stats, + } + + if chunkSize <= 0 { + chunkSize = DefaultChunkSize } + + r.values = make([]tsm1.Value, chunkSize) + + return r } // Open opens the reader. @@ -58,6 +65,8 @@ } r.db = db + seriesSet := make(map[string]bool) + if err := r.db.View(func(tx *bolt.Tx) error { var data []byte @@ -66,20 +75,20 @@ // No data in this shard. return nil } - buf := meta.Get([]byte("series")) - if buf == nil { - // No data in this shard. 
+ + pointsBucket := tx.Bucket([]byte("points")) + if pointsBucket == nil { return nil } - data, err = snappy.Decode(nil, buf) - if err != nil { - return err - } - if err := json.Unmarshal(data, &r.series); err != nil { + + if err := pointsBucket.ForEach(func(key, _ []byte) error { + seriesSet[string(key)] = true + return nil + }); err != nil { return err } - buf = meta.Get([]byte("fields")) + buf := meta.Get([]byte("fields")) if buf == nil { // No data in this shard. return nil @@ -102,20 +111,17 @@ r.codecs[k] = tsdb.NewFieldCodec(v.Fields) } - // Create cursor for each field of each series. r.tx, err = r.db.Begin(false) if err != nil { return err } - for s, _ := range r.series { - if err != nil { - return err - } + // Create cursor for each field of each series. + for s := range seriesSet { measurement := tsdb.MeasurementFromSeriesKey(s) - fields := r.fields[tsdb.MeasurementFromSeriesKey(s)] + fields := r.fields[measurement] if fields == nil { - atomic.AddUint64(&NoFieldsFiltered, 1) + r.stats.IncrFiltered() continue } for _, f := range fields.Fields { @@ -134,30 +140,51 @@ // Next returns whether there is any more data to be read. func (r *Reader) Next() bool { + r.valuePos = 0 +OUTER: for { - if r.currCursor == len(r.cursors) { + if r.currCursor >= len(r.cursors) { // All cursors drained. No more data remains. return false } cc := r.cursors[r.currCursor] - k, v := cc.Next() - if k == -1 { - // Go to next cursor and try again. - r.currCursor++ - if len(r.valuesBuf) == 0 { - // The previous cursor had no data. Instead of returning - // just go immediately to the next cursor. - continue + r.keyBuf = tsm1.SeriesFieldKey(cc.series, cc.field) + + for { + k, v := cc.Next() + if k == -1 { + // Go to next cursor and try again. + r.currCursor++ + if r.valuePos == 0 { + // The previous cursor had no data. Instead of returning + // just go immediately to the next cursor. + continue OUTER + } + // There is some data available. Indicate that it should be read. + return true } - // There is some data available. Indicate that it should be read. - return true - } - r.keyBuf = tsm.SeriesFieldKey(cc.series, cc.field) - r.valuesBuf = append(r.valuesBuf, tsdb.ConvertToValue(k, v)) - if len(r.valuesBuf) == r.ChunkSize { - return true + if f, ok := v.(float64); ok { + if math.IsInf(f, 0) { + r.stats.AddPointsRead(1) + r.stats.IncrInf() + continue + } + + if math.IsNaN(f) { + r.stats.AddPointsRead(1) + r.stats.IncrNaN() + continue + } + } + + r.values[r.valuePos] = tsm1.NewValue(k, v) + r.valuePos++ + + if r.valuePos >= len(r.values) { + return true + } } } } @@ -165,17 +192,14 @@ // Read returns the next chunk of data in the shard, converted to tsm1 values. Data is // emitted completely for every field, in every series, before the next field is processed. // Data from Read() adheres to the requirements for writing to tsm1 shards -func (r *Reader) Read() (string, []tsm.Value, error) { - defer func() { - r.valuesBuf = nil - }() - - return r.keyBuf, r.valuesBuf, nil +func (r *Reader) Read() (string, []tsm1.Value, error) { + return r.keyBuf, r.values[:r.valuePos], nil } // Close closes the reader. func (r *Reader) Close() error { - return r.tx.Rollback() + r.tx.Rollback() + return r.db.Close() } // cursor provides ordered iteration across a series. @@ -213,24 +237,25 @@ // Seek moves the cursor to a position. 
func (c *cursor) SeekTo(seek int64) { - seekBytes := u64tob(uint64(seek)) + var seekBytes [8]byte + binary.BigEndian.PutUint64(seekBytes[:], uint64(seek)) // Move cursor to appropriate block and set to buffer. - k, v := c.cursor.Seek(seekBytes) + k, v := c.cursor.Seek(seekBytes[:]) if v == nil { // get the last block, it might have this time _, v = c.cursor.Last() - } else if seek < int64(btou64(k)) { // the seek key is less than this block, go back one and check + } else if seek < int64(binary.BigEndian.Uint64(k)) { // the seek key is less than this block, go back one and check _, v = c.cursor.Prev() // if the previous block max time is less than the seek value, reset to where we were originally - if v == nil || seek > int64(btou64(v[0:8])) { - _, v = c.cursor.Seek(seekBytes) + if v == nil || seek > int64(binary.BigEndian.Uint64(v[0:8])) { + _, v = c.cursor.Seek(seekBytes[:]) } } c.setBuf(v) // Read current block up to seek position. - c.seekBuf(seekBytes) + c.seekBuf(seekBytes[:]) // Return current entry. c.keyBuf, c.valBuf = c.read() @@ -332,17 +357,10 @@ func (a cursors) Len() int { return len(a) } func (a cursors) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a cursors) Less(i, j int) bool { - return tsm.SeriesFieldKey(a[i].series, a[i].field) < tsm.SeriesFieldKey(a[j].series, a[j].field) -} - -// btou64 converts an 8-byte slice into an uint64. -func btou64(b []byte) uint64 { return binary.BigEndian.Uint64(b) } - -// u64tob converts a uint64 into an 8-byte slice. -func u64tob(v uint64) []byte { - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, v) - return b + if a[i].series == a[j].series { + return a[i].field < a[j].field + } + return a[i].series < a[j].series } // entryHeaderSize is the number of bytes required for the header. diff -Nru influxdb-0.10.0+dfsg1/cmd/influx_tsm/converter.go influxdb-1.1.1+dfsg1/cmd/influx_tsm/converter.go --- influxdb-0.10.0+dfsg1/cmd/influx_tsm/converter.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influx_tsm/converter.go 2016-12-06 21:36:15.000000000 +0000 @@ -2,13 +2,18 @@ import ( "fmt" - "math" "os" "path/filepath" - "github.com/influxdb/influxdb/tsdb/engine/tsm1" + "github.com/influxdata/influxdb/cmd/influx_tsm/stats" + "github.com/influxdata/influxdb/tsdb/engine/tsm1" ) +const ( + maxBlocksPerKey = 65535 +) + +// KeyIterator is used to iterate over b* keys for conversion to tsm keys type KeyIterator interface { Next() bool Read() (string, []tsm1.Value, error) @@ -19,15 +24,15 @@ path string maxTSMFileSize uint32 sequence int - tracker *tracker + stats *stats.Stats } // NewConverter returns a new instance of the Converter. -func NewConverter(path string, sz uint32, t *tracker) *Converter { +func NewConverter(path string, sz uint32, stats *stats.Stats) *Converter { return &Converter{ path: path, maxTSMFileSize: sz, - tracker: t, + stats: stats, } } @@ -40,33 +45,36 @@ // Iterate until no more data remains. var w tsm1.TSMWriter + var keyCount map[string]int + for iter.Next() { k, v, err := iter.Read() if err != nil { return err } - scrubbed := c.scrubValues(v) if w == nil { w, err = c.nextTSMWriter() if err != nil { return err } + keyCount = map[string]int{} } - if err := w.Write(k, scrubbed); err != nil { + if err := w.Write(k, v); err != nil { return err } + keyCount[k]++ - c.tracker.AddPointsRead(len(v)) - c.tracker.AddPointsWritten(len(scrubbed)) + c.stats.AddPointsRead(len(v)) + c.stats.AddPointsWritten(len(v)) // If we have a max file size configured and we're over it, start a new TSM file. 
- if w.Size() > c.maxTSMFileSize { + if w.Size() > c.maxTSMFileSize || keyCount[k] == maxBlocksPerKey { if err := w.WriteIndex(); err != nil && err != tsm1.ErrNoValues { return err } - c.tracker.AddTSMBytes(w.Size()) + c.stats.AddTSMBytes(w.Size()) if err := w.Close(); err != nil { return err @@ -79,7 +87,7 @@ if err := w.WriteIndex(); err != nil && err != tsm1.ErrNoValues { return err } - c.tracker.AddTSMBytes(w.Size()) + c.stats.AddTSMBytes(w.Size()) if err := w.Close(); err != nil { return err @@ -105,49 +113,6 @@ return nil, err } - c.tracker.IncrTSMFileCount() + c.stats.IncrTSMFileCount() return w, nil } - -// scrubValues takes a slice and removes float64 NaN and Inf. If neither is -// present in the slice, the original slice is returned. This is to avoid -// copying slices unnecessarily. -func (c *Converter) scrubValues(values []tsm1.Value) []tsm1.Value { - var scrubbed []tsm1.Value - - if values == nil { - return nil - } - - for i, v := range values { - if f, ok := v.Value().(float64); ok { - var filter bool - if math.IsNaN(f) { - filter = true - c.tracker.IncrNaN() - } - if math.IsInf(f, 0) { - filter = true - c.tracker.IncrInf() - } - - if filter { - if scrubbed == nil { - // Take every value up to the NaN, indicating that scrubbed - // should now be used. - scrubbed = values[:i] - } - } else { - if scrubbed != nil { - // We've filtered at least 1 value, so add value to filtered slice. - scrubbed = append(scrubbed, v) - } - } - } - } - - if scrubbed != nil { - return scrubbed - } - return values -} diff -Nru influxdb-0.10.0+dfsg1/cmd/influx_tsm/converter_test.go influxdb-1.1.1+dfsg1/cmd/influx_tsm/converter_test.go --- influxdb-0.10.0+dfsg1/cmd/influx_tsm/converter_test.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influx_tsm/converter_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,58 +0,0 @@ -package main - -import ( - "fmt" - "math" - "reflect" - "strings" - "testing" - "time" - - "github.com/influxdb/influxdb/tsdb/engine/tsm1" -) - -func TestScrubValues(t *testing.T) { - dummy := Converter{ - tracker: new(tracker), - } - - epoch := time.Unix(0, 0) - simple := []tsm1.Value{tsm1.NewValue(epoch, 1.0)} - - for _, tt := range []struct { - input, expected []tsm1.Value - }{ - { - input: simple, - expected: simple, - }, { - input: []tsm1.Value{simple[0], tsm1.NewValue(epoch, math.NaN())}, - expected: simple, - }, { - input: []tsm1.Value{simple[0], tsm1.NewValue(epoch, math.Inf(-1))}, - expected: simple, - }, { - input: []tsm1.Value{simple[0], tsm1.NewValue(epoch, math.Inf(1)), tsm1.NewValue(epoch, math.NaN())}, - expected: simple, - }, - } { - out := dummy.scrubValues(tt.input) - if !reflect.DeepEqual(out, tt.expected) { - t.Errorf("Failed to scrub '%s': Got '%s', Expected '%s'", pretty(tt.input), pretty(out), pretty(tt.expected)) - } - } -} - -func pretty(vals []tsm1.Value) string { - if len(vals) == 0 { - return "[]" - } - - strs := make([]string, len(vals)) - - for i := range vals { - strs[i] = fmt.Sprintf("{%v: %v}", vals[i].UnixNano(), vals[i].Value()) - } - - return "[" + strings.Join(strs, ", ") + "]" -} diff -Nru influxdb-0.10.0+dfsg1/cmd/influx_tsm/main.go influxdb-1.1.1+dfsg1/cmd/influx_tsm/main.go --- influxdb-0.10.0+dfsg1/cmd/influx_tsm/main.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influx_tsm/main.go 2016-12-06 21:36:15.000000000 +0000 @@ -11,6 +11,7 @@ "os" "path/filepath" "runtime" + "runtime/pprof" "sort" "strings" "text/tabwriter" @@ -19,11 +20,12 @@ "net/http" _ "net/http/pprof" - 
"github.com/influxdb/influxdb/cmd/influx_tsm/b1" - "github.com/influxdb/influxdb/cmd/influx_tsm/bz1" - "github.com/influxdb/influxdb/cmd/influx_tsm/tsdb" + "github.com/influxdata/influxdb/cmd/influx_tsm/b1" + "github.com/influxdata/influxdb/cmd/influx_tsm/bz1" + "github.com/influxdata/influxdb/cmd/influx_tsm/tsdb" ) +// ShardReader reads b* shards and converts to tsm shards type ShardReader interface { KeyIterator Open() error @@ -42,7 +44,7 @@ node again to make sure all of data has been converted correctly. To restore a backup: - Shut down the node, remove the converted directory, and + Shut down the node, remove the converted directory, and copy the backed-up directory to the original location.` type options struct { @@ -54,7 +56,8 @@ Parallel bool SkipBackup bool UpdateInterval time.Duration - // Quiet bool + Yes bool + CPUFile string } func (o *options) Parse() error { @@ -66,10 +69,11 @@ fs.Uint64Var(&opts.TSMSize, "sz", maxTSMSz, "Maximum size of individual TSM files.") fs.BoolVar(&opts.Parallel, "parallel", false, "Perform parallel conversion. (up to GOMAXPROCS shards at once)") fs.BoolVar(&opts.SkipBackup, "nobackup", false, "Disable database backups. Not recommended.") - fs.StringVar(&opts.BackupPath, "backup", "", "The location to backup up the current databases. Must not be within the data directoryi.") - // fs.BoolVar(&opts.Quiet, "quiet", false, "Suppresses the regular status updates.") + fs.StringVar(&opts.BackupPath, "backup", "", "The location to backup up the current databases. Must not be within the data directory.") fs.StringVar(&opts.DebugAddr, "debug", "", "If set, http debugging endpoints will be enabled on the given address") fs.DurationVar(&opts.UpdateInterval, "interval", 5*time.Second, "How often status updates are printed.") + fs.BoolVar(&opts.Yes, "y", false, "Don't ask, just convert") + fs.StringVar(&opts.CPUFile, "profile", "", "CPU Profile location") fs.Usage = func() { fmt.Fprintf(os.Stderr, "Usage: %v [options] \n", os.Args[0]) fmt.Fprintf(os.Stderr, "%v\n\nOptions:\n", description) @@ -118,7 +122,7 @@ if strings.HasPrefix(o.BackupPath, o.DataPath) { fmt.Println(o.BackupPath, o.DataPath) - return errors.New("backup directory cannot be contained within data directory.") + return errors.New("backup directory cannot be contained within data directory") } } @@ -170,11 +174,13 @@ // Dump summary of what is about to happen. fmt.Println("b1 and bz1 shard conversion.") fmt.Println("-----------------------------------") - fmt.Println("Data directory is: ", opts.DataPath) - fmt.Println("Backup directory is: ", opts.BackupPath) - fmt.Println("Databases specified: ", allDBs(opts.DBs)) - fmt.Println("Database backups enabled:", yesno(!opts.SkipBackup), badUser) - fmt.Println("Parallel mode enabled: ", yesno(opts.Parallel), runtime.GOMAXPROCS(0)) + fmt.Println("Data directory is: ", opts.DataPath) + if !opts.SkipBackup { + fmt.Println("Backup directory is: ", opts.BackupPath) + } + fmt.Println("Databases specified: ", allDBs(opts.DBs)) + fmt.Println("Database backups enabled: ", yesno(!opts.SkipBackup), badUser) + fmt.Printf("Parallel mode enabled (GOMAXPROCS): %s (%d)\n", yesno(opts.Parallel), runtime.GOMAXPROCS(0)) fmt.Println() shards := collectShards(dbs) @@ -196,19 +202,32 @@ } w.Flush() - // Get confirmation from user. - fmt.Printf("\nThese shards will be converted. Proceed? 
y/N: ") - liner := bufio.NewReader(os.Stdin) - yn, err := liner.ReadString('\n') - if err != nil { - log.Fatalf("failed to read response: %v", err) - } - yn = strings.TrimRight(strings.ToLower(yn), "\n") - if yn != "y" { - log.Fatal("Conversion aborted.") + if !opts.Yes { + // Get confirmation from user. + fmt.Printf("\nThese shards will be converted. Proceed? y/N: ") + liner := bufio.NewReader(os.Stdin) + yn, err := liner.ReadString('\n') + if err != nil { + log.Fatalf("failed to read response: %v", err) + } + yn = strings.TrimRight(strings.ToLower(yn), "\n") + if yn != "y" { + log.Fatal("Conversion aborted.") + } } fmt.Println("Conversion starting....") + if opts.CPUFile != "" { + f, err := os.Create(opts.CPUFile) + if err != nil { + log.Fatal(err) + } + if err = pprof.StartCPUProfile(f); err != nil { + log.Fatal(err) + } + defer pprof.StopCPUProfile() + } + tr := newTracker(shards, opts) if err := tr.Run(); err != nil { @@ -316,19 +335,19 @@ var reader ShardReader switch si.Format { case tsdb.BZ1: - reader = bz1.NewReader(src) + reader = bz1.NewReader(src, &tr.Stats, 0) case tsdb.B1: - reader = b1.NewReader(src) + reader = b1.NewReader(src, &tr.Stats, 0) default: return fmt.Errorf("Unsupported shard format: %v", si.FormatAsString()) } - defer reader.Close() // Open the shard, and create a converter. if err := reader.Open(); err != nil { return fmt.Errorf("Failed to open %v for conversion: %v", src, err) } - converter := NewConverter(dst, uint32(opts.TSMSize), tr) + defer reader.Close() + converter := NewConverter(dst, uint32(opts.TSMSize), &tr.Stats) // Perform the conversion. if err := converter.Process(reader); err != nil { @@ -359,6 +378,7 @@ return make(chan struct{}, n) } +// Do executes one operation of the ParallelGroup func (p ParallelGroup) Do(f func()) { p <- struct{}{} // acquire working slot defer func() { <-p }() diff -Nru influxdb-0.10.0+dfsg1/cmd/influx_tsm/README.md influxdb-1.1.1+dfsg1/cmd/influx_tsm/README.md --- influxdb-0.10.0+dfsg1/cmd/influx_tsm/README.md 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influx_tsm/README.md 2016-12-06 21:36:15.000000000 +0000 @@ -1,13 +1,25 @@ # Converting b1 and bz1 shards to tsm1 -`influx_tsm` is a tool for converting b1 and bz1 shards to tsm1 format. Converting shards to tsm1 format results in a very significant reduction in disk usage, and significantly improved write-throughput, when writing data into those shards. -Conversion can be controlled on a database-by-database basis. By default a database is backed up before it is converted, allowing you to roll back any changes. Because of the backup process, ensure the host system has at least as much free disk space as the disk space consumed by the _data_ directory of your InfluxDB system. - -The tool automatically ignores tsm1 shards, and can be run idempotently on any database. - -Conversion is an offline process, and the InfluxDB system must be stopped during conversion. However the conversion process reads and writes shards directly on disk and should be fast. +`influx_tsm` is a tool for converting b1 and bz1 shards to tsm1 +format. Converting shards to tsm1 format results in a very significant +reduction in disk usage, and significantly improved write-throughput, +when writing data into those shards. + +Conversion can be controlled on a database-by-database basis. By +default a database is backed up before it is converted, allowing you +to roll back any changes. 
Because of the backup process, ensure the +host system has at least as much free disk space as the disk space +consumed by the _data_ directory of your InfluxDB system. + +The tool automatically ignores tsm1 shards, and can be run +idempotently on any database. + +Conversion is an offline process, and the InfluxDB system must be +stopped during conversion. However the conversion process reads and +writes shards directly on disk and should be fast. ## Steps + Follow these steps to perform a conversion. * Identify the databases you wish to convert. You can convert one or more databases at a time. By default all databases are converted. @@ -15,41 +27,43 @@ * Stop all write-traffic to your InfluxDB system. * Restart the InfluxDB service and wait until all WAL data is flushed to disk -- this has completed when the system responds to queries. This is to ensure all data is present in shards. * Stop the InfluxDB service. It should not be restarted until conversion is complete. -* Run conversion tool. +* Run conversion tool. Depending on the size of the data directory, this might be a lengthy operation. Consider running the conversion tool under a "screen" session to avoid any interruptions. * Unless you ran the conversion tool as the same user as that which runs InfluxDB, then you may need to set the correct read-and-write permissions on the new tsm1 directories. * Restart node and ensure data looks correct. * If everything looks OK, you may then wish to remove or archive the backed-up databases. * Restart write traffic. ## Example session + Below is an example session, showing a database being converted. ``` -$ mkdir ~/influxdb_backup -$ influx_tsm -backup ~/influxdb_backup -parallel ~/.influxdb/data +$ # Create a backup location that the `influxdb` user has full access to +$ mkdir -m 0777 /path/to/influxdb_backup +$ sudo -u influxdb influx_tsm -backup /path/to/influxdb_backup -parallel /var/lib/influxdb/data b1 and bz1 shard conversion. ----------------------------------- -Data directory is: /home/user/.influxdb/data -Backup directory is: /home/user/influxdb_backup -Databases specified: all -Database backups enabled: yes -Parallel mode enabled: yes 8 +Data directory is: /var/lib/influxdb/data +Backup directory is: /path/to/influxdb_backup +Databases specified: all +Database backups enabled: yes +Parallel mode enabled (GOMAXPROCS): yes (8) Found 1 shards that will be converted. Database Retention Path Engine Size -_internal monitor /home/user/.influxdb/data/_internal/monitor/1 bz1 65536 +_internal monitor /var/lib/influxdb/data/_internal/monitor/1 bz1 65536 These shards will be converted. Proceed? y/N: y Conversion starting.... Backing up 1 databases... 
2016/01/28 12:23:43.699266 Backup of databse '_internal' started -2016/01/28 12:23:43.699883 Backing up file /home/user/.influxdb/data/_internal/monitor/1 +2016/01/28 12:23:43.699883 Backing up file /var/lib/influxdb/data/_internal/monitor/1 2016/01/28 12:23:43.700052 Database _internal backed up (851.776µs) -2016/01/28 12:23:43.700320 Starting conversion of shard: /home/user/.influxdb/data/_internal/monitor/1 -2016/01/28 12:23:43.706276 Conversion of /home/user/.influxdb/data/_internal/monitor/1 successful (6.040148ms) +2016/01/28 12:23:43.700320 Starting conversion of shard: /var/lib/influxdb/data/_internal/monitor/1 +2016/01/28 12:23:43.706276 Conversion of /var/lib/influxdb/data/_internal/monitor/1 successful (6.040148ms) Summary statistics ======================================== @@ -68,22 +82,71 @@ Total conversion time: 7.330443ms $ # restart node, verify data - -$ rm -r ~/influxdb_backup +$ sudo rm -r /path/to/influxdb_backup ``` -Note that the tool first lists the shards that will be converted, before asking for confirmation. You can abort the conversion process at this step if you just wish to see what would be converted, or if the list of shards does not look correct. +Note that the tool first lists the shards that will be converted, +before asking for confirmation. You can abort the conversion process +at this step if you just wish to see what would be converted, or if +the list of shards does not look correct. + +__WARNING:__ If you run the `influx_tsm` tool as a user other than the +`influxdb` user (or the user that the InfluxDB process runs under), +please make sure to verify the shard permissions are correct prior to +starting InfluxDB. If needed, shard permissions can be corrected with +the `chown` command. For example: + +``` +sudo chown -R influxdb:influxdb /var/lib/influxdb +``` ## Rolling back a conversion -After a successful backup (the message `Database XYZ backed up` was logged), you have a duplicate of that database in the _backup_ directory you provided on the command line. If, when checking your data after a successful conversion, you notice things missing or something just isn't right, you can "undo" the conversion: + +After a successful backup (the message `Database XYZ backed up` was +logged), you have a duplicate of that database in the _backup_ +directory you provided on the command line. If, when checking your +data after a successful conversion, you notice things missing or +something just isn't right, you can "undo" the conversion: - Shut down your node (this is very important) -- Remove the database's directory from the influxdb `data` directory (default: ~/.influxdb/data/XYZ) +- Remove the database's directory from the influxdb `data` directory (default: `~/.influxdb/data/XYZ` for binary installations or `/var/lib/influxdb/data/XYZ` for packaged installations) - Copy (to really make sure the shard is preserved) the database's directory from the backup directory you created into the `data` directory. Using the same directories as above, and assuming a database named `stats`: + ``` -$ rm -r ~/.influxdb/data/stats -$ cp -r ~/influxdb_backup/stats ~/.influxdb/data/ +$ sudo rm -r /var/lib/influxdb/data/stats +$ sudo cp -r /path/to/influxdb_backup/stats /var/lib/influxdb/data/ $ # restart influxd node ``` + +#### How to avoid downtime when upgrading shards + +*Identify non-`tsm1` shards* + +Non-`tsm1` shards are files of the form: `data/<database>/<retention policy>/<shard id>`. + +`tsm1` shards are files of the form: `data/<database>/<retention policy>/<shard id>/<file>.tsm`. 
+ +*Determine which `bz`/`bz1` shards are cold for writes* + +Run the `SHOW SHARDS` query to see the start and end dates for shards. +If the date range for a shard does not span the current time then the shard is said to be cold for writes. +This means that no new points are expected to be added to the shard. +The shard whose date range spans now is said to be hot for writes. +You can only safely convert cold shards without stopping the InfluxDB process. + +*Convert cold shards* + +1. Copy each of the cold shards you'd like to convert to a new directory with the structure `/tmp/data/<database>/<retention policy>/<shard id>`. +2. Run the `influx_tsm` tool on the copied files: +``` +influx_tsm -parallel /tmp/data/ +``` +3. Remove the existing cold `b1`/`bz1` shards from the production data directory. +4. Move the new `tsm1` shards into the original directory, overwriting the existing `b1`/`bz1` shards of the same name. Do this simultaneously with step 3 to avoid any query errors. +5. Wait an hour, a day, or a week (depending on your retention period) for any hot `b1`/`bz1` shards to become cold and repeat steps 1 through 4 on the newly cold shards. + +> **Note:** Any points written to the cold shards after making a copy will be lost when the `tsm1` shard overwrites the existing cold shard. +Nothing in InfluxDB will prevent writes to cold shards; they are merely unexpected, not impossible. +It is your responsibility to prevent writes to cold shards to prevent data loss. diff -Nru influxdb-0.10.0+dfsg1/cmd/influx_tsm/stats/stats.go influxdb-1.1.1+dfsg1/cmd/influx_tsm/stats/stats.go --- influxdb-0.10.0+dfsg1/cmd/influx_tsm/stats/stats.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influx_tsm/stats/stats.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,54 @@ +package stats + +import ( + "sync/atomic" + "time" +) + +// Stats are the statistics captured while converting non-TSM shards to TSM +type Stats struct { + NanFiltered uint64 + InfFiltered uint64 + FieldsFiltered uint64 + PointsWritten uint64 + PointsRead uint64 + TsmFilesCreated uint64 + TsmBytesWritten uint64 + CompletedShards uint64 + TotalTime time.Duration +} + +// AddPointsRead increments the number of read points. +func (s *Stats) AddPointsRead(n int) { + atomic.AddUint64(&s.PointsRead, uint64(n)) +} + +// AddPointsWritten increments the number of written points. +func (s *Stats) AddPointsWritten(n int) { + atomic.AddUint64(&s.PointsWritten, uint64(n)) +} + +// AddTSMBytes increments the number of TSM Bytes. +func (s *Stats) AddTSMBytes(n uint32) { + atomic.AddUint64(&s.TsmBytesWritten, uint64(n)) +} + +// IncrTSMFileCount increments the number of TSM files created. +func (s *Stats) IncrTSMFileCount() { + atomic.AddUint64(&s.TsmFilesCreated, 1) +} + +// IncrNaN increments the number of NaNs filtered. +func (s *Stats) IncrNaN() { + atomic.AddUint64(&s.NanFiltered, 1) +} + +// IncrInf increments the number of Infs filtered. +func (s *Stats) IncrInf() { + atomic.AddUint64(&s.InfFiltered, 1) +} + +// IncrFiltered increments the number of fields filtered. 
+func (s *Stats) IncrFiltered() { + atomic.AddUint64(&s.FieldsFiltered, 1) +} diff -Nru influxdb-0.10.0+dfsg1/cmd/influx_tsm/tracker.go influxdb-1.1.1+dfsg1/cmd/influx_tsm/tracker.go --- influxdb-0.10.0+dfsg1/cmd/influx_tsm/tracker.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influx_tsm/tracker.go 2016-12-06 21:36:15.000000000 +0000 @@ -8,14 +8,13 @@ "sync/atomic" "time" - "github.com/influxdb/influxdb/cmd/influx_tsm/b1" - "github.com/influxdb/influxdb/cmd/influx_tsm/bz1" - "github.com/influxdb/influxdb/cmd/influx_tsm/tsdb" + "github.com/influxdata/influxdb/cmd/influx_tsm/stats" + "github.com/influxdata/influxdb/cmd/influx_tsm/tsdb" ) // tracker will orchestrate and track the conversions of non-TSM shards to TSM type tracker struct { - stats Stats + Stats stats.Stats shards tsdb.ShardInfos opts options @@ -24,17 +23,6 @@ wg sync.WaitGroup } -type Stats struct { - NanFiltered uint64 - InfFiltered uint64 - PointsWritten uint64 - PointsRead uint64 - TsmFilesCreated uint64 - TsmBytesWritten uint64 - CompletedShards uint64 - TotalTime time.Duration -} - // newTracker will setup and return a clean tracker instance func newTracker(shards tsdb.ShardInfos, opts options) *tracker { t := &tracker{ @@ -46,10 +34,6 @@ return t } -func (t *tracker) Errorf(str string, args ...interface{}) { - -} - func (t *tracker) Run() error { conversionStart := time.Now() @@ -64,7 +48,7 @@ defer t.wg.Done() start := time.Now() - log.Printf("Backup of databse '%v' started", db) + log.Printf("Backup of database '%v' started", db) err := backupDatabase(db) if err != nil { log.Fatalf("Backup of database %v failed: %v\n", db, err) @@ -82,7 +66,7 @@ si := t.shards[i] go t.pg.Do(func() { defer func() { - atomic.AddUint64(&t.stats.CompletedShards, 1) + atomic.AddUint64(&t.Stats.CompletedShards, 1) t.wg.Done() }() @@ -111,60 +95,36 @@ } } - t.stats.TotalTime = time.Since(conversionStart) + t.Stats.TotalTime = time.Since(conversionStart) return nil } func (t *tracker) StatusUpdate() { - shardCount := atomic.LoadUint64(&t.stats.CompletedShards) - pointCount := atomic.LoadUint64(&t.stats.PointsRead) - pointWritten := atomic.LoadUint64(&t.stats.PointsWritten) + shardCount := atomic.LoadUint64(&t.Stats.CompletedShards) + pointCount := atomic.LoadUint64(&t.Stats.PointsRead) + pointWritten := atomic.LoadUint64(&t.Stats.PointsWritten) log.Printf("Still Working: Completed Shards: %d/%d Points read/written: %d/%d", shardCount, len(t.shards), pointCount, pointWritten) } func (t *tracker) PrintStats() { preSize := t.shards.Size() - postSize := int64(t.stats.TsmBytesWritten) + postSize := int64(t.Stats.TsmBytesWritten) fmt.Printf("\nSummary statistics\n========================================\n") fmt.Printf("Databases converted: %d\n", len(t.shards.Databases())) fmt.Printf("Shards converted: %d\n", len(t.shards)) - fmt.Printf("TSM files created: %d\n", t.stats.TsmFilesCreated) - fmt.Printf("Points read: %d\n", t.stats.PointsRead) - fmt.Printf("Points written: %d\n", t.stats.PointsWritten) - fmt.Printf("NaN filtered: %d\n", t.stats.NanFiltered) - fmt.Printf("Inf filtered: %d\n", t.stats.InfFiltered) - fmt.Printf("Points without fields filtered: %d\n", b1.NoFieldsFiltered+bz1.NoFieldsFiltered) + fmt.Printf("TSM files created: %d\n", t.Stats.TsmFilesCreated) + fmt.Printf("Points read: %d\n", t.Stats.PointsRead) + fmt.Printf("Points written: %d\n", t.Stats.PointsWritten) + fmt.Printf("NaN filtered: %d\n", t.Stats.NanFiltered) + fmt.Printf("Inf filtered: %d\n", t.Stats.InfFiltered) + fmt.Printf("Points without fields 
filtered: %d\n", t.Stats.FieldsFiltered) fmt.Printf("Disk usage pre-conversion (bytes): %d\n", preSize) fmt.Printf("Disk usage post-conversion (bytes): %d\n", postSize) fmt.Printf("Reduction factor: %d%%\n", 100*(preSize-postSize)/preSize) - fmt.Printf("Bytes per TSM point: %.2f\n", float64(postSize)/float64(t.stats.PointsWritten)) - fmt.Printf("Total conversion time: %v\n", t.stats.TotalTime) + fmt.Printf("Bytes per TSM point: %.2f\n", float64(postSize)/float64(t.Stats.PointsWritten)) + fmt.Printf("Total conversion time: %v\n", t.Stats.TotalTime) fmt.Println() } - -func (t *tracker) AddPointsRead(n int) { - atomic.AddUint64(&t.stats.PointsRead, uint64(n)) -} - -func (t *tracker) AddPointsWritten(n int) { - atomic.AddUint64(&t.stats.PointsWritten, uint64(n)) -} - -func (t *tracker) AddTSMBytes(n uint32) { - atomic.AddUint64(&t.stats.TsmBytesWritten, uint64(n)) -} - -func (t *tracker) IncrTSMFileCount() { - atomic.AddUint64(&t.stats.TsmFilesCreated, 1) -} - -func (t *tracker) IncrNaN() { - atomic.AddUint64(&t.stats.NanFiltered, 1) -} - -func (t *tracker) IncrInf() { - atomic.AddUint64(&t.stats.InfFiltered, 1) -} diff -Nru influxdb-0.10.0+dfsg1/cmd/influx_tsm/tsdb/codec.go influxdb-1.1.1+dfsg1/cmd/influx_tsm/tsdb/codec.go --- influxdb-0.10.0+dfsg1/cmd/influx_tsm/tsdb/codec.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influx_tsm/tsdb/codec.go 2016-12-06 21:36:15.000000000 +0000 @@ -2,15 +2,17 @@ import ( "encoding/binary" - "encoding/json" "errors" "fmt" "math" - - "github.com/influxdb/influxdb/influxql" ) -const maxStringLength = 64 * 1024 +const ( + fieldFloat = 1 + fieldInteger = 2 + fieldBoolean = 3 + fieldString = 4 +) var ( // ErrFieldNotFound is returned when a field cannot be found. @@ -40,79 +42,6 @@ return &FieldCodec{fieldsByID: fieldsByID, fieldsByName: fieldsByName} } -// EncodeFields converts a map of values with string keys to a byte slice of field -// IDs and values. -// -// If a field exists in the codec, but its type is different, an error is returned. If -// a field is not present in the codec, the system panics. -func (f *FieldCodec) EncodeFields(values map[string]interface{}) ([]byte, error) { - // Allocate byte slice - b := make([]byte, 0, 10) - - for k, v := range values { - field := f.fieldsByName[k] - if field == nil { - panic(fmt.Sprintf("field does not exist for %s", k)) - } else if influxql.InspectDataType(v) != field.Type { - return nil, fmt.Errorf("field \"%s\" is type %T, mapped as type %s", k, v, field.Type) - } - - var buf []byte - - switch field.Type { - case influxql.Float: - value := v.(float64) - buf = make([]byte, 9) - binary.BigEndian.PutUint64(buf[1:9], math.Float64bits(value)) - case influxql.Integer: - var value uint64 - switch v.(type) { - case int: - value = uint64(v.(int)) - case int32: - value = uint64(v.(int32)) - case int64: - value = uint64(v.(int64)) - default: - panic(fmt.Sprintf("invalid integer type: %T", v)) - } - buf = make([]byte, 9) - binary.BigEndian.PutUint64(buf[1:9], value) - case influxql.Boolean: - value := v.(bool) - - // Only 1 byte need for a boolean. - buf = make([]byte, 2) - if value { - buf[1] = byte(1) - } - case influxql.String: - value := v.(string) - if len(value) > maxStringLength { - value = value[:maxStringLength] - } - // Make a buffer for field ID (1 bytes), the string length (2 bytes), and the string. - buf = make([]byte, len(value)+3) - - // Set the string length, then copy the string itself. 
- binary.BigEndian.PutUint16(buf[1:3], uint16(len(value))) - for i, c := range []byte(value) { - buf[i+3] = byte(c) - } - default: - panic(fmt.Sprintf("unsupported value type during encode fields: %T", v)) - } - - // Always set the field ID as the leading byte. - buf[0] = field.ID - - // Append temp buffer to the end. - b = append(b, buf...) - } - - return b, nil -} - // FieldIDByName returns the ID for the given field. func (f *FieldCodec) FieldIDByName(s string) (uint8, error) { fi := f.fieldsByName[s] @@ -122,132 +51,56 @@ return fi.ID, nil } -// DecodeFields decodes a byte slice into a set of field ids and values. -func (f *FieldCodec) DecodeFields(b []byte) (map[uint8]interface{}, error) { - if len(b) == 0 { - return nil, nil - } - - // Create a map to hold the decoded data. - values := make(map[uint8]interface{}, 0) - - for { - if len(b) < 1 { - // No more bytes. - break - } - - // First byte is the field identifier. - fieldID := b[0] - field := f.fieldsByID[fieldID] - if field == nil { - // See note in DecodeByID() regarding field-mapping failures. - return nil, ErrFieldUnmappedID - } - - var value interface{} - switch field.Type { - case influxql.Float: - value = math.Float64frombits(binary.BigEndian.Uint64(b[1:9])) - // Move bytes forward. - b = b[9:] - case influxql.Integer: - value = int64(binary.BigEndian.Uint64(b[1:9])) - // Move bytes forward. - b = b[9:] - case influxql.Boolean: - if b[1] == 1 { - value = true - } else { - value = false - } - // Move bytes forward. - b = b[2:] - case influxql.String: - size := binary.BigEndian.Uint16(b[1:3]) - value = string(b[3 : size+3]) - // Move bytes forward. - b = b[size+3:] - default: - panic(fmt.Sprintf("unsupported value type during decode fields: %T", f.fieldsByID[fieldID])) - } - - values[fieldID] = value - } - - return values, nil -} - -// DecodeFieldsWithNames decodes a byte slice into a set of field names and values -func (f *FieldCodec) DecodeFieldsWithNames(b []byte) (map[string]interface{}, error) { - fields, err := f.DecodeFields(b) - if err != nil { - return nil, err - } - m := make(map[string]interface{}) - for id, v := range fields { - field := f.fieldsByID[id] - if field != nil { - m[field.Name] = v - } - } - return m, nil -} - // DecodeByID scans a byte slice for a field with the given ID, converts it to its // expected type, and return that value. func (f *FieldCodec) DecodeByID(targetID uint8, b []byte) (interface{}, error) { - if len(b) == 0 { - return 0, ErrFieldNotFound - } - + var value interface{} for { - if len(b) < 1 { + if len(b) == 0 { // No more bytes. - break + return nil, ErrFieldNotFound } - field, ok := f.fieldsByID[b[0]] - if !ok { + + field := f.fieldsByID[b[0]] + if field == nil { // This can happen, though is very unlikely. If this node receives encoded data, to be written // to disk, and is queried for that data before its metastore is updated, there will be no field // mapping for the data during decode. All this can happen because data is encoded by the node // that first received the write request, not the node that actually writes the data to disk. // So if this happens, the read must be aborted. - return 0, ErrFieldUnmappedID + return nil, ErrFieldUnmappedID } - var value interface{} switch field.Type { - case influxql.Float: - // Move bytes forward. 
- value = math.Float64frombits(binary.BigEndian.Uint64(b[1:9])) + case fieldFloat: + if field.ID == targetID { + value = math.Float64frombits(binary.BigEndian.Uint64(b[1:9])) + } b = b[9:] - case influxql.Integer: - value = int64(binary.BigEndian.Uint64(b[1:9])) + case fieldInteger: + if field.ID == targetID { + value = int64(binary.BigEndian.Uint64(b[1:9])) + } b = b[9:] - case influxql.Boolean: - if b[1] == 1 { - value = true - } else { - value = false + case fieldBoolean: + if field.ID == targetID { + value = b[1] == 1 } - // Move bytes forward. b = b[2:] - case influxql.String: - size := binary.BigEndian.Uint16(b[1:3]) - value = string(b[3 : 3+size]) - // Move bytes forward. - b = b[size+3:] + case fieldString: + length := binary.BigEndian.Uint16(b[1:3]) + if field.ID == targetID { + value = string(b[3 : 3+length]) + } + b = b[3+length:] default: panic(fmt.Sprintf("unsupported value type during decode by id: %T", field.Type)) } - if field.ID == targetID { + if value != nil { return value, nil } } - - return 0, ErrFieldNotFound } // DecodeByName scans a byte slice for a field with the given name, converts it to its @@ -260,34 +113,7 @@ return f.DecodeByID(fi.ID, b) } -func (f *FieldCodec) Fields() (a []*Field) { - for _, f := range f.fieldsByID { - a = append(a, f) - } - return -} - // FieldByName returns the field by its name. It will return a nil if not found func (f *FieldCodec) FieldByName(name string) *Field { return f.fieldsByName[name] } - -// mustMarshal encodes a value to JSON. -// This will panic if an error occurs. This should only be used internally when -// an invalid marshal will cause corruption and a panic is appropriate. -func mustMarshalJSON(v interface{}) []byte { - b, err := json.Marshal(v) - if err != nil { - panic("marshal: " + err.Error()) - } - return b -} - -// mustUnmarshalJSON decodes a value from JSON. -// This will panic if an error occurs. This should only be used internally when -// an invalid unmarshal will cause corruption and a panic is appropriate. -func mustUnmarshalJSON(b []byte, v interface{}) { - if err := json.Unmarshal(b, v); err != nil { - panic("unmarshal: " + err.Error()) - } -} diff -Nru influxdb-0.10.0+dfsg1/cmd/influx_tsm/tsdb/database.go influxdb-1.1.1+dfsg1/cmd/influx_tsm/tsdb/database.go --- influxdb-0.10.0+dfsg1/cmd/influx_tsm/tsdb/database.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influx_tsm/tsdb/database.go 2016-12-06 21:36:15.000000000 +0000 @@ -1,4 +1,4 @@ -package tsdb +package tsdb // import "github.com/influxdata/influxdb/cmd/influx_tsm/tsdb" import ( "fmt" @@ -9,15 +9,17 @@ "time" "github.com/boltdb/bolt" - "github.com/influxdb/influxdb/pkg/slices" + "github.com/influxdata/influxdb/pkg/slices" ) +// Flags for differentiating between engines const ( B1 = iota BZ1 TSM1 ) +// EngineFormat holds the flag for the engine type EngineFormat int // String returns the string format of the engine. 
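[Editor's aside on the field-codec rewrite above: the new `DecodeByID` walks the same byte layout that the removed `EncodeFields` produced — a 1-byte field ID followed by an 8-byte big-endian float or integer, a single value byte for a boolean, or a 2-byte big-endian length plus the string bytes. The standalone sketch below is plain Go, not the influx_tsm package's own API; `decodeByID` and the `types` map are illustrative stand-ins for the codec's field table, shown only to make the buffer layout concrete.]

```go
// Standalone sketch (assumed layout per the diff above): scan a byte slice of
// [1-byte field ID][value], where a float is 8 big-endian bytes, a bool is 1
// byte, and a string is a 2-byte big-endian length followed by its bytes.
package main

import (
	"encoding/binary"
	"fmt"
	"math"
)

// decodeByID returns the value for targetID, skipping over other fields.
func decodeByID(targetID uint8, buf []byte, types map[uint8]string) (interface{}, error) {
	for len(buf) > 0 {
		id := buf[0]
		switch types[id] {
		case "float":
			v := math.Float64frombits(binary.BigEndian.Uint64(buf[1:9]))
			if id == targetID {
				return v, nil
			}
			buf = buf[9:] // ID byte + 8 value bytes
		case "bool":
			v := buf[1] == 1
			if id == targetID {
				return v, nil
			}
			buf = buf[2:] // ID byte + 1 value byte
		case "string":
			n := binary.BigEndian.Uint16(buf[1:3])
			v := string(buf[3 : 3+n])
			if id == targetID {
				return v, nil
			}
			buf = buf[3+n:] // ID byte + 2 length bytes + n string bytes
		default:
			return nil, fmt.Errorf("unknown field ID %d", id)
		}
	}
	return nil, fmt.Errorf("field %d not found", targetID)
}

func main() {
	// Build a buffer by hand: field 1 = float64(3.5), field 2 = "ok".
	buf := []byte{1}
	var f [8]byte
	binary.BigEndian.PutUint64(f[:], math.Float64bits(3.5))
	buf = append(buf, f[:]...)
	buf = append(buf, 2, 0, 2, 'o', 'k')

	types := map[uint8]string{1: "float", 2: "string"}
	v, err := decodeByID(2, buf, types)
	fmt.Println(v, err) // ok <nil>
}
```

[Unlike this sketch, the real codec also decodes integers (8 big-endian bytes, like floats), only materializes the value when the ID matches, and returns ErrFieldUnmappedID when an ID is missing from its field table.]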
@@ -53,6 +55,7 @@ return filepath.Join(dataPath, s.Database, s.RetentionPolicy, s.Path) } +// ShardInfos is an array of ShardInfo type ShardInfos []*ShardInfo func (s ShardInfos) Len() int { return len(s) } @@ -61,10 +64,11 @@ if s[i].Database == s[j].Database { if s[i].RetentionPolicy == s[j].RetentionPolicy { return s[i].Path < s[i].Path - } else { - return s[i].RetentionPolicy < s[j].RetentionPolicy } + + return s[i].RetentionPolicy < s[j].RetentionPolicy } + return s[i].Database < s[j].Database } @@ -76,7 +80,7 @@ } var dbs []string - for k, _ := range dbm { + for k := range dbm { dbs = append(dbs, k) } sort.Strings(dbs) @@ -192,12 +196,7 @@ // shardFormat returns the format and size on disk of the shard at path. func shardFormat(path string) (EngineFormat, int64, error) { // If it's a directory then it's a tsm1 engine - f, err := os.Open(path) - if err != nil { - return 0, 0, err - } - fi, err := f.Stat() - f.Close() + fi, err := os.Stat(path) if err != nil { return 0, 0, err } @@ -224,13 +223,13 @@ } // There is an actual format indicator. - switch string(b.Get([]byte("format"))) { + switch f := string(b.Get([]byte("format"))); f { case "b1", "v1": format = B1 case "bz1": format = BZ1 default: - return fmt.Errorf("unrecognized engine format: %s", string(b.Get([]byte("format")))) + return fmt.Errorf("unrecognized engine format: %s", f) } return nil diff -Nru influxdb-0.10.0+dfsg1/cmd/influx_tsm/tsdb/types.go influxdb-1.1.1+dfsg1/cmd/influx_tsm/tsdb/types.go --- influxdb-0.10.0+dfsg1/cmd/influx_tsm/tsdb/types.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influx_tsm/tsdb/types.go 2016-12-06 21:36:15.000000000 +0000 @@ -4,18 +4,12 @@ "encoding/binary" "strings" - "github.com/influxdb/influxdb/cmd/influx_tsm/tsdb/internal" - "github.com/influxdb/influxdb/influxql" + "github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/internal" + "github.com/influxdata/influxdb/influxql" "github.com/gogo/protobuf/proto" ) -// Cursor represents an iterator over a series. -type Cursor interface { - SeekTo(seek int64) (key int64, value interface{}) - Next() (key int64, value interface{}) -} - // Field represents an encoded field. type Field struct { ID uint8 `json:"id,omitempty"` @@ -29,18 +23,6 @@ Codec *FieldCodec } -// MarshalBinary encodes the object to a binary format. -func (m *MeasurementFields) MarshalBinary() ([]byte, error) { - var pb internal.MeasurementFields - for _, f := range m.Fields { - id := int32(f.ID) - name := f.Name - t := int32(f.Type) - pb.Fields = append(pb.Fields, &internal.Field{ID: &id, Name: &name, Type: &t}) - } - return proto.Marshal(&pb) -} - // UnmarshalBinary decodes the object from a binary format. func (m *MeasurementFields) UnmarshalBinary(buf []byte) error { var pb internal.MeasurementFields @@ -60,45 +42,15 @@ Tags map[string]string } -// MarshalBinary encodes the object to a binary format. -func (s *Series) MarshalBinary() ([]byte, error) { - var pb internal.Series - pb.Key = &s.Key - for k, v := range s.Tags { - key := k - value := v - pb.Tags = append(pb.Tags, &internal.Tag{Key: &key, Value: &value}) - } - return proto.Marshal(&pb) -} - -// UnmarshalBinary decodes the object from a binary format. 
-func (s *Series) UnmarshalBinary(buf []byte) error { - var pb internal.Series - if err := proto.Unmarshal(buf, &pb); err != nil { - return err - } - s.Key = pb.GetKey() - s.Tags = make(map[string]string) - for _, t := range pb.Tags { - s.Tags[t.GetKey()] = t.GetValue() - } - return nil -} - // MeasurementFromSeriesKey returns the Measurement name for a given series. func MeasurementFromSeriesKey(key string) string { - idx := strings.Index(key, ",") - if idx == -1 { - return key - } - return key[:strings.Index(key, ",")] + return strings.SplitN(key, ",", 2)[0] } // DecodeKeyValue decodes the key and value from bytes. func DecodeKeyValue(field string, dec *FieldCodec, k, v []byte) (int64, interface{}) { // Convert key to a timestamp. - key := int64(btou64(k[0:8])) + key := int64(binary.BigEndian.Uint64(k[0:8])) decValue, err := dec.DecodeByName(field, v) if err != nil { @@ -106,6 +58,3 @@ } return key, decValue } - -// btou64 converts an 8-byte slice into an uint64. -func btou64(b []byte) uint64 { return binary.BigEndian.Uint64(b) } diff -Nru influxdb-0.10.0+dfsg1/cmd/influx_tsm/tsdb/values.go influxdb-1.1.1+dfsg1/cmd/influx_tsm/tsdb/values.go --- influxdb-0.10.0+dfsg1/cmd/influx_tsm/tsdb/values.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/influx_tsm/tsdb/values.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,139 +0,0 @@ -package tsdb - -import ( - "fmt" - "time" - - tsm "github.com/influxdb/influxdb/tsdb/engine/tsm1" -) - -type FloatValue struct { - T time.Time - V float64 -} - -func (f *FloatValue) Time() time.Time { - return f.T -} - -func (f *FloatValue) UnixNano() int64 { - return f.T.UnixNano() -} - -func (f *FloatValue) Value() interface{} { - return f.V -} - -func (f *FloatValue) Size() int { - return 16 -} - -func (f *FloatValue) String() string { - return fmt.Sprintf("%v %v", f.Time(), f.Value()) -} - -type BoolValue struct { - T time.Time - V bool -} - -func (b *BoolValue) Time() time.Time { - return b.T -} - -func (b *BoolValue) Size() int { - return 9 -} - -func (b *BoolValue) UnixNano() int64 { - return b.T.UnixNano() -} - -func (b *BoolValue) Value() interface{} { - return b.V -} - -func (f *BoolValue) String() string { - return fmt.Sprintf("%v %v", f.Time(), f.Value()) -} - -type Int64Value struct { - T time.Time - V int64 -} - -func (v *Int64Value) Time() time.Time { - return v.T -} - -func (v *Int64Value) Value() interface{} { - return v.V -} - -func (v *Int64Value) UnixNano() int64 { - return v.T.UnixNano() -} - -func (v *Int64Value) Size() int { - return 16 -} - -func (f *Int64Value) String() string { - return fmt.Sprintf("%v %v", f.Time(), f.Value()) -} - -type StringValue struct { - T time.Time - V string -} - -func (v *StringValue) Time() time.Time { - return v.T -} - -func (v *StringValue) Value() interface{} { - return v.V -} - -func (v *StringValue) UnixNano() int64 { - return v.T.UnixNano() -} - -func (v *StringValue) Size() int { - return 8 + len(v.V) -} - -func (f *StringValue) String() string { - return fmt.Sprintf("%v %v", f.Time(), f.Value()) -} - -func ConvertToValue(k int64, v interface{}) tsm.Value { - var value tsm.Value - - switch v := v.(type) { - case int64: - value = &Int64Value{ - T: time.Unix(0, k), - V: v, - } - case float64: - value = &FloatValue{ - T: time.Unix(0, k), - V: v, - } - case bool: - value = &BoolValue{ - T: time.Unix(0, k), - V: v, - } - case string: - value = &StringValue{ - T: time.Unix(0, k), - V: v, - } - default: - panic(fmt.Sprintf("value type %T unsupported for conversion", v)) - } - - return value -} diff 
-Nru influxdb-0.10.0+dfsg1/cmd/parse.go influxdb-1.1.1+dfsg1/cmd/parse.go --- influxdb-0.10.0+dfsg1/cmd/parse.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/cmd/parse.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,28 @@ +package cmd + +import "strings" + +// ParseCommandName extracts the command name and args from the args list. +func ParseCommandName(args []string) (string, []string) { + // Retrieve command name as first argument. + var name string + if len(args) > 0 { + if !strings.HasPrefix(args[0], "-") { + name = args[0] + } else if args[0] == "-h" || args[0] == "-help" || args[0] == "--help" { + // Special case -h immediately following binary name + name = "help" + } + } + + // If command is "help" and has an argument then rewrite args to use "-h". + if name == "help" && len(args) > 2 && !strings.HasPrefix(args[1], "-") { + return args[1], []string{"-h"} + } + + // If a named command is specified then return it with its arguments. + if name != "" { + return name, args[1:] + } + return "", args +} diff -Nru influxdb-0.10.0+dfsg1/CODING_GUIDELINES.md influxdb-1.1.1+dfsg1/CODING_GUIDELINES.md --- influxdb-0.10.0+dfsg1/CODING_GUIDELINES.md 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/CODING_GUIDELINES.md 2016-12-06 21:36:15.000000000 +0000 @@ -8,7 +8,7 @@ # The Guidelines -## Try not to use third-party libaries +## Try not to use third-party libraries A third-party package is defined as one that is not part of the standard Go distribution. Generally speaking we prefer to minimize our use of third-party packages, and avoid them unless absolutely necessarily. We'll often write a little bit of code rather than pull in a third-party package. Of course, we do use some third-party packages -- most importantly we use [BoltDB](https://github.com/boltdb/bolt) in some storage engines. So to maximise the chance your change will be accepted by us, use only the standard libraries, or the third-party packages we have decided to use. diff -Nru influxdb-0.10.0+dfsg1/CONTRIBUTING.md influxdb-1.1.1+dfsg1/CONTRIBUTING.md --- influxdb-0.10.0+dfsg1/CONTRIBUTING.md 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/CONTRIBUTING.md 2016-12-06 21:36:15.000000000 +0000 @@ -57,7 +57,7 @@ - [ ] CHANGELOG.md updated - [ ] Rebased/mergable - [ ] Tests pass -- [ ] Sign [CLA](http://influxdb.com/community/cla.html) (if not already signed) +- [ ] Sign [CLA](https://influxdata.com/community/cla/) (if not already signed) ``` Signing the CLA @@ -65,11 +65,11 @@ If you are going to be contributing back to InfluxDB please take a second to sign our CLA, which can be found -[on our website](http://influxdb.com/community/cla.html). +[on our website](https://influxdata.com/community/cla/). Installing Go ------------- -InfluxDB requires Go 1.4.3. +InfluxDB requires Go 1.7.4. At InfluxDB we find gvm, a Go version manager, useful for installing Go. For instructions on how to install it see [the gvm page on github](https://github.com/moovweb/gvm). @@ -77,8 +77,14 @@ After installing gvm you can install and set the default go version by running the following: - gvm install go1.4.3 - gvm use go1.4.3 --default + gvm install go1.7.4 + gvm use go1.7.4 --default + +Installing GDM +------------- +InfluxDB uses [gdm](https://github.com/sparrc/gdm) to manage dependencies. 
Install it by running the following: + + go get github.com/sparrc/gdm Revision Control Systems ------------- @@ -95,7 +101,7 @@ ```bash mkdir $HOME/gocodez export GOPATH=$HOME/gocodez - go get github.com/influxdb/influxdb + go get github.com/influxdata/influxdb ``` You can add the line `export GOPATH=$HOME/gocodez` to your bash/zsh file to be set for every shell instead of having to manually run it everytime. @@ -106,12 +112,12 @@ ```bash export GOPATH=$HOME/gocodez - mkdir -p $GOPATH/src/github.com/influxdb - cd $GOPATH/src/github.com/influxdb + mkdir -p $GOPATH/src/github.com/influxdata + cd $GOPATH/src/github.com/influxdata git clone git@github.com:/influxdb ``` -Retaining the directory structure `$GOPATH/src/github.com/influxdb` is necessary so that Go imports work correctly. +Retaining the directory structure `$GOPATH/src/github.com/influxdata` is necessary so that Go imports work correctly. Build and Test ----- @@ -119,8 +125,8 @@ Make sure you have Go installed and the project structure as shown above. To then get the dependencies for the project, execute the following commands: ```bash -cd $GOPATH/src/github.com/influxdb/influxdb -go get -u -f -t ./... +cd $GOPATH/src/github.com/influxdata/influxdb +gdm restore ``` To then build and install the binaries, run the following command. @@ -133,20 +139,24 @@ To set the version and commit flags during the build pass the following to the **install** command: ```bash --ldflags="-X main.version=$VERSION -X main.branch=$BRANCH -X main.commit=$COMMIT -X main.buildTime=$TIME" +-ldflags="-X main.version=$VERSION -X main.branch=$BRANCH -X main.commit=$COMMIT" ``` -where `$VERSION` is the version, `$BRANCH` is the branch, `$COMMIT` is the git commit hash, and `$TIME` is the build timestamp. +where `$VERSION` is the version, `$BRANCH` is the branch, and `$COMMIT` is the git commit hash. + +If you want to build packages, see `build.py` usage information: -If you want to build packages, see `package.sh` help: ```bash -package.sh -h +python build.py --help + +# Or to build a package for your current system +python build.py --package ``` To run the tests, execute the following command: ```bash -cd $GOPATH/src/github.com/influxdb/influxdb +cd $GOPATH/src/github.com/influxdata/influxdb go test -v ./... # run tests that match some pattern @@ -187,12 +197,37 @@ * Ensure the protobuf library can be found. Make sure that `LD_LIBRRARY_PATH` includes the directory in which the library `libprotoc.so` has been installed. * Ensure the command `protoc-gen-gogo`, found in `GOPATH/bin`, is on your path. This can be done by adding `GOPATH/bin` to `PATH`. + +Generated Go Templates +---------------------- + +The query engine requires optimizes data structures for each data type so +instead of writing each implementation several times we use templates. _Do not +change code that ends in a `.gen.go` extension!_ Instead you must edit the +`.gen.go.tmpl` file that was used to generate it. + +Once you've edited the template file, you'll need the [`tmpl`][tmpl] utility +to generate the code: + +```sh +$ go get github.com/benbjohnson/tmpl +``` + +Then you can regenerate all templates in the project: + +```sh +$ go generate ./... +``` + +[tmpl]: https://github.com/benbjohnson/tmpl + + Pre-commit checks ------------- We have a pre-commit hook to make sure code is formatted properly and vetted before you commit any changes. We strongly recommend using the pre-commit hook to guard against accidentally committing unformatted code. 
To use the pre-commit hook, run the following: ```bash - cd $GOPATH/src/github.com/influxdb/influxdb + cd $GOPATH/src/github.com/influxdata/influxdb cp .hooks/pre-commit .git/hooks/ ``` In case the commit is rejected because it's not formatted you can run @@ -218,7 +253,7 @@ ```sh # start influx with profiling -./influxd -cpuprofile influxdcpu.prof -memprof influxdmem.prof +./influxd -cpuprofile influxdcpu.prof -memprof influxdmem.prof # run queries, writes, whatever you're testing # Quit out of influxd and influxd.prof will then be written. # open up pprof to examine the profiling data. @@ -228,6 +263,18 @@ ``` Note that when you pass the binary to `go tool pprof` *you must specify the path to the binary*. +If you are profiling benchmarks built with the `testing` package, you may wish +to use the [`github.com/pkg/profile`](github.com/pkg/profile) package to limit +the code being profiled: + +```go +func BenchmarkSomething(b *testing.B) { + // do something intensive like fill database with data... + defer profile.Start(profile.ProfilePath("/tmp"), profile.MemProfile).Stop() + // do something that you want to profile... +} +``` + Continuous Integration testing ----- InfluxDB uses CircleCI for continuous integration testing. To see how the code is built and tested, check out [this file](https://github.com/influxdata/influxdb/blob/master/circle-test.sh). It closely follows the build and test process outlined above. You can see the exact version of Go InfluxDB uses for testing by consulting that file. diff -Nru influxdb-0.10.0+dfsg1/coordinator/config.go influxdb-1.1.1+dfsg1/coordinator/config.go --- influxdb-0.10.0+dfsg1/coordinator/config.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/coordinator/config.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,47 @@ +package coordinator + +import ( + "time" + + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/toml" +) + +const ( + // DefaultWriteTimeout is the default timeout for a complete write to succeed. + DefaultWriteTimeout = 10 * time.Second + + // DefaultMaxConcurrentQueries is the maximum number of running queries. + // A value of zero will make the maximum query limit unlimited. + DefaultMaxConcurrentQueries = 0 + + // DefaultMaxSelectPointN is the maximum number of points a SELECT can process. + // A value of zero will make the maximum point count unlimited. + DefaultMaxSelectPointN = 0 + + // DefaultMaxSelectSeriesN is the maximum number of series a SELECT can run. + // A value of zero will make the maximum series count unlimited. + DefaultMaxSelectSeriesN = 0 +) + +// Config represents the configuration for the clustering service. +type Config struct { + WriteTimeout toml.Duration `toml:"write-timeout"` + MaxConcurrentQueries int `toml:"max-concurrent-queries"` + QueryTimeout toml.Duration `toml:"query-timeout"` + LogQueriesAfter toml.Duration `toml:"log-queries-after"` + MaxSelectPointN int `toml:"max-select-point"` + MaxSelectSeriesN int `toml:"max-select-series"` + MaxSelectBucketsN int `toml:"max-select-buckets"` +} + +// NewConfig returns an instance of Config with defaults. 
+func NewConfig() Config { + return Config{ + WriteTimeout: toml.Duration(DefaultWriteTimeout), + QueryTimeout: toml.Duration(influxql.DefaultQueryTimeout), + MaxConcurrentQueries: DefaultMaxConcurrentQueries, + MaxSelectPointN: DefaultMaxSelectPointN, + MaxSelectSeriesN: DefaultMaxSelectSeriesN, + } +} diff -Nru influxdb-0.10.0+dfsg1/coordinator/config_test.go influxdb-1.1.1+dfsg1/coordinator/config_test.go --- influxdb-0.10.0+dfsg1/coordinator/config_test.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/coordinator/config_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,24 @@ +package coordinator_test + +import ( + "testing" + "time" + + "github.com/BurntSushi/toml" + "github.com/influxdata/influxdb/coordinator" +) + +func TestConfig_Parse(t *testing.T) { + // Parse configuration. + var c coordinator.Config + if _, err := toml.Decode(` +write-timeout = "20s" +`, &c); err != nil { + t.Fatal(err) + } + + // Validate configuration. + if time.Duration(c.WriteTimeout) != 20*time.Second { + t.Fatalf("unexpected write timeout s: %s", c.WriteTimeout) + } +} diff -Nru influxdb-0.10.0+dfsg1/coordinator/meta_client.go influxdb-1.1.1+dfsg1/coordinator/meta_client.go --- influxdb-0.10.0+dfsg1/coordinator/meta_client.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/coordinator/meta_client.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,36 @@ +package coordinator + +import ( + "time" + + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/services/meta" +) + +// MetaClient is an interface for accessing meta data. +type MetaClient interface { + CreateContinuousQuery(database, name, query string) error + CreateDatabase(name string) (*meta.DatabaseInfo, error) + CreateDatabaseWithRetentionPolicy(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) + CreateRetentionPolicy(database string, spec *meta.RetentionPolicySpec) (*meta.RetentionPolicyInfo, error) + CreateSubscription(database, rp, name, mode string, destinations []string) error + CreateUser(name, password string, admin bool) (*meta.UserInfo, error) + Database(name string) *meta.DatabaseInfo + Databases() []meta.DatabaseInfo + DropShard(id uint64) error + DropContinuousQuery(database, name string) error + DropDatabase(name string) error + DropRetentionPolicy(database, name string) error + DropSubscription(database, rp, name string) error + DropUser(name string) error + RetentionPolicy(database, name string) (rpi *meta.RetentionPolicyInfo, err error) + SetAdminPrivilege(username string, admin bool) error + SetDefaultRetentionPolicy(database, name string) error + SetPrivilege(username, database string, p influxql.Privilege) error + ShardsByTimeRange(sources influxql.Sources, tmin, tmax time.Time) (a []meta.ShardInfo, err error) + UpdateRetentionPolicy(database, name string, rpu *meta.RetentionPolicyUpdate) error + UpdateUser(name, password string) error + UserPrivilege(username, database string) (*influxql.Privilege, error) + UserPrivileges(username string) (map[string]influxql.Privilege, error) + Users() []meta.UserInfo +} diff -Nru influxdb-0.10.0+dfsg1/coordinator/meta_client_test.go influxdb-1.1.1+dfsg1/coordinator/meta_client_test.go --- influxdb-0.10.0+dfsg1/coordinator/meta_client_test.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/coordinator/meta_client_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,165 @@ +package coordinator_test + +import ( + "time" + + "github.com/influxdata/influxdb/influxql" + 
"github.com/influxdata/influxdb/services/meta" +) + +// MetaClient is a mockable implementation of cluster.MetaClient. +type MetaClient struct { + CreateContinuousQueryFn func(database, name, query string) error + CreateDatabaseFn func(name string) (*meta.DatabaseInfo, error) + CreateDatabaseWithRetentionPolicyFn func(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) + CreateRetentionPolicyFn func(database string, spec *meta.RetentionPolicySpec) (*meta.RetentionPolicyInfo, error) + CreateSubscriptionFn func(database, rp, name, mode string, destinations []string) error + CreateUserFn func(name, password string, admin bool) (*meta.UserInfo, error) + DatabaseFn func(name string) *meta.DatabaseInfo + DatabasesFn func() []meta.DatabaseInfo + DataNodeFn func(id uint64) (*meta.NodeInfo, error) + DataNodesFn func() ([]meta.NodeInfo, error) + DeleteDataNodeFn func(id uint64) error + DeleteMetaNodeFn func(id uint64) error + DropContinuousQueryFn func(database, name string) error + DropDatabaseFn func(name string) error + DropRetentionPolicyFn func(database, name string) error + DropSubscriptionFn func(database, rp, name string) error + DropShardFn func(id uint64) error + DropUserFn func(name string) error + MetaNodesFn func() ([]meta.NodeInfo, error) + RetentionPolicyFn func(database, name string) (rpi *meta.RetentionPolicyInfo, err error) + SetAdminPrivilegeFn func(username string, admin bool) error + SetDefaultRetentionPolicyFn func(database, name string) error + SetPrivilegeFn func(username, database string, p influxql.Privilege) error + ShardsByTimeRangeFn func(sources influxql.Sources, tmin, tmax time.Time) (a []meta.ShardInfo, err error) + UpdateRetentionPolicyFn func(database, name string, rpu *meta.RetentionPolicyUpdate) error + UpdateUserFn func(name, password string) error + UserPrivilegeFn func(username, database string) (*influxql.Privilege, error) + UserPrivilegesFn func(username string) (map[string]influxql.Privilege, error) + UsersFn func() []meta.UserInfo +} + +func (c *MetaClient) CreateContinuousQuery(database, name, query string) error { + return c.CreateContinuousQueryFn(database, name, query) +} + +func (c *MetaClient) CreateDatabase(name string) (*meta.DatabaseInfo, error) { + return c.CreateDatabaseFn(name) +} + +func (c *MetaClient) CreateDatabaseWithRetentionPolicy(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) { + return c.CreateDatabaseWithRetentionPolicyFn(name, spec) +} + +func (c *MetaClient) CreateRetentionPolicy(database string, spec *meta.RetentionPolicySpec) (*meta.RetentionPolicyInfo, error) { + return c.CreateRetentionPolicyFn(database, spec) +} + +func (c *MetaClient) DropShard(id uint64) error { + return c.DropShardFn(id) +} + +func (c *MetaClient) CreateSubscription(database, rp, name, mode string, destinations []string) error { + return c.CreateSubscriptionFn(database, rp, name, mode, destinations) +} + +func (c *MetaClient) CreateUser(name, password string, admin bool) (*meta.UserInfo, error) { + return c.CreateUserFn(name, password, admin) +} + +func (c *MetaClient) Database(name string) *meta.DatabaseInfo { + return c.DatabaseFn(name) +} + +func (c *MetaClient) Databases() []meta.DatabaseInfo { + return c.DatabasesFn() +} + +func (c *MetaClient) DataNode(id uint64) (*meta.NodeInfo, error) { + return c.DataNodeFn(id) +} + +func (c *MetaClient) DataNodes() ([]meta.NodeInfo, error) { + return c.DataNodesFn() +} + +func (c *MetaClient) DeleteDataNode(id uint64) error { + return c.DeleteDataNodeFn(id) +} + +func 
(c *MetaClient) DeleteMetaNode(id uint64) error { + return c.DeleteMetaNodeFn(id) +} + +func (c *MetaClient) DropContinuousQuery(database, name string) error { + return c.DropContinuousQueryFn(database, name) +} + +func (c *MetaClient) DropDatabase(name string) error { + return c.DropDatabaseFn(name) +} + +func (c *MetaClient) DropRetentionPolicy(database, name string) error { + return c.DropRetentionPolicyFn(database, name) +} + +func (c *MetaClient) DropSubscription(database, rp, name string) error { + return c.DropSubscriptionFn(database, rp, name) +} + +func (c *MetaClient) DropUser(name string) error { + return c.DropUserFn(name) +} + +func (c *MetaClient) MetaNodes() ([]meta.NodeInfo, error) { + return c.MetaNodesFn() +} + +func (c *MetaClient) RetentionPolicy(database, name string) (rpi *meta.RetentionPolicyInfo, err error) { + return c.RetentionPolicyFn(database, name) +} + +func (c *MetaClient) SetAdminPrivilege(username string, admin bool) error { + return c.SetAdminPrivilegeFn(username, admin) +} + +func (c *MetaClient) SetDefaultRetentionPolicy(database, name string) error { + return c.SetDefaultRetentionPolicyFn(database, name) +} + +func (c *MetaClient) SetPrivilege(username, database string, p influxql.Privilege) error { + return c.SetPrivilegeFn(username, database, p) +} + +func (c *MetaClient) ShardsByTimeRange(sources influxql.Sources, tmin, tmax time.Time) (a []meta.ShardInfo, err error) { + return c.ShardsByTimeRangeFn(sources, tmin, tmax) +} + +func (c *MetaClient) UpdateRetentionPolicy(database, name string, rpu *meta.RetentionPolicyUpdate) error { + return c.UpdateRetentionPolicyFn(database, name, rpu) +} + +func (c *MetaClient) UpdateUser(name, password string) error { + return c.UpdateUserFn(name, password) +} + +func (c *MetaClient) UserPrivilege(username, database string) (*influxql.Privilege, error) { + return c.UserPrivilegeFn(username, database) +} + +func (c *MetaClient) UserPrivileges(username string) (map[string]influxql.Privilege, error) { + return c.UserPrivilegesFn(username) +} + +func (c *MetaClient) Users() []meta.UserInfo { + return c.UsersFn() +} + +// DefaultMetaClientDatabaseFn returns a single database (db0) with a retention policy. +func DefaultMetaClientDatabaseFn(name string) *meta.DatabaseInfo { + return &meta.DatabaseInfo{ + Name: DefaultDatabase, + DefaultRetentionPolicy: DefaultRetentionPolicy, + } +} diff -Nru influxdb-0.10.0+dfsg1/coordinator/points_writer.go influxdb-1.1.1+dfsg1/coordinator/points_writer.go --- influxdb-0.10.0+dfsg1/coordinator/points_writer.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/coordinator/points_writer.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,377 @@ +package coordinator + +import ( + "errors" + "io" + "log" + "os" + "sort" + "sync" + "sync/atomic" + "time" + + "github.com/influxdata/influxdb" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/tsdb" +) + +// The statistics generated by the "write" mdoule +const ( + statWriteReq = "req" + statPointWriteReq = "pointReq" + statPointWriteReqLocal = "pointReqLocal" + statWriteOK = "writeOk" + statWriteDrop = "writeDrop" + statWriteTimeout = "writeTimeout" + statWriteErr = "writeError" + statSubWriteOK = "subWriteOk" + statSubWriteDrop = "subWriteDrop" +) + +var ( + // ErrTimeout is returned when a write times out. 
+ ErrTimeout = errors.New("timeout") + + // ErrPartialWrite is returned when a write partially succeeds but does + // not meet the requested consistency level. + ErrPartialWrite = errors.New("partial write") + + // ErrWriteFailed is returned when no writes succeeded. + ErrWriteFailed = errors.New("write failed") +) + +// PointsWriter handles writes across multiple local and remote data nodes. +type PointsWriter struct { + mu sync.RWMutex + closing chan struct{} + WriteTimeout time.Duration + Logger *log.Logger + + Node *influxdb.Node + + MetaClient interface { + Database(name string) (di *meta.DatabaseInfo) + RetentionPolicy(database, policy string) (*meta.RetentionPolicyInfo, error) + CreateShardGroup(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) + ShardOwner(shardID uint64) (string, string, *meta.ShardGroupInfo) + } + + TSDBStore interface { + CreateShard(database, retentionPolicy string, shardID uint64, enabled bool) error + WriteToShard(shardID uint64, points []models.Point) error + } + + ShardWriter interface { + WriteShard(shardID, ownerID uint64, points []models.Point) error + } + + Subscriber interface { + Points() chan<- *WritePointsRequest + } + subPoints chan<- *WritePointsRequest + + stats *WriteStatistics +} + +// WritePointsRequest represents a request to write point data to the cluster +type WritePointsRequest struct { + Database string + RetentionPolicy string + Points []models.Point +} + +// AddPoint adds a point to the WritePointRequest with field key 'value' +func (w *WritePointsRequest) AddPoint(name string, value interface{}, timestamp time.Time, tags map[string]string) { + pt, err := models.NewPoint( + name, models.NewTags(tags), map[string]interface{}{"value": value}, timestamp, + ) + if err != nil { + return + } + w.Points = append(w.Points, pt) +} + +// NewPointsWriter returns a new instance of PointsWriter for a node. +func NewPointsWriter() *PointsWriter { + return &PointsWriter{ + closing: make(chan struct{}), + WriteTimeout: DefaultWriteTimeout, + Logger: log.New(os.Stderr, "[write] ", log.LstdFlags), + stats: &WriteStatistics{}, + } +} + +// ShardMapping contains a mapping of a shards to a points. +type ShardMapping struct { + Points map[uint64][]models.Point // The points associated with a shard ID + Shards map[uint64]*meta.ShardInfo // The shards that have been mapped, keyed by shard ID +} + +// NewShardMapping creates an empty ShardMapping +func NewShardMapping() *ShardMapping { + return &ShardMapping{ + Points: map[uint64][]models.Point{}, + Shards: map[uint64]*meta.ShardInfo{}, + } +} + +// MapPoint maps a point to shard +func (s *ShardMapping) MapPoint(shardInfo *meta.ShardInfo, p models.Point) { + s.Points[shardInfo.ID] = append(s.Points[shardInfo.ID], p) + s.Shards[shardInfo.ID] = shardInfo +} + +// Open opens the communication channel with the point writer +func (w *PointsWriter) Open() error { + w.mu.Lock() + defer w.mu.Unlock() + w.closing = make(chan struct{}) + if w.Subscriber != nil { + w.subPoints = w.Subscriber.Points() + } + return nil +} + +// Close closes the communication channel with the point writer +func (w *PointsWriter) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + if w.closing != nil { + close(w.closing) + } + if w.subPoints != nil { + // 'nil' channels always block so this makes the + // select statement in WritePoints hit its default case + // dropping any in-flight writes. + w.subPoints = nil + } + return nil +} + +// SetLogOutput sets the writer to which all logs are written. 
It must not be +// called after Open is called. +func (w *PointsWriter) SetLogOutput(lw io.Writer) { + w.Logger = log.New(lw, "[write] ", log.LstdFlags) +} + +// WriteStatistics keeps statistics related to the PointsWriter. +type WriteStatistics struct { + WriteReq int64 + PointWriteReq int64 + PointWriteReqLocal int64 + WriteOK int64 + WriteDropped int64 + WriteTimeout int64 + WriteErr int64 + SubWriteOK int64 + SubWriteDrop int64 +} + +// Statistics returns statistics for periodic monitoring. +func (w *PointsWriter) Statistics(tags map[string]string) []models.Statistic { + return []models.Statistic{{ + Name: "write", + Tags: tags, + Values: map[string]interface{}{ + statWriteReq: atomic.LoadInt64(&w.stats.WriteReq), + statPointWriteReq: atomic.LoadInt64(&w.stats.PointWriteReq), + statPointWriteReqLocal: atomic.LoadInt64(&w.stats.PointWriteReqLocal), + statWriteOK: atomic.LoadInt64(&w.stats.WriteOK), + statWriteDrop: atomic.LoadInt64(&w.stats.WriteDropped), + statWriteTimeout: atomic.LoadInt64(&w.stats.WriteTimeout), + statWriteErr: atomic.LoadInt64(&w.stats.WriteErr), + statSubWriteOK: atomic.LoadInt64(&w.stats.SubWriteOK), + statSubWriteDrop: atomic.LoadInt64(&w.stats.SubWriteDrop), + }, + }} +} + +// MapShards maps the points contained in wp to a ShardMapping. If a point +// maps to a shard group or shard that does not currently exist, it will be +// created before returning the mapping. +func (w *PointsWriter) MapShards(wp *WritePointsRequest) (*ShardMapping, error) { + rp, err := w.MetaClient.RetentionPolicy(wp.Database, wp.RetentionPolicy) + if err != nil { + return nil, err + } else if rp == nil { + return nil, influxdb.ErrRetentionPolicyNotFound(wp.RetentionPolicy) + } + + // Holds all the shard groups and shards that are required for writes. + list := make(sgList, 0, 8) + min := time.Unix(0, models.MinNanoTime) + if rp.Duration > 0 { + min = time.Now().Add(-rp.Duration) + } + + for _, p := range wp.Points { + // Either the point is outside the scope of the RP, or we already have + // a suitable shard group for the point. + if p.Time().Before(min) || list.Covers(p.Time()) { + continue + } + + // No shard groups overlap with the point's time, so we will create + // a new shard group for this point. + sg, err := w.MetaClient.CreateShardGroup(wp.Database, wp.RetentionPolicy, p.Time()) + if err != nil { + return nil, err + } + + if sg == nil { + return nil, errors.New("nil shard group") + } + list = list.Append(*sg) + } + + mapping := NewShardMapping() + for _, p := range wp.Points { + sg := list.ShardGroupAt(p.Time()) + if sg == nil { + // We didn't create a shard group because the point was outside the + // scope of the RP. + atomic.AddInt64(&w.stats.WriteDropped, 1) + continue + } + + sh := sg.ShardFor(p.HashID()) + mapping.MapPoint(&sh, p) + } + return mapping, nil +} + +// sgList is a wrapper around a meta.ShardGroupInfos where we can also check +// if a given time is covered by any of the shard groups in the list. +type sgList meta.ShardGroupInfos + +func (l sgList) Covers(t time.Time) bool { + if len(l) == 0 { + return false + } + return l.ShardGroupAt(t) != nil +} + +func (l sgList) ShardGroupAt(t time.Time) *meta.ShardGroupInfo { + // Attempt to find a shard group that could contain this point. + // Shard groups are sorted first according to end time, and then according + // to start time. 
Therefore, if there are multiple shard groups that match + // this point's time they will be preferred in this order: + // + // - a shard group with the earliest end time; + // - (assuming identical end times) the shard group with the earliest start + // time. + idx := sort.Search(len(l), func(i int) bool { return l[i].EndTime.After(t) }) + + // We couldn't find a shard group the point falls into. + if idx == len(l) || t.Before(l[idx].StartTime) { + return nil + } + return &l[idx] +} + +// Append appends a shard group to the list, and returns a sorted list. +func (l sgList) Append(sgi meta.ShardGroupInfo) sgList { + next := append(l, sgi) + sort.Sort(meta.ShardGroupInfos(next)) + return next +} + +// WritePointsInto is a copy of WritePoints that uses a tsdb structure instead of +// a cluster structure for information. This is to avoid a circular dependency +func (w *PointsWriter) WritePointsInto(p *IntoWriteRequest) error { + return w.WritePoints(p.Database, p.RetentionPolicy, models.ConsistencyLevelOne, p.Points) +} + +// WritePoints writes across multiple local and remote data nodes according the consistency level. +func (w *PointsWriter) WritePoints(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error { + atomic.AddInt64(&w.stats.WriteReq, 1) + atomic.AddInt64(&w.stats.PointWriteReq, int64(len(points))) + + if retentionPolicy == "" { + db := w.MetaClient.Database(database) + if db == nil { + return influxdb.ErrDatabaseNotFound(database) + } + retentionPolicy = db.DefaultRetentionPolicy + } + + shardMappings, err := w.MapShards(&WritePointsRequest{Database: database, RetentionPolicy: retentionPolicy, Points: points}) + if err != nil { + return err + } + + // Write each shard in it's own goroutine and return as soon as one fails. + ch := make(chan error, len(shardMappings.Points)) + for shardID, points := range shardMappings.Points { + go func(shard *meta.ShardInfo, database, retentionPolicy string, points []models.Point) { + ch <- w.writeToShard(shard, database, retentionPolicy, points) + }(shardMappings.Shards[shardID], database, retentionPolicy, points) + } + + // Send points to subscriptions if possible. + ok := false + // We need to lock just in case the channel is about to be nil'ed + w.mu.RLock() + select { + case w.subPoints <- &WritePointsRequest{Database: database, RetentionPolicy: retentionPolicy, Points: points}: + ok = true + default: + } + w.mu.RUnlock() + if ok { + atomic.AddInt64(&w.stats.SubWriteOK, 1) + } else { + atomic.AddInt64(&w.stats.SubWriteDrop, 1) + } + + timeout := time.NewTimer(w.WriteTimeout) + defer timeout.Stop() + for range shardMappings.Points { + select { + case <-w.closing: + return ErrWriteFailed + case <-timeout.C: + atomic.AddInt64(&w.stats.WriteTimeout, 1) + // return timeout error to caller + return ErrTimeout + case err := <-ch: + if err != nil { + return err + } + } + } + return nil +} + +// writeToShards writes points to a shard. 
+func (w *PointsWriter) writeToShard(shard *meta.ShardInfo, database, retentionPolicy string, points []models.Point) error { + atomic.AddInt64(&w.stats.PointWriteReqLocal, int64(len(points))) + + err := w.TSDBStore.WriteToShard(shard.ID, points) + if err == nil { + atomic.AddInt64(&w.stats.WriteOK, 1) + return nil + } + + // If we've written to shard that should exist on the current node, but the store has + // not actually created this shard, tell it to create it and retry the write + if err == tsdb.ErrShardNotFound { + err = w.TSDBStore.CreateShard(database, retentionPolicy, shard.ID, true) + if err != nil { + w.Logger.Printf("write failed for shard %d: %v", shard.ID, err) + + atomic.AddInt64(&w.stats.WriteErr, 1) + return err + } + } + err = w.TSDBStore.WriteToShard(shard.ID, points) + if err != nil { + w.Logger.Printf("write failed for shard %d: %v", shard.ID, err) + atomic.AddInt64(&w.stats.WriteErr, 1) + return err + } + + atomic.AddInt64(&w.stats.WriteOK, 1) + return nil +} diff -Nru influxdb-0.10.0+dfsg1/coordinator/points_writer_internal_test.go influxdb-1.1.1+dfsg1/coordinator/points_writer_internal_test.go --- influxdb-0.10.0+dfsg1/coordinator/points_writer_internal_test.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/coordinator/points_writer_internal_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,46 @@ +package coordinator + +import ( + "testing" + "time" +) + +func TestSgList_ShardGroupAt(t *testing.T) { + base := time.Date(2016, 10, 19, 0, 0, 0, 0, time.UTC) + day := func(n int) time.Time { + return base.Add(time.Duration(24*n) * time.Hour) + } + + list := sgList{ + {ID: 1, StartTime: day(0), EndTime: day(1)}, + {ID: 2, StartTime: day(1), EndTime: day(2)}, + {ID: 3, StartTime: day(2), EndTime: day(3)}, + // SG day 3 to day 4 missing... + {ID: 4, StartTime: day(4), EndTime: day(5)}, + {ID: 5, StartTime: day(5), EndTime: day(6)}, + } + + examples := []struct { + T time.Time + ShardGroupID uint64 // 0 will indicate we don't expect a shard group + }{ + {T: base.Add(-time.Minute), ShardGroupID: 0}, // Before any SG + {T: day(0), ShardGroupID: 1}, + {T: day(0).Add(time.Minute), ShardGroupID: 1}, + {T: day(1), ShardGroupID: 2}, + {T: day(3).Add(time.Minute), ShardGroupID: 0}, // No matching SG + {T: day(5).Add(time.Hour), ShardGroupID: 5}, + } + + for i, example := range examples { + sg := list.ShardGroupAt(example.T) + var id uint64 + if sg != nil { + id = sg.ID + } + + if got, exp := id, example.ShardGroupID; got != exp { + t.Errorf("[Example %d] got %v, expected %v", i+1, got, exp) + } + } +} diff -Nru influxdb-0.10.0+dfsg1/coordinator/points_writer_test.go influxdb-1.1.1+dfsg1/coordinator/points_writer_test.go --- influxdb-0.10.0+dfsg1/coordinator/points_writer_test.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/coordinator/points_writer_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,636 @@ +package coordinator_test + +import ( + "fmt" + "reflect" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/influxdata/influxdb" + "github.com/influxdata/influxdb/coordinator" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/services/meta" +) + +// TODO(benbjohnson): Rewrite tests to use cluster_test.MetaClient. + +// Ensures the points writer maps a single point to a single shard. 
+func TestPointsWriter_MapShards_One(t *testing.T) { + ms := PointsWriterMetaClient{} + rp := NewRetentionPolicy("myp", time.Hour, 3) + + ms.NodeIDFn = func() uint64 { return 1 } + ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { + return rp, nil + } + + ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { + return &rp.ShardGroups[0], nil + } + + c := coordinator.PointsWriter{MetaClient: ms} + pr := &coordinator.WritePointsRequest{ + Database: "mydb", + RetentionPolicy: "myrp", + } + pr.AddPoint("cpu", 1.0, time.Now(), nil) + + var ( + shardMappings *coordinator.ShardMapping + err error + ) + if shardMappings, err = c.MapShards(pr); err != nil { + t.Fatalf("unexpected an error: %v", err) + } + + if exp := 1; len(shardMappings.Points) != exp { + t.Errorf("MapShards() len mismatch. got %v, exp %v", len(shardMappings.Points), exp) + } +} + +// Ensures the points writer maps to a new shard group when the shard duration +// is changed. +func TestPointsWriter_MapShards_AlterShardDuration(t *testing.T) { + ms := PointsWriterMetaClient{} + rp := NewRetentionPolicy("myp", time.Hour, 3) + + ms.NodeIDFn = func() uint64 { return 1 } + ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { + return rp, nil + } + + var ( + i int + now = time.Now() + ) + + ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { + sg := []meta.ShardGroupInfo{ + meta.ShardGroupInfo{ + Shards: make([]meta.ShardInfo, 1), + StartTime: now, EndTime: now.Add(rp.Duration).Add(-1), + }, + meta.ShardGroupInfo{ + Shards: make([]meta.ShardInfo, 1), + StartTime: now.Add(time.Hour), EndTime: now.Add(3 * time.Hour).Add(rp.Duration).Add(-1), + }, + }[i] + i++ + return &sg, nil + } + + c := coordinator.NewPointsWriter() + c.MetaClient = ms + + pr := &coordinator.WritePointsRequest{ + Database: "mydb", + RetentionPolicy: "myrp", + } + pr.AddPoint("cpu", 1.0, now, nil) + pr.AddPoint("cpu", 2.0, now.Add(2*time.Second), nil) + + var ( + shardMappings *coordinator.ShardMapping + err error + ) + if shardMappings, err = c.MapShards(pr); err != nil { + t.Fatalf("unexpected an error: %v", err) + } + + if got, exp := len(shardMappings.Points[0]), 2; got != exp { + t.Fatalf("got %d point(s), expected %d", got, exp) + } + + if got, exp := len(shardMappings.Shards), 1; got != exp { + t.Errorf("got %d shard(s), expected %d", got, exp) + } + + // Now we alter the retention policy duration. + rp.ShardGroupDuration = 3 * time.Hour + + pr = &coordinator.WritePointsRequest{ + Database: "mydb", + RetentionPolicy: "myrp", + } + pr.AddPoint("cpu", 1.0, now.Add(2*time.Hour), nil) + + // Point is beyond previous shard group so a new shard group should be + // created. + if shardMappings, err = c.MapShards(pr); err != nil { + t.Fatalf("unexpected an error: %v", err) + } + + // We can check value of i since it's only incremeneted when a shard group + // is created. + if got, exp := i, 2; got != exp { + t.Fatal("new shard group was not created, expected it to be") + } +} + +// Ensures the points writer maps a multiple points across shard group boundaries. 
+func TestPointsWriter_MapShards_Multiple(t *testing.T) { + ms := PointsWriterMetaClient{} + rp := NewRetentionPolicy("myp", time.Hour, 3) + rp.ShardGroupDuration = time.Hour + AttachShardGroupInfo(rp, []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }) + AttachShardGroupInfo(rp, []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }) + + ms.NodeIDFn = func() uint64 { return 1 } + ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { + return rp, nil + } + + ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { + for i, sg := range rp.ShardGroups { + if timestamp.Equal(sg.StartTime) || timestamp.After(sg.StartTime) && timestamp.Before(sg.EndTime) { + return &rp.ShardGroups[i], nil + } + } + panic("should not get here") + } + + c := coordinator.NewPointsWriter() + c.MetaClient = ms + defer c.Close() + pr := &coordinator.WritePointsRequest{ + Database: "mydb", + RetentionPolicy: "myrp", + } + + // Three points that range over the shardGroup duration (1h) and should map to two + // distinct shards + pr.AddPoint("cpu", 1.0, time.Now(), nil) + pr.AddPoint("cpu", 2.0, time.Now().Add(time.Hour), nil) + pr.AddPoint("cpu", 3.0, time.Now().Add(time.Hour+time.Second), nil) + + var ( + shardMappings *coordinator.ShardMapping + err error + ) + if shardMappings, err = c.MapShards(pr); err != nil { + t.Fatalf("unexpected an error: %v", err) + } + + if exp := 2; len(shardMappings.Points) != exp { + t.Errorf("MapShards() len mismatch. got %v, exp %v", len(shardMappings.Points), exp) + } + + for _, points := range shardMappings.Points { + // First shard should have 1 point w/ first point added + if len(points) == 1 && points[0].Time() != pr.Points[0].Time() { + t.Fatalf("MapShards() value mismatch. got %v, exp %v", points[0].Time(), pr.Points[0].Time()) + } + + // Second shard should have the last two points added + if len(points) == 2 && points[0].Time() != pr.Points[1].Time() { + t.Fatalf("MapShards() value mismatch. got %v, exp %v", points[0].Time(), pr.Points[1].Time()) + } + + if len(points) == 2 && points[1].Time() != pr.Points[2].Time() { + t.Fatalf("MapShards() value mismatch. got %v, exp %v", points[1].Time(), pr.Points[2].Time()) + } + } +} + +// Ensures the points writer does not map points beyond the retention policy. +func TestPointsWriter_MapShards_Invalid(t *testing.T) { + ms := PointsWriterMetaClient{} + rp := NewRetentionPolicy("myp", time.Hour, 3) + + ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { + return rp, nil + } + + ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { + return &rp.ShardGroups[0], nil + } + + c := coordinator.NewPointsWriter() + c.MetaClient = ms + defer c.Close() + pr := &coordinator.WritePointsRequest{ + Database: "mydb", + RetentionPolicy: "myrp", + } + + // Add a point that goes beyond the current retention policy. + pr.AddPoint("cpu", 1.0, time.Now().Add(-2*time.Hour), nil) + + var ( + shardMappings *coordinator.ShardMapping + err error + ) + if shardMappings, err = c.MapShards(pr); err != nil { + t.Fatalf("unexpected an error: %v", err) + } + + if exp := 0; len(shardMappings.Points) != exp { + t.Errorf("MapShards() len mismatch. 
got %v, exp %v", len(shardMappings.Points), exp) + } +} + +func TestPointsWriter_WritePoints(t *testing.T) { + tests := []struct { + name string + database string + retentionPolicy string + + // the responses returned by each shard write call. node ID 1 = pos 0 + err []error + expErr error + }{ + { + name: "write one success", + database: "mydb", + retentionPolicy: "myrp", + err: []error{nil, nil, nil}, + expErr: nil, + }, + + // Write to non-existent database + { + name: "write to non-existent database", + database: "doesnt_exist", + retentionPolicy: "", + err: []error{nil, nil, nil}, + expErr: fmt.Errorf("database not found: doesnt_exist"), + }, + } + + for _, test := range tests { + + pr := &coordinator.WritePointsRequest{ + Database: test.database, + RetentionPolicy: test.retentionPolicy, + } + + // Ensure that the test shard groups are created before the points + // are created. + ms := NewPointsWriterMetaClient() + + // Three points that range over the shardGroup duration (1h) and should map to two + // distinct shards + pr.AddPoint("cpu", 1.0, time.Now(), nil) + pr.AddPoint("cpu", 2.0, time.Now().Add(time.Hour), nil) + pr.AddPoint("cpu", 3.0, time.Now().Add(time.Hour+time.Second), nil) + + // copy to prevent data race + theTest := test + sm := coordinator.NewShardMapping() + sm.MapPoint( + &meta.ShardInfo{ID: uint64(1), Owners: []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }}, + pr.Points[0]) + sm.MapPoint( + &meta.ShardInfo{ID: uint64(2), Owners: []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }}, + pr.Points[1]) + sm.MapPoint( + &meta.ShardInfo{ID: uint64(2), Owners: []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }}, + pr.Points[2]) + + // Local coordinator.Node ShardWriter + // lock on the write increment since these functions get called in parallel + var mu sync.Mutex + sw := &fakeShardWriter{ + ShardWriteFn: func(shardID, nodeID uint64, points []models.Point) error { + mu.Lock() + defer mu.Unlock() + return theTest.err[int(nodeID)-1] + }, + } + + store := &fakeStore{ + WriteFn: func(shardID uint64, points []models.Point) error { + mu.Lock() + defer mu.Unlock() + return theTest.err[0] + }, + } + + ms.DatabaseFn = func(database string) *meta.DatabaseInfo { + return nil + } + ms.NodeIDFn = func() uint64 { return 1 } + + subPoints := make(chan *coordinator.WritePointsRequest, 1) + sub := Subscriber{} + sub.PointsFn = func() chan<- *coordinator.WritePointsRequest { + return subPoints + } + + c := coordinator.NewPointsWriter() + c.MetaClient = ms + c.ShardWriter = sw + c.TSDBStore = store + c.Subscriber = sub + c.Node = &influxdb.Node{ID: 1} + + c.Open() + defer c.Close() + + err := c.WritePoints(pr.Database, pr.RetentionPolicy, models.ConsistencyLevelOne, pr.Points) + if err == nil && test.expErr != nil { + t.Errorf("PointsWriter.WritePoints(): '%s' error: got %v, exp %v", test.name, err, test.expErr) + } + + if err != nil && test.expErr == nil { + t.Errorf("PointsWriter.WritePoints(): '%s' error: got %v, exp %v", test.name, err, test.expErr) + } + if err != nil && test.expErr != nil && err.Error() != test.expErr.Error() { + t.Errorf("PointsWriter.WritePoints(): '%s' error: got %v, exp %v", test.name, err, test.expErr) + } + if test.expErr == nil { + select { + case p := <-subPoints: + if !reflect.DeepEqual(p, pr) { + t.Errorf("PointsWriter.WritePoints(): '%s' error: unexpected WritePointsRequest got %v, exp %v", test.name, p, pr) + } + default: + t.Errorf("PointsWriter.WritePoints(): '%s' error: Subscriber.Points not called", 
test.name) + } + } + } +} + +type fakePointsWriter struct { + WritePointsIntoFn func(*coordinator.IntoWriteRequest) error +} + +func (f *fakePointsWriter) WritePointsInto(req *coordinator.IntoWriteRequest) error { + return f.WritePointsIntoFn(req) +} + +func TestBufferedPointsWriter(t *testing.T) { + db := "db0" + rp := "rp0" + capacity := 10000 + + writePointsIntoCnt := 0 + pointsWritten := []models.Point{} + + reset := func() { + writePointsIntoCnt = 0 + pointsWritten = pointsWritten[:0] + } + + fakeWriter := &fakePointsWriter{ + WritePointsIntoFn: func(req *coordinator.IntoWriteRequest) error { + writePointsIntoCnt++ + pointsWritten = append(pointsWritten, req.Points...) + return nil + }, + } + + w := coordinator.NewBufferedPointsWriter(fakeWriter, db, rp, capacity) + + // Test that capacity and length are correct for new buffered writer. + if w.Cap() != capacity { + t.Fatalf("exp %d, got %d", capacity, w.Cap()) + } else if w.Len() != 0 { + t.Fatalf("exp %d, got %d", 0, w.Len()) + } + + // Test flushing an empty buffer. + if err := w.Flush(); err != nil { + t.Fatal(err) + } else if writePointsIntoCnt > 0 { + t.Fatalf("exp 0, got %d", writePointsIntoCnt) + } + + // Test writing zero points. + if err := w.WritePointsInto(&coordinator.IntoWriteRequest{ + Database: db, + RetentionPolicy: rp, + Points: []models.Point{}, + }); err != nil { + t.Fatal(err) + } else if writePointsIntoCnt > 0 { + t.Fatalf("exp 0, got %d", writePointsIntoCnt) + } else if w.Len() > 0 { + t.Fatalf("exp 0, got %d", w.Len()) + } + + // Test writing single large bunch of points points. + req := coordinator.WritePointsRequest{ + Database: db, + RetentionPolicy: rp, + } + + numPoints := int(float64(capacity) * 5.5) + for i := 0; i < numPoints; i++ { + req.AddPoint("cpu", float64(i), time.Now().Add(time.Duration(i)*time.Second), nil) + } + + r := coordinator.IntoWriteRequest(req) + if err := w.WritePointsInto(&r); err != nil { + t.Fatal(err) + } else if writePointsIntoCnt != 5 { + t.Fatalf("exp 5, got %d", writePointsIntoCnt) + } else if w.Len() != capacity/2 { + t.Fatalf("exp %d, got %d", capacity/2, w.Len()) + } else if len(pointsWritten) != numPoints-capacity/2 { + t.Fatalf("exp %d, got %d", numPoints-capacity/2, len(pointsWritten)) + } + + if err := w.Flush(); err != nil { + t.Fatal(err) + } else if writePointsIntoCnt != 6 { + t.Fatalf("exp 6, got %d", writePointsIntoCnt) + } else if w.Len() != 0 { + t.Fatalf("exp 0, got %d", w.Len()) + } else if len(pointsWritten) != numPoints { + t.Fatalf("exp %d, got %d", numPoints, len(pointsWritten)) + } else if !reflect.DeepEqual(r.Points, pointsWritten) { + t.Fatal("points don't match") + } + + reset() + + // Test writing points one at a time. 
+ for i, _ := range r.Points { + if err := w.WritePointsInto(&coordinator.IntoWriteRequest{ + Database: db, + RetentionPolicy: rp, + Points: r.Points[i : i+1], + }); err != nil { + t.Fatal(err) + } + } + + if err := w.Flush(); err != nil { + t.Fatal(err) + } else if writePointsIntoCnt != 6 { + t.Fatalf("exp 6, got %d", writePointsIntoCnt) + } else if w.Len() != 0 { + t.Fatalf("exp 0, got %d", w.Len()) + } else if len(pointsWritten) != numPoints { + t.Fatalf("exp %d, got %d", numPoints, len(pointsWritten)) + } else if !reflect.DeepEqual(r.Points, pointsWritten) { + t.Fatal("points don't match") + } +} + +var shardID uint64 + +type fakeShardWriter struct { + ShardWriteFn func(shardID, nodeID uint64, points []models.Point) error +} + +func (f *fakeShardWriter) WriteShard(shardID, nodeID uint64, points []models.Point) error { + return f.ShardWriteFn(shardID, nodeID, points) +} + +type fakeStore struct { + WriteFn func(shardID uint64, points []models.Point) error + CreateShardfn func(database, retentionPolicy string, shardID uint64, enabled bool) error +} + +func (f *fakeStore) WriteToShard(shardID uint64, points []models.Point) error { + return f.WriteFn(shardID, points) +} + +func (f *fakeStore) CreateShard(database, retentionPolicy string, shardID uint64, enabled bool) error { + return f.CreateShardfn(database, retentionPolicy, shardID, enabled) +} + +func NewPointsWriterMetaClient() *PointsWriterMetaClient { + ms := &PointsWriterMetaClient{} + rp := NewRetentionPolicy("myp", time.Hour, 3) + AttachShardGroupInfo(rp, []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }) + AttachShardGroupInfo(rp, []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }) + + ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { + return rp, nil + } + + ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { + for i, sg := range rp.ShardGroups { + if timestamp.Equal(sg.StartTime) || timestamp.After(sg.StartTime) && timestamp.Before(sg.EndTime) { + return &rp.ShardGroups[i], nil + } + } + panic("should not get here") + } + return ms +} + +type PointsWriterMetaClient struct { + NodeIDFn func() uint64 + RetentionPolicyFn func(database, name string) (*meta.RetentionPolicyInfo, error) + CreateShardGroupIfNotExistsFn func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) + DatabaseFn func(database string) *meta.DatabaseInfo + ShardOwnerFn func(shardID uint64) (string, string, *meta.ShardGroupInfo) +} + +func (m PointsWriterMetaClient) NodeID() uint64 { return m.NodeIDFn() } + +func (m PointsWriterMetaClient) RetentionPolicy(database, name string) (*meta.RetentionPolicyInfo, error) { + return m.RetentionPolicyFn(database, name) +} + +func (m PointsWriterMetaClient) CreateShardGroup(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { + return m.CreateShardGroupIfNotExistsFn(database, policy, timestamp) +} + +func (m PointsWriterMetaClient) Database(database string) *meta.DatabaseInfo { + return m.DatabaseFn(database) +} + +func (m PointsWriterMetaClient) ShardOwner(shardID uint64) (string, string, *meta.ShardGroupInfo) { + return m.ShardOwnerFn(shardID) +} + +type Subscriber struct { + PointsFn func() chan<- *coordinator.WritePointsRequest +} + +func (s Subscriber) Points() chan<- *coordinator.WritePointsRequest { + return s.PointsFn() +} + +func NewRetentionPolicy(name string, duration time.Duration, nodeCount int) *meta.RetentionPolicyInfo { 
+ shards := []meta.ShardInfo{} + owners := []meta.ShardOwner{} + for i := 1; i <= nodeCount; i++ { + owners = append(owners, meta.ShardOwner{NodeID: uint64(i)}) + } + + // each node is fully replicated with each other + shards = append(shards, meta.ShardInfo{ + ID: nextShardID(), + Owners: owners, + }) + + start := time.Now() + rp := &meta.RetentionPolicyInfo{ + Name: "myrp", + ReplicaN: nodeCount, + Duration: duration, + ShardGroupDuration: duration, + ShardGroups: []meta.ShardGroupInfo{ + meta.ShardGroupInfo{ + ID: nextShardID(), + StartTime: start, + EndTime: start.Add(duration).Add(-1), + Shards: shards, + }, + }, + } + return rp +} + +func AttachShardGroupInfo(rp *meta.RetentionPolicyInfo, owners []meta.ShardOwner) { + var startTime, endTime time.Time + if len(rp.ShardGroups) == 0 { + startTime = time.Now() + } else { + startTime = rp.ShardGroups[len(rp.ShardGroups)-1].StartTime.Add(rp.ShardGroupDuration) + } + endTime = startTime.Add(rp.ShardGroupDuration).Add(-1) + + sh := meta.ShardGroupInfo{ + ID: uint64(len(rp.ShardGroups) + 1), + StartTime: startTime, + EndTime: endTime, + Shards: []meta.ShardInfo{ + meta.ShardInfo{ + ID: nextShardID(), + Owners: owners, + }, + }, + } + rp.ShardGroups = append(rp.ShardGroups, sh) +} + +func nextShardID() uint64 { + return atomic.AddUint64(&shardID, 1) +} diff -Nru influxdb-0.10.0+dfsg1/coordinator/statement_executor.go influxdb-1.1.1+dfsg1/coordinator/statement_executor.go --- influxdb-0.10.0+dfsg1/coordinator/statement_executor.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/coordinator/statement_executor.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,1249 @@ +package coordinator + +import ( + "bytes" + "errors" + "fmt" + "io" + "sort" + "strconv" + "time" + + "github.com/influxdata/influxdb" + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/monitor" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/tsdb" +) + +var ErrDatabaseNameRequired = errors.New("database name required") + +type pointsWriter interface { + WritePointsInto(*IntoWriteRequest) error +} + +// StatementExecutor executes a statement in the query. +type StatementExecutor struct { + MetaClient MetaClient + + // TaskManager holds the StatementExecutor that handles task-related commands. + TaskManager influxql.StatementExecutor + + // TSDB storage for local node. + TSDBStore TSDBStore + + // Holds monitoring data for SHOW STATS and SHOW DIAGNOSTICS. + Monitor *monitor.Monitor + + // Used for rewriting points back into system for SELECT INTO statements. + PointsWriter pointsWriter + + // Select statement limits + MaxSelectPointN int + MaxSelectSeriesN int + MaxSelectBucketsN int +} + +func (e *StatementExecutor) ExecuteStatement(stmt influxql.Statement, ctx influxql.ExecutionContext) error { + // Select statements are handled separately so that they can be streamed. 
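+	// All other statement types are dispatched through the type switch below.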
+ if stmt, ok := stmt.(*influxql.SelectStatement); ok { + return e.executeSelectStatement(stmt, &ctx) + } + + var rows models.Rows + var messages []*influxql.Message + var err error + switch stmt := stmt.(type) { + case *influxql.AlterRetentionPolicyStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeAlterRetentionPolicyStatement(stmt) + case *influxql.CreateContinuousQueryStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeCreateContinuousQueryStatement(stmt) + case *influxql.CreateDatabaseStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeCreateDatabaseStatement(stmt) + case *influxql.CreateRetentionPolicyStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeCreateRetentionPolicyStatement(stmt) + case *influxql.CreateSubscriptionStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeCreateSubscriptionStatement(stmt) + case *influxql.CreateUserStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeCreateUserStatement(stmt) + case *influxql.DeleteSeriesStatement: + err = e.executeDeleteSeriesStatement(stmt, ctx.Database) + case *influxql.DropContinuousQueryStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeDropContinuousQueryStatement(stmt) + case *influxql.DropDatabaseStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeDropDatabaseStatement(stmt) + case *influxql.DropMeasurementStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeDropMeasurementStatement(stmt, ctx.Database) + case *influxql.DropSeriesStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeDropSeriesStatement(stmt, ctx.Database) + case *influxql.DropRetentionPolicyStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeDropRetentionPolicyStatement(stmt) + case *influxql.DropShardStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeDropShardStatement(stmt) + case *influxql.DropSubscriptionStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeDropSubscriptionStatement(stmt) + case *influxql.DropUserStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeDropUserStatement(stmt) + case *influxql.GrantStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeGrantStatement(stmt) + case *influxql.GrantAdminStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeGrantAdminStatement(stmt) + case *influxql.RevokeStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeRevokeStatement(stmt) + case *influxql.RevokeAdminStatement: + if ctx.ReadOnly { + messages = append(messages, 
influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeRevokeAdminStatement(stmt) + case *influxql.ShowContinuousQueriesStatement: + rows, err = e.executeShowContinuousQueriesStatement(stmt) + case *influxql.ShowDatabasesStatement: + rows, err = e.executeShowDatabasesStatement(stmt) + case *influxql.ShowDiagnosticsStatement: + rows, err = e.executeShowDiagnosticsStatement(stmt) + case *influxql.ShowGrantsForUserStatement: + rows, err = e.executeShowGrantsForUserStatement(stmt) + case *influxql.ShowMeasurementsStatement: + return e.executeShowMeasurementsStatement(stmt, &ctx) + case *influxql.ShowRetentionPoliciesStatement: + rows, err = e.executeShowRetentionPoliciesStatement(stmt) + case *influxql.ShowShardsStatement: + rows, err = e.executeShowShardsStatement(stmt) + case *influxql.ShowShardGroupsStatement: + rows, err = e.executeShowShardGroupsStatement(stmt) + case *influxql.ShowStatsStatement: + rows, err = e.executeShowStatsStatement(stmt) + case *influxql.ShowSubscriptionsStatement: + rows, err = e.executeShowSubscriptionsStatement(stmt) + case *influxql.ShowTagValuesStatement: + return e.executeShowTagValues(stmt, &ctx) + case *influxql.ShowUsersStatement: + rows, err = e.executeShowUsersStatement(stmt) + case *influxql.SetPasswordUserStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeSetPasswordUserStatement(stmt) + case *influxql.ShowQueriesStatement, *influxql.KillQueryStatement: + // Send query related statements to the task manager. + return e.TaskManager.ExecuteStatement(stmt, ctx) + default: + return influxql.ErrInvalidQuery + } + + if err != nil { + return err + } + + return ctx.Send(&influxql.Result{ + StatementID: ctx.StatementID, + Series: rows, + Messages: messages, + }) +} + +func (e *StatementExecutor) executeAlterRetentionPolicyStatement(stmt *influxql.AlterRetentionPolicyStatement) error { + rpu := &meta.RetentionPolicyUpdate{ + Duration: stmt.Duration, + ReplicaN: stmt.Replication, + ShardGroupDuration: stmt.ShardGroupDuration, + } + + // Update the retention policy. + if err := e.MetaClient.UpdateRetentionPolicy(stmt.Database, stmt.Name, rpu); err != nil { + return err + } + + // If requested, set as default retention policy. + if stmt.Default { + if err := e.MetaClient.SetDefaultRetentionPolicy(stmt.Database, stmt.Name); err != nil { + return err + } + } + + return nil +} + +func (e *StatementExecutor) executeCreateContinuousQueryStatement(q *influxql.CreateContinuousQueryStatement) error { + // Verify that retention policies exist. 
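+	// Walk each measurement referenced by the query and fail on the first database/retention policy pair that cannot be found.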
+	var err error + verifyRPFn := func(n influxql.Node) { + if err != nil { + return + } + switch m := n.(type) { + case *influxql.Measurement: + var rp *meta.RetentionPolicyInfo + if rp, err = e.MetaClient.RetentionPolicy(m.Database, m.RetentionPolicy); err != nil { + return + } else if rp == nil { + err = fmt.Errorf("%s: %s.%s", meta.ErrRetentionPolicyNotFound, m.Database, m.RetentionPolicy) + } + default: + return + } + } + + influxql.WalkFunc(q, verifyRPFn) + + if err != nil { + return err + } + + return e.MetaClient.CreateContinuousQuery(q.Database, q.Name, q.String()) +} + +func (e *StatementExecutor) executeCreateDatabaseStatement(stmt *influxql.CreateDatabaseStatement) error { + if !stmt.RetentionPolicyCreate { + _, err := e.MetaClient.CreateDatabase(stmt.Name) + return err + } + + spec := meta.RetentionPolicySpec{ + Name: stmt.RetentionPolicyName, + Duration: stmt.RetentionPolicyDuration, + ReplicaN: stmt.RetentionPolicyReplication, + ShardGroupDuration: stmt.RetentionPolicyShardGroupDuration, + } + _, err := e.MetaClient.CreateDatabaseWithRetentionPolicy(stmt.Name, &spec) + return err +} + +func (e *StatementExecutor) executeCreateRetentionPolicyStatement(stmt *influxql.CreateRetentionPolicyStatement) error { + spec := meta.RetentionPolicySpec{ + Name: stmt.Name, + Duration: &stmt.Duration, + ReplicaN: &stmt.Replication, + ShardGroupDuration: stmt.ShardGroupDuration, + } + + // Create new retention policy. + rp, err := e.MetaClient.CreateRetentionPolicy(stmt.Database, &spec) + if err != nil { + return err + } + + // If requested, set new policy as the default. + if stmt.Default { + if err := e.MetaClient.SetDefaultRetentionPolicy(stmt.Database, rp.Name); err != nil { + return err + } + } + return nil +} + +func (e *StatementExecutor) executeCreateSubscriptionStatement(q *influxql.CreateSubscriptionStatement) error { + return e.MetaClient.CreateSubscription(q.Database, q.RetentionPolicy, q.Name, q.Mode, q.Destinations) +} + +func (e *StatementExecutor) executeCreateUserStatement(q *influxql.CreateUserStatement) error { + _, err := e.MetaClient.CreateUser(q.Name, q.Password, q.Admin) + return err +} + +func (e *StatementExecutor) executeDeleteSeriesStatement(stmt *influxql.DeleteSeriesStatement, database string) error { + if dbi := e.MetaClient.Database(database); dbi == nil { + return influxql.ErrDatabaseNotFound(database) + } + + // Convert "now()" to current time. + stmt.Condition = influxql.Reduce(stmt.Condition, &influxql.NowValuer{Now: time.Now().UTC()}) + + // Locally delete the series. + return e.TSDBStore.DeleteSeries(database, stmt.Sources, stmt.Condition) +} + +func (e *StatementExecutor) executeDropContinuousQueryStatement(q *influxql.DropContinuousQueryStatement) error { + return e.MetaClient.DropContinuousQuery(q.Database, q.Name) +} + +// executeDropDatabaseStatement drops a database from the cluster. +// It does not return an error if the database was not found on any of +// the nodes, or in the Meta store. +func (e *StatementExecutor) executeDropDatabaseStatement(stmt *influxql.DropDatabaseStatement) error { + // Locally delete the database. + if err := e.TSDBStore.DeleteDatabase(stmt.Name); err != nil { + return err + } + + // Remove the database from the Meta Store.
+ return e.MetaClient.DropDatabase(stmt.Name) +} + +func (e *StatementExecutor) executeDropMeasurementStatement(stmt *influxql.DropMeasurementStatement, database string) error { + if dbi := e.MetaClient.Database(database); dbi == nil { + return influxql.ErrDatabaseNotFound(database) + } + + // Locally drop the measurement + return e.TSDBStore.DeleteMeasurement(database, stmt.Name) +} + +func (e *StatementExecutor) executeDropSeriesStatement(stmt *influxql.DropSeriesStatement, database string) error { + if dbi := e.MetaClient.Database(database); dbi == nil { + return influxql.ErrDatabaseNotFound(database) + } + + // Check for time in WHERE clause (not supported). + if influxql.HasTimeExpr(stmt.Condition) { + return errors.New("DROP SERIES doesn't support time in WHERE clause") + } + + // Locally drop the series. + return e.TSDBStore.DeleteSeries(database, stmt.Sources, stmt.Condition) +} + +func (e *StatementExecutor) executeDropShardStatement(stmt *influxql.DropShardStatement) error { + // Locally delete the shard. + if err := e.TSDBStore.DeleteShard(stmt.ID); err != nil { + return err + } + + // Remove the shard reference from the Meta Store. + return e.MetaClient.DropShard(stmt.ID) +} + +func (e *StatementExecutor) executeDropRetentionPolicyStatement(stmt *influxql.DropRetentionPolicyStatement) error { + // Locally drop the retention policy. + if err := e.TSDBStore.DeleteRetentionPolicy(stmt.Database, stmt.Name); err != nil { + return err + } + + return e.MetaClient.DropRetentionPolicy(stmt.Database, stmt.Name) +} + +func (e *StatementExecutor) executeDropSubscriptionStatement(q *influxql.DropSubscriptionStatement) error { + return e.MetaClient.DropSubscription(q.Database, q.RetentionPolicy, q.Name) +} + +func (e *StatementExecutor) executeDropUserStatement(q *influxql.DropUserStatement) error { + return e.MetaClient.DropUser(q.Name) +} + +func (e *StatementExecutor) executeGrantStatement(stmt *influxql.GrantStatement) error { + return e.MetaClient.SetPrivilege(stmt.User, stmt.On, stmt.Privilege) +} + +func (e *StatementExecutor) executeGrantAdminStatement(stmt *influxql.GrantAdminStatement) error { + return e.MetaClient.SetAdminPrivilege(stmt.User, true) +} + +func (e *StatementExecutor) executeRevokeStatement(stmt *influxql.RevokeStatement) error { + priv := influxql.NoPrivileges + + // Revoking all privileges means there's no need to look at existing user privileges. + if stmt.Privilege != influxql.AllPrivileges { + p, err := e.MetaClient.UserPrivilege(stmt.User, stmt.On) + if err != nil { + return err + } + // Bit clear (AND NOT) the user's privilege with the revoked privilege. + priv = *p &^ stmt.Privilege + } + + return e.MetaClient.SetPrivilege(stmt.User, stmt.On, priv) +} + +func (e *StatementExecutor) executeRevokeAdminStatement(stmt *influxql.RevokeAdminStatement) error { + return e.MetaClient.SetAdminPrivilege(stmt.User, false) +} + +func (e *StatementExecutor) executeSetPasswordUserStatement(q *influxql.SetPasswordUserStatement) error { + return e.MetaClient.UpdateUser(q.Name, q.Password) +} + +func (e *StatementExecutor) executeSelectStatement(stmt *influxql.SelectStatement, ctx *influxql.ExecutionContext) error { + itrs, stmt, err := e.createIterators(stmt, ctx) + if err != nil { + return err + } + + // Generate a row emitter from the iterator set. + em := influxql.NewEmitter(itrs, stmt.TimeAscending(), ctx.ChunkSize) + em.Columns = stmt.ColumnNames() + em.OmitTime = stmt.OmitTime + defer em.Close() + + // Emit rows to the results channel. 
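+	// For SELECT ... INTO statements, rows are not sent to the client; they are converted back into points and buffered (10,000 points at a time) before being written to the target measurement.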
+ var writeN int64 + var emitted bool + + var pointsWriter *BufferedPointsWriter + if stmt.Target != nil { + pointsWriter = NewBufferedPointsWriter(e.PointsWriter, stmt.Target.Measurement.Database, stmt.Target.Measurement.RetentionPolicy, 10000) + } + + for { + row, err := em.Emit() + if err != nil { + return err + } else if row == nil { + // Check if the query was interrupted while emitting. + select { + case <-ctx.InterruptCh: + return influxql.ErrQueryInterrupted + default: + } + break + } + + // Write points back into system for INTO statements. + if stmt.Target != nil { + if err := e.writeInto(pointsWriter, stmt, row); err != nil { + return err + } + writeN += int64(len(row.Values)) + continue + } + + result := &influxql.Result{ + StatementID: ctx.StatementID, + Series: []*models.Row{row}, + } + + // Send results or exit if closing. + if err := ctx.Send(result); err != nil { + return err + } + + emitted = true + } + + // Flush remaining points and emit write count if an INTO statement. + if stmt.Target != nil { + if err := pointsWriter.Flush(); err != nil { + return err + } + + var messages []*influxql.Message + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + + return ctx.Send(&influxql.Result{ + StatementID: ctx.StatementID, + Messages: messages, + Series: []*models.Row{{ + Name: "result", + Columns: []string{"time", "written"}, + Values: [][]interface{}{{time.Unix(0, 0).UTC(), writeN}}, + }}, + }) + } + + // Always emit at least one result. + if !emitted { + return ctx.Send(&influxql.Result{ + StatementID: ctx.StatementID, + Series: make([]*models.Row, 0), + }) + } + + return nil +} + +func (e *StatementExecutor) createIterators(stmt *influxql.SelectStatement, ctx *influxql.ExecutionContext) ([]influxql.Iterator, *influxql.SelectStatement, error) { + // It is important to "stamp" this time so that everywhere we evaluate `now()` in the statement is EXACTLY the same `now` + now := time.Now().UTC() + opt := influxql.SelectOptions{ + InterruptCh: ctx.InterruptCh, + NodeID: ctx.ExecutionOptions.NodeID, + MaxSeriesN: e.MaxSelectSeriesN, + } + + // Replace instances of "now()" with the current time, and check the resultant times. + nowValuer := influxql.NowValuer{Now: now} + stmt.Condition = influxql.Reduce(stmt.Condition, &nowValuer) + // Replace instances of "now()" with the current time in the dimensions. + for _, d := range stmt.Dimensions { + d.Expr = influxql.Reduce(d.Expr, &nowValuer) + } + + var err error + opt.MinTime, opt.MaxTime, err = influxql.TimeRange(stmt.Condition) + if err != nil { + return nil, stmt, err + } + + if opt.MaxTime.IsZero() { + // In the case that we're executing a meta query where the user cannot + // specify a time condition, then we expand the default max time + // to the maximum possible value, to ensure that data where all points + // are in the future are returned. + if influxql.Sources(stmt.Sources).HasSystemSource() { + opt.MaxTime = time.Unix(0, influxql.MaxTime).UTC() + } else { + if interval, err := stmt.GroupByInterval(); err != nil { + return nil, stmt, err + } else if interval > 0 { + opt.MaxTime = now + } else { + opt.MaxTime = time.Unix(0, influxql.MaxTime).UTC() + } + } + } + if opt.MinTime.IsZero() { + opt.MinTime = time.Unix(0, influxql.MinTime).UTC() + } + + // Convert DISTINCT into a call. + stmt.RewriteDistinct() + + // Remove "time" from fields list. + stmt.RewriteTimeFields() + + // Rewrite any regex conditions that could make use of the index. 
+ stmt.RewriteRegexConditions() + + // Create an iterator creator based on the shards in the cluster. + ic, err := e.iteratorCreator(stmt, &opt) + if err != nil { + return nil, stmt, err + } + + // Expand regex sources to their actual source names. + if stmt.Sources.HasRegex() { + sources, err := ic.ExpandSources(stmt.Sources) + if err != nil { + return nil, stmt, err + } + stmt.Sources = sources + } + + // Rewrite wildcards, if any exist. + tmp, err := stmt.RewriteFields(ic) + if err != nil { + return nil, stmt, err + } + stmt = tmp + + if e.MaxSelectBucketsN > 0 && !stmt.IsRawQuery { + interval, err := stmt.GroupByInterval() + if err != nil { + return nil, stmt, err + } + + if interval > 0 { + // Determine the start and end time matched to the interval (may not match the actual times). + min := opt.MinTime.Truncate(interval) + max := opt.MaxTime.Truncate(interval).Add(interval) + + // Determine the number of buckets by finding the time span and dividing by the interval. + buckets := int64(max.Sub(min)) / int64(interval) + if int(buckets) > e.MaxSelectBucketsN { + return nil, stmt, fmt.Errorf("max-select-buckets limit exceeded: (%d/%d)", buckets, e.MaxSelectBucketsN) + } + } + } + + // Create a set of iterators from a selection. + itrs, err := influxql.Select(stmt, ic, &opt) + if err != nil { + return nil, stmt, err + } + + if e.MaxSelectPointN > 0 { + monitor := influxql.PointLimitMonitor(itrs, influxql.DefaultStatsInterval, e.MaxSelectPointN) + ctx.Query.Monitor(monitor) + } + return itrs, stmt, nil +} + +// iteratorCreator returns a new instance of IteratorCreator based on stmt. +func (e *StatementExecutor) iteratorCreator(stmt *influxql.SelectStatement, opt *influxql.SelectOptions) (influxql.IteratorCreator, error) { + // Retrieve a list of shard IDs. + shards, err := e.MetaClient.ShardsByTimeRange(stmt.Sources, opt.MinTime, opt.MaxTime) + if err != nil { + return nil, err + } + return e.TSDBStore.IteratorCreator(shards, opt) +} + +func (e *StatementExecutor) executeShowContinuousQueriesStatement(stmt *influxql.ShowContinuousQueriesStatement) (models.Rows, error) { + dis := e.MetaClient.Databases() + + rows := []*models.Row{} + for _, di := range dis { + row := &models.Row{Columns: []string{"name", "query"}, Name: di.Name} + for _, cqi := range di.ContinuousQueries { + row.Values = append(row.Values, []interface{}{cqi.Name, cqi.Query}) + } + rows = append(rows, row) + } + return rows, nil +} + +func (e *StatementExecutor) executeShowDatabasesStatement(q *influxql.ShowDatabasesStatement) (models.Rows, error) { + dis := e.MetaClient.Databases() + + row := &models.Row{Name: "databases", Columns: []string{"name"}} + for _, di := range dis { + row.Values = append(row.Values, []interface{}{di.Name}) + } + return []*models.Row{row}, nil +} + +func (e *StatementExecutor) executeShowDiagnosticsStatement(stmt *influxql.ShowDiagnosticsStatement) (models.Rows, error) { + diags, err := e.Monitor.Diagnostics() + if err != nil { + return nil, err + } + + // Get a sorted list of diagnostics keys. 
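+	// Sorting the keys keeps the ordering of SHOW DIAGNOSTICS output deterministic.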
+ sortedKeys := make([]string, 0, len(diags)) + for k := range diags { + sortedKeys = append(sortedKeys, k) + } + sort.Strings(sortedKeys) + + rows := make([]*models.Row, 0, len(diags)) + for _, k := range sortedKeys { + if stmt.Module != "" && k != stmt.Module { + continue + } + + row := &models.Row{Name: k} + + row.Columns = diags[k].Columns + row.Values = diags[k].Rows + rows = append(rows, row) + } + return rows, nil +} + +func (e *StatementExecutor) executeShowGrantsForUserStatement(q *influxql.ShowGrantsForUserStatement) (models.Rows, error) { + priv, err := e.MetaClient.UserPrivileges(q.Name) + if err != nil { + return nil, err + } + + row := &models.Row{Columns: []string{"database", "privilege"}} + for d, p := range priv { + row.Values = append(row.Values, []interface{}{d, p.String()}) + } + return []*models.Row{row}, nil +} + +func (e *StatementExecutor) executeShowMeasurementsStatement(q *influxql.ShowMeasurementsStatement, ctx *influxql.ExecutionContext) error { + if q.Database == "" { + return ErrDatabaseNameRequired + } + + measurements, err := e.TSDBStore.Measurements(q.Database, q.Condition) + if err != nil || len(measurements) == 0 { + return ctx.Send(&influxql.Result{ + StatementID: ctx.StatementID, + Err: err, + }) + } + + if q.Offset > 0 { + if q.Offset >= len(measurements) { + measurements = nil + } else { + measurements = measurements[q.Offset:] + } + } + + if q.Limit > 0 { + if q.Limit < len(measurements) { + measurements = measurements[:q.Limit] + } + } + + values := make([][]interface{}, len(measurements)) + for i, m := range measurements { + values[i] = []interface{}{m} + } + + if len(values) == 0 { + return ctx.Send(&influxql.Result{ + StatementID: ctx.StatementID, + }) + } + + return ctx.Send(&influxql.Result{ + StatementID: ctx.StatementID, + Series: []*models.Row{{ + Name: "measurements", + Columns: []string{"name"}, + Values: values, + }}, + }) +} + +func (e *StatementExecutor) executeShowRetentionPoliciesStatement(q *influxql.ShowRetentionPoliciesStatement) (models.Rows, error) { + if q.Database == "" { + return nil, ErrDatabaseNameRequired + } + + di := e.MetaClient.Database(q.Database) + if di == nil { + return nil, influxdb.ErrDatabaseNotFound(q.Database) + } + + row := &models.Row{Columns: []string{"name", "duration", "shardGroupDuration", "replicaN", "default"}} + for _, rpi := range di.RetentionPolicies { + row.Values = append(row.Values, []interface{}{rpi.Name, rpi.Duration.String(), rpi.ShardGroupDuration.String(), rpi.ReplicaN, di.DefaultRetentionPolicy == rpi.Name}) + } + return []*models.Row{row}, nil +} + +func (e *StatementExecutor) executeShowShardsStatement(stmt *influxql.ShowShardsStatement) (models.Rows, error) { + dis := e.MetaClient.Databases() + + rows := []*models.Row{} + for _, di := range dis { + row := &models.Row{Columns: []string{"id", "database", "retention_policy", "shard_group", "start_time", "end_time", "expiry_time", "owners"}, Name: di.Name} + for _, rpi := range di.RetentionPolicies { + for _, sgi := range rpi.ShardGroups { + // Shards associated with deleted shard groups are effectively deleted. + // Don't list them. 
+ if sgi.Deleted() { + continue + } + + for _, si := range sgi.Shards { + ownerIDs := make([]uint64, len(si.Owners)) + for i, owner := range si.Owners { + ownerIDs[i] = owner.NodeID + } + + row.Values = append(row.Values, []interface{}{ + si.ID, + di.Name, + rpi.Name, + sgi.ID, + sgi.StartTime.UTC().Format(time.RFC3339), + sgi.EndTime.UTC().Format(time.RFC3339), + sgi.EndTime.Add(rpi.Duration).UTC().Format(time.RFC3339), + joinUint64(ownerIDs), + }) + } + } + } + rows = append(rows, row) + } + return rows, nil +} + +func (e *StatementExecutor) executeShowShardGroupsStatement(stmt *influxql.ShowShardGroupsStatement) (models.Rows, error) { + dis := e.MetaClient.Databases() + + row := &models.Row{Columns: []string{"id", "database", "retention_policy", "start_time", "end_time", "expiry_time"}, Name: "shard groups"} + for _, di := range dis { + for _, rpi := range di.RetentionPolicies { + for _, sgi := range rpi.ShardGroups { + // Shards associated with deleted shard groups are effectively deleted. + // Don't list them. + if sgi.Deleted() { + continue + } + + row.Values = append(row.Values, []interface{}{ + sgi.ID, + di.Name, + rpi.Name, + sgi.StartTime.UTC().Format(time.RFC3339), + sgi.EndTime.UTC().Format(time.RFC3339), + sgi.EndTime.Add(rpi.Duration).UTC().Format(time.RFC3339), + }) + } + } + } + + return []*models.Row{row}, nil +} + +func (e *StatementExecutor) executeShowStatsStatement(stmt *influxql.ShowStatsStatement) (models.Rows, error) { + stats, err := e.Monitor.Statistics(nil) + if err != nil { + return nil, err + } + + var rows []*models.Row + for _, stat := range stats { + if stmt.Module != "" && stat.Name != stmt.Module { + continue + } + row := &models.Row{Name: stat.Name, Tags: stat.Tags} + + values := make([]interface{}, 0, len(stat.Values)) + for _, k := range stat.ValueNames() { + row.Columns = append(row.Columns, k) + values = append(values, stat.Values[k]) + } + row.Values = [][]interface{}{values} + rows = append(rows, row) + } + return rows, nil +} + +func (e *StatementExecutor) executeShowSubscriptionsStatement(stmt *influxql.ShowSubscriptionsStatement) (models.Rows, error) { + dis := e.MetaClient.Databases() + + rows := []*models.Row{} + for _, di := range dis { + row := &models.Row{Columns: []string{"retention_policy", "name", "mode", "destinations"}, Name: di.Name} + for _, rpi := range di.RetentionPolicies { + for _, si := range rpi.Subscriptions { + row.Values = append(row.Values, []interface{}{rpi.Name, si.Name, si.Mode, si.Destinations}) + } + } + if len(row.Values) > 0 { + rows = append(rows, row) + } + } + return rows, nil +} + +func (e *StatementExecutor) executeShowTagValues(q *influxql.ShowTagValuesStatement, ctx *influxql.ExecutionContext) error { + if q.Database == "" { + return ErrDatabaseNameRequired + } + + tagValues, err := e.TSDBStore.TagValues(q.Database, q.Condition) + if err != nil { + return ctx.Send(&influxql.Result{ + StatementID: ctx.StatementID, + Err: err, + }) + } + + emitted := false + for _, m := range tagValues { + values := m.Values + + if q.Offset > 0 { + if q.Offset >= len(values) { + values = nil + } else { + values = values[q.Offset:] + } + } + + if q.Limit > 0 { + if q.Limit < len(values) { + values = values[:q.Limit] + } + } + + if len(values) == 0 { + continue + } + + row := &models.Row{ + Name: m.Measurement, + Columns: []string{"key", "value"}, + Values: make([][]interface{}, len(values)), + } + for i, v := range values { + row.Values[i] = []interface{}{v.Key, v.Value} + } + + if err := ctx.Send(&influxql.Result{ + StatementID: 
ctx.StatementID, + Series: []*models.Row{row}, + }); err != nil { + return err + } + emitted = true + } + + // Ensure at least one result is emitted. + if !emitted { + return ctx.Send(&influxql.Result{ + StatementID: ctx.StatementID, + }) + } + return nil +} + +func (e *StatementExecutor) executeShowUsersStatement(q *influxql.ShowUsersStatement) (models.Rows, error) { + row := &models.Row{Columns: []string{"user", "admin"}} + for _, ui := range e.MetaClient.Users() { + row.Values = append(row.Values, []interface{}{ui.Name, ui.Admin}) + } + return []*models.Row{row}, nil +} + +type BufferedPointsWriter struct { + w pointsWriter + buf []models.Point + database string + retentionPolicy string +} + +func NewBufferedPointsWriter(w pointsWriter, database, retentionPolicy string, capacity int) *BufferedPointsWriter { + return &BufferedPointsWriter{ + w: w, + buf: make([]models.Point, 0, capacity), + database: database, + retentionPolicy: retentionPolicy, + } +} + +func (w *BufferedPointsWriter) WritePointsInto(req *IntoWriteRequest) error { + // Make sure we're buffering points only for the expected destination. + if req.Database != w.database || req.RetentionPolicy != w.retentionPolicy { + return fmt.Errorf("writer for %s.%s can't write into %s.%s", w.database, w.retentionPolicy, req.Database, req.RetentionPolicy) + } + + for i := 0; i < len(req.Points); { + // Get the available space in the buffer. + avail := cap(w.buf) - len(w.buf) + + // Calculate number of points to copy into the buffer. + n := len(req.Points[i:]) + if n > avail { + n = avail + } + + // Copy points into buffer. + w.buf = append(w.buf, req.Points[i:n+i]...) + + // Advance the index by number of points copied. + i += n + + // If buffer is full, flush points to underlying writer. + if len(w.buf) == cap(w.buf) { + if err := w.Flush(); err != nil { + return err + } + } + } + + return nil +} + +// Flush writes all buffered points to the underlying writer. +func (w *BufferedPointsWriter) Flush() error { + if len(w.buf) == 0 { + return nil + } + + if err := w.w.WritePointsInto(&IntoWriteRequest{ + Database: w.database, + RetentionPolicy: w.retentionPolicy, + Points: w.buf, + }); err != nil { + return err + } + + // Clear the buffer. + w.buf = w.buf[:0] + + return nil +} + +// Len returns the number of points buffered. +func (w *BufferedPointsWriter) Len() int { return len(w.buf) } + +// Cap returns the capacity (in points) of the buffer. +func (w *BufferedPointsWriter) Cap() int { return cap(w.buf) } + +func (e *StatementExecutor) writeInto(w pointsWriter, stmt *influxql.SelectStatement, row *models.Row) error { + if stmt.Target.Measurement.Database == "" { + return errNoDatabaseInTarget + } + + // It might seem a bit weird that this is where we do this, since we will have to + // convert rows back to points. The Executors (both aggregate and raw) are complex + // enough that changing them to write back to the DB is going to be clumsy + // + // it might seem weird to have the write be in the QueryExecutor, but the interweaving of + // limitedRowWriter and ExecuteAggregate/Raw makes it ridiculously hard to make sure that the + // results will be the same as when queried normally. 
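+	// If the INTO clause leaves the measurement name blank, the name of the emitted row is used instead.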
+ name := stmt.Target.Measurement.Name + if name == "" { + name = row.Name + } + + points, err := convertRowToPoints(name, row) + if err != nil { + return err + } + + if err := w.WritePointsInto(&IntoWriteRequest{ + Database: stmt.Target.Measurement.Database, + RetentionPolicy: stmt.Target.Measurement.RetentionPolicy, + Points: points, + }); err != nil { + return err + } + + return nil +} + +var errNoDatabaseInTarget = errors.New("no database in target") + +// convertRowToPoints will convert a query result Row into Points that can be written back in. +func convertRowToPoints(measurementName string, row *models.Row) ([]models.Point, error) { + // figure out which parts of the result are the time and which are the fields + timeIndex := -1 + fieldIndexes := make(map[string]int) + for i, c := range row.Columns { + if c == "time" { + timeIndex = i + } else { + fieldIndexes[c] = i + } + } + + if timeIndex == -1 { + return nil, errors.New("error finding time index in result") + } + + points := make([]models.Point, 0, len(row.Values)) + for _, v := range row.Values { + vals := make(map[string]interface{}) + for fieldName, fieldIndex := range fieldIndexes { + val := v[fieldIndex] + if val != nil { + vals[fieldName] = v[fieldIndex] + } + } + + p, err := models.NewPoint(measurementName, models.NewTags(row.Tags), vals, v[timeIndex].(time.Time)) + if err != nil { + // Drop points that can't be stored + continue + } + + points = append(points, p) + } + + return points, nil +} + +// NormalizeStatement adds a default database and policy to the measurements in statement. +func (e *StatementExecutor) NormalizeStatement(stmt influxql.Statement, defaultDatabase string) (err error) { + influxql.WalkFunc(stmt, func(node influxql.Node) { + if err != nil { + return + } + switch node := node.(type) { + case *influxql.ShowRetentionPoliciesStatement: + if node.Database == "" { + node.Database = defaultDatabase + } + case *influxql.ShowMeasurementsStatement: + if node.Database == "" { + node.Database = defaultDatabase + } + case *influxql.ShowTagValuesStatement: + if node.Database == "" { + node.Database = defaultDatabase + } + case *influxql.Measurement: + switch stmt.(type) { + case *influxql.DropSeriesStatement, *influxql.DeleteSeriesStatement: + // DB and RP not supported by these statements so don't rewrite into invalid + // statements + default: + err = e.normalizeMeasurement(node, defaultDatabase) + } + } + }) + return +} + +func (e *StatementExecutor) normalizeMeasurement(m *influxql.Measurement, defaultDatabase string) error { + // Targets (measurements in an INTO clause) can have blank names, which means it will be + // the same as the measurement name it came from in the FROM clause. + if !m.IsTarget && m.Name == "" && m.Regex == nil { + return errors.New("invalid measurement") + } + + // Measurement does not have an explicit database? Insert default. + if m.Database == "" { + m.Database = defaultDatabase + } + + // The database must now be specified by this point. + if m.Database == "" { + return ErrDatabaseNameRequired + } + + // Find database. + di := e.MetaClient.Database(m.Database) + if di == nil { + return influxdb.ErrDatabaseNotFound(m.Database) + } + + // If no retention policy was specified, use the default. 
+ if m.RetentionPolicy == "" { + if di.DefaultRetentionPolicy == "" { + return fmt.Errorf("default retention policy not set for: %s", di.Name) + } + m.RetentionPolicy = di.DefaultRetentionPolicy + } + + return nil +} + +// IntoWriteRequest is a partial copy of cluster.WriteRequest +type IntoWriteRequest struct { + Database string + RetentionPolicy string + Points []models.Point +} + +// TSDBStore is an interface for accessing the time series data store. +type TSDBStore interface { + CreateShard(database, policy string, shardID uint64, enabled bool) error + WriteToShard(shardID uint64, points []models.Point) error + + RestoreShard(id uint64, r io.Reader) error + BackupShard(id uint64, since time.Time, w io.Writer) error + + DeleteDatabase(name string) error + DeleteMeasurement(database, name string) error + DeleteRetentionPolicy(database, name string) error + DeleteSeries(database string, sources []influxql.Source, condition influxql.Expr) error + DeleteShard(id uint64) error + IteratorCreator(shards []meta.ShardInfo, opt *influxql.SelectOptions) (influxql.IteratorCreator, error) + + Measurements(database string, cond influxql.Expr) ([]string, error) + TagValues(database string, cond influxql.Expr) ([]tsdb.TagValues, error) +} + +type LocalTSDBStore struct { + *tsdb.Store +} + +func (s LocalTSDBStore) IteratorCreator(shards []meta.ShardInfo, opt *influxql.SelectOptions) (influxql.IteratorCreator, error) { + shardIDs := make([]uint64, len(shards)) + for i, sh := range shards { + shardIDs[i] = sh.ID + } + return s.Store.IteratorCreator(shardIDs, opt) +} + +// ShardIteratorCreator is an interface for creating an IteratorCreator to access a specific shard. +type ShardIteratorCreator interface { + ShardIteratorCreator(id uint64) influxql.IteratorCreator +} + +// joinUint64 returns a comma-delimited string of uint64 numbers. +func joinUint64(a []uint64) string { + var buf bytes.Buffer + for i, x := range a { + buf.WriteString(strconv.FormatUint(x, 10)) + if i < len(a)-1 { + buf.WriteRune(',') + } + } + return buf.String() +} + +// stringSet represents a set of strings. +type stringSet map[string]struct{} + +// newStringSet returns an empty stringSet. +func newStringSet() stringSet { + return make(map[string]struct{}) +} + +// add adds strings to the set. +func (s stringSet) add(ss ...string) { + for _, n := range ss { + s[n] = struct{}{} + } +} + +// contains returns whether the set contains the given string. +func (s stringSet) contains(ss string) bool { + _, ok := s[ss] + return ok +} + +// list returns the current elements in the set, in sorted order. +func (s stringSet) list() []string { + l := make([]string, 0, len(s)) + for k := range s { + l = append(l, k) + } + sort.Strings(l) + return l +} + +// union returns the union of this set and another. +func (s stringSet) union(o stringSet) stringSet { + ns := newStringSet() + for k := range s { + ns[k] = struct{}{} + } + for k := range o { + ns[k] = struct{}{} + } + return ns +} + +// intersect returns the intersection of this set and another. 
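+// The smaller of the two sets is iterated to minimize map lookups.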
+func (s stringSet) intersect(o stringSet) stringSet { + shorter, longer := s, o + if len(longer) < len(shorter) { + shorter, longer = longer, shorter + } + + ns := newStringSet() + for k := range shorter { + if _, ok := longer[k]; ok { + ns[k] = struct{}{} + } + } + return ns +} diff -Nru influxdb-0.10.0+dfsg1/coordinator/statement_executor_test.go influxdb-1.1.1+dfsg1/coordinator/statement_executor_test.go --- influxdb-0.10.0+dfsg1/coordinator/statement_executor_test.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/coordinator/statement_executor_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,371 @@ +package coordinator_test + +import ( + "bytes" + "errors" + "io" + "log" + "os" + "reflect" + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/influxdata/influxdb/coordinator" + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/internal" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/tsdb" +) + +const ( + // DefaultDatabase is the default database name used in tests. + DefaultDatabase = "db0" + + // DefaultRetentionPolicy is the default retention policy name used in tests. + DefaultRetentionPolicy = "rp0" +) + +// Ensure query executor can execute a simple SELECT statement. +func TestQueryExecutor_ExecuteQuery_SelectStatement(t *testing.T) { + e := DefaultQueryExecutor() + + // The meta client should return a single shard owned by the local node. + e.MetaClient.ShardsByTimeRangeFn = func(sources influxql.Sources, tmin, tmax time.Time) (a []meta.ShardInfo, err error) { + return []meta.ShardInfo{{ID: 100, Owners: []meta.ShardOwner{{NodeID: 0}}}}, nil + } + + // The TSDB store should return an IteratorCreator for shard. + // This IteratorCreator returns a single iterator with "value" in the aux fields. + e.TSDBStore.ShardIteratorCreatorFn = func(id uint64) influxql.IteratorCreator { + if id != 100 { + t.Fatalf("unexpected shard id: %d", id) + } + + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Time: int64(0 * time.Second), Aux: []interface{}{float64(100)}}, + {Name: "cpu", Time: int64(1 * time.Second), Aux: []interface{}{float64(200)}}, + }}, nil + } + ic.FieldDimensionsFn = func(sources influxql.Sources) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { + return map[string]influxql.DataType{"value": influxql.Float}, nil, nil + } + return &ic + } + + // Verify all results from the query. + if a := ReadAllResults(e.ExecuteQuery(`SELECT * FROM cpu`, "db0", 0)); !reflect.DeepEqual(a, []*influxql.Result{ + { + StatementID: 0, + Series: []*models.Row{{ + Name: "cpu", + Columns: []string{"time", "value"}, + Values: [][]interface{}{ + {time.Unix(0, 0).UTC(), float64(100)}, + {time.Unix(1, 0).UTC(), float64(200)}, + }, + }}, + }, + }) { + t.Fatalf("unexpected results: %s", spew.Sdump(a)) + } +} + +// Ensure query executor can enforce a maximum bucket selection count. +func TestQueryExecutor_ExecuteQuery_MaxSelectBucketsN(t *testing.T) { + e := DefaultQueryExecutor() + e.StatementExecutor.MaxSelectBucketsN = 3 + + // The meta client should return a single shards on the local node. 
+	e.MetaClient.ShardsByTimeRangeFn = func(sources influxql.Sources, tmin, tmax time.Time) (a []meta.ShardInfo, err error) { + return []meta.ShardInfo{ + {ID: 100, Owners: []meta.ShardOwner{{NodeID: 0}}}, + }, nil + } + + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{ + Points: []influxql.FloatPoint{{Name: "cpu", Time: int64(0 * time.Second), Aux: []interface{}{float64(100)}}}, + }, nil + } + ic.FieldDimensionsFn = func(sources influxql.Sources) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { + return map[string]influxql.DataType{"value": influxql.Float}, nil, nil + } + e.TSDBStore.ShardIteratorCreatorFn = func(id uint64) influxql.IteratorCreator { return &ic } + + // Verify all results from the query. + if a := ReadAllResults(e.ExecuteQuery(`SELECT count(value) FROM cpu WHERE time >= '2000-01-01T00:00:05Z' AND time < '2000-01-01T00:00:35Z' GROUP BY time(10s)`, "db0", 0)); !reflect.DeepEqual(a, []*influxql.Result{ + { + StatementID: 0, + Err: errors.New("max-select-buckets limit exceeded: (4/3)"), + }, + }) { + t.Fatalf("unexpected results: %s", spew.Sdump(a)) + } +} + +func TestStatementExecutor_NormalizeDropSeries(t *testing.T) { + q, err := influxql.ParseQuery("DROP SERIES FROM cpu") + if err != nil { + t.Fatalf("unexpected error parsing query: %v", err) + } + + stmt := q.Statements[0].(*influxql.DropSeriesStatement) + + s := &coordinator.StatementExecutor{ + MetaClient: &internal.MetaClientMock{ + DatabaseFn: func(name string) *meta.DatabaseInfo { + t.Fatal("meta client should not be called") + return nil + }, + }, + } + if err := s.NormalizeStatement(stmt, "foo"); err != nil { + t.Fatalf("unexpected error normalizing statement: %v", err) + } + + m := stmt.Sources[0].(*influxql.Measurement) + if m.Database != "" { + t.Fatalf("database rewritten when not supposed to: %v", m.Database) + } + if m.RetentionPolicy != "" { + t.Fatalf("retention policy rewritten when not supposed to: %v", m.RetentionPolicy) + } + + if exp, got := "DROP SERIES FROM cpu", q.String(); exp != got { + t.Fatalf("generated query does not match parsed: exp %v, got %v", exp, got) + } +} + +func TestStatementExecutor_NormalizeDeleteSeries(t *testing.T) { + q, err := influxql.ParseQuery("DELETE FROM cpu") + if err != nil { + t.Fatalf("unexpected error parsing query: %v", err) + } + + stmt := q.Statements[0].(*influxql.DeleteSeriesStatement) + + s := &coordinator.StatementExecutor{ + MetaClient: &internal.MetaClientMock{ + DatabaseFn: func(name string) *meta.DatabaseInfo { + t.Fatal("meta client should not be called") + return nil + }, + }, + } + if err := s.NormalizeStatement(stmt, "foo"); err != nil { + t.Fatalf("unexpected error normalizing statement: %v", err) + } + + m := stmt.Sources[0].(*influxql.Measurement) + if m.Database != "" { + t.Fatalf("database rewritten when not supposed to: %v", m.Database) + } + if m.RetentionPolicy != "" { + t.Fatalf("retention policy rewritten when not supposed to: %v", m.RetentionPolicy) + } + + if exp, got := "DELETE FROM cpu", q.String(); exp != got { + t.Fatalf("generated query does not match parsed: exp %v, got %v", exp, got) + } +} + +// QueryExecutor is a test wrapper for coordinator.QueryExecutor. +type QueryExecutor struct { + *influxql.QueryExecutor + + MetaClient MetaClient + TSDBStore TSDBStore + StatementExecutor *coordinator.StatementExecutor + LogOutput bytes.Buffer +} + +// NewQueryExecutor returns a new instance of QueryExecutor.
+// This query executor always has a node id of 0. +func NewQueryExecutor() *QueryExecutor { + e := &QueryExecutor{ + QueryExecutor: influxql.NewQueryExecutor(), + } + e.StatementExecutor = &coordinator.StatementExecutor{ + MetaClient: &e.MetaClient, + TSDBStore: &e.TSDBStore, + } + e.QueryExecutor.StatementExecutor = e.StatementExecutor + + var out io.Writer = &e.LogOutput + if testing.Verbose() { + out = io.MultiWriter(out, os.Stderr) + } + e.QueryExecutor.Logger = log.New(out, "[query] ", log.LstdFlags) + + return e +} + +// DefaultQueryExecutor returns a QueryExecutor with a database (db0) and retention policy (rp0). +func DefaultQueryExecutor() *QueryExecutor { + e := NewQueryExecutor() + e.MetaClient.DatabaseFn = DefaultMetaClientDatabaseFn + return e +} + +// ExecuteQuery parses query and executes against the database. +func (e *QueryExecutor) ExecuteQuery(query, database string, chunkSize int) <-chan *influxql.Result { + return e.QueryExecutor.ExecuteQuery(MustParseQuery(query), influxql.ExecutionOptions{ + Database: database, + ChunkSize: chunkSize, + }, make(chan struct{})) +} + +// TSDBStore is a mockable implementation of coordinator.TSDBStore. +type TSDBStore struct { + CreateShardFn func(database, policy string, shardID uint64, enabled bool) error + WriteToShardFn func(shardID uint64, points []models.Point) error + + RestoreShardFn func(id uint64, r io.Reader) error + BackupShardFn func(id uint64, since time.Time, w io.Writer) error + + DeleteDatabaseFn func(name string) error + DeleteMeasurementFn func(database, name string) error + DeleteRetentionPolicyFn func(database, name string) error + DeleteShardFn func(id uint64) error + DeleteSeriesFn func(database string, sources []influxql.Source, condition influxql.Expr) error + DatabaseIndexFn func(name string) *tsdb.DatabaseIndex + ShardIteratorCreatorFn func(id uint64) influxql.IteratorCreator +} + +func (s *TSDBStore) CreateShard(database, policy string, shardID uint64, enabled bool) error { + if s.CreateShardFn == nil { + return nil + } + return s.CreateShardFn(database, policy, shardID, enabled) +} + +func (s *TSDBStore) WriteToShard(shardID uint64, points []models.Point) error { + return s.WriteToShardFn(shardID, points) +} + +func (s *TSDBStore) RestoreShard(id uint64, r io.Reader) error { + return s.RestoreShardFn(id, r) +} + +func (s *TSDBStore) BackupShard(id uint64, since time.Time, w io.Writer) error { + return s.BackupShardFn(id, since, w) +} + +func (s *TSDBStore) DeleteDatabase(name string) error { + return s.DeleteDatabaseFn(name) +} + +func (s *TSDBStore) DeleteMeasurement(database, name string) error { + return s.DeleteMeasurementFn(database, name) +} + +func (s *TSDBStore) DeleteRetentionPolicy(database, name string) error { + return s.DeleteRetentionPolicyFn(database, name) +} + +func (s *TSDBStore) DeleteShard(id uint64) error { + return s.DeleteShardFn(id) +} + +func (s *TSDBStore) DeleteSeries(database string, sources []influxql.Source, condition influxql.Expr) error { + return s.DeleteSeriesFn(database, sources, condition) +} + +func (s *TSDBStore) IteratorCreator(shards []meta.ShardInfo, opt *influxql.SelectOptions) (influxql.IteratorCreator, error) { + // Generate iterators for each node. 
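+	// Shards without a registered iterator creator are skipped.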
+ ics := make([]influxql.IteratorCreator, 0) + if err := func() error { + for _, shard := range shards { + ic := s.ShardIteratorCreator(shard.ID) + if ic == nil { + continue + } + ics = append(ics, ic) + } + + return nil + }(); err != nil { + influxql.IteratorCreators(ics).Close() + return nil, err + } + + return influxql.IteratorCreators(ics), nil +} + +func (s *TSDBStore) ShardIteratorCreator(id uint64) influxql.IteratorCreator { + return s.ShardIteratorCreatorFn(id) +} + +func (s *TSDBStore) DatabaseIndex(name string) *tsdb.DatabaseIndex { + return s.DatabaseIndexFn(name) +} + +func (s *TSDBStore) Measurements(database string, cond influxql.Expr) ([]string, error) { + return nil, nil +} + +func (s *TSDBStore) TagValues(database string, cond influxql.Expr) ([]tsdb.TagValues, error) { + return nil, nil +} + +// MustParseQuery parses s into a query. Panic on error. +func MustParseQuery(s string) *influxql.Query { + q, err := influxql.ParseQuery(s) + if err != nil { + panic(err) + } + return q +} + +// ReadAllResults reads all results from c and returns as a slice. +func ReadAllResults(c <-chan *influxql.Result) []*influxql.Result { + var a []*influxql.Result + for result := range c { + a = append(a, result) + } + return a +} + +// IteratorCreator is a mockable implementation of IteratorCreator. +type IteratorCreator struct { + CreateIteratorFn func(opt influxql.IteratorOptions) (influxql.Iterator, error) + FieldDimensionsFn func(sources influxql.Sources) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) + ExpandSourcesFn func(sources influxql.Sources) (influxql.Sources, error) +} + +func (ic *IteratorCreator) CreateIterator(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return ic.CreateIteratorFn(opt) +} + +func (ic *IteratorCreator) FieldDimensions(sources influxql.Sources) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { + return ic.FieldDimensionsFn(sources) +} + +func (ic *IteratorCreator) ExpandSources(sources influxql.Sources) (influxql.Sources, error) { + return ic.ExpandSourcesFn(sources) +} + +// FloatIterator is a represents an iterator that reads from a slice. +type FloatIterator struct { + Points []influxql.FloatPoint + stats influxql.IteratorStats +} + +func (itr *FloatIterator) Stats() influxql.IteratorStats { return itr.stats } +func (itr *FloatIterator) Close() error { return nil } + +// Next returns the next value and shifts it off the beginning of the points slice. +func (itr *FloatIterator) Next() (*influxql.FloatPoint, error) { + if len(itr.Points) == 0 { + return nil, nil + } + + v := &itr.Points[0] + itr.Points = itr.Points[1:] + return v, nil +} diff -Nru influxdb-0.10.0+dfsg1/debian/changelog influxdb-1.1.1+dfsg1/debian/changelog --- influxdb-0.10.0+dfsg1/debian/changelog 2016-02-04 22:47:04.000000000 +0000 +++ influxdb-1.1.1+dfsg1/debian/changelog 2017-02-28 23:20:09.000000000 +0000 @@ -1,3 +1,127 @@ +influxdb (1.1.1+dfsg1-4) unstable; urgency=medium + + * Disable TestEngine_Backup. (Closes: #850282) + * Re-enable i386 tests. + + -- Alexandre Viau Tue, 28 Feb 2017 18:20:09 -0500 + +influxdb (1.1.1+dfsg1-3) unstable; urgency=medium + + * Disable tests on i386 (Closes: #850282) + + -- Alexandre Viau Tue, 28 Feb 2017 15:24:05 -0500 + +influxdb (1.1.1+dfsg1-2) unstable; urgency=medium + + * Lintian overrides. + + -- Alexandre Viau Thu, 22 Dec 2016 00:50:08 -0500 + +influxdb (1.1.1+dfsg1-1) unstable; urgency=medium + + [ Alexandre Viau ] + * Import Guillem's work. + * Refresh jwt-v2.patch. 
+ * Update influxdb.conf. + * Depend on newer golang-github-jwilder-encoding. + * Adapt disable-reporting.patch. + * Copy over iql files. + * Fix tests in jwt-v2.patch. + + [ Guillem Jover ] + * New upstream release. (Closes: #846576) + * Fix build to work on github.com/dgrijalva/jwt-go v2. + * Pass --no-close to start-stop-daemon --start to get actual logging. + + -- Alexandre Viau Tue, 20 Dec 2016 21:25:35 -0500 + +influxdb (1.0.2+dfsg1-1) unstable; urgency=medium + + [ Alexandre Viau ] + * Update default configuration file. + + [ Guillem Jover ] + * New upstream release. (Closes: #840656) + + -- Alexandre Viau Tue, 25 Oct 2016 13:19:04 -0400 + +influxdb (0.13.0+dfsg1-5) unstable; urgency=medium + + [ Alexandre Viau ] + * Prepare upload + * d/control: Add InfluxDB dependency on lsb-base (>= 3.0-6) + * Remove unused hardening-no-relro override + + [ Guillem Jover ] + * Remove existing static files (Closes: #835336) + * Improve and fix init script (Closes: #840636) + + [ Tim Potter ] + * Update influxdb-usage-client B-D to use backports + + -- Alexandre Viau Sat, 15 Oct 2016 14:12:16 -0400 + +influxdb (0.13.0+dfsg1-4) unstable; urgency=medium + + * Replace uglifyjs by yui-compressor + + -- Alexandre Viau Thu, 28 Jul 2016 13:58:37 -0400 + +influxdb (0.13.0+dfsg1-3) unstable; urgency=medium + + * Create var and logs directories with debhelper (Closes: #831982) + + -- Alexandre Viau Fri, 22 Jul 2016 10:59:15 -0400 + +influxdb (0.13.0+dfsg1-2) unstable; urgency=medium + + * d/rules clean: only build statik.go using + go generate (Closes: #829164) + + -- Alexandre Viau Fri, 01 Jul 2016 14:45:07 +0200 + +influxdb (0.13.0+dfsg1-1) unstable; urgency=medium + + * New upstream version + * Bumped Standards-Version to 3.9.8 + * Hand over maintenance to pkg-go, set myself as + uploader instead + + -- Alexandre Viau Sat, 25 Jun 2016 22:21:05 +0200 + +influxdb (0.12.0+dfsg1-3) unstable; urgency=medium + + * Fixed unhandled symlink to directory conversion (Closes: #823571) + + -- Alexandre Viau Tue, 10 May 2016 11:54:42 -0400 + +influxdb (0.12.0+dfsg1-2) unstable; urgency=medium + + * Added warning in NEWS (Closes: #822491) + + -- Alexandre Viau Tue, 03 May 2016 22:08:16 -0400 + +influxdb (0.12.0+dfsg1-1) unstable; urgency=medium + + * New upstream version + * Added tmpl dependency + * Updated d/copyright + + -- Alexandre Viau Wed, 06 Apr 2016 17:19:45 -0400 + +influxdb (0.10.2+dfsg1-2) unstable; urgency=medium + + * Temporarly disable tests (Closes: #818069) + + -- Alexandre Viau Sun, 20 Mar 2016 11:11:21 -0400 + +influxdb (0.10.2+dfsg1-1) unstable; urgency=medium + + * New upstream version + * Updated Standards-Version to 3.9.7 (no changes) + + -- Alexandre Viau Tue, 08 Mar 2016 10:36:30 -0500 + influxdb (0.10.0+dfsg1-1) unstable; urgency=medium * New upstream version diff -Nru influxdb-0.10.0+dfsg1/debian/control influxdb-1.1.1+dfsg1/debian/control --- influxdb-0.10.0+dfsg1/debian/control 2016-02-04 22:47:04.000000000 +0000 +++ influxdb-1.1.1+dfsg1/debian/control 2017-02-28 23:20:09.000000000 +0000 @@ -2,14 +2,15 @@ Section: database Priority: extra Homepage: https://influxdata.com/time-series-platform/influxdb/ -Maintainer: Alexandre Viau -Uploaders: Debian Go Packaging Team +Maintainer: Debian Go Packaging Team +Uploaders: Alexandre Viau , Tim Potter Build-Depends: debhelper (>= 9), dh-golang (>=1.9), dh-systemd, golang-go, - node-uglify, + yui-compressor, golang-statik, + tmpl, libjs-bootstrap (>= 3.0), libjs-jquery (>=1.9.1), # Golang libraries below - Shared deps with dev package @@ -20,29 +21,29 
@@ golang-github-boltdb-bolt-dev, golang-github-peterh-liner-dev, golang-github-bmizerany-pat-dev, - golang-gopkg-fatih-pool.v2-dev, golang-github-rakyll-statik-dev, + golang-github-retailnext-hllpp-dev, golang-github-kimor79-gollectd-dev, golang-collectd-dev, - golang-github-hashicorp-raft-boltdb-dev, - golang-github-hashicorp-raft-dev, golang-github-davecgh-go-spew-dev, + golang-github-dgrijalva-jwt-go-dev, golang-github-dgryski-go-bitstream-dev, golang-github-dgryski-go-bits-dev, - golang-github-jwilder-encoding-dev, + golang-github-jwilder-encoding-dev (>= 0.0~git20160927.0.4dada27), golang-github-influxdb-enterprise-client-dev, golang-github-paulbellamy-ratecounter-dev, - golang-github-influxdb-usage-client-dev, + golang-github-influxdb-usage-client-dev (>= 0.0~git20151204.0.475977e-2~), golang-github-golang-snappy-dev -Standards-Version: 3.9.6 +Standards-Version: 3.9.8 Vcs-Git: https://anonscm.debian.org/git/pkg-go/packages/influxdb.git Vcs-Browser: https://anonscm.debian.org/cgit/pkg-go/packages/influxdb.git -XS-Go-Import-Path: github.com/influxdb/influxdb +XS-Go-Import-Path: github.com/influxdata/influxdb Package: golang-github-influxdb-influxdb-dev Architecture: all Replaces: influxdb-dev (<< 0.9.2.1+dfsg1-2) Breaks: influxdb-dev (<< 0.9.2.1+dfsg1-2) +Pre-Depends: ${misc:Pre-Depends} Depends: ${misc:Depends}, golang-go.crypto-dev | golang-golang-x-crypto-dev, golang-toml-dev | golang-github-burntsushi-toml-dev, @@ -51,16 +52,15 @@ golang-github-boltdb-bolt-dev, golang-github-peterh-liner-dev, golang-github-bmizerany-pat-dev, - golang-gopkg-fatih-pool.v2-dev, golang-github-rakyll-statik-dev, + golang-github-retailnext-hllpp-dev, golang-github-kimor79-gollectd-dev, golang-collectd-dev, - golang-github-hashicorp-raft-boltdb-dev, - golang-github-hashicorp-raft-dev, golang-github-davecgh-go-spew-dev, + golang-github-dgrijalva-jwt-go-dev, golang-github-dgryski-go-bitstream-dev, golang-github-dgryski-go-bits-dev, - golang-github-jwilder-encoding-dev, + golang-github-jwilder-encoding-dev (>= 0.0~git20160927.0.4dada27), golang-github-influxdb-enterprise-client-dev, golang-github-paulbellamy-ratecounter-dev, golang-github-influxdb-usage-client-dev, @@ -89,7 +89,8 @@ Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends}, - adduser + adduser, + lsb-base (>= 3.0-6) Built-Using: ${misc:Built-Using} Description: Scalable datastore for metrics, events, and real-time analytics InfluxDB is a time series, metrics, and analytics database. It’s written diff -Nru influxdb-0.10.0+dfsg1/debian/copyright influxdb-1.1.1+dfsg1/debian/copyright --- influxdb-0.10.0+dfsg1/debian/copyright 2016-02-04 22:47:04.000000000 +0000 +++ influxdb-1.1.1+dfsg1/debian/copyright 2017-02-28 23:20:09.000000000 +0000 @@ -1,8 +1,8 @@ Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: influxdb Source: https://github.com/influxdata/influxdb -Files-Excluded: shared/admin/js/vendor/* - shared/admin/css/bootstrap.css +Files-Excluded: services/admin/assets/js/vendor/* + services/admin/assets/css/bootstrap.css Files: * @@ -15,10 +15,13 @@ Files: uuid/uuid.go Copyright: 2012 The gocql Authors License: BSD-3-clause -Comment: The header mentions a BSD-style license. Upstream license - can be found here: https://github.com/gocql/gocql/blob/master/LICENSE. - I have sent PR to InfluxDB so that they include the license. 
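
The import-path move recorded above (XS-Go-Import-Path now github.com/influxdata/influxdb, with the -dev package's symlink keeping the old github.com/influxdb/influxdb path resolvable) is what dependent Go code builds against. A minimal sketch, assuming the client/v2 sub-package shipped in the 1.x tree; the address and timeout are placeholders, not values taken from the packaging:

```
package main

import (
	"log"
	"time"

	// Formerly imported as github.com/influxdb/influxdb/client/v2.
	client "github.com/influxdata/influxdb/client/v2"
)

func main() {
	// Placeholder address; adjust to the local [http] bind-address.
	c, err := client.NewHTTPClient(client.HTTPConfig{Addr: "http://localhost:8086"})
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Ping checks connectivity; the 5s timeout here is arbitrary.
	if _, _, err := c.Ping(5 * time.Second); err != nil {
		log.Fatal(err)
	}
	log.Println("connected")
}
```

Code still importing the old path continues to compile through the compatibility symlink installed by the -dev package.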
See: - https://github.com/influxdb/influxdb/pull/3508 + +Files: pkg/deep/equal.go +Copyright: 2009 the The Go Authors +License: BSD-3-clause +Comment: The header mentions a BSD-style license. I have sent PR to InfluxDB + so that they include the license. See: + https://github.com/influxdata/influxdb/pull/6247 Files: debian/* Copyright: 2015 Alexandre Viau diff -Nru influxdb-0.10.0+dfsg1/debian/golang-github-influxdb-influxdb-dev.links influxdb-1.1.1+dfsg1/debian/golang-github-influxdb-influxdb-dev.links --- influxdb-0.10.0+dfsg1/debian/golang-github-influxdb-influxdb-dev.links 2016-02-04 22:47:04.000000000 +0000 +++ influxdb-1.1.1+dfsg1/debian/golang-github-influxdb-influxdb-dev.links 2017-02-28 23:20:09.000000000 +0000 @@ -1 +1 @@ -usr/share/gocode/src/github.com/influxdb/influxdb usr/share/gocode/src/github.com/influxdata/influxdb +usr/share/gocode/src/github.com/influxdata/influxdb usr/share/gocode/src/github.com/influxdb/influxdb diff -Nru influxdb-0.10.0+dfsg1/debian/golang-github-influxdb-influxdb-dev.maintscript influxdb-1.1.1+dfsg1/debian/golang-github-influxdb-influxdb-dev.maintscript --- influxdb-0.10.0+dfsg1/debian/golang-github-influxdb-influxdb-dev.maintscript 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/debian/golang-github-influxdb-influxdb-dev.maintscript 2017-02-28 23:20:09.000000000 +0000 @@ -0,0 +1 @@ +symlink_to_dir /usr/share/gocode/src/github.com/influxdata/influxdb ../influxdb/influxdb 0.12.0+dfsg1-3~ diff -Nru influxdb-0.10.0+dfsg1/debian/influxdb.conf influxdb-1.1.1+dfsg1/debian/influxdb.conf --- influxdb-0.10.0+dfsg1/debian/influxdb.conf 2016-02-04 22:47:04.000000000 +0000 +++ influxdb-1.1.1+dfsg1/debian/influxdb.conf 2017-02-28 23:20:09.000000000 +0000 @@ -1,10 +1,13 @@ ### Welcome to the InfluxDB configuration file. -# Once every 24 hours InfluxDB will report anonymous data to m.influxdb.com -# The data includes raft id (random 8 bytes), os, arch, version, and metadata. -# We don't track ip addresses of servers reporting. This is only used -# to track the number of instances running and the versions, which -# is very helpful for us. +# The values in this file override the default values used by the system if +# a config option is not specified. The commented out lines are the the configuration +# field and the default value used. Uncommentting a line and changing the value +# will change the value used at runtime when the process is restarted. + +# Once every 24 hours InfluxDB will report usage data to usage.influxdata.com +# The data includes a random ID, os, arch, version, the number of series and other +# usage data. No data from user databases is ever transmitted. # Change this option to true to enable reporting. reporting-enabled = false @@ -21,19 +24,14 @@ ### [meta] - # Controls if this node should run the metaservice and participate in the Raft group - enabled = true - # Where the metadata/raft database is stored dir = "/var/lib/influxdb/meta" - bind-address = ":8088" - retention-autocreate = true - election-timeout = "1s" - heartbeat-timeout = "1s" - leader-lease-timeout = "500ms" - commit-timeout = "50ms" - cluster-tracing = false + # Automatically create a default retention policy when creating a database. + # retention-autocreate = true + + # If log messages are printed for the meta service + # logging-enabled = true ### ### [data] @@ -45,41 +43,15 @@ ### [data] - # Controls if this node holds time series data shards in the cluster - enabled = true - + # The directory where the TSM storage engine stores TSM files. 
dir = "/var/lib/influxdb/data" - # The following WAL settings are for the b1 storage engine used in 0.9.2. They won't - # apply to any new shards created after upgrading to a version > 0.9.3. - max-wal-size = 104857600 # Maximum size the WAL can reach before a flush. Defaults to 100MB. - wal-flush-interval = "10m" # Maximum time data can sit in WAL before a flush. - wal-partition-flush-delay = "2s" # The delay time between each WAL partition being flushed. - - # These are the WAL settings for the storage engine >= 0.9.3 + # The directory where the TSM storage engine stores WAL files. wal-dir = "/var/lib/influxdb/wal" - wal-logging-enabled = true - data-logging-enabled = true - # When a series in the WAL in-memory cache reaches this size in bytes it is marked as ready to - # flush to the index - # wal-ready-series-size = 25600 - - # Flush and compact a partition once this ratio of series are over the ready size - # wal-compaction-threshold = 0.6 - - # Force a flush and compaction if any series in a partition gets above this size in bytes - # wal-max-series-size = 2097152 - - # Force a flush of all series and full compaction if there have been no writes in this - # amount of time. This is useful for ensuring that shards that are cold for writes don't - # keep a bunch of data cached in memory and in the WAL. - # wal-flush-cold-interval = "10m" - - # Force a partition to flush its largest series if it reaches this approximate size in - # bytes. Remember there are 5 partitions so you'll need at least 5x this amount of memory. - # The more memory you have, the bigger this can be. - # wal-partition-size-threshold = 20971520 + # Trace logging provides more verbose output around the tsm engine. Turning + # this on can provide more useful output for debugging tsm engine issues. + # trace-logging-enabled = false # Whether queries should be logged before execution. Very useful for troubleshooting, but will # log any sensitive data contained within a query. @@ -89,7 +61,7 @@ # CacheMaxMemorySize is the maximum size a shard's cache can # reach before it starts rejecting writes. - # cache-max-memory-size = 524288000 + # cache-max-memory-size = 1048576000 # CacheSnapshotMemorySize is the size at which the engine will # snapshot the cache and write it to a TSM file, freeing up memory @@ -98,60 +70,60 @@ # CacheSnapshotWriteColdDuration is the length of time at # which the engine will snapshot the cache and write it to # a new TSM file if the shard hasn't received writes or deletes - # cache-snapshot-write-cold-duration = "1h" - - # MinCompactionFileCount is the minimum number of TSM files - # that need to exist before a compaction cycle will run - # compact-min-file-count = 3 + # cache-snapshot-write-cold-duration = "10m" # CompactFullWriteColdDuration is the duration at which the engine # will compact all TSM files in a shard if it hasn't received a # write or delete - # compact-full-write-cold-duration = "24h" + # compact-full-write-cold-duration = "4h" + + # The maximum series allowed per database before writes are dropped. This limit can prevent + # high cardinality issues at the database level. This limit can be disabled by setting it to + # 0. + # max-series-per-database = 1000000 - # MaxPointsPerBlock is the maximum number of points in an encoded - # block in a TSM file. Larger numbers may yield better compression - # but could incur a performance peanalty when querying - # max-points-per-block = 1000 + # The maximum number of tag values per tag that are allowed before writes are dropped. 
This limit + # can prevent high cardinality tag values from being written to a measurement. This limit can be + # disabled by setting it to 0. + # max-values-per-tag = 100000 ### -### [hinted-handoff] +### [coordinator] ### -### Controls the hinted handoff feature, which allows nodes to temporarily -### store queued data when one node of a cluster is down for a short period -### of time. +### Controls the clustering service configuration. ### -[hinted-handoff] - enabled = true - dir = "/var/lib/influxdb/hh" - max-size = 1073741824 - max-age = "168h" - retry-rate-limit = 0 +# [coordinator] + # The default time a write request will wait until a "timeout" error is returned to the caller. + # write-timeout = "10s" - # Hinted handoff will start retrying writes to down nodes at a rate of once per second. - # If any error occurs, it will backoff in an exponential manner, until the interval - # reaches retry-max-interval. Once writes to all nodes are successfully completed the - # interval will reset to retry-interval. - retry-interval = "1s" - retry-max-interval = "1m" + # The maximum number of concurrent queries allowed to be executing at one time. If a query is + # executed and exceeds this limit, an error is returned to the caller. This limit can be disabled + # by setting it to 0. + # max-concurrent-queries = 0 - # Interval between running checks for data that should be purged. Data is purged from - # hinted-handoff queues for two reasons. 1) The data is older than the max age, or - # 2) the target node has been dropped from the cluster. Data is never dropped until - # it has reached max-age however, for a dropped node or not. - purge-interval = "1h" + # The maximum time a query will is allowed to execute before being killed by the system. This limit + # can help prevent run away queries. Setting the value to 0 disables the limit. + # query-timeout = "0s" -### -### [cluster] -### -### Controls non-Raft cluster behavior, which generally includes how data is -### shared across shards. -### + # The the time threshold when a query will be logged as a slow query. This limit can be set to help + # discover slow or resource intensive queries. Setting the value to 0 disables the slow query logging. + # log-queries-after = "0s" + + # The maximum number of points a SELECT can process. A value of 0 will make the maximum + # point count unlimited. + # max-select-point = 0 + + # The maximum number of series a SELECT can run. A value of 0 will make the maximum series + # count unlimited. + + # The maximum number of series a SELECT can run. A value of zero will make the maximum series + # count unlimited. + # max-select-series = 0 -[cluster] - shard-writer-timeout = "5s" # The time within which a remote shard must respond to a write request. - write-timeout = "10s" # The time within which a write request must complete on the cluster. + # The maxium number of group by time bucket a SELECt can create. A value of zero will max the maximum + # number of buckets unlimited. + # max-select-buckets = 0 ### ### [retention] @@ -159,9 +131,12 @@ ### Controls the enforcement of retention policies for evicting old data. ### -[retention] - enabled = true - check-interval = "30m" +# [retention] + # Determines whether retention policy enforcment enabled. + # enabled = true + + # The interval of time when retention policy enforcement checks run. + # check-interval = "30m" ### ### [shard-precreation] @@ -171,10 +146,16 @@ ### future, will ever be created. 
Shards are never precreated that would be wholly ### or partially in the past. -[shard-precreation] - enabled = true - check-interval = "10m" - advance-period = "30m" +# [shard-precreation] + # Determines whether shard pre-creation service is enabled. + # enabled = true + + # The interval of time when the check to pre-create new shards runs. + # check-interval = "10m" + + # The default period ahead of the endtime of a shard group that its successor + # group is created. + # advance-period = "30m" ### ### Controls the system self-monitoring, statistics and diagnostics. @@ -185,10 +166,15 @@ ### and a replication factor of 1, if it does not exist. In all cases the ### this retention policy is configured as the default for the database. -[monitor] - store-enabled = true # Whether to record statistics internally. - store-database = "_internal" # The destination database for recorded statistics - store-interval = "10s" # The interval at which to record statistics +# [monitor] + # Whether to record statistics internally. + # store-enabled = true + + # The destination database for recorded statistics + # store-database = "_internal" + + # The interval at which to record statistics + # store-interval = "10s" ### ### [admin] @@ -196,12 +182,20 @@ ### Controls the availability of the built-in, web-based admin interface. If HTTPS is ### enabled for the admin interface, HTTPS must also be enabled on the [http] service. ### +### NOTE: This interface is deprecated as of 1.1.0 and will be removed in a future release. -[admin] - enabled = true - bind-address = ":8083" - https-enabled = false - https-certificate = "/etc/ssl/influxdb.pem" +# [admin] + # Determines whether the admin service is enabled. + # enabled = false + + # The default bind address used by the admin service. + # bind-address = ":8083" + + # Whether the admin service should use HTTPS. + # https-enabled = false + + # The SSL certificate used when HTTPS is enabled. + # https-certificate = "/etc/ssl/influxdb.pem" ### ### [http] @@ -210,15 +204,81 @@ ### mechanism for getting data into and out of InfluxDB. ### -[http] - enabled = true - bind-address = ":8086" - auth-enabled = false - log-enabled = true - write-tracing = false - pprof-enabled = false - https-enabled = false - https-certificate = "/etc/ssl/influxdb.pem" +# [http] + # Determines whether HTTP endpoint is enabled. + # enabled = true + + # The bind address used by the HTTP service. + # bind-address = ":8086" + + # Determines whether HTTP authentication is enabled. + # auth-enabled = false + + # The default realm sent back when issuing a basic auth challenge. + # realm = "InfluxDB" + + # Determines whether HTTP request logging is enable.d + # log-enabled = true + + # Determines whether detailed write logging is enabled. + # write-tracing = false + + # Determines whether the pprof endpoint is enabled. This endpoint is used for + # troubleshooting and monitoring. + # pprof-enabled = true + + # Determines whether HTTPS is enabled. + # https-enabled = false + + # The SSL certificate to use when HTTPS is enabled. + # https-certificate = "/etc/ssl/influxdb.pem" + + # Use a separate private key location. + # https-private-key = "" + + # The JWT auth shared secret to validate requests using JSON web tokens. + # shared-sercret = "" + + # The default chunk size for result sets that should be chunked. + # max-row-limit = 10000 + + # The maximum number of HTTP connections that may be open at once. New connections that + # would exceed this limit are dropped. 
Setting this value to 0 disables the limit. + # max-connection-limit = 0 + + # Enable http service over unix domain socket + # unix-socket-enabled = false + + # The path of the unix domain socket. + # bind-socket = "/var/run/influxdb.sock" + +### +### [subscriber] +### +### Controls the subscriptions, which can be used to fork a copy of all data +### received by the InfluxDB host. +### + +# [subscriber] + # Determines whether the subscriber service is enabled. + # enabled = true + + # The default timeout for HTTP writes to subscribers. + # http-timeout = "30s" + + # Allows insecure HTTPS connections to subscribers. This is useful when testing with self- + # signed certificates. + # insecure-skip-verify = false + + # The path to the PEM encoded CA certs file. If the empty string, the default system certs will be used + # ca-certs = "" + + # The number of writer goroutines processing the write channel. + # write-concurrency = 40 + + # The number of in-flight writes buffered in the write channel. + # write-buffer-size = 1000 + ### ### [[graphite]] @@ -226,87 +286,107 @@ ### Controls one or many listeners for Graphite data. ### -[[graphite]] - enabled = false +# [[graphite]] + # Determines whether the graphite endpoint is enabled. + # enabled = false # database = "graphite" + # retention-policy = "" # bind-address = ":2003" # protocol = "tcp" # consistency-level = "one" - # name-separator = "." # These next lines control how batching works. You should have this enabled # otherwise you could get dropped metrics or poor performance. Batching # will buffer points in memory if you have many coming in. - # batch-size = 1000 # will flush if this many points get buffered - # batch-pending = 5 # number of batches that may be pending in memory - # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit - # udp-read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max. - - ## "name-schema" configures tag names for parsing the metric name from graphite protocol; - ## separated by `name-separator`. - ## The "measurement" tag is special and the corresponding field will become - ## the name of the metric. - ## e.g. "type.host.measurement.device" will parse "server.localhost.cpu.cpu0" as - ## { - ## measurement: "cpu", - ## tags: { - ## "type": "server", - ## "host": "localhost, - ## "device": "cpu0" - ## } - ## } - # name-schema = "type.host.measurement.device" - - ## If set to true, when the input metric name has more fields than `name-schema` specified, - ## the extra fields will be ignored. - ## Otherwise an error will be logged and the metric rejected. - # ignore-unnamed = true + # Flush if this many points get buffered + # batch-size = 5000 + + # number of batches that may be pending in memory + # batch-pending = 10 + + # Flush at least this often even if we haven't hit buffer limit + # batch-timeout = "1s" + + # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max. + # udp-read-buffer = 0 + + ### This string joins multiple matching 'measurement' values providing more control over the final measurement name. + # separator = "." + + ### Default tags that will be added to all metrics. These can be overridden at the template level + ### or by tags extracted from metric + # tags = ["region=us-east", "zone=1c"] + + ### Each template line requires a template pattern. It can have an optional + ### filter before the template and separated by spaces. 
It can also have optional extra + ### tags following the template. Multiple tags should be separated by commas and no spaces + ### similar to the line protocol format. There can be only one default template. + # templates = [ + # "*.app env.service.resource.measurement", + # # Default template + # "server.*", + # ] ### ### [collectd] ### -### Controls the listener for collectd data. +### Controls one or many listeners for collectd data. ### -[collectd] - enabled = false - # bind-address = "" - # database = "" - # typesdb = "" +# [[collectd]] + # enabled = false + # bind-address = ":25826" + # database = "collectd" + # retention-policy = "" + # typesdb = "/usr/share/collectd/types.db" # These next lines control how batching works. You should have this enabled # otherwise you could get dropped metrics or poor performance. Batching # will buffer points in memory if you have many coming in. - # batch-size = 1000 # will flush if this many points get buffered - # batch-pending = 5 # number of batches that may be pending in memory - # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit - # read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max. + # Flush if this many points get buffered + # batch-size = 5000 + + # Number of batches that may be pending in memory + # batch-pending = 10 + + # Flush at least this often even if we haven't hit buffer limit + # batch-timeout = "10s" + + # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max. + # read-buffer = 0 ### ### [opentsdb] ### -### Controls the listener for OpenTSDB data. +### Controls one or many listeners for OpenTSDB data. ### -[opentsdb] - enabled = false +# [[opentsdb]] + # enabled = false # bind-address = ":4242" # database = "opentsdb" # retention-policy = "" # consistency-level = "one" # tls-enabled = false - # certificate= "" - # log-point-errors = true # Log an error for every malformed point. + # certificate= "/etc/ssl/influxdb.pem" + + # Log an error for every malformed point. + # log-point-errors = true # These next lines control how batching works. You should have this enabled # otherwise you could get dropped metrics or poor performance. Only points # metrics received over the telnet protocol undergo batching. - # batch-size = 1000 # will flush if this many points get buffered - # batch-pending = 5 # number of batches that may be pending in memory - # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit + # Flush if this many points get buffered + # batch-size = 1000 + + # Number of batches that may be pending in memory + # batch-pending = 5 + + # Flush at least this often even if we haven't hit buffer limit + # batch-timeout = "1s" ### ### [[udp]] @@ -314,9 +394,9 @@ ### Controls the listeners for InfluxDB line protocol data via UDP. ### -[[udp]] - enabled = false - # bind-address = "" +# [[udp]] + # enabled = false + # bind-address = ":8089" # database = "udp" # retention-policy = "" @@ -324,13 +404,17 @@ # otherwise you could get dropped metrics or poor performance. Batching # will buffer points in memory if you have many coming in. - # batch-size = 1000 # will flush if this many points get buffered - # batch-pending = 5 # number of batches that may be pending in memory - # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit - # read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max. 
+ # Flush if this many points get buffered + # batch-size = 5000 + + # Number of batches that may be pending in memory + # batch-pending = 10 - # set the expected UDP payload size; lower values tend to yield better performance, default is max UDP size 65536 - # udp-payload-size = 65536 + # Will flush at least this often even if we haven't hit buffer limit + # batch-timeout = "1s" + + # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max. + # read-buffer = 0 ### ### [continuous_queries] @@ -338,7 +422,12 @@ ### Controls how continuous queries are run within InfluxDB. ### -[continuous_queries] - log-enabled = true - enabled = true - # run-interval = "1s" # interval for how often continuous queries will be checked if they need to run +# [continuous_queries] + # Determiens whether the continuous query service is enabled. + # enabled = true + + # Controls whether queries are logged when executed by the CQ service. + # log-enabled = true + + # interval for how often continuous queries will be checked if they need to run + # run-interval = "1s" diff -Nru influxdb-0.10.0+dfsg1/debian/influxdb.dirs influxdb-1.1.1+dfsg1/debian/influxdb.dirs --- influxdb-0.10.0+dfsg1/debian/influxdb.dirs 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/debian/influxdb.dirs 2017-02-28 23:20:09.000000000 +0000 @@ -0,0 +1,2 @@ +var/lib/influxdb +var/log/influxdb diff -Nru influxdb-0.10.0+dfsg1/debian/influxdb.init influxdb-1.1.1+dfsg1/debian/influxdb.init --- influxdb-0.10.0+dfsg1/debian/influxdb.init 2016-02-04 22:47:04.000000000 +0000 +++ influxdb-1.1.1+dfsg1/debian/influxdb.init 2017-02-28 23:20:09.000000000 +0000 @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/sh ### BEGIN INIT INFO # Provides: influxdb @@ -32,13 +32,13 @@ GROUP=influxdb if [ -r /lib/lsb/init-functions ]; then - source /lib/lsb/init-functions + . /lib/lsb/init-functions fi DEFAULT=/etc/default/influxdb if [ -r $DEFAULT ]; then - source $DEFAULT + . $DEFAULT fi if [ -z "$STDOUT" ]; then @@ -58,48 +58,6 @@ OPEN_FILE_LIMIT=65536 -function pidofproc() { - if [ $# -ne 3 ]; then - echo "Expected three arguments, e.g. $0 -p pidfile daemon-name" - fi - - pid=$(pgrep -f $3) - local pidfile=$(cat $2) - - if [ "x$pidfile" == "x" ]; then - return 1 - fi - - if [ "x$pid" != "x" -a "$pidfile" == "$pid" ]; then - return 0 - fi - - return 1 -} - -function killproc() { - if [ $# -ne 3 ]; then - echo "Expected three arguments, e.g. $0 -p pidfile signal" - fi - - PID=`cat $2` - - /bin/kill -s $3 $PID - while true; do - pidof `basename $daemon` >/dev/null - if [ $? -ne 0 ]; then - return 0 - fi - - sleep 1 - n=$(expr $n + 1) - if [ $n -eq 30 ]; then - /bin/kill -s SIGKILL $PID - return 0 - fi - done -} - # Process name ( For display ) name=influxd desc=database @@ -122,45 +80,10 @@ # If the daemon is not there, then exit. [ -x $daemon ] || exit 0 -function wait_for_startup() { - control=1 - while [ $control -lt 5 ] - do - if [ ! -e $pidfile ]; then - sleep 1 - control=$((control+1)) - else - break - fi - done -} - -function is_process_running() { - # Checked the PID file exists and check the actual status of process - if [ -e $pidfile ]; then - pidofproc -p $pidfile $daemon > /dev/null 2>&1 && status="0" || status="$?" - # If the status is SUCCESS then don't need to start again. - if [ "x$status" = "x0" ]; then - return 0 - else - return 1 - fi - else - return 1 - fi -} - case $1 in start) log_daemon_msg "Starting $desc" "$name" - # Check if it's running first - is_process_running - if [ $? 
-eq 0 ]; then - log_end_msg 0 - exit 0 - fi - # Bump the file limits, before launching the daemon. These will carry over to # launched processes. ulimit -n $OPEN_FILE_LIMIT @@ -170,41 +93,22 @@ exit 1 fi - if which start-stop-daemon > /dev/null 2>&1; then - start-stop-daemon --chuid $GROUP:$USER --start --quiet --pidfile $pidfile --exec $daemon -- -pidfile $pidfile -config $config $INFLUXD_OPTS >>$STDOUT 2>>$STDERR & - else - su $USER -c "nohup $daemon -pidfile $pidfile -config $config $INFLUXD_OPTS >>$STDOUT 2>>$STDERR &" - fi + start-stop-daemon --start --quiet --oknodo --exec $daemon \ + --chuid $GROUP:$USER --pidfile $pidfile --background --no-close \ + -- -pidfile $pidfile -config $config $INFLUXD_OPTS \ + >>$STDOUT 2>>$STDERR - wait_for_startup && is_process_running - if [ $? -ne 0 ]; then - log_progress_msg "$name process failed to start" - log_end_msg 1 - exit 1 - else - log_end_msg 0 - exit 0 - fi + log_end_msg $? ;; stop) log_daemon_msg "Stopping $desc" "$name" # Stop the daemon. - is_process_running - if [ $? -ne 0 ]; then - log_progress_msg "$name process is not running" - log_end_msg 0 - exit 0 # Exit - else - if killproc -p $pidfile SIGTERM && /bin/rm -rf $pidfile; then - log_end_msg 0 - exit 0 - else - log_end_msg 1 - exit 1 - fi - fi + start-stop-daemon --stop --quiet --oknodo \ + --exec $daemon --pidfile $pidfile + + log_end_msg $? ;; restart|force-reload) @@ -216,19 +120,7 @@ ;; status) - log_daemon_msg "Checking status of $desc" "$name" - - # Check the status of the process. - is_process_running - if [ $? -eq 0 ]; then - log_progress_msg "running" - log_end_msg 0 - exit 0 - else - log_progress_msg "apparently not running" - log_end_msg 1 - exit 1 - fi + status_of_proc $daemon $name ;; version) @@ -237,7 +129,7 @@ *) # For invalid arguments, print the usage message. - echo "Usage: $0 {start|stop|restart|status|version}" + echo "Usage: $0 {start|stop|force-reload|restart|status|version}" exit 2 ;; esac diff -Nru influxdb-0.10.0+dfsg1/debian/influxdb.lintian-overrides influxdb-1.1.1+dfsg1/debian/influxdb.lintian-overrides --- influxdb-0.10.0+dfsg1/debian/influxdb.lintian-overrides 2016-02-04 22:47:04.000000000 +0000 +++ influxdb-1.1.1+dfsg1/debian/influxdb.lintian-overrides 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -# The Go compiler is currently unable to produce read-only relocations -# (it produces static binaries). -hardening-no-relro usr/bin/influxd diff -Nru influxdb-0.10.0+dfsg1/debian/influxdb.postinst influxdb-1.1.1+dfsg1/debian/influxdb.postinst --- influxdb-0.10.0+dfsg1/debian/influxdb.postinst 2016-02-04 22:47:04.000000000 +0000 +++ influxdb-1.1.1+dfsg1/debian/influxdb.postinst 2017-02-28 23:20:09.000000000 +0000 @@ -27,13 +27,13 @@ adduser influxdb influxdb fi - # create data directory - mkdir -p /var/lib/influxdb - chown -R influxdb:influxdb /var/lib/influxdb + if [ -d /var/lib/influxdb ]; then + chown -R influxdb:influxdb /var/lib/influxdb + fi - # create logdir - mkdir -p /var/log/influxdb - chown -R influxdb:influxdb /var/log/influxdb + if [ -d /var/log/influxdb ]; then + chown -R influxdb:influxdb /var/log/influxdb + fi # create rundir mkdir -p /var/run/influxdb diff -Nru influxdb-0.10.0+dfsg1/debian/NEWS influxdb-1.1.1+dfsg1/debian/NEWS --- influxdb-0.10.0+dfsg1/debian/NEWS 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/debian/NEWS 2017-02-28 23:20:09.000000000 +0000 @@ -0,0 +1,8 @@ +influxdb (0.12.0+dfsg1-2) unstable; urgency=medium + + If you are upgrading from <0.11, you will have to create + a new database. 
InfluxDB 0.12.0 does not provide any tools + to perform an upgrade from 0.11. This is done by deleting + files in /var/lib/influxdb + + -- Alexandre Viau Tue, 03 May 2016 22:08:46 -0400 diff -Nru influxdb-0.10.0+dfsg1/debian/patches/disable-flaky-tests.patch influxdb-1.1.1+dfsg1/debian/patches/disable-flaky-tests.patch --- influxdb-0.10.0+dfsg1/debian/patches/disable-flaky-tests.patch 2016-02-04 22:47:04.000000000 +0000 +++ influxdb-1.1.1+dfsg1/debian/patches/disable-flaky-tests.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -Description: Disable flaky tests - Temporarily disable timing-dependent tests. Upstream is working on - fixing this. -Forwarded: https://github.com/influxdb/influxdb/issues/4102 -Author: Alexandre Viau -Last-Update: 2015-09-15 - ---- a/tsdb/engine/wal/wal_test.go -+++ b/tsdb/engine/wal/wal_test.go -@@ -281,6 +281,7 @@ - - // Ensure the wal forces a full flush after not having a write in a given interval of time - func TestWAL_CompactAfterTimeWithoutWrite(t *testing.T) { -+ t.Skip("Temporarily disabled") - log := openTestWAL() - - // set this low ---- a/cmd/influxd/run/server_test.go -+++ b/cmd/influxd/run/server_test.go -@@ -1061,6 +1061,7 @@ - - // Ensure the server works with tag queries. - func TestServer_Query_Tags(t *testing.T) { -+ t.Skip("Temporairly disabled") - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() diff -Nru influxdb-0.10.0+dfsg1/debian/patches/disable-reporting.patch influxdb-1.1.1+dfsg1/debian/patches/disable-reporting.patch --- influxdb-0.10.0+dfsg1/debian/patches/disable-reporting.patch 2016-02-04 22:47:04.000000000 +0000 +++ influxdb-1.1.1+dfsg1/debian/patches/disable-reporting.patch 2017-02-28 23:20:09.000000000 +0000 @@ -6,10 +6,15 @@ Author: Alexandre Viau Last-Update: 2015-08-11 +--- + cmd/influxd/run/config.go | 2 +- + cmd/influxd/run/server.go | 6 +++--- + 2 files changed, 4 insertions(+), 4 deletions(-) + --- a/cmd/influxd/run/config.go +++ b/cmd/influxd/run/config.go -@@ -58,7 +58,7 @@ - HintedHandoff hh.Config `toml:"hinted-handoff"` +@@ -56,7 +56,7 @@ + ContinuousQuery continuous_querier.Config `toml:"continuous_queries"` // Server reporting - ReportingDisabled bool `toml:"reporting-disabled"` @@ -28,16 +33,16 @@ // Profiling CPUProfile string -@@ -169,7 +169,7 @@ +@@ -147,7 +147,7 @@ - Monitor: monitor.New(c.Monitor), + MetaClient: meta.NewClient(c.Meta), - reportingDisabled: c.ReportingDisabled, + reportingEnabled: c.ReportingEnabled, - joinPeers: c.Meta.JoinPeers, - metaUseTLS: c.Meta.HTTPSEnabled, -@@ -478,7 +478,7 @@ + httpAPIAddr: c.HTTPD.BindAddress, + httpUseTLS: c.HTTPD.HTTPSEnabled, +@@ -434,7 +434,7 @@ } // Start the reporting service, if not disabled. @@ -48,12 +53,12 @@ --- a/cmd/influxd/run/server_helpers_test.go +++ b/cmd/influxd/run/server_helpers_test.go -@@ -214,7 +214,7 @@ - // NewConfig returns the default config with temporary paths. 
+@@ -229,7 +229,7 @@ func NewConfig() *run.Config { c := run.NewConfig() + c.BindAddress = "127.0.0.1:0" - c.ReportingDisabled = true + c.ReportingEnabled = false - c.Cluster.ShardWriterTimeout = toml.Duration(30 * time.Second) - c.Cluster.WriteTimeout = toml.Duration(30 * time.Second) + c.Coordinator.WriteTimeout = toml.Duration(30 * time.Second) c.Meta.Dir = MustTempFile() + diff -Nru influxdb-0.10.0+dfsg1/debian/patches/disable_testengine_backup.patch influxdb-1.1.1+dfsg1/debian/patches/disable_testengine_backup.patch --- influxdb-0.10.0+dfsg1/debian/patches/disable_testengine_backup.patch 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/debian/patches/disable_testengine_backup.patch 2017-02-28 23:20:09.000000000 +0000 @@ -0,0 +1,14 @@ +Description: Disable unstable test +Bug: 850282 +Author: Alexandre Viau + +--- a/tsdb/engine/tsm1/engine_test.go ++++ b/tsdb/engine/tsm1/engine_test.go +@@ -132,6 +132,7 @@ + + // Ensure that the engine will backup any TSM files created since the passed in time + func TestEngine_Backup(t *testing.T) { ++ t.Skip() + // Generate temporary file. + f, _ := ioutil.TempFile("", "tsm") + f.Close() diff -Nru influxdb-0.10.0+dfsg1/debian/patches/jwt-v2.patch influxdb-1.1.1+dfsg1/debian/patches/jwt-v2.patch --- influxdb-0.10.0+dfsg1/debian/patches/jwt-v2.patch 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/debian/patches/jwt-v2.patch 2017-02-28 23:20:09.000000000 +0000 @@ -0,0 +1,67 @@ +Description: Fix build with github.com/dgrijalva/jwt-go v2 +Author: Guillem Jover +Last-Update: 2016-12-02 + +--- + services/httpd/handler.go | 11 ++--------- + 1 file changed, 2 insertions(+), 9 deletions(-) + +--- a/services/httpd/handler.go ++++ b/services/httpd/handler.go +@@ -901,21 +901,14 @@ + return + } + +- claims, ok := token.Claims.(jwt.MapClaims) +- if !ok { +- h.httpError(w, "problem authenticating token", http.StatusInternalServerError) +- h.Logger.Print("Could not assert JWT token claims as jwt.MapClaims") +- return +- } +- + // Make sure an expiration was set on the token. +- if exp, ok := claims["exp"].(float64); !ok || exp <= 0.0 { ++ if exp, ok := token.Claims["exp"].(float64); !ok || exp <= 0.0 { + h.httpError(w, "token expiration required", http.StatusUnauthorized) + return + } + + // Get the username from the token. +- username, ok := claims["username"].(string) ++ username, ok := token.Claims["username"].(string) + if !ok { + h.httpError(w, "username in token must be a string", http.StatusUnauthorized) + return +--- a/services/httpd/handler_test.go ++++ b/services/httpd/handler_test.go +@@ -193,13 +193,13 @@ + h.ServeHTTP(w, req) + if w.Code != http.StatusUnauthorized { + t.Fatalf("unexpected status: %d: %s", w.Code, w.Body.String()) +- } else if !strings.Contains(w.Body.String(), `{"error":"Token is expired`) { ++ } else if !strings.Contains(w.Body.String(), `{"error":"token is expired`) { + t.Fatalf("unexpected body: %s", w.Body.String()) + } + + // Test handler with JWT token that has no expiration set. + token, _ := MustJWTToken("user1", h.Config.SharedSecret, false) +- delete(token.Claims.(jwt.MapClaims), "exp") ++ delete(token.Claims, "exp") + signedToken, err := token.SignedString([]byte(h.Config.SharedSecret)) + if err != nil { + t.Fatal(err) +@@ -707,11 +707,11 @@ + // MustJWTToken returns a new JWT token and signed string or panics trying. 
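
The jwt-v2.patch above rewrites the HTTP handler and its tests against the older jwt-go v2 API packaged in Debian, where token.Claims is a plain map[string]interface{} rather than the jwt.MapClaims assertion used upstream. A small sketch of the v2-style usage the patch assumes, mirroring the MustJWTToken helper; the secret and username below are placeholders:

```
package main

import (
	"fmt"
	"time"

	jwt "github.com/dgrijalva/jwt-go" // v2 API, as in golang-github-dgrijalva-jwt-go-dev
)

func main() {
	token := jwt.New(jwt.GetSigningMethod("HS512"))
	// In v2, Claims is a map and is indexed directly (no jwt.MapClaims assertion).
	token.Claims["username"] = "user1"
	token.Claims["exp"] = time.Now().Add(10 * time.Minute).Unix()

	signed, err := token.SignedString([]byte("shared secret"))
	if err != nil {
		panic(err)
	}
	fmt.Println(signed)
}
```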
+ func MustJWTToken(username, secret string, expired bool) (*jwt.Token, string) { + token := jwt.New(jwt.GetSigningMethod("HS512")) +- token.Claims.(jwt.MapClaims)["username"] = username ++ token.Claims["username"] = username + if expired { +- token.Claims.(jwt.MapClaims)["exp"] = time.Now().Add(-time.Second).Unix() ++ token.Claims["exp"] = time.Now().Add(-time.Second).Unix() + } else { +- token.Claims.(jwt.MapClaims)["exp"] = time.Now().Add(time.Minute * 10).Unix() ++ token.Claims["exp"] = time.Now().Add(time.Minute * 10).Unix() + } + signed, err := token.SignedString([]byte(secret)) + if err != nil { diff -Nru influxdb-0.10.0+dfsg1/debian/patches/series influxdb-1.1.1+dfsg1/debian/patches/series --- influxdb-0.10.0+dfsg1/debian/patches/series 2016-02-04 22:47:04.000000000 +0000 +++ influxdb-1.1.1+dfsg1/debian/patches/series 2017-02-28 23:20:09.000000000 +0000 @@ -1,2 +1,3 @@ -disable-flaky-tests.patch +disable_testengine_backup.patch disable-reporting.patch +jwt-v2.patch diff -Nru influxdb-0.10.0+dfsg1/debian/rules influxdb-1.1.1+dfsg1/debian/rules --- influxdb-0.10.0+dfsg1/debian/rules 2016-02-04 22:47:04.000000000 +0000 +++ influxdb-1.1.1+dfsg1/debian/rules 2017-02-28 23:20:09.000000000 +0000 @@ -6,7 +6,7 @@ SOURCE = $(TMP)/../ export DH_GOLANG_GO_GENERATE := 1 -export DH_GOPKG := github.com/influxdb/influxdb +export DH_GOPKG := github.com/influxdata/influxdb %: dh $@ --buildsystem=golang --with=golang,systemd --builddirectory=_build @@ -16,29 +16,32 @@ # Include tests files cp -v $(SOURCE)/stress/stress.toml _build/src/$(DH_GOPKG)/stress/stress.toml + mkdir -v -p _build/src/$(DH_GOPKG)/stress/v2/iql + cp -v $(SOURCE)/stress/v2/iql/file.iql _build/src/$(DH_GOPKG)/stress/v2/iql/file.iql + cp -v $(SOURCE)/stress/v2/iql/default.iql _build/src/$(DH_GOPKG)/stress/v2/iql/default.iql # Install Web UI components - cp -v -r $(SOURCE)/shared _build/src/$(DH_GOPKG)/shared - - # Forwarded usptream. 
There is no need for css files to be executable ;) - chmod -v -x _build/src/$(DH_GOPKG)/shared/admin/css/dropdowns-enhancement.css + cp -v -r $(SOURCE)/services/admin/assets _build/src/$(DH_GOPKG)/services/admin # Replace bundled javascript/css with our own - mkdir -v _build/src/$(DH_GOPKG)/shared/admin/js/vendor + mkdir -v _build/src/$(DH_GOPKG)/services/admin/assets/js/vendor - ln -v -s /usr/share/javascript/bootstrap/js/bootstrap.min.js _build/src/$(DH_GOPKG)/shared/admin/js/vendor/bootstrap-3.3.5.min.js - ln -v -s /usr/share/javascript/jquery/jquery.min.js _build/src/$(DH_GOPKG)/shared/admin/js/vendor/jquery-2.1.4.min.js - uglifyjs $(TMP)/missing-sources/react-0.13.3.js --output _build/src/$(DH_GOPKG)/shared/admin/js/vendor/react-0.13.3.min.js - ln -v -s /usr/share/javascript/bootstrap/css/bootstrap.css _build/src/$(DH_GOPKG)/shared/admin/css/bootstrap.css + ln -v -s /usr/share/javascript/bootstrap/js/bootstrap.min.js _build/src/$(DH_GOPKG)/services/admin/assets/js/vendor/bootstrap-3.3.5.min.js + ln -v -s /usr/share/javascript/jquery/jquery.min.js _build/src/$(DH_GOPKG)/services/admin/assets/js/vendor/jquery-2.1.4.min.js + yui-compressor $(TMP)/missing-sources/react-0.13.3.js -o _build/src/$(DH_GOPKG)/services/admin/assets/js/vendor/react-0.13.3.min.js + ln -v -s /usr/share/javascript/bootstrap/css/bootstrap.css _build/src/$(DH_GOPKG)/services/admin/assets/css/bootstrap.css # glyphicons-halflings-regular.* - rm --verbose _build/src/$(DH_GOPKG)/shared/admin/fonts/glyphicons-halflings-regular.* - ln -s /usr/share/javascript/bootstrap/fonts/glyphicons-halflings-regular.* _build/src/$(DH_GOPKG)/shared/admin/fonts/ + rm --verbose _build/src/$(DH_GOPKG)/services/admin/assets/fonts/glyphicons-halflings-regular.* + ln -s /usr/share/javascript/bootstrap/fonts/glyphicons-halflings-regular.* _build/src/$(DH_GOPKG)/services/admin/assets/fonts/ + + # include tmpldata files + cp -v $(SOURCE)/tsdb/engine/tsm1/*.tmpl* _build/src/$(DH_GOPKG)/tsdb/engine/tsm1 + cp -v $(SOURCE)/influxql/*.tmpl* _build/src/$(DH_GOPKG)/influxql + cp -v $(SOURCE)/influxql/tmpldata _build/src/$(DH_GOPKG)/influxql - # Rebuild statik.go - rm --verbose _build/src/$(DH_GOPKG)/statik/statik.go - cd _build/src/$(DH_GOPKG) && golang-statik -src=./shared/admin - go fmt _build/src/$(DH_GOPKG)/statik/statik.go + # Remove statik files, latest version cannot cope. + rm -f _build/src/$(DH_GOPKG)/services/admin/statik/statik.go # influxdb version DEB_VERSION := $(shell dpkg-parsechangelog -l$(DEBIAN_DIR)/changelog --show-field Version) @@ -49,7 +52,6 @@ dh_auto_build -- -ldflags="-X main.version=$(DEB_UPSTREAM_VERSION)" # Don't tests on unsupported architectures. Test are unstable. -# In the future, we should only skip cluster tests. 
SKIP_TESTS := True ifeq ($(DEB_HOST_ARCH_CPU),amd64) diff -Nru influxdb-0.10.0+dfsg1/debian/source/lintian-overrides influxdb-1.1.1+dfsg1/debian/source/lintian-overrides --- influxdb-0.10.0+dfsg1/debian/source/lintian-overrides 2016-02-04 22:47:04.000000000 +0000 +++ influxdb-1.1.1+dfsg1/debian/source/lintian-overrides 2017-02-28 23:20:09.000000000 +0000 @@ -1,3 +1,3 @@ # False positive, see https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=799861 -source-contains-prebuilt-javascript-object debian/missing-sources/react-0.13.3.js -source-is-missing debian/missing-sources/react-0.13.3.js +source-contains-prebuilt-javascript-object debian/missing-sources/react-0.13.3.js * +source-is-missing debian/missing-sources/react-0.13.3.js * diff -Nru influxdb-0.10.0+dfsg1/Dockerfile_build_ubuntu32 influxdb-1.1.1+dfsg1/Dockerfile_build_ubuntu32 --- influxdb-0.10.0+dfsg1/Dockerfile_build_ubuntu32 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/Dockerfile_build_ubuntu32 2016-12-06 21:36:15.000000000 +0000 @@ -1,6 +1,6 @@ -FROM 32bit/ubuntu:14.04 +FROM ioft/i386-ubuntu:14.04 -RUN apt-get update && apt-get install -y \ +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ python-software-properties \ software-properties-common \ wget \ @@ -18,18 +18,18 @@ # Install go ENV GOPATH /root/go -ENV GO_VERSION 1.4.3 +ENV GO_VERSION 1.7.4 ENV GO_ARCH 386 RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \ tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \ rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ENV PATH /usr/local/go/bin:$PATH -ENV PROJECT_DIR $GOPATH/src/github.com/influxdb/influxdb +ENV PROJECT_DIR $GOPATH/src/github.com/influxdata/influxdb ENV PATH $GOPATH/bin:$PATH RUN mkdir -p $PROJECT_DIR WORKDIR $PROJECT_DIR VOLUME $PROJECT_DIR -ENTRYPOINT [ "/root/go/src/github.com/influxdb/influxdb/build.py" ] +ENTRYPOINT [ "/root/go/src/github.com/influxdata/influxdb/build.py" ] diff -Nru influxdb-0.10.0+dfsg1/Dockerfile_build_ubuntu64 influxdb-1.1.1+dfsg1/Dockerfile_build_ubuntu64 --- influxdb-0.10.0+dfsg1/Dockerfile_build_ubuntu64 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/Dockerfile_build_ubuntu64 2016-12-06 21:36:15.000000000 +0000 @@ -1,6 +1,6 @@ FROM ubuntu:trusty -RUN apt-get update && apt-get install -y \ +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ python-software-properties \ software-properties-common \ wget \ @@ -12,24 +12,27 @@ rpm \ zip \ python \ - python-boto + python-boto \ + asciidoc \ + xmlto \ + docbook-xsl RUN gem install fpm # Install go ENV GOPATH /root/go -ENV GO_VERSION 1.4.3 +ENV GO_VERSION 1.7.4 ENV GO_ARCH amd64 RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \ tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \ rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ENV PATH /usr/local/go/bin:$PATH -ENV PROJECT_DIR $GOPATH/src/github.com/influxdb/influxdb +ENV PROJECT_DIR $GOPATH/src/github.com/influxdata/influxdb ENV PATH $GOPATH/bin:$PATH RUN mkdir -p $PROJECT_DIR WORKDIR $PROJECT_DIR VOLUME $PROJECT_DIR -ENTRYPOINT [ "/root/go/src/github.com/influxdb/influxdb/build.py" ] +ENTRYPOINT [ "/root/go/src/github.com/influxdata/influxdb/build.py" ] diff -Nru influxdb-0.10.0+dfsg1/Dockerfile_build_ubuntu64_git influxdb-1.1.1+dfsg1/Dockerfile_build_ubuntu64_git --- influxdb-0.10.0+dfsg1/Dockerfile_build_ubuntu64_git 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/Dockerfile_build_ubuntu64_git 2016-12-06 
21:36:15.000000000 +0000 @@ -1,6 +1,6 @@ FROM ubuntu:trusty -RUN apt-get update && apt-get install -y \ +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ python-software-properties \ software-properties-common \ wget \ @@ -18,7 +18,7 @@ # Setup env ENV GOPATH /root/go -ENV PROJECT_DIR $GOPATH/src/github.com/influxdb/influxdb +ENV PROJECT_DIR $GOPATH/src/github.com/influxdata/influxdb ENV PATH $GOPATH/bin:$PATH RUN mkdir -p $PROJECT_DIR @@ -26,7 +26,7 @@ # Install go -ENV GO_VERSION 1.4.3 +ENV GO_VERSION 1.7.4 ENV GO_ARCH amd64 RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \ tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \ diff -Nru influxdb-0.10.0+dfsg1/Dockerfile_test_ubuntu32 influxdb-1.1.1+dfsg1/Dockerfile_test_ubuntu32 --- influxdb-0.10.0+dfsg1/Dockerfile_test_ubuntu32 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/Dockerfile_test_ubuntu32 2016-12-06 21:36:15.000000000 +0000 @@ -1,12 +1,12 @@ FROM 32bit/ubuntu:14.04 -RUN apt-get update && apt-get install -y python-software-properties software-properties-common git +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y python-software-properties software-properties-common git RUN add-apt-repository ppa:evarlast/golang1.4 -RUN apt-get update && apt-get install -y -o Dpkg::Options::="--force-overwrite" golang-go +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y -o Dpkg::Options::="--force-overwrite" golang-go ENV GOPATH=/root/go -RUN mkdir -p /root/go/src/github.com/influxdb/influxdb +RUN mkdir -p /root/go/src/github.com/influxdata/influxdb RUN mkdir -p /tmp/artifacts -VOLUME /root/go/src/github.com/influxdb/influxdb +VOLUME /root/go/src/github.com/influxdata/influxdb VOLUME /tmp/artifacts diff -Nru influxdb-0.10.0+dfsg1/DOCKER.md influxdb-1.1.1+dfsg1/DOCKER.md --- influxdb-0.10.0+dfsg1/DOCKER.md 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/DOCKER.md 2016-12-06 21:36:15.000000000 +0000 @@ -11,12 +11,12 @@ $ ./build-docker.sh ``` -This script uses the `golang:1.5` image to build a fully static binary of `influxd` and then adds it to a minimal `scratch` image. +This script uses the `golang:1.7.4` image to build a fully static binary of `influxd` and then adds it to a minimal `scratch` image. To build the image using a different version of go: ``` -$ GO_VER=1.4.2 ./build-docker.sh +$ GO_VER=1.7.4 ./build-docker.sh ``` Available version can be found [here](https://hub.docker.com/_/golang/). @@ -28,17 +28,3 @@ ``` $ docker run -it -p 8086:8086 -p 8088:8088 influxdb ``` - -## Multi-Node Cluster - -This will create a simple 3-node cluster. The data is stored within the container and will be lost when the container is removed. This is only useful for test clusters. - -The `HOST_IP` env variable should be your host IP if running under linux or the virtualbox VM IP if running under OSX. On OSX, this would be something like: `$(docker-machine ip dev)` or `$(boot2docker ip)` depending on which docker tool you are using. 
- -``` -$ export HOST_IP= -$ docker run -it -p 8086:8086 -p 8088:8088 influxdb -hostname $HOST_IP:8088 -$ docker run -it -p 8186:8086 -p 8188:8088 influxdb -hostname $HOST_IP:8188 -join $HOST_IP:8088 -$ docker run -it -p 8286:8086 -p 8288:8088 influxdb -hostname $HOST_IP:8288 -join $HOST_IP:8088 -``` - diff -Nru influxdb-0.10.0+dfsg1/errors.go influxdb-1.1.1+dfsg1/errors.go --- influxdb-0.10.0+dfsg1/errors.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/errors.go 2016-12-06 21:36:15.000000000 +0000 @@ -6,13 +6,9 @@ "strings" ) -var ( - // ErrFieldsRequired is returned when a point does not any fields. - ErrFieldsRequired = errors.New("fields required") - - // ErrFieldTypeConflict is returned when a new field already exists with a different type. - ErrFieldTypeConflict = errors.New("field type conflict") -) +// ErrFieldTypeConflict is returned when a new field already exists with a +// different type. +var ErrFieldTypeConflict = errors.New("field type conflict") // ErrDatabaseNotFound indicates that a database operation failed on the // specified database because the specified database does not exist. @@ -30,16 +26,19 @@ return false } - if err == ErrFieldsRequired { - return true - } - if err == ErrFieldTypeConflict { - return true - } - - if strings.Contains(err.Error(), ErrFieldTypeConflict.Error()) { + if strings.HasPrefix(err.Error(), ErrFieldTypeConflict.Error()) { return true } return false } + +const upgradeMessage = `******************************************************************* + UNSUPPORTED SHARD FORMAT DETECTED + +As of version 0.11, only tsm shards are supported. Please use the +influx_tsm tool to convert non-tsm shards. + +More information can be found at the documentation site: +https://docs.influxdata.com/influxdb/v0.10/administration/upgrading +*******************************************************************` diff -Nru influxdb-0.10.0+dfsg1/etc/config.sample.toml influxdb-1.1.1+dfsg1/etc/config.sample.toml --- influxdb-0.10.0+dfsg1/etc/config.sample.toml 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/etc/config.sample.toml 2016-12-06 21:36:15.000000000 +0000 @@ -1,12 +1,15 @@ ### Welcome to the InfluxDB configuration file. -# Once every 24 hours InfluxDB will report anonymous data to m.influxdb.com -# The data includes raft id (random 8 bytes), os, arch, version, and metadata. -# We don't track ip addresses of servers reporting. This is only used -# to track the number of instances running and the versions, which -# is very helpful for us. +# The values in this file override the default values used by the system if +# a config option is not specified. The commented out lines are the the configuration +# field and the default value used. Uncommentting a line and changing the value +# will change the value used at runtime when the process is restarted. + +# Once every 24 hours InfluxDB will report usage data to usage.influxdata.com +# The data includes a random ID, os, arch, version, the number of series and other +# usage data. No data from user databases is ever transmitted. # Change this option to true to disable reporting. 
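
The errors.go hunk above narrows IsClientError from a substring match to a prefix match on the field-type-conflict message. A minimal sketch of the behavioural difference; the sample error strings are invented for illustration only:

```
package main

import (
	"errors"
	"fmt"
	"strings"
)

var ErrFieldTypeConflict = errors.New("field type conflict")

// isClientError mirrors the new prefix-based check in errors.go.
func isClientError(err error) bool {
	if err == nil {
		return false
	}
	return strings.HasPrefix(err.Error(), ErrFieldTypeConflict.Error())
}

func main() {
	// Matches: the conflict text is the prefix of the error message.
	fmt.Println(isClientError(errors.New("field type conflict: input field is type float")))
	// No longer matches: the conflict text appears, but not as a prefix.
	fmt.Println(isClientError(errors.New("write failed: field type conflict")))
}
```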
-reporting-disabled = false +# reporting-disabled = false # we'll try to get the hostname automatically, but if it the os returns something # that isn't resolvable by other servers in the cluster, use this option to @@ -21,19 +24,14 @@ ### [meta] - # Controls if this node should run the metaservice and participate in the Raft group - enabled = true - # Where the metadata/raft database is stored dir = "/var/lib/influxdb/meta" - bind-address = ":8088" - retention-autocreate = true - election-timeout = "1s" - heartbeat-timeout = "1s" - leader-lease-timeout = "500ms" - commit-timeout = "50ms" - cluster-tracing = false + # Automatically create a default retention policy when creating a database. + # retention-autocreate = true + + # If log messages are printed for the meta service + # logging-enabled = true ### ### [data] @@ -45,41 +43,15 @@ ### [data] - # Controls if this node holds time series data shards in the cluster - enabled = true - + # The directory where the TSM storage engine stores TSM files. dir = "/var/lib/influxdb/data" - # The following WAL settings are for the b1 storage engine used in 0.9.2. They won't - # apply to any new shards created after upgrading to a version > 0.9.3. - max-wal-size = 104857600 # Maximum size the WAL can reach before a flush. Defaults to 100MB. - wal-flush-interval = "10m" # Maximum time data can sit in WAL before a flush. - wal-partition-flush-delay = "2s" # The delay time between each WAL partition being flushed. - - # These are the WAL settings for the storage engine >= 0.9.3 + # The directory where the TSM storage engine stores WAL files. wal-dir = "/var/lib/influxdb/wal" - wal-logging-enabled = true - data-logging-enabled = true - # When a series in the WAL in-memory cache reaches this size in bytes it is marked as ready to - # flush to the index - # wal-ready-series-size = 25600 - - # Flush and compact a partition once this ratio of series are over the ready size - # wal-compaction-threshold = 0.6 - - # Force a flush and compaction if any series in a partition gets above this size in bytes - # wal-max-series-size = 2097152 - - # Force a flush of all series and full compaction if there have been no writes in this - # amount of time. This is useful for ensuring that shards that are cold for writes don't - # keep a bunch of data cached in memory and in the WAL. - # wal-flush-cold-interval = "10m" - - # Force a partition to flush its largest series if it reaches this approximate size in - # bytes. Remember there are 5 partitions so you'll need at least 5x this amount of memory. - # The more memory you have, the bigger this can be. - # wal-partition-size-threshold = 20971520 + # Trace logging provides more verbose output around the tsm engine. Turning + # this on can provide more useful output for debugging tsm engine issues. + # trace-logging-enabled = false # Whether queries should be logged before execution. Very useful for troubleshooting, but will # log any sensitive data contained within a query. @@ -89,7 +61,7 @@ # CacheMaxMemorySize is the maximum size a shard's cache can # reach before it starts rejecting writes. 
- # cache-max-memory-size = 524288000 + # cache-max-memory-size = 1048576000 # CacheSnapshotMemorySize is the size at which the engine will # snapshot the cache and write it to a TSM file, freeing up memory @@ -98,60 +70,60 @@ # CacheSnapshotWriteColdDuration is the length of time at # which the engine will snapshot the cache and write it to # a new TSM file if the shard hasn't received writes or deletes - # cache-snapshot-write-cold-duration = "1h" - - # MinCompactionFileCount is the minimum number of TSM files - # that need to exist before a compaction cycle will run - # compact-min-file-count = 3 + # cache-snapshot-write-cold-duration = "10m" # CompactFullWriteColdDuration is the duration at which the engine # will compact all TSM files in a shard if it hasn't received a # write or delete - # compact-full-write-cold-duration = "24h" + # compact-full-write-cold-duration = "4h" + + # The maximum series allowed per database before writes are dropped. This limit can prevent + # high cardinality issues at the database level. This limit can be disabled by setting it to + # 0. + # max-series-per-database = 1000000 - # MaxPointsPerBlock is the maximum number of points in an encoded - # block in a TSM file. Larger numbers may yield better compression - # but could incur a performance peanalty when querying - # max-points-per-block = 1000 + # The maximum number of tag values per tag that are allowed before writes are dropped. This limit + # can prevent high cardinality tag values from being written to a measurement. This limit can be + # disabled by setting it to 0. + # max-values-per-tag = 100000 ### -### [hinted-handoff] +### [coordinator] ### -### Controls the hinted handoff feature, which allows nodes to temporarily -### store queued data when one node of a cluster is down for a short period -### of time. +### Controls the clustering service configuration. ### -[hinted-handoff] - enabled = true - dir = "/var/lib/influxdb/hh" - max-size = 1073741824 - max-age = "168h" - retry-rate-limit = 0 +# [coordinator] + # The default time a write request will wait until a "timeout" error is returned to the caller. + # write-timeout = "10s" - # Hinted handoff will start retrying writes to down nodes at a rate of once per second. - # If any error occurs, it will backoff in an exponential manner, until the interval - # reaches retry-max-interval. Once writes to all nodes are successfully completed the - # interval will reset to retry-interval. - retry-interval = "1s" - retry-max-interval = "1m" + # The maximum number of concurrent queries allowed to be executing at one time. If a query is + # executed and exceeds this limit, an error is returned to the caller. This limit can be disabled + # by setting it to 0. + # max-concurrent-queries = 0 - # Interval between running checks for data that should be purged. Data is purged from - # hinted-handoff queues for two reasons. 1) The data is older than the max age, or - # 2) the target node has been dropped from the cluster. Data is never dropped until - # it has reached max-age however, for a dropped node or not. - purge-interval = "1h" + # The maximum time a query will is allowed to execute before being killed by the system. This limit + # can help prevent run away queries. Setting the value to 0 disables the limit. + # query-timeout = "0s" -### -### [cluster] -### -### Controls non-Raft cluster behavior, which generally includes how data is -### shared across shards. -### + # The the time threshold when a query will be logged as a slow query. 
+ # discover slow or resource intensive queries. Setting the value to 0 disables the slow query logging.
+ # log-queries-after = "0s"
+
+ # The maximum number of points a SELECT can process. A value of 0 will make the maximum
+ # point count unlimited.
+ # max-select-point = 0
+
+ # The maximum number of series a SELECT can run. A value of 0 will make the maximum series
+ # count unlimited.
+ # max-select-series = 0
-[cluster]
- shard-writer-timeout = "5s" # The time within which a remote shard must respond to a write request.
- write-timeout = "10s" # The time within which a write request must complete on the cluster.
+ # The maximum number of group by time buckets a SELECT can create. A value of zero will make the maximum
+ # number of buckets unlimited.
+ # max-select-buckets = 0
###
### [retention]
@@ -159,9 +131,12 @@
### Controls the enforcement of retention policies for evicting old data.
###
-[retention]
- enabled = true
- check-interval = "30m"
+# [retention]
+ # Determines whether retention policy enforcement is enabled.
+ # enabled = true
+
+ # The interval of time when retention policy enforcement checks run.
+ # check-interval = "30m"
###
### [shard-precreation]
@@ -171,10 +146,16 @@
### future, will ever be created. Shards are never precreated that would be wholly
### or partially in the past.
-[shard-precreation]
- enabled = true
- check-interval = "10m"
- advance-period = "30m"
+# [shard-precreation]
+ # Determines whether shard pre-creation service is enabled.
+ # enabled = true
+
+ # The interval of time when the check to pre-create new shards runs.
+ # check-interval = "10m"
+
+ # The default period ahead of the end time of a shard group that its successor
+ # group is created.
+ # advance-period = "30m"
###
### Controls the system self-monitoring, statistics and diagnostics.
@@ -185,10 +166,15 @@
### and a replication factor of 1, if it does not exist. In all cases the
### this retention policy is configured as the default for the database.
-[monitor]
- store-enabled = true # Whether to record statistics internally.
- store-database = "_internal" # The destination database for recorded statistics
- store-interval = "10s" # The interval at which to record statistics
+# [monitor]
+ # Whether to record statistics internally.
+ # store-enabled = true
+
+ # The destination database for recorded statistics
+ # store-database = "_internal"
+
+ # The interval at which to record statistics
+ # store-interval = "10s"
###
### [admin]
@@ -196,12 +182,20 @@
### Controls the availability of the built-in, web-based admin interface. If HTTPS is
### enabled for the admin interface, HTTPS must also be enabled on the [http] service.
###
+### NOTE: This interface is deprecated as of 1.1.0 and will be removed in a future release.
-[admin]
- enabled = true
- bind-address = ":8083"
- https-enabled = false
- https-certificate = "/etc/ssl/influxdb.pem"
+# [admin]
+ # Determines whether the admin service is enabled.
+ # enabled = false
+
+ # The default bind address used by the admin service.
+ # bind-address = ":8083"
+
+ # Whether the admin service should use HTTPS.
+ # https-enabled = false
+
+ # The SSL certificate used when HTTPS is enabled.
+ # https-certificate = "/etc/ssl/influxdb.pem"
###
### [http]
@@ -210,15 +204,81 @@
### Controls how the HTTP endpoints are configured. These are the primary
### mechanism for getting data into and out of InfluxDB.
###
-[http]
- enabled = true
- bind-address = ":8086"
- auth-enabled = false
- log-enabled = true
- write-tracing = false
- pprof-enabled = false
- https-enabled = false
- https-certificate = "/etc/ssl/influxdb.pem"
+# [http]
+ # Determines whether HTTP endpoint is enabled.
+ # enabled = true
+
+ # The bind address used by the HTTP service.
+ # bind-address = ":8086"
+
+ # Determines whether HTTP authentication is enabled.
+ # auth-enabled = false
+
+ # The default realm sent back when issuing a basic auth challenge.
+ # realm = "InfluxDB"
+
+ # Determines whether HTTP request logging is enabled.
+ # log-enabled = true
+
+ # Determines whether detailed write logging is enabled.
+ # write-tracing = false
+
+ # Determines whether the pprof endpoint is enabled. This endpoint is used for
+ # troubleshooting and monitoring.
+ # pprof-enabled = true
+
+ # Determines whether HTTPS is enabled.
+ # https-enabled = false
+
+ # The SSL certificate to use when HTTPS is enabled.
+ # https-certificate = "/etc/ssl/influxdb.pem"
+
+ # Use a separate private key location.
+ # https-private-key = ""
+
+ # The JWT auth shared secret to validate requests using JSON web tokens.
+ # shared-secret = ""
+
+ # The default chunk size for result sets that should be chunked.
+ # max-row-limit = 10000
+
+ # The maximum number of HTTP connections that may be open at once. New connections that
+ # would exceed this limit are dropped. Setting this value to 0 disables the limit.
+ # max-connection-limit = 0
+
+ # Enable http service over unix domain socket
+ # unix-socket-enabled = false
+
+ # The path of the unix domain socket.
+ # bind-socket = "/var/run/influxdb.sock"
+
+###
+### [subscriber]
+###
+### Controls the subscriptions, which can be used to fork a copy of all data
+### received by the InfluxDB host.
+###
+
+# [subscriber]
+ # Determines whether the subscriber service is enabled.
+ # enabled = true
+
+ # The default timeout for HTTP writes to subscribers.
+ # http-timeout = "30s"
+
+ # Allows insecure HTTPS connections to subscribers. This is useful when testing with self-
+ # signed certificates.
+ # insecure-skip-verify = false
+
+ # The path to the PEM encoded CA certs file. If the empty string, the default system certs will be used
+ # ca-certs = ""
+
+ # The number of writer goroutines processing the write channel.
+ # write-concurrency = 40
+
+ # The number of in-flight writes buffered in the write channel.
+ # write-buffer-size = 1000
+
###
### [[graphite]]
@@ -226,87 +286,107 @@
### Controls one or many listeners for Graphite data.
###
-[[graphite]]
- enabled = false
+# [[graphite]]
+ # Determines whether the graphite endpoint is enabled.
+ # enabled = false
# database = "graphite"
+ # retention-policy = ""
# bind-address = ":2003"
# protocol = "tcp"
# consistency-level = "one"
- # name-separator = "."
# These next lines control how batching works. You should have this enabled
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in.
- # batch-size = 1000 # will flush if this many points get buffered
- # batch-pending = 5 # number of batches that may be pending in memory
- # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
- # udp-read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
-
- ## "name-schema" configures tag names for parsing the metric name from graphite protocol;
- ## separated by `name-separator`.
- ## The "measurement" tag is special and the corresponding field will become - ## the name of the metric. - ## e.g. "type.host.measurement.device" will parse "server.localhost.cpu.cpu0" as - ## { - ## measurement: "cpu", - ## tags: { - ## "type": "server", - ## "host": "localhost, - ## "device": "cpu0" - ## } - ## } - # name-schema = "type.host.measurement.device" - - ## If set to true, when the input metric name has more fields than `name-schema` specified, - ## the extra fields will be ignored. - ## Otherwise an error will be logged and the metric rejected. - # ignore-unnamed = true + # Flush if this many points get buffered + # batch-size = 5000 + + # number of batches that may be pending in memory + # batch-pending = 10 + + # Flush at least this often even if we haven't hit buffer limit + # batch-timeout = "1s" + + # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max. + # udp-read-buffer = 0 + + ### This string joins multiple matching 'measurement' values providing more control over the final measurement name. + # separator = "." + + ### Default tags that will be added to all metrics. These can be overridden at the template level + ### or by tags extracted from metric + # tags = ["region=us-east", "zone=1c"] + + ### Each template line requires a template pattern. It can have an optional + ### filter before the template and separated by spaces. It can also have optional extra + ### tags following the template. Multiple tags should be separated by commas and no spaces + ### similar to the line protocol format. There can be only one default template. + # templates = [ + # "*.app env.service.resource.measurement", + # # Default template + # "server.*", + # ] ### ### [collectd] ### -### Controls the listener for collectd data. +### Controls one or many listeners for collectd data. ### -[collectd] - enabled = false - # bind-address = "" - # database = "" - # typesdb = "" +# [[collectd]] + # enabled = false + # bind-address = ":25826" + # database = "collectd" + # retention-policy = "" + # typesdb = "/usr/share/collectd/types.db" # These next lines control how batching works. You should have this enabled # otherwise you could get dropped metrics or poor performance. Batching # will buffer points in memory if you have many coming in. - # batch-size = 1000 # will flush if this many points get buffered - # batch-pending = 5 # number of batches that may be pending in memory - # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit - # read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max. + # Flush if this many points get buffered + # batch-size = 5000 + + # Number of batches that may be pending in memory + # batch-pending = 10 + + # Flush at least this often even if we haven't hit buffer limit + # batch-timeout = "10s" + + # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max. + # read-buffer = 0 ### ### [opentsdb] ### -### Controls the listener for OpenTSDB data. +### Controls one or many listeners for OpenTSDB data. ### -[opentsdb] - enabled = false +# [[opentsdb]] + # enabled = false # bind-address = ":4242" # database = "opentsdb" # retention-policy = "" # consistency-level = "one" # tls-enabled = false - # certificate= "" - # log-point-errors = true # Log an error for every malformed point. + # certificate= "/etc/ssl/influxdb.pem" + + # Log an error for every malformed point. 
+ # log-point-errors = true
# These next lines control how batching works. You should have this enabled
# otherwise you could get dropped metrics or poor performance. Only points
# metrics received over the telnet protocol undergo batching.
- # batch-size = 1000 # will flush if this many points get buffered
- # batch-pending = 5 # number of batches that may be pending in memory
- # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
+ # Flush if this many points get buffered
+ # batch-size = 1000
+
+ # Number of batches that may be pending in memory
+ # batch-pending = 5
+
+ # Flush at least this often even if we haven't hit buffer limit
+ # batch-timeout = "1s"
###
### [[udp]]
@@ -314,9 +394,9 @@
### Controls the listeners for InfluxDB line protocol data via UDP.
###
-[[udp]]
- enabled = false
- # bind-address = ""
+# [[udp]]
+ # enabled = false
+ # bind-address = ":8089"
# database = "udp"
# retention-policy = ""
@@ -324,13 +404,17 @@
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in.
- # batch-size = 1000 # will flush if this many points get buffered
- # batch-pending = 5 # number of batches that may be pending in memory
- # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
- # read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
+ # Flush if this many points get buffered
+ # batch-size = 5000
+
+ # Number of batches that may be pending in memory
+ # batch-pending = 10
- # set the expected UDP payload size; lower values tend to yield better performance, default is max UDP size 65536
- # udp-payload-size = 65536
+ # Will flush at least this often even if we haven't hit buffer limit
+ # batch-timeout = "1s"
+
+ # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
+ # read-buffer = 0
###
### [continuous_queries]
@@ -338,7 +422,12 @@
### Controls how continuous queries are run within InfluxDB.
###
-[continuous_queries]
- log-enabled = true
- enabled = true
- # run-interval = "1s" # interval for how often continuous queries will be checked if they need to run
+# [continuous_queries]
+ # Determines whether the continuous query service is enabled.
+ # enabled = true
+
+ # Controls whether queries are logged when executed by the CQ service.
+ # log-enabled = true
+
+ # interval for how often continuous queries will be checked if they need to run
+ # run-interval = "1s"
diff -Nru influxdb-0.10.0+dfsg1/.gitattributes influxdb-1.1.1+dfsg1/.gitattributes
--- influxdb-0.10.0+dfsg1/.gitattributes 1970-01-01 00:00:00.000000000 +0000
+++ influxdb-1.1.1+dfsg1/.gitattributes 2016-12-06 21:36:15.000000000 +0000
@@ -0,0 +1 @@
+CHANGELOG.md merge=union
diff -Nru influxdb-0.10.0+dfsg1/.github/ISSUE_TEMPLATE.md influxdb-1.1.1+dfsg1/.github/ISSUE_TEMPLATE.md
--- influxdb-0.10.0+dfsg1/.github/ISSUE_TEMPLATE.md 1970-01-01 00:00:00.000000000 +0000
+++ influxdb-1.1.1+dfsg1/.github/ISSUE_TEMPLATE.md 2016-12-06 21:36:15.000000000 +0000
@@ -0,0 +1,54 @@
+### Directions
+_GitHub Issues are reserved for actionable bug reports and feature requests._
+_General questions should be sent to the [InfluxDB mailing list](https://groups.google.com/forum/#!forum/influxdb)._
+
+_Before opening an issue, search for similar bug reports or feature requests on GitHub Issues._
+_If no similar issue can be found, fill out either the "Bug Report" or the "Feature Request" section below._
+_Erase the other section and everything on and above this line._
+
+### Bug report
+
+__System info:__ [Include InfluxDB version, operating system name, and other relevant details]
+
+__Steps to reproduce:__
+
+1. [First Step]
+2. [Second Step]
+3. [and so on...]
+
+__Expected behavior:__ [What you expected to happen]
+
+__Actual behavior:__ [What actually happened]
+
+__Additional info:__ [Include gist of relevant config, logs, etc.]
+
+Also, if this is an issue of performance, locking, etc., the following commands are useful to create debug information for the team.
+
+```
+curl -o block.txt "http://localhost:8086/debug/pprof/block?debug=1"
+curl -o goroutine.txt "http://localhost:8086/debug/pprof/goroutine?debug=1"
+curl -o heap.txt "http://localhost:8086/debug/pprof/heap?debug=1"
+curl -o vars.txt "http://localhost:8086/debug/vars"
+iostat -xd 1 30 > iostat.txt
+influx -execute "show shards" > shards.txt
+influx -execute "show stats" > stats.txt
+influx -execute "show diagnostics" > diagnostics.txt
+```
+
+Please run those if possible and link them from a [gist](http://gist.github.com).
+
+*Please note, the quickest way to fix a bug is to open a Pull Request.*
+
+
+### Feature Request
+
+Opening a feature request kicks off a discussion.
+Requests may be closed if we're not actively planning to work on them.
+
+__Proposal:__ [Description of the feature]
+
+__Current behavior:__ [What currently happens]
+
+__Desired behavior:__ [What you would like to happen]
+
+__Use case:__ [Why is this important (helps with prioritizing requests)]
diff -Nru influxdb-0.10.0+dfsg1/.github/PULL_REQUEST_TEMPLATE.md influxdb-1.1.1+dfsg1/.github/PULL_REQUEST_TEMPLATE.md
--- influxdb-0.10.0+dfsg1/.github/PULL_REQUEST_TEMPLATE.md 1970-01-01 00:00:00.000000000 +0000
+++ influxdb-1.1.1+dfsg1/.github/PULL_REQUEST_TEMPLATE.md 2016-12-06 21:36:15.000000000 +0000
@@ -0,0 +1,11 @@
+###### Required for all non-trivial PRs
+- [ ] Rebased/mergeable
+- [ ] Tests pass
+- [ ] CHANGELOG.md updated
+- [ ] Sign [CLA](https://influxdata.com/community/cla/) (if not already signed)
+
+###### Required only if applicable
+_You can erase any checkboxes below this note if they are not applicable to your Pull Request._
+- [ ] [InfluxQL Spec](https://github.com/influxdata/influxdb/blob/master/influxql/README.md) updated
+- [ ] Provide example syntax
+- [ ] Update man page when modifying a command
diff -Nru influxdb-0.10.0+dfsg1/.gitignore influxdb-1.1.1+dfsg1/.gitignore
--- influxdb-0.10.0+dfsg1/.gitignore 2016-02-04 16:51:02.000000000 +0000
+++ influxdb-1.1.1+dfsg1/.gitignore 2016-12-06 21:36:15.000000000 +0000
@@ -1,23 +1,21 @@
+# Keep editor-specific, non-project specific ignore rules in global .gitignore:
+# https://help.github.com/articles/ignoring-files/#create-a-global-gitignore
+
*~
src/
config.json
/bin/
-TAGS
-
-# vim temp files
-*.swp
-
-*.test
/query/a.out*
-.DS_Store
# ignore generated files.
cmd/influxd/version.go # executables +*.test + influx_tsm **/influx_tsm !**/influx_tsm/ @@ -69,11 +67,7 @@ # test data files integration/migration_data/ -# goide project files -.idea - -# goconvey config files -*.goconvey - -// Ingnore SourceGraph directory -.srclib-store/ +# man outputs +man/*.xml +man/*.1 +man/*.1.gz diff -Nru influxdb-0.10.0+dfsg1/Godeps influxdb-1.1.1+dfsg1/Godeps --- influxdb-0.10.0+dfsg1/Godeps 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/Godeps 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,18 @@ +collectd.org 9fc824c70f713ea0f058a07b49a4c563ef2a3b98 +github.com/BurntSushi/toml 99064174e013895bbd9b025c31100bd1d9b590ca +github.com/bmizerany/pat c068ca2f0aacee5ac3681d68e4d0a003b7d1fd2c +github.com/boltdb/bolt 5cc10bbbc5c141029940133bb33c9e969512a698 +github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d +github.com/dgrijalva/jwt-go 63734eae1ef55eaac06fdc0f312615f2e321e273 +github.com/dgryski/go-bits 2ad8d707cc05b1815ce6ff2543bb5e8d8f9298ef +github.com/dgryski/go-bitstream 7d46cd22db7004f0cceb6f7975824b560cf0e486 +github.com/gogo/protobuf 0394392b81058a7f972029451f06e528bb18ba50 +github.com/golang/snappy d9eb7a3d35ec988b8585d4a0068e462c27d28380 +github.com/influxdata/usage-client 6d3895376368aa52a3a81d2a16e90f0f52371967 +github.com/jwilder/encoding 4dada27c33277820fe35c7ee71ed34fbc9477d00 +github.com/kimor79/gollectd 61d0deeb4ffcc167b2a1baa8efd72365692811bc +github.com/paulbellamy/ratecounter 5a11f585a31379765c190c033b6ad39956584447 +github.com/peterh/liner 8975875355a81d612fafb9f5a6037bdcc2d9b073 +github.com/rakyll/statik 274df120e9065bdd08eb1120e0375e3dc1ae8465 +github.com/retailnext/hllpp 38a7bb71b483e855d35010808143beaf05b67f9d +golang.org/x/crypto c197bcf24cde29d3f73c7b4ac6fd41f4384e8af6 diff -Nru influxdb-0.10.0+dfsg1/.hooks/pre-commit influxdb-1.1.1+dfsg1/.hooks/pre-commit --- influxdb-0.10.0+dfsg1/.hooks/pre-commit 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/.hooks/pre-commit 2016-12-06 21:36:15.000000000 +0000 @@ -6,8 +6,7 @@ exit 1 fi -# Due to the way composites work, vet will fail for some of our tests so we ignore it -vetcount=`go tool vet -composites=true ./ 2>&1 | wc -l` +vetcount=`go tool vet ./ 2>&1 | wc -l` if [ $vetcount -gt 0 ]; then echo "Some files aren't passing vet heuristics, please run 'go vet ./...' to see the errors it flags and correct your source code before committing" exit 1 @@ -16,9 +15,9 @@ # Ensure FIXME lines are removed before commit. fixme_lines=$(git diff --cached | grep ^+ | grep -v pre-commit | grep FIXME | sed 's_^+\s*__g') -if [ "$todo_lines" != "" ]; then +if [ "$fixme_lines" != "" ]; then echo "Please remove the following lines:" - echo -e "$todo_lines" + echo -e "$fixme_lines" exit 1 fi diff -Nru influxdb-0.10.0+dfsg1/importer/README.md influxdb-1.1.1+dfsg1/importer/README.md --- influxdb-0.10.0+dfsg1/importer/README.md 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/importer/README.md 2016-12-06 21:36:15.000000000 +0000 @@ -15,7 +15,28 @@ `0.8.9` exports raw data to a flat file that includes two sections, `DDL` and `DML`. You can choose to export them independently (see below). -The `DDL` section contains the sql commands to create databases and retention policies. the `DML` section is [line protocol](https://github.com/influxdb/influxdb/blob/master/tsdb/README.md) and can be directly posted to the [http endpoint](https://influxdb.com/docs/v0.9/guides/writing_data.html) in `0.9`. Remember that batching is important and we don't recommend batch sizes over 5k. 
+The `DDL` section contains the SQL commands to create databases and retention policies. The `DML` section is [line protocol](https://github.com/influxdata/influxdb/blob/master/tsdb/README.md) and can be directly posted to the [http endpoint](https://docs.influxdata.com/influxdb/v0.10/guides/writing_data) in `0.10`. Remember that batching is important and we don't recommend batch sizes over 5k without further testing.
+
+Example export file:
+```
+# DDL
+CREATE DATABASE db0
+CREATE DATABASE db1
+CREATE RETENTION POLICY rp1 ON db1 DURATION 1h REPLICATION 1
+
+# DML
+# CONTEXT-DATABASE:db0
+# CONTEXT-RETENTION-POLICY:autogen
+cpu,host=server1 value=33.3 1464026335000000000
+cpu,host=server1 value=43.3 1464026395000000000
+cpu,host=server1 value=63.3 1464026575000000000
+
+# CONTEXT-DATABASE:db1
+# CONTEXT-RETENTION-POLICY:rp1
+cpu,host=server1 value=73.3 1464026335000000000
+cpu,host=server1 value=83.3 1464026395000000000
+cpu,host=server1 value=93.3 1464026575000000000
+```
You need to specify a database and shard group when you export.
@@ -31,42 +52,42 @@
Then, to export a database with the name "metrics" and a shard space with the name "default", issue the following curl command:
```sh
-curl -o export http://username:password@http://localhost:8086/export/metrics/default
+curl -o export http://username:password@localhost:8086/export/metrics/default
```
Compression is supported, and will result in a significantly smaller file size.
Use the following command for compression:
```sh
-curl -o export.gz --compressed http://username:password@http://localhost:8086/export/metrics/default
+curl -o export.gz --compressed http://username:password@localhost:8086/export/metrics/default
```
You can also export just the `DDL` with this option:
```sh
-curl -o export.ddl http://username:password@http://localhost:8086/export/metrics/default?l=ddl
+curl -o export.ddl http://username:password@localhost:8086/export/metrics/default?l=ddl
```
Or just the `DML` with this option:
```sh
-curl -o export.dml.gz --compressed http://username:password@http://localhost:8086/export/metrics/default?l=dml
+curl -o export.dml.gz --compressed http://username:password@localhost:8086/export/metrics/default?l=dml
```
### Assumptions
-- Series name mapping follows these [guidelines](https://influxdb.com/docs/v0.8/advanced_topics/schema_design.html)
-- Database name will map directly from `0.8` to `0.9`
+- Series name mapping follows these [guidelines](https://docs.influxdata.com/influxdb/v0.8/advanced_topics/schema_design/)
+- Database name will map directly from `0.8` to `0.10`
- Shard Spaces map to Retention Policies
-- Shard Space Duration is ignored, as in `0.9` we determine shard size automatically
+- Shard Space Duration is ignored, as in `0.10` we determine shard size automatically
- Regex is used to match the correct series names and only exports that data for the database
- Duration becomes the new Retention Policy duration
-- Users are not migrated due to inability to get passwords. Anyone using users will need to manually set these back up in `0.9`
+- Users are not migrated due to inability to get passwords. Anyone using users will need to manually set these back up in `0.10`
### Upgrade Recommendations
-It's recommended that you upgrade to `0.9.3` first and have all your writes going there. Then, on the `0.8.X` instances, upgrade to `0.8.9`.
+It's recommended that you upgrade to `0.9.3` or later first and have all your writes going there. Then, on the `0.8.X` instances, upgrade to `0.8.9`.
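The `# CONTEXT-DATABASE:` markers in the export file above decide which database each block of points belongs to, and the batching advice caps writes at roughly 5k points. As a minimal, hypothetical sketch of how that structure could be consumed (this helper is not part of the importer; `splitDML`, `batch`, and the 5000-point cap are illustrative assumptions), the `DML` section can be grouped into per-database batches like this:

```go
package importexample

import (
	"bufio"
	"io"
	"strings"
)

// batch groups line-protocol points under the database named by the most
// recent "# CONTEXT-DATABASE:" marker.
type batch struct {
	Database string
	Points   []string
}

// splitDML reads the DML section of an export file and returns write batches
// of at most maxPoints points each, honoring the CONTEXT-DATABASE markers.
// Other comment lines (e.g. CONTEXT-RETENTION-POLICY) are skipped here.
func splitDML(r io.Reader, maxPoints int) ([]batch, error) {
	var (
		batches []batch
		current batch
	)
	flush := func() {
		if len(current.Points) > 0 {
			batches = append(batches, current)
			current = batch{Database: current.Database}
		}
	}

	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		switch {
		case line == "":
			continue
		case strings.HasPrefix(line, "# CONTEXT-DATABASE:"):
			flush()
			current.Database = strings.TrimSpace(strings.TrimPrefix(line, "# CONTEXT-DATABASE:"))
		case strings.HasPrefix(line, "#"):
			continue
		default:
			current.Points = append(current.Points, line)
			if len(current.Points) >= maxPoints {
				flush()
			}
		}
	}
	flush()
	return batches, scanner.Err()
}
```

Each returned batch could then be posted to the write endpoint for its database, in line with the import instructions that follow.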
It is important that when exporting you change your config to allow for the http endpoints not timing out. To do so, make this change in your config: @@ -190,4 +211,4 @@ 2015/07/29 22:18:28 error writing batch: write failed: field type conflict: input field "value" on measurement "metric" is type float64, already exists as type integer ``` - This is due to the fact that in `0.8` a field could get created and saved as int or float types for independent writes. In `0.9` the field has to have a consistent type. + This is due to the fact that in `0.8` a field could get created and saved as int or float types for independent writes. In `0.9` and greater the field has to have a consistent type. diff -Nru influxdb-0.10.0+dfsg1/importer/v8/importer.go influxdb-1.1.1+dfsg1/importer/v8/importer.go --- influxdb-0.10.0+dfsg1/importer/v8/importer.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/importer/v8/importer.go 2016-12-06 21:36:15.000000000 +0000 @@ -1,4 +1,4 @@ -package v8 +package v8 // import "github.com/influxdata/influxdb/importer/v8" import ( "bufio" @@ -11,7 +11,7 @@ "strings" "time" - "github.com/influxdb/influxdb/client" + "github.com/influxdata/influxdb/client" ) const batchSize = 5000 diff -Nru influxdb-0.10.0+dfsg1/influxdb.go influxdb-1.1.1+dfsg1/influxdb.go --- influxdb-0.10.0+dfsg1/influxdb.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxdb.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1 @@ +package influxdb // import "github.com/influxdata/influxdb" diff -Nru influxdb-0.10.0+dfsg1/influxql/ast.go influxdb-1.1.1+dfsg1/influxql/ast.go --- influxdb-0.10.0+dfsg1/influxql/ast.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/ast.go 2016-12-06 21:36:15.000000000 +0000 @@ -5,12 +5,14 @@ "errors" "fmt" "regexp" + "regexp/syntax" "sort" "strconv" "strings" "time" - "github.com/influxdb/influxdb/pkg/slices" + "github.com/gogo/protobuf/proto" + internal "github.com/influxdata/influxdb/influxql/internal" ) // DataType represents the primitive data types available in InfluxQL. @@ -23,14 +25,24 @@ Float = 1 // Integer means the data type is a integer Integer = 2 - // Boolean means the data type is a boolean. - Boolean = 3 // String means the data type is a string of text. - String = 4 + String = 3 + // Boolean means the data type is a boolean. + Boolean = 4 // Time means the data type is a time. Time = 5 // Duration means the data type is a duration of time. Duration = 6 + // Tag means the data type is a tag. + Tag = 7 + // AnyField means the data type is any field. + AnyField = 8 +) + +var ( + // ErrInvalidTime is returned when the timestamp string used to + // compare against time field is invalid. + ErrInvalidTime = errors.New("invalid timestamp string") ) // InspectDataType returns the data type of a given value. @@ -40,10 +52,10 @@ return Float case int64, int32, int: return Integer - case bool: - return Boolean case string: return String + case bool: + return Boolean case time.Time: return Time case time.Duration: @@ -53,20 +65,33 @@ } } +// InspectDataTypes returns all of the data types for an interface slice. 
+func InspectDataTypes(a []interface{}) []DataType { + dta := make([]DataType, len(a)) + for i, v := range a { + dta[i] = InspectDataType(v) + } + return dta +} + func (d DataType) String() string { switch d { case Float: return "float" case Integer: return "integer" - case Boolean: - return "boolean" case String: return "string" + case Boolean: + return "boolean" case Time: return "time" case Duration: return "duration" + case Tag: + return "tag" + case AnyField: + return "field" } return "unknown" } @@ -87,28 +112,30 @@ func (*CreateSubscriptionStatement) node() {} func (*CreateUserStatement) node() {} func (*Distinct) node() {} +func (*DeleteSeriesStatement) node() {} func (*DeleteStatement) node() {} func (*DropContinuousQueryStatement) node() {} func (*DropDatabaseStatement) node() {} func (*DropMeasurementStatement) node() {} func (*DropRetentionPolicyStatement) node() {} func (*DropSeriesStatement) node() {} -func (*DropServerStatement) node() {} +func (*DropShardStatement) node() {} func (*DropSubscriptionStatement) node() {} func (*DropUserStatement) node() {} func (*GrantStatement) node() {} func (*GrantAdminStatement) node() {} +func (*KillQueryStatement) node() {} func (*RevokeStatement) node() {} func (*RevokeAdminStatement) node() {} func (*SelectStatement) node() {} func (*SetPasswordUserStatement) node() {} func (*ShowContinuousQueriesStatement) node() {} func (*ShowGrantsForUserStatement) node() {} -func (*ShowServersStatement) node() {} func (*ShowDatabasesStatement) node() {} func (*ShowFieldKeysStatement) node() {} func (*ShowRetentionPoliciesStatement) node() {} func (*ShowMeasurementsStatement) node() {} +func (*ShowQueriesStatement) node() {} func (*ShowSeriesStatement) node() {} func (*ShowShardGroupsStatement) node() {} func (*ShowShardsStatement) node() {} @@ -125,6 +152,7 @@ func (*Dimension) node() {} func (Dimensions) node() {} func (*DurationLiteral) node() {} +func (*IntegerLiteral) node() {} func (*Field) node() {} func (Fields) node() {} func (*Measurement) node() {} @@ -133,6 +161,7 @@ func (*NumberLiteral) node() {} func (*ParenExpr) node() {} func (*RegexLiteral) node() {} +func (*ListLiteral) node() {} func (*SortField) node() {} func (SortFields) node() {} func (Sources) node() {} @@ -166,7 +195,7 @@ type Statement interface { Node stmt() - RequiredPrivileges() ExecutionPrivileges + RequiredPrivileges() (ExecutionPrivileges, error) } // HasDefaultDatabase provides an interface to get the default database from a Statement. 
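Because `RequiredPrivileges` now returns an error alongside the privilege list, callers have to check both values before acting on the privileges. The sketch below is not taken from the InfluxDB sources; `checkPrivileges`, `isAdmin`, and `hasDBPrivilege` are invented names standing in for whatever the authorization layer actually provides:

```go
package authexample

import (
	"fmt"

	"github.com/influxdata/influxdb/influxql"
)

// checkPrivileges illustrates the two-value RequiredPrivileges signature:
// the error is inspected first, then each privilege is verified against the
// caller's own authorization data.
func checkPrivileges(stmt influxql.Statement, isAdmin bool, hasDBPrivilege func(db string, p influxql.Privilege) bool) error {
	privs, err := stmt.RequiredPrivileges()
	if err != nil {
		// For example, a SELECT whose sources are not plain measurements
		// now reports an error instead of being silently accepted.
		return err
	}
	for _, p := range privs {
		if p.Admin && !isAdmin {
			return fmt.Errorf("statement requires admin privileges")
		}
		if !p.Admin && !hasDBPrivilege(p.Name, p.Privilege) {
			return fmt.Errorf("missing privilege on database %q", p.Name)
		}
	}
	return nil
}
```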
@@ -198,28 +227,30 @@ func (*CreateRetentionPolicyStatement) stmt() {} func (*CreateSubscriptionStatement) stmt() {} func (*CreateUserStatement) stmt() {} +func (*DeleteSeriesStatement) stmt() {} func (*DeleteStatement) stmt() {} func (*DropContinuousQueryStatement) stmt() {} func (*DropDatabaseStatement) stmt() {} func (*DropMeasurementStatement) stmt() {} func (*DropRetentionPolicyStatement) stmt() {} func (*DropSeriesStatement) stmt() {} -func (*DropServerStatement) stmt() {} func (*DropSubscriptionStatement) stmt() {} func (*DropUserStatement) stmt() {} func (*GrantStatement) stmt() {} func (*GrantAdminStatement) stmt() {} +func (*KillQueryStatement) stmt() {} func (*ShowContinuousQueriesStatement) stmt() {} func (*ShowGrantsForUserStatement) stmt() {} -func (*ShowServersStatement) stmt() {} func (*ShowDatabasesStatement) stmt() {} func (*ShowFieldKeysStatement) stmt() {} func (*ShowMeasurementsStatement) stmt() {} +func (*ShowQueriesStatement) stmt() {} func (*ShowRetentionPoliciesStatement) stmt() {} func (*ShowSeriesStatement) stmt() {} func (*ShowShardGroupsStatement) stmt() {} func (*ShowShardsStatement) stmt() {} func (*ShowStatsStatement) stmt() {} +func (*DropShardStatement) stmt() {} func (*ShowSubscriptionsStatement) stmt() {} func (*ShowDiagnosticsStatement) stmt() {} func (*ShowTagKeysStatement) stmt() {} @@ -241,15 +272,33 @@ func (*Call) expr() {} func (*Distinct) expr() {} func (*DurationLiteral) expr() {} +func (*IntegerLiteral) expr() {} func (*nilLiteral) expr() {} func (*NumberLiteral) expr() {} func (*ParenExpr) expr() {} func (*RegexLiteral) expr() {} +func (*ListLiteral) expr() {} func (*StringLiteral) expr() {} func (*TimeLiteral) expr() {} func (*VarRef) expr() {} func (*Wildcard) expr() {} +// Literal represents a static literal. +type Literal interface { + Expr + literal() +} + +func (*BooleanLiteral) literal() {} +func (*DurationLiteral) literal() {} +func (*IntegerLiteral) literal() {} +func (*nilLiteral) literal() {} +func (*NumberLiteral) literal() {} +func (*RegexLiteral) literal() {} +func (*ListLiteral) literal() {} +func (*StringLiteral) literal() {} +func (*TimeLiteral) literal() {} + // Source represents a source of data for a statement. type Source interface { Node @@ -261,6 +310,58 @@ // Sources represents a list of sources. type Sources []Source +// Names returns a list of source names. +func (a Sources) Names() []string { + names := make([]string, 0, len(a)) + for _, s := range a { + switch s := s.(type) { + case *Measurement: + names = append(names, s.Name) + } + } + return names +} + +// Filter returns a list of source names filtered by the database/retention policy. +func (a Sources) Filter(database, retentionPolicy string) []Source { + sources := make([]Source, 0, len(a)) + for _, s := range a { + switch s := s.(type) { + case *Measurement: + if s.Database == database && s.RetentionPolicy == retentionPolicy { + sources = append(sources, s) + } + } + } + return sources +} + +// HasSystemSource returns true if any of the sources are internal, system sources. +func (a Sources) HasSystemSource() bool { + for _, s := range a { + switch s := s.(type) { + case *Measurement: + if IsSystemName(s.Name) { + return true + } + } + } + return false +} + +// HasRegex returns true if any of the sources are regex measurements. +func (a Sources) HasRegex() bool { + for _, s := range a { + switch s := s.(type) { + case *Measurement: + if s.Regex != nil { + return true + } + } + } + return false +} + // String returns a string representation of a Sources array. 
func (a Sources) String() string { var buf bytes.Buffer @@ -276,6 +377,47 @@ return buf.String() } +// MarshalBinary encodes a list of sources to a binary format. +func (a Sources) MarshalBinary() ([]byte, error) { + var pb internal.Measurements + pb.Items = make([]*internal.Measurement, len(a)) + for i, source := range a { + pb.Items[i] = encodeMeasurement(source.(*Measurement)) + } + return proto.Marshal(&pb) +} + +// UnmarshalBinary decodes binary data into a list of sources. +func (a *Sources) UnmarshalBinary(buf []byte) error { + var pb internal.Measurements + if err := proto.Unmarshal(buf, &pb); err != nil { + return err + } + *a = make(Sources, len(pb.GetItems())) + for i := range pb.GetItems() { + mm, err := decodeMeasurement(pb.GetItems()[i]) + if err != nil { + return err + } + (*a)[i] = mm + } + return nil +} + +// IsSystemName returns true if name is an internal system name. +func IsSystemName(name string) bool { + switch name { + case "_fieldKeys", + "_measurements", + "_series", + "_tagKeys", + "_tags": + return true + default: + return false + } +} + // SortField represents a field to sort results by. type SortField struct { // Name of the field @@ -317,72 +459,72 @@ // Name of the database to be created. Name string - // IfNotExists indicates whether to return without error if the database - // already exists. - IfNotExists bool - // RetentionPolicyCreate indicates whether the user explicitly wants to create a retention policy RetentionPolicyCreate bool // RetentionPolicyDuration indicates retention duration for the new database - RetentionPolicyDuration time.Duration + RetentionPolicyDuration *time.Duration // RetentionPolicyReplication indicates retention replication for the new database - RetentionPolicyReplication int + RetentionPolicyReplication *int // RetentionPolicyName indicates retention name for the new database RetentionPolicyName string + + // RetentionPolicyShardGroupDuration indicates shard group duration for the new database + RetentionPolicyShardGroupDuration time.Duration } // String returns a string representation of the create database statement. func (s *CreateDatabaseStatement) String() string { var buf bytes.Buffer _, _ = buf.WriteString("CREATE DATABASE ") - if s.IfNotExists { - _, _ = buf.WriteString("IF NOT EXISTS ") - } _, _ = buf.WriteString(QuoteIdent(s.Name)) if s.RetentionPolicyCreate { - _, _ = buf.WriteString(" WITH DURATION ") - _, _ = buf.WriteString(s.RetentionPolicyDuration.String()) - _, _ = buf.WriteString(" REPLICATION ") - _, _ = buf.WriteString(strconv.Itoa(s.RetentionPolicyReplication)) - _, _ = buf.WriteString(" NAME ") - _, _ = buf.WriteString(QuoteIdent(s.RetentionPolicyName)) + _, _ = buf.WriteString(" WITH") + if s.RetentionPolicyDuration != nil { + _, _ = buf.WriteString(" DURATION ") + _, _ = buf.WriteString(s.RetentionPolicyDuration.String()) + } + if s.RetentionPolicyReplication != nil { + _, _ = buf.WriteString(" REPLICATION ") + _, _ = buf.WriteString(strconv.Itoa(*s.RetentionPolicyReplication)) + } + if s.RetentionPolicyShardGroupDuration > 0 { + _, _ = buf.WriteString(" SHARD DURATION ") + _, _ = buf.WriteString(s.RetentionPolicyShardGroupDuration.String()) + } + if s.RetentionPolicyName != "" { + _, _ = buf.WriteString(" NAME ") + _, _ = buf.WriteString(QuoteIdent(s.RetentionPolicyName)) + } } return buf.String() } // RequiredPrivileges returns the privilege required to execute a CreateDatabaseStatement. 
-func (s *CreateDatabaseStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +func (s *CreateDatabaseStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil } // DropDatabaseStatement represents a command to drop a database. type DropDatabaseStatement struct { // Name of the database to be dropped. Name string - - // IfExists indicates whether to return without error if the database - // does not exists. - IfExists bool } // String returns a string representation of the drop database statement. func (s *DropDatabaseStatement) String() string { var buf bytes.Buffer _, _ = buf.WriteString("DROP DATABASE ") - if s.IfExists { - _, _ = buf.WriteString("IF EXISTS ") - } _, _ = buf.WriteString(QuoteIdent(s.Name)) return buf.String() } // RequiredPrivileges returns the privilege required to execute a DropDatabaseStatement. -func (s *DropDatabaseStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +func (s *DropDatabaseStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil } // DropRetentionPolicyStatement represents a command to drop a retention policy from a database. @@ -405,8 +547,8 @@ } // RequiredPrivileges returns the privilege required to execute a DropRetentionPolicyStatement. -func (s *DropRetentionPolicyStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: false, Name: s.Database, Privilege: WritePrivilege}} +func (s *DropRetentionPolicyStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: false, Name: s.Database, Privilege: WritePrivilege}}, nil } // CreateUserStatement represents a command for creating a new user. @@ -435,8 +577,8 @@ } // RequiredPrivileges returns the privilege(s) required to execute a CreateUserStatement. -func (s *CreateUserStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +func (s *CreateUserStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil } // DropUserStatement represents a command for dropping a user. @@ -454,8 +596,8 @@ } // RequiredPrivileges returns the privilege(s) required to execute a DropUserStatement. -func (s *DropUserStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +func (s *DropUserStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil } // Privilege is a type of action a user can be granted the right to use. @@ -515,8 +657,8 @@ } // RequiredPrivileges returns the privilege required to execute a GrantStatement. -func (s *GrantStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +func (s *GrantStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil } // GrantAdminStatement represents a command for granting admin privilege. 
@@ -534,8 +676,34 @@ } // RequiredPrivileges returns the privilege required to execute a GrantAdminStatement. -func (s *GrantAdminStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +func (s *GrantAdminStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil +} + +// KillQueryStatement represents a command for killing a query. +type KillQueryStatement struct { + // The query to kill. + QueryID uint64 + + // The host to delegate the kill to. + Host string +} + +// String returns a string representation of the kill query statement. +func (s *KillQueryStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("KILL QUERY ") + _, _ = buf.WriteString(strconv.FormatUint(s.QueryID, 10)) + if s.Host != "" { + _, _ = buf.WriteString(" ON ") + _, _ = buf.WriteString(QuoteIdent(s.Host)) + } + return buf.String() +} + +// RequiredPrivileges returns the privilege required to execute a KillQueryStatement. +func (s *KillQueryStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil } // SetPasswordUserStatement represents a command for changing user password. @@ -558,8 +726,8 @@ } // RequiredPrivileges returns the privilege required to execute a SetPasswordUserStatement. -func (s *SetPasswordUserStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +func (s *SetPasswordUserStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil } // RevokeStatement represents a command to revoke a privilege from a user. @@ -587,8 +755,8 @@ } // RequiredPrivileges returns the privilege required to execute a RevokeStatement. -func (s *RevokeStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +func (s *RevokeStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil } // RevokeAdminStatement represents a command to revoke admin privilege from a user. @@ -606,8 +774,8 @@ } // RequiredPrivileges returns the privilege required to execute a RevokeAdminStatement. -func (s *RevokeAdminStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +func (s *RevokeAdminStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil } // CreateRetentionPolicyStatement represents a command to create a retention policy. @@ -626,6 +794,9 @@ // Should this policy be set as default for the database? Default bool + + // Shard Duration + ShardGroupDuration time.Duration } // String returns a string representation of the create retention policy. 
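For illustration only (this snippet is not from the repository), the `KillQueryStatement` added above can be constructed and rendered back to InfluxQL text; the query ID and host values are made up:

```go
package killexample

import (
	"fmt"

	"github.com/influxdata/influxdb/influxql"
)

// buildKillQuery prints the textual form assembled by KillQueryStatement.String:
// "KILL QUERY <id>" with an optional "ON <host>" suffix.
func buildKillQuery() {
	stmt := &influxql.KillQueryStatement{QueryID: 42}
	fmt.Println(stmt.String()) // KILL QUERY 42

	// With a host, the statement gains an ON clause; the host is passed
	// through QuoteIdent, so it may be rendered quoted.
	remote := &influxql.KillQueryStatement{QueryID: 42, Host: "data-node-1"}
	fmt.Println(remote.String())
}
```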
@@ -639,6 +810,10 @@ _, _ = buf.WriteString(FormatDuration(s.Duration)) _, _ = buf.WriteString(" REPLICATION ") _, _ = buf.WriteString(strconv.Itoa(s.Replication)) + if s.ShardGroupDuration > 0 { + _, _ = buf.WriteString(" SHARD DURATION ") + _, _ = buf.WriteString(FormatDuration(s.ShardGroupDuration)) + } if s.Default { _, _ = buf.WriteString(" DEFAULT") } @@ -646,8 +821,8 @@ } // RequiredPrivileges returns the privilege required to execute a CreateRetentionPolicyStatement. -func (s *CreateRetentionPolicyStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +func (s *CreateRetentionPolicyStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil } // AlterRetentionPolicyStatement represents a command to alter an existing retention policy. @@ -666,6 +841,9 @@ // Should this policy be set as defalut for the database? Default bool + + // Duration of the Shard + ShardGroupDuration *time.Duration } // String returns a string representation of the alter retention policy statement. @@ -686,6 +864,11 @@ _, _ = buf.WriteString(strconv.Itoa(*s.Replication)) } + if s.ShardGroupDuration != nil { + _, _ = buf.WriteString(" SHARD DURATION ") + _, _ = buf.WriteString(FormatDuration(*s.ShardGroupDuration)) + } + if s.Default { _, _ = buf.WriteString(" DEFAULT") } @@ -694,10 +877,11 @@ } // RequiredPrivileges returns the privilege required to execute an AlterRetentionPolicyStatement. -func (s *AlterRetentionPolicyStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +func (s *AlterRetentionPolicyStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil } +// FillOption represents different options for aggregate windows. type FillOption int const ( @@ -709,6 +893,8 @@ NumberFill // PreviousFill means that empty aggregate windows will be filled with whatever the previous aggregate window had PreviousFill + // LinearFill means that empty aggregate windows will be filled with whatever a linear value between non null windows + LinearFill ) // SelectStatement represents a command for extracting data from the database. @@ -754,18 +940,15 @@ // The value to fill empty aggregate buckets with, if any FillValue interface{} -} -// SourceNames returns a list of source names. -func (s *SelectStatement) SourceNames() []string { - a := make([]string, 0, len(s.Sources)) - for _, src := range s.Sources { - switch src := src.(type) { - case *Measurement: - a = append(a, src.Name) - } - } - return a + // Renames the implicit time field name. + TimeAlias string + + // Removes the "time" column from the output. + OmitTime bool + + // Removes duplicate rows from raw queries. 
+ Dedupe bool } // HasDerivative returns true if one of the function calls in the statement is a @@ -793,55 +976,28 @@ return false } -// HasSimpleCount return true if one of the function calls is a count function with a -// variable ref as the first arg -func (s *SelectStatement) HasSimpleCount() bool { - // recursively check for a simple count(varref) function - var hasCount func(f *Call) bool - hasCount = func(f *Call) bool { - if f.Name == "count" { - // it's nested if the first argument is an aggregate function - if _, ok := f.Args[0].(*VarRef); ok { - return true - } - } else { - for _, arg := range f.Args { - if child, ok := arg.(*Call); ok { - return hasCount(child) - } - } - } - return false - } - for _, f := range s.FunctionCalls() { - if hasCount(f) { - return true - } - } - return false -} - // TimeAscending returns true if the time field is sorted in chronological order. func (s *SelectStatement) TimeAscending() bool { return len(s.SortFields) == 0 || s.SortFields[0].Ascending } +// TimeFieldName returns the name of the time field. +func (s *SelectStatement) TimeFieldName() string { + if s.TimeAlias != "" { + return s.TimeAlias + } + return "time" +} + // Clone returns a deep copy of the statement. func (s *SelectStatement) Clone() *SelectStatement { - clone := &SelectStatement{ - Fields: make(Fields, 0, len(s.Fields)), - Dimensions: make(Dimensions, 0, len(s.Dimensions)), - Sources: cloneSources(s.Sources), - SortFields: make(SortFields, 0, len(s.SortFields)), - Condition: CloneExpr(s.Condition), - Limit: s.Limit, - Offset: s.Offset, - SLimit: s.SLimit, - SOffset: s.SOffset, - Fill: s.Fill, - FillValue: s.FillValue, - IsRawQuery: s.IsRawQuery, - } + clone := *s + clone.Fields = make(Fields, 0, len(s.Fields)) + clone.Dimensions = make(Dimensions, 0, len(s.Dimensions)) + clone.Sources = cloneSources(s.Sources) + clone.SortFields = make(SortFields, 0, len(s.SortFields)) + clone.Condition = CloneExpr(s.Condition) + if s.Target != nil { clone.Target = &Target{ Measurement: &Measurement{ @@ -861,7 +1017,7 @@ for _, f := range s.SortFields { clone.SortFields = append(clone.SortFields, &SortField{Name: f.Name, Ascending: f.Ascending}) } - return clone + return &clone } func cloneSources(sources Sources) Sources { @@ -889,55 +1045,313 @@ } } -// RewriteWildcards returns the re-written form of the select statement. Any wildcard query +// RewriteFields returns the re-written form of the select statement. Any wildcard query // fields are replaced with the supplied fields, and any wildcard GROUP BY fields are replaced -// with the supplied dimensions. -func (s *SelectStatement) RewriteWildcards(fields Fields, dimensions Dimensions) *SelectStatement { +// with the supplied dimensions. Any fields with no type specifier are rewritten with the +// appropriate type. +func (s *SelectStatement) RewriteFields(ic IteratorCreator) (*SelectStatement, error) { + // Retrieve a list of unique field and dimensions. + fieldSet, dimensionSet, err := ic.FieldDimensions(s.Sources) + if err != nil { + return s, err + } + + // Rewrite all variable references in the fields with their types if one + // hasn't been specified. 
+ rewrite := func(n Node) { + ref, ok := n.(*VarRef) + if !ok || (ref.Type != Unknown && ref.Type != AnyField) { + return + } + + if typ, ok := fieldSet[ref.Val]; ok { + ref.Type = typ + } else if ref.Type != AnyField { + if _, ok := dimensionSet[ref.Val]; ok { + ref.Type = Tag + } + } + } + WalkFunc(s.Fields, rewrite) + WalkFunc(s.Condition, rewrite) + + // Ignore if there are no wildcards. + hasFieldWildcard := s.HasFieldWildcard() + hasDimensionWildcard := s.HasDimensionWildcard() + if !hasFieldWildcard && !hasDimensionWildcard { + return s, nil + } + + // If there are no dimension wildcards then merge dimensions to fields. + if !hasDimensionWildcard { + // Remove the dimensions present in the group by so they don't get added as fields. + for _, d := range s.Dimensions { + switch expr := d.Expr.(type) { + case *VarRef: + if _, ok := dimensionSet[expr.Val]; ok { + delete(dimensionSet, expr.Val) + } + } + } + } + + // Sort the field and dimension names for wildcard expansion. + var fields []VarRef + if len(fieldSet) > 0 { + fields = make([]VarRef, 0, len(fieldSet)) + for name, typ := range fieldSet { + fields = append(fields, VarRef{Val: name, Type: typ}) + } + if !hasDimensionWildcard { + for name := range dimensionSet { + fields = append(fields, VarRef{Val: name, Type: Tag}) + } + dimensionSet = nil + } + sort.Sort(VarRefs(fields)) + } + dimensions := stringSetSlice(dimensionSet) + other := s.Clone() - selectWildcard, groupWildcard := false, false // Rewrite all wildcard query fields - rwFields := make(Fields, 0, len(s.Fields)) - for _, f := range s.Fields { - switch f.Expr.(type) { - case *Wildcard: - // Sort wildcard fields for consistent output - sort.Sort(fields) - rwFields = append(rwFields, fields...) - selectWildcard = true - default: - rwFields = append(rwFields, f) + if hasFieldWildcard { + // Allocate a slice assuming there is exactly one wildcard for efficiency. + rwFields := make(Fields, 0, len(s.Fields)+len(fields)-1) + for _, f := range s.Fields { + switch expr := f.Expr.(type) { + case *Wildcard: + for _, ref := range fields { + if expr.Type == FIELD && ref.Type == Tag { + continue + } else if expr.Type == TAG && ref.Type != Tag { + continue + } + rwFields = append(rwFields, &Field{Expr: &VarRef{Val: ref.Val, Type: ref.Type}}) + } + case *RegexLiteral: + for _, ref := range fields { + if expr.Val.MatchString(ref.Val) { + rwFields = append(rwFields, &Field{Expr: &VarRef{Val: ref.Val, Type: ref.Type}}) + } + } + case *Call: + // Clone a template that we can modify and use for new fields. + template := CloneExpr(expr).(*Call) + + // Search for the call with a wildcard by continuously descending until + // we no longer have a call. + call := template + for len(call.Args) > 0 { + arg, ok := call.Args[0].(*Call) + if !ok { + break + } + call = arg + } + + // Check if this field value is a wildcard. + if len(call.Args) == 0 { + rwFields = append(rwFields, f) + continue + } + + // Retrieve if this is a wildcard or a regular expression. + var re *regexp.Regexp + switch expr := call.Args[0].(type) { + case *Wildcard: + if expr.Type == TAG { + return s, fmt.Errorf("unable to use tag wildcard in %s()", call.Name) + } + case *RegexLiteral: + re = expr.Val + default: + rwFields = append(rwFields, f) + continue + } + + // All types that can expand wildcards support float and integer. + supportedTypes := map[DataType]struct{}{ + Float: struct{}{}, + Integer: struct{}{}, + } + + // Add additional types for certain functions. 
+ switch call.Name { + case "count", "first", "last", "distinct", "elapsed", "mode": + supportedTypes[String] = struct{}{} + fallthrough + case "min", "max": + supportedTypes[Boolean] = struct{}{} + } + + for _, ref := range fields { + // Do not expand tags within a function call. It likely won't do anything + // anyway and will be the wrong thing in 99% of cases. + if ref.Type == Tag { + continue + } else if _, ok := supportedTypes[ref.Type]; !ok { + continue + } else if re != nil && !re.MatchString(ref.Val) { + continue + } + + // Make a new expression and replace the wildcard within this cloned expression. + call.Args[0] = &VarRef{Val: ref.Val, Type: ref.Type} + rwFields = append(rwFields, &Field{ + Expr: CloneExpr(template), + Alias: fmt.Sprintf("%s_%s", f.Name(), ref.Val), + }) + } + default: + rwFields = append(rwFields, f) + } } + other.Fields = rwFields } - other.Fields = rwFields // Rewrite all wildcard GROUP BY fields - rwDimensions := make(Dimensions, 0, len(s.Dimensions)) - for _, d := range s.Dimensions { - switch d.Expr.(type) { - case *Wildcard: - rwDimensions = append(rwDimensions, dimensions...) - groupWildcard = true - default: - rwDimensions = append(rwDimensions, d) + if hasDimensionWildcard { + // Allocate a slice assuming there is exactly one wildcard for efficiency. + rwDimensions := make(Dimensions, 0, len(s.Dimensions)+len(dimensions)-1) + for _, d := range s.Dimensions { + switch expr := d.Expr.(type) { + case *Wildcard: + for _, name := range dimensions { + rwDimensions = append(rwDimensions, &Dimension{Expr: &VarRef{Val: name}}) + } + case *RegexLiteral: + for _, name := range dimensions { + if expr.Val.MatchString(name) { + rwDimensions = append(rwDimensions, &Dimension{Expr: &VarRef{Val: name}}) + } + } + default: + rwDimensions = append(rwDimensions, d) + } + } + other.Dimensions = rwDimensions + } + + return other, nil +} + +// RewriteRegexExprs rewrites regex conditions to make better use of the +// database index. +// +// Conditions that can currently be simplified are: +// +// - host =~ /^foo$/ becomes host = 'foo' +// - host !~ /^foo$/ becomes host != 'foo' +// +// Note: if the regex contains groups, character classes, repetition or +// similar, it's likely it won't be rewritten. In order to support rewriting +// regexes with these characters would be a lot more work. +func (s *SelectStatement) RewriteRegexConditions() { + s.Condition = RewriteExpr(s.Condition, func(e Expr) Expr { + be, ok := e.(*BinaryExpr) + if !ok || (be.Op != EQREGEX && be.Op != NEQREGEX) { + // This expression is not a binary condition or doesn't have a + // regex based operator. + return e + } + + // Handle regex-based condition. + rhs := be.RHS.(*RegexLiteral) // This must be a regex. + + val, ok := matchExactRegex(rhs.Val.String()) + if !ok { + // Regex didn't match. + return e } + + // Remove leading and trailing ^ and $. + be.RHS = &StringLiteral{Val: val} + + // Update the condition operator. + if be.Op == EQREGEX { + be.Op = EQ + } else { + be.Op = NEQ + } + return be + }) +} + +// matchExactRegex matches regexes that have the following form: /^foo$/. It +// considers /^$/ to be a matching regex. +func matchExactRegex(v string) (string, bool) { + re, err := syntax.Parse(v, syntax.Perl) + if err != nil { + // Nothing we can do or log. + return "", false } - if selectWildcard && !groupWildcard { - rwDimensions = append(rwDimensions, dimensions...) 
+ if re.Op != syntax.OpConcat { + return "", false } - other.Dimensions = rwDimensions - return other + if len(re.Sub) < 2 || len(re.Sub) > 3 { + // Regex has too few or too many subexpressions. + return "", false + } + + start := re.Sub[0] + if !(start.Op == syntax.OpBeginLine || start.Op == syntax.OpBeginText) { + // Regex does not begin with ^ + return "", false + } + + end := re.Sub[len(re.Sub)-1] + if !(end.Op == syntax.OpEndLine || end.Op == syntax.OpEndText) { + // Regex does not end with $ + return "", false + } + + if len(re.Sub) == 3 { + middle := re.Sub[1] + if middle.Op != syntax.OpLiteral { + // Regex does not contain a literal op. + return "", false + } + + // We can rewrite this regex. + return string(middle.Rune), true + } + + // The regex /^$/ + return "", true } // RewriteDistinct rewrites the expression to be a call for map/reduce to work correctly // This method assumes all validation has passed func (s *SelectStatement) RewriteDistinct() { - for i, f := range s.Fields { - if d, ok := f.Expr.(*Distinct); ok { - s.Fields[i].Expr = d.NewCall() - s.IsRawQuery = false + WalkFunc(s.Fields, func(n Node) { + switch n := n.(type) { + case *Field: + if expr, ok := n.Expr.(*Distinct); ok { + n.Expr = expr.NewCall() + s.IsRawQuery = false + } + case *Call: + for i, arg := range n.Args { + if arg, ok := arg.(*Distinct); ok { + n.Args[i] = arg.NewCall() + } + } + } + }) +} + +// RewriteTimeFields removes any "time" field references. +func (s *SelectStatement) RewriteTimeFields() { + for i := 0; i < len(s.Fields); i++ { + switch expr := s.Fields[i].Expr.(type) { + case *VarRef: + if expr.Val == "time" { + s.TimeAlias = s.Fields[i].Alias + s.Fields = append(s.Fields[:i], s.Fields[i+1:]...) + } } } } @@ -945,31 +1359,70 @@ // ColumnNames will walk all fields and functions and return the appropriate field names for the select statement // while maintaining order of the field names func (s *SelectStatement) ColumnNames() []string { - // Always set the first column to be time, even if they didn't specify it - columnNames := []string{"time"} - - // First walk each field + // First walk each field to determine the number of columns. + columnFields := Fields{} for _, field := range s.Fields { + columnFields = append(columnFields, field) + switch f := field.Expr.(type) { case *Call: if f.Name == "top" || f.Name == "bottom" { - if len(f.Args) == 2 { - columnNames = append(columnNames, f.Name) - continue + for _, arg := range f.Args[1:] { + ref, ok := arg.(*VarRef) + if ok { + columnFields = append(columnFields, &Field{Expr: ref}) + } } - // We have a special case now where we have to add the column names for the fields TOP or BOTTOM asked for as well - columnNames = slices.Union(columnNames, f.Fields(), true) - continue - } - columnNames = append(columnNames, field.Name()) - default: - // time is always first, and we already added it, so ignore it if they asked for it anywhere else. - if field.Name() != "time" { - columnNames = append(columnNames, field.Name()) } } } + // Determine if we should add an extra column for an implicit time. + offset := 0 + if !s.OmitTime { + offset++ + } + + columnNames := make([]string, len(columnFields)+offset) + if !s.OmitTime { + // Add the implicit time if requested. + columnNames[0] = s.TimeFieldName() + } + + // Keep track of the encountered column names. + names := make(map[string]int) + + // Resolve aliases first. 
+ for i, col := range columnFields { + if col.Alias != "" { + columnNames[i+offset] = col.Alias + names[col.Alias] = 1 + } + } + + // Resolve any generated names and resolve conflicts. + for i, col := range columnFields { + if columnNames[i+offset] != "" { + continue + } + + name := col.Name() + count, conflict := names[name] + if conflict { + for { + resolvedName := fmt.Sprintf("%s_%d", name, count) + _, conflict = names[resolvedName] + if !conflict { + names[name] = count + 1 + name = resolvedName + break + } + count++ + } + } + names[name]++ + columnNames[i+offset] = name + } return columnNames } @@ -1011,6 +1464,8 @@ _, _ = buf.WriteString(" fill(none)") case NumberFill: _, _ = buf.WriteString(fmt.Sprintf(" fill(%v)", s.FillValue)) + case LinearFill: + _, _ = buf.WriteString(" fill(linear)") case PreviousFill: _, _ = buf.WriteString(" fill(previous)") } @@ -1035,14 +1490,29 @@ } // RequiredPrivileges returns the privilege required to execute the SelectStatement. -func (s *SelectStatement) RequiredPrivileges() ExecutionPrivileges { - ep := ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}} +// NOTE: Statement should be normalized first (database name(s) in Sources and +// Target should be populated). If the statement has not been normalized, an +// empty string will be returned for the database name and it is up to the caller +// to interpret that as the default database. +func (s *SelectStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + ep := ExecutionPrivileges{} + for _, source := range s.Sources { + measurement, ok := source.(*Measurement) + if !ok { + return nil, fmt.Errorf("invalid measurement: %s", source) + } + + ep = append(ep, ExecutionPrivilege{ + Name: measurement.Database, + Privilege: ReadPrivilege, + }) + } if s.Target != nil { p := ExecutionPrivilege{Admin: false, Name: s.Target.Measurement.Database, Privilege: WritePrivilege} ep = append(ep, p) } - return ep + return ep, nil } // HasWildcard returns whether or not the select statement has at least 1 wildcard @@ -1051,23 +1521,25 @@ } // HasFieldWildcard returns whether or not the select statement has at least 1 wildcard in the fields -func (s *SelectStatement) HasFieldWildcard() bool { - for _, f := range s.Fields { - _, ok := f.Expr.(*Wildcard) - if ok { - return true +func (s *SelectStatement) HasFieldWildcard() (hasWildcard bool) { + WalkFunc(s.Fields, func(n Node) { + if hasWildcard { + return + } + switch n.(type) { + case *Wildcard, *RegexLiteral: + hasWildcard = true } - } - - return false + }) + return hasWildcard } // HasDimensionWildcard returns whether or not the select statement has // at least 1 wildcard in the dimensions aka `GROUP BY` func (s *SelectStatement) HasDimensionWildcard() bool { for _, d := range s.Dimensions { - _, ok := d.Expr.(*Wildcard) - if ok { + switch d.Expr.(type) { + case *Wildcard, *RegexLiteral: return true } } @@ -1088,18 +1560,10 @@ return err } - if err := s.validateCountDistinct(); err != nil { - return err - } - if err := s.validateAggregates(tr); err != nil { return err } - if err := s.validateDerivative(); err != nil { - return err - } - return nil } @@ -1108,32 +1572,56 @@ if len(ns) == 1 && ns[0] == "time" { return fmt.Errorf("at least 1 non-time field must be queried") } - return nil -} -func (s *SelectStatement) validateDimensions() error { + for _, f := range s.Fields { + switch expr := f.Expr.(type) { + case *BinaryExpr: + if err := expr.validate(); err != nil { + return err + } + } + } + return nil +} + +func (s *SelectStatement) 
validateDimensions() error { var dur time.Duration for _, dim := range s.Dimensions { switch expr := dim.Expr.(type) { case *Call: - // Ensure the call is time() and it only has one duration argument. + // Ensure the call is time() and it has one or two duration arguments. // If we already have a duration if expr.Name != "time" { return errors.New("only time() calls allowed in dimensions") - } else if len(expr.Args) != 1 { - return errors.New("time dimension expected one argument") + } else if got := len(expr.Args); got < 1 || got > 2 { + return errors.New("time dimension expected 1 or 2 arguments") } else if lit, ok := expr.Args[0].(*DurationLiteral); !ok { - return errors.New("time dimension must have one duration argument") + return errors.New("time dimension must have duration argument") } else if dur != 0 { return errors.New("multiple time dimensions not allowed") } else { dur = lit.Val + if len(expr.Args) == 2 { + switch lit := expr.Args[1].(type) { + case *DurationLiteral: + // noop + case *Call: + if lit.Name != "now" { + return errors.New("time dimension offset function must be now()") + } else if len(lit.Args) != 0 { + return errors.New("time dimension offset now() function requires no arguments") + } + default: + return errors.New("time dimension offset must be duration or now()") + } + } } case *VarRef: if strings.ToLower(expr.Val) == "time" { return errors.New("time() is a function and expects at least one argument") } case *Wildcard: + case *RegexLiteral: default: return errors.New("only time and tag dimensions allowed") } @@ -1158,12 +1646,12 @@ numAggregates++ } } - // For TOP, BOTTOM, MAX, MIN, FIRST, LAST (selector functions) it is ok to ask for fields and tags + // For TOP, BOTTOM, MAX, MIN, FIRST, LAST, PERCENTILE (selector functions) it is ok to ask for fields and tags // but only if one function is specified. Combining multiple functions and fields and tags is not currently supported onlySelectors := true for k := range calls { switch k { - case "top", "bottom", "max", "min", "first", "last": + case "top", "bottom", "max", "min", "first", "last", "percentile", "sample": default: onlySelectors = false break @@ -1192,7 +1680,7 @@ return fmt.Errorf("invalid number of arguments for %s, expected at least %d, got %d", expr.Name, exp, got) } if len(expr.Args) > 1 { - callLimit, ok := expr.Args[len(expr.Args)-1].(*NumberLiteral) + callLimit, ok := expr.Args[len(expr.Args)-1].(*IntegerLiteral) if !ok { return fmt.Errorf("expected integer as last argument in %s(), found %s", expr.Name, expr.Args[len(expr.Args)-1]) } @@ -1218,34 +1706,94 @@ if exp, got := 2, len(expr.Args); got != exp { return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", expr.Name, exp, got) } - _, ok := expr.Args[1].(*NumberLiteral) - if !ok { + + switch expr.Args[0].(type) { + case *VarRef, *RegexLiteral, *Wildcard: + // do nothing + default: + return fmt.Errorf("expected field argument in percentile()") + } + + switch expr.Args[1].(type) { + case *IntegerLiteral, *NumberLiteral: + return nil + default: return fmt.Errorf("expected float argument in percentile()") } - return nil +} + +// validPercentileAggr determines if PERCENTILE have valid arguments. 
+func (s *SelectStatement) validSampleAggr(expr *Call) error { + if err := s.validSelectWithAggregate(); err != nil { + return err + } + if exp, got := 2, len(expr.Args); got != exp { + return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", expr.Name, exp, got) + } + + switch expr.Args[0].(type) { + case *VarRef, *RegexLiteral, *Wildcard: + // do nothing + default: + return fmt.Errorf("expected field argument in sample()") + } + + switch expr.Args[1].(type) { + case *IntegerLiteral: + return nil + default: + return fmt.Errorf("expected integer argument in sample()") + } } func (s *SelectStatement) validateAggregates(tr targetRequirement) error { for _, f := range s.Fields { for _, expr := range walkFunctionCalls(f.Expr) { switch expr.Name { - case "derivative", "non_negative_derivative": + case "derivative", "non_negative_derivative", "difference", "moving_average", "cumulative_sum", "elapsed": if err := s.validSelectWithAggregate(); err != nil { return err } - if min, max, got := 1, 2, len(expr.Args); got > max || got < min { - return fmt.Errorf("invalid number of arguments for %s, expected at least %d but no more than %d, got %d", expr.Name, min, max, got) + switch expr.Name { + case "derivative", "non_negative_derivative", "elapsed": + if min, max, got := 1, 2, len(expr.Args); got > max || got < min { + return fmt.Errorf("invalid number of arguments for %s, expected at least %d but no more than %d, got %d", expr.Name, min, max, got) + } + // If a duration arg is passed, make sure it's a duration + if len(expr.Args) == 2 { + // Second must be a duration .e.g (1h) + if _, ok := expr.Args[1].(*DurationLiteral); !ok { + return fmt.Errorf("second argument to %s must be a duration, got %T", expr.Name, expr.Args[1]) + } + } + case "difference", "cumulative_sum": + if got := len(expr.Args); got != 1 { + return fmt.Errorf("invalid number of arguments for %s, expected 1, got %d", expr.Name, got) + } + case "moving_average": + if got := len(expr.Args); got != 2 { + return fmt.Errorf("invalid number of arguments for moving_average, expected 2, got %d", got) + } + + if lit, ok := expr.Args[1].(*IntegerLiteral); !ok { + return fmt.Errorf("second argument for moving_average must be an integer, got %T", expr.Args[1]) + } else if lit.Val <= 1 { + return fmt.Errorf("moving_average window must be greater than 1, got %d", lit.Val) + } else if int64(int(lit.Val)) != lit.Val { + return fmt.Errorf("moving_average window too large, got %d", lit.Val) + } } // Validate that if they have grouping by time, they need a sub-call like min/max, etc. 
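The argument rules added above for moving_average reduce to a few checks on the window argument. A standalone sketch, using plain Go values in place of the influxql literal types:

package main

import (
	"errors"
	"fmt"
)

// checkMovingAverageWindow mirrors the validation above: exactly two
// arguments, and the window must be an integer literal greater than 1
// that fits in a native int.
func checkMovingAverageWindow(args []interface{}) error {
	if got := len(args); got != 2 {
		return fmt.Errorf("invalid number of arguments for moving_average, expected 2, got %d", got)
	}
	window, ok := args[1].(int64)
	if !ok {
		return errors.New("second argument for moving_average must be an integer")
	}
	if window <= 1 {
		return fmt.Errorf("moving_average window must be greater than 1, got %d", window)
	}
	if int64(int(window)) != window {
		return fmt.Errorf("moving_average window too large, got %d", window)
	}
	return nil
}

func main() {
	fmt.Println(checkMovingAverageWindow([]interface{}{"value", int64(5)})) // <nil>
	fmt.Println(checkMovingAverageWindow([]interface{}{"value", int64(1)})) // window must be greater than 1
	fmt.Println(checkMovingAverageWindow([]interface{}{"value", 5.0}))      // must be an integer
}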
groupByInterval, err := s.GroupByInterval() if err != nil { return fmt.Errorf("invalid group interval: %v", err) } - if groupByInterval > 0 { - c, ok := expr.Args[0].(*Call) - if !ok { - return fmt.Errorf("aggregate function required inside the call to %s", expr.Name) - } + + if c, ok := expr.Args[0].(*Call); ok && groupByInterval == 0 { + return fmt.Errorf("%s aggregate requires a GROUP BY interval", expr.Name) + } else if !ok && groupByInterval > 0 { + return fmt.Errorf("aggregate function required inside the call to %s", expr.Name) + } else if ok { switch c.Name { case "top", "bottom": if err := s.validTopBottomAggr(c); err != nil { @@ -1259,6 +1807,25 @@ if exp, got := 1, len(c.Args); got != exp { return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", c.Name, exp, got) } + + switch fc := c.Args[0].(type) { + case *VarRef, *Wildcard, *RegexLiteral: + // do nothing + case *Call: + if fc.Name != "distinct" || expr.Name != "count" { + return fmt.Errorf("expected field argument in %s()", c.Name) + } else if exp, got := 1, len(fc.Args); got != exp { + return fmt.Errorf("count(distinct %s) can only have %d argument(s), got %d", fc.Name, exp, got) + } else if _, ok := fc.Args[0].(*VarRef); !ok { + return fmt.Errorf("expected field argument in distinct()") + } + case *Distinct: + if expr.Name != "count" { + return fmt.Errorf("expected field argument in %s()", c.Name) + } + default: + return fmt.Errorf("expected field argument in %s()", c.Name) + } } } case "top", "bottom": @@ -1269,19 +1836,56 @@ if err := s.validPercentileAggr(expr); err != nil { return err } + case "sample": + if err := s.validSampleAggr(expr); err != nil { + return err + } + case "holt_winters", "holt_winters_with_fit": + if exp, got := 3, len(expr.Args); got != exp { + return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", expr.Name, exp, got) + } + // Validate that if they have grouping by time, they need a sub-call like min/max, etc. + groupByInterval, err := s.GroupByInterval() + if err != nil { + return fmt.Errorf("invalid group interval: %v", err) + } + + if _, ok := expr.Args[0].(*Call); ok && groupByInterval == 0 { + return fmt.Errorf("%s aggregate requires a GROUP BY interval", expr.Name) + } else if !ok { + return fmt.Errorf("must use aggregate function with %s", expr.Name) + } + if arg, ok := expr.Args[1].(*IntegerLiteral); !ok { + return fmt.Errorf("expected integer argument as second arg in %s", expr.Name) + } else if arg.Val <= 0 { + return fmt.Errorf("second arg to %s must be greater than 0, got %d", expr.Name, arg.Val) + } + if _, ok := expr.Args[2].(*IntegerLiteral); !ok { + return fmt.Errorf("expected integer argument as third arg in %s", expr.Name) + } default: if err := s.validSelectWithAggregate(); err != nil { return err } if exp, got := 1, len(expr.Args); got != exp { + // Special error message if distinct was used as the argument. 
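Taken together, these rules mean a transform such as derivative() must wrap an aggregate when a GROUP BY time interval is present, and must not wrap one without the interval. Assuming, as in this tree, the parser runs SelectStatement validation and exposes a ParseStatement helper, the behaviour can be observed directly (outputs indicative, not verified here):

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	queries := []string{
		// A nested aggregate together with a GROUP BY time interval should parse.
		`SELECT derivative(mean(value)) FROM cpu WHERE time > now() - 1h GROUP BY time(10m)`,
		// Without the interval, the validation above should reject the nested call.
		`SELECT derivative(mean(value)) FROM cpu`,
	}
	for _, q := range queries {
		_, err := influxql.ParseStatement(q)
		fmt.Printf("%s\n  -> err=%v\n", q, err)
	}
}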
+ if expr.Name == "count" && got >= 1 { + if _, ok := expr.Args[0].(*Distinct); ok { + return fmt.Errorf("count(distinct ) can only have one argument") + } + } return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", expr.Name, exp, got) } switch fc := expr.Args[0].(type) { - case *VarRef: + case *VarRef, *Wildcard, *RegexLiteral: // do nothing case *Call: - if fc.Name != "distinct" { + if fc.Name != "distinct" || expr.Name != "count" { return fmt.Errorf("expected field argument in %s()", expr.Name) + } else if exp, got := 1, len(fc.Args); got != exp { + return fmt.Errorf("count(distinct ) can only have one argument") + } else if _, ok := fc.Args[0].(*VarRef); !ok { + return fmt.Errorf("expected field argument in distinct()") } case *Distinct: if expr.Name != "count" { @@ -1313,6 +1917,7 @@ return nil } +// HasDistinct checks if a select statement contains DISTINCT func (s *SelectStatement) HasDistinct() bool { // determine if we have a call named distinct for _, f := range s.Fields { @@ -1350,103 +1955,7 @@ return nil } -func (s *SelectStatement) HasCountDistinct() bool { - for _, f := range s.Fields { - if c, ok := f.Expr.(*Call); ok { - if c.Name == "count" { - for _, a := range c.Args { - if _, ok := a.(*Distinct); ok { - return true - } - if c, ok := a.(*Call); ok { - if c.Name == "distinct" { - return true - } - } - } - } - } - } - return false -} - -func (s *SelectStatement) validateCountDistinct() error { - if !s.HasCountDistinct() { - return nil - } - - valid := func(e Expr) bool { - c, ok := e.(*Call) - if !ok { - return true - } - if c.Name != "count" { - return true - } - for _, a := range c.Args { - if _, ok := a.(*Distinct); ok { - return len(c.Args) == 1 - } - if d, ok := a.(*Call); ok { - if d.Name == "distinct" { - return len(d.Args) == 1 - } - } - } - return true - } - - for _, f := range s.Fields { - if !valid(f.Expr) { - return fmt.Errorf("count(distinct ) can only have one argument") - } - } - - return nil -} - -func (s *SelectStatement) validateDerivative() error { - if !s.HasDerivative() { - return nil - } - - // If a derivative is requested, it must be the only field in the query. We don't support - // multiple fields in combination w/ derivaties yet. - if len(s.Fields) != 1 { - return fmt.Errorf("derivative cannot be used with other fields") - } - - aggr := s.FunctionCalls() - if len(aggr) != 1 { - return fmt.Errorf("derivative cannot be used with other fields") - } - - // Derivative requires two arguments - derivativeCall := aggr[0] - if len(derivativeCall.Args) == 0 { - return fmt.Errorf("derivative requires a field argument") - } - - // First arg must be a field or aggr over a field e.g. (mean(field)) - _, callOk := derivativeCall.Args[0].(*Call) - _, varOk := derivativeCall.Args[0].(*VarRef) - - if !(callOk || varOk) { - return fmt.Errorf("derivative requires a field argument") - } - - // If a duration arg is pased, make sure it's a duration - if len(derivativeCall.Args) == 2 { - // Second must be a duration .e.g (1h) - if _, ok := derivativeCall.Args[1].(*DurationLiteral); !ok { - return fmt.Errorf("derivative requires a duration argument") - } - } - - return nil -} - -// GroupByIterval extracts the time interval, if specified. +// GroupByInterval extracts the time interval, if specified. 
func (s *SelectStatement) GroupByInterval() (time.Duration, error) { // return if we've already pulled it out if s.groupByInterval != 0 { @@ -1461,14 +1970,14 @@ for _, d := range s.Dimensions { if call, ok := d.Expr.(*Call); ok && call.Name == "time" { // Make sure there is exactly one argument. - if len(call.Args) != 1 { - return 0, errors.New("time dimension expected one argument") + if got := len(call.Args); got < 1 || got > 2 { + return 0, errors.New("time dimension expected 1 or 2 arguments") } // Ensure the argument is a duration. lit, ok := call.Args[0].(*DurationLiteral) if !ok { - return 0, errors.New("time dimension must have one duration argument") + return 0, errors.New("time dimension must have duration argument") } s.groupByInterval = lit.Val return lit.Val, nil @@ -1477,6 +1986,36 @@ return 0, nil } +// GroupByOffset extracts the time interval offset, if specified. +func (s *SelectStatement) GroupByOffset() (time.Duration, error) { + interval, err := s.GroupByInterval() + if err != nil { + return 0, err + } + + // Ignore if there are no dimensions. + if len(s.Dimensions) == 0 { + return 0, nil + } + + for _, d := range s.Dimensions { + if call, ok := d.Expr.(*Call); ok && call.Name == "time" { + if len(call.Args) == 2 { + switch expr := call.Args[1].(type) { + case *DurationLiteral: + return expr.Val % interval, nil + case *TimeLiteral: + return expr.Val.Sub(expr.Val.Truncate(interval)), nil + default: + return 0, fmt.Errorf("invalid time dimension offset: %s", expr) + } + } + return 0, nil + } + } + return 0, nil +} + // SetTimeRange sets the start and end time of the select statement to [start, end). i.e. start inclusive, end exclusive. // This is used commonly for continuous queries so the start and end are in buckets. func (s *SelectStatement) SetTimeRange(start, end time.Time) error { @@ -1516,71 +2055,6 @@ return n.String() } -/* - -BinaryExpr - -SELECT mean(xxx.value) + avg(yyy.value) FROM xxx JOIN yyy WHERE xxx.host = 123 - -from xxx where host = 123 -select avg(value) from yyy where host = 123 - -SELECT xxx.value FROM xxx WHERE xxx.host = 123 -SELECT yyy.value FROM yyy - ---- - -SELECT MEAN(xxx.value) + MEAN(cpu.load.value) -FROM xxx JOIN yyy -GROUP BY host -WHERE (xxx.region == "uswest" OR yyy.region == "uswest") AND xxx.otherfield == "XXX" - -select * from ( - select mean + mean from xxx join yyy - group by time(5m), host -) (xxx.region == "uswest" OR yyy.region == "uswest") AND xxx.otherfield == "XXX" - -(seriesIDS for xxx.region = 'uswest' union seriesIDs for yyy.regnion = 'uswest') | seriesIDS xxx.otherfield = 'XXX' - -WHERE xxx.region == "uswest" AND xxx.otherfield == "XXX" -WHERE yyy.region == "uswest" - - -*/ - -// Substatement returns a single-series statement for a given variable reference. -func (s *SelectStatement) Substatement(ref *VarRef) (*SelectStatement, error) { - // Copy dimensions and properties to new statement. - other := &SelectStatement{ - Fields: Fields{{Expr: ref}}, - Dimensions: s.Dimensions, - Limit: s.Limit, - Offset: s.Offset, - SortFields: s.SortFields, - } - - // If there is only one series source then return it with the whole condition. - if len(s.Sources) == 1 { - other.Sources = s.Sources - other.Condition = s.Condition - return other, nil - } - - // Find the matching source. - name := MatchSource(s.Sources, ref.Val) - if name == "" { - return nil, fmt.Errorf("field source not found: %s", ref.Val) - } - other.Sources = append(other.Sources, &Measurement{Name: name}) - - // Filter out conditions. 
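GroupByOffset reduces a duration offset modulo the interval and turns a time-literal offset into the distance from the previous interval boundary. The arithmetic, in isolation:

package main

import (
	"fmt"
	"time"
)

func main() {
	interval := 10 * time.Minute

	// A duration offset is reduced modulo the interval, so time(10m, 12m)
	// behaves like time(10m, 2m).
	offset := 12 * time.Minute
	fmt.Println(offset % interval) // 2m0s

	// A time-literal offset becomes the distance from the previous
	// interval boundary.
	t := time.Date(2016, 12, 6, 21, 36, 15, 0, time.UTC)
	fmt.Println(t.Sub(t.Truncate(interval))) // 6m15s
}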
- if s.Condition != nil { - other.Condition = filterExprBySource(name, s.Condition) - } - - return other, nil -} - // NamesInWhere returns the field and tag names (idents) referenced in the where clause func (s *SelectStatement) NamesInWhere() []string { var a []string @@ -1613,22 +2087,22 @@ } // LimitTagSets returns a tag set list with SLIMIT and SOFFSET applied. -func (s *SelectStatement) LimitTagSets(a []*TagSet) []*TagSet { +func LimitTagSets(a []*TagSet, slimit, soffset int) []*TagSet { // Ignore if no limit or offset is specified. - if s.SLimit == 0 && s.SOffset == 0 { + if slimit == 0 && soffset == 0 { return a } // If offset is beyond the number of tag sets then return nil. - if s.SOffset > len(a) { + if soffset > len(a) { return nil } // Clamp limit to the max number of tag sets. - if s.SOffset+s.SLimit > len(a) { - s.SLimit = len(a) - s.SOffset + if soffset+slimit > len(a) { + slimit = len(a) - soffset } - return a[s.SOffset : s.SOffset+s.SLimit] + return a[soffset : soffset+slimit] } // walkNames will walk the Expr and return the database fields @@ -1637,15 +2111,13 @@ case *VarRef: return []string{expr.Val} case *Call: - if len(expr.Args) == 0 { - return nil - } - lit, ok := expr.Args[0].(*VarRef) - if !ok { - return nil + var a []string + for _, expr := range expr.Args { + if ref, ok := expr.(*VarRef); ok { + a = append(a, ref.Val) + } } - - return []string{lit.Val} + return a case *BinaryExpr: var ret []string ret = append(ret, walkNames(expr.LHS)...) @@ -1658,6 +2130,52 @@ return nil } +// walkRefs will walk the Expr and return the database fields +func walkRefs(exp Expr) []VarRef { + switch expr := exp.(type) { + case *VarRef: + return []VarRef{*expr} + case *Call: + a := make([]VarRef, 0, len(expr.Args)) + for _, expr := range expr.Args { + if ref, ok := expr.(*VarRef); ok { + a = append(a, *ref) + } + } + return a + case *BinaryExpr: + lhs := walkRefs(expr.LHS) + rhs := walkRefs(expr.RHS) + ret := make([]VarRef, 0, len(lhs)+len(rhs)) + ret = append(ret, lhs...) + ret = append(ret, rhs...) + return ret + case *ParenExpr: + return walkRefs(expr.Expr) + } + + return nil +} + +// ExprNames returns a list of non-"time" field names from an expression. +func ExprNames(expr Expr) []VarRef { + m := make(map[VarRef]struct{}) + for _, ref := range walkRefs(expr) { + if ref.Val == "time" { + continue + } + m[ref] = struct{}{} + } + + a := make([]VarRef, 0, len(m)) + for k := range m { + a = append(a, k) + } + sort.Sort(VarRefs(a)) + + return a +} + // FunctionCalls returns the Call objects from the query func (s *SelectStatement) FunctionCalls() []*Call { var a []*Call @@ -1792,12 +2310,16 @@ } // RequiredPrivileges returns the privilege required to execute a DeleteStatement. -func (s *DeleteStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: false, Name: "", Privilege: WritePrivilege}} +func (s *DeleteStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: false, Name: "", Privilege: WritePrivilege}}, nil } // ShowSeriesStatement represents a command for listing series in the database. type ShowSeriesStatement struct { + // Database to query. If blank, use the default database. + // The database can also be specified per source in the Sources. + Database string + // Measurement(s) the series are listed for. 
Sources Sources @@ -1820,6 +2342,10 @@ var buf bytes.Buffer _, _ = buf.WriteString("SHOW SERIES") + if s.Database != "" { + _, _ = buf.WriteString(" ON ") + _, _ = buf.WriteString(QuoteIdent(s.Database)) + } if s.Sources != nil { _, _ = buf.WriteString(" FROM ") _, _ = buf.WriteString(s.Sources.String()) @@ -1845,8 +2371,8 @@ } // RequiredPrivileges returns the privilege required to execute a ShowSeriesStatement. -func (s *ShowSeriesStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}} +func (s *ShowSeriesStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}}, nil } // DropSeriesStatement represents a command for removing a series from the database. @@ -1876,35 +2402,60 @@ } // RequiredPrivileges returns the privilege required to execute a DropSeriesStatement. -func (s DropSeriesStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: false, Name: "", Privilege: WritePrivilege}} +func (s DropSeriesStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: false, Name: "", Privilege: WritePrivilege}}, nil +} + +// DeleteSeriesStatement represents a command for deleting all or part of a series from a database. +type DeleteSeriesStatement struct { + // Data source that fields are extracted from (optional) + Sources Sources + + // An expression evaluated on data point (optional) + Condition Expr +} + +// String returns a string representation of the delete series statement. +func (s *DeleteSeriesStatement) String() string { + var buf bytes.Buffer + buf.WriteString("DELETE") + + if s.Sources != nil { + buf.WriteString(" FROM ") + buf.WriteString(s.Sources.String()) + } + if s.Condition != nil { + buf.WriteString(" WHERE ") + buf.WriteString(s.Condition.String()) + } + + return buf.String() } -// DropServerStatement represents a command for removing a server from the cluster. -type DropServerStatement struct { - // ID of the node to be dropped. - NodeID uint64 +// RequiredPrivileges returns the privilege required to execute a DeleteSeriesStatement. +func (s DeleteSeriesStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: false, Name: "", Privilege: WritePrivilege}}, nil +} - // Meta indicates if the server being dropped is a meta or data node - Meta bool +// DropShardStatement represents a command for removing a shard from +// the node. +type DropShardStatement struct { + // ID of the shard to be dropped. + ID uint64 } // String returns a string representation of the drop series statement. -func (s *DropServerStatement) String() string { +func (s *DropShardStatement) String() string { var buf bytes.Buffer - _, _ = buf.WriteString("DROP ") - if s.Meta { - _, _ = buf.WriteString(" META SERVER ") - } else { - _, _ = buf.WriteString(" DATA SERVER ") - } - _, _ = buf.WriteString(strconv.FormatUint(s.NodeID, 10)) + buf.WriteString("DROP SHARD ") + buf.WriteString(strconv.FormatUint(s.ID, 10)) return buf.String() } -// RequiredPrivileges returns the privilege required to execute a DropServerStatement. -func (s *DropServerStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Name: "", Privilege: AllPrivileges}} +// RequiredPrivileges returns the privilege required to execute a +// DropShardStatement. 
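The two new statements round-trip back to InfluxQL through their String methods. A short sketch using the types as declared above (import path assumed to be this tree's influxql package):

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	// The new DELETE statement stringifies back to InfluxQL.
	del := &influxql.DeleteSeriesStatement{
		Sources: influxql.Sources{&influxql.Measurement{Name: "cpu"}},
		Condition: &influxql.BinaryExpr{
			Op:  influxql.EQ,
			LHS: &influxql.VarRef{Val: "host"},
			RHS: &influxql.StringLiteral{Val: "server01"},
		},
	}
	fmt.Println(del.String()) // DELETE FROM cpu WHERE host = 'server01'

	// DROP SHARD addresses a single shard by its numeric ID.
	drop := &influxql.DropShardStatement{ID: 3}
	fmt.Println(drop.String()) // DROP SHARD 3
}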
+func (s *DropShardStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil } // ShowContinuousQueriesStatement represents a command for listing continuous queries. @@ -1914,8 +2465,8 @@ func (s *ShowContinuousQueriesStatement) String() string { return "SHOW CONTINUOUS QUERIES" } // RequiredPrivileges returns the privilege required to execute a ShowContinuousQueriesStatement. -func (s *ShowContinuousQueriesStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}} +func (s *ShowContinuousQueriesStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}}, nil } // ShowGrantsForUserStatement represents a command for listing user privileges. @@ -1934,19 +2485,8 @@ } // RequiredPrivileges returns the privilege required to execute a ShowGrantsForUserStatement -func (s *ShowGrantsForUserStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} -} - -// ShowServersStatement represents a command for listing all servers. -type ShowServersStatement struct{} - -// String returns a string representation of the show servers command. -func (s *ShowServersStatement) String() string { return "SHOW SERVERS" } - -// RequiredPrivileges returns the privilege required to execute a ShowServersStatement -func (s *ShowServersStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +func (s *ShowGrantsForUserStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil } // ShowDatabasesStatement represents a command for listing all databases in the cluster. @@ -1956,8 +2496,8 @@ func (s *ShowDatabasesStatement) String() string { return "SHOW DATABASES" } // RequiredPrivileges returns the privilege required to execute a ShowDatabasesStatement -func (s *ShowDatabasesStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +func (s *ShowDatabasesStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil } // CreateContinuousQueryStatement represents a command for creating a continuous query. @@ -2002,7 +2542,7 @@ } // RequiredPrivileges returns the privilege required to execute a CreateContinuousQueryStatement. -func (s *CreateContinuousQueryStatement) RequiredPrivileges() ExecutionPrivileges { +func (s *CreateContinuousQueryStatement) RequiredPrivileges() (ExecutionPrivileges, error) { ep := ExecutionPrivileges{{Admin: false, Name: s.Database, Privilege: ReadPrivilege}} // Selecting into a database that's different from the source? 
@@ -2019,7 +2559,7 @@ ep = append(ep, p) } - return ep + return ep, nil } func (s *CreateContinuousQueryStatement) validate() error { @@ -2051,12 +2591,15 @@ } // RequiredPrivileges returns the privilege(s) required to execute a DropContinuousQueryStatement -func (s *DropContinuousQueryStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: false, Name: "", Privilege: WritePrivilege}} +func (s *DropContinuousQueryStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: false, Name: "", Privilege: WritePrivilege}}, nil } // ShowMeasurementsStatement represents a command for listing measurements. type ShowMeasurementsStatement struct { + // Database to query. If blank, use the default database. + Database string + // Measurement name or regex. Source Source @@ -2079,6 +2622,10 @@ var buf bytes.Buffer _, _ = buf.WriteString("SHOW MEASUREMENTS") + if s.Database != "" { + _, _ = buf.WriteString(" ON ") + _, _ = buf.WriteString(s.Database) + } if s.Source != nil { _, _ = buf.WriteString(" WITH MEASUREMENT ") if m, ok := s.Source.(*Measurement); ok && m.Regex != nil { @@ -2108,8 +2655,8 @@ } // RequiredPrivileges returns the privilege(s) required to execute a ShowMeasurementsStatement -func (s *ShowMeasurementsStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}} +func (s *ShowMeasurementsStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}}, nil } // DropMeasurementStatement represents a command to drop a measurement. @@ -2127,8 +2674,21 @@ } // RequiredPrivileges returns the privilege(s) required to execute a DropMeasurementStatement -func (s *DropMeasurementStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +func (s *DropMeasurementStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil +} + +// ShowQueriesStatement represents a command for listing all running queries. +type ShowQueriesStatement struct{} + +// String returns a string representation of the show queries statement. +func (s *ShowQueriesStatement) String() string { + return "SHOW QUERIES" +} + +// RequiredPrivileges returns the privilege required to execute a ShowQueriesStatement. +func (s *ShowQueriesStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}}, nil } // ShowRetentionPoliciesStatement represents a command for listing retention policies. @@ -2140,17 +2700,20 @@ // String returns a string representation of a ShowRetentionPoliciesStatement. 
func (s *ShowRetentionPoliciesStatement) String() string { var buf bytes.Buffer - _, _ = buf.WriteString("SHOW RETENTION POLICIES ON ") - _, _ = buf.WriteString(QuoteIdent(s.Database)) + _, _ = buf.WriteString("SHOW RETENTION POLICIES") + if s.Database != "" { + _, _ = buf.WriteString(" ON ") + _, _ = buf.WriteString(QuoteIdent(s.Database)) + } return buf.String() } // RequiredPrivileges returns the privilege(s) required to execute a ShowRetentionPoliciesStatement -func (s *ShowRetentionPoliciesStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}} +func (s *ShowRetentionPoliciesStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}}, nil } -// ShowStats statement displays statistics for a given module. +// ShowStatsStatement displays statistics for a given module. type ShowStatsStatement struct { // Module Module string @@ -2159,17 +2722,17 @@ // String returns a string representation of a ShowStatsStatement. func (s *ShowStatsStatement) String() string { var buf bytes.Buffer - _, _ = buf.WriteString("SHOW STATS ") + _, _ = buf.WriteString("SHOW STATS") if s.Module != "" { - _, _ = buf.WriteString("FOR ") - _, _ = buf.WriteString(s.Module) + _, _ = buf.WriteString(" FOR ") + _, _ = buf.WriteString(QuoteString(s.Module)) } return buf.String() } // RequiredPrivileges returns the privilege(s) required to execute a ShowStatsStatement -func (s *ShowStatsStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +func (s *ShowStatsStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil } // ShowShardGroupsStatement represents a command for displaying shard groups in the cluster. @@ -2179,8 +2742,8 @@ func (s *ShowShardGroupsStatement) String() string { return "SHOW SHARD GROUPS" } // RequiredPrivileges returns the privileges required to execute the statement. -func (s *ShowShardGroupsStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +func (s *ShowShardGroupsStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil } // ShowShardsStatement represents a command for displaying shards in the cluster. @@ -2190,8 +2753,8 @@ func (s *ShowShardsStatement) String() string { return "SHOW SHARDS" } // RequiredPrivileges returns the privileges required to execute the statement. -func (s *ShowShardsStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +func (s *ShowShardsStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil } // ShowDiagnosticsStatement represents a command for show node diagnostics. @@ -2203,17 +2766,17 @@ // String returns a string representation of the ShowDiagnosticsStatement. 
func (s *ShowDiagnosticsStatement) String() string { var buf bytes.Buffer - _, _ = buf.WriteString("SHOW DIAGNOSTICS ") + _, _ = buf.WriteString("SHOW DIAGNOSTICS") if s.Module != "" { - _, _ = buf.WriteString("FOR ") - _, _ = buf.WriteString(s.Module) + _, _ = buf.WriteString(" FOR ") + _, _ = buf.WriteString(QuoteString(s.Module)) } return buf.String() } // RequiredPrivileges returns the privilege required to execute a ShowDiagnosticsStatement -func (s *ShowDiagnosticsStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +func (s *ShowDiagnosticsStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil } // CreateSubscriptionStatement represents a command to add a subscription to the incoming data stream @@ -2248,8 +2811,8 @@ } // RequiredPrivileges returns the privilege required to execute a CreateSubscriptionStatement -func (s *CreateSubscriptionStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +func (s *CreateSubscriptionStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil } // DropSubscriptionStatement represents a command to drop a subscription to the incoming data stream. @@ -2265,8 +2828,8 @@ } // RequiredPrivileges returns the privilege required to execute a DropSubscriptionStatement -func (s *DropSubscriptionStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +func (s *DropSubscriptionStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil } // ShowSubscriptionsStatement represents a command to show a list of subscriptions. @@ -2279,12 +2842,16 @@ } // RequiredPrivileges returns the privilege required to execute a ShowSubscriptionStatement -func (s *ShowSubscriptionsStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +func (s *ShowSubscriptionsStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil } // ShowTagKeysStatement represents a command for listing tag keys. type ShowTagKeysStatement struct { + // Database to query. If blank, use the default database. + // The database can also be specified per source in the Sources. + Database string + // Data sources that fields are extracted from. 
Sources Sources @@ -2312,6 +2879,10 @@ var buf bytes.Buffer _, _ = buf.WriteString("SHOW TAG KEYS") + if s.Database != "" { + _, _ = buf.WriteString(" ON ") + _, _ = buf.WriteString(QuoteIdent(s.Database)) + } if s.Sources != nil { _, _ = buf.WriteString(" FROM ") _, _ = buf.WriteString(s.Sources.String()) @@ -2344,17 +2915,24 @@ } // RequiredPrivileges returns the privilege(s) required to execute a ShowTagKeysStatement -func (s *ShowTagKeysStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}} +func (s *ShowTagKeysStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}}, nil } // ShowTagValuesStatement represents a command for listing tag values. type ShowTagValuesStatement struct { + // Database to query. If blank, use the default database. + // The database can also be specified per source in the Sources. + Database string + // Data source that fields are extracted from. Sources Sources - // Tag key(s) to pull values from. - TagKeys []string + // Operation to use when selecting tag key(s). + Op Token + + // Literal to compare the tag key(s) with. + TagKeyExpr Literal // An expression evaluated on data point. Condition Expr @@ -2375,18 +2953,22 @@ var buf bytes.Buffer _, _ = buf.WriteString("SHOW TAG VALUES") + if s.Database != "" { + _, _ = buf.WriteString(" ON ") + _, _ = buf.WriteString(QuoteIdent(s.Database)) + } if s.Sources != nil { _, _ = buf.WriteString(" FROM ") _, _ = buf.WriteString(s.Sources.String()) } - _, _ = buf.WriteString(" WITH KEY IN (") - for idx, tagKey := range s.TagKeys { - if idx != 0 { - _, _ = buf.WriteString(", ") - } - _, _ = buf.WriteString(QuoteIdent(tagKey)) + _, _ = buf.WriteString(" WITH KEY ") + _, _ = buf.WriteString(s.Op.String()) + _, _ = buf.WriteString(" ") + if lit, ok := s.TagKeyExpr.(*StringLiteral); ok { + _, _ = buf.WriteString(QuoteIdent(lit.Val)) + } else { + _, _ = buf.WriteString(s.TagKeyExpr.String()) } - _, _ = buf.WriteString(")") if s.Condition != nil { _, _ = buf.WriteString(" WHERE ") _, _ = buf.WriteString(s.Condition.String()) @@ -2407,8 +2989,8 @@ } // RequiredPrivileges returns the privilege(s) required to execute a ShowTagValuesStatement -func (s *ShowTagValuesStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}} +func (s *ShowTagValuesStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}}, nil } // ShowUsersStatement represents a command for listing users. @@ -2420,12 +3002,16 @@ } // RequiredPrivileges returns the privilege(s) required to execute a ShowUsersStatement -func (s *ShowUsersStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +func (s *ShowUsersStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil } // ShowFieldKeysStatement represents a command for listing field keys. type ShowFieldKeysStatement struct { + // Database to query. If blank, use the default database. + // The database can also be specified per source in the Sources. + Database string + // Data sources that fields are extracted from. 
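Replacing the fixed TagKeys list with an operator and a literal lets SHOW TAG VALUES match keys exactly, by regex, or against a list. A sketch of how the new fields render, with field names as declared above and the import path assumed:

package main

import (
	"fmt"
	"regexp"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	// WITH KEY = host
	eq := &influxql.ShowTagValuesStatement{
		Database:   "mydb",
		Op:         influxql.EQ,
		TagKeyExpr: &influxql.StringLiteral{Val: "host"},
	}
	fmt.Println(eq.String()) // SHOW TAG VALUES ON mydb WITH KEY = host

	// WITH KEY =~ /host|region/
	re := &influxql.ShowTagValuesStatement{
		Op:         influxql.EQREGEX,
		TagKeyExpr: &influxql.RegexLiteral{Val: regexp.MustCompile(`host|region`)},
	}
	fmt.Println(re.String()) // SHOW TAG VALUES WITH KEY =~ /host|region/
}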
Sources Sources @@ -2445,6 +3031,10 @@ var buf bytes.Buffer _, _ = buf.WriteString("SHOW FIELD KEYS") + if s.Database != "" { + _, _ = buf.WriteString(" ON ") + _, _ = buf.WriteString(QuoteIdent(s.Database)) + } if s.Sources != nil { _, _ = buf.WriteString(" FROM ") _, _ = buf.WriteString(s.Sources.String()) @@ -2465,8 +3055,8 @@ } // RequiredPrivileges returns the privilege(s) required to execute a ShowFieldKeysStatement -func (s *ShowFieldKeysStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}} +func (s *ShowFieldKeysStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}}, nil } // Fields represents a list of fields. @@ -2527,6 +3117,11 @@ switch expr := f.Expr.(type) { case *Call: return expr.Name + case *BinaryExpr: + return BinaryExprName(expr) + case *ParenExpr: + f := Field{Expr: expr.Expr} + return f.Name() case *VarRef: return expr.Val } @@ -2546,9 +3141,9 @@ } // Sort Interface for Fields -func (f Fields) Len() int { return len(f) } -func (f Fields) Less(i, j int) bool { return f[i].Name() < f[j].Name() } -func (f Fields) Swap(i, j int) { f[i], f[j] = f[j], f[i] } +func (a Fields) Len() int { return len(a) } +func (a Fields) Less(i, j int) bool { return a[i].Name() < a[j].Name() } +func (a Fields) Swap(i, j int) { a[i], a[j] = a[j], a[i] } // Dimensions represents a list of dimensions. type Dimensions []*Dimension @@ -2635,15 +3230,74 @@ return buf.String() } -// VarRef represents a reference to a variable. -type VarRef struct { - Val string +func encodeMeasurement(mm *Measurement) *internal.Measurement { + pb := &internal.Measurement{ + Database: proto.String(mm.Database), + RetentionPolicy: proto.String(mm.RetentionPolicy), + Name: proto.String(mm.Name), + IsTarget: proto.Bool(mm.IsTarget), + } + if mm.Regex != nil { + pb.Regex = proto.String(mm.Regex.Val.String()) + } + return pb } -// String returns a string representation of the variable reference. -func (r *VarRef) String() string { - return QuoteIdent(r.Val) -} +func decodeMeasurement(pb *internal.Measurement) (*Measurement, error) { + mm := &Measurement{ + Database: pb.GetDatabase(), + RetentionPolicy: pb.GetRetentionPolicy(), + Name: pb.GetName(), + IsTarget: pb.GetIsTarget(), + } + + if pb.Regex != nil { + regex, err := regexp.Compile(pb.GetRegex()) + if err != nil { + return nil, fmt.Errorf("invalid binary measurement regex: value=%q, err=%s", pb.GetRegex(), err) + } + mm.Regex = &RegexLiteral{Val: regex} + } + + return mm, nil +} + +// VarRef represents a reference to a variable. +type VarRef struct { + Val string + Type DataType +} + +// String returns a string representation of the variable reference. +func (r *VarRef) String() string { + buf := bytes.NewBufferString(QuoteIdent(r.Val)) + if r.Type != Unknown { + buf.WriteString("::") + buf.WriteString(r.Type.String()) + } + return buf.String() +} + +// VarRefs represents a slice of VarRef types. +type VarRefs []VarRef + +func (a VarRefs) Len() int { return len(a) } +func (a VarRefs) Less(i, j int) bool { + if a[i].Val != a[j].Val { + return a[i].Val < a[j].Val + } + return a[i].Type < a[j].Type +} +func (a VarRefs) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// Strings returns a slice of the variable names. +func (a VarRefs) Strings() []string { + s := make([]string, len(a)) + for i, ref := range a { + s[i] = ref.Val + } + return s +} // Call represents a function call. 
type Call struct { @@ -2685,7 +3339,7 @@ } } return keys - case "min", "max", "first", "last", "sum", "mean": + case "min", "max", "first", "last", "sum", "mean", "mode": // maintain the order the user specified in the query keyMap := make(map[string]struct{}) keys := []string{} @@ -2733,6 +3387,14 @@ // String returns a string representation of the literal. func (l *NumberLiteral) String() string { return strconv.FormatFloat(l.Val, 'f', 3, 64) } +// IntegerLiteral represents an integer literal. +type IntegerLiteral struct { + Val int64 +} + +// String returns a string representation of the literal. +func (l *IntegerLiteral) String() string { return fmt.Sprintf("%d", l.Val) } + // BooleanLiteral represents a boolean literal. type BooleanLiteral struct { Val bool @@ -2762,6 +3424,25 @@ return false } +// ListLiteral represents a list of strings literal. +type ListLiteral struct { + Vals []string +} + +// String returns a string representation of the literal. +func (s *ListLiteral) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("(") + for idx, tagKey := range s.Vals { + if idx != 0 { + _, _ = buf.WriteString(", ") + } + _, _ = buf.WriteString(QuoteIdent(tagKey)) + } + _, _ = buf.WriteString(")") + return buf.String() +} + // StringLiteral represents a string literal. type StringLiteral struct { Val string @@ -2770,6 +3451,33 @@ // String returns a string representation of the literal. func (l *StringLiteral) String() string { return QuoteString(l.Val) } +// IsTimeLiteral returns if this string can be interpreted as a time literal. +func (l *StringLiteral) IsTimeLiteral() bool { + return isDateTimeString(l.Val) || isDateString(l.Val) +} + +// ToTimeLiteral returns a time literal if this string can be converted to a time literal. +func (l *StringLiteral) ToTimeLiteral() (*TimeLiteral, error) { + if isDateTimeString(l.Val) { + t, err := time.Parse(DateTimeFormat, l.Val) + if err != nil { + // try to parse it as an RFCNano time + t, err = time.Parse(time.RFC3339Nano, l.Val) + if err != nil { + return nil, ErrInvalidTime + } + } + return &TimeLiteral{Val: t}, nil + } else if isDateString(l.Val) { + t, err := time.Parse(DateFormat, l.Val) + if err != nil { + return nil, ErrInvalidTime + } + return &TimeLiteral{Val: t}, nil + } + return nil, ErrInvalidTime +} + // TimeLiteral represents a point-in-time literal. type TimeLiteral struct { Val time.Time @@ -2807,6 +3515,75 @@ return fmt.Sprintf("%s %s %s", e.LHS.String(), e.Op.String(), e.RHS.String()) } +func (e *BinaryExpr) validate() error { + v := binaryExprValidator{} + Walk(&v, e) + if v.err != nil { + return v.err + } else if v.calls && v.refs { + return errors.New("binary expressions cannot mix aggregates and raw fields") + } + return nil +} + +type binaryExprValidator struct { + calls bool + refs bool + err error +} + +func (v *binaryExprValidator) Visit(n Node) Visitor { + if v.err != nil { + return nil + } + + switch n := n.(type) { + case *Call: + v.calls = true + + if n.Name == "top" || n.Name == "bottom" { + v.err = fmt.Errorf("cannot use %s() inside of a binary expression", n.Name) + return nil + } + + for _, expr := range n.Args { + switch e := expr.(type) { + case *BinaryExpr: + v.err = e.validate() + return nil + } + } + return nil + case *VarRef: + v.refs = true + return nil + } + return v +} + +// BinaryExprName returns the name of a binary expression by concatenating +// the variables in the binary expression with underscores. 
+func BinaryExprName(expr *BinaryExpr) string { + v := binaryExprNameVisitor{} + Walk(&v, expr) + return strings.Join(v.names, "_") +} + +type binaryExprNameVisitor struct { + names []string +} + +func (v *binaryExprNameVisitor) Visit(n Node) Visitor { + switch n := n.(type) { + case *VarRef: + v.names = append(v.names, n.Val) + case *Call: + v.names = append(v.names, n.Name) + return nil + } + return v +} + // ParenExpr represents a parenthesized expression. type ParenExpr struct { Expr Expr @@ -2843,10 +3620,21 @@ } // Wildcard represents a wild card expression. -type Wildcard struct{} +type Wildcard struct { + Type Token +} // String returns a string representation of the wildcard. -func (e *Wildcard) String() string { return "*" } +func (e *Wildcard) String() string { + switch e.Type { + case FIELD: + return "*::field" + case TAG: + return "*::tag" + default: + return "*" + } +} // CloneExpr returns a deep copy of the expression. func CloneExpr(expr Expr) Expr { @@ -2868,6 +3656,8 @@ return &Distinct{Val: expr.Val} case *DurationLiteral: return &DurationLiteral{Val: expr.Val} + case *IntegerLiteral: + return &IntegerLiteral{Val: expr.Val} case *NumberLiteral: return &NumberLiteral{Val: expr.Val} case *ParenExpr: @@ -2879,9 +3669,9 @@ case *TimeLiteral: return &TimeLiteral{Val: expr.Val} case *VarRef: - return &VarRef{Val: expr.Val} + return &VarRef{Val: expr.Val, Type: expr.Type} case *Wildcard: - return &Wildcard{} + return &Wildcard{Type: expr.Type} } panic("unreachable") } @@ -2929,15 +3719,23 @@ // TimeRange returns the minimum and maximum times specified by an expression. // Returns zero times if there is no bound. -func TimeRange(expr Expr) (min, max time.Time) { +func TimeRange(expr Expr) (min, max time.Time, err error) { WalkFunc(expr, func(n Node) { + if err != nil { + return + } + if n, ok := n.(*BinaryExpr); ok { // Extract literal expression & operator on LHS. // Check for "time" on the left-hand side first. // Otherwise check for for the right-hand side and flip the operator. - value, op := timeExprValue(n.LHS, n.RHS), n.Op - if value.IsZero() { - if value = timeExprValue(n.RHS, n.LHS); value.IsZero() { + op := n.Op + var value time.Time + value, err = timeExprValue(n.LHS, n.RHS) + if err != nil { + return + } else if value.IsZero() { + if value, err = timeExprValue(n.RHS, n.LHS); value.IsZero() || err != nil { return } else if op == LT { op = GT @@ -2973,8 +3771,8 @@ if min.IsZero() || value.After(min) { min = value } - if max.IsZero() || value.Before(max) { - max = value + if max.IsZero() || value.Add(1*time.Nanosecond).Before(max) { + max = value.Add(1 * time.Nanosecond) } } } @@ -2982,13 +3780,17 @@ return } -// TimeRange returns the minimum and maximum times, as epoch nano, specified by -// and expression. If there is no lower bound, the start of the epoch is returned +// TimeRangeAsEpochNano returns the minimum and maximum times, as epoch nano, specified by +// an expression. If there is no lower bound, the minimum time is returned // for minimum. If there is no higher bound, now is returned for maximum. -func TimeRangeAsEpochNano(expr Expr) (min, max int64) { - tmin, tmax := TimeRange(expr) +func TimeRangeAsEpochNano(expr Expr) (min, max int64, err error) { + tmin, tmax, err := TimeRange(expr) + if err != nil { + return 0, 0, err + } + if tmin.IsZero() { - min = time.Unix(0, 0).UnixNano() + min = time.Unix(0, MinTime).UnixNano() } else { min = tmin.UnixNano() } @@ -3002,18 +3804,40 @@ // timeExprValue returns the time literal value of a "time == " expression. 
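BinaryExprName gives bare arithmetic fields a deterministic default column name by joining the referenced variable and call names with underscores. For example, assuming the package's ParseExpr helper:

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	expr, err := influxql.ParseExpr(`usage_user + usage_system`)
	if err != nil {
		panic(err)
	}
	// A bare "usage_user + usage_system" field is reported as
	// "usage_user_usage_system".
	fmt.Println(influxql.BinaryExprName(expr.(*influxql.BinaryExpr)))
}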
// Returns zero time if the expression is not a time expression. -func timeExprValue(ref Expr, lit Expr) time.Time { +func timeExprValue(ref Expr, lit Expr) (t time.Time, err error) { if ref, ok := ref.(*VarRef); ok && strings.ToLower(ref.Val) == "time" { + // If literal looks like a date time then parse it as a time literal. + if strlit, ok := lit.(*StringLiteral); ok { + if strlit.IsTimeLiteral() { + t, err := strlit.ToTimeLiteral() + if err != nil { + return time.Time{}, err + } + lit = t + } + } + switch lit := lit.(type) { case *TimeLiteral: - return lit.Val + if lit.Val.After(time.Unix(0, MaxTime)) { + return time.Time{}, fmt.Errorf("time %s overflows time literal", lit.Val.Format(time.RFC3339)) + } else if lit.Val.Before(time.Unix(0, MinTime+1)) { + // The minimum allowable time literal is one greater than the minimum time because the minimum time + // is a sentinel value only used internally. + return time.Time{}, fmt.Errorf("time %s underflows time literal", lit.Val.Format(time.RFC3339)) + } + return lit.Val, nil case *DurationLiteral: - return time.Unix(0, int64(lit.Val)).UTC() + return time.Unix(0, int64(lit.Val)).UTC(), nil case *NumberLiteral: - return time.Unix(0, int64(lit.Val)).UTC() + return time.Unix(0, int64(lit.Val)).UTC(), nil + case *IntegerLiteral: + return time.Unix(0, lit.Val).UTC(), nil + default: + return time.Time{}, fmt.Errorf("invalid operation: time and %T are not compatible", lit) } } - return time.Time{} + return time.Time{}, nil } // Visitor can be called by Walk to traverse an AST hierarchy. @@ -3053,6 +3877,10 @@ Walk(v, c) } + case *DeleteSeriesStatement: + Walk(v, n.Sources) + Walk(v, n.Condition) + case *DropSeriesStatement: Walk(v, n.Sources) Walk(v, n.Condition) @@ -3193,6 +4021,36 @@ func (fn rewriterFunc) Rewrite(n Node) Node { return fn(n) } +// RewriteExpr recursively invokes the function to replace each expr. +// Nodes are traversed depth-first and rewritten from leaf to root. +func RewriteExpr(expr Expr, fn func(Expr) Expr) Expr { + switch e := expr.(type) { + case *BinaryExpr: + e.LHS = RewriteExpr(e.LHS, fn) + e.RHS = RewriteExpr(e.RHS, fn) + if e.LHS != nil && e.RHS == nil { + expr = e.LHS + } else if e.RHS != nil && e.LHS == nil { + expr = e.RHS + } else if e.LHS == nil && e.RHS == nil { + return nil + } + + case *ParenExpr: + e.Expr = RewriteExpr(e.Expr, fn) + if e.Expr == nil { + return nil + } + + case *Call: + for i, expr := range e.Args { + e.Args[i] = RewriteExpr(expr, fn) + } + } + + return fn(expr) +} + // Eval evaluates expr against a map. func Eval(expr Expr, m map[string]interface{}) interface{} { if expr == nil { @@ -3204,10 +4062,14 @@ return evalBinaryExpr(expr, m) case *BooleanLiteral: return expr.Val + case *IntegerLiteral: + return expr.Val case *NumberLiteral: return expr.Val case *ParenExpr: return Eval(expr.Expr, m) + case *RegexLiteral: + return expr.Val case *StringLiteral: return expr.Val case *VarRef: @@ -3224,81 +4086,148 @@ // Evaluate if both sides are simple types. 
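TimeRange now reports problems (such as out-of-range time literals) instead of silently returning zero times, and string literals in time comparisons are promoted to time literals first. A small usage sketch, assuming the package's ParseExpr helper; the printed bounds are indicative only:

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	cond, err := influxql.ParseExpr(`time >= '2016-12-06T00:00:00Z' AND time <= '2016-12-06T21:36:15Z'`)
	if err != nil {
		panic(err)
	}

	// Both bounds come from string literals that are promoted to time
	// literals; an out-of-range literal would now be reported through err
	// instead of being silently dropped.
	min, max, err := influxql.TimeRange(cond)
	fmt.Println(min, max, err)
}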
switch lhs := lhs.(type) { case bool: - rhs, _ := rhs.(bool) + rhs, ok := rhs.(bool) switch expr.Op { case AND: - return lhs && rhs + return ok && (lhs && rhs) case OR: - return lhs || rhs + return ok && (lhs || rhs) case EQ: - return lhs == rhs + return ok && (lhs == rhs) case NEQ: - return lhs != rhs + return ok && (lhs != rhs) } case float64: - rhs, _ := rhs.(float64) + // Try the rhs as a float64 or int64 + rhsf, ok := rhs.(float64) + if !ok { + var rhsi int64 + if rhsi, ok = rhs.(int64); ok { + rhsf = float64(rhsi) + } + } + + rhs := rhsf switch expr.Op { case EQ: - return lhs == rhs + return ok && (lhs == rhs) case NEQ: - return lhs != rhs + return ok && (lhs != rhs) case LT: - return lhs < rhs + return ok && (lhs < rhs) case LTE: - return lhs <= rhs + return ok && (lhs <= rhs) case GT: - return lhs > rhs + return ok && (lhs > rhs) case GTE: - return lhs >= rhs + return ok && (lhs >= rhs) case ADD: + if !ok { + return nil + } return lhs + rhs case SUB: + if !ok { + return nil + } return lhs - rhs case MUL: + if !ok { + return nil + } return lhs * rhs case DIV: - if rhs == 0 { + if !ok { + return nil + } else if rhs == 0 { return float64(0) } return lhs / rhs } case int64: - // we parse all number literals as float 64, so we have to convert from - // an interface to the float64, then cast to an int64 for comparison - rhsf, _ := rhs.(float64) - rhs := int64(rhsf) - switch expr.Op { - case EQ: - return lhs == rhs - case NEQ: - return lhs != rhs - case LT: - return lhs < rhs - case LTE: - return lhs <= rhs - case GT: - return lhs > rhs - case GTE: - return lhs >= rhs - case ADD: - return lhs + rhs - case SUB: - return lhs - rhs - case MUL: - return lhs * rhs - case DIV: - if rhs == 0 { - return int64(0) + // Try as a float64 to see if a float cast is required. 
+ rhsf, ok := rhs.(float64) + if ok { + lhs := float64(lhs) + rhs := rhsf + switch expr.Op { + case EQ: + return lhs == rhs + case NEQ: + return lhs != rhs + case LT: + return lhs < rhs + case LTE: + return lhs <= rhs + case GT: + return lhs > rhs + case GTE: + return lhs >= rhs + case ADD: + return lhs + rhs + case SUB: + return lhs - rhs + case MUL: + return lhs * rhs + case DIV: + if rhs == 0 { + return float64(0) + } + return lhs / rhs + } + } else { + rhs, ok := rhs.(int64) + switch expr.Op { + case EQ: + return ok && (lhs == rhs) + case NEQ: + return ok && (lhs != rhs) + case LT: + return ok && (lhs < rhs) + case LTE: + return ok && (lhs <= rhs) + case GT: + return ok && (lhs > rhs) + case GTE: + return ok && (lhs >= rhs) + case ADD: + if !ok { + return nil + } + return lhs + rhs + case SUB: + if !ok { + return nil + } + return lhs - rhs + case MUL: + if !ok { + return nil + } + return lhs * rhs + case DIV: + if !ok { + return nil + } else if rhs == 0 { + return float64(0) + } + return lhs / rhs } - return lhs / rhs } case string: - rhs, _ := rhs.(string) switch expr.Op { case EQ: - return lhs == rhs + rhs, ok := rhs.(string) + return ok && lhs == rhs case NEQ: - return lhs != rhs + rhs, ok := rhs.(string) + return ok && lhs != rhs + case EQREGEX: + rhs, ok := rhs.(*regexp.Regexp) + return ok && rhs.MatchString(lhs) + case NEQREGEX: + rhs, ok := rhs.(*regexp.Regexp) + return ok && !rhs.MatchString(lhs) } } return nil @@ -3379,6 +4308,8 @@ return reduceBinaryExprBooleanLHS(op, lhs, rhs) case *DurationLiteral: return reduceBinaryExprDurationLHS(op, lhs, rhs) + case *IntegerLiteral: + return reduceBinaryExprIntegerLHS(op, lhs, rhs) case *nilLiteral: return reduceBinaryExprNilLHS(op, lhs, rhs) case *NumberLiteral: @@ -3442,11 +4373,93 @@ } return &DurationLiteral{Val: lhs.Val / time.Duration(rhs.Val)} } + case *IntegerLiteral: + switch op { + case MUL: + return &DurationLiteral{Val: lhs.Val * time.Duration(rhs.Val)} + case DIV: + if rhs.Val == 0 { + return &DurationLiteral{Val: 0} + } + return &DurationLiteral{Val: lhs.Val / time.Duration(rhs.Val)} + } case *TimeLiteral: switch op { case ADD: return &TimeLiteral{Val: rhs.Val.Add(lhs.Val)} } + case *StringLiteral: + t, err := rhs.ToTimeLiteral() + if err != nil { + break + } + expr := reduceBinaryExprDurationLHS(op, lhs, t) + + // If the returned expression is still a binary expr, that means + // we couldn't reduce it so this wasn't used in a time literal context. 
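Eval is now defensive about operand types: int64 and float64 values are compared after an explicit conversion, mismatched types evaluate to false rather than being compared against a zero value, and the regex operators are handled. For instance, assuming the package's ParseExpr helper alongside the Eval declared in this file:

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	cmp, _ := influxql.ParseExpr(`value > 10`)

	// An int64 field value is compared against the integer literal directly;
	// a float64 value is compared after converting the literal.
	fmt.Println(influxql.Eval(cmp, map[string]interface{}{"value": int64(12)})) // true
	fmt.Println(influxql.Eval(cmp, map[string]interface{}{"value": 9.5}))       // false

	// Regex operators now evaluate against string fields.
	re, _ := influxql.ParseExpr(`host =~ /^server/`)
	fmt.Println(influxql.Eval(re, map[string]interface{}{"host": "server01"})) // true
}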
+ if _, ok := expr.(*BinaryExpr); !ok { + return expr + } + case *nilLiteral: + return &BooleanLiteral{Val: false} + } + return &BinaryExpr{Op: op, LHS: lhs, RHS: rhs} +} + +func reduceBinaryExprIntegerLHS(op Token, lhs *IntegerLiteral, rhs Expr) Expr { + switch rhs := rhs.(type) { + case *NumberLiteral: + return reduceBinaryExprNumberLHS(op, &NumberLiteral{Val: float64(lhs.Val)}, rhs) + case *IntegerLiteral: + switch op { + case ADD: + return &IntegerLiteral{Val: lhs.Val + rhs.Val} + case SUB: + return &IntegerLiteral{Val: lhs.Val - rhs.Val} + case MUL: + return &IntegerLiteral{Val: lhs.Val * rhs.Val} + case DIV: + if rhs.Val == 0 { + return &NumberLiteral{Val: 0} + } + return &NumberLiteral{Val: float64(lhs.Val) / float64(rhs.Val)} + case EQ: + return &BooleanLiteral{Val: lhs.Val == rhs.Val} + case NEQ: + return &BooleanLiteral{Val: lhs.Val != rhs.Val} + case GT: + return &BooleanLiteral{Val: lhs.Val > rhs.Val} + case GTE: + return &BooleanLiteral{Val: lhs.Val >= rhs.Val} + case LT: + return &BooleanLiteral{Val: lhs.Val < rhs.Val} + case LTE: + return &BooleanLiteral{Val: lhs.Val <= rhs.Val} + } + case *DurationLiteral: + // Treat the integer as a timestamp. + switch op { + case ADD: + return &TimeLiteral{Val: time.Unix(0, lhs.Val).Add(rhs.Val)} + case SUB: + return &TimeLiteral{Val: time.Unix(0, lhs.Val).Add(-rhs.Val)} + } + case *TimeLiteral: + d := &DurationLiteral{Val: time.Duration(lhs.Val)} + expr := reduceBinaryExprDurationLHS(op, d, rhs) + if _, ok := expr.(*BinaryExpr); !ok { + return expr + } + case *StringLiteral: + t, err := rhs.ToTimeLiteral() + if err != nil { + break + } + d := &DurationLiteral{Val: time.Duration(lhs.Val)} + expr := reduceBinaryExprDurationLHS(op, d, t) + if _, ok := expr.(*BinaryExpr); !ok { + return expr + } case *nilLiteral: return &BooleanLiteral{Val: false} } @@ -3489,6 +4502,32 @@ case LTE: return &BooleanLiteral{Val: lhs.Val <= rhs.Val} } + case *IntegerLiteral: + switch op { + case ADD: + return &NumberLiteral{Val: lhs.Val + float64(rhs.Val)} + case SUB: + return &NumberLiteral{Val: lhs.Val - float64(rhs.Val)} + case MUL: + return &NumberLiteral{Val: lhs.Val * float64(rhs.Val)} + case DIV: + if float64(rhs.Val) == 0 { + return &NumberLiteral{Val: 0} + } + return &NumberLiteral{Val: lhs.Val / float64(rhs.Val)} + case EQ: + return &BooleanLiteral{Val: lhs.Val == float64(rhs.Val)} + case NEQ: + return &BooleanLiteral{Val: lhs.Val != float64(rhs.Val)} + case GT: + return &BooleanLiteral{Val: lhs.Val > float64(rhs.Val)} + case GTE: + return &BooleanLiteral{Val: lhs.Val >= float64(rhs.Val)} + case LT: + return &BooleanLiteral{Val: lhs.Val < float64(rhs.Val)} + case LTE: + return &BooleanLiteral{Val: lhs.Val <= float64(rhs.Val)} + } case *nilLiteral: return &BooleanLiteral{Val: false} } @@ -3500,11 +4539,105 @@ case *StringLiteral: switch op { case EQ: - return &BooleanLiteral{Val: lhs.Val == rhs.Val} + var expr Expr = &BooleanLiteral{Val: lhs.Val == rhs.Val} + // This might be a comparison between time literals. + // If it is, parse the time literals and then compare since it + // could be a different result if they use different formats + // for the same time. 
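
// The new reduceBinaryExprIntegerLHS above folds integer literals without losing the
// integer type: +, -, and * stay integral, division always produces a float, and an
// integer combined with a duration is interpreted as a nanosecond timestamp. A small
// sketch, assuming the package's Reduce helper (not shown in this hunk) is used to
// fold constant expressions:

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	// Integer arithmetic folds to an IntegerLiteral.
	fmt.Println(influxql.Reduce(influxql.MustParseExpr(`1 + 2`), nil)) // 3

	// Division always yields a NumberLiteral (float), matching the DIV case above.
	fmt.Println(influxql.Reduce(influxql.MustParseExpr(`1 / 2`), nil)) // 0.500

	// An integer plus a duration is treated as a Unix-nanosecond timestamp.
	fmt.Println(influxql.Reduce(influxql.MustParseExpr(`946684800000000000 + 2h`), nil)) // '2000-01-01T02:00:00Z'
}
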
+ if lhs.IsTimeLiteral() && rhs.IsTimeLiteral() { + tlhs, err := lhs.ToTimeLiteral() + if err != nil { + return expr + } + + trhs, err := rhs.ToTimeLiteral() + if err != nil { + return expr + } + + t := reduceBinaryExprTimeLHS(op, tlhs, trhs) + if _, ok := t.(*BinaryExpr); !ok { + expr = t + } + } + return expr case NEQ: - return &BooleanLiteral{Val: lhs.Val != rhs.Val} + var expr Expr = &BooleanLiteral{Val: lhs.Val != rhs.Val} + // This might be a comparison between time literals. + // If it is, parse the time literals and then compare since it + // could be a different result if they use different formats + // for the same time. + if lhs.IsTimeLiteral() && rhs.IsTimeLiteral() { + tlhs, err := lhs.ToTimeLiteral() + if err != nil { + return expr + } + + trhs, err := rhs.ToTimeLiteral() + if err != nil { + return expr + } + + t := reduceBinaryExprTimeLHS(op, tlhs, trhs) + if _, ok := t.(*BinaryExpr); !ok { + expr = t + } + } + return expr case ADD: return &StringLiteral{Val: lhs.Val + rhs.Val} + default: + // Attempt to convert the string literal to a time literal. + t, err := lhs.ToTimeLiteral() + if err != nil { + break + } + expr := reduceBinaryExprTimeLHS(op, t, rhs) + + // If the returned expression is still a binary expr, that means + // we couldn't reduce it so this wasn't used in a time literal context. + if _, ok := expr.(*BinaryExpr); !ok { + return expr + } + } + case *DurationLiteral: + // Attempt to convert the string literal to a time literal. + t, err := lhs.ToTimeLiteral() + if err != nil { + break + } + expr := reduceBinaryExprTimeLHS(op, t, rhs) + + // If the returned expression is still a binary expr, that means + // we couldn't reduce it so this wasn't used in a time literal context. + if _, ok := expr.(*BinaryExpr); !ok { + return expr + } + case *TimeLiteral: + // Attempt to convert the string literal to a time literal. + t, err := lhs.ToTimeLiteral() + if err != nil { + break + } + expr := reduceBinaryExprTimeLHS(op, t, rhs) + + // If the returned expression is still a binary expr, that means + // we couldn't reduce it so this wasn't used in a time literal context. + if _, ok := expr.(*BinaryExpr); !ok { + return expr + } + case *IntegerLiteral: + // Attempt to convert the string literal to a time literal. + t, err := lhs.ToTimeLiteral() + if err != nil { + break + } + expr := reduceBinaryExprTimeLHS(op, t, rhs) + + // If the returned expression is still a binary expr, that means + // we couldn't reduce it so this wasn't used in a time literal context. + if _, ok := expr.(*BinaryExpr); !ok { + return expr } case *nilLiteral: switch op { @@ -3524,6 +4657,12 @@ case SUB: return &TimeLiteral{Val: lhs.Val.Add(-rhs.Val)} } + case *IntegerLiteral: + d := &DurationLiteral{Val: time.Duration(rhs.Val)} + expr := reduceBinaryExprTimeLHS(op, lhs, d) + if _, ok := expr.(*BinaryExpr); !ok { + return expr + } case *TimeLiteral: switch op { case SUB: @@ -3541,6 +4680,18 @@ case LTE: return &BooleanLiteral{Val: lhs.Val.Before(rhs.Val) || lhs.Val.Equal(rhs.Val)} } + case *StringLiteral: + t, err := rhs.ToTimeLiteral() + if err != nil { + break + } + expr := reduceBinaryExprTimeLHS(op, lhs, t) + + // If the returned expression is still a binary expr, that means + // we couldn't reduce it so this wasn't used in a time literal context. + if _, ok := expr.(*BinaryExpr); !ok { + return expr + } case *nilLiteral: return &BooleanLiteral{Val: false} } @@ -3575,14 +4726,14 @@ func reduceVarRef(expr *VarRef, valuer Valuer) Expr { // Ignore if there is no valuer. 
if valuer == nil { - return &VarRef{Val: expr.Val} + return &VarRef{Val: expr.Val, Type: expr.Type} } // Retrieve the value of the ref. // Ignore if the value doesn't exist. v, ok := valuer.Value(expr.Val) if !ok { - return &VarRef{Val: expr.Val} + return &VarRef{Val: expr.Val, Type: expr.Type} } // Return the value as a literal. @@ -3609,14 +4760,36 @@ Value(key string) (interface{}, bool) } -// nowValuer returns only the value for "now()". +// NowValuer returns only the value for "now()". type NowValuer struct { Now time.Time } +// Value is a method that returns the value and existence flag for a given key. func (v *NowValuer) Value(key string) (interface{}, bool) { if key == "now()" { return v.Now, true } return nil, false } + +// ContainsVarRef returns true if expr is a VarRef or contains one. +func ContainsVarRef(expr Expr) bool { + var v containsVarRefVisitor + Walk(&v, expr) + return v.contains +} + +type containsVarRefVisitor struct { + contains bool +} + +func (v *containsVarRefVisitor) Visit(n Node) Visitor { + switch n.(type) { + case *Call: + return nil + case *VarRef: + v.contains = true + } + return v +} diff -Nru influxdb-0.10.0+dfsg1/influxql/ast_test.go influxdb-1.1.1+dfsg1/influxql/ast_test.go --- influxdb-0.10.0+dfsg1/influxql/ast_test.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/ast_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -7,9 +7,17 @@ "testing" "time" - "github.com/influxdb/influxdb/influxql" + "github.com/influxdata/influxdb/influxql" ) +func BenchmarkQuery_String(b *testing.B) { + p := influxql.NewParser(strings.NewReader(`SELECT foo AS zoo, a AS b FROM bar WHERE value > 10 AND q = 'hello'`)) + q, _ := p.ParseStatement() + for i := 0; i < b.N; i++ { + _ = q.String() + } +} + // Ensure a value's data type can be retrieved. func TestInspectDataType(t *testing.T) { for i, tt := range []struct { @@ -17,6 +25,14 @@ typ influxql.DataType }{ {float64(100), influxql.Float}, + {int64(100), influxql.Integer}, + {int32(100), influxql.Integer}, + {100, influxql.Integer}, + {true, influxql.Boolean}, + {"string", influxql.String}, + {time.Now(), influxql.Time}, + {time.Second, influxql.Duration}, + {nil, influxql.Unknown}, } { if typ := influxql.InspectDataType(tt.v); tt.typ != typ { t.Errorf("%d. %v (%s): unexpected type: %s", i, tt.v, tt.typ, typ) @@ -25,73 +41,22 @@ } } -// Ensure the SELECT statement can extract substatements. -func TestSelectStatement_Substatement(t *testing.T) { - var tests = []struct { - stmt string - expr *influxql.VarRef - sub string - err string +func TestDataType_String(t *testing.T) { + for i, tt := range []struct { + typ influxql.DataType + v string }{ - // 0. Single series - { - stmt: `SELECT value FROM myseries WHERE value > 1`, - expr: &influxql.VarRef{Val: "value"}, - sub: `SELECT value FROM myseries WHERE value > 1.000`, - }, - - // 1. Simple join - { - stmt: `SELECT sum(aa.value) + sum(bb.value) FROM aa, bb`, - expr: &influxql.VarRef{Val: "aa.value"}, - sub: `SELECT "aa.value" FROM aa`, - }, - - // 2. Simple merge - { - stmt: `SELECT sum(aa.value) + sum(bb.value) FROM aa, bb`, - expr: &influxql.VarRef{Val: "bb.value"}, - sub: `SELECT "bb.value" FROM bb`, - }, - - // 3. Join with condition - { - stmt: `SELECT sum(aa.value) + sum(bb.value) FROM aa, bb WHERE aa.host = 'servera' AND bb.host = 'serverb'`, - expr: &influxql.VarRef{Val: "bb.value"}, - sub: `SELECT "bb.value" FROM bb WHERE "bb.host" = 'serverb'`, - }, - - // 4. 
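
// ContainsVarRef above is a small Walk()-based visitor that deliberately stops at
// *Call nodes, so only bare field references count as "contains a VarRef". A short
// sketch of the resulting behaviour, assuming the influxql package from this tree:

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	fmt.Println(influxql.ContainsVarRef(influxql.MustParseExpr(`value + 1`)))   // true
	fmt.Println(influxql.ContainsVarRef(influxql.MustParseExpr(`mean(value)`))) // false: refs inside calls are skipped
	fmt.Println(influxql.ContainsVarRef(influxql.MustParseExpr(`2 + 3`)))       // false
}
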
Join with complex condition - { - stmt: `SELECT sum(aa.value) + sum(bb.value) FROM aa, bb WHERE aa.host = 'servera' AND (bb.host = 'serverb' OR bb.host = 'serverc') AND 1 = 2`, - expr: &influxql.VarRef{Val: "bb.value"}, - sub: `SELECT "bb.value" FROM bb WHERE ("bb.host" = 'serverb' OR "bb.host" = 'serverc') AND 1.000 = 2.000`, - }, - - // 5. 4 with different condition order - { - stmt: `SELECT sum(aa.value) + sum(bb.value) FROM aa, bb WHERE ((bb.host = 'serverb' OR bb.host = 'serverc') AND aa.host = 'servera') AND 1 = 2`, - expr: &influxql.VarRef{Val: "bb.value"}, - sub: `SELECT "bb.value" FROM bb WHERE (("bb.host" = 'serverb' OR "bb.host" = 'serverc')) AND 1.000 = 2.000`, - }, - } - - for i, tt := range tests { - // Parse statement. - stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement() - if err != nil { - t.Fatalf("invalid statement: %q: %s", tt.stmt, err) - } - - // Extract substatement. - sub, err := stmt.(*influxql.SelectStatement).Substatement(tt.expr) - if err != nil { - t.Errorf("%d. %q: unexpected error: %s", i, tt.stmt, err) - continue - } - if substr := sub.String(); tt.sub != substr { - t.Errorf("%d. %q: unexpected substatement:\n\nexp=%s\n\ngot=%s\n\n", i, tt.stmt, tt.sub, substr) - continue + {influxql.Float, "float"}, + {influxql.Integer, "integer"}, + {influxql.Boolean, "boolean"}, + {influxql.String, "string"}, + {influxql.Time, "time"}, + {influxql.Duration, "duration"}, + {influxql.Tag, "tag"}, + {influxql.Unknown, "unknown"}, + } { + if v := tt.typ.String(); tt.v != v { + t.Errorf("%d. %v (%s): unexpected string: %s", i, tt.typ, tt.v, v) } } } @@ -123,11 +88,10 @@ } s := stmt.(*influxql.SelectStatement) - min, max := influxql.TimeRange(s.Condition) start := time.Now().Add(-20 * time.Hour).Round(time.Second).UTC() end := time.Now().Add(10 * time.Hour).Round(time.Second).UTC() s.SetTimeRange(start, end) - min, max = influxql.TimeRange(s.Condition) + min, max := MustTimeRange(s.Condition) if min != start { t.Fatalf("start time wasn't set properly.\n exp: %s\n got: %s", start, min) @@ -148,7 +112,7 @@ } s = stmt.(*influxql.SelectStatement) - min, max = influxql.TimeRange(s.Condition) + min, max = MustTimeRange(s.Condition) if start != min || end != max { t.Fatalf("start and end times weren't equal:\n exp: %s\n got: %s\n exp: %s\n got:%s\n", start, min, end, max) } @@ -157,7 +121,7 @@ start = time.Now().Add(-40 * time.Hour).Round(time.Second).UTC() end = time.Now().Add(20 * time.Hour).Round(time.Second).UTC() s.SetTimeRange(start, end) - min, max = influxql.TimeRange(s.Condition) + min, max = MustTimeRange(s.Condition) // TODO: right now the SetTimeRange can't override the start time if it's more recent than what they're trying to set it to. // shouldn't matter for our purposes with continuous queries, but fix this later @@ -184,7 +148,7 @@ start = time.Now().Add(-40 * time.Hour).Round(time.Second).UTC() end = time.Now().Add(20 * time.Hour).Round(time.Second).UTC() s.SetTimeRange(start, end) - min, max = influxql.TimeRange(s.Condition) + min, max = MustTimeRange(s.Condition) if min != start { t.Fatalf("start time wasn't set properly.\n exp: %s\n got: %s", start, min) @@ -295,7 +259,6 @@ for i, tt := range tests { // Parse statement. - t.Logf("index: %d, statement: %s", i, tt.stmt) stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement() if err != nil { t.Fatalf("invalid statement: %q: %s", tt.stmt, err) @@ -309,17 +272,8 @@ } } -// Test SELECT statement wildcard rewrite. 
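
// The TestInspectDataType cases above pin down the new mapping from Go values to
// InfluxQL data types (int64/int -> integer, bool -> boolean, time.Time -> time, and
// so on). A minimal sketch of the same mapping outside a test:

package main

import (
	"fmt"
	"time"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	for _, v := range []interface{}{float64(100), int64(100), "hello", true, time.Now(), time.Second, nil} {
		fmt.Printf("%T -> %s\n", v, influxql.InspectDataType(v))
	}
	// float64 -> float, int64 -> integer, string -> string, bool -> boolean,
	// time.Time -> time, time.Duration -> duration, nil -> unknown.
}
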
-func TestSelectStatement_RewriteWildcards(t *testing.T) { - var fields = influxql.Fields{ - &influxql.Field{Expr: &influxql.VarRef{Val: "value1"}}, - &influxql.Field{Expr: &influxql.VarRef{Val: "value2"}}, - } - var dimensions = influxql.Dimensions{ - &influxql.Dimension{Expr: &influxql.VarRef{Val: "host"}}, - &influxql.Dimension{Expr: &influxql.VarRef{Val: "region"}}, - } - +// Test SELECT statement field rewrite. +func TestSelectStatement_RewriteFields(t *testing.T) { var tests = []struct { stmt string rewrite string @@ -333,22 +287,28 @@ // Query wildcard { stmt: `SELECT * FROM cpu`, - rewrite: `SELECT value1, value2 FROM cpu GROUP BY host, region`, + rewrite: `SELECT host::tag, region::tag, value1::float, value2::integer FROM cpu`, }, // Parser fundamentally prohibits multiple query sources // Query wildcard with explicit - // { - // stmt: `SELECT *,value1 FROM cpu`, - // rewrite: `SELECT value1, value2, value1 FROM cpu`, - // }, + { + stmt: `SELECT *,value1 FROM cpu`, + rewrite: `SELECT host::tag, region::tag, value1::float, value2::integer, value1::float FROM cpu`, + }, // Query multiple wildcards - // { - // stmt: `SELECT *,* FROM cpu`, - // rewrite: `SELECT value1,value2,value1,value2 FROM cpu`, - // }, + { + stmt: `SELECT *,* FROM cpu`, + rewrite: `SELECT host::tag, region::tag, value1::float, value2::integer, host::tag, region::tag, value1::float, value2::integer FROM cpu`, + }, + + // Query wildcards with group by + { + stmt: `SELECT * FROM cpu GROUP BY host`, + rewrite: `SELECT region::tag, value1::float, value2::integer FROM cpu GROUP BY host`, + }, // No GROUP BY wildcards { @@ -374,7 +334,7 @@ rewrite: `SELECT mean(value) FROM cpu WHERE time < now() GROUP BY host, region, time(1m)`, }, - // GROUP BY wildarde with fill + // GROUP BY wildcard with fill { stmt: `SELECT mean(value) FROM cpu where time < now() GROUP BY *,time(1m) fill(0)`, rewrite: `SELECT mean(value) FROM cpu WHERE time < now() GROUP BY host, region, time(1m) fill(0)`, @@ -395,27 +355,218 @@ // Combo { stmt: `SELECT * FROM cpu GROUP BY *`, - rewrite: `SELECT value1, value2 FROM cpu GROUP BY host, region`, + rewrite: `SELECT value1::float, value2::integer FROM cpu GROUP BY host, region`, + }, + + // Wildcard function with all fields. + { + stmt: `SELECT mean(*) FROM cpu`, + rewrite: `SELECT mean(value1::float) AS mean_value1, mean(value2::integer) AS mean_value2 FROM cpu`, + }, + + { + stmt: `SELECT distinct(*) FROM strings`, + rewrite: `SELECT distinct(string::string) AS distinct_string, distinct(value::float) AS distinct_value FROM strings`, + }, + + { + stmt: `SELECT distinct(*) FROM bools`, + rewrite: `SELECT distinct(bool::boolean) AS distinct_bool, distinct(value::float) AS distinct_value FROM bools`, + }, + + // Wildcard function with some fields excluded. + { + stmt: `SELECT mean(*) FROM strings`, + rewrite: `SELECT mean(value::float) AS mean_value FROM strings`, + }, + + { + stmt: `SELECT mean(*) FROM bools`, + rewrite: `SELECT mean(value::float) AS mean_value FROM bools`, + }, + + // Wildcard function with an alias. 
+ { + stmt: `SELECT mean(*) AS alias FROM cpu`, + rewrite: `SELECT mean(value1::float) AS alias_value1, mean(value2::integer) AS alias_value2 FROM cpu`, + }, + + // Query regex + { + stmt: `SELECT /1/ FROM cpu`, + rewrite: `SELECT value1::float FROM cpu`, + }, + + { + stmt: `SELECT value1 FROM cpu GROUP BY /h/`, + rewrite: `SELECT value1::float FROM cpu GROUP BY host`, + }, + + // Query regex + { + stmt: `SELECT mean(/1/) FROM cpu`, + rewrite: `SELECT mean(value1::float) AS mean_value1 FROM cpu`, }, } for i, tt := range tests { - t.Logf("index: %d, statement: %s", i, tt.stmt) // Parse statement. stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement() if err != nil { t.Fatalf("invalid statement: %q: %s", tt.stmt, err) } + var ic IteratorCreator + ic.FieldDimensionsFn = func(sources influxql.Sources) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { + source := sources[0].(*influxql.Measurement) + switch source.Name { + case "cpu": + fields = map[string]influxql.DataType{ + "value1": influxql.Float, + "value2": influxql.Integer, + } + case "strings": + fields = map[string]influxql.DataType{ + "value": influxql.Float, + "string": influxql.String, + } + case "bools": + fields = map[string]influxql.DataType{ + "value": influxql.Float, + "bool": influxql.Boolean, + } + } + dimensions = map[string]struct{}{"host": struct{}{}, "region": struct{}{}} + return + } + // Rewrite statement. - rw := stmt.(*influxql.SelectStatement).RewriteWildcards(fields, dimensions) - if rw == nil { + rw, err := stmt.(*influxql.SelectStatement).RewriteFields(&ic) + if err != nil { + t.Errorf("%d. %q: error: %s", i, tt.stmt, err) + } else if rw == nil { t.Errorf("%d. %q: unexpected nil statement", i, tt.stmt) - continue - } - if rw := rw.String(); tt.rewrite != rw { + } else if rw := rw.String(); tt.rewrite != rw { t.Errorf("%d. %q: unexpected rewrite:\n\nexp=%s\n\ngot=%s\n\n", i, tt.stmt, tt.rewrite, rw) - continue + } + } +} + +// Test SELECT statement regex conditions rewrite. 
+func TestSelectStatement_RewriteRegexConditions(t *testing.T) { + var tests = []struct { + in string + out string + }{ + {in: `SELECT value FROM cpu`, out: `SELECT value FROM cpu`}, + {in: `SELECT value FROM cpu WHERE host='server-1'`, out: `SELECT value FROM cpu WHERE host='server-1'`}, + {in: `SELECT value FROM cpu WHERE host = 'server-1'`, out: `SELECT value FROM cpu WHERE host = 'server-1'`}, + {in: `SELECT value FROM cpu WHERE host != 'server-1'`, out: `SELECT value FROM cpu WHERE host != 'server-1'`}, + + // Non matching regex + {in: `SELECT value FROM cpu WHERE host =~ /server-1|server-2|server-3/`, out: `SELECT value FROM cpu WHERE host =~ /server-1|server-2|server-3/`}, + {in: `SELECT value FROM cpu WHERE host =~ /server-1/`, out: `SELECT value FROM cpu WHERE host =~ /server-1/`}, + {in: `SELECT value FROM cpu WHERE host !~ /server-1/`, out: `SELECT value FROM cpu WHERE host !~ /server-1/`}, + {in: `SELECT value FROM cpu WHERE host =~ /^server-1/`, out: `SELECT value FROM cpu WHERE host =~ /^server-1/`}, + {in: `SELECT value FROM cpu WHERE host =~ /server-1$/`, out: `SELECT value FROM cpu WHERE host =~ /server-1$/`}, + {in: `SELECT value FROM cpu WHERE host !~ /\^server-1$/`, out: `SELECT value FROM cpu WHERE host !~ /\^server-1$/`}, + {in: `SELECT value FROM cpu WHERE host !~ /\^$/`, out: `SELECT value FROM cpu WHERE host !~ /\^$/`}, + {in: `SELECT value FROM cpu WHERE host !~ /^server-1\$/`, out: `SELECT value FROM cpu WHERE host !~ /^server-1\$/`}, + {in: `SELECT value FROM cpu WHERE host =~ /^\$/`, out: `SELECT value FROM cpu WHERE host =~ /^\$/`}, + {in: `SELECT value FROM cpu WHERE host !~ /^a/`, out: `SELECT value FROM cpu WHERE host !~ /^a/`}, + + // These regexes are not supported due to the presence of escaped or meta characters. + {in: `SELECT value FROM cpu WHERE host !~ /^(foo|bar)$/`, out: `SELECT value FROM cpu WHERE host !~ /^(foo|bar)$/`}, + {in: `SELECT value FROM cpu WHERE host !~ /^?a$/`, out: `SELECT value FROM cpu WHERE host !~ /^?a$/`}, + {in: `SELECT value FROM cpu WHERE host !~ /^[a-z]$/`, out: `SELECT value FROM cpu WHERE host !~ /^[a-z]$/`}, + {in: `SELECT value FROM cpu WHERE host !~ /^\d$/`, out: `SELECT value FROM cpu WHERE host !~ /^\d$/`}, + {in: `SELECT value FROM cpu WHERE host !~ /^a*$/`, out: `SELECT value FROM cpu WHERE host !~ /^a*$/`}, + {in: `SELECT value FROM cpu WHERE host !~ /^a.b$/`, out: `SELECT value FROM cpu WHERE host !~ /^a.b$/`}, + {in: `SELECT value FROM cpu WHERE host !~ /^ab+$/`, out: `SELECT value FROM cpu WHERE host !~ /^ab+$/`}, + {in: `SELECT value FROM cpu WHERE host =~ /^hello\world$/`, out: `SELECT value FROM cpu WHERE host =~ /^hello\world$/`}, + + // These regexes all match and will be rewritten. 
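
// The cases above pin down which regex conditions RewriteRegexConditions() will turn
// into plain (in)equality: only fully anchored literals such as /^server-1$/ qualify,
// while anything containing regex metacharacters is left untouched. A small sketch,
// assuming the influxql package from this tree:

package main

import (
	"fmt"
	"strings"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	stmt, err := influxql.NewParser(strings.NewReader(
		`SELECT value FROM cpu WHERE host =~ /^server-1$/ AND region =~ /^us-(east|west)$/`,
	)).ParseStatement()
	if err != nil {
		panic(err)
	}

	s := stmt.(*influxql.SelectStatement)
	s.RewriteRegexConditions()

	// The anchored literal becomes host = 'server-1'; the alternation keeps its regex.
	fmt.Println(s.String())
}
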
+ {in: `SELECT value FROM cpu WHERE host !~ /^a[2]$/`, out: `SELECT value FROM cpu WHERE host != 'a2'`}, + {in: `SELECT value FROM cpu WHERE host =~ /^server-1$/`, out: `SELECT value FROM cpu WHERE host = 'server-1'`}, + {in: `SELECT value FROM cpu WHERE host !~ /^server-1$/`, out: `SELECT value FROM cpu WHERE host != 'server-1'`}, + {in: `SELECT value FROM cpu WHERE host =~ /^server 1$/`, out: `SELECT value FROM cpu WHERE host = 'server 1'`}, + {in: `SELECT value FROM cpu WHERE host =~ /^$/`, out: `SELECT value FROM cpu WHERE host = ''`}, + {in: `SELECT value FROM cpu WHERE host !~ /^$/`, out: `SELECT value FROM cpu WHERE host != ''`}, + {in: `SELECT value FROM cpu WHERE host =~ /^server-1$/ OR host =~ /^server-2$/`, out: `SELECT value FROM cpu WHERE host = 'server-1' OR host = 'server-2'`}, + {in: `SELECT value FROM cpu WHERE host =~ /^server-1$/ OR host =~ /^server]a$/`, out: `SELECT value FROM cpu WHERE host = 'server-1' OR host = 'server]a'`}, + {in: `SELECT value FROM cpu WHERE host =~ /^hello\?$/`, out: `SELECT value FROM cpu WHERE host = 'hello?'`}, + {in: `SELECT value FROM cpu WHERE host !~ /^\\$/`, out: `SELECT value FROM cpu WHERE host != '\\'`}, + {in: `SELECT value FROM cpu WHERE host !~ /^\\\$$/`, out: `SELECT value FROM cpu WHERE host != '\\$'`}, + } + + for i, test := range tests { + stmt, err := influxql.NewParser(strings.NewReader(test.in)).ParseStatement() + if err != nil { + t.Fatalf("[Example %d], %v", i, err) + } + + // Rewrite any supported regex conditions. + stmt.(*influxql.SelectStatement).RewriteRegexConditions() + + // Get the expected rewritten statement. + expStmt, err := influxql.NewParser(strings.NewReader(test.out)).ParseStatement() + if err != nil { + t.Fatalf("[Example %d], %v", i, err) + } + + // Compare the (potentially) rewritten AST to the expected AST. + if got, exp := stmt, expStmt; !reflect.DeepEqual(got, exp) { + t.Errorf("[Example %d]\nattempting %v\ngot %v\n%s\n\nexpected %v\n%s\n", i+1, test.in, got, mustMarshalJSON(got), exp, mustMarshalJSON(exp)) + } + } +} + +// Test SELECT statement time field rewrite. +func TestSelectStatement_RewriteTimeFields(t *testing.T) { + var tests = []struct { + s string + stmt influxql.Statement + }{ + { + s: `SELECT time, field1 FROM cpu`, + stmt: &influxql.SelectStatement{ + IsRawQuery: true, + Fields: []*influxql.Field{ + {Expr: &influxql.VarRef{Val: "field1"}}, + }, + Sources: []influxql.Source{ + &influxql.Measurement{Name: "cpu"}, + }, + }, + }, + { + s: `SELECT time AS timestamp, field1 FROM cpu`, + stmt: &influxql.SelectStatement{ + IsRawQuery: true, + Fields: []*influxql.Field{ + {Expr: &influxql.VarRef{Val: "field1"}}, + }, + Sources: []influxql.Source{ + &influxql.Measurement{Name: "cpu"}, + }, + TimeAlias: "timestamp", + }, + }, + } + + for i, tt := range tests { + // Parse statement. + stmt, err := influxql.NewParser(strings.NewReader(tt.s)).ParseStatement() + if err != nil { + t.Fatalf("invalid statement: %q: %s", tt.s, err) + } + + // Rewrite statement. + stmt.(*influxql.SelectStatement).RewriteTimeFields() + if !reflect.DeepEqual(tt.stmt, stmt) { + t.Logf("\n# %s\nexp=%s\ngot=%s\n", tt.s, mustMarshalJSON(tt.stmt), mustMarshalJSON(stmt)) + t.Logf("\nSQL exp=%s\nSQL got=%s\n", tt.stmt.String(), stmt.String()) + t.Errorf("%d. 
%q\n\nstmt mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.s, tt.stmt, stmt) } } } @@ -456,8 +607,7 @@ }, } - for i, tt := range tests { - t.Logf("index: %d, statement: %s", i, tt.stmt) + for _, tt := range tests { s := MustParseSelectStatement(tt.stmt) if s.IsRawQuery != tt.isRaw { t.Errorf("'%s', IsRawQuery should be %v", tt.stmt, tt.isRaw) @@ -592,106 +742,25 @@ } } -func TestSelectStatement_HasSimpleCount(t *testing.T) { - var tests = []struct { - stmt string - count bool - }{ - // No counts - { - stmt: `SELECT value FROM cpu`, - count: false, - }, - - // Query count - { - stmt: `SELECT count(value) FROM cpu`, - count: true, - }, - - // No GROUP BY time only - { - stmt: `SELECT count(distinct(value)) FROM cpu where time < now() GROUP BY time(5ms)`, - count: false, - }, - - // Query count - { - stmt: `SELECT typoCount(value) FROM cpu`, - count: false, - }, - - // No GROUP BY time only - { - stmt: `SELECT typoCount(distinct(value)) FROM cpu where time < now() GROUP BY time(5ms)`, - count: false, - }, - } - - for i, tt := range tests { - // Parse statement. - t.Logf("index: %d, statement: %s", i, tt.stmt) - stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement() - if err != nil { - t.Fatalf("invalid statement: %q: %s", tt.stmt, err) - } - - // Test count detection. - if c := stmt.(*influxql.SelectStatement).HasSimpleCount(); tt.count != c { - t.Errorf("%d. %q: unexpected count detection:\n\nexp=%v\n\ngot=%v\n\n", i, tt.stmt, tt.count, c) - continue - } - } -} - -func TestSelectStatement_HasCountDistinct(t *testing.T) { - var tests = []struct { - stmt string - count bool +// Ensure binary expression names can be evaluated. +func TestBinaryExprName(t *testing.T) { + for i, tt := range []struct { + expr string + name string }{ - // No counts - { - stmt: `SELECT value FROM cpu`, - count: false, - }, - - // Query count - { - stmt: `SELECT count(value) FROM cpu`, - count: false, - }, - - // No GROUP BY time only - { - stmt: `SELECT count(distinct(value)) FROM cpu where time < now() GROUP BY time(5ms)`, - count: true, - }, - - // Query count - { - stmt: `SELECT typoCount(value) FROM cpu`, - count: false, - }, - - // No GROUP BY time only - { - stmt: `SELECT typoCount(distinct(value)) FROM cpu where time < now() GROUP BY time(5ms)`, - count: false, - }, - } - - for i, tt := range tests { - // Parse statement. - t.Logf("index: %d, statement: %s", i, tt.stmt) - stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement() - if err != nil { - t.Fatalf("invalid statement: %q: %s", tt.stmt, err) - } - - // Test count detection. - if c := stmt.(*influxql.SelectStatement).HasCountDistinct(); tt.count != c { - t.Errorf("%d. %q: unexpected count detection:\n\nexp=%v\n\ngot=%v\n\n", i, tt.stmt, tt.count, c) - continue + {expr: `value + 1`, name: `value`}, + {expr: `"user" / total`, name: `user_total`}, + {expr: `("user" + total) / total`, name: `user_total_total`}, + } { + expr := influxql.MustParseExpr(tt.expr) + switch expr := expr.(type) { + case *influxql.BinaryExpr: + name := influxql.BinaryExprName(expr) + if name != tt.name { + t.Errorf("%d. unexpected name %s, got %s", i, name, tt.name) + } + default: + t.Errorf("%d. unexpected expr type: %T", i, expr) } } } @@ -699,8 +768,8 @@ // Ensure the time range of an expression can be extracted. 
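
// TimeRange() now returns an error for conditions that cannot be interpreted as a
// time restriction (double-quoted "identifiers", timestamps that overflow or
// underflow) instead of silently ignoring them, and an upper bound is made exclusive
// by backing off one nanosecond. A minimal sketch of the new three-value form,
// assuming the influxql package from this tree:

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	cond := influxql.MustParseExpr(`time >= '2000-01-01 00:00:00' AND time < '2000-01-02 00:00:00'`)
	min, max, err := influxql.TimeRange(cond)
	if err != nil {
		panic(err)
	}
	// Expected per the cases below: min 2000-01-01T00:00:00Z, max 2000-01-01T23:59:59.999999999Z.
	fmt.Println(min.UTC(), max.UTC())

	// A double-quoted identifier is no longer accepted as a time value.
	if _, _, err := influxql.TimeRange(influxql.MustParseExpr(`time > "2000-01-01 00:00:00"`)); err != nil {
		fmt.Println("error:", err)
	}
}
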
func TestTimeRange(t *testing.T) { for i, tt := range []struct { - expr string - min, max string + expr string + min, max, err string }{ // LHS VarRef {expr: `time > '2000-01-01 00:00:00'`, min: `2000-01-01T00:00:00.000000001Z`, max: `0001-01-01T00:00:00Z`}, @@ -718,7 +787,7 @@ {expr: `time < 10`, min: `0001-01-01T00:00:00Z`, max: `1970-01-01T00:00:00.000000009Z`}, // Equality - {expr: `time = '2000-01-01 00:00:00'`, min: `2000-01-01T00:00:00Z`, max: `2000-01-01T00:00:00Z`}, + {expr: `time = '2000-01-01 00:00:00'`, min: `2000-01-01T00:00:00Z`, max: `2000-01-01T00:00:00.000000001Z`}, // Multiple time expressions. {expr: `time >= '2000-01-01 00:00:00' AND time < '2000-01-02 00:00:00'`, min: `2000-01-01T00:00:00Z`, max: `2000-01-01T23:59:59.999999999Z`}, @@ -727,17 +796,22 @@ {expr: `time >= '2000-01-01 00:00:00' AND time <= '1999-01-01 00:00:00'`, min: `2000-01-01T00:00:00Z`, max: `1999-01-01T00:00:00Z`}, // Absolute time - {expr: `time = 1388534400s`, min: `2014-01-01T00:00:00Z`, max: `2014-01-01T00:00:00Z`}, + {expr: `time = 1388534400s`, min: `2014-01-01T00:00:00Z`, max: `2014-01-01T00:00:00.000000001Z`}, // Non-comparative expressions. {expr: `time`, min: `0001-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`}, {expr: `time + 2`, min: `0001-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`}, {expr: `time - '2000-01-01 00:00:00'`, min: `0001-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`}, {expr: `time AND '2000-01-01 00:00:00'`, min: `0001-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`}, + + // Invalid time expressions. + {expr: `time > "2000-01-01 00:00:00"`, min: `0001-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`, err: `invalid operation: time and *influxql.VarRef are not compatible`}, + {expr: `time > '2262-04-11 23:47:17'`, min: `0001-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`, err: `time 2262-04-11T23:47:17Z overflows time literal`}, + {expr: `time > '1677-09-20 19:12:43'`, min: `0001-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`, err: `time 1677-09-20T19:12:43Z underflows time literal`}, } { // Extract time range. expr := MustParseExpr(tt.expr) - min, max := influxql.TimeRange(expr) + min, max, err := influxql.TimeRange(expr) // Compare with expected min/max. if min := min.Format(time.RFC3339Nano); tt.min != min { @@ -748,6 +822,9 @@ t.Errorf("%d. %s: unexpected max:\n\nexp=%s\n\ngot=%s\n\n", i, tt.expr, tt.max, max) continue } + if (err != nil && err.Error() != tt.err) || (err == nil && tt.err != "") { + t.Errorf("%d. %s: unexpected error:\n\nexp=%s\n\ngot=%s\n\n", i, tt.expr, tt.err, err) + } } } @@ -806,7 +883,28 @@ }) // Verify that everything is flipped. - if act := act.String(); act != `2.000 = foo OR 1.000 > time` { + if act := act.String(); act != `2 = foo OR 1 > time` { + t.Fatalf("unexpected result: %s", act) + } +} + +// Ensure an Expr can be rewritten handling nils. +func TestRewriteExpr(t *testing.T) { + expr := MustParseExpr(`(time > 1 AND time < 10) OR foo = 2`) + + // Remove all time expressions. + act := influxql.RewriteExpr(expr, func(e influxql.Expr) influxql.Expr { + switch e := e.(type) { + case *influxql.BinaryExpr: + if lhs, ok := e.LHS.(*influxql.VarRef); ok && lhs.Val == "time" { + return nil + } + } + return e + }) + + // Verify that everything is flipped. 
+ if act := act.String(); act != `foo = 2` { t.Fatalf("unexpected result: %s", act) } } @@ -858,8 +956,8 @@ { stmt: `DROP CONTINUOUS QUERY "my query" ON "my database"`, }, - // See issues https://github.com/influxdb/influxdb/issues/1647 - // and https://github.com/influxdb/influxdb/issues/4404 + // See issues https://github.com/influxdata/influxdb/issues/1647 + // and https://github.com/influxdata/influxdb/issues/4404 //{ // stmt: `DELETE FROM "my db"."my rp"."my measurement"`, //}, @@ -924,7 +1022,7 @@ data map[string]interface{} }{ // Number literals. - {in: `1 + 2`, out: float64(3)}, + {in: `1 + 2`, out: int64(3)}, {in: `(foo*2) + ( (4/2) + (3 * 5) - 0.5 )`, out: float64(26.5), data: map[string]interface{}{"foo": float64(5)}}, {in: `foo / 2`, out: float64(2), data: map[string]interface{}{"foo": float64(4)}}, {in: `4 = 4`, out: true}, @@ -934,20 +1032,33 @@ {in: `4 < 6`, out: true}, {in: `4 <= 4`, out: true}, {in: `4 AND 5`, out: nil}, + {in: `0 = 'test'`, out: false}, + {in: `1.0 = 1`, out: true}, + {in: `1.2 = 1`, out: false}, // Boolean literals. {in: `true AND false`, out: false}, {in: `true OR false`, out: true}, + {in: `false = 4`, out: false}, // String literals. {in: `'foo' = 'bar'`, out: false}, {in: `'foo' = 'foo'`, out: true}, + {in: `'' = 4`, out: false}, + + // Regex literals. + {in: `'foo' =~ /f.*/`, out: true}, + {in: `'foo' =~ /b.*/`, out: false}, + {in: `'foo' !~ /f.*/`, out: false}, + {in: `'foo' !~ /b.*/`, out: true}, // Variable references. {in: `foo`, out: "bar", data: map[string]interface{}{"foo": "bar"}}, {in: `foo = 'bar'`, out: true, data: map[string]interface{}{"foo": "bar"}}, {in: `foo = 'bar'`, out: nil, data: map[string]interface{}{"foo": nil}}, {in: `foo <> 'bar'`, out: true, data: map[string]interface{}{"foo": "xxx"}}, + {in: `foo =~ /b.*/`, out: true, data: map[string]interface{}{"foo": "bar"}}, + {in: `foo !~ /b.*/`, out: false, data: map[string]interface{}{"foo": "bar"}}, } { // Evaluate expression. out := influxql.Eval(MustParseExpr(tt.in), tt.data) @@ -970,17 +1081,18 @@ data Valuer }{ // Number literals. - {in: `1 + 2`, out: `3.000`}, - {in: `(foo*2) + ( (4/2) + (3 * 5) - 0.5 )`, out: `(foo * 2.000) + 16.500`}, - {in: `foo(bar(2 + 3), 4)`, out: `foo(bar(5.000), 4.000)`}, + {in: `1 + 2`, out: `3`}, + {in: `(foo*2) + ( (4/2) + (3 * 5) - 0.5 )`, out: `(foo * 2) + 16.500`}, + {in: `foo(bar(2 + 3), 4)`, out: `foo(bar(5), 4)`}, {in: `4 / 0`, out: `0.000`}, + {in: `1 / 2`, out: `0.500`}, {in: `4 = 4`, out: `true`}, {in: `4 <> 4`, out: `false`}, {in: `6 > 4`, out: `true`}, {in: `4 >= 4`, out: `true`}, {in: `4 < 6`, out: `true`}, {in: `4 <= 4`, out: `true`}, - {in: `4 AND 5`, out: `4.000 AND 5.000`}, + {in: `4 AND 5`, out: `4 AND 5`}, // Boolean literals. {in: `true AND false`, out: `false`}, @@ -993,10 +1105,13 @@ {in: `true <> false`, out: `true`}, {in: `true + false`, out: `true + false`}, - // Time literals. + // Time literals with now(). 
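
// The "Time literals with now()" cases below exercise constant folding of now()
// arithmetic. Outside the tests this is driven through Reduce() with a NowValuer,
// roughly as sketched here (Reduce itself is not shown in this hunk):

package main

import (
	"fmt"
	"time"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	now := time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)
	valuer := &influxql.NowValuer{Now: now}

	// now() is substituted via the valuer and the duration arithmetic is folded
	// into a single time literal.
	expr := influxql.Reduce(influxql.MustParseExpr(`now() - 1h`), valuer)
	fmt.Println(expr.String()) // '1999-12-31T23:00:00Z'
}
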
{in: `now() + 2h`, out: `'2000-01-01T02:00:00Z'`, data: map[string]interface{}{"now()": now}}, {in: `now() / 2h`, out: `'2000-01-01T00:00:00Z' / 2h`, data: map[string]interface{}{"now()": now}}, {in: `4µ + now()`, out: `'2000-01-01T00:00:00.000004Z'`, data: map[string]interface{}{"now()": now}}, + {in: `now() + 2000000000`, out: `'2000-01-01T00:00:02Z'`, data: map[string]interface{}{"now()": now}}, + {in: `2000000000 + now()`, out: `'2000-01-01T00:00:02Z'`, data: map[string]interface{}{"now()": now}}, + {in: `now() - 2000000000`, out: `'1999-12-31T23:59:58Z'`, data: map[string]interface{}{"now()": now}}, {in: `now() = now()`, out: `true`, data: map[string]interface{}{"now()": now}}, {in: `now() <> now()`, out: `false`, data: map[string]interface{}{"now()": now}}, {in: `now() < now() + 1h`, out: `true`, data: map[string]interface{}{"now()": now}}, @@ -1006,6 +1121,29 @@ {in: `now() - (now() - 60s)`, out: `1m`, data: map[string]interface{}{"now()": now}}, {in: `now() AND now()`, out: `'2000-01-01T00:00:00Z' AND '2000-01-01T00:00:00Z'`, data: map[string]interface{}{"now()": now}}, {in: `now()`, out: `now()`}, + {in: `946684800000000000 + 2h`, out: `'2000-01-01T02:00:00Z'`}, + + // Time literals. + {in: `'2000-01-01T00:00:00Z' + 2h`, out: `'2000-01-01T02:00:00Z'`}, + {in: `'2000-01-01T00:00:00Z' / 2h`, out: `'2000-01-01T00:00:00Z' / 2h`}, + {in: `4µ + '2000-01-01T00:00:00Z'`, out: `'2000-01-01T00:00:00.000004Z'`}, + {in: `'2000-01-01T00:00:00Z' + 2000000000`, out: `'2000-01-01T00:00:02Z'`}, + {in: `2000000000 + '2000-01-01T00:00:00Z'`, out: `'2000-01-01T00:00:02Z'`}, + {in: `'2000-01-01T00:00:00Z' - 2000000000`, out: `'1999-12-31T23:59:58Z'`}, + {in: `'2000-01-01T00:00:00Z' = '2000-01-01T00:00:00Z'`, out: `true`}, + {in: `'2000-01-01T00:00:00.000000000Z' = '2000-01-01T00:00:00Z'`, out: `true`}, + {in: `'2000-01-01T00:00:00Z' <> '2000-01-01T00:00:00Z'`, out: `false`}, + {in: `'2000-01-01T00:00:00.000000000Z' <> '2000-01-01T00:00:00Z'`, out: `false`}, + {in: `'2000-01-01T00:00:00Z' < '2000-01-01T00:00:00Z' + 1h`, out: `true`}, + {in: `'2000-01-01T00:00:00.000000000Z' < '2000-01-01T00:00:00Z' + 1h`, out: `true`}, + {in: `'2000-01-01T00:00:00Z' <= '2000-01-01T00:00:00Z' + 1h`, out: `true`}, + {in: `'2000-01-01T00:00:00.000000000Z' <= '2000-01-01T00:00:00Z' + 1h`, out: `true`}, + {in: `'2000-01-01T00:00:00Z' > '2000-01-01T00:00:00Z' - 1h`, out: `true`}, + {in: `'2000-01-01T00:00:00.000000000Z' > '2000-01-01T00:00:00Z' - 1h`, out: `true`}, + {in: `'2000-01-01T00:00:00Z' >= '2000-01-01T00:00:00Z' - 1h`, out: `true`}, + {in: `'2000-01-01T00:00:00.000000000Z' >= '2000-01-01T00:00:00Z' - 1h`, out: `true`}, + {in: `'2000-01-01T00:00:00Z' - ('2000-01-01T00:00:00Z' - 60s)`, out: `1m`}, + {in: `'2000-01-01T00:00:00Z' AND '2000-01-01T00:00:00Z'`, out: `'2000-01-01T00:00:00Z' AND '2000-01-01T00:00:00Z'`}, // Duration literals. {in: `10m + 1h - 60s`, out: `69m`}, @@ -1018,7 +1156,7 @@ {in: `60s >= 1m`, out: `true`}, {in: `60s AND 1m`, out: `1m AND 1m`}, {in: `60m / 0`, out: `0s`}, - {in: `60m + 50`, out: `1h + 50.000`}, + {in: `60m + 50`, out: `1h + 50`}, // String literals. 
{in: `'foo' + 'bar'`, out: `'foobar'`}, @@ -1049,12 +1187,12 @@ { //case: binary expr(valRef) in: []string{"value+value"}, out: []string{"value", "value"}, - alias: []string{""}, + alias: []string{"value_value"}, }, { //case: binary expr + valRef in: []string{"value+value", "temperature"}, out: []string{"value", "value", "temperature"}, - alias: []string{"", "temperature"}, + alias: []string{"value_value", "temperature"}, }, { //case: aggregate expr in: []string{"mean(value)"}, @@ -1064,37 +1202,37 @@ { //case: binary expr(aggregate expr) in: []string{"mean(value) + max(value)"}, out: []string{"value", "value"}, - alias: []string{""}, + alias: []string{"mean_max"}, }, { //case: binary expr(aggregate expr) + valRef in: []string{"mean(value) + max(value)", "temperature"}, out: []string{"value", "value", "temperature"}, - alias: []string{"", "temperature"}, + alias: []string{"mean_max", "temperature"}, }, { //case: mixed aggregate and varRef in: []string{"mean(value) + temperature"}, out: []string{"value", "temperature"}, - alias: []string{""}, + alias: []string{"mean_temperature"}, }, { //case: ParenExpr(varRef) in: []string{"(value)"}, out: []string{"value"}, - alias: []string{""}, + alias: []string{"value"}, }, { //case: ParenExpr(varRef + varRef) in: []string{"(value + value)"}, out: []string{"value", "value"}, - alias: []string{""}, + alias: []string{"value_value"}, }, { //case: ParenExpr(aggregate) in: []string{"(mean(value))"}, out: []string{"value"}, - alias: []string{""}, + alias: []string{"mean"}, }, { //case: ParenExpr(aggregate + aggregate) in: []string{"(mean(value) + max(value))"}, out: []string{"value", "value"}, - alias: []string{""}, + alias: []string{"mean_max"}, }, } { fields := influxql.Fields{} @@ -1104,14 +1242,167 @@ } got := fields.Names() if !reflect.DeepEqual(got, test.out) { - t.Errorf("get fileds name:\nexp=%v\ngot=%v\n", test.out, got) + t.Errorf("get fields name:\nexp=%v\ngot=%v\n", test.out, got) } alias := fields.AliasNames() if !reflect.DeepEqual(alias, test.alias) { - t.Errorf("get fileds alias name:\nexp=%v\ngot=%v\n", test.alias, alias) + t.Errorf("get fields alias name:\nexp=%v\ngot=%v\n", test.alias, alias) + } + } + +} + +func TestSelect_ColumnNames(t *testing.T) { + for i, tt := range []struct { + stmt *influxql.SelectStatement + columns []string + }{ + { + stmt: &influxql.SelectStatement{ + Fields: influxql.Fields([]*influxql.Field{ + {Expr: &influxql.VarRef{Val: "value"}}, + }), + }, + columns: []string{"time", "value"}, + }, + { + stmt: &influxql.SelectStatement{ + Fields: influxql.Fields([]*influxql.Field{ + {Expr: &influxql.VarRef{Val: "value"}}, + {Expr: &influxql.VarRef{Val: "value"}}, + {Expr: &influxql.VarRef{Val: "value_1"}}, + }), + }, + columns: []string{"time", "value", "value_1", "value_1_1"}, + }, + { + stmt: &influxql.SelectStatement{ + Fields: influxql.Fields([]*influxql.Field{ + {Expr: &influxql.VarRef{Val: "value"}}, + {Expr: &influxql.VarRef{Val: "value_1"}}, + {Expr: &influxql.VarRef{Val: "value"}}, + }), + }, + columns: []string{"time", "value", "value_1", "value_2"}, + }, + { + stmt: &influxql.SelectStatement{ + Fields: influxql.Fields([]*influxql.Field{ + {Expr: &influxql.VarRef{Val: "value"}}, + {Expr: &influxql.VarRef{Val: "total"}, Alias: "value"}, + {Expr: &influxql.VarRef{Val: "value"}}, + }), + }, + columns: []string{"time", "value_1", "value", "value_2"}, + }, + { + stmt: &influxql.SelectStatement{ + Fields: influxql.Fields([]*influxql.Field{ + {Expr: &influxql.VarRef{Val: "value"}}, + }), + TimeAlias: "timestamp", + }, + 
columns: []string{"timestamp", "value"}, + }, + } { + columns := tt.stmt.ColumnNames() + if !reflect.DeepEqual(columns, tt.columns) { + t.Errorf("%d. expected %s, got %s", i, tt.columns, columns) } } +} + +func TestSelect_Privileges(t *testing.T) { + stmt := &influxql.SelectStatement{ + Target: &influxql.Target{ + Measurement: &influxql.Measurement{Database: "db2"}, + }, + Sources: []influxql.Source{ + &influxql.Measurement{Database: "db0"}, + &influxql.Measurement{Database: "db1"}, + }, + } + + exp := influxql.ExecutionPrivileges{ + influxql.ExecutionPrivilege{Name: "db0", Privilege: influxql.ReadPrivilege}, + influxql.ExecutionPrivilege{Name: "db1", Privilege: influxql.ReadPrivilege}, + influxql.ExecutionPrivilege{Name: "db2", Privilege: influxql.WritePrivilege}, + } + + got, err := stmt.RequiredPrivileges() + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(exp, got) { + t.Errorf("exp: %v, got: %v", exp, got) + } +} + +func TestSources_Names(t *testing.T) { + sources := influxql.Sources([]influxql.Source{ + &influxql.Measurement{ + Name: "cpu", + }, + &influxql.Measurement{ + Name: "mem", + }, + }) + + names := sources.Names() + if names[0] != "cpu" { + t.Errorf("expected cpu, got %s", names[0]) + } + if names[1] != "mem" { + t.Errorf("expected mem, got %s", names[1]) + } +} + +func TestSources_HasSystemSource(t *testing.T) { + sources := influxql.Sources([]influxql.Source{ + &influxql.Measurement{ + Name: "_measurements", + }, + }) + + ok := sources.HasSystemSource() + if !ok { + t.Errorf("expected to find a system source, found none") + } + + sources = influxql.Sources([]influxql.Source{ + &influxql.Measurement{ + Name: "cpu", + }, + }) + + ok = sources.HasSystemSource() + if ok { + t.Errorf("expected to find no system source, found one") + } +} + +// Parse statements that might appear valid but should return an error. +// If allowed to execute, at least some of these statements would result in a panic. +func TestParse_Errors(t *testing.T) { + for _, tt := range []struct { + tmpl string + good string + bad string + }{ + // Second argument to derivative must be duration + {tmpl: `SELECT derivative(f, %s) FROM m`, good: "1h", bad: "true"}, + } { + good := fmt.Sprintf(tt.tmpl, tt.good) + if _, err := influxql.ParseStatement(good); err != nil { + t.Fatalf("statement %q should have parsed correctly but returned error: %s", good, err) + } + + bad := fmt.Sprintf(tt.tmpl, tt.bad) + if _, err := influxql.ParseStatement(bad); err == nil { + t.Fatalf("statement %q should have resulted in a parse error but did not", bad) + } + } } // Valuer represents a simple wrapper around a map to implement the influxql.Valuer interface. @@ -1123,6 +1414,15 @@ return } +// MustTimeRange will parse a time range. Panic on error. +func MustTimeRange(expr influxql.Expr) (min, max time.Time) { + min, max, err := influxql.TimeRange(expr) + if err != nil { + panic(err) + } + return min, max +} + // mustParseTime parses an IS0-8601 string. Panic on error. 
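
// The ColumnNames() cases above show how duplicate output names are disambiguated
// with numeric suffixes and how "time AS <alias>" renames the time column. A tiny
// sketch of the duplicate handling, assuming the influxql package from this tree:

package main

import (
	"fmt"
	"strings"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	stmt, err := influxql.NewParser(strings.NewReader(`SELECT value, value, value_1 FROM cpu`)).ParseStatement()
	if err != nil {
		panic(err)
	}
	// Expected per the cases above: [time value value_1 value_1_1]
	fmt.Println(stmt.(*influxql.SelectStatement).ColumnNames())
}
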
func mustParseTime(s string) time.Time { t, err := time.Parse(time.RFC3339, s) diff -Nru influxdb-0.10.0+dfsg1/influxql/call_iterator.go influxdb-1.1.1+dfsg1/influxql/call_iterator.go --- influxdb-0.10.0+dfsg1/influxql/call_iterator.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/call_iterator.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,1294 @@ +package influxql + +import ( + "bytes" + "container/heap" + "fmt" + "math" + "sort" + "time" +) + +/* +This file contains iterator implementations for each function call available +in InfluxQL. Call iterators are separated into two groups: + +1. Map/reduce-style iterators - these are passed to IteratorCreator so that + processing can be at the low-level storage and aggregates are returned. + +2. Raw aggregate iterators - these require the full set of data for a window. + These are handled by the select() function and raw points are streamed in + from the low-level storage. + +There are helpers to aid in building aggregate iterators. For simple map/reduce +iterators, you can use the reduceIterator types and pass a reduce function. This +reduce function is passed a previous and current value and the new timestamp, +value, and auxilary fields are returned from it. + +For raw aggregate iterators, you can use the reduceSliceIterators which pass +in a slice of all points to the function and return a point. For more complex +iterator types, you may need to create your own iterators by hand. + +Once your iterator is complete, you'll need to add it to the NewCallIterator() +function if it is to be available to IteratorCreators and add it to the select() +function to allow it to be included during planning. +*/ + +// NewCallIterator returns a new iterator for a Call. +func NewCallIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + name := opt.Expr.(*Call).Name + switch name { + case "count": + return newCountIterator(input, opt) + case "min": + return newMinIterator(input, opt) + case "max": + return newMaxIterator(input, opt) + case "sum": + return newSumIterator(input, opt) + case "first": + return newFirstIterator(input, opt) + case "last": + return newLastIterator(input, opt) + case "mean": + return newMeanIterator(input, opt) + default: + return nil, fmt.Errorf("unsupported function call: %s", name) + } +} + +// newCountIterator returns an iterator for operating on a count() call. +func newCountIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + // FIXME: Wrap iterator in int-type iterator and always output int value. 
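
// The file comment above describes the reduce-function contract: each function is
// handed the previous aggregate point and the current input point, and returns the
// new timestamp, value, and auxiliary fields. A standalone sketch of folding a window
// by hand with one of the exported reducers (FloatSumReduce, defined later in this
// file):

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	points := []influxql.FloatPoint{
		{Time: 1, Value: 2},
		{Time: 2, Value: 3},
		{Time: 3, Value: 5},
	}

	var prev *influxql.FloatPoint
	for i := range points {
		t, v, aux := influxql.FloatSumReduce(prev, &points[i])
		prev = &influxql.FloatPoint{Time: t, Value: v, Aux: aux}
	}
	fmt.Println(prev.Value) // 10
}
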
+ + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, IntegerPointEmitter) { + fn := NewFloatFuncIntegerReducer(FloatCountReduce, &IntegerPoint{Value: 0, Time: ZeroTime}) + return fn, fn + } + return &floatReduceIntegerIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerFuncReducer(IntegerCountReduce, &IntegerPoint{Value: 0, Time: ZeroTime}) + return fn, fn + } + return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + case StringIterator: + createFn := func() (StringPointAggregator, IntegerPointEmitter) { + fn := NewStringFuncIntegerReducer(StringCountReduce, &IntegerPoint{Value: 0, Time: ZeroTime}) + return fn, fn + } + return &stringReduceIntegerIterator{input: newBufStringIterator(input), opt: opt, create: createFn}, nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, IntegerPointEmitter) { + fn := NewBooleanFuncIntegerReducer(BooleanCountReduce, &IntegerPoint{Value: 0, Time: ZeroTime}) + return fn, fn + } + return &booleanReduceIntegerIterator{input: newBufBooleanIterator(input), opt: opt, create: createFn}, nil + default: + return nil, fmt.Errorf("unsupported count iterator type: %T", input) + } +} + +// FloatCountReduce returns the count of points. +func FloatCountReduce(prev *IntegerPoint, curr *FloatPoint) (int64, int64, []interface{}) { + if prev == nil { + return ZeroTime, 1, nil + } + return ZeroTime, prev.Value + 1, nil +} + +// IntegerCountReduce returns the count of points. +func IntegerCountReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { + if prev == nil { + return ZeroTime, 1, nil + } + return ZeroTime, prev.Value + 1, nil +} + +// StringCountReduce returns the count of points. +func StringCountReduce(prev *IntegerPoint, curr *StringPoint) (int64, int64, []interface{}) { + if prev == nil { + return ZeroTime, 1, nil + } + return ZeroTime, prev.Value + 1, nil +} + +// BooleanCountReduce returns the count of points. +func BooleanCountReduce(prev *IntegerPoint, curr *BooleanPoint) (int64, int64, []interface{}) { + if prev == nil { + return ZeroTime, 1, nil + } + return ZeroTime, prev.Value + 1, nil +} + +// newMinIterator returns an iterator for operating on a min() call. +func newMinIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatFuncReducer(FloatMinReduce, nil) + return fn, fn + } + return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerFuncReducer(IntegerMinReduce, nil) + return fn, fn + } + return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { + fn := NewBooleanFuncReducer(BooleanMinReduce, nil) + return fn, fn + } + return &booleanReduceBooleanIterator{input: newBufBooleanIterator(input), opt: opt, create: createFn}, nil + default: + return nil, fmt.Errorf("unsupported min iterator type: %T", input) + } +} + +// FloatMinReduce returns the minimum value between prev & curr. 
+func FloatMinReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { + if prev == nil || curr.Value < prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) { + return curr.Time, curr.Value, curr.Aux + } + return prev.Time, prev.Value, prev.Aux +} + +// IntegerMinReduce returns the minimum value between prev & curr. +func IntegerMinReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { + if prev == nil || curr.Value < prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) { + return curr.Time, curr.Value, curr.Aux + } + return prev.Time, prev.Value, prev.Aux +} + +// BooleanMinReduce returns the minimum value between prev & curr. +func BooleanMinReduce(prev, curr *BooleanPoint) (int64, bool, []interface{}) { + if prev == nil || (curr.Value != prev.Value && !curr.Value) || (curr.Value == prev.Value && curr.Time < prev.Time) { + return curr.Time, curr.Value, curr.Aux + } + return prev.Time, prev.Value, prev.Aux +} + +// newMaxIterator returns an iterator for operating on a max() call. +func newMaxIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatFuncReducer(FloatMaxReduce, nil) + return fn, fn + } + return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerFuncReducer(IntegerMaxReduce, nil) + return fn, fn + } + return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { + fn := NewBooleanFuncReducer(BooleanMaxReduce, nil) + return fn, fn + } + return &booleanReduceBooleanIterator{input: newBufBooleanIterator(input), opt: opt, create: createFn}, nil + default: + return nil, fmt.Errorf("unsupported max iterator type: %T", input) + } +} + +// FloatMaxReduce returns the maximum value between prev & curr. +func FloatMaxReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { + if prev == nil || curr.Value > prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) { + return curr.Time, curr.Value, curr.Aux + } + return prev.Time, prev.Value, prev.Aux +} + +// IntegerMaxReduce returns the maximum value between prev & curr. +func IntegerMaxReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { + if prev == nil || curr.Value > prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) { + return curr.Time, curr.Value, curr.Aux + } + return prev.Time, prev.Value, prev.Aux +} + +// BooleanMaxReduce returns the minimum value between prev & curr. +func BooleanMaxReduce(prev, curr *BooleanPoint) (int64, bool, []interface{}) { + if prev == nil || (curr.Value != prev.Value && curr.Value) || (curr.Value == prev.Value && curr.Time < prev.Time) { + return curr.Time, curr.Value, curr.Aux + } + return prev.Time, prev.Value, prev.Aux +} + +// newSumIterator returns an iterator for operating on a sum() call. 
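
// FloatMinReduce/FloatMaxReduce above (and their integer and boolean variants) break
// ties on equal values by keeping the point with the earliest timestamp; for
// booleans, min() treats false as smaller than true and max() the reverse. A short
// sketch of the tie-breaking:

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	a := &influxql.FloatPoint{Time: 10, Value: 7}
	b := &influxql.FloatPoint{Time: 5, Value: 7}

	// Equal values: the earlier timestamp (5) wins for both min and max.
	tMin, _, _ := influxql.FloatMinReduce(a, b)
	tMax, _, _ := influxql.FloatMaxReduce(a, b)
	fmt.Println(tMin, tMax) // 5 5
}
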
+func newSumIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatFuncReducer(FloatSumReduce, &FloatPoint{Value: 0, Time: ZeroTime}) + return fn, fn + } + return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerFuncReducer(IntegerSumReduce, &IntegerPoint{Value: 0, Time: ZeroTime}) + return fn, fn + } + return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + default: + return nil, fmt.Errorf("unsupported sum iterator type: %T", input) + } +} + +// FloatSumReduce returns the sum prev value & curr value. +func FloatSumReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { + if prev == nil { + return ZeroTime, curr.Value, nil + } + return prev.Time, prev.Value + curr.Value, nil +} + +// IntegerSumReduce returns the sum prev value & curr value. +func IntegerSumReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { + if prev == nil { + return ZeroTime, curr.Value, nil + } + return prev.Time, prev.Value + curr.Value, nil +} + +// newFirstIterator returns an iterator for operating on a first() call. +func newFirstIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatFuncReducer(FloatFirstReduce, nil) + return fn, fn + } + return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerFuncReducer(IntegerFirstReduce, nil) + return fn, fn + } + return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + case StringIterator: + createFn := func() (StringPointAggregator, StringPointEmitter) { + fn := NewStringFuncReducer(StringFirstReduce, nil) + return fn, fn + } + return &stringReduceStringIterator{input: newBufStringIterator(input), opt: opt, create: createFn}, nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { + fn := NewBooleanFuncReducer(BooleanFirstReduce, nil) + return fn, fn + } + return &booleanReduceBooleanIterator{input: newBufBooleanIterator(input), opt: opt, create: createFn}, nil + default: + return nil, fmt.Errorf("unsupported first iterator type: %T", input) + } +} + +// FloatFirstReduce returns the first point sorted by time. +func FloatFirstReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { + if prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { + return curr.Time, curr.Value, curr.Aux + } + return prev.Time, prev.Value, prev.Aux +} + +// IntegerFirstReduce returns the first point sorted by time. +func IntegerFirstReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { + if prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { + return curr.Time, curr.Value, curr.Aux + } + return prev.Time, prev.Value, prev.Aux +} + +// StringFirstReduce returns the first point sorted by time. 
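
// FloatFirstReduce/IntegerFirstReduce above pick the earliest point and, when two
// points share a timestamp, prefer the larger value (the boolean variant prefers
// false). A short sketch of the same-timestamp case:

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	prev := &influxql.FloatPoint{Time: 100, Value: 1.5}
	curr := &influxql.FloatPoint{Time: 100, Value: 2.5}

	// Same timestamp: the larger value is kept as "first".
	_, v, _ := influxql.FloatFirstReduce(prev, curr)
	fmt.Println(v) // 2.5
}
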
+func StringFirstReduce(prev, curr *StringPoint) (int64, string, []interface{}) { + if prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { + return curr.Time, curr.Value, curr.Aux + } + return prev.Time, prev.Value, prev.Aux +} + +// BooleanFirstReduce returns the first point sorted by time. +func BooleanFirstReduce(prev, curr *BooleanPoint) (int64, bool, []interface{}) { + if prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && !curr.Value && prev.Value) { + return curr.Time, curr.Value, curr.Aux + } + return prev.Time, prev.Value, prev.Aux +} + +// newLastIterator returns an iterator for operating on a last() call. +func newLastIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatFuncReducer(FloatLastReduce, nil) + return fn, fn + } + return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerFuncReducer(IntegerLastReduce, nil) + return fn, fn + } + return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + case StringIterator: + createFn := func() (StringPointAggregator, StringPointEmitter) { + fn := NewStringFuncReducer(StringLastReduce, nil) + return fn, fn + } + return &stringReduceStringIterator{input: newBufStringIterator(input), opt: opt, create: createFn}, nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { + fn := NewBooleanFuncReducer(BooleanLastReduce, nil) + return fn, fn + } + return &booleanReduceBooleanIterator{input: newBufBooleanIterator(input), opt: opt, create: createFn}, nil + default: + return nil, fmt.Errorf("unsupported last iterator type: %T", input) + } +} + +// FloatLastReduce returns the last point sorted by time. +func FloatLastReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { + if prev == nil || curr.Time > prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { + return curr.Time, curr.Value, curr.Aux + } + return prev.Time, prev.Value, prev.Aux +} + +// IntegerLastReduce returns the last point sorted by time. +func IntegerLastReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { + if prev == nil || curr.Time > prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { + return curr.Time, curr.Value, curr.Aux + } + return prev.Time, prev.Value, prev.Aux +} + +// StringLastReduce returns the first point sorted by time. +func StringLastReduce(prev, curr *StringPoint) (int64, string, []interface{}) { + if prev == nil || curr.Time > prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { + return curr.Time, curr.Value, curr.Aux + } + return prev.Time, prev.Value, prev.Aux +} + +// BooleanLastReduce returns the first point sorted by time. +func BooleanLastReduce(prev, curr *BooleanPoint) (int64, bool, []interface{}) { + if prev == nil || curr.Time > prev.Time || (curr.Time == prev.Time && curr.Value && !prev.Value) { + return curr.Time, curr.Value, curr.Aux + } + return prev.Time, prev.Value, prev.Aux +} + +// NewDistinctIterator returns an iterator for operating on a distinct() call. 
+func NewDistinctIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatDistinctReducer() + return fn, fn + } + return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerDistinctReducer() + return fn, fn + } + return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + case StringIterator: + createFn := func() (StringPointAggregator, StringPointEmitter) { + fn := NewStringDistinctReducer() + return fn, fn + } + return &stringReduceStringIterator{input: newBufStringIterator(input), opt: opt, create: createFn}, nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { + fn := NewBooleanDistinctReducer() + return fn, fn + } + return &booleanReduceBooleanIterator{input: newBufBooleanIterator(input), opt: opt, create: createFn}, nil + default: + return nil, fmt.Errorf("unsupported distinct iterator type: %T", input) + } +} + +// newMeanIterator returns an iterator for operating on a mean() call. +func newMeanIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatMeanReducer() + return fn, fn + } + return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, FloatPointEmitter) { + fn := NewIntegerMeanReducer() + return fn, fn + } + return &integerReduceFloatIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + default: + return nil, fmt.Errorf("unsupported mean iterator type: %T", input) + } +} + +// NewMedianIterator returns an iterator for operating on a median() call. +func NewMedianIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + return newMedianIterator(input, opt) +} + +// newMedianIterator returns an iterator for operating on a median() call. +func newMedianIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatSliceFuncReducer(FloatMedianReduceSlice) + return fn, fn + } + return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, FloatPointEmitter) { + fn := NewIntegerSliceFuncFloatReducer(IntegerMedianReduceSlice) + return fn, fn + } + return &integerReduceFloatIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + default: + return nil, fmt.Errorf("unsupported median iterator type: %T", input) + } +} + +// FloatMedianReduceSlice returns the median value within a window. +func FloatMedianReduceSlice(a []FloatPoint) []FloatPoint { + if len(a) == 1 { + return a + } + + // OPTIMIZE(benbjohnson): Use getSortedRange() from v0.9.5.1. + + // Return the middle value from the points. + // If there are an even number of points then return the mean of the two middle points. 
+ sort.Sort(floatPointsByValue(a)) + if len(a)%2 == 0 { + lo, hi := a[len(a)/2-1], a[(len(a)/2)] + return []FloatPoint{{Time: ZeroTime, Value: lo.Value + (hi.Value-lo.Value)/2}} + } + return []FloatPoint{{Time: ZeroTime, Value: a[len(a)/2].Value}} +} + +// IntegerMedianReduceSlice returns the median value within a window. +func IntegerMedianReduceSlice(a []IntegerPoint) []FloatPoint { + if len(a) == 1 { + return []FloatPoint{{Time: ZeroTime, Value: float64(a[0].Value)}} + } + + // OPTIMIZE(benbjohnson): Use getSortedRange() from v0.9.5.1. + + // Return the middle value from the points. + // If there are an even number of points then return the mean of the two middle points. + sort.Sort(integerPointsByValue(a)) + if len(a)%2 == 0 { + lo, hi := a[len(a)/2-1], a[(len(a)/2)] + return []FloatPoint{{Time: ZeroTime, Value: float64(lo.Value) + float64(hi.Value-lo.Value)/2}} + } + return []FloatPoint{{Time: ZeroTime, Value: float64(a[len(a)/2].Value)}} +} + +// newModeIterator returns an iterator for operating on a mode() call. +func NewModeIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatSliceFuncReducer(FloatModeReduceSlice) + return fn, fn + } + return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerSliceFuncReducer(IntegerModeReduceSlice) + return fn, fn + } + return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + case StringIterator: + createFn := func() (StringPointAggregator, StringPointEmitter) { + fn := NewStringSliceFuncReducer(StringModeReduceSlice) + return fn, fn + } + return &stringReduceStringIterator{input: newBufStringIterator(input), opt: opt, create: createFn}, nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { + fn := NewBooleanSliceFuncReducer(BooleanModeReduceSlice) + return fn, fn + } + return &booleanReduceBooleanIterator{input: newBufBooleanIterator(input), opt: opt, create: createFn}, nil + + default: + return nil, fmt.Errorf("unsupported median iterator type: %T", input) + } +} + +// FloatModeReduceSlice returns the mode value within a window. +func FloatModeReduceSlice(a []FloatPoint) []FloatPoint { + if len(a) == 1 { + return a + } + + // fmt.Println(a[0]) + sort.Sort(floatPointsByValue(a)) + + mostFreq := 0 + currFreq := 0 + currMode := a[0].Value + mostMode := a[0].Value + mostTime := a[0].Time + currTime := a[0].Time + + for _, p := range a { + if p.Value != currMode { + currFreq = 1 + currMode = p.Value + currTime = p.Time + continue + } + currFreq++ + if mostFreq > currFreq || (mostFreq == currFreq && currTime > mostTime) { + continue + } + mostFreq = currFreq + mostMode = p.Value + mostTime = p.Time + } + + return []FloatPoint{{Time: ZeroTime, Value: mostMode}} +} + +// IntegerModeReduceSlice returns the mode value within a window. 
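FloatMedianReduceSlice and IntegerMedianReduceSlice above sort the window by value and either take the middle point or, for an even count, the midpoint of the two middle points. A compact sketch of the same arithmetic on plain float64 values (assumes a non-empty window):

```go
package main

import (
	"fmt"
	"sort"
)

// median mirrors FloatMedianReduceSlice: sort by value, then take the middle
// element, or the midpoint of the two middle elements when the window holds
// an even number of points.
func median(values []float64) float64 {
	sort.Float64s(values)
	n := len(values)
	if n%2 == 0 {
		lo, hi := values[n/2-1], values[n/2]
		return lo + (hi-lo)/2
	}
	return values[n/2]
}

func main() {
	fmt.Println(median([]float64{10, 20, 5}))     // 10
	fmt.Println(median([]float64{10, 20, 5, 15})) // 12.5
}
```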
+func IntegerModeReduceSlice(a []IntegerPoint) []IntegerPoint { + if len(a) == 1 { + return a + } + sort.Sort(integerPointsByValue(a)) + + mostFreq := 0 + currFreq := 0 + currMode := a[0].Value + mostMode := a[0].Value + mostTime := a[0].Time + currTime := a[0].Time + + for _, p := range a { + if p.Value != currMode { + currFreq = 1 + currMode = p.Value + currTime = p.Time + continue + } + currFreq++ + if mostFreq > currFreq || (mostFreq == currFreq && currTime > mostTime) { + continue + } + mostFreq = currFreq + mostMode = p.Value + mostTime = p.Time + } + + return []IntegerPoint{{Time: ZeroTime, Value: mostMode}} +} + +// StringModeReduceSlice returns the mode value within a window. +func StringModeReduceSlice(a []StringPoint) []StringPoint { + if len(a) == 1 { + return a + } + + sort.Sort(stringPointsByValue(a)) + + mostFreq := 0 + currFreq := 0 + currMode := a[0].Value + mostMode := a[0].Value + mostTime := a[0].Time + currTime := a[0].Time + + for _, p := range a { + if p.Value != currMode { + currFreq = 1 + currMode = p.Value + currTime = p.Time + continue + } + currFreq++ + if mostFreq > currFreq || (mostFreq == currFreq && currTime > mostTime) { + continue + } + mostFreq = currFreq + mostMode = p.Value + mostTime = p.Time + } + + return []StringPoint{{Time: ZeroTime, Value: mostMode}} +} + +// BooleanModeReduceSlice returns the mode value within a window. +func BooleanModeReduceSlice(a []BooleanPoint) []BooleanPoint { + if len(a) == 1 { + return a + } + + trueFreq := 0 + falsFreq := 0 + mostMode := false + + for _, p := range a { + if p.Value { + trueFreq++ + } else { + falsFreq++ + } + } + // In case either of true or false are mode then retuned mode value wont be + // of metric with oldest timestamp + if trueFreq >= falsFreq { + mostMode = true + } + + return []BooleanPoint{{Time: ZeroTime, Value: mostMode}} +} + +// newStddevIterator returns an iterator for operating on a stddev() call. +func newStddevIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatSliceFuncReducer(FloatStddevReduceSlice) + return fn, fn + } + return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, FloatPointEmitter) { + fn := NewIntegerSliceFuncFloatReducer(IntegerStddevReduceSlice) + return fn, fn + } + return &integerReduceFloatIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + default: + return nil, fmt.Errorf("unsupported stddev iterator type: %T", input) + } +} + +// FloatStddevReduceSlice returns the stddev value within a window. +func FloatStddevReduceSlice(a []FloatPoint) []FloatPoint { + // If there is only one point then return 0. + if len(a) < 2 { + return []FloatPoint{{Time: ZeroTime, Nil: true}} + } + + // Calculate the mean. + var mean float64 + var count int + for _, p := range a { + if math.IsNaN(p.Value) { + continue + } + count++ + mean += (p.Value - mean) / float64(count) + } + + // Calculate the variance. + var variance float64 + for _, p := range a { + if math.IsNaN(p.Value) { + continue + } + variance += math.Pow(p.Value-mean, 2) + } + return []FloatPoint{{ + Time: ZeroTime, + Value: math.Sqrt(variance / float64(count-1)), + }} +} + +// IntegerStddevReduceSlice returns the stddev value within a window. 
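The mode reducers above sort the window by value and count equal-value runs, keeping the most frequent value; on a frequency tie the value recorded earlier is kept. A standalone sketch of that run-counting with a simplified point type:

```go
package main

import (
	"fmt"
	"sort"
)

type pt struct {
	Time  int64
	Value float64
}

// mode mirrors FloatModeReduceSlice: sort points by value, count equal-value
// runs, and keep the most frequent value; on a frequency tie the value
// recorded earlier is kept. Assumes a non-empty window.
func mode(a []pt) float64 {
	sort.Slice(a, func(i, j int) bool { return a[i].Value < a[j].Value })

	mostFreq, currFreq := 0, 0
	mostMode, currMode := a[0].Value, a[0].Value
	mostTime, currTime := a[0].Time, a[0].Time

	for _, p := range a {
		if p.Value != currMode {
			currFreq, currMode, currTime = 1, p.Value, p.Time
			continue
		}
		currFreq++
		if mostFreq > currFreq || (mostFreq == currFreq && currTime > mostTime) {
			continue
		}
		mostFreq, mostMode, mostTime = currFreq, p.Value, p.Time
	}
	return mostMode
}

func main() {
	window := []pt{{0, 15}, {1, 10}, {2, 10}, {3, 10}, {4, 21}, {5, 21}}
	fmt.Println(mode(window)) // 10
}
```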
+func IntegerStddevReduceSlice(a []IntegerPoint) []FloatPoint { + // If there is only one point then return 0. + if len(a) < 2 { + return []FloatPoint{{Time: ZeroTime, Nil: true}} + } + + // Calculate the mean. + var mean float64 + var count int + for _, p := range a { + count++ + mean += (float64(p.Value) - mean) / float64(count) + } + + // Calculate the variance. + var variance float64 + for _, p := range a { + variance += math.Pow(float64(p.Value)-mean, 2) + } + return []FloatPoint{{ + Time: ZeroTime, + Value: math.Sqrt(variance / float64(count-1)), + }} +} + +// newSpreadIterator returns an iterator for operating on a spread() call. +func newSpreadIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatSliceFuncReducer(FloatSpreadReduceSlice) + return fn, fn + } + return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerSliceFuncReducer(IntegerSpreadReduceSlice) + return fn, fn + } + return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + default: + return nil, fmt.Errorf("unsupported spread iterator type: %T", input) + } +} + +// FloatSpreadReduceSlice returns the spread value within a window. +func FloatSpreadReduceSlice(a []FloatPoint) []FloatPoint { + // Find min & max values. + min, max := a[0].Value, a[0].Value + for _, p := range a[1:] { + min = math.Min(min, p.Value) + max = math.Max(max, p.Value) + } + return []FloatPoint{{Time: ZeroTime, Value: max - min}} +} + +// IntegerSpreadReduceSlice returns the spread value within a window. +func IntegerSpreadReduceSlice(a []IntegerPoint) []IntegerPoint { + // Find min & max values. + min, max := a[0].Value, a[0].Value + for _, p := range a[1:] { + if p.Value < min { + min = p.Value + } + if p.Value > max { + max = p.Value + } + } + return []IntegerPoint{{Time: ZeroTime, Value: max - min}} +} + +func newTopIterator(input Iterator, opt IteratorOptions, n *IntegerLiteral, tags []int) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + aggregateFn := NewFloatTopReduceSliceFunc(int(n.Val), tags, opt.Interval) + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatSliceFuncReducer(aggregateFn) + return fn, fn + } + return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + case IntegerIterator: + aggregateFn := NewIntegerTopReduceSliceFunc(int(n.Val), tags, opt.Interval) + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerSliceFuncReducer(aggregateFn) + return fn, fn + } + return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + default: + return nil, fmt.Errorf("unsupported top iterator type: %T", input) + } +} + +// NewFloatTopReduceSliceFunc returns the top values within a window. +func NewFloatTopReduceSliceFunc(n int, tags []int, interval Interval) FloatReduceSliceFunc { + return func(a []FloatPoint) []FloatPoint { + // Filter by tags if they exist. + if len(tags) > 0 { + a = filterFloatByUniqueTags(a, tags, func(cur, p *FloatPoint) bool { + return p.Value > cur.Value || (p.Value == cur.Value && p.Time < cur.Time) + }) + } + + // If we ask for more elements than exist, restrict n to be the length of the array. 
+ size := n + if size > len(a) { + size = len(a) + } + + // Construct a heap preferring higher values and breaking ties + // based on the earliest time for a point. + h := floatPointsSortBy(a, func(a, b *FloatPoint) bool { + if a.Value != b.Value { + return a.Value > b.Value + } + return a.Time < b.Time + }) + heap.Init(h) + + // Pop the first n elements and then sort by time. + points := make([]FloatPoint, 0, size) + for i := 0; i < size; i++ { + p := heap.Pop(h).(FloatPoint) + points = append(points, p) + } + + // Either zero out all values or sort the points by time + // depending on if a time interval was given or not. + if !interval.IsZero() { + for i := range points { + points[i].Time = ZeroTime + } + } else { + sort.Stable(floatPointsByTime(points)) + } + return points + } +} + +// NewIntegerTopReduceSliceFunc returns the top values within a window. +func NewIntegerTopReduceSliceFunc(n int, tags []int, interval Interval) IntegerReduceSliceFunc { + return func(a []IntegerPoint) []IntegerPoint { + // Filter by tags if they exist. + if len(tags) > 0 { + a = filterIntegerByUniqueTags(a, tags, func(cur, p *IntegerPoint) bool { + return p.Value > cur.Value || (p.Value == cur.Value && p.Time < cur.Time) + }) + } + + // If we ask for more elements than exist, restrict n to be the length of the array. + size := n + if size > len(a) { + size = len(a) + } + + // Construct a heap preferring higher values and breaking ties + // based on the earliest time for a point. + h := integerPointsSortBy(a, func(a, b *IntegerPoint) bool { + if a.Value != b.Value { + return a.Value > b.Value + } + return a.Time < b.Time + }) + heap.Init(h) + + // Pop the first n elements and then sort by time. + points := make([]IntegerPoint, 0, size) + for i := 0; i < size; i++ { + p := heap.Pop(h).(IntegerPoint) + points = append(points, p) + } + + // Either zero out all values or sort the points by time + // depending on if a time interval was given or not. + if !interval.IsZero() { + for i := range points { + points[i].Time = ZeroTime + } + } else { + sort.Stable(integerPointsByTime(points)) + } + return points + } +} + +func newBottomIterator(input Iterator, opt IteratorOptions, n *IntegerLiteral, tags []int) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + aggregateFn := NewFloatBottomReduceSliceFunc(int(n.Val), tags, opt.Interval) + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatSliceFuncReducer(aggregateFn) + return fn, fn + } + return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + case IntegerIterator: + aggregateFn := NewIntegerBottomReduceSliceFunc(int(n.Val), tags, opt.Interval) + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerSliceFuncReducer(aggregateFn) + return fn, fn + } + return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + default: + return nil, fmt.Errorf("unsupported bottom iterator type: %T", input) + } +} + +// NewFloatBottomReduceSliceFunc returns the bottom values within a window. +func NewFloatBottomReduceSliceFunc(n int, tags []int, interval Interval) FloatReduceSliceFunc { + return func(a []FloatPoint) []FloatPoint { + // Filter by tags if they exist. 
+ if len(tags) > 0 { + a = filterFloatByUniqueTags(a, tags, func(cur, p *FloatPoint) bool { + return p.Value < cur.Value || (p.Value == cur.Value && p.Time < cur.Time) + }) + } + + // If we ask for more elements than exist, restrict n to be the length of the array. + size := n + if size > len(a) { + size = len(a) + } + + // Construct a heap preferring lower values and breaking ties + // based on the earliest time for a point. + h := floatPointsSortBy(a, func(a, b *FloatPoint) bool { + if a.Value != b.Value { + return a.Value < b.Value + } + return a.Time < b.Time + }) + heap.Init(h) + + // Pop the first n elements and then sort by time. + points := make([]FloatPoint, 0, size) + for i := 0; i < size; i++ { + p := heap.Pop(h).(FloatPoint) + points = append(points, p) + } + + // Either zero out all values or sort the points by time + // depending on if a time interval was given or not. + if !interval.IsZero() { + for i := range points { + points[i].Time = ZeroTime + } + } else { + sort.Stable(floatPointsByTime(points)) + } + return points + } +} + +// NewIntegerBottomReduceSliceFunc returns the bottom values within a window. +func NewIntegerBottomReduceSliceFunc(n int, tags []int, interval Interval) IntegerReduceSliceFunc { + return func(a []IntegerPoint) []IntegerPoint { + // Filter by tags if they exist. + if len(tags) > 0 { + a = filterIntegerByUniqueTags(a, tags, func(cur, p *IntegerPoint) bool { + return p.Value < cur.Value || (p.Value == cur.Value && p.Time < cur.Time) + }) + } + + // If we ask for more elements than exist, restrict n to be the length of the array. + size := n + if size > len(a) { + size = len(a) + } + + // Construct a heap preferring lower values and breaking ties + // based on the earliest time for a point. + h := integerPointsSortBy(a, func(a, b *IntegerPoint) bool { + if a.Value != b.Value { + return a.Value < b.Value + } + return a.Time < b.Time + }) + heap.Init(h) + + // Pop the first n elements and then sort by time. + points := make([]IntegerPoint, 0, size) + for i := 0; i < size; i++ { + p := heap.Pop(h).(IntegerPoint) + points = append(points, p) + } + + // Either zero out all values or sort the points by time + // depending on if a time interval was given or not. + if !interval.IsZero() { + for i := range points { + points[i].Time = ZeroTime + } + } else { + sort.Stable(integerPointsByTime(points)) + } + return points + } +} + +func filterFloatByUniqueTags(a []FloatPoint, tags []int, cmpFunc func(cur, p *FloatPoint) bool) []FloatPoint { + pointMap := make(map[string]FloatPoint) + for _, p := range a { + keyBuf := bytes.NewBuffer(nil) + for i, index := range tags { + if i > 0 { + keyBuf.WriteString(",") + } + fmt.Fprintf(keyBuf, "%s", p.Aux[index]) + } + key := keyBuf.String() + + cur, ok := pointMap[key] + if ok { + if cmpFunc(&cur, &p) { + pointMap[key] = p + } + } else { + pointMap[key] = p + } + } + + // Recreate the original array with our new filtered list. 
+ points := make([]FloatPoint, 0, len(pointMap)) + for _, p := range pointMap { + points = append(points, p) + } + return points +} + +func filterIntegerByUniqueTags(a []IntegerPoint, tags []int, cmpFunc func(cur, p *IntegerPoint) bool) []IntegerPoint { + pointMap := make(map[string]IntegerPoint) + for _, p := range a { + keyBuf := bytes.NewBuffer(nil) + for i, index := range tags { + if i > 0 { + keyBuf.WriteString(",") + } + fmt.Fprintf(keyBuf, "%s", p.Aux[index]) + } + key := keyBuf.String() + + cur, ok := pointMap[key] + if ok { + if cmpFunc(&cur, &p) { + pointMap[key] = p + } + } else { + pointMap[key] = p + } + } + + // Recreate the original array with our new filtered list. + points := make([]IntegerPoint, 0, len(pointMap)) + for _, p := range pointMap { + points = append(points, p) + } + return points +} + +// newPercentileIterator returns an iterator for operating on a percentile() call. +func newPercentileIterator(input Iterator, opt IteratorOptions, percentile float64) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + floatPercentileReduceSlice := NewFloatPercentileReduceSliceFunc(percentile) + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatSliceFuncReducer(floatPercentileReduceSlice) + return fn, fn + } + return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + case IntegerIterator: + integerPercentileReduceSlice := NewIntegerPercentileReduceSliceFunc(percentile) + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerSliceFuncReducer(integerPercentileReduceSlice) + return fn, fn + } + return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + default: + return nil, fmt.Errorf("unsupported percentile iterator type: %T", input) + } +} + +// NewFloatPercentileReduceSliceFunc returns the percentile value within a window. +func NewFloatPercentileReduceSliceFunc(percentile float64) FloatReduceSliceFunc { + return func(a []FloatPoint) []FloatPoint { + length := len(a) + i := int(math.Floor(float64(length)*percentile/100.0+0.5)) - 1 + + if i < 0 || i >= length { + return nil + } + + sort.Sort(floatPointsByValue(a)) + return []FloatPoint{{Time: a[i].Time, Value: a[i].Value, Aux: a[i].Aux}} + } +} + +// NewIntegerPercentileReduceSliceFunc returns the percentile value within a window. +func NewIntegerPercentileReduceSliceFunc(percentile float64) IntegerReduceSliceFunc { + return func(a []IntegerPoint) []IntegerPoint { + length := len(a) + i := int(math.Floor(float64(length)*percentile/100.0+0.5)) - 1 + + if i < 0 || i >= length { + return nil + } + + sort.Sort(integerPointsByValue(a)) + return []IntegerPoint{{Time: ZeroTime, Value: a[i].Value, Aux: a[i].Aux}} + } +} + +// newDerivativeIterator returns an iterator for operating on a derivative() call. 
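NewFloatPercentileReduceSliceFunc and its integer counterpart above use a nearest-rank index: the window length times the percentile, rounded, treated as a one-based rank into the window sorted by value, with out-of-range ranks producing no point. A small sketch of that index arithmetic on plain float64 values:

```go
package main

import (
	"fmt"
	"math"
	"sort"
)

// percentile mirrors the reduce-slice functions above: compute the nearest
// rank, sort the window by value, and return the element at that rank. An
// index outside the window yields no result (the real reducer returns nil).
func percentile(values []float64, p float64) (float64, bool) {
	length := len(values)
	i := int(math.Floor(float64(length)*p/100.0+0.5)) - 1
	if i < 0 || i >= length {
		return 0, false
	}
	sort.Float64s(values)
	return values[i], true
}

func main() {
	window := []float64{10, 40, 20, 30}
	fmt.Println(percentile(window, 50)) // 20 true
	fmt.Println(percentile(window, 90)) // 40 true
	fmt.Println(percentile(window, 1))  // 0 false (rounds below the first rank)
}
```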
+func newDerivativeIterator(input Iterator, opt IteratorOptions, interval Interval, isNonNegative bool) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatDerivativeReducer(interval, isNonNegative, opt.Ascending) + return fn, fn + } + return newFloatStreamFloatIterator(input, createFn, opt), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, FloatPointEmitter) { + fn := NewIntegerDerivativeReducer(interval, isNonNegative, opt.Ascending) + return fn, fn + } + return newIntegerStreamFloatIterator(input, createFn, opt), nil + default: + return nil, fmt.Errorf("unsupported derivative iterator type: %T", input) + } +} + +// newDifferenceIterator returns an iterator for operating on a difference() call. +func newDifferenceIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatDifferenceReducer() + return fn, fn + } + return newFloatStreamFloatIterator(input, createFn, opt), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerDifferenceReducer() + return fn, fn + } + return newIntegerStreamIntegerIterator(input, createFn, opt), nil + default: + return nil, fmt.Errorf("unsupported difference iterator type: %T", input) + } +} + +// newElapsedIterator returns an iterator for operating on a elapsed() call. +func newElapsedIterator(input Iterator, opt IteratorOptions, interval Interval) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, IntegerPointEmitter) { + fn := NewFloatElapsedReducer(interval) + return fn, fn + } + return newFloatStreamIntegerIterator(input, createFn, opt), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerElapsedReducer(interval) + return fn, fn + } + return newIntegerStreamIntegerIterator(input, createFn, opt), nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, IntegerPointEmitter) { + fn := NewBooleanElapsedReducer(interval) + return fn, fn + } + return newBooleanStreamIntegerIterator(input, createFn, opt), nil + case StringIterator: + createFn := func() (StringPointAggregator, IntegerPointEmitter) { + fn := NewStringElapsedReducer(interval) + return fn, fn + } + return newStringStreamIntegerIterator(input, createFn, opt), nil + default: + return nil, fmt.Errorf("unsupported elapsed iterator type: %T", input) + } +} + +// newMovingAverageIterator returns an iterator for operating on a moving_average() call. +func newMovingAverageIterator(input Iterator, n int, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatMovingAverageReducer(n) + return fn, fn + } + return newFloatStreamFloatIterator(input, createFn, opt), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, FloatPointEmitter) { + fn := NewIntegerMovingAverageReducer(n) + return fn, fn + } + return newIntegerStreamFloatIterator(input, createFn, opt), nil + default: + return nil, fmt.Errorf("unsupported moving average iterator type: %T", input) + } +} + +// newCumulativeSumIterator returns an iterator for operating on a cumulative_sum() call. 
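The moving_average() constructor above only wires the reducer into a stream iterator; the reducer body itself is not part of this hunk. Assuming the usual semantics of averaging the most recent n values, a sliding-window sketch looks like this (illustrative only, not the actual NewFloatMovingAverageReducer implementation):

```go
package main

import "fmt"

// movingAverage sketches the semantics behind moving_average(field, n):
// each output is the mean of the last n input values. The first output is
// emitted once n values have been seen.
func movingAverage(values []float64, n int) []float64 {
	if n <= 0 || len(values) < n {
		return nil
	}
	out := make([]float64, 0, len(values)-n+1)
	var sum float64
	for i, v := range values {
		sum += v
		if i >= n {
			sum -= values[i-n] // drop the value that left the window
		}
		if i >= n-1 {
			out = append(out, sum/float64(n))
		}
	}
	return out
}

func main() {
	fmt.Println(movingAverage([]float64{1, 2, 3, 4, 5}, 2)) // [1.5 2.5 3.5 4.5]
}
```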
+func newCumulativeSumIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatCumulativeSumReducer() + return fn, fn + } + return newFloatStreamFloatIterator(input, createFn, opt), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerCumulativeSumReducer() + return fn, fn + } + return newIntegerStreamIntegerIterator(input, createFn, opt), nil + default: + return nil, fmt.Errorf("unsupported cumulative sum iterator type: %T", input) + } +} + +// newHoltWintersIterator returns an iterator for operating on a elapsed() call. +func newHoltWintersIterator(input Iterator, opt IteratorOptions, h, m int, includeFitData bool, interval time.Duration) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatHoltWintersReducer(h, m, includeFitData, interval) + return fn, fn + } + return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, FloatPointEmitter) { + fn := NewFloatHoltWintersReducer(h, m, includeFitData, interval) + return fn, fn + } + return &integerReduceFloatIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + default: + return nil, fmt.Errorf("unsupported elapsed iterator type: %T", input) + } +} + +// NewSampleIterator returns an iterator +func NewSampleIterator(input Iterator, opt IteratorOptions, size int) (Iterator, error) { + return newSampleIterator(input, opt, size) +} + +// newSampleIterator returns an iterator +func newSampleIterator(input Iterator, opt IteratorOptions, size int) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatSampleReducer(size) + return fn, fn + } + return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerSampleReducer(size) + return fn, fn + } + return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { + fn := NewBooleanSampleReducer(size) + return fn, fn + } + return &booleanReduceBooleanIterator{input: newBufBooleanIterator(input), opt: opt, create: createFn}, nil + case StringIterator: + createFn := func() (StringPointAggregator, StringPointEmitter) { + fn := NewStringSampleReducer(size) + return fn, fn + } + return &stringReduceStringIterator{input: newBufStringIterator(input), opt: opt, create: createFn}, nil + default: + return nil, fmt.Errorf("unsupported elapsed iterator type: %T", input) + } +} diff -Nru influxdb-0.10.0+dfsg1/influxql/call_iterator_test.go influxdb-1.1.1+dfsg1/influxql/call_iterator_test.go --- influxdb-0.10.0+dfsg1/influxql/call_iterator_test.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/call_iterator_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,944 @@ +package influxql_test + +import ( + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/pkg/deep" +) + +// Ensure that a float 
iterator can be created for a count() call. +func TestCallIterator_Count_Float(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "cpu", Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + + {Name: "cpu", Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Name: "cpu", Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "mem", Time: 23, Value: 10, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`count("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 3, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 5, Value: 1, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "mem", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that an integer iterator can be created for a count() call. +func TestCallIterator_Count_Integer(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "cpu", Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + + {Name: "cpu", Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Name: "cpu", Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "mem", Time: 23, Value: 10, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`count("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 3, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 5, Value: 1, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "mem", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a string iterator can be created for a count() call. 
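The expected output in these tests reflects how points are bucketed by the 5ns interval: a point's window is its timestamp truncated down to a multiple of the interval, which is why the point at t=23 is reported under Time: 20. A sketch of that arithmetic (not the iterator's actual grouping code, and ignoring negative timestamps):

```go
package main

import "fmt"

// bucketStart truncates a point's timestamp down to the start of its
// interval window, matching the Time values in the expected points above.
func bucketStart(t, interval int64) int64 {
	return t - (t % interval)
}

func main() {
	for _, t := range []int64{0, 1, 2, 5, 23} {
		fmt.Printf("t=%d -> window %d\n", t, bucketStart(t, 5))
	}
	// t=0, 1, 2 -> window 0; t=5 -> window 5; t=23 -> window 20
}
```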
+func TestCallIterator_Count_String(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &StringIterator{Points: []influxql.StringPoint{ + {Name: "cpu", Time: 0, Value: "d", Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 1, Value: "c", Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "cpu", Time: 2, Value: "b", Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 1, Value: "b", Tags: ParseTags("region=us-west,host=hostA")}, + + {Name: "cpu", Time: 5, Value: "e", Tags: ParseTags("region=us-east,host=hostA")}, + + {Name: "cpu", Time: 23, Value: "a", Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "mem", Time: 23, Value: "b", Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`count("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 3, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 5, Value: 1, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "mem", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a boolean iterator can be created for a count() call. +func TestCallIterator_Count_Boolean(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &BooleanIterator{Points: []influxql.BooleanPoint{ + {Name: "cpu", Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "cpu", Time: 2, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostA")}, + + {Name: "cpu", Time: 5, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + + {Name: "cpu", Time: 23, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "mem", Time: 23, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`count("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 3, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 5, Value: 1, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "mem", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a float iterator can be created for a min() call. 
+func TestCallIterator_Min_Float(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &FloatIterator{Points: []influxql.FloatPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 4, Value: 12, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + + {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`min("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Time: 1, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 4}}, + {&influxql.FloatPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.FloatPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.FloatPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a integer iterator can be created for a min() call. +func TestCallIterator_Min_Integer(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &IntegerIterator{Points: []influxql.IntegerPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 4, Value: 12, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + + {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`min("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Time: 1, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 4}}, + {&influxql.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.IntegerPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a boolean iterator can be created for a min() call. 
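The min() expectations above imply the selection rule: the smallest value in each host/window group wins, and when several points share that value the earliest timestamp is reported (hostA's first window yields Time: 1, Value: 10 from its four points). A sketch of that rule with a simplified point type; the actual min reducer is not shown in this hunk:

```go
package main

import "fmt"

type pt struct {
	Time  int64
	Value float64
}

// minPoint keeps the smallest value, breaking value ties by the earliest
// timestamp, which matches the expected rows in the min() tests above.
func minPoint(a []pt) pt {
	sel := a[0]
	for _, p := range a[1:] {
		if p.Value < sel.Value || (p.Value == sel.Value && p.Time < sel.Time) {
			sel = p
		}
	}
	return sel
}

func main() {
	// hostA's points in the first 5ns window of TestCallIterator_Min_Float.
	window := []pt{{0, 15}, {2, 10}, {4, 12}, {1, 10}}
	fmt.Println(minPoint(window)) // {1 10}
}
```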
+func TestCallIterator_Min_Boolean(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &BooleanIterator{Points: []influxql.BooleanPoint{ + {Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 2, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostA")}, + + {Time: 5, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 23, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`min("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.BooleanPoint{Time: 2, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.BooleanPoint{Time: 1, Value: false, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.BooleanPoint{Time: 5, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.BooleanPoint{Time: 23, Value: true, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a float iterator can be created for a max() call. +func TestCallIterator_Max_Float(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &FloatIterator{Points: []influxql.FloatPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + + {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`max("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Time: 0, Value: 15, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.FloatPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.FloatPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.FloatPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a integer iterator can be created for a max() call. 
+func TestCallIterator_Max_Integer(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &IntegerIterator{Points: []influxql.IntegerPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + + {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`max("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Time: 0, Value: 15, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.IntegerPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a boolean iterator can be created for a max() call. +func TestCallIterator_Max_Boolean(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &BooleanIterator{Points: []influxql.BooleanPoint{ + {Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 2, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostA")}, + + {Time: 5, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 23, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`max("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.BooleanPoint{Time: 0, Value: true, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.BooleanPoint{Time: 1, Value: false, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.BooleanPoint{Time: 5, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.BooleanPoint{Time: 23, Value: true, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a float iterator can be created for a sum() call. 
+func TestCallIterator_Sum_Float(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &FloatIterator{Points: []influxql.FloatPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + + {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`sum("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Time: 0, Value: 35, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.FloatPoint{Time: 0, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.FloatPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.FloatPoint{Time: 20, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that an integer iterator can be created for a sum() call. +func TestCallIterator_Sum_Integer(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &IntegerIterator{Points: []influxql.IntegerPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + + {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`sum("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Time: 0, Value: 35, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.IntegerPoint{Time: 0, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.IntegerPoint{Time: 20, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a float iterator can be created for a first() call. 
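The sum() expectations above come from grouping points by the host tag and by 5ns window and summing each group: hostA's first window holds 15 + 10 + 10 = 35. A sketch of that grouping with simplified types; the real test drives the influxql iterator instead:

```go
package main

import "fmt"

// key identifies a group the way the test expectations do: by host tag and
// by the start of the 5ns window.
type key struct {
	Host   string
	Window int64
}

func main() {
	type pt struct {
		Time  int64
		Value float64
		Host  string
	}
	points := []pt{
		{0, 15, "hostA"}, {1, 11, "hostB"}, {2, 10, "hostA"}, {1, 10, "hostA"},
		{5, 20, "hostA"},
		{23, 8, "hostB"},
	}

	const interval = 5
	sums := map[key]float64{}
	for _, p := range points {
		sums[key{p.Host, p.Time - p.Time%interval}] += p.Value
	}

	fmt.Println(sums[key{"hostA", 0}])  // 35
	fmt.Println(sums[key{"hostB", 0}])  // 11
	fmt.Println(sums[key{"hostA", 5}])  // 20
	fmt.Println(sums[key{"hostB", 20}]) // 8
}
```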
+func TestCallIterator_First_Float(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &FloatIterator{Points: []influxql.FloatPoint{ + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + + {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`first("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Time: 0, Value: 15, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.FloatPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.FloatPoint{Time: 6, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.FloatPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that an integer iterator can be created for a first() call. +func TestCallIterator_First_Integer(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &IntegerIterator{Points: []influxql.IntegerPoint{ + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + + {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`first("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Time: 0, Value: 15, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Time: 6, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.IntegerPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a string iterator can be created for a first() call. 
+func TestCallIterator_First_String(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &StringIterator{Points: []influxql.StringPoint{ + {Time: 1, Value: "c", Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 2, Value: "b", Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: "d", Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: "b", Tags: ParseTags("region=us-west,host=hostA")}, + + {Time: 6, Value: "e", Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 23, Value: "a", Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`first("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.StringPoint{Time: 0, Value: "d", Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.StringPoint{Time: 1, Value: "c", Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.StringPoint{Time: 6, Value: "e", Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.StringPoint{Time: 23, Value: "a", Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a boolean iterator can be created for a first() call. +func TestCallIterator_First_Boolean(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &BooleanIterator{Points: []influxql.BooleanPoint{ + {Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 2, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: false, Tags: ParseTags("region=us-west,host=hostA")}, + + {Time: 6, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 23, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`first("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.BooleanPoint{Time: 0, Value: true, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.BooleanPoint{Time: 1, Value: true, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.BooleanPoint{Time: 6, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.BooleanPoint{Time: 23, Value: false, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a float iterator can be created for a last() call. 
+func TestCallIterator_Last_Float(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &FloatIterator{Points: []influxql.FloatPoint{ + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + + {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`last("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Time: 2, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.FloatPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.FloatPoint{Time: 6, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.FloatPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that an integer iterator can be created for a last() call. +func TestCallIterator_Last_Integer(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &IntegerIterator{Points: []influxql.IntegerPoint{ + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + + {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`last("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Time: 2, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Time: 6, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.IntegerPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a string iterator can be created for a last() call. 
+func TestCallIterator_Last_String(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &StringIterator{Points: []influxql.StringPoint{ + {Time: 1, Value: "c", Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 2, Value: "b", Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: "d", Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: "b", Tags: ParseTags("region=us-west,host=hostA")}, + + {Time: 6, Value: "e", Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 23, Value: "a", Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`last("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.StringPoint{Time: 2, Value: "b", Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.StringPoint{Time: 1, Value: "c", Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.StringPoint{Time: 6, Value: "e", Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.StringPoint{Time: 23, Value: "a", Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a boolean iterator can be created for a last() call. +func TestCallIterator_Last_Boolean(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &BooleanIterator{Points: []influxql.BooleanPoint{ + {Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 2, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: false, Tags: ParseTags("region=us-west,host=hostA")}, + + {Time: 6, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 23, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`last("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.BooleanPoint{Time: 2, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.BooleanPoint{Time: 1, Value: true, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.BooleanPoint{Time: 6, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.BooleanPoint{Time: 23, Value: false, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a float iterator can be created for a mode() call. 
+func TestCallIterator_Mode_Float(t *testing.T) { + itr, _ := influxql.NewModeIterator(&FloatIterator{Points: []influxql.FloatPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 3, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 4, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 7, Value: 21, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 8, Value: 21, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 22, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 24, Value: 25, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`mode("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Time: 0, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 0}}, + {&influxql.FloatPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 0}}, + {&influxql.FloatPoint{Time: 5, Value: 21, Tags: ParseTags("host=hostA"), Aggregated: 0}}, + {&influxql.FloatPoint{Time: 20, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 0}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a integer iterator can be created for a mode() call. 
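Unlike last(), the expected mode() rows above carry the window start time (0, 1, 5, 20) rather than a point's own timestamp. Within each (host, window) bucket the reducer keeps the most frequent value; a rough sketch of that counting step follows (tie-breaking is not shown, and each bucket in these tests has a unique mode):

    package main

    import "fmt"

    func main() {
        // hostA, window [0,5) from the test above: values 15, 10, 10, 10.
        values := []float64{15, 10, 10, 10}

        counts := make(map[float64]int)
        var mode float64
        best := 0
        for _, v := range values {
            counts[v]++
            if counts[v] > best {
                best, mode = counts[v], v
            }
        }
        fmt.Println(mode) // 10
    }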
+func TestCallIterator_Mode_Integer(t *testing.T) { + itr, _ := influxql.NewModeIterator(&IntegerIterator{Points: []influxql.IntegerPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 3, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 4, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 7, Value: 21, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 8, Value: 21, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 22, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 24, Value: 25, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`mode("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Time: 0, Value: 10, Tags: ParseTags("host=hostA")}}, + {&influxql.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB")}}, + {&influxql.IntegerPoint{Time: 5, Value: 21, Tags: ParseTags("host=hostA")}}, + {&influxql.IntegerPoint{Time: 20, Value: 8, Tags: ParseTags("host=hostB")}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a string iterator can be created for a mode() call. +func TestCallIterator_Mode_String(t *testing.T) { + itr, _ := influxql.NewModeIterator(&StringIterator{Points: []influxql.StringPoint{ + {Time: 0, Value: "15", Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: "11", Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 1, Value: "10", Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 2, Value: "10", Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 3, Value: "10", Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 4, Value: "10", Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 6, Value: "20", Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 7, Value: "21", Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 7, Value: "21", Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 22, Value: "8", Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: "8", Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 24, Value: "25", Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`mode("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.StringPoint{Time: 0, Value: "10", Tags: ParseTags("host=hostA")}}, + {&influxql.StringPoint{Time: 1, Value: "11", Tags: ParseTags("host=hostB")}}, + {&influxql.StringPoint{Time: 5, Value: "21", Tags: ParseTags("host=hostA")}}, + {&influxql.StringPoint{Time: 20, Value: "8", Tags: ParseTags("host=hostB")}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a boolean iterator can be created for 
a modBooleanl. +func TestCallIterator_Mode_Boolean(t *testing.T) { + itr, _ := influxql.NewModeIterator(&BooleanIterator{Points: []influxql.BooleanPoint{ + {Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 2, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 3, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 4, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 6, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 7, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 8, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 22, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 24, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`mode("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.BooleanPoint{Time: 0, Value: true, Tags: ParseTags("host=hostA")}}, + {&influxql.BooleanPoint{Time: 1, Value: false, Tags: ParseTags("host=hostB")}}, + {&influxql.BooleanPoint{Time: 5, Value: false, Tags: ParseTags("host=hostA")}}, + {&influxql.BooleanPoint{Time: 20, Value: true, Tags: ParseTags("host=hostB")}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +func TestNewCallIterator_UnsupportedExprName(t *testing.T) { + _, err := influxql.NewCallIterator( + &FloatIterator{}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`foobar("value")`), + }, + ) + + if err == nil || err.Error() != "unsupported function call: foobar" { + t.Errorf("unexpected error: %s", err) + } +} + +func BenchmarkCountIterator_1K(b *testing.B) { benchmarkCountIterator(b, 1000) } +func BenchmarkCountIterator_100K(b *testing.B) { benchmarkCountIterator(b, 100000) } +func BenchmarkCountIterator_1M(b *testing.B) { benchmarkCountIterator(b, 1000000) } + +func benchmarkCountIterator(b *testing.B, pointN int) { + benchmarkCallIterator(b, influxql.IteratorOptions{ + Expr: MustParseExpr("count(value)"), + StartTime: influxql.MinTime, + EndTime: influxql.MaxTime, + }, pointN) +} + +func benchmarkCallIterator(b *testing.B, opt influxql.IteratorOptions, pointN int) { + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + // Create a lightweight point generator. + p := influxql.FloatPoint{Name: "cpu", Value: 100} + input := FloatPointGenerator{ + N: pointN, + Fn: func(i int) *influxql.FloatPoint { return &p }, + } + + // Execute call against input. + itr, err := influxql.NewCallIterator(&input, opt) + if err != nil { + b.Fatal(err) + } + influxql.DrainIterator(itr) + } +} + +func BenchmarkSampleIterator_1k(b *testing.B) { benchmarkSampleIterator(b, 1000) } +func BenchmarkSampleIterator_100k(b *testing.B) { benchmarkSampleIterator(b, 100000) } +func BenchmarkSampleIterator_1M(b *testing.B) { benchmarkSampleIterator(b, 1000000) } + +func benchmarkSampleIterator(b *testing.B, pointN int) { + b.ReportAllocs() + + // Create a lightweight point generator. 
+ p := influxql.FloatPoint{Name: "cpu"} + input := FloatPointGenerator{ + N: pointN, + Fn: func(i int) *influxql.FloatPoint { + p.Value = float64(i) + return &p + }, + } + + for i := 0; i < b.N; i++ { + // Execute call against input. + itr, err := influxql.NewSampleIterator(&input, influxql.IteratorOptions{}, 100) + if err != nil { + b.Fatal(err) + } + influxql.DrainIterator(itr) + } +} + +func BenchmarkDistinctIterator_1K(b *testing.B) { benchmarkDistinctIterator(b, 1000) } +func BenchmarkDistinctIterator_100K(b *testing.B) { benchmarkDistinctIterator(b, 100000) } +func BenchmarkDistinctIterator_1M(b *testing.B) { benchmarkDistinctIterator(b, 1000000) } + +func benchmarkDistinctIterator(b *testing.B, pointN int) { + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + // Create a lightweight point generator. + p := influxql.FloatPoint{Name: "cpu"} + input := FloatPointGenerator{ + N: pointN, + Fn: func(i int) *influxql.FloatPoint { + p.Value = float64(i % 10) + return &p + }, + } + + // Execute call against input. + itr, err := influxql.NewDistinctIterator(&input, influxql.IteratorOptions{}) + if err != nil { + b.Fatal(err) + } + influxql.DrainIterator(itr) + } +} + +func BenchmarkModeIterator_1K(b *testing.B) { benchmarkModeIterator(b, 1000) } +func BenchmarkModeIterator_100K(b *testing.B) { benchmarkModeIterator(b, 100000) } +func BenchmarkModeIterator_1M(b *testing.B) { benchmarkModeIterator(b, 1000000) } + +func benchmarkModeIterator(b *testing.B, pointN int) { + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + // Create a lightweight point generator. + p := influxql.FloatPoint{Name: "cpu"} + input := FloatPointGenerator{ + N: pointN, + Fn: func(i int) *influxql.FloatPoint { + p.Value = float64(10) + return &p + }, + } + + // Execute call against input. 
+ itr, err := influxql.NewModeIterator(&input, influxql.IteratorOptions{}) + if err != nil { + b.Fatal(err) + } + influxql.DrainIterator(itr) + } +} + +type FloatPointGenerator struct { + i int + N int + Fn func(i int) *influxql.FloatPoint +} + +func (g *FloatPointGenerator) Close() error { return nil } +func (g *FloatPointGenerator) Stats() influxql.IteratorStats { return influxql.IteratorStats{} } + +func (g *FloatPointGenerator) Next() (*influxql.FloatPoint, error) { + if g.i == g.N { + return nil, nil + } + p := g.Fn(g.i) + g.i++ + return p, nil +} diff -Nru influxdb-0.10.0+dfsg1/influxql/cast.go influxdb-1.1.1+dfsg1/influxql/cast.go --- influxdb-0.10.0+dfsg1/influxql/cast.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/cast.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,41 @@ +package influxql + +func castToFloat(v interface{}) float64 { + switch v := v.(type) { + case float64: + return v + case int64: + return float64(v) + default: + return float64(0) + } +} + +func castToInteger(v interface{}) int64 { + switch v := v.(type) { + case float64: + return int64(v) + case int64: + return v + default: + return int64(0) + } +} + +func castToString(v interface{}) string { + switch v := v.(type) { + case string: + return v + default: + return "" + } +} + +func castToBoolean(v interface{}) bool { + switch v := v.(type) { + case bool: + return v + default: + return false + } +} diff -Nru influxdb-0.10.0+dfsg1/influxql/doc.go influxdb-1.1.1+dfsg1/influxql/doc.go --- influxdb-0.10.0+dfsg1/influxql/doc.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/doc.go 2016-12-06 21:36:15.000000000 +0000 @@ -16,11 +16,6 @@ SELECT value FROM cpu_load WHERE host = 'influxdb.com' -Two or more series can be combined into a single query and executed together: - - SELECT cpu0.value + cpu1.value - FROM cpu_load AS cpu0 INNER JOIN cpu_load cpu1 ON cpu0.host = cpu1.host - Limits and ordering can be set on selection queries as well: SELECT value FROM cpu_load LIMIT 100 ORDER DESC; diff -Nru influxdb-0.10.0+dfsg1/influxql/emitter.go influxdb-1.1.1+dfsg1/influxql/emitter.go --- influxdb-0.10.0+dfsg1/influxql/emitter.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/emitter.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,215 @@ +package influxql + +import ( + "fmt" + "time" + + "github.com/influxdata/influxdb/models" +) + +// Emitter groups values together by name, +type Emitter struct { + buf []Point + itrs []Iterator + ascending bool + chunkSize int + + tags Tags + row *models.Row + + // The columns to attach to each row. + Columns []string + + // Removes the "time" column from output. + // Used for meta queries where time does not apply. + OmitTime bool +} + +// NewEmitter returns a new instance of Emitter that pulls from itrs. +func NewEmitter(itrs []Iterator, ascending bool, chunkSize int) *Emitter { + return &Emitter{ + buf: make([]Point, len(itrs)), + itrs: itrs, + ascending: ascending, + chunkSize: chunkSize, + } +} + +// Close closes the underlying iterators. +func (e *Emitter) Close() error { + return Iterators(e.itrs).Close() +} + +// Emit returns the next row from the iterators. +func (e *Emitter) Emit() (*models.Row, error) { + // Immediately end emission if there are no iterators. + if len(e.itrs) == 0 { + return nil, nil + } + + // Continually read from iterators until they are exhausted. + for { + // Fill buffer. Return row if no more points remain. 
+ t, name, tags, err := e.loadBuf() + if err != nil { + return nil, err + } else if t == ZeroTime { + row := e.row + e.row = nil + return row, nil + } + + // Read next set of values from all iterators at a given time/name/tags. + // If no values are returned then return row. + values := e.readAt(t, name, tags) + if values == nil { + row := e.row + e.row = nil + return row, nil + } + + // If there's no row yet then create one. + // If the name and tags match the existing row, append to that row if + // the number of values doesn't exceed the chunk size. + // Otherwise return existing row and add values to next emitted row. + if e.row == nil { + e.createRow(name, tags, values) + } else if e.row.Name == name && e.tags.Equals(&tags) && (e.chunkSize <= 0 || len(e.row.Values) < e.chunkSize) { + e.row.Values = append(e.row.Values, values) + } else { + row := e.row + e.createRow(name, tags, values) + return row, nil + } + } +} + +// loadBuf reads in points into empty buffer slots. +// Returns the next time/name/tags to emit for. +func (e *Emitter) loadBuf() (t int64, name string, tags Tags, err error) { + t = ZeroTime + + for i := range e.itrs { + // Load buffer, if empty. + if e.buf[i] == nil { + e.buf[i], err = e.readIterator(e.itrs[i]) + if err != nil { + break + } + } + + // Skip if buffer is empty. + p := e.buf[i] + if p == nil { + continue + } + itrTime, itrName, itrTags := p.time(), p.name(), p.tags() + + // Initialize range values if not set. + if t == ZeroTime { + t, name, tags = itrTime, itrName, itrTags + continue + } + + // Update range values if lower and emitter is in time ascending order. + if e.ascending { + if (itrName < name) || (itrName == name && itrTags.ID() < tags.ID()) || (itrName == name && itrTags.ID() == tags.ID() && itrTime < t) { + t, name, tags = itrTime, itrName, itrTags + } + continue + } + + // Update range values if higher and emitter is in time descending order. + if (itrName < name) || (itrName == name && itrTags.ID() < tags.ID()) || (itrName == name && itrTags.ID() == tags.ID() && itrTime < t) { + t, name, tags = itrTime, itrName, itrTags + } + } + + return +} + +// createRow creates a new row attached to the emitter. +func (e *Emitter) createRow(name string, tags Tags, values []interface{}) { + e.tags = tags + e.row = &models.Row{ + Name: name, + Tags: tags.KeyValues(), + Columns: e.Columns, + Values: [][]interface{}{values}, + } +} + +// readAt returns the next slice of values from the iterators at time/name/tags. +// Returns nil values once the iterators are exhausted. +func (e *Emitter) readAt(t int64, name string, tags Tags) []interface{} { + // If time is included then move colums over by one. + offset := 1 + if e.OmitTime { + offset = 0 + } + + values := make([]interface{}, len(e.itrs)+offset) + if !e.OmitTime { + values[0] = time.Unix(0, t).UTC() + } + + for i, p := range e.buf { + // Skip if buffer is empty. + if p == nil { + values[i+offset] = nil + continue + } + + // Skip point if it doesn't match time/name/tags. + pTags := p.tags() + if p.time() != t || p.name() != name || !pTags.Equals(&tags) { + values[i+offset] = nil + continue + } + + // Read point value. + values[i+offset] = p.value() + + // Clear buffer. + e.buf[i] = nil + } + + return values +} + +// readIterator reads the next point from itr. 
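loadBuf above decides which buffered point to emit next by comparing measurement name, then tags.ID(), then timestamp. A minimal standalone sketch of that ascending comparison, using plain strings in place of the Tags type (the less helper is hypothetical, shown only to spell out the ordering):

    package main

    import "fmt"

    // less mirrors the ascending branch of loadBuf: order by measurement name,
    // then by tag ID, then by timestamp.
    func less(aName, aTagID string, aTime int64, bName, bTagID string, bTime int64) bool {
        if aName != bName {
            return aName < bName
        }
        if aTagID != bTagID {
            return aTagID < bTagID
        }
        return aTime < bTime
    }

    func main() {
        // "cpu" sorts before "mem", so the cpu point is emitted first regardless of time.
        fmt.Println(less("cpu", "region=west", 5, "mem", "", 0)) // true
    }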
+func (e *Emitter) readIterator(itr Iterator) (Point, error) { + if itr == nil { + return nil, nil + } + + switch itr := itr.(type) { + case FloatIterator: + if p, err := itr.Next(); err != nil { + return nil, err + } else if p != nil { + return p, nil + } + case IntegerIterator: + if p, err := itr.Next(); err != nil { + return nil, err + } else if p != nil { + return p, nil + } + case StringIterator: + if p, err := itr.Next(); err != nil { + return nil, err + } else if p != nil { + return p, nil + } + case BooleanIterator: + if p, err := itr.Next(); err != nil { + return nil, err + } else if p != nil { + return p, nil + } + default: + panic(fmt.Sprintf("unsupported iterator: %T", itr)) + } + return nil, nil +} diff -Nru influxdb-0.10.0+dfsg1/influxql/emitter_test.go influxdb-1.1.1+dfsg1/influxql/emitter_test.go --- influxdb-0.10.0+dfsg1/influxql/emitter_test.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/emitter_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,124 @@ +package influxql_test + +import ( + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/pkg/deep" +) + +// Ensure the emitter can group iterators together into rows. +func TestEmitter_Emit(t *testing.T) { + // Build an emitter that pulls from two iterators. + e := influxql.NewEmitter([]influxql.Iterator{ + &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west"), Time: 1, Value: 2}, + }}, + &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west"), Time: 1, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=north"), Time: 0, Value: 4}, + {Name: "mem", Time: 4, Value: 5}, + }}, + }, true, 0) + e.Columns = []string{"col1", "col2"} + + // Verify the cpu region=west is emitted first. + if row, err := e.Emit(); err != nil { + t.Fatalf("unexpected error(0): %s", err) + } else if !deep.Equal(row, &models.Row{ + Name: "cpu", + Tags: map[string]string{"region": "west"}, + Columns: []string{"col1", "col2"}, + Values: [][]interface{}{ + {time.Unix(0, 0).UTC(), float64(1), nil}, + {time.Unix(0, 1).UTC(), float64(2), float64(4)}, + }, + }) { + t.Fatalf("unexpected row(0): %s", spew.Sdump(row)) + } + + // Verify the cpu region=north is emitted next. + if row, err := e.Emit(); err != nil { + t.Fatalf("unexpected error(1): %s", err) + } else if !deep.Equal(row, &models.Row{ + Name: "cpu", + Tags: map[string]string{"region": "north"}, + Columns: []string{"col1", "col2"}, + Values: [][]interface{}{ + {time.Unix(0, 0).UTC(), nil, float64(4)}, + }, + }) { + t.Fatalf("unexpected row(1): %s", spew.Sdump(row)) + } + + // Verify the mem series is emitted last. + if row, err := e.Emit(); err != nil { + t.Fatalf("unexpected error(2): %s", err) + } else if !deep.Equal(row, &models.Row{ + Name: "mem", + Columns: []string{"col1", "col2"}, + Values: [][]interface{}{ + {time.Unix(0, 4).UTC(), nil, float64(5)}, + }, + }) { + t.Fatalf("unexpected row(2): %s", spew.Sdump(row)) + } + + // Verify EOF. + if row, err := e.Emit(); err != nil { + t.Fatalf("unexpected error(eof): %s", err) + } else if row != nil { + t.Fatalf("unexpected eof: %s", spew.Sdump(row)) + } +} + +// Ensure the emitter will limit the chunked output from a series. +func TestEmitter_ChunkSize(t *testing.T) { + // Build an emitter that pulls from one iterator with multiple points in the same series. 
+ e := influxql.NewEmitter([]influxql.Iterator{ + &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west"), Time: 1, Value: 2}, + }}, + }, true, 1) + e.Columns = []string{"col1"} + + // Verify the cpu region=west is emitted first. + if row, err := e.Emit(); err != nil { + t.Fatalf("unexpected error(0): %s", err) + } else if !deep.Equal(row, &models.Row{ + Name: "cpu", + Tags: map[string]string{"region": "west"}, + Columns: []string{"col1"}, + Values: [][]interface{}{ + {time.Unix(0, 0).UTC(), float64(1)}, + }, + }) { + t.Fatalf("unexpected row(0): %s", spew.Sdump(row)) + } + + // Verify the cpu region=north is emitted next. + if row, err := e.Emit(); err != nil { + t.Fatalf("unexpected error(1): %s", err) + } else if !deep.Equal(row, &models.Row{ + Name: "cpu", + Tags: map[string]string{"region": "west"}, + Columns: []string{"col1"}, + Values: [][]interface{}{ + {time.Unix(0, 1).UTC(), float64(2)}, + }, + }) { + t.Fatalf("unexpected row(1): %s", spew.Sdump(row)) + } + + // Verify EOF. + if row, err := e.Emit(); err != nil { + t.Fatalf("unexpected error(eof): %s", err) + } else if row != nil { + t.Fatalf("unexpected eof: %s", spew.Sdump(row)) + } +} diff -Nru influxdb-0.10.0+dfsg1/influxql/functions.gen.go influxdb-1.1.1+dfsg1/influxql/functions.gen.go --- influxdb-0.10.0+dfsg1/influxql/functions.gen.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/functions.gen.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,1669 @@ +// Generated by tmpl +// https://github.com/benbjohnson/tmpl +// +// DO NOT EDIT! +// Source: functions.gen.go.tmpl + +package influxql + +import ( + "math/rand" + "sort" + "time" +) + +// FloatPointAggregator aggregates points to produce a single point. +type FloatPointAggregator interface { + AggregateFloat(p *FloatPoint) +} + +// FloatBulkPointAggregator aggregates multiple points at a time. +type FloatBulkPointAggregator interface { + AggregateFloatBulk(points []FloatPoint) +} + +// AggregateFloatPoints feeds a slice of FloatPoint into an +// aggregator. If the aggregator is a FloatBulkPointAggregator, it will +// use the AggregateBulk method. +func AggregateFloatPoints(a FloatPointAggregator, points []FloatPoint) { + switch a := a.(type) { + case FloatBulkPointAggregator: + a.AggregateFloatBulk(points) + default: + for _, p := range points { + a.AggregateFloat(&p) + } + } +} + +// FloatPointEmitter produces a single point from an aggregate. +type FloatPointEmitter interface { + Emit() []FloatPoint +} + +// FloatReduceFunc is the function called by a FloatPoint reducer. +type FloatReduceFunc func(prev *FloatPoint, curr *FloatPoint) (t int64, v float64, aux []interface{}) + +// FloatFuncReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type FloatFuncReducer struct { + prev *FloatPoint + fn FloatReduceFunc +} + +// NewFloatFuncReducer creates a new FloatFuncFloatReducer. +func NewFloatFuncReducer(fn FloatReduceFunc, prev *FloatPoint) *FloatFuncReducer { + return &FloatFuncReducer{fn: fn, prev: prev} +} + +// AggregateFloat takes a FloatPoint and invokes the reduce function with the +// current and new point to modify the current point. 
+func (r *FloatFuncReducer) AggregateFloat(p *FloatPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &FloatPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateFloat. +func (r *FloatFuncReducer) Emit() []FloatPoint { + return []FloatPoint{*r.prev} +} + +// FloatReduceSliceFunc is the function called by a FloatPoint reducer. +type FloatReduceSliceFunc func(a []FloatPoint) []FloatPoint + +// FloatSliceFuncReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type FloatSliceFuncReducer struct { + points []FloatPoint + fn FloatReduceSliceFunc +} + +// NewFloatSliceFuncReducer creates a new FloatSliceFuncReducer. +func NewFloatSliceFuncReducer(fn FloatReduceSliceFunc) *FloatSliceFuncReducer { + return &FloatSliceFuncReducer{fn: fn} +} + +// AggregateFloat copies the FloatPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *FloatSliceFuncReducer) AggregateFloat(p *FloatPoint) { + r.points = append(r.points, *p) +} + +// AggregateFloatBulk performs a bulk copy of FloatPoints into the internal slice. +// This is a more efficient version of calling AggregateFloat on each point. +func (r *FloatSliceFuncReducer) AggregateFloatBulk(points []FloatPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *FloatSliceFuncReducer) Emit() []FloatPoint { + return r.fn(r.points) +} + +// FloatReduceIntegerFunc is the function called by a FloatPoint reducer. +type FloatReduceIntegerFunc func(prev *IntegerPoint, curr *FloatPoint) (t int64, v int64, aux []interface{}) + +// FloatFuncIntegerReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type FloatFuncIntegerReducer struct { + prev *IntegerPoint + fn FloatReduceIntegerFunc +} + +// NewFloatFuncIntegerReducer creates a new FloatFuncIntegerReducer. +func NewFloatFuncIntegerReducer(fn FloatReduceIntegerFunc, prev *IntegerPoint) *FloatFuncIntegerReducer { + return &FloatFuncIntegerReducer{fn: fn, prev: prev} +} + +// AggregateFloat takes a FloatPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *FloatFuncIntegerReducer) AggregateFloat(p *FloatPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &IntegerPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateFloat. +func (r *FloatFuncIntegerReducer) Emit() []IntegerPoint { + return []IntegerPoint{*r.prev} +} + +// FloatReduceIntegerSliceFunc is the function called by a FloatPoint reducer. +type FloatReduceIntegerSliceFunc func(a []FloatPoint) []IntegerPoint + +// FloatSliceFuncIntegerReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. 
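The generated FuncReducer types are all driven the same way: construct one with a reduce function, feed points through the Aggregate method, then call Emit. The Aggregated counter seen above is bumped once per raw point, or by the point's own Aggregated count when the input is itself an aggregate. A minimal sketch using the exported constructors from the hunk above and a hypothetical minimum-keeping reduce function:

    package main

    import (
        "fmt"

        "github.com/influxdata/influxdb/influxql"
    )

    func main() {
        // minFn keeps the smallest value seen; it follows the FloatReduceFunc
        // signature above and must tolerate a nil prev on the first call.
        minFn := func(prev, curr *influxql.FloatPoint) (int64, float64, []interface{}) {
            if prev == nil || curr.Value < prev.Value {
                return curr.Time, curr.Value, curr.Aux
            }
            return prev.Time, prev.Value, prev.Aux
        }

        r := influxql.NewFloatFuncReducer(minFn, nil)
        for _, p := range []influxql.FloatPoint{
            {Time: 1, Value: 7},
            {Time: 2, Value: 3},
            {Time: 3, Value: 9},
        } {
            p := p
            r.AggregateFloat(&p)
        }
        fmt.Println(r.Emit()) // one point: Time 2, Value 3, Aggregated 3
    }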
+type FloatSliceFuncIntegerReducer struct { + points []FloatPoint + fn FloatReduceIntegerSliceFunc +} + +// NewFloatSliceFuncIntegerReducer creates a new FloatSliceFuncIntegerReducer. +func NewFloatSliceFuncIntegerReducer(fn FloatReduceIntegerSliceFunc) *FloatSliceFuncIntegerReducer { + return &FloatSliceFuncIntegerReducer{fn: fn} +} + +// AggregateFloat copies the FloatPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *FloatSliceFuncIntegerReducer) AggregateFloat(p *FloatPoint) { + r.points = append(r.points, *p) +} + +// AggregateFloatBulk performs a bulk copy of FloatPoints into the internal slice. +// This is a more efficient version of calling AggregateFloat on each point. +func (r *FloatSliceFuncIntegerReducer) AggregateFloatBulk(points []FloatPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *FloatSliceFuncIntegerReducer) Emit() []IntegerPoint { + return r.fn(r.points) +} + +// FloatReduceStringFunc is the function called by a FloatPoint reducer. +type FloatReduceStringFunc func(prev *StringPoint, curr *FloatPoint) (t int64, v string, aux []interface{}) + +// FloatFuncStringReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type FloatFuncStringReducer struct { + prev *StringPoint + fn FloatReduceStringFunc +} + +// NewFloatFuncStringReducer creates a new FloatFuncStringReducer. +func NewFloatFuncStringReducer(fn FloatReduceStringFunc, prev *StringPoint) *FloatFuncStringReducer { + return &FloatFuncStringReducer{fn: fn, prev: prev} +} + +// AggregateFloat takes a FloatPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *FloatFuncStringReducer) AggregateFloat(p *FloatPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &StringPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateFloat. +func (r *FloatFuncStringReducer) Emit() []StringPoint { + return []StringPoint{*r.prev} +} + +// FloatReduceStringSliceFunc is the function called by a FloatPoint reducer. +type FloatReduceStringSliceFunc func(a []FloatPoint) []StringPoint + +// FloatSliceFuncStringReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type FloatSliceFuncStringReducer struct { + points []FloatPoint + fn FloatReduceStringSliceFunc +} + +// NewFloatSliceFuncStringReducer creates a new FloatSliceFuncStringReducer. +func NewFloatSliceFuncStringReducer(fn FloatReduceStringSliceFunc) *FloatSliceFuncStringReducer { + return &FloatSliceFuncStringReducer{fn: fn} +} + +// AggregateFloat copies the FloatPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *FloatSliceFuncStringReducer) AggregateFloat(p *FloatPoint) { + r.points = append(r.points, *p) +} + +// AggregateFloatBulk performs a bulk copy of FloatPoints into the internal slice. +// This is a more efficient version of calling AggregateFloat on each point. +func (r *FloatSliceFuncStringReducer) AggregateFloatBulk(points []FloatPoint) { + r.points = append(r.points, points...) 
+} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *FloatSliceFuncStringReducer) Emit() []StringPoint { + return r.fn(r.points) +} + +// FloatReduceBooleanFunc is the function called by a FloatPoint reducer. +type FloatReduceBooleanFunc func(prev *BooleanPoint, curr *FloatPoint) (t int64, v bool, aux []interface{}) + +// FloatFuncBooleanReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type FloatFuncBooleanReducer struct { + prev *BooleanPoint + fn FloatReduceBooleanFunc +} + +// NewFloatFuncBooleanReducer creates a new FloatFuncBooleanReducer. +func NewFloatFuncBooleanReducer(fn FloatReduceBooleanFunc, prev *BooleanPoint) *FloatFuncBooleanReducer { + return &FloatFuncBooleanReducer{fn: fn, prev: prev} +} + +// AggregateFloat takes a FloatPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *FloatFuncBooleanReducer) AggregateFloat(p *FloatPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &BooleanPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateFloat. +func (r *FloatFuncBooleanReducer) Emit() []BooleanPoint { + return []BooleanPoint{*r.prev} +} + +// FloatReduceBooleanSliceFunc is the function called by a FloatPoint reducer. +type FloatReduceBooleanSliceFunc func(a []FloatPoint) []BooleanPoint + +// FloatSliceFuncBooleanReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type FloatSliceFuncBooleanReducer struct { + points []FloatPoint + fn FloatReduceBooleanSliceFunc +} + +// NewFloatSliceFuncBooleanReducer creates a new FloatSliceFuncBooleanReducer. +func NewFloatSliceFuncBooleanReducer(fn FloatReduceBooleanSliceFunc) *FloatSliceFuncBooleanReducer { + return &FloatSliceFuncBooleanReducer{fn: fn} +} + +// AggregateFloat copies the FloatPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *FloatSliceFuncBooleanReducer) AggregateFloat(p *FloatPoint) { + r.points = append(r.points, *p) +} + +// AggregateFloatBulk performs a bulk copy of FloatPoints into the internal slice. +// This is a more efficient version of calling AggregateFloat on each point. +func (r *FloatSliceFuncBooleanReducer) AggregateFloatBulk(points []FloatPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *FloatSliceFuncBooleanReducer) Emit() []BooleanPoint { + return r.fn(r.points) +} + +// FloatDistinctReducer returns the distinct points in a series. +type FloatDistinctReducer struct { + m map[float64]FloatPoint +} + +// NewFloatDistinctReducer creates a new FloatDistinctReducer. +func NewFloatDistinctReducer() *FloatDistinctReducer { + return &FloatDistinctReducer{m: make(map[float64]FloatPoint)} +} + +// AggregateFloat aggregates a point into the reducer. +func (r *FloatDistinctReducer) AggregateFloat(p *FloatPoint) { + if _, ok := r.m[p.Value]; !ok { + r.m[p.Value] = *p + } +} + +// Emit emits the distinct points that have been aggregated into the reducer. 
+func (r *FloatDistinctReducer) Emit() []FloatPoint { + points := make([]FloatPoint, 0, len(r.m)) + for _, p := range r.m { + points = append(points, FloatPoint{Time: p.Time, Value: p.Value}) + } + sort.Sort(floatPoints(points)) + return points +} + +// FloatElapsedReducer calculates the elapsed of the aggregated points. +type FloatElapsedReducer struct { + unitConversion int64 + prev FloatPoint + curr FloatPoint +} + +// NewFloatElapsedReducer creates a new FloatElapsedReducer. +func NewFloatElapsedReducer(interval Interval) *FloatElapsedReducer { + return &FloatElapsedReducer{ + unitConversion: int64(interval.Duration), + prev: FloatPoint{Nil: true}, + curr: FloatPoint{Nil: true}, + } +} + +// AggregateFloat aggregates a point into the reducer and updates the current window. +func (r *FloatElapsedReducer) AggregateFloat(p *FloatPoint) { + r.prev = r.curr + r.curr = *p +} + +// Emit emits the elapsed of the reducer at the current point. +func (r *FloatElapsedReducer) Emit() []IntegerPoint { + if !r.prev.Nil { + elapsed := (r.curr.Time - r.prev.Time) / r.unitConversion + return []IntegerPoint{ + {Time: r.curr.Time, Value: elapsed}, + } + } + return nil +} + +// FloatSampleReduces implements a reservoir sampling to calculate a random subset of points +type FloatSampleReducer struct { + count int // how many points we've iterated over + rng *rand.Rand // random number generator for each reducer + + points floatPoints // the reservoir +} + +// NewFloatSampleReducer creates a new FloatSampleReducer +func NewFloatSampleReducer(size int) *FloatSampleReducer { + return &FloatSampleReducer{ + rng: rand.New(rand.NewSource(time.Now().UnixNano())), // seed with current time as suggested by https://golang.org/pkg/math/rand/ + points: make(floatPoints, size), + } +} + +// AggregateFloat aggregates a point into the reducer. +func (r *FloatSampleReducer) AggregateFloat(p *FloatPoint) { + r.count++ + // Fill the reservoir with the first n points + if r.count-1 < len(r.points) { + r.points[r.count-1] = *p + return + } + + // Generate a random integer between 1 and the count and + // if that number is less than the length of the slice + // replace the point at that index rnd with p. + rnd := rand.Intn(r.count) + if rnd < len(r.points) { + r.points[rnd] = *p + } +} + +// Emit emits the reservoir sample as many points. +func (r *FloatSampleReducer) Emit() []FloatPoint { + min := len(r.points) + if r.count < min { + min = r.count + } + pts := r.points[:min] + sort.Sort(pts) + return pts +} + +// IntegerPointAggregator aggregates points to produce a single point. +type IntegerPointAggregator interface { + AggregateInteger(p *IntegerPoint) +} + +// IntegerBulkPointAggregator aggregates multiple points at a time. +type IntegerBulkPointAggregator interface { + AggregateIntegerBulk(points []IntegerPoint) +} + +// AggregateIntegerPoints feeds a slice of IntegerPoint into an +// aggregator. If the aggregator is a IntegerBulkPointAggregator, it will +// use the AggregateBulk method. +func AggregateIntegerPoints(a IntegerPointAggregator, points []IntegerPoint) { + switch a := a.(type) { + case IntegerBulkPointAggregator: + a.AggregateIntegerBulk(points) + default: + for _, p := range points { + a.AggregateInteger(&p) + } + } +} + +// IntegerPointEmitter produces a single point from an aggregate. +type IntegerPointEmitter interface { + Emit() []IntegerPoint +} + +// IntegerReduceFloatFunc is the function called by a IntegerPoint reducer. 
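FloatSampleReducer above is a reservoir sample in the style of Algorithm R: the first size points fill the reservoir, and each later point replaces a random slot with probability size/count, so every input point is retained with equal probability. A minimal usage sketch against the constructor shown above (the output is random by construction):

    package main

    import (
        "fmt"

        "github.com/influxdata/influxdb/influxql"
    )

    func main() {
        // Sample 2 of 5 points; each input point has a 2/5 chance of surviving.
        r := influxql.NewFloatSampleReducer(2)
        for i := 0; i < 5; i++ {
            r.AggregateFloat(&influxql.FloatPoint{Time: int64(i), Value: float64(i * 10)})
        }
        fmt.Println(r.Emit()) // two of the five input points, chosen at random
    }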
+type IntegerReduceFloatFunc func(prev *FloatPoint, curr *IntegerPoint) (t int64, v float64, aux []interface{}) + +// IntegerFuncFloatReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type IntegerFuncFloatReducer struct { + prev *FloatPoint + fn IntegerReduceFloatFunc +} + +// NewIntegerFuncFloatReducer creates a new IntegerFuncFloatReducer. +func NewIntegerFuncFloatReducer(fn IntegerReduceFloatFunc, prev *FloatPoint) *IntegerFuncFloatReducer { + return &IntegerFuncFloatReducer{fn: fn, prev: prev} +} + +// AggregateInteger takes a IntegerPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *IntegerFuncFloatReducer) AggregateInteger(p *IntegerPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &FloatPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateInteger. +func (r *IntegerFuncFloatReducer) Emit() []FloatPoint { + return []FloatPoint{*r.prev} +} + +// IntegerReduceFloatSliceFunc is the function called by a IntegerPoint reducer. +type IntegerReduceFloatSliceFunc func(a []IntegerPoint) []FloatPoint + +// IntegerSliceFuncFloatReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type IntegerSliceFuncFloatReducer struct { + points []IntegerPoint + fn IntegerReduceFloatSliceFunc +} + +// NewIntegerSliceFuncFloatReducer creates a new IntegerSliceFuncFloatReducer. +func NewIntegerSliceFuncFloatReducer(fn IntegerReduceFloatSliceFunc) *IntegerSliceFuncFloatReducer { + return &IntegerSliceFuncFloatReducer{fn: fn} +} + +// AggregateInteger copies the IntegerPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *IntegerSliceFuncFloatReducer) AggregateInteger(p *IntegerPoint) { + r.points = append(r.points, *p) +} + +// AggregateIntegerBulk performs a bulk copy of IntegerPoints into the internal slice. +// This is a more efficient version of calling AggregateInteger on each point. +func (r *IntegerSliceFuncFloatReducer) AggregateIntegerBulk(points []IntegerPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *IntegerSliceFuncFloatReducer) Emit() []FloatPoint { + return r.fn(r.points) +} + +// IntegerReduceFunc is the function called by a IntegerPoint reducer. +type IntegerReduceFunc func(prev *IntegerPoint, curr *IntegerPoint) (t int64, v int64, aux []interface{}) + +// IntegerFuncReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type IntegerFuncReducer struct { + prev *IntegerPoint + fn IntegerReduceFunc +} + +// NewIntegerFuncReducer creates a new IntegerFuncIntegerReducer. +func NewIntegerFuncReducer(fn IntegerReduceFunc, prev *IntegerPoint) *IntegerFuncReducer { + return &IntegerFuncReducer{fn: fn, prev: prev} +} + +// AggregateInteger takes a IntegerPoint and invokes the reduce function with the +// current and new point to modify the current point. 
+func (r *IntegerFuncReducer) AggregateInteger(p *IntegerPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &IntegerPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateInteger. +func (r *IntegerFuncReducer) Emit() []IntegerPoint { + return []IntegerPoint{*r.prev} +} + +// IntegerReduceSliceFunc is the function called by a IntegerPoint reducer. +type IntegerReduceSliceFunc func(a []IntegerPoint) []IntegerPoint + +// IntegerSliceFuncReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type IntegerSliceFuncReducer struct { + points []IntegerPoint + fn IntegerReduceSliceFunc +} + +// NewIntegerSliceFuncReducer creates a new IntegerSliceFuncReducer. +func NewIntegerSliceFuncReducer(fn IntegerReduceSliceFunc) *IntegerSliceFuncReducer { + return &IntegerSliceFuncReducer{fn: fn} +} + +// AggregateInteger copies the IntegerPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *IntegerSliceFuncReducer) AggregateInteger(p *IntegerPoint) { + r.points = append(r.points, *p) +} + +// AggregateIntegerBulk performs a bulk copy of IntegerPoints into the internal slice. +// This is a more efficient version of calling AggregateInteger on each point. +func (r *IntegerSliceFuncReducer) AggregateIntegerBulk(points []IntegerPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *IntegerSliceFuncReducer) Emit() []IntegerPoint { + return r.fn(r.points) +} + +// IntegerReduceStringFunc is the function called by a IntegerPoint reducer. +type IntegerReduceStringFunc func(prev *StringPoint, curr *IntegerPoint) (t int64, v string, aux []interface{}) + +// IntegerFuncStringReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type IntegerFuncStringReducer struct { + prev *StringPoint + fn IntegerReduceStringFunc +} + +// NewIntegerFuncStringReducer creates a new IntegerFuncStringReducer. +func NewIntegerFuncStringReducer(fn IntegerReduceStringFunc, prev *StringPoint) *IntegerFuncStringReducer { + return &IntegerFuncStringReducer{fn: fn, prev: prev} +} + +// AggregateInteger takes a IntegerPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *IntegerFuncStringReducer) AggregateInteger(p *IntegerPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &StringPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateInteger. +func (r *IntegerFuncStringReducer) Emit() []StringPoint { + return []StringPoint{*r.prev} +} + +// IntegerReduceStringSliceFunc is the function called by a IntegerPoint reducer. +type IntegerReduceStringSliceFunc func(a []IntegerPoint) []StringPoint + +// IntegerSliceFuncStringReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. 
+type IntegerSliceFuncStringReducer struct { + points []IntegerPoint + fn IntegerReduceStringSliceFunc +} + +// NewIntegerSliceFuncStringReducer creates a new IntegerSliceFuncStringReducer. +func NewIntegerSliceFuncStringReducer(fn IntegerReduceStringSliceFunc) *IntegerSliceFuncStringReducer { + return &IntegerSliceFuncStringReducer{fn: fn} +} + +// AggregateInteger copies the IntegerPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *IntegerSliceFuncStringReducer) AggregateInteger(p *IntegerPoint) { + r.points = append(r.points, *p) +} + +// AggregateIntegerBulk performs a bulk copy of IntegerPoints into the internal slice. +// This is a more efficient version of calling AggregateInteger on each point. +func (r *IntegerSliceFuncStringReducer) AggregateIntegerBulk(points []IntegerPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *IntegerSliceFuncStringReducer) Emit() []StringPoint { + return r.fn(r.points) +} + +// IntegerReduceBooleanFunc is the function called by a IntegerPoint reducer. +type IntegerReduceBooleanFunc func(prev *BooleanPoint, curr *IntegerPoint) (t int64, v bool, aux []interface{}) + +// IntegerFuncBooleanReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type IntegerFuncBooleanReducer struct { + prev *BooleanPoint + fn IntegerReduceBooleanFunc +} + +// NewIntegerFuncBooleanReducer creates a new IntegerFuncBooleanReducer. +func NewIntegerFuncBooleanReducer(fn IntegerReduceBooleanFunc, prev *BooleanPoint) *IntegerFuncBooleanReducer { + return &IntegerFuncBooleanReducer{fn: fn, prev: prev} +} + +// AggregateInteger takes a IntegerPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *IntegerFuncBooleanReducer) AggregateInteger(p *IntegerPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &BooleanPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateInteger. +func (r *IntegerFuncBooleanReducer) Emit() []BooleanPoint { + return []BooleanPoint{*r.prev} +} + +// IntegerReduceBooleanSliceFunc is the function called by a IntegerPoint reducer. +type IntegerReduceBooleanSliceFunc func(a []IntegerPoint) []BooleanPoint + +// IntegerSliceFuncBooleanReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type IntegerSliceFuncBooleanReducer struct { + points []IntegerPoint + fn IntegerReduceBooleanSliceFunc +} + +// NewIntegerSliceFuncBooleanReducer creates a new IntegerSliceFuncBooleanReducer. +func NewIntegerSliceFuncBooleanReducer(fn IntegerReduceBooleanSliceFunc) *IntegerSliceFuncBooleanReducer { + return &IntegerSliceFuncBooleanReducer{fn: fn} +} + +// AggregateInteger copies the IntegerPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *IntegerSliceFuncBooleanReducer) AggregateInteger(p *IntegerPoint) { + r.points = append(r.points, *p) +} + +// AggregateIntegerBulk performs a bulk copy of IntegerPoints into the internal slice. 
+// This is a more efficient version of calling AggregateInteger on each point. +func (r *IntegerSliceFuncBooleanReducer) AggregateIntegerBulk(points []IntegerPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *IntegerSliceFuncBooleanReducer) Emit() []BooleanPoint { + return r.fn(r.points) +} + +// IntegerDistinctReducer returns the distinct points in a series. +type IntegerDistinctReducer struct { + m map[int64]IntegerPoint +} + +// NewIntegerDistinctReducer creates a new IntegerDistinctReducer. +func NewIntegerDistinctReducer() *IntegerDistinctReducer { + return &IntegerDistinctReducer{m: make(map[int64]IntegerPoint)} +} + +// AggregateInteger aggregates a point into the reducer. +func (r *IntegerDistinctReducer) AggregateInteger(p *IntegerPoint) { + if _, ok := r.m[p.Value]; !ok { + r.m[p.Value] = *p + } +} + +// Emit emits the distinct points that have been aggregated into the reducer. +func (r *IntegerDistinctReducer) Emit() []IntegerPoint { + points := make([]IntegerPoint, 0, len(r.m)) + for _, p := range r.m { + points = append(points, IntegerPoint{Time: p.Time, Value: p.Value}) + } + sort.Sort(integerPoints(points)) + return points +} + +// IntegerElapsedReducer calculates the elapsed of the aggregated points. +type IntegerElapsedReducer struct { + unitConversion int64 + prev IntegerPoint + curr IntegerPoint +} + +// NewIntegerElapsedReducer creates a new IntegerElapsedReducer. +func NewIntegerElapsedReducer(interval Interval) *IntegerElapsedReducer { + return &IntegerElapsedReducer{ + unitConversion: int64(interval.Duration), + prev: IntegerPoint{Nil: true}, + curr: IntegerPoint{Nil: true}, + } +} + +// AggregateInteger aggregates a point into the reducer and updates the current window. +func (r *IntegerElapsedReducer) AggregateInteger(p *IntegerPoint) { + r.prev = r.curr + r.curr = *p +} + +// Emit emits the elapsed of the reducer at the current point. +func (r *IntegerElapsedReducer) Emit() []IntegerPoint { + if !r.prev.Nil { + elapsed := (r.curr.Time - r.prev.Time) / r.unitConversion + return []IntegerPoint{ + {Time: r.curr.Time, Value: elapsed}, + } + } + return nil +} + +// IntegerSampleReduces implements a reservoir sampling to calculate a random subset of points +type IntegerSampleReducer struct { + count int // how many points we've iterated over + rng *rand.Rand // random number generator for each reducer + + points integerPoints // the reservoir +} + +// NewIntegerSampleReducer creates a new IntegerSampleReducer +func NewIntegerSampleReducer(size int) *IntegerSampleReducer { + return &IntegerSampleReducer{ + rng: rand.New(rand.NewSource(time.Now().UnixNano())), // seed with current time as suggested by https://golang.org/pkg/math/rand/ + points: make(integerPoints, size), + } +} + +// AggregateInteger aggregates a point into the reducer. +func (r *IntegerSampleReducer) AggregateInteger(p *IntegerPoint) { + r.count++ + // Fill the reservoir with the first n points + if r.count-1 < len(r.points) { + r.points[r.count-1] = *p + return + } + + // Generate a random integer between 1 and the count and + // if that number is less than the length of the slice + // replace the point at that index rnd with p. + rnd := rand.Intn(r.count) + if rnd < len(r.points) { + r.points[rnd] = *p + } +} + +// Emit emits the reservoir sample as many points. 
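The ElapsedReducer variants emit the gap between the two most recent points, divided (integer division) by the interval supplied as the unit. With a millisecond unit, consecutive points at 0ns and 3,500,000ns therefore yield an elapsed value of 3. A minimal sketch against the generated integer variant shown above; if fewer than two points have been aggregated, Emit returns nil:

    package main

    import (
        "fmt"
        "time"

        "github.com/influxdata/influxdb/influxql"
    )

    func main() {
        // Unit is one millisecond, so elapsed = (3500000 - 0) / 1000000 = 3.
        r := influxql.NewIntegerElapsedReducer(influxql.Interval{Duration: time.Millisecond})
        r.AggregateInteger(&influxql.IntegerPoint{Time: 0, Value: 10})
        r.AggregateInteger(&influxql.IntegerPoint{Time: 3500000, Value: 20})
        fmt.Println(r.Emit()) // one point: Time 3500000, Value 3
    }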
+func (r *IntegerSampleReducer) Emit() []IntegerPoint { + min := len(r.points) + if r.count < min { + min = r.count + } + pts := r.points[:min] + sort.Sort(pts) + return pts +} + +// StringPointAggregator aggregates points to produce a single point. +type StringPointAggregator interface { + AggregateString(p *StringPoint) +} + +// StringBulkPointAggregator aggregates multiple points at a time. +type StringBulkPointAggregator interface { + AggregateStringBulk(points []StringPoint) +} + +// AggregateStringPoints feeds a slice of StringPoint into an +// aggregator. If the aggregator is a StringBulkPointAggregator, it will +// use the AggregateBulk method. +func AggregateStringPoints(a StringPointAggregator, points []StringPoint) { + switch a := a.(type) { + case StringBulkPointAggregator: + a.AggregateStringBulk(points) + default: + for _, p := range points { + a.AggregateString(&p) + } + } +} + +// StringPointEmitter produces a single point from an aggregate. +type StringPointEmitter interface { + Emit() []StringPoint +} + +// StringReduceFloatFunc is the function called by a StringPoint reducer. +type StringReduceFloatFunc func(prev *FloatPoint, curr *StringPoint) (t int64, v float64, aux []interface{}) + +// StringFuncFloatReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type StringFuncFloatReducer struct { + prev *FloatPoint + fn StringReduceFloatFunc +} + +// NewStringFuncFloatReducer creates a new StringFuncFloatReducer. +func NewStringFuncFloatReducer(fn StringReduceFloatFunc, prev *FloatPoint) *StringFuncFloatReducer { + return &StringFuncFloatReducer{fn: fn, prev: prev} +} + +// AggregateString takes a StringPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *StringFuncFloatReducer) AggregateString(p *StringPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &FloatPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateString. +func (r *StringFuncFloatReducer) Emit() []FloatPoint { + return []FloatPoint{*r.prev} +} + +// StringReduceFloatSliceFunc is the function called by a StringPoint reducer. +type StringReduceFloatSliceFunc func(a []StringPoint) []FloatPoint + +// StringSliceFuncFloatReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type StringSliceFuncFloatReducer struct { + points []StringPoint + fn StringReduceFloatSliceFunc +} + +// NewStringSliceFuncFloatReducer creates a new StringSliceFuncFloatReducer. +func NewStringSliceFuncFloatReducer(fn StringReduceFloatSliceFunc) *StringSliceFuncFloatReducer { + return &StringSliceFuncFloatReducer{fn: fn} +} + +// AggregateString copies the StringPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *StringSliceFuncFloatReducer) AggregateString(p *StringPoint) { + r.points = append(r.points, *p) +} + +// AggregateStringBulk performs a bulk copy of StringPoints into the internal slice. +// This is a more efficient version of calling AggregateString on each point. +func (r *StringSliceFuncFloatReducer) AggregateStringBulk(points []StringPoint) { + r.points = append(r.points, points...) 
+} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *StringSliceFuncFloatReducer) Emit() []FloatPoint { + return r.fn(r.points) +} + +// StringReduceIntegerFunc is the function called by a StringPoint reducer. +type StringReduceIntegerFunc func(prev *IntegerPoint, curr *StringPoint) (t int64, v int64, aux []interface{}) + +// StringFuncIntegerReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type StringFuncIntegerReducer struct { + prev *IntegerPoint + fn StringReduceIntegerFunc +} + +// NewStringFuncIntegerReducer creates a new StringFuncIntegerReducer. +func NewStringFuncIntegerReducer(fn StringReduceIntegerFunc, prev *IntegerPoint) *StringFuncIntegerReducer { + return &StringFuncIntegerReducer{fn: fn, prev: prev} +} + +// AggregateString takes a StringPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *StringFuncIntegerReducer) AggregateString(p *StringPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &IntegerPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateString. +func (r *StringFuncIntegerReducer) Emit() []IntegerPoint { + return []IntegerPoint{*r.prev} +} + +// StringReduceIntegerSliceFunc is the function called by a StringPoint reducer. +type StringReduceIntegerSliceFunc func(a []StringPoint) []IntegerPoint + +// StringSliceFuncIntegerReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type StringSliceFuncIntegerReducer struct { + points []StringPoint + fn StringReduceIntegerSliceFunc +} + +// NewStringSliceFuncIntegerReducer creates a new StringSliceFuncIntegerReducer. +func NewStringSliceFuncIntegerReducer(fn StringReduceIntegerSliceFunc) *StringSliceFuncIntegerReducer { + return &StringSliceFuncIntegerReducer{fn: fn} +} + +// AggregateString copies the StringPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *StringSliceFuncIntegerReducer) AggregateString(p *StringPoint) { + r.points = append(r.points, *p) +} + +// AggregateStringBulk performs a bulk copy of StringPoints into the internal slice. +// This is a more efficient version of calling AggregateString on each point. +func (r *StringSliceFuncIntegerReducer) AggregateStringBulk(points []StringPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *StringSliceFuncIntegerReducer) Emit() []IntegerPoint { + return r.fn(r.points) +} + +// StringReduceFunc is the function called by a StringPoint reducer. +type StringReduceFunc func(prev *StringPoint, curr *StringPoint) (t int64, v string, aux []interface{}) + +// StringFuncReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type StringFuncReducer struct { + prev *StringPoint + fn StringReduceFunc +} + +// NewStringFuncReducer creates a new StringFuncStringReducer. 
+func NewStringFuncReducer(fn StringReduceFunc, prev *StringPoint) *StringFuncReducer { + return &StringFuncReducer{fn: fn, prev: prev} +} + +// AggregateString takes a StringPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *StringFuncReducer) AggregateString(p *StringPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &StringPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateString. +func (r *StringFuncReducer) Emit() []StringPoint { + return []StringPoint{*r.prev} +} + +// StringReduceSliceFunc is the function called by a StringPoint reducer. +type StringReduceSliceFunc func(a []StringPoint) []StringPoint + +// StringSliceFuncReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type StringSliceFuncReducer struct { + points []StringPoint + fn StringReduceSliceFunc +} + +// NewStringSliceFuncReducer creates a new StringSliceFuncReducer. +func NewStringSliceFuncReducer(fn StringReduceSliceFunc) *StringSliceFuncReducer { + return &StringSliceFuncReducer{fn: fn} +} + +// AggregateString copies the StringPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *StringSliceFuncReducer) AggregateString(p *StringPoint) { + r.points = append(r.points, *p) +} + +// AggregateStringBulk performs a bulk copy of StringPoints into the internal slice. +// This is a more efficient version of calling AggregateString on each point. +func (r *StringSliceFuncReducer) AggregateStringBulk(points []StringPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *StringSliceFuncReducer) Emit() []StringPoint { + return r.fn(r.points) +} + +// StringReduceBooleanFunc is the function called by a StringPoint reducer. +type StringReduceBooleanFunc func(prev *BooleanPoint, curr *StringPoint) (t int64, v bool, aux []interface{}) + +// StringFuncBooleanReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type StringFuncBooleanReducer struct { + prev *BooleanPoint + fn StringReduceBooleanFunc +} + +// NewStringFuncBooleanReducer creates a new StringFuncBooleanReducer. +func NewStringFuncBooleanReducer(fn StringReduceBooleanFunc, prev *BooleanPoint) *StringFuncBooleanReducer { + return &StringFuncBooleanReducer{fn: fn, prev: prev} +} + +// AggregateString takes a StringPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *StringFuncBooleanReducer) AggregateString(p *StringPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &BooleanPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateString. +func (r *StringFuncBooleanReducer) Emit() []BooleanPoint { + return []BooleanPoint{*r.prev} +} + +// StringReduceBooleanSliceFunc is the function called by a StringPoint reducer. 
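[Editor's illustration — not part of the diff] A minimal usage sketch of the slice-backed reducers defined here, showing how AggregateStringPoints takes the bulk path when the reducer also implements StringBulkPointAggregator. The example package and the identity reduce function are assumptions for illustration only.

package example

import "github.com/influxdata/influxdb/influxql"

// stringDispatchExample feeds points through AggregateStringPoints. Because
// StringSliceFuncReducer implements StringBulkPointAggregator, the helper
// appends the whole slice in a single AggregateStringBulk call instead of
// looping point by point.
func stringDispatchExample() []influxql.StringPoint {
	points := []influxql.StringPoint{{Time: 1, Value: "a"}, {Time: 2, Value: "b"}}
	r := influxql.NewStringSliceFuncReducer(func(a []influxql.StringPoint) []influxql.StringPoint {
		return a // identity reduce: emit the collected points unchanged
	})
	influxql.AggregateStringPoints(r, points)
	return r.Emit()
}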
+type StringReduceBooleanSliceFunc func(a []StringPoint) []BooleanPoint + +// StringSliceFuncBooleanReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type StringSliceFuncBooleanReducer struct { + points []StringPoint + fn StringReduceBooleanSliceFunc +} + +// NewStringSliceFuncBooleanReducer creates a new StringSliceFuncBooleanReducer. +func NewStringSliceFuncBooleanReducer(fn StringReduceBooleanSliceFunc) *StringSliceFuncBooleanReducer { + return &StringSliceFuncBooleanReducer{fn: fn} +} + +// AggregateString copies the StringPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *StringSliceFuncBooleanReducer) AggregateString(p *StringPoint) { + r.points = append(r.points, *p) +} + +// AggregateStringBulk performs a bulk copy of StringPoints into the internal slice. +// This is a more efficient version of calling AggregateString on each point. +func (r *StringSliceFuncBooleanReducer) AggregateStringBulk(points []StringPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *StringSliceFuncBooleanReducer) Emit() []BooleanPoint { + return r.fn(r.points) +} + +// StringDistinctReducer returns the distinct points in a series. +type StringDistinctReducer struct { + m map[string]StringPoint +} + +// NewStringDistinctReducer creates a new StringDistinctReducer. +func NewStringDistinctReducer() *StringDistinctReducer { + return &StringDistinctReducer{m: make(map[string]StringPoint)} +} + +// AggregateString aggregates a point into the reducer. +func (r *StringDistinctReducer) AggregateString(p *StringPoint) { + if _, ok := r.m[p.Value]; !ok { + r.m[p.Value] = *p + } +} + +// Emit emits the distinct points that have been aggregated into the reducer. +func (r *StringDistinctReducer) Emit() []StringPoint { + points := make([]StringPoint, 0, len(r.m)) + for _, p := range r.m { + points = append(points, StringPoint{Time: p.Time, Value: p.Value}) + } + sort.Sort(stringPoints(points)) + return points +} + +// StringElapsedReducer calculates the elapsed of the aggregated points. +type StringElapsedReducer struct { + unitConversion int64 + prev StringPoint + curr StringPoint +} + +// NewStringElapsedReducer creates a new StringElapsedReducer. +func NewStringElapsedReducer(interval Interval) *StringElapsedReducer { + return &StringElapsedReducer{ + unitConversion: int64(interval.Duration), + prev: StringPoint{Nil: true}, + curr: StringPoint{Nil: true}, + } +} + +// AggregateString aggregates a point into the reducer and updates the current window. +func (r *StringElapsedReducer) AggregateString(p *StringPoint) { + r.prev = r.curr + r.curr = *p +} + +// Emit emits the elapsed of the reducer at the current point. 
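[Editor's illustration — not part of the diff] A sketch of how the elapsed reducer is typically driven: Aggregate and Emit alternate per point, Emit returns nothing for the first point and afterwards one IntegerPoint whose value is the time gap in units of the interval. The driver loop and sample data below are assumptions, not code from this package.

package example

import (
	"time"

	"github.com/influxdata/influxdb/influxql"
)

// elapsedExample returns the gaps between consecutive points (3, then 2),
// expressed in seconds because the interval is one second.
func elapsedExample() []influxql.IntegerPoint {
	r := influxql.NewStringElapsedReducer(influxql.Interval{Duration: time.Second})
	var out []influxql.IntegerPoint
	for _, p := range []influxql.StringPoint{
		{Time: 0, Value: "a"},
		{Time: 3 * int64(time.Second), Value: "b"},
		{Time: 5 * int64(time.Second), Value: "c"},
	} {
		r.AggregateString(&p)
		out = append(out, r.Emit()...) // nil for the first point
	}
	return out
}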
+func (r *StringElapsedReducer) Emit() []IntegerPoint { + if !r.prev.Nil { + elapsed := (r.curr.Time - r.prev.Time) / r.unitConversion + return []IntegerPoint{ + {Time: r.curr.Time, Value: elapsed}, + } + } + return nil +} + +// StringSampleReduces implements a reservoir sampling to calculate a random subset of points +type StringSampleReducer struct { + count int // how many points we've iterated over + rng *rand.Rand // random number generator for each reducer + + points stringPoints // the reservoir +} + +// NewStringSampleReducer creates a new StringSampleReducer +func NewStringSampleReducer(size int) *StringSampleReducer { + return &StringSampleReducer{ + rng: rand.New(rand.NewSource(time.Now().UnixNano())), // seed with current time as suggested by https://golang.org/pkg/math/rand/ + points: make(stringPoints, size), + } +} + +// AggregateString aggregates a point into the reducer. +func (r *StringSampleReducer) AggregateString(p *StringPoint) { + r.count++ + // Fill the reservoir with the first n points + if r.count-1 < len(r.points) { + r.points[r.count-1] = *p + return + } + + // Generate a random integer between 1 and the count and + // if that number is less than the length of the slice + // replace the point at that index rnd with p. + rnd := rand.Intn(r.count) + if rnd < len(r.points) { + r.points[rnd] = *p + } +} + +// Emit emits the reservoir sample as many points. +func (r *StringSampleReducer) Emit() []StringPoint { + min := len(r.points) + if r.count < min { + min = r.count + } + pts := r.points[:min] + sort.Sort(pts) + return pts +} + +// BooleanPointAggregator aggregates points to produce a single point. +type BooleanPointAggregator interface { + AggregateBoolean(p *BooleanPoint) +} + +// BooleanBulkPointAggregator aggregates multiple points at a time. +type BooleanBulkPointAggregator interface { + AggregateBooleanBulk(points []BooleanPoint) +} + +// AggregateBooleanPoints feeds a slice of BooleanPoint into an +// aggregator. If the aggregator is a BooleanBulkPointAggregator, it will +// use the AggregateBulk method. +func AggregateBooleanPoints(a BooleanPointAggregator, points []BooleanPoint) { + switch a := a.(type) { + case BooleanBulkPointAggregator: + a.AggregateBooleanBulk(points) + default: + for _, p := range points { + a.AggregateBoolean(&p) + } + } +} + +// BooleanPointEmitter produces a single point from an aggregate. +type BooleanPointEmitter interface { + Emit() []BooleanPoint +} + +// BooleanReduceFloatFunc is the function called by a BooleanPoint reducer. +type BooleanReduceFloatFunc func(prev *FloatPoint, curr *BooleanPoint) (t int64, v float64, aux []interface{}) + +// BooleanFuncFloatReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type BooleanFuncFloatReducer struct { + prev *FloatPoint + fn BooleanReduceFloatFunc +} + +// NewBooleanFuncFloatReducer creates a new BooleanFuncFloatReducer. +func NewBooleanFuncFloatReducer(fn BooleanReduceFloatFunc, prev *FloatPoint) *BooleanFuncFloatReducer { + return &BooleanFuncFloatReducer{fn: fn, prev: prev} +} + +// AggregateBoolean takes a BooleanPoint and invokes the reduce function with the +// current and new point to modify the current point. 
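[Editor's illustration — not part of the diff] The sample reducers above implement classic reservoir sampling (Algorithm R). A standalone sketch of the same scheme, detached from the point types, may make the probabilities easier to see; the helper below is illustrative only.

package example

import "math/rand"

// reservoirSample keeps a uniform random sample of k values from a stream:
// the first k values fill the reservoir, and the n-th value (1-based) then
// replaces a random slot with probability k/n. rand.Intn(n) returns an
// integer in [0, n).
func reservoirSample(values []string, k int) []string {
	reservoir := make([]string, 0, k)
	for i, v := range values {
		if i < k {
			reservoir = append(reservoir, v)
			continue
		}
		if j := rand.Intn(i + 1); j < k {
			reservoir[j] = v
		}
	}
	return reservoir
}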
+func (r *BooleanFuncFloatReducer) AggregateBoolean(p *BooleanPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &FloatPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateBoolean. +func (r *BooleanFuncFloatReducer) Emit() []FloatPoint { + return []FloatPoint{*r.prev} +} + +// BooleanReduceFloatSliceFunc is the function called by a BooleanPoint reducer. +type BooleanReduceFloatSliceFunc func(a []BooleanPoint) []FloatPoint + +// BooleanSliceFuncFloatReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type BooleanSliceFuncFloatReducer struct { + points []BooleanPoint + fn BooleanReduceFloatSliceFunc +} + +// NewBooleanSliceFuncFloatReducer creates a new BooleanSliceFuncFloatReducer. +func NewBooleanSliceFuncFloatReducer(fn BooleanReduceFloatSliceFunc) *BooleanSliceFuncFloatReducer { + return &BooleanSliceFuncFloatReducer{fn: fn} +} + +// AggregateBoolean copies the BooleanPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *BooleanSliceFuncFloatReducer) AggregateBoolean(p *BooleanPoint) { + r.points = append(r.points, *p) +} + +// AggregateBooleanBulk performs a bulk copy of BooleanPoints into the internal slice. +// This is a more efficient version of calling AggregateBoolean on each point. +func (r *BooleanSliceFuncFloatReducer) AggregateBooleanBulk(points []BooleanPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *BooleanSliceFuncFloatReducer) Emit() []FloatPoint { + return r.fn(r.points) +} + +// BooleanReduceIntegerFunc is the function called by a BooleanPoint reducer. +type BooleanReduceIntegerFunc func(prev *IntegerPoint, curr *BooleanPoint) (t int64, v int64, aux []interface{}) + +// BooleanFuncIntegerReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type BooleanFuncIntegerReducer struct { + prev *IntegerPoint + fn BooleanReduceIntegerFunc +} + +// NewBooleanFuncIntegerReducer creates a new BooleanFuncIntegerReducer. +func NewBooleanFuncIntegerReducer(fn BooleanReduceIntegerFunc, prev *IntegerPoint) *BooleanFuncIntegerReducer { + return &BooleanFuncIntegerReducer{fn: fn, prev: prev} +} + +// AggregateBoolean takes a BooleanPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *BooleanFuncIntegerReducer) AggregateBoolean(p *BooleanPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &IntegerPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateBoolean. +func (r *BooleanFuncIntegerReducer) Emit() []IntegerPoint { + return []IntegerPoint{*r.prev} +} + +// BooleanReduceIntegerSliceFunc is the function called by a BooleanPoint reducer. 
+type BooleanReduceIntegerSliceFunc func(a []BooleanPoint) []IntegerPoint + +// BooleanSliceFuncIntegerReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type BooleanSliceFuncIntegerReducer struct { + points []BooleanPoint + fn BooleanReduceIntegerSliceFunc +} + +// NewBooleanSliceFuncIntegerReducer creates a new BooleanSliceFuncIntegerReducer. +func NewBooleanSliceFuncIntegerReducer(fn BooleanReduceIntegerSliceFunc) *BooleanSliceFuncIntegerReducer { + return &BooleanSliceFuncIntegerReducer{fn: fn} +} + +// AggregateBoolean copies the BooleanPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *BooleanSliceFuncIntegerReducer) AggregateBoolean(p *BooleanPoint) { + r.points = append(r.points, *p) +} + +// AggregateBooleanBulk performs a bulk copy of BooleanPoints into the internal slice. +// This is a more efficient version of calling AggregateBoolean on each point. +func (r *BooleanSliceFuncIntegerReducer) AggregateBooleanBulk(points []BooleanPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *BooleanSliceFuncIntegerReducer) Emit() []IntegerPoint { + return r.fn(r.points) +} + +// BooleanReduceStringFunc is the function called by a BooleanPoint reducer. +type BooleanReduceStringFunc func(prev *StringPoint, curr *BooleanPoint) (t int64, v string, aux []interface{}) + +// BooleanFuncStringReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type BooleanFuncStringReducer struct { + prev *StringPoint + fn BooleanReduceStringFunc +} + +// NewBooleanFuncStringReducer creates a new BooleanFuncStringReducer. +func NewBooleanFuncStringReducer(fn BooleanReduceStringFunc, prev *StringPoint) *BooleanFuncStringReducer { + return &BooleanFuncStringReducer{fn: fn, prev: prev} +} + +// AggregateBoolean takes a BooleanPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *BooleanFuncStringReducer) AggregateBoolean(p *BooleanPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &StringPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateBoolean. +func (r *BooleanFuncStringReducer) Emit() []StringPoint { + return []StringPoint{*r.prev} +} + +// BooleanReduceStringSliceFunc is the function called by a BooleanPoint reducer. +type BooleanReduceStringSliceFunc func(a []BooleanPoint) []StringPoint + +// BooleanSliceFuncStringReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type BooleanSliceFuncStringReducer struct { + points []BooleanPoint + fn BooleanReduceStringSliceFunc +} + +// NewBooleanSliceFuncStringReducer creates a new BooleanSliceFuncStringReducer. +func NewBooleanSliceFuncStringReducer(fn BooleanReduceStringSliceFunc) *BooleanSliceFuncStringReducer { + return &BooleanSliceFuncStringReducer{fn: fn} +} + +// AggregateBoolean copies the BooleanPoint into the internal slice to be passed +// to the reduce function when Emit is called. 
+func (r *BooleanSliceFuncStringReducer) AggregateBoolean(p *BooleanPoint) { + r.points = append(r.points, *p) +} + +// AggregateBooleanBulk performs a bulk copy of BooleanPoints into the internal slice. +// This is a more efficient version of calling AggregateBoolean on each point. +func (r *BooleanSliceFuncStringReducer) AggregateBooleanBulk(points []BooleanPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *BooleanSliceFuncStringReducer) Emit() []StringPoint { + return r.fn(r.points) +} + +// BooleanReduceFunc is the function called by a BooleanPoint reducer. +type BooleanReduceFunc func(prev *BooleanPoint, curr *BooleanPoint) (t int64, v bool, aux []interface{}) + +// BooleanFuncReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type BooleanFuncReducer struct { + prev *BooleanPoint + fn BooleanReduceFunc +} + +// NewBooleanFuncReducer creates a new BooleanFuncBooleanReducer. +func NewBooleanFuncReducer(fn BooleanReduceFunc, prev *BooleanPoint) *BooleanFuncReducer { + return &BooleanFuncReducer{fn: fn, prev: prev} +} + +// AggregateBoolean takes a BooleanPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *BooleanFuncReducer) AggregateBoolean(p *BooleanPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &BooleanPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateBoolean. +func (r *BooleanFuncReducer) Emit() []BooleanPoint { + return []BooleanPoint{*r.prev} +} + +// BooleanReduceSliceFunc is the function called by a BooleanPoint reducer. +type BooleanReduceSliceFunc func(a []BooleanPoint) []BooleanPoint + +// BooleanSliceFuncReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type BooleanSliceFuncReducer struct { + points []BooleanPoint + fn BooleanReduceSliceFunc +} + +// NewBooleanSliceFuncReducer creates a new BooleanSliceFuncReducer. +func NewBooleanSliceFuncReducer(fn BooleanReduceSliceFunc) *BooleanSliceFuncReducer { + return &BooleanSliceFuncReducer{fn: fn} +} + +// AggregateBoolean copies the BooleanPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *BooleanSliceFuncReducer) AggregateBoolean(p *BooleanPoint) { + r.points = append(r.points, *p) +} + +// AggregateBooleanBulk performs a bulk copy of BooleanPoints into the internal slice. +// This is a more efficient version of calling AggregateBoolean on each point. +func (r *BooleanSliceFuncReducer) AggregateBooleanBulk(points []BooleanPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *BooleanSliceFuncReducer) Emit() []BooleanPoint { + return r.fn(r.points) +} + +// BooleanDistinctReducer returns the distinct points in a series. +type BooleanDistinctReducer struct { + m map[bool]BooleanPoint +} + +// NewBooleanDistinctReducer creates a new BooleanDistinctReducer. 
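[Editor's illustration — not part of the diff] A sketch of plugging a caller-supplied reduce function into the FuncReducer pattern above: a BooleanReduceFunc that keeps a running logical AND of every aggregated point. The helper name and package are assumptions for illustration.

package example

import "github.com/influxdata/influxdb/influxql"

// allTrue reports whether every point's value is true, using a
// BooleanFuncReducer with a custom reduce function. prev is nil on the first
// call and carries the running result afterwards.
func allTrue(points []influxql.BooleanPoint) bool {
	if len(points) == 0 {
		return false
	}
	fn := func(prev, curr *influxql.BooleanPoint) (int64, bool, []interface{}) {
		if prev == nil {
			return curr.Time, curr.Value, nil
		}
		return curr.Time, prev.Value && curr.Value, nil
	}
	r := influxql.NewBooleanFuncReducer(fn, nil)
	for i := range points {
		r.AggregateBoolean(&points[i])
	}
	return r.Emit()[0].Value
}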
+func NewBooleanDistinctReducer() *BooleanDistinctReducer { + return &BooleanDistinctReducer{m: make(map[bool]BooleanPoint)} +} + +// AggregateBoolean aggregates a point into the reducer. +func (r *BooleanDistinctReducer) AggregateBoolean(p *BooleanPoint) { + if _, ok := r.m[p.Value]; !ok { + r.m[p.Value] = *p + } +} + +// Emit emits the distinct points that have been aggregated into the reducer. +func (r *BooleanDistinctReducer) Emit() []BooleanPoint { + points := make([]BooleanPoint, 0, len(r.m)) + for _, p := range r.m { + points = append(points, BooleanPoint{Time: p.Time, Value: p.Value}) + } + sort.Sort(booleanPoints(points)) + return points +} + +// BooleanElapsedReducer calculates the elapsed of the aggregated points. +type BooleanElapsedReducer struct { + unitConversion int64 + prev BooleanPoint + curr BooleanPoint +} + +// NewBooleanElapsedReducer creates a new BooleanElapsedReducer. +func NewBooleanElapsedReducer(interval Interval) *BooleanElapsedReducer { + return &BooleanElapsedReducer{ + unitConversion: int64(interval.Duration), + prev: BooleanPoint{Nil: true}, + curr: BooleanPoint{Nil: true}, + } +} + +// AggregateBoolean aggregates a point into the reducer and updates the current window. +func (r *BooleanElapsedReducer) AggregateBoolean(p *BooleanPoint) { + r.prev = r.curr + r.curr = *p +} + +// Emit emits the elapsed of the reducer at the current point. +func (r *BooleanElapsedReducer) Emit() []IntegerPoint { + if !r.prev.Nil { + elapsed := (r.curr.Time - r.prev.Time) / r.unitConversion + return []IntegerPoint{ + {Time: r.curr.Time, Value: elapsed}, + } + } + return nil +} + +// BooleanSampleReduces implements a reservoir sampling to calculate a random subset of points +type BooleanSampleReducer struct { + count int // how many points we've iterated over + rng *rand.Rand // random number generator for each reducer + + points booleanPoints // the reservoir +} + +// NewBooleanSampleReducer creates a new BooleanSampleReducer +func NewBooleanSampleReducer(size int) *BooleanSampleReducer { + return &BooleanSampleReducer{ + rng: rand.New(rand.NewSource(time.Now().UnixNano())), // seed with current time as suggested by https://golang.org/pkg/math/rand/ + points: make(booleanPoints, size), + } +} + +// AggregateBoolean aggregates a point into the reducer. +func (r *BooleanSampleReducer) AggregateBoolean(p *BooleanPoint) { + r.count++ + // Fill the reservoir with the first n points + if r.count-1 < len(r.points) { + r.points[r.count-1] = *p + return + } + + // Generate a random integer between 1 and the count and + // if that number is less than the length of the slice + // replace the point at that index rnd with p. + rnd := rand.Intn(r.count) + if rnd < len(r.points) { + r.points[rnd] = *p + } +} + +// Emit emits the reservoir sample as many points. +func (r *BooleanSampleReducer) Emit() []BooleanPoint { + min := len(r.points) + if r.count < min { + min = r.count + } + pts := r.points[:min] + sort.Sort(pts) + return pts +} diff -Nru influxdb-0.10.0+dfsg1/influxql/functions.gen.go.tmpl influxdb-1.1.1+dfsg1/influxql/functions.gen.go.tmpl --- influxdb-0.10.0+dfsg1/influxql/functions.gen.go.tmpl 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/functions.gen.go.tmpl 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,219 @@ +package influxql + +import ( +"sort" +"time" +"math/rand" +) + +{{with $types := .}}{{range $k := $types}} + +// {{$k.Name}}PointAggregator aggregates points to produce a single point. 
+type {{$k.Name}}PointAggregator interface { + Aggregate{{$k.Name}}(p *{{$k.Name}}Point) +} + +// {{$k.Name}}BulkPointAggregator aggregates multiple points at a time. +type {{$k.Name}}BulkPointAggregator interface { + Aggregate{{$k.Name}}Bulk(points []{{$k.Name}}Point) +} + +// Aggregate{{$k.Name}}Points feeds a slice of {{$k.Name}}Point into an +// aggregator. If the aggregator is a {{$k.Name}}BulkPointAggregator, it will +// use the AggregateBulk method. +func Aggregate{{$k.Name}}Points(a {{$k.Name}}PointAggregator, points []{{$k.Name}}Point) { + switch a := a.(type) { + case {{$k.Name}}BulkPointAggregator: + a.Aggregate{{$k.Name}}Bulk(points) + default: + for _, p := range points { + a.Aggregate{{$k.Name}}(&p) + } + } +} + +// {{$k.Name}}PointEmitter produces a single point from an aggregate. +type {{$k.Name}}PointEmitter interface { + Emit() []{{$k.Name}}Point +} + +{{range $v := $types}} + +// {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Func is the function called by a {{$k.Name}}Point reducer. +type {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Func func(prev *{{$v.Name}}Point, curr *{{$k.Name}}Point) (t int64, v {{$v.Type}}, aux []interface{}) + +// {{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type {{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer struct { + prev *{{$v.Name}}Point + fn {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Func +} + +// New{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer creates a new {{$k.Name}}Func{{$v.Name}}Reducer. +func New{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer(fn {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Func, prev *{{$v.Name}}Point) *{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer { + return &{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer{fn: fn, prev: prev} +} + +// Aggregate{{$k.Name}} takes a {{$k.Name}}Point and invokes the reduce function with the +// current and new point to modify the current point. +func (r *{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &{{$v.Name}}Point{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with Aggregate{{$k.Name}}. +func (r *{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Emit() []{{$v.Name}}Point { + return []{{$v.Name}}Point{*r.prev} +} + +// {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}SliceFunc is the function called by a {{$k.Name}}Point reducer. +type {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}SliceFunc func(a []{{$k.Name}}Point) []{{$v.Name}}Point + +// {{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. 
+type {{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer struct { + points []{{$k.Name}}Point + fn {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}SliceFunc +} + +// New{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer creates a new {{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer. +func New{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer(fn {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}SliceFunc) *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer { + return &{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer{fn: fn} +} + +// Aggregate{{$k.Name}} copies the {{$k.Name}}Point into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) { + r.points = append(r.points, *p) +} + +// Aggregate{{$k.Name}}Bulk performs a bulk copy of {{$k.Name}}Points into the internal slice. +// This is a more efficient version of calling Aggregate{{$k.Name}} on each point. +func (r *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Aggregate{{$k.Name}}Bulk(points []{{$k.Name}}Point) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Emit() []{{$v.Name}}Point { + return r.fn(r.points) +} +{{end}} + +// {{$k.Name}}DistinctReducer returns the distinct points in a series. +type {{$k.Name}}DistinctReducer struct { + m map[{{$k.Type}}]{{$k.Name}}Point +} + +// New{{$k.Name}}DistinctReducer creates a new {{$k.Name}}DistinctReducer. +func New{{$k.Name}}DistinctReducer() *{{$k.Name}}DistinctReducer { + return &{{$k.Name}}DistinctReducer{m: make(map[{{$k.Type}}]{{$k.Name}}Point)} +} + +// Aggregate{{$k.Name}} aggregates a point into the reducer. +func (r *{{$k.Name}}DistinctReducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) { + if _, ok := r.m[p.Value]; !ok { + r.m[p.Value] = *p + } +} + +// Emit emits the distinct points that have been aggregated into the reducer. +func (r *{{$k.Name}}DistinctReducer) Emit() []{{$k.Name}}Point { + points := make([]{{$k.Name}}Point, 0, len(r.m)) + for _, p := range r.m { + points = append(points, {{$k.Name}}Point{Time: p.Time, Value: p.Value}) + } + sort.Sort({{$k.name}}Points(points)) + return points +} + +// {{$k.Name}}ElapsedReducer calculates the elapsed of the aggregated points. +type {{$k.Name}}ElapsedReducer struct { + unitConversion int64 + prev {{$k.Name}}Point + curr {{$k.Name}}Point +} + +// New{{$k.Name}}ElapsedReducer creates a new {{$k.Name}}ElapsedReducer. +func New{{$k.Name}}ElapsedReducer(interval Interval) *{{$k.Name}}ElapsedReducer { + return &{{$k.Name}}ElapsedReducer{ + unitConversion: int64(interval.Duration), + prev: {{$k.Name}}Point{Nil: true}, + curr: {{$k.Name}}Point{Nil: true}, + } +} + +// Aggregate{{$k.Name}} aggregates a point into the reducer and updates the current window. +func (r *{{$k.Name}}ElapsedReducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) { + r.prev = r.curr + r.curr = *p +} + +// Emit emits the elapsed of the reducer at the current point. 
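[Editor's illustration — not part of the diff] The .tmpl file above drives code generation for all point types. The actual generator and its type-description data are not part of this diff; the sketch below only shows how such a template can be rendered with text/template, using maps so that both the exported {{$k.Name}}/{{$k.Type}} references and the lower-case {{$k.name}} reference resolve. The field values are assumptions inferred from the generated code.

package main

import (
	"os"
	"text/template"
)

// Render functions.gen.go.tmpl for the four point types. Each map supplies
// the Name, name and Type keys the template dereferences.
func main() {
	types := []map[string]string{
		{"Name": "Float", "name": "float", "Type": "float64"},
		{"Name": "Integer", "name": "integer", "Type": "int64"},
		{"Name": "String", "name": "string", "Type": "string"},
		{"Name": "Boolean", "name": "boolean", "Type": "bool"},
	}
	t := template.Must(template.ParseFiles("functions.gen.go.tmpl"))
	if err := t.Execute(os.Stdout, types); err != nil {
		panic(err)
	}
}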
+func (r *{{$k.Name}}ElapsedReducer) Emit() []IntegerPoint { + if !r.prev.Nil { + elapsed := (r.curr.Time - r.prev.Time) / r.unitConversion + return []IntegerPoint{ + {Time: r.curr.Time, Value: elapsed}, + } + } + return nil +} + +// {{$k.Name}}SampleReduces implements a reservoir sampling to calculate a random subset of points +type {{$k.Name}}SampleReducer struct { + count int // how many points we've iterated over + rng *rand.Rand // random number generator for each reducer + + points {{$k.name}}Points // the reservoir +} + +// New{{$k.Name}}SampleReducer creates a new {{$k.Name}}SampleReducer +func New{{$k.Name}}SampleReducer(size int) *{{$k.Name}}SampleReducer { + return &{{$k.Name}}SampleReducer{ + rng: rand.New(rand.NewSource(time.Now().UnixNano())), // seed with current time as suggested by https://golang.org/pkg/math/rand/ + points: make({{$k.name}}Points, size), + } +} + +// Aggregate{{$k.Name}} aggregates a point into the reducer. +func (r *{{$k.Name}}SampleReducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) { + r.count++ + // Fill the reservoir with the first n points + if r.count-1 < len(r.points) { + r.points[r.count-1] = *p + return + } + + // Generate a random integer between 1 and the count and + // if that number is less than the length of the slice + // replace the point at that index rnd with p. + rnd := rand.Intn(r.count) + if rnd < len(r.points) { + r.points[rnd] = *p + } +} + +// Emit emits the reservoir sample as many points. +func (r *{{$k.Name}}SampleReducer) Emit() []{{$k.Name}}Point { + min := len(r.points) + if r.count < min { + min = r.count + } + pts := r.points[:min] + sort.Sort(pts) + return pts +} + + +{{end}}{{end}} diff -Nru influxdb-0.10.0+dfsg1/influxql/functions.go influxdb-1.1.1+dfsg1/influxql/functions.go --- influxdb-0.10.0+dfsg1/influxql/functions.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/functions.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,759 @@ +package influxql + +import ( + "math" + "time" + + "github.com/influxdata/influxdb/influxql/neldermead" +) + +// FloatMeanReducer calculates the mean of the aggregated points. +type FloatMeanReducer struct { + sum float64 + count uint32 +} + +// NewFloatMeanReducer creates a new FloatMeanReducer. +func NewFloatMeanReducer() *FloatMeanReducer { + return &FloatMeanReducer{} +} + +// AggregateFloat aggregates a point into the reducer. +func (r *FloatMeanReducer) AggregateFloat(p *FloatPoint) { + if p.Aggregated >= 2 { + r.sum += p.Value * float64(p.Aggregated) + r.count += p.Aggregated + } else { + r.sum += p.Value + r.count++ + } +} + +// Emit emits the mean of the aggregated points as a single point. +func (r *FloatMeanReducer) Emit() []FloatPoint { + return []FloatPoint{{ + Time: ZeroTime, + Value: r.sum / float64(r.count), + Aggregated: r.count, + }} +} + +// IntegerMeanReducer calculates the mean of the aggregated points. +type IntegerMeanReducer struct { + sum int64 + count uint32 +} + +// NewIntegerMeanReducer creates a new IntegerMeanReducer. +func NewIntegerMeanReducer() *IntegerMeanReducer { + return &IntegerMeanReducer{} +} + +// AggregateInteger aggregates a point into the reducer. +func (r *IntegerMeanReducer) AggregateInteger(p *IntegerPoint) { + if p.Aggregated >= 2 { + r.sum += p.Value * int64(p.Aggregated) + r.count += p.Aggregated + } else { + r.sum += p.Value + r.count++ + } +} + +// Emit emits the mean of the aggregated points as a single point. 
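[Editor's illustration — not part of the diff] Both mean reducers above weight points that already carry an Aggregated count, so the emitted mean is taken over the underlying raw values rather than over the incoming points. A small usage sketch (illustrative only):

package example

import "github.com/influxdata/influxdb/influxql"

// meanExample mixes a pre-aggregated point (three raw values with mean 2.0)
// with one raw value of 6.0; the overall mean is (3*2.0 + 6.0) / 4 = 3.0.
func meanExample() influxql.FloatPoint {
	r := influxql.NewFloatMeanReducer()
	r.AggregateFloat(&influxql.FloatPoint{Value: 2.0, Aggregated: 3})
	r.AggregateFloat(&influxql.FloatPoint{Value: 6.0})
	return r.Emit()[0] // Value: 3, Aggregated: 4
}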
+func (r *IntegerMeanReducer) Emit() []FloatPoint { + return []FloatPoint{{ + Time: ZeroTime, + Value: float64(r.sum) / float64(r.count), + Aggregated: r.count, + }} +} + +// FloatDerivativeReducer calculates the derivative of the aggregated points. +type FloatDerivativeReducer struct { + interval Interval + prev FloatPoint + curr FloatPoint + isNonNegative bool + ascending bool +} + +// NewFloatDerivativeReducer creates a new FloatDerivativeReducer. +func NewFloatDerivativeReducer(interval Interval, isNonNegative, ascending bool) *FloatDerivativeReducer { + return &FloatDerivativeReducer{ + interval: interval, + isNonNegative: isNonNegative, + ascending: ascending, + prev: FloatPoint{Nil: true}, + curr: FloatPoint{Nil: true}, + } +} + +// AggregateFloat aggregates a point into the reducer and updates the current window. +func (r *FloatDerivativeReducer) AggregateFloat(p *FloatPoint) { + // Skip past a point when it does not advance the stream. A joined series + // may have multiple points at the same time so we will discard anything + // except the first point we encounter. + if !r.curr.Nil && r.curr.Time == p.Time { + return + } + + r.prev = r.curr + r.curr = *p +} + +// Emit emits the derivative of the reducer at the current point. +func (r *FloatDerivativeReducer) Emit() []FloatPoint { + if !r.prev.Nil { + // Calculate the derivative of successive points by dividing the + // difference of each value by the elapsed time normalized to the interval. + diff := r.curr.Value - r.prev.Value + elapsed := r.curr.Time - r.prev.Time + if !r.ascending { + elapsed = -elapsed + } + value := diff / (float64(elapsed) / float64(r.interval.Duration)) + + // Mark this point as read by changing the previous point to nil. + r.prev.Nil = true + + // Drop negative values for non-negative derivatives. + if r.isNonNegative && diff < 0 { + return nil + } + return []FloatPoint{{Time: r.curr.Time, Value: value}} + } + return nil +} + +// IntegerDerivativeReducer calculates the derivative of the aggregated points. +type IntegerDerivativeReducer struct { + interval Interval + prev IntegerPoint + curr IntegerPoint + isNonNegative bool + ascending bool +} + +// NewIntegerDerivativeReducer creates a new IntegerDerivativeReducer. +func NewIntegerDerivativeReducer(interval Interval, isNonNegative, ascending bool) *IntegerDerivativeReducer { + return &IntegerDerivativeReducer{ + interval: interval, + isNonNegative: isNonNegative, + ascending: ascending, + prev: IntegerPoint{Nil: true}, + curr: IntegerPoint{Nil: true}, + } +} + +// AggregateInteger aggregates a point into the reducer and updates the current window. +func (r *IntegerDerivativeReducer) AggregateInteger(p *IntegerPoint) { + // Skip past a point when it does not advance the stream. A joined series + // may have multiple points at the same time so we will discard anything + // except the first point we encounter. + if !r.curr.Nil && r.curr.Time == p.Time { + return + } + + r.prev = r.curr + r.curr = *p +} + +// Emit emits the derivative of the reducer at the current point. +func (r *IntegerDerivativeReducer) Emit() []FloatPoint { + if !r.prev.Nil { + // Calculate the derivative of successive points by dividing the + // difference of each value by the elapsed time normalized to the interval. + diff := float64(r.curr.Value - r.prev.Value) + elapsed := r.curr.Time - r.prev.Time + if !r.ascending { + elapsed = -elapsed + } + value := diff / (float64(elapsed) / float64(r.interval.Duration)) + + // Mark this point as read by changing the previous point to nil. 
+ r.prev.Nil = true + + // Drop negative values for non-negative derivatives. + if r.isNonNegative && diff < 0 { + return nil + } + return []FloatPoint{{Time: r.curr.Time, Value: value}} + } + return nil +} + +// FloatDifferenceReducer calculates the derivative of the aggregated points. +type FloatDifferenceReducer struct { + prev FloatPoint + curr FloatPoint +} + +// NewFloatDifferenceReducer creates a new FloatDifferenceReducer. +func NewFloatDifferenceReducer() *FloatDifferenceReducer { + return &FloatDifferenceReducer{ + prev: FloatPoint{Nil: true}, + curr: FloatPoint{Nil: true}, + } +} + +// AggregateFloat aggregates a point into the reducer and updates the current window. +func (r *FloatDifferenceReducer) AggregateFloat(p *FloatPoint) { + // Skip past a point when it does not advance the stream. A joined series + // may have multiple points at the same time so we will discard anything + // except the first point we encounter. + if !r.curr.Nil && r.curr.Time == p.Time { + return + } + + r.prev = r.curr + r.curr = *p +} + +// Emit emits the difference of the reducer at the current point. +func (r *FloatDifferenceReducer) Emit() []FloatPoint { + if !r.prev.Nil { + // Calculate the difference of successive points. + value := r.curr.Value - r.prev.Value + + // Mark this point as read by changing the previous point to nil. + r.prev.Nil = true + return []FloatPoint{{Time: r.curr.Time, Value: value}} + } + return nil +} + +// IntegerDifferenceReducer calculates the derivative of the aggregated points. +type IntegerDifferenceReducer struct { + prev IntegerPoint + curr IntegerPoint +} + +// NewIntegerDifferenceReducer creates a new IntegerDifferenceReducer. +func NewIntegerDifferenceReducer() *IntegerDifferenceReducer { + return &IntegerDifferenceReducer{ + prev: IntegerPoint{Nil: true}, + curr: IntegerPoint{Nil: true}, + } +} + +// AggregateInteger aggregates a point into the reducer and updates the current window. +func (r *IntegerDifferenceReducer) AggregateInteger(p *IntegerPoint) { + // Skip past a point when it does not advance the stream. A joined series + // may have multiple points at the same time so we will discard anything + // except the first point we encounter. + if !r.curr.Nil && r.curr.Time == p.Time { + return + } + + r.prev = r.curr + r.curr = *p +} + +// Emit emits the difference of the reducer at the current point. +func (r *IntegerDifferenceReducer) Emit() []IntegerPoint { + if !r.prev.Nil { + // Calculate the difference of successive points. + value := r.curr.Value - r.prev.Value + + // Mark this point as read by changing the previous point to nil. + r.prev.Nil = true + return []IntegerPoint{{Time: r.curr.Time, Value: value}} + } + return nil +} + +// FloatMovingAverageReducer calculates the moving average of the aggregated points. +type FloatMovingAverageReducer struct { + pos int + sum float64 + time int64 + buf []float64 +} + +// NewFloatMovingAverageReducer creates a new FloatMovingAverageReducer. +func NewFloatMovingAverageReducer(n int) *FloatMovingAverageReducer { + return &FloatMovingAverageReducer{ + buf: make([]float64, 0, n), + } +} + +// AggregateFloat aggregates a point into the reducer and updates the current window. 
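[Editor's illustration — not part of the diff] A sketch of driving the moving-average reducer as its Emit comment describes: Emit is called after every aggregated point and yields nothing until the window of size n is full, then one averaged point per input. The driver loop and data are assumptions for illustration.

package example

import "github.com/influxdata/influxdb/influxql"

// movingAverageExample uses a window of 3. The first two points produce no
// output; the third emits (1+2+3)/3 = 2 and the fourth emits (2+3+6)/3 ≈ 3.67.
func movingAverageExample() []influxql.FloatPoint {
	r := influxql.NewFloatMovingAverageReducer(3)
	var out []influxql.FloatPoint
	for _, p := range []influxql.FloatPoint{
		{Time: 1, Value: 1}, {Time: 2, Value: 2}, {Time: 3, Value: 3},
		{Time: 4, Value: 6},
	} {
		r.AggregateFloat(&p)
		out = append(out, r.Emit()...)
	}
	return out
}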
+func (r *FloatMovingAverageReducer) AggregateFloat(p *FloatPoint) { + if len(r.buf) != cap(r.buf) { + r.buf = append(r.buf, p.Value) + } else { + r.sum -= r.buf[r.pos] + r.buf[r.pos] = p.Value + } + r.sum += p.Value + r.time = p.Time + r.pos++ + if r.pos >= cap(r.buf) { + r.pos = 0 + } +} + +// Emit emits the moving average of the current window. Emit should be called +// after every call to AggregateFloat and it will produce one point if there +// is enough data to fill a window, otherwise it will produce zero points. +func (r *FloatMovingAverageReducer) Emit() []FloatPoint { + if len(r.buf) != cap(r.buf) { + return []FloatPoint{} + } + return []FloatPoint{ + { + Value: r.sum / float64(len(r.buf)), + Time: r.time, + Aggregated: uint32(len(r.buf)), + }, + } +} + +// IntegerMovingAverageReducer calculates the moving average of the aggregated points. +type IntegerMovingAverageReducer struct { + pos int + sum int64 + time int64 + buf []int64 +} + +// NewIntegerMovingAverageReducer creates a new IntegerMovingAverageReducer. +func NewIntegerMovingAverageReducer(n int) *IntegerMovingAverageReducer { + return &IntegerMovingAverageReducer{ + buf: make([]int64, 0, n), + } +} + +// AggregateInteger aggregates a point into the reducer and updates the current window. +func (r *IntegerMovingAverageReducer) AggregateInteger(p *IntegerPoint) { + if len(r.buf) != cap(r.buf) { + r.buf = append(r.buf, p.Value) + } else { + r.sum -= r.buf[r.pos] + r.buf[r.pos] = p.Value + } + r.sum += p.Value + r.time = p.Time + r.pos++ + if r.pos >= cap(r.buf) { + r.pos = 0 + } +} + +// Emit emits the moving average of the current window. Emit should be called +// after every call to AggregateInteger and it will produce one point if there +// is enough data to fill a window, otherwise it will produce zero points. +func (r *IntegerMovingAverageReducer) Emit() []FloatPoint { + if len(r.buf) != cap(r.buf) { + return []FloatPoint{} + } + return []FloatPoint{ + { + Value: float64(r.sum) / float64(len(r.buf)), + Time: r.time, + Aggregated: uint32(len(r.buf)), + }, + } +} + +// FloatCumulativeSumReducer cumulates the values from each point. +type FloatCumulativeSumReducer struct { + curr FloatPoint +} + +// NewFloatCumulativeSumReducer creates a new FloatCumulativeSumReducer. +func NewFloatCumulativeSumReducer() *FloatCumulativeSumReducer { + return &FloatCumulativeSumReducer{ + curr: FloatPoint{Nil: true}, + } +} + +func (r *FloatCumulativeSumReducer) AggregateFloat(p *FloatPoint) { + r.curr.Value += p.Value + r.curr.Time = p.Time + r.curr.Nil = false +} + +func (r *FloatCumulativeSumReducer) Emit() []FloatPoint { + var pts []FloatPoint + if !r.curr.Nil { + pts = []FloatPoint{r.curr} + } + return pts +} + +// IntegerCumulativeSumReducer cumulates the values from each point. +type IntegerCumulativeSumReducer struct { + curr IntegerPoint +} + +// NewIntegerCumulativeSumReducer creates a new IntegerCumulativeSumReducer. +func NewIntegerCumulativeSumReducer() *IntegerCumulativeSumReducer { + return &IntegerCumulativeSumReducer{ + curr: IntegerPoint{Nil: true}, + } +} + +func (r *IntegerCumulativeSumReducer) AggregateInteger(p *IntegerPoint) { + r.curr.Value += p.Value + r.curr.Time = p.Time + r.curr.Nil = false +} + +func (r *IntegerCumulativeSumReducer) Emit() []IntegerPoint { + var pts []IntegerPoint + if !r.curr.Nil { + pts = []IntegerPoint{r.curr} + } + return pts +} + +// FloatHoltWintersReducer forecasts a series into the future. +// This is done using the Holt-Winters damped method. +// 1. 
Using the series the initial values are calculated using a SSE. +// 2. The series is forecasted into the future using the iterative relations. +type FloatHoltWintersReducer struct { + // Season period + m int + seasonal bool + + // Horizon + h int + + // Interval between points + interval int64 + // interval / 2 -- used to perform rounding + halfInterval int64 + + // Whether to include all data or only future values + includeFitData bool + + // NelderMead optimizer + optim *neldermead.Optimizer + // Small difference bound for the optimizer + epsilon float64 + + y []float64 + points []FloatPoint +} + +const ( + // Arbitrary weight for initializing some intial guesses. + // This should be in the range [0,1] + hwWeight = 0.5 + // Epsilon value for the minimization process + hwDefaultEpsilon = 1.0e-4 + // Define a grid of initial guesses for the parameters: alpha, beta, gamma, and phi. + // Keep in mind that this grid is N^4 so we should keep N small + // The starting lower guess + hwGuessLower = 0.3 + // The upper bound on the grid + hwGuessUpper = 1.0 + // The step between guesses + hwGuessStep = 0.4 +) + +// NewFloatHoltWintersReducer creates a new FloatHoltWintersReducer. +func NewFloatHoltWintersReducer(h, m int, includeFitData bool, interval time.Duration) *FloatHoltWintersReducer { + seasonal := true + if m < 2 { + seasonal = false + } + return &FloatHoltWintersReducer{ + h: h, + m: m, + seasonal: seasonal, + includeFitData: includeFitData, + interval: int64(interval), + halfInterval: int64(interval) / 2, + optim: neldermead.New(), + epsilon: hwDefaultEpsilon, + } +} + +func (r *FloatHoltWintersReducer) aggregate(time int64, value float64) { + r.points = append(r.points, FloatPoint{ + Time: time, + Value: value, + }) +} + +// AggregateFloat aggregates a point into the reducer and updates the current window. +func (r *FloatHoltWintersReducer) AggregateFloat(p *FloatPoint) { + r.aggregate(p.Time, p.Value) +} + +// AggregateInteger aggregates a point into the reducer and updates the current window. +func (r *FloatHoltWintersReducer) AggregateInteger(p *IntegerPoint) { + r.aggregate(p.Time, float64(p.Value)) +} + +func (r *FloatHoltWintersReducer) roundTime(t int64) int64 { + // Overflow safe round function + remainder := t % r.interval + if remainder > r.halfInterval { + // Round up + return (t/r.interval + 1) * r.interval + } + // Round down + return (t / r.interval) * r.interval +} + +// Emit returns the points generated by the HoltWinters algorithm. +func (r *FloatHoltWintersReducer) Emit() []FloatPoint { + if l := len(r.points); l < 2 || r.seasonal && l < r.m || r.h <= 0 { + return nil + } + // First fill in r.y with values and NaNs for missing values + start, stop := r.roundTime(r.points[0].Time), r.roundTime(r.points[len(r.points)-1].Time) + count := (stop - start) / r.interval + if count <= 0 { + return nil + } + r.y = make([]float64, 1, count) + r.y[0] = r.points[0].Value + t := r.roundTime(r.points[0].Time) + for _, p := range r.points[1:] { + rounded := r.roundTime(p.Time) + if rounded <= t { + // Drop values that occur for the same time bucket + continue + } + t += r.interval + // Add any missing values before the next point + for rounded != t { + // Add in a NaN so we can skip it later. + r.y = append(r.y, math.NaN()) + t += r.interval + } + r.y = append(r.y, p.Value) + } + + // Seasonality + m := r.m + + // Starting guesses + // NOTE: Since these values are guesses + // in the cases where we were missing data, + // we can just skip the value and call it good. 
+ + l0 := 0.0 + if r.seasonal { + for i := 0; i < m; i++ { + if !math.IsNaN(r.y[i]) { + l0 += (1 / float64(m)) * r.y[i] + } + } + } else { + l0 += hwWeight * r.y[0] + } + + b0 := 0.0 + if r.seasonal { + for i := 0; i < m && m+i < len(r.y); i++ { + if !math.IsNaN(r.y[i]) && !math.IsNaN(r.y[m+i]) { + b0 += 1 / float64(m*m) * (r.y[m+i] - r.y[i]) + } + } + } else { + if !math.IsNaN(r.y[1]) { + b0 = hwWeight * (r.y[1] - r.y[0]) + } + } + + var s []float64 + if r.seasonal { + s = make([]float64, m) + for i := 0; i < m; i++ { + if !math.IsNaN(r.y[i]) { + s[i] = r.y[i] / l0 + } else { + s[i] = 0 + } + } + } + + parameters := make([]float64, 6+len(s)) + parameters[4] = l0 + parameters[5] = b0 + o := len(parameters) - len(s) + for i := range s { + parameters[i+o] = s[i] + } + + // Determine best fit for the various parameters + minSSE := math.Inf(1) + var bestParams []float64 + for alpha := hwGuessLower; alpha < hwGuessUpper; alpha += hwGuessStep { + for beta := hwGuessLower; beta < hwGuessUpper; beta += hwGuessStep { + for gamma := hwGuessLower; gamma < hwGuessUpper; gamma += hwGuessStep { + for phi := hwGuessLower; phi < hwGuessUpper; phi += hwGuessStep { + parameters[0] = alpha + parameters[1] = beta + parameters[2] = gamma + parameters[3] = phi + sse, params := r.optim.Optimize(r.sse, parameters, r.epsilon, 1) + if sse < minSSE || bestParams == nil { + minSSE = sse + bestParams = params + } + } + } + } + } + + // Forecast + forecasted := r.forecast(r.h, bestParams) + var points []FloatPoint + if r.includeFitData { + start := r.points[0].Time + points = make([]FloatPoint, 0, len(forecasted)) + for i, v := range forecasted { + if !math.IsNaN(v) { + t := start + r.interval*(int64(i)) + points = append(points, FloatPoint{ + Value: v, + Time: t, + }) + } + } + } else { + stop := r.points[len(r.points)-1].Time + points = make([]FloatPoint, 0, r.h) + for i, v := range forecasted[len(r.y):] { + if !math.IsNaN(v) { + t := stop + r.interval*(int64(i)+1) + points = append(points, FloatPoint{ + Value: v, + Time: t, + }) + } + } + } + // Clear data set + r.y = r.y[0:0] + return points +} + +// Using the recursive relations compute the next values +func (r *FloatHoltWintersReducer) next(alpha, beta, gamma, phi, phiH, yT, lTp, bTp, sTm, sTmh float64) (yTh, lT, bT, sT float64) { + lT = alpha*(yT/sTm) + (1-alpha)*(lTp+phi*bTp) + bT = beta*(lT-lTp) + (1-beta)*phi*bTp + sT = gamma*(yT/(lTp+phi*bTp)) + (1-gamma)*sTm + yTh = (lT + phiH*bT) * sTmh + return +} + +// Forecast the data h points into the future. 
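[Editor's note] For reference, the recurrences that the next method above implements are the multiplicative Holt-Winters equations with a damped trend; in the usual notation (level $\ell$, trend $b$, seasonal factors $s$ with period $m$, smoothing parameters $\alpha, \beta, \gamma$, damping $\phi$):

$\ell_t = \alpha\,\frac{y_t}{s_{t-m}} + (1-\alpha)\,(\ell_{t-1} + \phi\, b_{t-1})$
$b_t = \beta\,(\ell_t - \ell_{t-1}) + (1-\beta)\,\phi\, b_{t-1}$
$s_t = \gamma\,\frac{y_t}{\ell_{t-1} + \phi\, b_{t-1}} + (1-\gamma)\, s_{t-m}$
$\hat{y}_{t+h} = (\ell_t + \phi_h\, b_t)\, s_{t-m+h}$

Here $\phi_h$ is the running sum of powers of $\phi$ that forecast accumulates in phiH; the grid search in Emit then minimizes the sum of squared errors returned by sse over $(\alpha, \beta, \gamma, \phi)$.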
+func (r *FloatHoltWintersReducer) forecast(h int, params []float64) []float64 { + // Constrain parameters + r.constrain(params) + + yT := r.y[0] + + phi := params[3] + phiH := phi + + lT := params[4] + bT := params[5] + sT := 0.0 + + // seasonals is a ring buffer of past sT values + var seasonals []float64 + var m, so int + if r.seasonal { + seasonals = params[6:] + m = len(params[6:]) + if m == 1 { + seasonals[0] = 1 + } + // Season index offset + so = m - 1 + } + + forecasted := make([]float64, len(r.y)+h) + forecasted[0] = yT + l := len(r.y) + var hm int + stm, stmh := 1.0, 1.0 + for t := 1; t < l+h; t++ { + if r.seasonal { + hm = t % m + stm = seasonals[(t-m+so)%m] + stmh = seasonals[(t-m+hm+so)%m] + } + yT, lT, bT, sT = r.next( + params[0], // alpha + params[1], // beta + params[2], // gamma + phi, + phiH, + yT, + lT, + bT, + stm, + stmh, + ) + phiH += math.Pow(phi, float64(t)) + + if r.seasonal { + seasonals[(t+so)%m] = sT + so++ + } + + forecasted[t] = yT + } + return forecasted +} + +// Compute sum squared error for the given parameters. +func (r *FloatHoltWintersReducer) sse(params []float64) float64 { + sse := 0.0 + forecasted := r.forecast(0, params) + for i := range forecasted { + // Skip missing values since we cannot use them to compute an error. + if !math.IsNaN(r.y[i]) { + // Compute error + if math.IsNaN(forecasted[i]) { + // Penalize forecasted NaNs + return math.Inf(1) + } + diff := forecasted[i] - r.y[i] + sse += diff * diff + } + } + return sse +} + +// Constrain alpha, beta, gamma, phi in the range [0, 1] +func (r *FloatHoltWintersReducer) constrain(x []float64) { + // alpha + if x[0] > 1 { + x[0] = 1 + } + if x[0] < 0 { + x[0] = 0 + } + // beta + if x[1] > 1 { + x[1] = 1 + } + if x[1] < 0 { + x[1] = 0 + } + // gamma + if x[2] > 1 { + x[2] = 1 + } + if x[2] < 0 { + x[2] = 0 + } + // phi + if x[3] > 1 { + x[3] = 1 + } + if x[3] < 0 { + x[3] = 0 + } +} diff -Nru influxdb-0.10.0+dfsg1/influxql/functions_test.go influxdb-1.1.1+dfsg1/influxql/functions_test.go --- influxdb-0.10.0+dfsg1/influxql/functions_test.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/functions_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,493 @@ +package influxql_test + +import ( + "math" + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/pkg/deep" +) + +func almostEqual(got, exp float64) bool { + return math.Abs(got-exp) < 1e-5 && !math.IsNaN(got) +} + +func TestHoltWinters_AusTourists(t *testing.T) { + hw := influxql.NewFloatHoltWintersReducer(10, 4, false, 1) + // Dataset from http://www.inside-r.org/packages/cran/fpp/docs/austourists + austourists := []influxql.FloatPoint{ + {Time: 1, Value: 30.052513}, + {Time: 2, Value: 19.148496}, + {Time: 3, Value: 25.317692}, + {Time: 4, Value: 27.591437}, + {Time: 5, Value: 32.076456}, + {Time: 6, Value: 23.487961}, + {Time: 7, Value: 28.47594}, + {Time: 8, Value: 35.123753}, + {Time: 9, Value: 36.838485}, + {Time: 10, Value: 25.007017}, + {Time: 11, Value: 30.72223}, + {Time: 12, Value: 28.693759}, + {Time: 13, Value: 36.640986}, + {Time: 14, Value: 23.824609}, + {Time: 15, Value: 29.311683}, + {Time: 16, Value: 31.770309}, + {Time: 17, Value: 35.177877}, + {Time: 18, Value: 19.775244}, + {Time: 19, Value: 29.60175}, + {Time: 20, Value: 34.538842}, + {Time: 21, Value: 41.273599}, + {Time: 22, Value: 26.655862}, + {Time: 23, Value: 28.279859}, + {Time: 24, Value: 35.191153}, + {Time: 25, Value: 41.727458}, + {Time: 26, Value: 24.04185}, + 
{Time: 27, Value: 32.328103}, + {Time: 28, Value: 37.328708}, + {Time: 29, Value: 46.213153}, + {Time: 30, Value: 29.346326}, + {Time: 31, Value: 36.48291}, + {Time: 32, Value: 42.977719}, + {Time: 33, Value: 48.901525}, + {Time: 34, Value: 31.180221}, + {Time: 35, Value: 37.717881}, + {Time: 36, Value: 40.420211}, + {Time: 37, Value: 51.206863}, + {Time: 38, Value: 31.887228}, + {Time: 39, Value: 40.978263}, + {Time: 40, Value: 43.772491}, + {Time: 41, Value: 55.558567}, + {Time: 42, Value: 33.850915}, + {Time: 43, Value: 42.076383}, + {Time: 44, Value: 45.642292}, + {Time: 45, Value: 59.76678}, + {Time: 46, Value: 35.191877}, + {Time: 47, Value: 44.319737}, + {Time: 48, Value: 47.913736}, + } + + for _, p := range austourists { + hw.AggregateFloat(&p) + } + points := hw.Emit() + + forecasted := []influxql.FloatPoint{ + {Time: 49, Value: 51.85064132137853}, + {Time: 50, Value: 43.26055282315273}, + {Time: 51, Value: 41.827258044814464}, + {Time: 52, Value: 54.3990354591749}, + {Time: 53, Value: 54.62334472770803}, + {Time: 54, Value: 45.57155693625209}, + {Time: 55, Value: 44.06051240252263}, + {Time: 56, Value: 57.30029870759433}, + {Time: 57, Value: 57.53591513519172}, + {Time: 58, Value: 47.999008139396096}, + } + + if exp, got := len(forecasted), len(points); exp != got { + t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp) + } + + for i := range forecasted { + if exp, got := forecasted[i].Time, points[i].Time; got != exp { + t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp) + } + if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) { + t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp) + } + } +} + +func TestHoltWinters_AusTourists_Missing(t *testing.T) { + hw := influxql.NewFloatHoltWintersReducer(10, 4, false, 1) + // Dataset from http://www.inside-r.org/packages/cran/fpp/docs/austourists + austourists := []influxql.FloatPoint{ + {Time: 1, Value: 30.052513}, + {Time: 3, Value: 25.317692}, + {Time: 4, Value: 27.591437}, + {Time: 5, Value: 32.076456}, + {Time: 6, Value: 23.487961}, + {Time: 7, Value: 28.47594}, + {Time: 9, Value: 36.838485}, + {Time: 10, Value: 25.007017}, + {Time: 11, Value: 30.72223}, + {Time: 12, Value: 28.693759}, + {Time: 13, Value: 36.640986}, + {Time: 14, Value: 23.824609}, + {Time: 15, Value: 29.311683}, + {Time: 16, Value: 31.770309}, + {Time: 17, Value: 35.177877}, + {Time: 19, Value: 29.60175}, + {Time: 20, Value: 34.538842}, + {Time: 21, Value: 41.273599}, + {Time: 22, Value: 26.655862}, + {Time: 23, Value: 28.279859}, + {Time: 24, Value: 35.191153}, + {Time: 25, Value: 41.727458}, + {Time: 26, Value: 24.04185}, + {Time: 27, Value: 32.328103}, + {Time: 28, Value: 37.328708}, + {Time: 30, Value: 29.346326}, + {Time: 31, Value: 36.48291}, + {Time: 32, Value: 42.977719}, + {Time: 34, Value: 31.180221}, + {Time: 35, Value: 37.717881}, + {Time: 36, Value: 40.420211}, + {Time: 37, Value: 51.206863}, + {Time: 38, Value: 31.887228}, + {Time: 41, Value: 55.558567}, + {Time: 42, Value: 33.850915}, + {Time: 43, Value: 42.076383}, + {Time: 44, Value: 45.642292}, + {Time: 45, Value: 59.76678}, + {Time: 46, Value: 35.191877}, + {Time: 47, Value: 44.319737}, + {Time: 48, Value: 47.913736}, + } + + for _, p := range austourists { + hw.AggregateFloat(&p) + } + points := hw.Emit() + + forecasted := []influxql.FloatPoint{ + {Time: 49, Value: 54.84533610387743}, + {Time: 50, Value: 41.19329421863249}, + {Time: 51, Value: 45.71673175112451}, + {Time: 52, Value: 56.05759298805955}, + {Time: 53, 
Value: 59.32337460282217}, + {Time: 54, Value: 44.75280096850461}, + {Time: 55, Value: 49.98865098113751}, + {Time: 56, Value: 61.86084934967605}, + {Time: 57, Value: 65.95805633454883}, + {Time: 58, Value: 50.1502170480547}, + } + + if exp, got := len(forecasted), len(points); exp != got { + t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp) + } + + for i := range forecasted { + if exp, got := forecasted[i].Time, points[i].Time; got != exp { + t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp) + } + if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) { + t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp) + } + } +} + +func TestHoltWinters_USPopulation(t *testing.T) { + series := []influxql.FloatPoint{ + {Time: 1, Value: 3.93}, + {Time: 2, Value: 5.31}, + {Time: 3, Value: 7.24}, + {Time: 4, Value: 9.64}, + {Time: 5, Value: 12.90}, + {Time: 6, Value: 17.10}, + {Time: 7, Value: 23.20}, + {Time: 8, Value: 31.40}, + {Time: 9, Value: 39.80}, + {Time: 10, Value: 50.20}, + {Time: 11, Value: 62.90}, + {Time: 12, Value: 76.00}, + {Time: 13, Value: 92.00}, + {Time: 14, Value: 105.70}, + {Time: 15, Value: 122.80}, + {Time: 16, Value: 131.70}, + {Time: 17, Value: 151.30}, + {Time: 18, Value: 179.30}, + {Time: 19, Value: 203.20}, + } + hw := influxql.NewFloatHoltWintersReducer(10, 0, true, 1) + for _, p := range series { + hw.AggregateFloat(&p) + } + points := hw.Emit() + + forecasted := []influxql.FloatPoint{ + {Time: 1, Value: 3.93}, + {Time: 2, Value: 4.957405463559748}, + {Time: 3, Value: 7.012210102535647}, + {Time: 4, Value: 10.099589257439924}, + {Time: 5, Value: 14.229926188104242}, + {Time: 6, Value: 19.418878968703797}, + {Time: 7, Value: 25.68749172281409}, + {Time: 8, Value: 33.062351305731305}, + {Time: 9, Value: 41.575791076125206}, + {Time: 10, Value: 51.26614395589263}, + {Time: 11, Value: 62.178047564264595}, + {Time: 12, Value: 74.36280483872488}, + {Time: 13, Value: 87.87880423073163}, + {Time: 14, Value: 102.79200429905801}, + {Time: 15, Value: 119.17648832929542}, + {Time: 16, Value: 137.11509549747296}, + {Time: 17, Value: 156.70013608313175}, + {Time: 18, Value: 178.03419933863566}, + {Time: 19, Value: 201.23106385518594}, + {Time: 20, Value: 226.4167216525905}, + {Time: 21, Value: 253.73052878285205}, + {Time: 22, Value: 283.32649700397553}, + {Time: 23, Value: 315.37474308085984}, + {Time: 24, Value: 350.06311454009256}, + {Time: 25, Value: 387.59901328556873}, + {Time: 26, Value: 428.21144141893404}, + {Time: 27, Value: 472.1532969569147}, + {Time: 28, Value: 519.7039509590035}, + {Time: 29, Value: 571.1721419458248}, + } + + if exp, got := len(forecasted), len(points); exp != got { + t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp) + } + for i := range forecasted { + if exp, got := forecasted[i].Time, points[i].Time; got != exp { + t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp) + } + if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) { + t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp) + } + } +} + +func TestHoltWinters_USPopulation_Missing(t *testing.T) { + series := []influxql.FloatPoint{ + {Time: 1, Value: 3.93}, + {Time: 2, Value: 5.31}, + {Time: 3, Value: 7.24}, + {Time: 4, Value: 9.64}, + {Time: 5, Value: 12.90}, + {Time: 6, Value: 17.10}, + {Time: 7, Value: 23.20}, + {Time: 8, Value: 31.40}, + {Time: 10, Value: 50.20}, + {Time: 11, Value: 62.90}, + {Time: 12, Value: 76.00}, + {Time: 13, Value: 
92.00}, + {Time: 15, Value: 122.80}, + {Time: 16, Value: 131.70}, + {Time: 17, Value: 151.30}, + {Time: 19, Value: 203.20}, + } + hw := influxql.NewFloatHoltWintersReducer(10, 0, true, 1) + for _, p := range series { + hw.AggregateFloat(&p) + } + points := hw.Emit() + + forecasted := []influxql.FloatPoint{ + {Time: 1, Value: 3.93}, + {Time: 2, Value: 4.8931364428135105}, + {Time: 3, Value: 6.962653629047061}, + {Time: 4, Value: 10.056207765903274}, + {Time: 5, Value: 14.18435088129532}, + {Time: 6, Value: 19.362939306110846}, + {Time: 7, Value: 25.613247940326584}, + {Time: 8, Value: 32.96213087008264}, + {Time: 9, Value: 41.442230043017204}, + {Time: 10, Value: 51.09223428526052}, + {Time: 11, Value: 61.95719155158485}, + {Time: 12, Value: 74.08887794968567}, + {Time: 13, Value: 87.54622778052787}, + {Time: 14, Value: 102.39582960014131}, + {Time: 15, Value: 118.7124941463221}, + {Time: 16, Value: 136.57990089987464}, + {Time: 17, Value: 156.09133107941278}, + {Time: 18, Value: 177.35049601833734}, + {Time: 19, Value: 200.472471161683}, + {Time: 20, Value: 225.58474737097785}, + {Time: 21, Value: 252.82841286206823}, + {Time: 22, Value: 282.35948095261017}, + {Time: 23, Value: 314.3503808953992}, + {Time: 24, Value: 348.99163145856954}, + {Time: 25, Value: 386.49371962730555}, + {Time: 26, Value: 427.08920989407727}, + {Time: 27, Value: 471.0351131332573}, + {Time: 28, Value: 518.615548088049}, + {Time: 29, Value: 570.1447331101863}, + } + + if exp, got := len(forecasted), len(points); exp != got { + t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp) + } + for i := range forecasted { + if exp, got := forecasted[i].Time, points[i].Time; got != exp { + t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp) + } + if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) { + t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp) + } + } +} +func TestHoltWinters_RoundTime(t *testing.T) { + maxTime := time.Unix(0, influxql.MaxTime).Round(time.Second).UnixNano() + data := []influxql.FloatPoint{ + {Time: maxTime - int64(5*time.Second), Value: 1}, + {Time: maxTime - int64(4*time.Second+103*time.Millisecond), Value: 10}, + {Time: maxTime - int64(3*time.Second+223*time.Millisecond), Value: 2}, + {Time: maxTime - int64(2*time.Second+481*time.Millisecond), Value: 11}, + } + hw := influxql.NewFloatHoltWintersReducer(2, 2, true, time.Second) + for _, p := range data { + hw.AggregateFloat(&p) + } + points := hw.Emit() + + forecasted := []influxql.FloatPoint{ + {Time: maxTime - int64(5*time.Second), Value: 1}, + {Time: maxTime - int64(4*time.Second), Value: 10.006729104838234}, + {Time: maxTime - int64(3*time.Second), Value: 1.998341814469269}, + {Time: maxTime - int64(2*time.Second), Value: 10.997858830631172}, + {Time: maxTime - int64(1*time.Second), Value: 4.085860238030013}, + {Time: maxTime - int64(0*time.Second), Value: 11.35713604403339}, + } + + if exp, got := len(forecasted), len(points); exp != got { + t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp) + } + for i := range forecasted { + if exp, got := forecasted[i].Time, points[i].Time; got != exp { + t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp) + } + if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) { + t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp) + } + } +} + +func TestHoltWinters_MaxTime(t *testing.T) { + data := []influxql.FloatPoint{ + {Time: influxql.MaxTime - 1, Value: 1}, + 
{Time: influxql.MaxTime, Value: 2},
+	}
+	hw := influxql.NewFloatHoltWintersReducer(1, 0, true, 1)
+	for _, p := range data {
+		hw.AggregateFloat(&p)
+	}
+	points := hw.Emit()
+
+	forecasted := []influxql.FloatPoint{
+		{Time: influxql.MaxTime - 1, Value: 1},
+		{Time: influxql.MaxTime, Value: 2.001516944066403},
+		{Time: influxql.MaxTime + 1, Value: 2.5365248972488343},
+	}
+
+	if exp, got := len(forecasted), len(points); exp != got {
+		t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp)
+	}
+	for i := range forecasted {
+		if exp, got := forecasted[i].Time, points[i].Time; got != exp {
+			t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp)
+		}
+		if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) {
+			t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp)
+		}
+	}
+}
+
+// TestSample_AllSamplesSeen attempts to verify that it is possible
+// to get every subsample in a reasonable number of iterations.
+//
+// The idea here is that 6 iterations should be enough to hit every possible
+// sequence at least once.
+func TestSample_AllSamplesSeen(t *testing.T) {
+
+	ps := []influxql.FloatPoint{
+		{Time: 1, Value: 1},
+		{Time: 2, Value: 2},
+		{Time: 3, Value: 3},
+	}
+
+	// List of all the possible subsamples
+	samples := [][]influxql.FloatPoint{
+		{
+			{Time: 1, Value: 1},
+			{Time: 2, Value: 2},
+		},
+		{
+			{Time: 1, Value: 1},
+			{Time: 3, Value: 3},
+		},
+		{
+			{Time: 2, Value: 2},
+			{Time: 3, Value: 3},
+		},
+	}
+
+	// 6 iterations should be more than sufficient to guarantee that
+	// we hit every possible subsample.
+	for i := 0; i < 6; i++ {
+		s := influxql.NewFloatSampleReducer(2)
+		for _, p := range ps {
+			s.AggregateFloat(&p)
+		}
+
+		points := s.Emit()
+
+		// if samples is empty we've seen every sample, so we're done
+		if len(samples) == 0 {
+			return
+		}
+
+		for i, sample := range samples {
+			// if we find a sample that matches, remove it from
+			// this list of possible samples
+			if deep.Equal(sample, points) {
+				samples = append(samples[:i], samples[i+1:]...)
+ } + } + + } + + // If we missed a sample, report the error + if exp, got := 0, len(samples); exp != got { + t.Fatalf("expected to get every sample: got %d, exp %d", got, exp) + } + +} + +func TestSample_SampleSizeLessThanNumPoints(t *testing.T) { + s := influxql.NewFloatSampleReducer(2) + + ps := []influxql.FloatPoint{ + {Time: 1, Value: 1}, + {Time: 2, Value: 2}, + {Time: 3, Value: 3}, + } + + for _, p := range ps { + s.AggregateFloat(&p) + } + + points := s.Emit() + + if exp, got := 2, len(points); exp != got { + t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp) + } +} + +func TestSample_SampleSizeGreaterThanNumPoints(t *testing.T) { + s := influxql.NewFloatSampleReducer(4) + + ps := []influxql.FloatPoint{ + {Time: 1, Value: 1}, + {Time: 2, Value: 2}, + {Time: 3, Value: 3}, + } + + for _, p := range ps { + s.AggregateFloat(&p) + } + + points := s.Emit() + + if exp, got := len(ps), len(points); exp != got { + t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp) + } + + if !deep.Equal(ps, points) { + t.Fatalf("unexpected points: %s", spew.Sdump(points)) + } +} diff -Nru influxdb-0.10.0+dfsg1/influxql/influxql.go influxdb-1.1.1+dfsg1/influxql/influxql.go --- influxdb-0.10.0+dfsg1/influxql/influxql.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/influxql.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,7 @@ +package influxql // import "github.com/influxdata/influxdb/influxql" + +//go:generate tmpl -data=@tmpldata iterator.gen.go.tmpl +//go:generate tmpl -data=@tmpldata point.gen.go.tmpl +//go:generate tmpl -data=@tmpldata functions.gen.go.tmpl + +//go:generate protoc --gogo_out=. internal/internal.proto diff -Nru influxdb-0.10.0+dfsg1/influxql/INFLUXQL.md influxdb-1.1.1+dfsg1/influxql/INFLUXQL.md --- influxdb-0.10.0+dfsg1/influxql/INFLUXQL.md 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/INFLUXQL.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,789 +0,0 @@ -# The Influx Query Language Specification - -## Introduction - -This is a reference for the Influx Query Language ("InfluxQL"). - -InfluxQL is a SQL-like query language for interacting with InfluxDB. It has been lovingly crafted to feel familiar to those coming from other SQL or SQL-like environments while providing features specific to storing and analyzing time series data. - -## Notation - -The syntax is specified using Extended Backus-Naur Form ("EBNF"). EBNF is the same notation used in the [Go](http://golang.org) programming language specification, which can be found [here](https://golang.org/ref/spec). Not so coincidentally, InfluxDB is written in Go. - -``` -Production = production_name "=" [ Expression ] "." . -Expression = Alternative { "|" Alternative } . -Alternative = Term { Term } . -Term = production_name | token [ "…" token ] | Group | Option | Repetition . -Group = "(" Expression ")" . -Option = "[" Expression "]" . -Repetition = "{" Expression "}" . -``` - -Notation operators in order of increasing precedence: - -``` -| alternation -() grouping -[] option (0 or 1 times) -{} repetition (0 to n times) -``` - -## Query representation - -### Characters - -InfluxQL is Unicode text encoded in [UTF-8](http://en.wikipedia.org/wiki/UTF-8). - -``` -newline = /* the Unicode code point U+000A */ . -unicode_char = /* an arbitrary Unicode code point except newline */ . -``` - -## Letters and digits - -Letters are the set of ASCII characters plus the underscore character _ (U+005F) is considered a letter. - -Only decimal digits are supported. 
- -``` -letter = ascii_letter | "_" . -ascii_letter = "A" … "Z" | "a" … "z" . -digit = "0" … "9" . -``` - -## Identifiers - -Identifiers are tokens which refer to database names, retention policy names, user names, measurement names, tag keys, and field keys. - -The rules: - -- double quoted identifiers can contain any unicode character other than a new line -- double quoted identifiers can contain escaped `"` characters (i.e., `\"`) -- unquoted identifiers must start with an upper or lowercase ASCII character or "_" -- unquoted identifiers may contain only ASCII letters, decimal digits, and "_" - -``` -identifier = unquoted_identifier | quoted_identifier . -unquoted_identifier = ( letter ) { letter | digit } . -quoted_identifier = `"` unicode_char { unicode_char } `"` . -``` - -#### Examples: - -``` -cpu -_cpu_stats -"1h" -"anything really" -"1_Crazy-1337.identifier>NAME👍" -``` - -## Keywords - -``` -ALL ALTER ANY AS ASC BEGIN -BY CREATE CONTINUOUS DATABASE DATABASES DEFAULT -DELETE DESC DESTINATIONS DIAGNOSTICS DISTINCT DROP -DURATION END EVERY EXISTS EXPLAIN FIELD -FOR FORCE FROM GRANT GRANTS GROUP -GROUPS IF IN INF INNER INSERT -INTO KEY KEYS LIMIT SHOW MEASUREMENT -MEASUREMENTS NOT OFFSET ON ORDER PASSWORD -POLICY POLICIES PRIVILEGES QUERIES QUERY READ -REPLICATION RESAMPLE RETENTION REVOKE SELECT SERIES -SERVER SERVERS SET SHARD SHARDS SLIMIT -SOFFSET STATS SUBSCRIPTION SUBSCRIPTIONS TAG TO -USER USERS VALUES WHERE WITH WRITE -``` - -## Literals - -### Integers - -InfluxQL supports decimal integer literals. Hexadecimal and octal literals are not currently supported. - -``` -int_lit = ( "1" … "9" ) { digit } . -``` - -### Floats - -InfluxQL supports floating-point literals. Exponents are not currently supported. - -``` -float_lit = int_lit "." int_lit . -``` - -### Strings - -String literals must be surrounded by single quotes. Strings may contain `'` characters as long as they are escaped (i.e., `\'`). - -``` -string_lit = `'` { unicode_char } `'` . -``` - -### Durations - -Duration literals specify a length of time. An integer literal followed immediately (with no spaces) by a duration unit listed below is interpreted as a duration literal. - -### Duration units -| Units | Meaning | -|--------|-----------------------------------------| -| u or µ | microseconds (1 millionth of a second) | -| ms | milliseconds (1 thousandth of a second) | -| s | second | -| m | minute | -| h | hour | -| d | day | -| w | week | - -``` -duration_lit = int_lit duration_unit . -duration_unit = "u" | "µ" | "s" | "h" | "d" | "w" | "ms" . -``` - -### Dates & Times - -The date and time literal format is not specified in EBNF like the rest of this document. It is specified using Go's date / time parsing format, which is a reference date written in the format required by InfluxQL. The reference date time is: - -InfluxQL reference date time: January 2nd, 2006 at 3:04:05 PM - -``` -time_lit = "2006-01-02 15:04:05.999999" | "2006-01-02" . -``` - -### Booleans - -``` -bool_lit = TRUE | FALSE . -``` - -### Regular Expressions - -``` -regex_lit = "/" { unicode_char } "/" . -``` - -## Queries - -A query is composed of one or more statements separated by a semicolon. - -``` -query = statement { ";" statement } . 
- -statement = alter_retention_policy_stmt | - create_continuous_query_stmt | - create_database_stmt | - create_retention_policy_stmt | - create_subscription_stmt | - create_user_stmt | - delete_stmt | - drop_continuous_query_stmt | - drop_database_stmt | - drop_measurement_stmt | - drop_retention_policy_stmt | - drop_series_stmt | - drop_subscription_stmt | - drop_user_stmt | - grant_stmt | - show_continuous_queries_stmt | - show_databases_stmt | - show_field_keys_stmt | - show_grants_stmt | - show_measurements_stmt | - show_retention_policies | - show_series_stmt | - show_shard_groups_stmt | - show_shards_stmt | - show_subscriptions_stmt| - show_tag_keys_stmt | - show_tag_values_stmt | - show_users_stmt | - revoke_stmt | - select_stmt . -``` - -## Statements - -### ALTER RETENTION POLICY - -``` -alter_retention_policy_stmt = "ALTER RETENTION POLICY" policy_name on_clause - retention_policy_option - [ retention_policy_option ] - [ retention_policy_option ] . -``` - -#### Examples: - -```sql --- Set default retention policy for mydb to 1h.cpu. -ALTER RETENTION POLICY "1h.cpu" ON mydb DEFAULT; - --- Change duration and replication factor. -ALTER RETENTION POLICY policy1 ON somedb DURATION 1h REPLICATION 4 -``` - -### CREATE CONTINUOUS QUERY - -``` -create_continuous_query_stmt = "CREATE CONTINUOUS QUERY" query_name on_clause - [ "RESAMPLE" resample_opts ] - "BEGIN" select_stmt "END" . - -query_name = identifier . - -resample_opts = (every_stmt for_stmt | every_stmt | for_stmt) . -every_stmt = "EVERY" duration_lit -for_stmt = "FOR" duration_lit -``` - -#### Examples: - -```sql --- selects from default retention policy and writes into 6_months retention policy -CREATE CONTINUOUS QUERY "10m_event_count" -ON db_name -BEGIN - SELECT count(value) - INTO "6_months".events - FROM events - GROUP BY time(10m) -END; - --- this selects from the output of one continuous query in one retention policy and outputs to another series in another retention policy -CREATE CONTINUOUS QUERY "1h_event_count" -ON db_name -BEGIN - SELECT sum(count) as count - INTO "2_years".events - FROM "6_months".events - GROUP BY time(1h) -END; - --- this customizes the resample interval so the interval is queried every 10s and intervals are resampled until 2m after their start time --- when resample is used, at least one of "EVERY" or "FOR" must be used -CREATE CONTINUOUS QUERY "cpu_mean" -ON db_name -RESAMPLE EVERY 10s FOR 2m -BEGIN - SELECT mean(value) - INTO "cpu_mean" - FROM "cpu" - GROUP BY time(1m) -END; -``` - -### CREATE DATABASE - -``` -create_database_stmt = "CREATE DATABASE" db_name . -``` - -#### Example: - -```sql -CREATE DATABASE foo -``` - -### CREATE RETENTION POLICY - -``` -create_retention_policy_stmt = "CREATE RETENTION POLICY" policy_name on_clause - retention_policy_duration - retention_policy_replication - [ "DEFAULT" ] . -``` - -#### Examples - -```sql --- Create a retention policy. -CREATE RETENTION POLICY "10m.events" ON somedb DURATION 10m REPLICATION 2; - --- Create a retention policy and set it as the default. -CREATE RETENTION POLICY "10m.events" ON somedb DURATION 10m REPLICATION 2 DEFAULT; -``` - -### CREATE SUBSCRIPTION - -``` -create_subscription_stmt = "CREATE SUBSCRIPTION" subscription_name "ON" db_name "." retention_policy "DESTINATIONS" ("ANY"|"ALL") host { "," host} . -``` - -#### Examples: - -```sql --- Create a SUBSCRIPTION on database 'mydb' and retention policy 'default' that send data to 'example.com:9090' via UDP. 
-CREATE SUBSCRIPTION sub0 ON "mydb"."default" DESTINATIONS ALL 'udp://example.com:9090' ; - --- Create a SUBSCRIPTION on database 'mydb' and retention policy 'default' that round robins the data to 'h1.example.com:9090' and 'h2.example.com:9090'. -CREATE SUBSCRIPTION sub0 ON "mydb"."default" DESTINATIONS ANY 'udp://h1.example.com:9090', 'udp://h2.example.com:9090'; -``` - -### CREATE USER - -``` -create_user_stmt = "CREATE USER" user_name "WITH PASSWORD" password - [ "WITH ALL PRIVILEGES" ] . -``` - -#### Examples: - -```sql --- Create a normal database user. -CREATE USER jdoe WITH PASSWORD '1337password'; - --- Create a cluster admin. --- Note: Unlike the GRANT statement, the "PRIVILEGES" keyword is required here. -CREATE USER jdoe WITH PASSWORD '1337password' WITH ALL PRIVILEGES; -``` - -### DELETE - -``` -delete_stmt = "DELETE FROM" measurement where_clause . -``` - -#### Example: - -```sql --- delete data points from the cpu measurement where the region tag --- equals 'uswest' -DELETE FROM cpu WHERE region = 'uswest'; -``` - -### DROP CONTINUOUS QUERY - -``` -drop_continuous_query_stmt = "DROP CONTINUOUS QUERY" query_name on_clause . -``` - -#### Example: - -```sql -DROP CONTINUOUS QUERY myquery ON mydb; -``` - -### DROP DATABASE - -``` -drop_database_stmt = "DROP DATABASE" db_name . -``` - -#### Example: - -```sql -DROP DATABASE mydb; -``` - -### DROP MEASUREMENT - -``` -drop_measurement_stmt = "DROP MEASUREMENT" measurement_name . -``` - -#### Examples: - -```sql --- drop the cpu measurement -DROP MEASUREMENT cpu; -``` - -### DROP RETENTION POLICY - -``` -drop_retention_policy_stmt = "DROP RETENTION POLICY" policy_name on_clause . -``` - -#### Example: - -```sql --- drop the retention policy named 1h.cpu from mydb -DROP RETENTION POLICY "1h.cpu" ON mydb; -``` - -### DROP SERIES - -``` -drop_series_stmt = "DROP SERIES" ( from_clause | where_clause | from_clause where_clause ) . -``` - -#### Example: - -```sql - -``` - -### DROP SUBSCRIPTION - -``` -drop_subscription_stmt = "DROP SUBSCRIPTION" subscription_name "ON" db_name "." retention_policy . -``` - -#### Example: - -```sql -DROP SUBSCRIPTION sub0 ON "mydb"."default"; - -``` - -### DROP USER - -``` -drop_user_stmt = "DROP USER" user_name . -``` - -#### Example: - -```sql -DROP USER jdoe; - -``` - -### GRANT - -NOTE: Users can be granted privileges on databases that do not exist. - -``` -grant_stmt = "GRANT" privilege [ on_clause ] to_clause . -``` - -#### Examples: - -```sql --- grant cluster admin privileges -GRANT ALL TO jdoe; - --- grant read access to a database -GRANT READ ON mydb TO jdoe; -``` - -### SHOW CONTINUOUS QUERIES - -``` -show_continuous_queries_stmt = "SHOW CONTINUOUS QUERIES" . -``` - -#### Example: - -```sql --- show all continuous queries -SHOW CONTINUOUS QUERIES; -``` - -### SHOW DATABASES - -``` -show_databases_stmt = "SHOW DATABASES" . -``` - -#### Example: - -```sql --- show all databases -SHOW DATABASES; -``` - -### SHOW FIELD KEYS - -``` -show_field_keys_stmt = "SHOW FIELD KEYS" [ from_clause ] . -``` - -#### Examples: - -```sql --- show field keys from all measurements -SHOW FIELD KEYS; - --- show field keys from specified measurement -SHOW FIELD KEYS FROM cpu; -``` - -### SHOW GRANTS - -``` -show_grants_stmt = "SHOW GRANTS FOR" user_name . -``` - -#### Example: - -```sql --- show grants for jdoe -SHOW GRANTS FOR jdoe; -``` - -### SHOW MEASUREMENTS - -``` -show_measurements_stmt = "SHOW MEASUREMENTS" [ with_measurement_clause ] [ where_clause ] [ limit_clause ] [ offset_clause ] . 
-``` - -```sql --- show all measurements -SHOW MEASUREMENTS; - --- show measurements where region tag = 'uswest' AND host tag = 'serverA' -SHOW MEASUREMENTS WHERE region = 'uswest' AND host = 'serverA'; -``` - -### SHOW RETENTION POLICIES - -``` -show_retention_policies = "SHOW RETENTION POLICIES" on_clause . -``` - -#### Example: - -```sql --- show all retention policies on a database -SHOW RETENTION POLICIES ON mydb; -``` - -### SHOW SERIES - -``` -show_series_stmt = "SHOW SERIES" [ from_clause ] [ where_clause ] [ limit_clause ] [ offset_clause ] . -``` - -#### Example: - -```sql - -``` - -### SHOW SHARD GROUPS - -``` -show_shard_groups_stmt = "SHOW SHARD GROUPS" . -``` - -#### Example: - -```sql -SHOW SHARD GROUPS; -``` - -### SHOW SHARDS - -``` -show_shards_stmt = "SHOW SHARDS" . -``` - -#### Example: - -```sql -SHOW SHARDS; -``` - -### SHOW SUBSCRIPTIONS - -``` -show_subscriptions_stmt = "SHOW SUBSCRIPTIONS" . -``` - -#### Example: - -```sql -SHOW SUBSCRIPTIONS; -``` - -### SHOW TAG KEYS - -``` -show_tag_keys_stmt = "SHOW TAG KEYS" [ from_clause ] [ where_clause ] [ group_by_clause ] - [ limit_clause ] [ offset_clause ] . -``` - -#### Examples: - -```sql --- show all tag keys -SHOW TAG KEYS; - --- show all tag keys from the cpu measurement -SHOW TAG KEYS FROM cpu; - --- show all tag keys from the cpu measurement where the region key = 'uswest' -SHOW TAG KEYS FROM cpu WHERE region = 'uswest'; - --- show all tag keys where the host key = 'serverA' -SHOW TAG KEYS WHERE host = 'serverA'; -``` - -### SHOW TAG VALUES - -``` -show_tag_values_stmt = "SHOW TAG VALUES" [ from_clause ] with_tag_clause [ where_clause ] - [ group_by_clause ] [ limit_clause ] [ offset_clause ] . -``` - -#### Examples: - -```sql --- show all tag values across all measurements for the region tag -SHOW TAG VALUES WITH TAG = 'region'; - --- show tag values from the cpu measurement for the region tag -SHOW TAG VALUES FROM cpu WITH KEY = 'region'; - --- show tag values from the cpu measurement for region & host tag keys where service = 'redis' -SHOW TAG VALUES FROM cpu WITH KEY IN (region, host) WHERE service = 'redis'; -``` - -### SHOW USERS - -``` -show_users_stmt = "SHOW USERS" . -``` - -#### Example: - -```sql --- show all users -SHOW USERS; -``` - -### REVOKE - -``` -revoke_stmt = "REVOKE" privilege [ on_clause ] "FROM" user_name . -``` - -#### Examples: - -```sql --- revoke cluster admin from jdoe -REVOKE ALL PRIVILEGES FROM jdoe; - --- revoke read privileges from jdoe on mydb -REVOKE READ ON mydb FROM jdoe; -``` - -### SELECT - -``` -select_stmt = "SELECT" fields from_clause [ into_clause ] [ where_clause ] - [ group_by_clause ] [ order_by_clause ] [ limit_clause ] - [ offset_clause ] [ slimit_clause ] [ soffset_clause ] . -``` - -#### Examples: - -```sql --- select mean value from the cpu measurement where region = 'uswest' grouped by 10 minute intervals -SELECT mean(value) FROM cpu WHERE region = 'uswest' GROUP BY time(10m) fill(0); - --- select from all measurements beginning with cpu into the same measurement name in the cpu_1h retention policy -SELECT mean(value) INTO cpu_1h.:MEASUREMENT FROM /cpu.*/ -``` - -## Clauses - -``` -from_clause = "FROM" measurements . - -group_by_clause = "GROUP BY" dimensions fill(fill_option). - -into_clause = "INTO" ( measurement | back_ref ). - -limit_clause = "LIMIT" int_lit . - -offset_clause = "OFFSET" int_lit . - -slimit_clause = "SLIMIT" int_lit . - -soffset_clause = "SOFFSET" int_lit . - -on_clause = "ON" db_name . - -order_by_clause = "ORDER BY" sort_fields . 
- -to_clause = "TO" user_name . - -where_clause = "WHERE" expr . - -with_measurement_clause = "WITH MEASUREMENT" ( "=" measurement | "=~" regex_lit ) . - -with_tag_clause = "WITH KEY" ( "=" tag_key | "IN (" tag_keys ")" ) . -``` - -## Expressions - -``` -binary_op = "+" | "-" | "*" | "/" | "AND" | "OR" | "=" | "!=" | "<" | - "<=" | ">" | ">=" . - -expr = unary_expr { binary_op unary_expr } . - -unary_expr = "(" expr ")" | var_ref | time_lit | string_lit | int_lit | - float_lit | bool_lit | duration_lit | regex_lit . -``` - -## Other - -``` -alias = "AS" identifier . - -back_ref = ( policy_name ".:MEASUREMENT" ) | - ( db_name "." [ policy_name ] ".:MEASUREMENT" ) . - -db_name = identifier . - -dimension = expr . - -dimensions = dimension { "," dimension } . - -field_key = identifier . - -field = expr [ alias ] . - -fields = field { "," field } . - -fill_option = "null" | "none" | "previous" | int_lit | float_lit . - -host = string_lit . - -measurement = measurement_name | - ( policy_name "." measurement_name ) | - ( db_name "." [ policy_name ] "." measurement_name ) . - -measurements = measurement { "," measurement } . - -measurement_name = identifier . - -password = string_lit . - -policy_name = identifier . - -privilege = "ALL" [ "PRIVILEGES" ] | "READ" | "WRITE" . - -query_name = identifier . - -retention_policy = identifier . - -retention_policy_option = retention_policy_duration | - retention_policy_replication | - "DEFAULT" . - -retention_policy_duration = "DURATION" duration_lit . -retention_policy_replication = "REPLICATION" int_lit - -series_id = int_lit . - -sort_field = field_key [ ASC | DESC ] . - -sort_fields = sort_field { "," sort_field } . - -subscription_name = identifier . - -tag_key = identifier . - -tag_keys = tag_key { "," tag_key } . - -user_name = identifier . - -var_ref = measurement . -``` diff -Nru influxdb-0.10.0+dfsg1/influxql/internal/internal.pb.go influxdb-1.1.1+dfsg1/influxql/internal/internal.pb.go --- influxdb-0.10.0+dfsg1/influxql/internal/internal.pb.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/internal/internal.pb.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,528 @@ +// Code generated by protoc-gen-gogo. +// source: internal/internal.proto +// DO NOT EDIT! + +/* +Package influxql is a generated protocol buffer package. + +It is generated from these files: + internal/internal.proto + +It has these top-level messages: + Point + Aux + IteratorOptions + Measurements + Measurement + Interval + IteratorStats + VarRef +*/ +package influxql + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
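The generated message types follow the usual gogo/protobuf conventions: every field is a pointer, the four required Point fields must be populated before marshalling, and each field has a nil-safe getter. As a rough sketch only (the import alias, tag string, and sample values below are assumptions for illustration, not part of this diff), a Point can be round-tripped through the wire format like this:

```go
package main

import (
	"fmt"

	proto "github.com/gogo/protobuf/proto"
	internal "github.com/influxdata/influxdb/influxql/internal"
)

func main() {
	// Required fields (Name, Tags, Time, Nil) are set via the pointer helpers;
	// optional typed values such as FloatValue may be left nil.
	p := &internal.Point{
		Name:       proto.String("cpu"),
		Tags:       proto.String("host=serverA,region=uswest"),
		Time:       proto.Int64(1000000000),
		Nil:        proto.Bool(false),
		FloatValue: proto.Float64(0.64),
	}

	buf, err := proto.Marshal(p)
	if err != nil {
		panic(err)
	}

	// The generated getters are safe to call even on unset optional fields.
	var out internal.Point
	if err := proto.Unmarshal(buf, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetName(), out.GetTime(), out.GetFloatValue())
}
```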
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type Point struct { + Name *string `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"` + Tags *string `protobuf:"bytes,2,req,name=Tags" json:"Tags,omitempty"` + Time *int64 `protobuf:"varint,3,req,name=Time" json:"Time,omitempty"` + Nil *bool `protobuf:"varint,4,req,name=Nil" json:"Nil,omitempty"` + Aux []*Aux `protobuf:"bytes,5,rep,name=Aux" json:"Aux,omitempty"` + Aggregated *uint32 `protobuf:"varint,6,opt,name=Aggregated" json:"Aggregated,omitempty"` + FloatValue *float64 `protobuf:"fixed64,7,opt,name=FloatValue" json:"FloatValue,omitempty"` + IntegerValue *int64 `protobuf:"varint,8,opt,name=IntegerValue" json:"IntegerValue,omitempty"` + StringValue *string `protobuf:"bytes,9,opt,name=StringValue" json:"StringValue,omitempty"` + BooleanValue *bool `protobuf:"varint,10,opt,name=BooleanValue" json:"BooleanValue,omitempty"` + Stats *IteratorStats `protobuf:"bytes,11,opt,name=Stats" json:"Stats,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Point) Reset() { *m = Point{} } +func (m *Point) String() string { return proto.CompactTextString(m) } +func (*Point) ProtoMessage() {} +func (*Point) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{0} } + +func (m *Point) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Point) GetTags() string { + if m != nil && m.Tags != nil { + return *m.Tags + } + return "" +} + +func (m *Point) GetTime() int64 { + if m != nil && m.Time != nil { + return *m.Time + } + return 0 +} + +func (m *Point) GetNil() bool { + if m != nil && m.Nil != nil { + return *m.Nil + } + return false +} + +func (m *Point) GetAux() []*Aux { + if m != nil { + return m.Aux + } + return nil +} + +func (m *Point) GetAggregated() uint32 { + if m != nil && m.Aggregated != nil { + return *m.Aggregated + } + return 0 +} + +func (m *Point) GetFloatValue() float64 { + if m != nil && m.FloatValue != nil { + return *m.FloatValue + } + return 0 +} + +func (m *Point) GetIntegerValue() int64 { + if m != nil && m.IntegerValue != nil { + return *m.IntegerValue + } + return 0 +} + +func (m *Point) GetStringValue() string { + if m != nil && m.StringValue != nil { + return *m.StringValue + } + return "" +} + +func (m *Point) GetBooleanValue() bool { + if m != nil && m.BooleanValue != nil { + return *m.BooleanValue + } + return false +} + +func (m *Point) GetStats() *IteratorStats { + if m != nil { + return m.Stats + } + return nil +} + +type Aux struct { + DataType *int32 `protobuf:"varint,1,req,name=DataType" json:"DataType,omitempty"` + FloatValue *float64 `protobuf:"fixed64,2,opt,name=FloatValue" json:"FloatValue,omitempty"` + IntegerValue *int64 `protobuf:"varint,3,opt,name=IntegerValue" json:"IntegerValue,omitempty"` + StringValue *string `protobuf:"bytes,4,opt,name=StringValue" json:"StringValue,omitempty"` + BooleanValue *bool `protobuf:"varint,5,opt,name=BooleanValue" json:"BooleanValue,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Aux) Reset() { *m = Aux{} } +func (m *Aux) String() string { return proto.CompactTextString(m) } +func (*Aux) ProtoMessage() {} +func (*Aux) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{1} } + +func (m *Aux) GetDataType() int32 { + if m != nil && m.DataType != nil { + return *m.DataType + } + return 0 +} + +func (m *Aux) GetFloatValue() float64 { + if m != nil && m.FloatValue != nil { + return *m.FloatValue + } + return 0 +} + +func (m *Aux) GetIntegerValue() 
int64 { + if m != nil && m.IntegerValue != nil { + return *m.IntegerValue + } + return 0 +} + +func (m *Aux) GetStringValue() string { + if m != nil && m.StringValue != nil { + return *m.StringValue + } + return "" +} + +func (m *Aux) GetBooleanValue() bool { + if m != nil && m.BooleanValue != nil { + return *m.BooleanValue + } + return false +} + +type IteratorOptions struct { + Expr *string `protobuf:"bytes,1,opt,name=Expr" json:"Expr,omitempty"` + Aux []string `protobuf:"bytes,2,rep,name=Aux" json:"Aux,omitempty"` + Fields []*VarRef `protobuf:"bytes,17,rep,name=Fields" json:"Fields,omitempty"` + Sources []*Measurement `protobuf:"bytes,3,rep,name=Sources" json:"Sources,omitempty"` + Interval *Interval `protobuf:"bytes,4,opt,name=Interval" json:"Interval,omitempty"` + Dimensions []string `protobuf:"bytes,5,rep,name=Dimensions" json:"Dimensions,omitempty"` + Fill *int32 `protobuf:"varint,6,opt,name=Fill" json:"Fill,omitempty"` + FillValue *float64 `protobuf:"fixed64,7,opt,name=FillValue" json:"FillValue,omitempty"` + Condition *string `protobuf:"bytes,8,opt,name=Condition" json:"Condition,omitempty"` + StartTime *int64 `protobuf:"varint,9,opt,name=StartTime" json:"StartTime,omitempty"` + EndTime *int64 `protobuf:"varint,10,opt,name=EndTime" json:"EndTime,omitempty"` + Ascending *bool `protobuf:"varint,11,opt,name=Ascending" json:"Ascending,omitempty"` + Limit *int64 `protobuf:"varint,12,opt,name=Limit" json:"Limit,omitempty"` + Offset *int64 `protobuf:"varint,13,opt,name=Offset" json:"Offset,omitempty"` + SLimit *int64 `protobuf:"varint,14,opt,name=SLimit" json:"SLimit,omitempty"` + SOffset *int64 `protobuf:"varint,15,opt,name=SOffset" json:"SOffset,omitempty"` + Dedupe *bool `protobuf:"varint,16,opt,name=Dedupe" json:"Dedupe,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IteratorOptions) Reset() { *m = IteratorOptions{} } +func (m *IteratorOptions) String() string { return proto.CompactTextString(m) } +func (*IteratorOptions) ProtoMessage() {} +func (*IteratorOptions) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{2} } + +func (m *IteratorOptions) GetExpr() string { + if m != nil && m.Expr != nil { + return *m.Expr + } + return "" +} + +func (m *IteratorOptions) GetAux() []string { + if m != nil { + return m.Aux + } + return nil +} + +func (m *IteratorOptions) GetFields() []*VarRef { + if m != nil { + return m.Fields + } + return nil +} + +func (m *IteratorOptions) GetSources() []*Measurement { + if m != nil { + return m.Sources + } + return nil +} + +func (m *IteratorOptions) GetInterval() *Interval { + if m != nil { + return m.Interval + } + return nil +} + +func (m *IteratorOptions) GetDimensions() []string { + if m != nil { + return m.Dimensions + } + return nil +} + +func (m *IteratorOptions) GetFill() int32 { + if m != nil && m.Fill != nil { + return *m.Fill + } + return 0 +} + +func (m *IteratorOptions) GetFillValue() float64 { + if m != nil && m.FillValue != nil { + return *m.FillValue + } + return 0 +} + +func (m *IteratorOptions) GetCondition() string { + if m != nil && m.Condition != nil { + return *m.Condition + } + return "" +} + +func (m *IteratorOptions) GetStartTime() int64 { + if m != nil && m.StartTime != nil { + return *m.StartTime + } + return 0 +} + +func (m *IteratorOptions) GetEndTime() int64 { + if m != nil && m.EndTime != nil { + return *m.EndTime + } + return 0 +} + +func (m *IteratorOptions) GetAscending() bool { + if m != nil && m.Ascending != nil { + return *m.Ascending + } + return false +} + +func (m *IteratorOptions) 
GetLimit() int64 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +func (m *IteratorOptions) GetOffset() int64 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return 0 +} + +func (m *IteratorOptions) GetSLimit() int64 { + if m != nil && m.SLimit != nil { + return *m.SLimit + } + return 0 +} + +func (m *IteratorOptions) GetSOffset() int64 { + if m != nil && m.SOffset != nil { + return *m.SOffset + } + return 0 +} + +func (m *IteratorOptions) GetDedupe() bool { + if m != nil && m.Dedupe != nil { + return *m.Dedupe + } + return false +} + +type Measurements struct { + Items []*Measurement `protobuf:"bytes,1,rep,name=Items" json:"Items,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Measurements) Reset() { *m = Measurements{} } +func (m *Measurements) String() string { return proto.CompactTextString(m) } +func (*Measurements) ProtoMessage() {} +func (*Measurements) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{3} } + +func (m *Measurements) GetItems() []*Measurement { + if m != nil { + return m.Items + } + return nil +} + +type Measurement struct { + Database *string `protobuf:"bytes,1,opt,name=Database" json:"Database,omitempty"` + RetentionPolicy *string `protobuf:"bytes,2,opt,name=RetentionPolicy" json:"RetentionPolicy,omitempty"` + Name *string `protobuf:"bytes,3,opt,name=Name" json:"Name,omitempty"` + Regex *string `protobuf:"bytes,4,opt,name=Regex" json:"Regex,omitempty"` + IsTarget *bool `protobuf:"varint,5,opt,name=IsTarget" json:"IsTarget,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Measurement) Reset() { *m = Measurement{} } +func (m *Measurement) String() string { return proto.CompactTextString(m) } +func (*Measurement) ProtoMessage() {} +func (*Measurement) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{4} } + +func (m *Measurement) GetDatabase() string { + if m != nil && m.Database != nil { + return *m.Database + } + return "" +} + +func (m *Measurement) GetRetentionPolicy() string { + if m != nil && m.RetentionPolicy != nil { + return *m.RetentionPolicy + } + return "" +} + +func (m *Measurement) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Measurement) GetRegex() string { + if m != nil && m.Regex != nil { + return *m.Regex + } + return "" +} + +func (m *Measurement) GetIsTarget() bool { + if m != nil && m.IsTarget != nil { + return *m.IsTarget + } + return false +} + +type Interval struct { + Duration *int64 `protobuf:"varint,1,opt,name=Duration" json:"Duration,omitempty"` + Offset *int64 `protobuf:"varint,2,opt,name=Offset" json:"Offset,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Interval) Reset() { *m = Interval{} } +func (m *Interval) String() string { return proto.CompactTextString(m) } +func (*Interval) ProtoMessage() {} +func (*Interval) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{5} } + +func (m *Interval) GetDuration() int64 { + if m != nil && m.Duration != nil { + return *m.Duration + } + return 0 +} + +func (m *Interval) GetOffset() int64 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return 0 +} + +type IteratorStats struct { + SeriesN *int64 `protobuf:"varint,1,opt,name=SeriesN" json:"SeriesN,omitempty"` + PointN *int64 `protobuf:"varint,2,opt,name=PointN" json:"PointN,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IteratorStats) Reset() { *m = IteratorStats{} } +func (m *IteratorStats) String() string { return 
proto.CompactTextString(m) } +func (*IteratorStats) ProtoMessage() {} +func (*IteratorStats) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{6} } + +func (m *IteratorStats) GetSeriesN() int64 { + if m != nil && m.SeriesN != nil { + return *m.SeriesN + } + return 0 +} + +func (m *IteratorStats) GetPointN() int64 { + if m != nil && m.PointN != nil { + return *m.PointN + } + return 0 +} + +type VarRef struct { + Val *string `protobuf:"bytes,1,req,name=Val" json:"Val,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=Type" json:"Type,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *VarRef) Reset() { *m = VarRef{} } +func (m *VarRef) String() string { return proto.CompactTextString(m) } +func (*VarRef) ProtoMessage() {} +func (*VarRef) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{7} } + +func (m *VarRef) GetVal() string { + if m != nil && m.Val != nil { + return *m.Val + } + return "" +} + +func (m *VarRef) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return 0 +} + +func init() { + proto.RegisterType((*Point)(nil), "influxql.Point") + proto.RegisterType((*Aux)(nil), "influxql.Aux") + proto.RegisterType((*IteratorOptions)(nil), "influxql.IteratorOptions") + proto.RegisterType((*Measurements)(nil), "influxql.Measurements") + proto.RegisterType((*Measurement)(nil), "influxql.Measurement") + proto.RegisterType((*Interval)(nil), "influxql.Interval") + proto.RegisterType((*IteratorStats)(nil), "influxql.IteratorStats") + proto.RegisterType((*VarRef)(nil), "influxql.VarRef") +} + +func init() { proto.RegisterFile("internal/internal.proto", fileDescriptorInternal) } + +var fileDescriptorInternal = []byte{ + // 685 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x54, 0xd1, 0x6a, 0xdc, 0x3a, + 0x10, 0xc5, 0xf6, 0x7a, 0x63, 0x6b, 0xb3, 0x37, 0xb9, 0x22, 0xf7, 0x46, 0x94, 0xd2, 0x1a, 0x3f, + 0x19, 0x4a, 0x37, 0x90, 0xd7, 0x42, 0x61, 0xdb, 0x24, 0xb0, 0xd0, 0x6e, 0x82, 0x1c, 0xf2, 0xae, + 0x66, 0x67, 0x8d, 0xc0, 0x2b, 0x6f, 0x65, 0xb9, 0x6c, 0xde, 0xfa, 0x1b, 0xfd, 0x86, 0x7e, 0x4c, + 0x7f, 0xa9, 0x68, 0x64, 0xaf, 0x9d, 0x14, 0x9a, 0x27, 0xcf, 0x39, 0x33, 0x92, 0x7c, 0x66, 0x8e, + 0x44, 0x4e, 0xa5, 0x32, 0xa0, 0x95, 0x28, 0xcf, 0xba, 0x60, 0xb6, 0xd5, 0x95, 0xa9, 0x68, 0x24, + 0xd5, 0xba, 0x6c, 0x76, 0x5f, 0xcb, 0xf4, 0x97, 0x4f, 0xc2, 0x9b, 0x4a, 0x2a, 0x43, 0x29, 0x19, + 0x2d, 0xc5, 0x06, 0x98, 0x97, 0xf8, 0x59, 0xcc, 0x31, 0xb6, 0xdc, 0xad, 0x28, 0x6a, 0xe6, 0x3b, + 0xce, 0xc6, 0xc8, 0xc9, 0x0d, 0xb0, 0x20, 0xf1, 0xb3, 0x80, 0x63, 0x4c, 0x8f, 0x49, 0xb0, 0x94, + 0x25, 0x1b, 0x25, 0x7e, 0x16, 0x71, 0x1b, 0xd2, 0xd7, 0x24, 0x98, 0x37, 0x3b, 0x16, 0x26, 0x41, + 0x36, 0x39, 0x9f, 0xce, 0xba, 0xf3, 0x66, 0xf3, 0x66, 0xc7, 0x6d, 0x86, 0xbe, 0x22, 0x64, 0x5e, + 0x14, 0x1a, 0x0a, 0x61, 0x60, 0xc5, 0xc6, 0x89, 0x97, 0x4d, 0xf9, 0x80, 0xb1, 0xf9, 0xab, 0xb2, + 0x12, 0xe6, 0x4e, 0x94, 0x0d, 0xb0, 0x83, 0xc4, 0xcb, 0x3c, 0x3e, 0x60, 0x68, 0x4a, 0x0e, 0x17, + 0xca, 0x40, 0x01, 0xda, 0x55, 0x44, 0x89, 0x97, 0x05, 0xfc, 0x11, 0x47, 0x13, 0x32, 0xc9, 0x8d, + 0x96, 0xaa, 0x70, 0x25, 0x71, 0xe2, 0x65, 0x31, 0x1f, 0x52, 0x76, 0x97, 0x0f, 0x55, 0x55, 0x82, + 0x50, 0xae, 0x84, 0x24, 0x5e, 0x16, 0xf1, 0x47, 0x1c, 0x7d, 0x4b, 0xc2, 0xdc, 0x08, 0x53, 0xb3, + 0x49, 0xe2, 0x65, 0x93, 0xf3, 0xd3, 0x5e, 0xcc, 0xc2, 0x80, 0x16, 0xa6, 0xd2, 0x98, 0xe6, 0xae, + 0x2a, 0xfd, 0xe9, 0xa1, 0x74, 0xfa, 0x82, 0x44, 0x17, 0xc2, 0x88, 0xdb, 0x87, 0xad, 0xeb, 0x69, + 0xc8, 0xf7, 0xf8, 0x89, 0x38, 
0xff, 0x59, 0x71, 0xc1, 0xf3, 0xe2, 0x46, 0xcf, 0x8b, 0x0b, 0xff, + 0x14, 0x97, 0x7e, 0x1f, 0x91, 0xa3, 0x4e, 0xc6, 0xf5, 0xd6, 0xc8, 0x4a, 0xe1, 0x84, 0x2f, 0x77, + 0x5b, 0xcd, 0x3c, 0xdc, 0x12, 0x63, 0x3b, 0x61, 0x3b, 0x4f, 0x3f, 0x09, 0xb2, 0xd8, 0x0d, 0x30, + 0x23, 0xe3, 0x2b, 0x09, 0xe5, 0xaa, 0x66, 0xff, 0xe2, 0x90, 0x8f, 0xfb, 0xbe, 0xdc, 0x09, 0xcd, + 0x61, 0xcd, 0xdb, 0x3c, 0x3d, 0x23, 0x07, 0x79, 0xd5, 0xe8, 0x7b, 0xa8, 0x59, 0x80, 0xa5, 0xff, + 0xf5, 0xa5, 0x9f, 0x41, 0xd4, 0x8d, 0x86, 0x0d, 0x28, 0xc3, 0xbb, 0x2a, 0x3a, 0x23, 0x91, 0x95, + 0xaa, 0xbf, 0x89, 0x12, 0x75, 0x4d, 0xce, 0xe9, 0xa0, 0xe9, 0x6d, 0x86, 0xef, 0x6b, 0x6c, 0x3b, + 0x2f, 0xe4, 0x06, 0x54, 0x6d, 0x7f, 0x1f, 0x3d, 0x17, 0xf3, 0x01, 0x63, 0x05, 0x5d, 0xc9, 0xb2, + 0x44, 0x97, 0x85, 0x1c, 0x63, 0xfa, 0x92, 0xc4, 0xf6, 0x3b, 0xb4, 0x57, 0x4f, 0xd8, 0xec, 0xc7, + 0x4a, 0xad, 0xa4, 0x6d, 0x08, 0x5a, 0x2b, 0xe6, 0x3d, 0x61, 0xb3, 0xb9, 0x11, 0xda, 0xe0, 0x3d, + 0x88, 0x71, 0x36, 0x3d, 0x41, 0x19, 0x39, 0xb8, 0x54, 0x2b, 0xcc, 0x11, 0xcc, 0x75, 0xd0, 0xae, + 0x9b, 0xd7, 0xf7, 0xa0, 0x56, 0x52, 0x15, 0xe8, 0xa6, 0x88, 0xf7, 0x04, 0x3d, 0x21, 0xe1, 0x27, + 0xb9, 0x91, 0x86, 0x1d, 0xe2, 0x2a, 0x07, 0xe8, 0xff, 0x64, 0x7c, 0xbd, 0x5e, 0xd7, 0x60, 0xd8, + 0x14, 0xe9, 0x16, 0x59, 0x3e, 0x77, 0xe5, 0xff, 0x38, 0xde, 0x21, 0x7b, 0x7a, 0xde, 0x2e, 0x38, + 0x72, 0xa7, 0xe7, 0xfd, 0x8a, 0x0b, 0x58, 0x35, 0x5b, 0x60, 0xc7, 0x78, 0x74, 0x8b, 0xd2, 0x77, + 0xe4, 0x70, 0x30, 0x85, 0x9a, 0xbe, 0x21, 0xe1, 0xc2, 0xc0, 0xa6, 0x66, 0xde, 0xdf, 0x86, 0xe5, + 0x6a, 0xd2, 0x1f, 0x1e, 0x99, 0x0c, 0xe8, 0xce, 0xf5, 0x5f, 0x44, 0x0d, 0xad, 0x7f, 0xf6, 0x98, + 0x66, 0xe4, 0x88, 0x83, 0x01, 0x65, 0x7b, 0x78, 0x53, 0x95, 0xf2, 0xfe, 0x01, 0xad, 0x1f, 0xf3, + 0xa7, 0xf4, 0xfe, 0x2d, 0x0a, 0x9c, 0x03, 0xf1, 0x2d, 0x3a, 0x21, 0x21, 0x87, 0x02, 0x76, 0xad, + 0xd3, 0x1d, 0xb0, 0xe7, 0x2d, 0xea, 0x5b, 0xa1, 0x0b, 0x30, 0xad, 0xbf, 0xf7, 0x38, 0x7d, 0xdf, + 0xdb, 0x08, 0xff, 0xab, 0xd1, 0x02, 0xe7, 0xe9, 0x61, 0x5f, 0xf6, 0x78, 0xd0, 0x62, 0x7f, 0xd8, + 0xe2, 0x74, 0x4e, 0xa6, 0x8f, 0x6e, 0x38, 0xf6, 0x16, 0xb4, 0x84, 0x7a, 0xd9, 0xee, 0xd1, 0x41, + 0xbb, 0x05, 0xbe, 0xa2, 0xcb, 0x6e, 0x0b, 0x87, 0xd2, 0x19, 0x19, 0xbb, 0xcb, 0x60, 0x2f, 0xd0, + 0x9d, 0x28, 0xdb, 0xd7, 0xd5, 0x86, 0xf8, 0x90, 0xda, 0xc7, 0xc1, 0x77, 0xae, 0xb4, 0xf1, 0xef, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x8c, 0x01, 0x5d, 0x11, 0xb2, 0x05, 0x00, 0x00, +} diff -Nru influxdb-0.10.0+dfsg1/influxql/internal/internal.proto influxdb-1.1.1+dfsg1/influxql/internal/internal.proto --- influxdb-0.10.0+dfsg1/influxql/internal/internal.proto 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/internal/internal.proto 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,72 @@ +package influxql; + +message Point { + required string Name = 1; + required string Tags = 2; + required int64 Time = 3; + required bool Nil = 4; + repeated Aux Aux = 5; + optional uint32 Aggregated = 6; + + optional double FloatValue = 7; + optional int64 IntegerValue = 8; + optional string StringValue = 9; + optional bool BooleanValue = 10; + + optional IteratorStats Stats = 11; +} + +message Aux { + required int32 DataType = 1; + optional double FloatValue = 2; + optional int64 IntegerValue = 3; + optional string StringValue = 4; + optional bool BooleanValue = 5; +} + +message IteratorOptions { + optional string Expr = 1; + repeated string Aux = 2; + repeated VarRef Fields = 17; + repeated Measurement Sources = 3; + optional Interval Interval = 4; + repeated string Dimensions = 5; + optional int32 Fill = 6; + 
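// FillValue is the constant substituted for empty intervals when Fill is the numeric fill option, fill(<number>). +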
optional double FillValue = 7; + optional string Condition = 8; + optional int64 StartTime = 9; + optional int64 EndTime = 10; + optional bool Ascending = 11; + optional int64 Limit = 12; + optional int64 Offset = 13; + optional int64 SLimit = 14; + optional int64 SOffset = 15; + optional bool Dedupe = 16; +} + +message Measurements { + repeated Measurement Items = 1; +} + +message Measurement { + optional string Database = 1; + optional string RetentionPolicy = 2; + optional string Name = 3; + optional string Regex = 4; + optional bool IsTarget = 5; +} + +message Interval { + optional int64 Duration = 1; + optional int64 Offset = 2; +} + +message IteratorStats { + optional int64 SeriesN = 1; + optional int64 PointN = 2; +} + +message VarRef { + required string Val = 1; + optional int32 Type = 2; +} diff -Nru influxdb-0.10.0+dfsg1/influxql/iterator.gen.go influxdb-1.1.1+dfsg1/influxql/iterator.gen.go --- influxdb-0.10.0+dfsg1/influxql/iterator.gen.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/iterator.gen.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,9664 @@ +// Generated by tmpl +// https://github.com/benbjohnson/tmpl +// +// DO NOT EDIT! +// Source: iterator.gen.go.tmpl + +package influxql + +import ( + "container/heap" + "encoding/binary" + "errors" + "fmt" + "io" + "sort" + "sync" + "time" + + "github.com/gogo/protobuf/proto" + internal "github.com/influxdata/influxdb/influxql/internal" +) + +// DefaultStatsInterval is the default value for IteratorEncoder.StatsInterval. +const DefaultStatsInterval = 10 * time.Second + +// FloatIterator represents a stream of float points. +type FloatIterator interface { + Iterator + Next() (*FloatPoint, error) +} + +// newFloatIterators converts a slice of Iterator to a slice of FloatIterator. +// Drop and closes any iterator in itrs that is not a FloatIterator and cannot +// be cast to a FloatIterator. +func newFloatIterators(itrs []Iterator) []FloatIterator { + a := make([]FloatIterator, 0, len(itrs)) + for _, itr := range itrs { + switch itr := itr.(type) { + case FloatIterator: + a = append(a, itr) + + case IntegerIterator: + a = append(a, &integerFloatCastIterator{input: itr}) + + default: + itr.Close() + } + } + return a +} + +// bufFloatIterator represents a buffered FloatIterator. +type bufFloatIterator struct { + itr FloatIterator + buf *FloatPoint +} + +// newBufFloatIterator returns a buffered FloatIterator. +func newBufFloatIterator(itr FloatIterator) *bufFloatIterator { + return &bufFloatIterator{itr: itr} +} + +// Stats returns statistics from the input iterator. +func (itr *bufFloatIterator) Stats() IteratorStats { return itr.itr.Stats() } + +// Close closes the underlying iterator. +func (itr *bufFloatIterator) Close() error { return itr.itr.Close() } + +// peek returns the next point without removing it from the iterator. +func (itr *bufFloatIterator) peek() (*FloatPoint, error) { + p, err := itr.Next() + if err != nil { + return nil, err + } + itr.unread(p) + return p, nil +} + +// peekTime returns the time of the next point. +// Returns zero time if no more points available. +func (itr *bufFloatIterator) peekTime() (int64, error) { + p, err := itr.peek() + if p == nil || err != nil { + return ZeroTime, err + } + return p.Time, nil +} + +// Next returns the current buffer, if exists, or calls the underlying iterator. 
+func (itr *bufFloatIterator) Next() (*FloatPoint, error) { + buf := itr.buf + if buf != nil { + itr.buf = nil + return buf, nil + } + return itr.itr.Next() +} + +// NextInWindow returns the next value if it is between [startTime, endTime). +// If the next value is outside the range then it is moved to the buffer. +func (itr *bufFloatIterator) NextInWindow(startTime, endTime int64) (*FloatPoint, error) { + v, err := itr.Next() + if v == nil || err != nil { + return nil, err + } else if t := v.Time; t >= endTime || t < startTime { + itr.unread(v) + return nil, nil + } + return v, nil +} + +// unread sets v to the buffer. It is read on the next call to Next(). +func (itr *bufFloatIterator) unread(v *FloatPoint) { itr.buf = v } + +// floatMergeIterator represents an iterator that combines multiple float iterators. +type floatMergeIterator struct { + inputs []FloatIterator + heap *floatMergeHeap + init bool + + // Current iterator and window. + curr *floatMergeHeapItem + window struct { + name string + tags string + startTime int64 + endTime int64 + } +} + +// newFloatMergeIterator returns a new instance of floatMergeIterator. +func newFloatMergeIterator(inputs []FloatIterator, opt IteratorOptions) *floatMergeIterator { + itr := &floatMergeIterator{ + inputs: inputs, + heap: &floatMergeHeap{ + items: make([]*floatMergeHeapItem, 0, len(inputs)), + opt: opt, + }, + } + + // Initialize heap items. + for _, input := range inputs { + // Wrap in buffer, ignore any inputs without anymore points. + bufInput := newBufFloatIterator(input) + + // Append to the heap. + itr.heap.items = append(itr.heap.items, &floatMergeHeapItem{itr: bufInput}) + } + + return itr +} + +// Stats returns an aggregation of stats from the underlying iterators. +func (itr *floatMergeIterator) Stats() IteratorStats { + var stats IteratorStats + for _, input := range itr.inputs { + stats.Add(input.Stats()) + } + return stats +} + +// Close closes the underlying iterators. +func (itr *floatMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + itr.curr = nil + itr.inputs = nil + itr.heap.items = nil + return nil +} + +// Next returns the next point from the iterator. +func (itr *floatMergeIterator) Next() (*FloatPoint, error) { + // Initialize the heap. This needs to be done lazily on the first call to this iterator + // so that iterator initialization done through the Select() call returns quickly. + // Queries can only be interrupted after the Select() call completes so any operations + // done during iterator creation cannot be interrupted, which is why we do it here + // instead so an interrupt can happen while initializing the heap. + if !itr.init { + items := itr.heap.items + itr.heap.items = make([]*floatMergeHeapItem, 0, len(items)) + for _, item := range items { + if p, err := item.itr.peek(); err != nil { + return nil, err + } else if p == nil { + continue + } + itr.heap.items = append(itr.heap.items, item) + } + heap.Init(itr.heap) + itr.init = true + } + + for { + // Retrieve the next iterator if we don't have one. + if itr.curr == nil { + if len(itr.heap.items) == 0 { + return nil, nil + } + itr.curr = heap.Pop(itr.heap).(*floatMergeHeapItem) + + // Read point and set current window. + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } + itr.window.name, itr.window.tags = p.Name, p.Tags.ID() + itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) + return p, nil + } + + // Read the next point from the current iterator. 
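+		// A nil point (with no error) means the current iterator is exhausted, so it is
+		// dropped and the next one is popped off the heap. A point that falls outside the
+		// current window is unread and its iterator is pushed back onto the heap so it can
+		// be re-ordered against the other inputs.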
+ p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } + + // If there are no more points then remove iterator from heap and find next. + if p == nil { + itr.curr = nil + continue + } + + // Check if the point is inside of our current window. + inWindow := true + if window := itr.window; window.name != p.Name { + inWindow = false + } else if window.tags != p.Tags.ID() { + inWindow = false + } else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime { + inWindow = false + } else if !opt.Ascending && p.Time < window.startTime { + inWindow = false + } + + // If it's outside our window then push iterator back on the heap and find new iterator. + if !inWindow { + itr.curr.itr.unread(p) + heap.Push(itr.heap, itr.curr) + itr.curr = nil + continue + } + + return p, nil + } +} + +// floatMergeHeap represents a heap of floatMergeHeapItems. +// Items are sorted by their next window and then by name/tags. +type floatMergeHeap struct { + opt IteratorOptions + items []*floatMergeHeapItem +} + +func (h floatMergeHeap) Len() int { return len(h.items) } +func (h floatMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h floatMergeHeap) Less(i, j int) bool { + x, err := h.items[i].itr.peek() + if err != nil { + return true + } + y, err := h.items[j].itr.peek() + if err != nil { + return false + } + + if h.opt.Ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if x.Tags.ID() != y.Tags.ID() { + return x.Tags.ID() < y.Tags.ID() + } + } else { + if x.Name != y.Name { + return x.Name > y.Name + } else if x.Tags.ID() != y.Tags.ID() { + return x.Tags.ID() > y.Tags.ID() + } + } + + xt, _ := h.opt.Window(x.Time) + yt, _ := h.opt.Window(y.Time) + + if h.opt.Ascending { + return xt < yt + } + return xt > yt +} + +func (h *floatMergeHeap) Push(x interface{}) { + h.items = append(h.items, x.(*floatMergeHeapItem)) +} + +func (h *floatMergeHeap) Pop() interface{} { + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} + +type floatMergeHeapItem struct { + itr *bufFloatIterator +} + +// floatSortedMergeIterator is an iterator that sorts and merges multiple iterators into one. +type floatSortedMergeIterator struct { + inputs []FloatIterator + opt IteratorOptions + heap floatSortedMergeHeap + init bool +} + +// newFloatSortedMergeIterator returns an instance of floatSortedMergeIterator. +func newFloatSortedMergeIterator(inputs []FloatIterator, opt IteratorOptions) Iterator { + itr := &floatSortedMergeIterator{ + inputs: inputs, + heap: make(floatSortedMergeHeap, 0, len(inputs)), + opt: opt, + } + + // Initialize heap items. + for _, input := range inputs { + // Append to the heap. + itr.heap = append(itr.heap, &floatSortedMergeHeapItem{itr: input, ascending: opt.Ascending}) + } + + return itr +} + +// Stats returns an aggregation of stats from the underlying iterators. +func (itr *floatSortedMergeIterator) Stats() IteratorStats { + var stats IteratorStats + for _, input := range itr.inputs { + stats.Add(input.Stats()) + } + return stats +} + +// Close closes the underlying iterators. +func (itr *floatSortedMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + return nil +} + +// Next returns the next points from the iterator. +func (itr *floatSortedMergeIterator) Next() (*FloatPoint, error) { return itr.pop() } + +// pop returns the next point from the heap. +// Reads the next point from item's cursor and puts it back on the heap. 
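+// The heap is built lazily on the first call, and a nil point with a nil error signals
+// that every input iterator has been drained.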
+func (itr *floatSortedMergeIterator) pop() (*FloatPoint, error) { + // Initialize the heap. See the MergeIterator to see why this has to be done lazily. + if !itr.init { + items := itr.heap + itr.heap = make([]*floatSortedMergeHeapItem, 0, len(items)) + for _, item := range items { + var err error + if item.point, err = item.itr.Next(); err != nil { + return nil, err + } else if item.point == nil { + continue + } + itr.heap = append(itr.heap, item) + } + heap.Init(&itr.heap) + itr.init = true + } + + if len(itr.heap) == 0 { + return nil, nil + } + + // Read the next item from the heap. + item := heap.Pop(&itr.heap).(*floatSortedMergeHeapItem) + if item.err != nil { + return nil, item.err + } else if item.point == nil { + return nil, nil + } + + // Copy the point for return. + p := item.point.Clone() + + // Read the next item from the cursor. Push back to heap if one exists. + if item.point, item.err = item.itr.Next(); item.point != nil { + heap.Push(&itr.heap, item) + } + + return p, nil +} + +// floatSortedMergeHeap represents a heap of floatSortedMergeHeapItems. +type floatSortedMergeHeap []*floatSortedMergeHeapItem + +func (h floatSortedMergeHeap) Len() int { return len(h) } +func (h floatSortedMergeHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } +func (h floatSortedMergeHeap) Less(i, j int) bool { + x, y := h[i].point, h[j].point + + if h[i].ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if !x.Tags.Equals(&y.Tags) { + return x.Tags.ID() < y.Tags.ID() + } + return x.Time < y.Time + } + + if x.Name != y.Name { + return x.Name > y.Name + } else if !x.Tags.Equals(&y.Tags) { + return x.Tags.ID() > y.Tags.ID() + } + return x.Time > y.Time +} + +func (h *floatSortedMergeHeap) Push(x interface{}) { + *h = append(*h, x.(*floatSortedMergeHeapItem)) +} + +func (h *floatSortedMergeHeap) Pop() interface{} { + old := *h + n := len(old) + item := old[n-1] + *h = old[0 : n-1] + return item +} + +type floatSortedMergeHeapItem struct { + point *FloatPoint + err error + itr FloatIterator + ascending bool +} + +// floatParallelIterator represents an iterator that pulls data in a separate goroutine. +type floatParallelIterator struct { + input FloatIterator + ch chan floatPointError + + once sync.Once + closing chan struct{} + wg sync.WaitGroup +} + +// newFloatParallelIterator returns a new instance of floatParallelIterator. +func newFloatParallelIterator(input FloatIterator) *floatParallelIterator { + itr := &floatParallelIterator{ + input: input, + ch: make(chan floatPointError, 1), + closing: make(chan struct{}), + } + itr.wg.Add(1) + go itr.monitor() + return itr +} + +// Stats returns stats from the underlying iterator. +func (itr *floatParallelIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the underlying iterators. +func (itr *floatParallelIterator) Close() error { + itr.once.Do(func() { close(itr.closing) }) + itr.wg.Wait() + return itr.input.Close() +} + +// Next returns the next point from the iterator. +func (itr *floatParallelIterator) Next() (*FloatPoint, error) { + v, ok := <-itr.ch + if !ok { + return nil, io.EOF + } + return v.point, v.err +} + +// monitor runs in a separate goroutine and actively pulls the next point. +func (itr *floatParallelIterator) monitor() { + defer close(itr.ch) + defer itr.wg.Done() + + for { + // Read next point. 
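+ // Next may block on the input; the select below lets a Close call stop this goroutine.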
+ p, err := itr.input.Next() + + select { + case <-itr.closing: + return + case itr.ch <- floatPointError{point: p, err: err}: + } + } +} + +type floatPointError struct { + point *FloatPoint + err error +} + +// floatLimitIterator represents an iterator that limits points per group. +type floatLimitIterator struct { + input FloatIterator + opt IteratorOptions + n int + + prev struct { + name string + tags Tags + } +} + +// newFloatLimitIterator returns a new instance of floatLimitIterator. +func newFloatLimitIterator(input FloatIterator, opt IteratorOptions) *floatLimitIterator { + return &floatLimitIterator{ + input: input, + opt: opt, + } +} + +// Stats returns stats from the underlying iterator. +func (itr *floatLimitIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the underlying iterators. +func (itr *floatLimitIterator) Close() error { return itr.input.Close() } + +// Next returns the next point from the iterator. +func (itr *floatLimitIterator) Next() (*FloatPoint, error) { + for { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Reset window and counter if a new window is encountered. + if p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) { + itr.prev.name = p.Name + itr.prev.tags = p.Tags + itr.n = 0 + } + + // Increment counter. + itr.n++ + + // Read next point if not beyond the offset. + if itr.n <= itr.opt.Offset { + continue + } + + // Read next point if we're beyond the limit. + if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { + // If there's no interval, no groups, and a single source then simply exit. + if itr.opt.Interval.IsZero() && len(itr.opt.Dimensions) == 0 && len(itr.opt.Sources) == 1 { + return nil, nil + } + continue + } + + return p, nil + } +} + +type floatFillIterator struct { + input *bufFloatIterator + prev FloatPoint + startTime int64 + endTime int64 + auxFields []interface{} + init bool + opt IteratorOptions + + window struct { + name string + tags Tags + time int64 + } +} + +func newFloatFillIterator(input FloatIterator, expr Expr, opt IteratorOptions) *floatFillIterator { + if opt.Fill == NullFill { + if expr, ok := expr.(*Call); ok && expr.Name == "count" { + opt.Fill = NumberFill + opt.FillValue = float64(0) + } + } + + var startTime, endTime int64 + if opt.Ascending { + startTime, _ = opt.Window(opt.StartTime) + endTime, _ = opt.Window(opt.EndTime) + } else { + startTime, _ = opt.Window(opt.EndTime) + endTime, _ = opt.Window(opt.StartTime) + } + + var auxFields []interface{} + if len(opt.Aux) > 0 { + auxFields = make([]interface{}, len(opt.Aux)) + } + + return &floatFillIterator{ + input: newBufFloatIterator(input), + prev: FloatPoint{Nil: true}, + startTime: startTime, + endTime: endTime, + auxFields: auxFields, + opt: opt, + } +} + +func (itr *floatFillIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *floatFillIterator) Close() error { return itr.input.Close() } + +func (itr *floatFillIterator) Next() (*FloatPoint, error) { + if !itr.init { + p, err := itr.input.peek() + if p == nil || err != nil { + return nil, err + } + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + itr.init = true + } + + p, err := itr.input.Next() + if err != nil { + return nil, err + } + + // Check if the next point is outside of our window or is nil. 
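+ // A nil point or a change of series means any remaining slots in the current window may still need filling.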
+ for p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { + // If we are inside of an interval, unread the point and continue below to + // constructing a new point. + if itr.opt.Ascending { + if itr.window.time <= itr.endTime { + itr.input.unread(p) + p = nil + break + } + } else { + if itr.window.time >= itr.endTime { + itr.input.unread(p) + p = nil + break + } + } + + // We are *not* in a current interval. If there is no next point, + // we are at the end of all intervals. + if p == nil { + return nil, nil + } + + // Set the new interval. + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + itr.prev = FloatPoint{Nil: true} + break + } + + // Check if the point is our next expected point. + if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { + if p != nil { + itr.input.unread(p) + } + + p = &FloatPoint{ + Name: itr.window.name, + Tags: itr.window.tags, + Time: itr.window.time, + Aux: itr.auxFields, + } + + switch itr.opt.Fill { + case LinearFill: + if !itr.prev.Nil { + next, err := itr.input.peek() + if err != nil { + return nil, err + } + if next != nil { + interval := int64(itr.opt.Interval.Duration) + start := itr.window.time / interval + p.Value = linearFloat(start, itr.prev.Time/interval, next.Time/interval, itr.prev.Value, next.Value) + } else { + p.Nil = true + } + } else { + p.Nil = true + } + + case NullFill: + p.Nil = true + case NumberFill: + p.Value = castToFloat(itr.opt.FillValue) + case PreviousFill: + if !itr.prev.Nil { + p.Value = itr.prev.Value + p.Nil = itr.prev.Nil + } else { + p.Nil = true + } + } + } else { + itr.prev = *p + } + + // Advance the expected time. Do not advance to a new window here + // as there may be lingering points with the same timestamp in the previous + // window. + if itr.opt.Ascending { + itr.window.time = p.Time + int64(itr.opt.Interval.Duration) + } else { + itr.window.time = p.Time - int64(itr.opt.Interval.Duration) + } + return p, nil +} + +// floatIntervalIterator represents a float implementation of IntervalIterator. +type floatIntervalIterator struct { + input FloatIterator + opt IteratorOptions +} + +func newFloatIntervalIterator(input FloatIterator, opt IteratorOptions) *floatIntervalIterator { + return &floatIntervalIterator{input: input, opt: opt} +} + +func (itr *floatIntervalIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *floatIntervalIterator) Close() error { return itr.input.Close() } + +func (itr *floatIntervalIterator) Next() (*FloatPoint, error) { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + p.Time, _ = itr.opt.Window(p.Time) + // If we see the minimum allowable time, set the time to zero so we don't + // break the default returned time for aggregate queries without times. + if p.Time == MinTime { + p.Time = 0 + } + return p, nil +} + +// floatInterruptIterator represents a float implementation of InterruptIterator. 
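+// It checks the closing channel periodically (every 256 points) and stops emitting points once it has been closed.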
+type floatInterruptIterator struct { + input FloatIterator + closing <-chan struct{} + count int +} + +func newFloatInterruptIterator(input FloatIterator, closing <-chan struct{}) *floatInterruptIterator { + return &floatInterruptIterator{input: input, closing: closing} +} + +func (itr *floatInterruptIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *floatInterruptIterator) Close() error { return itr.input.Close() } + +func (itr *floatInterruptIterator) Next() (*FloatPoint, error) { + // Only check if the channel is closed every N points. This + // intentionally checks on both 0 and N so that if the iterator + // has been interrupted before the first point is emitted it will + // not emit any points. + if itr.count&0xFF == 0xFF { + select { + case <-itr.closing: + return nil, nil + default: + // Reset iterator count to zero and fall through to emit the next point. + itr.count = 0 + } + } + + // Increment the counter for every point read. + itr.count++ + return itr.input.Next() +} + +// floatCloseInterruptIterator represents a float implementation of CloseInterruptIterator. +type floatCloseInterruptIterator struct { + input FloatIterator + closing <-chan struct{} + done chan struct{} + once sync.Once +} + +func newFloatCloseInterruptIterator(input FloatIterator, closing <-chan struct{}) *floatCloseInterruptIterator { + itr := &floatCloseInterruptIterator{ + input: input, + closing: closing, + done: make(chan struct{}), + } + go itr.monitor() + return itr +} + +func (itr *floatCloseInterruptIterator) monitor() { + select { + case <-itr.closing: + itr.Close() + case <-itr.done: + } +} + +func (itr *floatCloseInterruptIterator) Stats() IteratorStats { + return itr.input.Stats() +} + +func (itr *floatCloseInterruptIterator) Close() error { + itr.once.Do(func() { + close(itr.done) + itr.input.Close() + }) + return nil +} + +func (itr *floatCloseInterruptIterator) Next() (*FloatPoint, error) { + p, err := itr.input.Next() + if err != nil { + // Check if the iterator was closed. + select { + case <-itr.done: + return nil, nil + default: + return nil, err + } + } + return p, nil +} + +// auxFloatPoint represents a combination of a point and an error for the AuxIterator. +type auxFloatPoint struct { + point *FloatPoint + err error +} + +// floatAuxIterator represents a float implementation of AuxIterator. 
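+// Points read from the input are sent both to Next() callers and to the per-field iterators created via Iterator().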
+type floatAuxIterator struct { + input *bufFloatIterator + output chan auxFloatPoint + fields auxIteratorFields + background bool +} + +func newFloatAuxIterator(input FloatIterator, opt IteratorOptions) *floatAuxIterator { + return &floatAuxIterator{ + input: newBufFloatIterator(input), + output: make(chan auxFloatPoint, 1), + fields: newAuxIteratorFields(opt), + } +} + +func (itr *floatAuxIterator) Background() { + itr.background = true + itr.Start() + go DrainIterator(itr) +} + +func (itr *floatAuxIterator) Start() { go itr.stream() } +func (itr *floatAuxIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *floatAuxIterator) Close() error { return itr.input.Close() } +func (itr *floatAuxIterator) Next() (*FloatPoint, error) { + p := <-itr.output + return p.point, p.err +} +func (itr *floatAuxIterator) Iterator(name string, typ DataType) Iterator { + return itr.fields.iterator(name, typ) +} + +func (itr *floatAuxIterator) CreateIterator(opt IteratorOptions) (Iterator, error) { + expr := opt.Expr + if expr == nil { + panic("unable to create an iterator with no expression from an aux iterator") + } + + switch expr := expr.(type) { + case *VarRef: + return itr.Iterator(expr.Val, expr.Type), nil + default: + panic(fmt.Sprintf("invalid expression type for an aux iterator: %T", expr)) + } +} + +func (itr *floatAuxIterator) FieldDimensions(sources Sources) (fields map[string]DataType, dimensions map[string]struct{}, err error) { + return nil, nil, errors.New("not implemented") +} + +func (itr *floatAuxIterator) ExpandSources(sources Sources) (Sources, error) { + return nil, errors.New("not implemented") +} + +func (itr *floatAuxIterator) stream() { + for { + // Read next point. + p, err := itr.input.Next() + if err != nil { + itr.output <- auxFloatPoint{err: err} + itr.fields.sendError(err) + break + } else if p == nil { + break + } + + // Send point to output and to each field iterator. + itr.output <- auxFloatPoint{point: p} + if ok := itr.fields.send(p); !ok && itr.background { + break + } + } + + close(itr.output) + itr.fields.close() +} + +// floatChanIterator represents a new instance of floatChanIterator. +type floatChanIterator struct { + buf struct { + i int + filled bool + points [2]FloatPoint + } + err error + cond *sync.Cond + done bool +} + +func (itr *floatChanIterator) Stats() IteratorStats { return IteratorStats{} } + +func (itr *floatChanIterator) Close() error { + itr.cond.L.Lock() + // Mark the channel iterator as done and signal all waiting goroutines to start again. + itr.done = true + itr.cond.Broadcast() + // Do not defer the unlock so we don't create an unnecessary allocation. + itr.cond.L.Unlock() + return nil +} + +func (itr *floatChanIterator) setBuf(name string, tags Tags, time int64, value interface{}) bool { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + + // Wait for either the iterator to be done (so we don't have to set the value) + // or for the buffer to have been read and ready for another write. + for !itr.done && itr.buf.filled { + itr.cond.Wait() + } + + // Do not set the value and return false to signal that the iterator is closed. + // Do this after the above wait as the above for loop may have exited because + // the iterator was closed. 
+ if itr.done { + return false + } + + switch v := value.(type) { + case float64: + itr.buf.points[itr.buf.i] = FloatPoint{Name: name, Tags: tags, Time: time, Value: v} + + case int64: + itr.buf.points[itr.buf.i] = FloatPoint{Name: name, Tags: tags, Time: time, Value: float64(v)} + + default: + itr.buf.points[itr.buf.i] = FloatPoint{Name: name, Tags: tags, Time: time, Nil: true} + } + itr.buf.filled = true + + // Signal to all waiting goroutines that a new value is ready to read. + itr.cond.Signal() + return true +} + +func (itr *floatChanIterator) setErr(err error) { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + itr.err = err + + // Signal to all waiting goroutines that a new value is ready to read. + itr.cond.Signal() +} + +func (itr *floatChanIterator) Next() (*FloatPoint, error) { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + + // Check for an error and return one if there. + if itr.err != nil { + return nil, itr.err + } + + // Wait until either a value is available in the buffer or + // the iterator is closed. + for !itr.done && !itr.buf.filled { + itr.cond.Wait() + } + + // Return nil once the channel is done and the buffer is empty. + if itr.done && !itr.buf.filled { + return nil, nil + } + + // Always read from the buffer if it exists, even if the iterator + // is closed. This prevents the last value from being truncated by + // the parent iterator. + p := &itr.buf.points[itr.buf.i] + itr.buf.i = (itr.buf.i + 1) % len(itr.buf.points) + itr.buf.filled = false + itr.cond.Signal() + return p, nil +} + +// floatReduceFloatIterator executes a reducer for every interval and buffers the result. +type floatReduceFloatIterator struct { + input *bufFloatIterator + create func() (FloatPointAggregator, FloatPointEmitter) + opt IteratorOptions + points []FloatPoint +} + +// Stats returns stats from the input iterator. +func (itr *floatReduceFloatIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatReduceFloatIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *floatReduceFloatIterator) Next() (*FloatPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// floatReduceFloatPoint stores the reduced data for a name/tag combination. +type floatReduceFloatPoint struct { + Name string + Tags Tags + Aggregator FloatPointAggregator + Emitter FloatPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *floatReduceFloatIterator) reduce() ([]FloatPoint, error) { + // Calculate next window. + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) + + // Create points by tags. + m := make(map[string]*floatReduceFloatPoint) + for { + // Read next point. 
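+ // NextInWindow returns nil once the next point falls outside [startTime, endTime), which closes this window.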
+ curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &floatReduceFloatPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateFloat(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 { + sort.Sort(reverseStringSlice(keys)) + } + + a := make([]FloatPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a, nil +} + +// floatStreamFloatIterator streams inputs into the iterator and emits points gradually. +type floatStreamFloatIterator struct { + input *bufFloatIterator + create func() (FloatPointAggregator, FloatPointEmitter) + opt IteratorOptions + m map[string]*floatReduceFloatPoint + points []FloatPoint +} + +// newFloatStreamFloatIterator returns a new instance of floatStreamFloatIterator. +func newFloatStreamFloatIterator(input FloatIterator, createFn func() (FloatPointAggregator, FloatPointEmitter), opt IteratorOptions) *floatStreamFloatIterator { + return &floatStreamFloatIterator{ + input: newBufFloatIterator(input), + create: createFn, + opt: opt, + m: make(map[string]*floatReduceFloatPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *floatStreamFloatIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatStreamFloatIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *floatStreamFloatIterator) Next() (*FloatPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *floatStreamFloatIterator) reduce() ([]FloatPoint, error) { + for { + // Read next point. + curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &floatReduceFloatPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateFloat(curr) + + // Attempt to emit points from the aggregator. 
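+ // The emitter may return nothing until it has seen enough points for the current series.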
+ points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// floatExprIterator executes a function to modify an existing point +// for every output of the input iterator. +type floatExprIterator struct { + left *bufFloatIterator + right *bufFloatIterator + fn floatExprFunc + points []FloatPoint // must be size 2 + storePrev bool +} + +func newFloatExprIterator(left, right FloatIterator, opt IteratorOptions, fn func(a, b float64) float64) *floatExprIterator { + var points []FloatPoint + switch opt.Fill { + case NullFill, PreviousFill: + points = []FloatPoint{{Nil: true}, {Nil: true}} + case NumberFill: + value := castToFloat(opt.FillValue) + points = []FloatPoint{{Value: value}, {Value: value}} + } + return &floatExprIterator{ + left: newBufFloatIterator(left), + right: newBufFloatIterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *floatExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *floatExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *floatExprIterator) Next() (*FloatPoint, error) { + for { + a, err := itr.left.Next() + if err != nil { + return nil, err + } + b, err := itr.right.Next() + if err != nil { + return nil, err + } + + if a == nil && b == nil { + return nil, nil + } else if itr.points == nil && (a == nil || b == nil) { + return nil, nil + } + + if a != nil && b != nil { + if a.Time > b.Time { + itr.left.unread(a) + a = nil + } else if a.Time < b.Time { + itr.right.unread(b) + b = nil + } + } + + if a == nil || a.Nil { + if itr.points == nil { + continue + } + p := *b + p.Value = itr.points[0].Value + p.Nil = itr.points[0].Nil + a = &p + } else if b == nil || b.Nil { + if itr.points == nil { + continue + } + p := *a + p.Value = itr.points[1].Value + p.Nil = itr.points[1].Nil + b = &p + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + + if a.Nil { + return a, nil + } else if b.Nil { + return b, nil + } + a.Value = itr.fn(a.Value, b.Value) + return a, nil + + } +} + +// floatExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. +type floatExprFunc func(a, b float64) float64 + +// floatReduceIntegerIterator executes a reducer for every interval and buffers the result. +type floatReduceIntegerIterator struct { + input *bufFloatIterator + create func() (FloatPointAggregator, IntegerPointEmitter) + opt IteratorOptions + points []IntegerPoint +} + +// Stats returns stats from the input iterator. +func (itr *floatReduceIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatReduceIntegerIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *floatReduceIntegerIterator) Next() (*IntegerPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. 
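+ // reduce() buffers points in reverse order, so popping from the end yields them in sorted order.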
+ p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// floatReduceIntegerPoint stores the reduced data for a name/tag combination. +type floatReduceIntegerPoint struct { + Name string + Tags Tags + Aggregator FloatPointAggregator + Emitter IntegerPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *floatReduceIntegerIterator) reduce() ([]IntegerPoint, error) { + // Calculate next window. + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) + + // Create points by tags. + m := make(map[string]*floatReduceIntegerPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &floatReduceIntegerPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateFloat(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 { + sort.Sort(reverseStringSlice(keys)) + } + + a := make([]IntegerPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a, nil +} + +// floatStreamIntegerIterator streams inputs into the iterator and emits points gradually. +type floatStreamIntegerIterator struct { + input *bufFloatIterator + create func() (FloatPointAggregator, IntegerPointEmitter) + opt IteratorOptions + m map[string]*floatReduceIntegerPoint + points []IntegerPoint +} + +// newFloatStreamIntegerIterator returns a new instance of floatStreamIntegerIterator. +func newFloatStreamIntegerIterator(input FloatIterator, createFn func() (FloatPointAggregator, IntegerPointEmitter), opt IteratorOptions) *floatStreamIntegerIterator { + return &floatStreamIntegerIterator{ + input: newBufFloatIterator(input), + create: createFn, + opt: opt, + m: make(map[string]*floatReduceIntegerPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *floatStreamIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatStreamIntegerIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *floatStreamIntegerIterator) Next() (*IntegerPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. 
+// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *floatStreamIntegerIterator) reduce() ([]IntegerPoint, error) { + for { + // Read next point. + curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &floatReduceIntegerPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateFloat(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// floatIntegerExprIterator executes a function to modify an existing point +// for every output of the input iterator. +type floatIntegerExprIterator struct { + left *bufFloatIterator + right *bufFloatIterator + fn floatIntegerExprFunc + points []FloatPoint // must be size 2 + storePrev bool +} + +func newFloatIntegerExprIterator(left, right FloatIterator, opt IteratorOptions, fn func(a, b float64) int64) *floatIntegerExprIterator { + var points []FloatPoint + switch opt.Fill { + case NullFill, PreviousFill: + points = []FloatPoint{{Nil: true}, {Nil: true}} + case NumberFill: + value := castToFloat(opt.FillValue) + points = []FloatPoint{{Value: value}, {Value: value}} + } + return &floatIntegerExprIterator{ + left: newBufFloatIterator(left), + right: newBufFloatIterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *floatIntegerExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *floatIntegerExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *floatIntegerExprIterator) Next() (*IntegerPoint, error) { + for { + a, err := itr.left.Next() + if err != nil { + return nil, err + } + b, err := itr.right.Next() + if err != nil { + return nil, err + } + + if a == nil && b == nil { + return nil, nil + } else if itr.points == nil && (a == nil || b == nil) { + return nil, nil + } + + if a != nil && b != nil { + if a.Time > b.Time { + itr.left.unread(a) + a = nil + } else if a.Time < b.Time { + itr.right.unread(b) + b = nil + } + } + + if a == nil || a.Nil { + if itr.points == nil { + continue + } + p := *b + p.Value = itr.points[0].Value + p.Nil = itr.points[0].Nil + a = &p + } else if b == nil || b.Nil { + if itr.points == nil { + continue + } + p := *a + p.Value = itr.points[1].Value + p.Nil = itr.points[1].Nil + b = &p + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + + p := &IntegerPoint{ + Name: a.Name, + Tags: a.Tags, + Time: a.Time, + Nil: a.Nil || b.Nil, + Aggregated: a.Aggregated, + } + if !p.Nil { + p.Value = itr.fn(a.Value, b.Value) + } + return p, nil + + } +} + +// floatIntegerExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. 
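+// Both operands are floats; floatIntegerExprIterator wraps the result in an IntegerPoint.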
+type floatIntegerExprFunc func(a, b float64) int64 + +// floatReduceStringIterator executes a reducer for every interval and buffers the result. +type floatReduceStringIterator struct { + input *bufFloatIterator + create func() (FloatPointAggregator, StringPointEmitter) + opt IteratorOptions + points []StringPoint +} + +// Stats returns stats from the input iterator. +func (itr *floatReduceStringIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatReduceStringIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *floatReduceStringIterator) Next() (*StringPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// floatReduceStringPoint stores the reduced data for a name/tag combination. +type floatReduceStringPoint struct { + Name string + Tags Tags + Aggregator FloatPointAggregator + Emitter StringPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *floatReduceStringIterator) reduce() ([]StringPoint, error) { + // Calculate next window. + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) + + // Create points by tags. + m := make(map[string]*floatReduceStringPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &floatReduceStringPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateFloat(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 { + sort.Sort(reverseStringSlice(keys)) + } + + a := make([]StringPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a, nil +} + +// floatStreamStringIterator streams inputs into the iterator and emits points gradually. +type floatStreamStringIterator struct { + input *bufFloatIterator + create func() (FloatPointAggregator, StringPointEmitter) + opt IteratorOptions + m map[string]*floatReduceStringPoint + points []StringPoint +} + +// newFloatStreamStringIterator returns a new instance of floatStreamStringIterator. 
+func newFloatStreamStringIterator(input FloatIterator, createFn func() (FloatPointAggregator, StringPointEmitter), opt IteratorOptions) *floatStreamStringIterator { + return &floatStreamStringIterator{ + input: newBufFloatIterator(input), + create: createFn, + opt: opt, + m: make(map[string]*floatReduceStringPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *floatStreamStringIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatStreamStringIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *floatStreamStringIterator) Next() (*StringPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *floatStreamStringIterator) reduce() ([]StringPoint, error) { + for { + // Read next point. + curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &floatReduceStringPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateFloat(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// floatStringExprIterator executes a function to modify an existing point +// for every output of the input iterator. 
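+// Points from the left and right inputs are aligned by timestamp before fn is applied.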
+type floatStringExprIterator struct { + left *bufFloatIterator + right *bufFloatIterator + fn floatStringExprFunc + points []FloatPoint // must be size 2 + storePrev bool +} + +func newFloatStringExprIterator(left, right FloatIterator, opt IteratorOptions, fn func(a, b float64) string) *floatStringExprIterator { + var points []FloatPoint + switch opt.Fill { + case NullFill, PreviousFill: + points = []FloatPoint{{Nil: true}, {Nil: true}} + case NumberFill: + value := castToFloat(opt.FillValue) + points = []FloatPoint{{Value: value}, {Value: value}} + } + return &floatStringExprIterator{ + left: newBufFloatIterator(left), + right: newBufFloatIterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *floatStringExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *floatStringExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *floatStringExprIterator) Next() (*StringPoint, error) { + for { + a, err := itr.left.Next() + if err != nil { + return nil, err + } + b, err := itr.right.Next() + if err != nil { + return nil, err + } + + if a == nil && b == nil { + return nil, nil + } else if itr.points == nil && (a == nil || b == nil) { + return nil, nil + } + + if a != nil && b != nil { + if a.Time > b.Time { + itr.left.unread(a) + a = nil + } else if a.Time < b.Time { + itr.right.unread(b) + b = nil + } + } + + if a == nil || a.Nil { + if itr.points == nil { + continue + } + p := *b + p.Value = itr.points[0].Value + p.Nil = itr.points[0].Nil + a = &p + } else if b == nil || b.Nil { + if itr.points == nil { + continue + } + p := *a + p.Value = itr.points[1].Value + p.Nil = itr.points[1].Nil + b = &p + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + + p := &StringPoint{ + Name: a.Name, + Tags: a.Tags, + Time: a.Time, + Nil: a.Nil || b.Nil, + Aggregated: a.Aggregated, + } + if !p.Nil { + p.Value = itr.fn(a.Value, b.Value) + } + return p, nil + + } +} + +// floatStringExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. +type floatStringExprFunc func(a, b float64) string + +// floatReduceBooleanIterator executes a reducer for every interval and buffers the result. +type floatReduceBooleanIterator struct { + input *bufFloatIterator + create func() (FloatPointAggregator, BooleanPointEmitter) + opt IteratorOptions + points []BooleanPoint +} + +// Stats returns stats from the input iterator. +func (itr *floatReduceBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatReduceBooleanIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *floatReduceBooleanIterator) Next() (*BooleanPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// floatReduceBooleanPoint stores the reduced data for a name/tag combination. 
+type floatReduceBooleanPoint struct { + Name string + Tags Tags + Aggregator FloatPointAggregator + Emitter BooleanPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *floatReduceBooleanIterator) reduce() ([]BooleanPoint, error) { + // Calculate next window. + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) + + // Create points by tags. + m := make(map[string]*floatReduceBooleanPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &floatReduceBooleanPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateFloat(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 { + sort.Sort(reverseStringSlice(keys)) + } + + a := make([]BooleanPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a, nil +} + +// floatStreamBooleanIterator streams inputs into the iterator and emits points gradually. +type floatStreamBooleanIterator struct { + input *bufFloatIterator + create func() (FloatPointAggregator, BooleanPointEmitter) + opt IteratorOptions + m map[string]*floatReduceBooleanPoint + points []BooleanPoint +} + +// newFloatStreamBooleanIterator returns a new instance of floatStreamBooleanIterator. +func newFloatStreamBooleanIterator(input FloatIterator, createFn func() (FloatPointAggregator, BooleanPointEmitter), opt IteratorOptions) *floatStreamBooleanIterator { + return &floatStreamBooleanIterator{ + input: newBufFloatIterator(input), + create: createFn, + opt: opt, + m: make(map[string]*floatReduceBooleanPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *floatStreamBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatStreamBooleanIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *floatStreamBooleanIterator) Next() (*BooleanPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *floatStreamBooleanIterator) reduce() ([]BooleanPoint, error) { + for { + // Read next point. 
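+ // A nil point or an error from the input ends the stream.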
+ curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &floatReduceBooleanPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateFloat(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// floatBooleanExprIterator executes a function to modify an existing point +// for every output of the input iterator. +type floatBooleanExprIterator struct { + left *bufFloatIterator + right *bufFloatIterator + fn floatBooleanExprFunc + points []FloatPoint // must be size 2 + storePrev bool +} + +func newFloatBooleanExprIterator(left, right FloatIterator, opt IteratorOptions, fn func(a, b float64) bool) *floatBooleanExprIterator { + var points []FloatPoint + switch opt.Fill { + case NullFill, PreviousFill: + points = []FloatPoint{{Nil: true}, {Nil: true}} + case NumberFill: + value := castToFloat(opt.FillValue) + points = []FloatPoint{{Value: value}, {Value: value}} + } + return &floatBooleanExprIterator{ + left: newBufFloatIterator(left), + right: newBufFloatIterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *floatBooleanExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *floatBooleanExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *floatBooleanExprIterator) Next() (*BooleanPoint, error) { + for { + a, err := itr.left.Next() + if err != nil { + return nil, err + } + b, err := itr.right.Next() + if err != nil { + return nil, err + } + + if a == nil && b == nil { + return nil, nil + } else if itr.points == nil && (a == nil || b == nil) { + return nil, nil + } + + if a != nil && b != nil { + if a.Time > b.Time { + itr.left.unread(a) + a = nil + } else if a.Time < b.Time { + itr.right.unread(b) + b = nil + } + } + + if a == nil || a.Nil { + if itr.points == nil { + continue + } + p := *b + p.Value = itr.points[0].Value + p.Nil = itr.points[0].Nil + a = &p + } else if b == nil || b.Nil { + if itr.points == nil { + continue + } + p := *a + p.Value = itr.points[1].Value + p.Nil = itr.points[1].Nil + b = &p + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + + p := &BooleanPoint{ + Name: a.Name, + Tags: a.Tags, + Time: a.Time, + Nil: a.Nil || b.Nil, + Aggregated: a.Aggregated, + } + if !p.Nil { + p.Value = itr.fn(a.Value, b.Value) + } + return p, nil + + } +} + +// floatBooleanExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. +type floatBooleanExprFunc func(a, b float64) bool + +// floatTransformIterator executes a function to modify an existing point for every +// output of the input iterator. 
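+// The transform function may modify the point in place rather than allocating a new one.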
+type floatTransformIterator struct { + input FloatIterator + fn floatTransformFunc +} + +// Stats returns stats from the input iterator. +func (itr *floatTransformIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatTransformIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *floatTransformIterator) Next() (*FloatPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { + p = itr.fn(p) + } + return p, nil +} + +// floatTransformFunc creates or modifies a point. +// The point passed in may be modified and returned rather than allocating a +// new point if possible. +type floatTransformFunc func(p *FloatPoint) *FloatPoint + +// floatBoolTransformIterator executes a function to modify an existing point for every +// output of the input iterator. +type floatBoolTransformIterator struct { + input FloatIterator + fn floatBoolTransformFunc +} + +// Stats returns stats from the input iterator. +func (itr *floatBoolTransformIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatBoolTransformIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *floatBoolTransformIterator) Next() (*BooleanPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { + return itr.fn(p), nil + } + return nil, nil +} + +// floatBoolTransformFunc creates or modifies a point. +// The point passed in may be modified and returned rather than allocating a +// new point if possible. +type floatBoolTransformFunc func(p *FloatPoint) *BooleanPoint + +// floatDedupeIterator only outputs unique points. +// This differs from the DistinctIterator in that it compares all aux fields too. +// This iterator is relatively inefficient and should only be used on small +// datasets such as meta query results. +type floatDedupeIterator struct { + input FloatIterator + m map[string]struct{} // lookup of points already sent +} + +// newFloatDedupeIterator returns a new instance of floatDedupeIterator. +func newFloatDedupeIterator(input FloatIterator) *floatDedupeIterator { + return &floatDedupeIterator{ + input: input, + m: make(map[string]struct{}), + } +} + +// Stats returns stats from the input iterator. +func (itr *floatDedupeIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatDedupeIterator) Close() error { return itr.input.Close() } + +// Next returns the next unique point from the input iterator. +func (itr *floatDedupeIterator) Next() (*FloatPoint, error) { + for { + // Read next point. + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Serialize to bytes to store in lookup. + buf, err := proto.Marshal(encodeFloatPoint(p)) + if err != nil { + return nil, err + } + + // If the point has already been output then move to the next point. + if _, ok := itr.m[string(buf)]; ok { + continue + } + + // Otherwise mark it as emitted and return point. + itr.m[string(buf)] = struct{}{} + return p, nil + } +} + +// floatReaderIterator represents an iterator that streams from a reader. 
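+// Points are decoded from r one at a time until the decoder reports io.EOF.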
+type floatReaderIterator struct { + r io.Reader + dec *FloatPointDecoder +} + +// newFloatReaderIterator returns a new instance of floatReaderIterator. +func newFloatReaderIterator(r io.Reader, stats IteratorStats) *floatReaderIterator { + dec := NewFloatPointDecoder(r) + dec.stats = stats + + return &floatReaderIterator{ + r: r, + dec: dec, + } +} + +// Stats returns stats about points processed. +func (itr *floatReaderIterator) Stats() IteratorStats { return itr.dec.stats } + +// Close closes the underlying reader, if applicable. +func (itr *floatReaderIterator) Close() error { + if r, ok := itr.r.(io.ReadCloser); ok { + return r.Close() + } + return nil +} + +// Next returns the next point from the iterator. +func (itr *floatReaderIterator) Next() (*FloatPoint, error) { + // OPTIMIZE(benbjohnson): Reuse point on iterator. + + // Unmarshal next point. + p := &FloatPoint{} + if err := itr.dec.DecodeFloatPoint(p); err == io.EOF { + return nil, nil + } else if err != nil { + return nil, err + } + return p, nil +} + +// IntegerIterator represents a stream of integer points. +type IntegerIterator interface { + Iterator + Next() (*IntegerPoint, error) +} + +// newIntegerIterators converts a slice of Iterator to a slice of IntegerIterator. +// Drop and closes any iterator in itrs that is not a IntegerIterator and cannot +// be cast to a IntegerIterator. +func newIntegerIterators(itrs []Iterator) []IntegerIterator { + a := make([]IntegerIterator, 0, len(itrs)) + for _, itr := range itrs { + switch itr := itr.(type) { + case IntegerIterator: + a = append(a, itr) + + default: + itr.Close() + } + } + return a +} + +// bufIntegerIterator represents a buffered IntegerIterator. +type bufIntegerIterator struct { + itr IntegerIterator + buf *IntegerPoint +} + +// newBufIntegerIterator returns a buffered IntegerIterator. +func newBufIntegerIterator(itr IntegerIterator) *bufIntegerIterator { + return &bufIntegerIterator{itr: itr} +} + +// Stats returns statistics from the input iterator. +func (itr *bufIntegerIterator) Stats() IteratorStats { return itr.itr.Stats() } + +// Close closes the underlying iterator. +func (itr *bufIntegerIterator) Close() error { return itr.itr.Close() } + +// peek returns the next point without removing it from the iterator. +func (itr *bufIntegerIterator) peek() (*IntegerPoint, error) { + p, err := itr.Next() + if err != nil { + return nil, err + } + itr.unread(p) + return p, nil +} + +// peekTime returns the time of the next point. +// Returns zero time if no more points available. +func (itr *bufIntegerIterator) peekTime() (int64, error) { + p, err := itr.peek() + if p == nil || err != nil { + return ZeroTime, err + } + return p.Time, nil +} + +// Next returns the current buffer, if exists, or calls the underlying iterator. +func (itr *bufIntegerIterator) Next() (*IntegerPoint, error) { + buf := itr.buf + if buf != nil { + itr.buf = nil + return buf, nil + } + return itr.itr.Next() +} + +// NextInWindow returns the next value if it is between [startTime, endTime). +// If the next value is outside the range then it is moved to the buffer. +func (itr *bufIntegerIterator) NextInWindow(startTime, endTime int64) (*IntegerPoint, error) { + v, err := itr.Next() + if v == nil || err != nil { + return nil, err + } else if t := v.Time; t >= endTime || t < startTime { + itr.unread(v) + return nil, nil + } + return v, nil +} + +// unread sets v to the buffer. It is read on the next call to Next(). 
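+// Only a single point is buffered; a subsequent unread overwrites it.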
+func (itr *bufIntegerIterator) unread(v *IntegerPoint) { itr.buf = v } + +// integerMergeIterator represents an iterator that combines multiple integer iterators. +type integerMergeIterator struct { + inputs []IntegerIterator + heap *integerMergeHeap + init bool + + // Current iterator and window. + curr *integerMergeHeapItem + window struct { + name string + tags string + startTime int64 + endTime int64 + } +} + +// newIntegerMergeIterator returns a new instance of integerMergeIterator. +func newIntegerMergeIterator(inputs []IntegerIterator, opt IteratorOptions) *integerMergeIterator { + itr := &integerMergeIterator{ + inputs: inputs, + heap: &integerMergeHeap{ + items: make([]*integerMergeHeapItem, 0, len(inputs)), + opt: opt, + }, + } + + // Initialize heap items. + for _, input := range inputs { + // Wrap in buffer, ignore any inputs without anymore points. + bufInput := newBufIntegerIterator(input) + + // Append to the heap. + itr.heap.items = append(itr.heap.items, &integerMergeHeapItem{itr: bufInput}) + } + + return itr +} + +// Stats returns an aggregation of stats from the underlying iterators. +func (itr *integerMergeIterator) Stats() IteratorStats { + var stats IteratorStats + for _, input := range itr.inputs { + stats.Add(input.Stats()) + } + return stats +} + +// Close closes the underlying iterators. +func (itr *integerMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + itr.curr = nil + itr.inputs = nil + itr.heap.items = nil + return nil +} + +// Next returns the next point from the iterator. +func (itr *integerMergeIterator) Next() (*IntegerPoint, error) { + // Initialize the heap. This needs to be done lazily on the first call to this iterator + // so that iterator initialization done through the Select() call returns quickly. + // Queries can only be interrupted after the Select() call completes so any operations + // done during iterator creation cannot be interrupted, which is why we do it here + // instead so an interrupt can happen while initializing the heap. + if !itr.init { + items := itr.heap.items + itr.heap.items = make([]*integerMergeHeapItem, 0, len(items)) + for _, item := range items { + if p, err := item.itr.peek(); err != nil { + return nil, err + } else if p == nil { + continue + } + itr.heap.items = append(itr.heap.items, item) + } + heap.Init(itr.heap) + itr.init = true + } + + for { + // Retrieve the next iterator if we don't have one. + if itr.curr == nil { + if len(itr.heap.items) == 0 { + return nil, nil + } + itr.curr = heap.Pop(itr.heap).(*integerMergeHeapItem) + + // Read point and set current window. + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } + itr.window.name, itr.window.tags = p.Name, p.Tags.ID() + itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) + return p, nil + } + + // Read the next point from the current iterator. + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } + + // If there are no more points then remove iterator from heap and find next. + if p == nil { + itr.curr = nil + continue + } + + // Check if the point is inside of our current window. 
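+ // The point leaves the window if its series (name/tags) changes or its timestamp crosses the window boundary.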
+ inWindow := true + if window := itr.window; window.name != p.Name { + inWindow = false + } else if window.tags != p.Tags.ID() { + inWindow = false + } else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime { + inWindow = false + } else if !opt.Ascending && p.Time < window.startTime { + inWindow = false + } + + // If it's outside our window then push iterator back on the heap and find new iterator. + if !inWindow { + itr.curr.itr.unread(p) + heap.Push(itr.heap, itr.curr) + itr.curr = nil + continue + } + + return p, nil + } +} + +// integerMergeHeap represents a heap of integerMergeHeapItems. +// Items are sorted by their next window and then by name/tags. +type integerMergeHeap struct { + opt IteratorOptions + items []*integerMergeHeapItem +} + +func (h integerMergeHeap) Len() int { return len(h.items) } +func (h integerMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h integerMergeHeap) Less(i, j int) bool { + x, err := h.items[i].itr.peek() + if err != nil { + return true + } + y, err := h.items[j].itr.peek() + if err != nil { + return false + } + + if h.opt.Ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if x.Tags.ID() != y.Tags.ID() { + return x.Tags.ID() < y.Tags.ID() + } + } else { + if x.Name != y.Name { + return x.Name > y.Name + } else if x.Tags.ID() != y.Tags.ID() { + return x.Tags.ID() > y.Tags.ID() + } + } + + xt, _ := h.opt.Window(x.Time) + yt, _ := h.opt.Window(y.Time) + + if h.opt.Ascending { + return xt < yt + } + return xt > yt +} + +func (h *integerMergeHeap) Push(x interface{}) { + h.items = append(h.items, x.(*integerMergeHeapItem)) +} + +func (h *integerMergeHeap) Pop() interface{} { + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} + +type integerMergeHeapItem struct { + itr *bufIntegerIterator +} + +// integerSortedMergeIterator is an iterator that sorts and merges multiple iterators into one. +type integerSortedMergeIterator struct { + inputs []IntegerIterator + opt IteratorOptions + heap integerSortedMergeHeap + init bool +} + +// newIntegerSortedMergeIterator returns an instance of integerSortedMergeIterator. +func newIntegerSortedMergeIterator(inputs []IntegerIterator, opt IteratorOptions) Iterator { + itr := &integerSortedMergeIterator{ + inputs: inputs, + heap: make(integerSortedMergeHeap, 0, len(inputs)), + opt: opt, + } + + // Initialize heap items. + for _, input := range inputs { + // Append to the heap. + itr.heap = append(itr.heap, &integerSortedMergeHeapItem{itr: input, ascending: opt.Ascending}) + } + + return itr +} + +// Stats returns an aggregation of stats from the underlying iterators. +func (itr *integerSortedMergeIterator) Stats() IteratorStats { + var stats IteratorStats + for _, input := range itr.inputs { + stats.Add(input.Stats()) + } + return stats +} + +// Close closes the underlying iterators. +func (itr *integerSortedMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + return nil +} + +// Next returns the next points from the iterator. +func (itr *integerSortedMergeIterator) Next() (*IntegerPoint, error) { return itr.pop() } + +// pop returns the next point from the heap. +// Reads the next point from item's cursor and puts it back on the heap. +func (itr *integerSortedMergeIterator) pop() (*IntegerPoint, error) { + // Initialize the heap. See the MergeIterator to see why this has to be done lazily. 
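integerMergeHeap satisfies container/heap.Interface so the merge iterator can always pull the input whose next point sorts first by measurement name, then tag set, then window start time, in either direction. A reduced sketch of that ordering with container/heap, using a simple (name, time) key in place of the full name/tags/window comparison (illustrative only):

package main

import (
	"container/heap"
	"fmt"
)

type item struct {
	name string
	time int64
}

// mergeHeap orders items by name, then by time, mirroring the shape of the
// generated Less method above.
type mergeHeap struct {
	ascending bool
	items     []item
}

func (h mergeHeap) Len() int      { return len(h.items) }
func (h mergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] }
func (h mergeHeap) Less(i, j int) bool {
	x, y := h.items[i], h.items[j]
	if x.name != y.name {
		if h.ascending {
			return x.name < y.name
		}
		return x.name > y.name
	}
	if h.ascending {
		return x.time < y.time
	}
	return x.time > y.time
}

func (h *mergeHeap) Push(x interface{}) { h.items = append(h.items, x.(item)) }
func (h *mergeHeap) Pop() interface{} {
	old := h.items
	n := len(old)
	it := old[n-1]
	h.items = old[:n-1]
	return it
}

func main() {
	h := &mergeHeap{ascending: true, items: []item{
		{"cpu", 30}, {"cpu", 10}, {"mem", 5},
	}}
	heap.Init(h)
	for h.Len() > 0 {
		fmt.Println(heap.Pop(h).(item)) // {cpu 10} {cpu 30} {mem 5}
	}
}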
+ if !itr.init { + items := itr.heap + itr.heap = make([]*integerSortedMergeHeapItem, 0, len(items)) + for _, item := range items { + var err error + if item.point, err = item.itr.Next(); err != nil { + return nil, err + } else if item.point == nil { + continue + } + itr.heap = append(itr.heap, item) + } + heap.Init(&itr.heap) + itr.init = true + } + + if len(itr.heap) == 0 { + return nil, nil + } + + // Read the next item from the heap. + item := heap.Pop(&itr.heap).(*integerSortedMergeHeapItem) + if item.err != nil { + return nil, item.err + } else if item.point == nil { + return nil, nil + } + + // Copy the point for return. + p := item.point.Clone() + + // Read the next item from the cursor. Push back to heap if one exists. + if item.point, item.err = item.itr.Next(); item.point != nil { + heap.Push(&itr.heap, item) + } + + return p, nil +} + +// integerSortedMergeHeap represents a heap of integerSortedMergeHeapItems. +type integerSortedMergeHeap []*integerSortedMergeHeapItem + +func (h integerSortedMergeHeap) Len() int { return len(h) } +func (h integerSortedMergeHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } +func (h integerSortedMergeHeap) Less(i, j int) bool { + x, y := h[i].point, h[j].point + + if h[i].ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if !x.Tags.Equals(&y.Tags) { + return x.Tags.ID() < y.Tags.ID() + } + return x.Time < y.Time + } + + if x.Name != y.Name { + return x.Name > y.Name + } else if !x.Tags.Equals(&y.Tags) { + return x.Tags.ID() > y.Tags.ID() + } + return x.Time > y.Time +} + +func (h *integerSortedMergeHeap) Push(x interface{}) { + *h = append(*h, x.(*integerSortedMergeHeapItem)) +} + +func (h *integerSortedMergeHeap) Pop() interface{} { + old := *h + n := len(old) + item := old[n-1] + *h = old[0 : n-1] + return item +} + +type integerSortedMergeHeapItem struct { + point *IntegerPoint + err error + itr IntegerIterator + ascending bool +} + +// integerParallelIterator represents an iterator that pulls data in a separate goroutine. +type integerParallelIterator struct { + input IntegerIterator + ch chan integerPointError + + once sync.Once + closing chan struct{} + wg sync.WaitGroup +} + +// newIntegerParallelIterator returns a new instance of integerParallelIterator. +func newIntegerParallelIterator(input IntegerIterator) *integerParallelIterator { + itr := &integerParallelIterator{ + input: input, + ch: make(chan integerPointError, 1), + closing: make(chan struct{}), + } + itr.wg.Add(1) + go itr.monitor() + return itr +} + +// Stats returns stats from the underlying iterator. +func (itr *integerParallelIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the underlying iterators. +func (itr *integerParallelIterator) Close() error { + itr.once.Do(func() { close(itr.closing) }) + itr.wg.Wait() + return itr.input.Close() +} + +// Next returns the next point from the iterator. +func (itr *integerParallelIterator) Next() (*IntegerPoint, error) { + v, ok := <-itr.ch + if !ok { + return nil, io.EOF + } + return v.point, v.err +} + +// monitor runs in a separate goroutine and actively pulls the next point. +func (itr *integerParallelIterator) monitor() { + defer close(itr.ch) + defer itr.wg.Done() + + for { + // Read next point. 
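integerParallelIterator decouples the consumer from a potentially slow input by running a monitor goroutine that pulls points and hands them over on a buffered channel; Close signals the goroutine through a closing channel and waits on a WaitGroup. A compact sketch of the same shutdown-safe producer pattern (names are illustrative, not the influxql types):

package main

import (
	"fmt"
	"sync"
)

type parallelSource struct {
	ch      chan int
	closing chan struct{}
	once    sync.Once
	wg      sync.WaitGroup
}

func newParallelSource(values []int) *parallelSource {
	s := &parallelSource{ch: make(chan int, 1), closing: make(chan struct{})}
	s.wg.Add(1)
	go s.monitor(values)
	return s
}

// monitor pushes values until they run out or the consumer closes the source.
func (s *parallelSource) monitor(values []int) {
	defer close(s.ch)
	defer s.wg.Done()
	for _, v := range values {
		select {
		case <-s.closing:
			return
		case s.ch <- v:
		}
	}
}

// Next blocks until a value is available or the channel is drained.
func (s *parallelSource) Next() (int, bool) {
	v, ok := <-s.ch
	return v, ok
}

// Close is safe to call multiple times; it unblocks monitor and waits for it.
func (s *parallelSource) Close() {
	s.once.Do(func() { close(s.closing) })
	s.wg.Wait()
}

func main() {
	s := newParallelSource([]int{1, 2, 3})
	for v, ok := s.Next(); ok; v, ok = s.Next() {
		fmt.Println(v)
	}
	s.Close()
}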
+ p, err := itr.input.Next() + + select { + case <-itr.closing: + return + case itr.ch <- integerPointError{point: p, err: err}: + } + } +} + +type integerPointError struct { + point *IntegerPoint + err error +} + +// integerLimitIterator represents an iterator that limits points per group. +type integerLimitIterator struct { + input IntegerIterator + opt IteratorOptions + n int + + prev struct { + name string + tags Tags + } +} + +// newIntegerLimitIterator returns a new instance of integerLimitIterator. +func newIntegerLimitIterator(input IntegerIterator, opt IteratorOptions) *integerLimitIterator { + return &integerLimitIterator{ + input: input, + opt: opt, + } +} + +// Stats returns stats from the underlying iterator. +func (itr *integerLimitIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the underlying iterators. +func (itr *integerLimitIterator) Close() error { return itr.input.Close() } + +// Next returns the next point from the iterator. +func (itr *integerLimitIterator) Next() (*IntegerPoint, error) { + for { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Reset window and counter if a new window is encountered. + if p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) { + itr.prev.name = p.Name + itr.prev.tags = p.Tags + itr.n = 0 + } + + // Increment counter. + itr.n++ + + // Read next point if not beyond the offset. + if itr.n <= itr.opt.Offset { + continue + } + + // Read next point if we're beyond the limit. + if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { + // If there's no interval, no groups, and a single source then simply exit. + if itr.opt.Interval.IsZero() && len(itr.opt.Dimensions) == 0 && len(itr.opt.Sources) == 1 { + return nil, nil + } + continue + } + + return p, nil + } +} + +type integerFillIterator struct { + input *bufIntegerIterator + prev IntegerPoint + startTime int64 + endTime int64 + auxFields []interface{} + init bool + opt IteratorOptions + + window struct { + name string + tags Tags + time int64 + } +} + +func newIntegerFillIterator(input IntegerIterator, expr Expr, opt IteratorOptions) *integerFillIterator { + if opt.Fill == NullFill { + if expr, ok := expr.(*Call); ok && expr.Name == "count" { + opt.Fill = NumberFill + opt.FillValue = int64(0) + } + } + + var startTime, endTime int64 + if opt.Ascending { + startTime, _ = opt.Window(opt.StartTime) + endTime, _ = opt.Window(opt.EndTime) + } else { + startTime, _ = opt.Window(opt.EndTime) + endTime, _ = opt.Window(opt.StartTime) + } + + var auxFields []interface{} + if len(opt.Aux) > 0 { + auxFields = make([]interface{}, len(opt.Aux)) + } + + return &integerFillIterator{ + input: newBufIntegerIterator(input), + prev: IntegerPoint{Nil: true}, + startTime: startTime, + endTime: endTime, + auxFields: auxFields, + opt: opt, + } +} + +func (itr *integerFillIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *integerFillIterator) Close() error { return itr.input.Close() } + +func (itr *integerFillIterator) Next() (*IntegerPoint, error) { + if !itr.init { + p, err := itr.input.peek() + if p == nil || err != nil { + return nil, err + } + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + itr.init = true + } + + p, err := itr.input.Next() + if err != nil { + return nil, err + } + + // Check if the next point is outside of our window or is nil. 
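integerLimitIterator applies OFFSET and LIMIT per series: its counter resets whenever the (name, tags) pair changes, the first Offset points of each series are skipped, and points past Offset+Limit are dropped. The same behavior can be sketched over a flat slice of (series, value) pairs, assuming the input is already grouped by series (simplified; real points also carry tags and timestamps):

package main

import "fmt"

type point struct {
	series string
	value  int
}

// limitPerSeries keeps at most 'limit' points per series after skipping the
// first 'offset' points of that series.
func limitPerSeries(in []point, offset, limit int) []point {
	var out []point
	var prev string
	n := 0
	for _, p := range in {
		if p.series != prev {
			prev, n = p.series, 0
		}
		n++
		if n <= offset {
			continue // still inside the offset window for this series
		}
		if limit > 0 && n-offset > limit {
			continue // past the limit for this series
		}
		out = append(out, p)
	}
	return out
}

func main() {
	in := []point{
		{"cpu", 1}, {"cpu", 2}, {"cpu", 3}, {"cpu", 4},
		{"mem", 10}, {"mem", 20},
	}
	fmt.Println(limitPerSeries(in, 1, 2)) // [{cpu 2} {cpu 3} {mem 20}]
}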
+ for p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { + // If we are inside of an interval, unread the point and continue below to + // constructing a new point. + if itr.opt.Ascending { + if itr.window.time <= itr.endTime { + itr.input.unread(p) + p = nil + break + } + } else { + if itr.window.time >= itr.endTime { + itr.input.unread(p) + p = nil + break + } + } + + // We are *not* in a current interval. If there is no next point, + // we are at the end of all intervals. + if p == nil { + return nil, nil + } + + // Set the new interval. + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + itr.prev = IntegerPoint{Nil: true} + break + } + + // Check if the point is our next expected point. + if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { + if p != nil { + itr.input.unread(p) + } + + p = &IntegerPoint{ + Name: itr.window.name, + Tags: itr.window.tags, + Time: itr.window.time, + Aux: itr.auxFields, + } + + switch itr.opt.Fill { + case LinearFill: + if !itr.prev.Nil { + next, err := itr.input.peek() + if err != nil { + return nil, err + } + if next != nil { + interval := int64(itr.opt.Interval.Duration) + start := itr.window.time / interval + p.Value = linearInteger(start, itr.prev.Time/interval, next.Time/interval, itr.prev.Value, next.Value) + } else { + p.Nil = true + } + } else { + p.Nil = true + } + + case NullFill: + p.Nil = true + case NumberFill: + p.Value = castToInteger(itr.opt.FillValue) + case PreviousFill: + if !itr.prev.Nil { + p.Value = itr.prev.Value + p.Nil = itr.prev.Nil + } else { + p.Nil = true + } + } + } else { + itr.prev = *p + } + + // Advance the expected time. Do not advance to a new window here + // as there may be lingering points with the same timestamp in the previous + // window. + if itr.opt.Ascending { + itr.window.time = p.Time + int64(itr.opt.Interval.Duration) + } else { + itr.window.time = p.Time - int64(itr.opt.Interval.Duration) + } + return p, nil +} + +// integerIntervalIterator represents a integer implementation of IntervalIterator. +type integerIntervalIterator struct { + input IntegerIterator + opt IteratorOptions +} + +func newIntegerIntervalIterator(input IntegerIterator, opt IteratorOptions) *integerIntervalIterator { + return &integerIntervalIterator{input: input, opt: opt} +} + +func (itr *integerIntervalIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *integerIntervalIterator) Close() error { return itr.input.Close() } + +func (itr *integerIntervalIterator) Next() (*IntegerPoint, error) { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + p.Time, _ = itr.opt.Window(p.Time) + // If we see the minimum allowable time, set the time to zero so we don't + // break the default returned time for aggregate queries without times. + if p.Time == MinTime { + p.Time = 0 + } + return p, nil +} + +// integerInterruptIterator represents a integer implementation of InterruptIterator. 
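The fill iterator above walks the expected window times and, when no point exists for a window, synthesizes one according to the fill mode (null, number, previous, or linear). A small sketch of previous- and number-fill over regularly spaced buckets (times, values, and the fallback rule are illustrative simplifications):

package main

import "fmt"

// fillGaps emits one value per interval in [start, end); buckets missing from
// 'points' are filled with the previously seen value, or with 'fillValue' if
// nothing has been seen yet (roughly previous-fill falling back to number-fill).
func fillGaps(points map[int64]int64, start, end, interval, fillValue int64) []int64 {
	out := make([]int64, 0, (end-start)/interval)
	prev, seen := fillValue, false
	for t := start; t < end; t += interval {
		if v, ok := points[t]; ok {
			prev, seen = v, true
			out = append(out, v)
			continue
		}
		if seen {
			out = append(out, prev) // previous fill
		} else {
			out = append(out, fillValue) // number fill before any data
		}
	}
	return out
}

func main() {
	points := map[int64]int64{10: 7, 30: 9}
	fmt.Println(fillGaps(points, 0, 50, 10, 0)) // [0 7 7 9 9]
}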
+type integerInterruptIterator struct { + input IntegerIterator + closing <-chan struct{} + count int +} + +func newIntegerInterruptIterator(input IntegerIterator, closing <-chan struct{}) *integerInterruptIterator { + return &integerInterruptIterator{input: input, closing: closing} +} + +func (itr *integerInterruptIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *integerInterruptIterator) Close() error { return itr.input.Close() } + +func (itr *integerInterruptIterator) Next() (*IntegerPoint, error) { + // Only check if the channel is closed every N points. This + // intentionally checks on both 0 and N so that if the iterator + // has been interrupted before the first point is emitted it will + // not emit any points. + if itr.count&0xFF == 0xFF { + select { + case <-itr.closing: + return nil, nil + default: + // Reset iterator count to zero and fall through to emit the next point. + itr.count = 0 + } + } + + // Increment the counter for every point read. + itr.count++ + return itr.input.Next() +} + +// integerCloseInterruptIterator represents a integer implementation of CloseInterruptIterator. +type integerCloseInterruptIterator struct { + input IntegerIterator + closing <-chan struct{} + done chan struct{} + once sync.Once +} + +func newIntegerCloseInterruptIterator(input IntegerIterator, closing <-chan struct{}) *integerCloseInterruptIterator { + itr := &integerCloseInterruptIterator{ + input: input, + closing: closing, + done: make(chan struct{}), + } + go itr.monitor() + return itr +} + +func (itr *integerCloseInterruptIterator) monitor() { + select { + case <-itr.closing: + itr.Close() + case <-itr.done: + } +} + +func (itr *integerCloseInterruptIterator) Stats() IteratorStats { + return itr.input.Stats() +} + +func (itr *integerCloseInterruptIterator) Close() error { + itr.once.Do(func() { + close(itr.done) + itr.input.Close() + }) + return nil +} + +func (itr *integerCloseInterruptIterator) Next() (*IntegerPoint, error) { + p, err := itr.input.Next() + if err != nil { + // Check if the iterator was closed. + select { + case <-itr.done: + return nil, nil + default: + return nil, err + } + } + return p, nil +} + +// auxIntegerPoint represents a combination of a point and an error for the AuxIterator. +type auxIntegerPoint struct { + point *IntegerPoint + err error +} + +// integerAuxIterator represents a integer implementation of AuxIterator. 
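integerInterruptIterator amortizes the cost of the interrupt check by consulting the closing channel only when count&0xFF == 0xFF, i.e. roughly every 256 points, using a non-blocking select. The pattern in isolation (the loop over a slice below is a stand-in for reading points from an input iterator):

package main

import "fmt"

// process consumes values and aborts early if 'closing' is closed, checking
// the channel only every 256 iterations to keep the hot path cheap.
func process(values []int, closing <-chan struct{}) (consumed int) {
	count := 0
	for range values {
		if count&0xFF == 0xFF {
			select {
			case <-closing:
				return consumed
			default:
				count = 0 // reset and keep going
			}
		}
		count++
		consumed++
	}
	return consumed
}

func main() {
	closing := make(chan struct{})
	close(closing) // already interrupted

	values := make([]int, 1000)
	// The first check fires after 255 increments, so about 255 values get through.
	fmt.Println(process(values, closing))
}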
+type integerAuxIterator struct { + input *bufIntegerIterator + output chan auxIntegerPoint + fields auxIteratorFields + background bool +} + +func newIntegerAuxIterator(input IntegerIterator, opt IteratorOptions) *integerAuxIterator { + return &integerAuxIterator{ + input: newBufIntegerIterator(input), + output: make(chan auxIntegerPoint, 1), + fields: newAuxIteratorFields(opt), + } +} + +func (itr *integerAuxIterator) Background() { + itr.background = true + itr.Start() + go DrainIterator(itr) +} + +func (itr *integerAuxIterator) Start() { go itr.stream() } +func (itr *integerAuxIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *integerAuxIterator) Close() error { return itr.input.Close() } +func (itr *integerAuxIterator) Next() (*IntegerPoint, error) { + p := <-itr.output + return p.point, p.err +} +func (itr *integerAuxIterator) Iterator(name string, typ DataType) Iterator { + return itr.fields.iterator(name, typ) +} + +func (itr *integerAuxIterator) CreateIterator(opt IteratorOptions) (Iterator, error) { + expr := opt.Expr + if expr == nil { + panic("unable to create an iterator with no expression from an aux iterator") + } + + switch expr := expr.(type) { + case *VarRef: + return itr.Iterator(expr.Val, expr.Type), nil + default: + panic(fmt.Sprintf("invalid expression type for an aux iterator: %T", expr)) + } +} + +func (itr *integerAuxIterator) FieldDimensions(sources Sources) (fields map[string]DataType, dimensions map[string]struct{}, err error) { + return nil, nil, errors.New("not implemented") +} + +func (itr *integerAuxIterator) ExpandSources(sources Sources) (Sources, error) { + return nil, errors.New("not implemented") +} + +func (itr *integerAuxIterator) stream() { + for { + // Read next point. + p, err := itr.input.Next() + if err != nil { + itr.output <- auxIntegerPoint{err: err} + itr.fields.sendError(err) + break + } else if p == nil { + break + } + + // Send point to output and to each field iterator. + itr.output <- auxIntegerPoint{point: p} + if ok := itr.fields.send(p); !ok && itr.background { + break + } + } + + close(itr.output) + itr.fields.close() +} + +// integerChanIterator represents a new instance of integerChanIterator. +type integerChanIterator struct { + buf struct { + i int + filled bool + points [2]IntegerPoint + } + err error + cond *sync.Cond + done bool +} + +func (itr *integerChanIterator) Stats() IteratorStats { return IteratorStats{} } + +func (itr *integerChanIterator) Close() error { + itr.cond.L.Lock() + // Mark the channel iterator as done and signal all waiting goroutines to start again. + itr.done = true + itr.cond.Broadcast() + // Do not defer the unlock so we don't create an unnecessary allocation. + itr.cond.L.Unlock() + return nil +} + +func (itr *integerChanIterator) setBuf(name string, tags Tags, time int64, value interface{}) bool { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + + // Wait for either the iterator to be done (so we don't have to set the value) + // or for the buffer to have been read and ready for another write. + for !itr.done && itr.buf.filled { + itr.cond.Wait() + } + + // Do not set the value and return false to signal that the iterator is closed. + // Do this after the above wait as the above for loop may have exited because + // the iterator was closed. 
+ if itr.done { + return false + } + + switch v := value.(type) { + case int64: + itr.buf.points[itr.buf.i] = IntegerPoint{Name: name, Tags: tags, Time: time, Value: v} + + default: + itr.buf.points[itr.buf.i] = IntegerPoint{Name: name, Tags: tags, Time: time, Nil: true} + } + itr.buf.filled = true + + // Signal to all waiting goroutines that a new value is ready to read. + itr.cond.Signal() + return true +} + +func (itr *integerChanIterator) setErr(err error) { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + itr.err = err + + // Signal to all waiting goroutines that a new value is ready to read. + itr.cond.Signal() +} + +func (itr *integerChanIterator) Next() (*IntegerPoint, error) { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + + // Check for an error and return one if there. + if itr.err != nil { + return nil, itr.err + } + + // Wait until either a value is available in the buffer or + // the iterator is closed. + for !itr.done && !itr.buf.filled { + itr.cond.Wait() + } + + // Return nil once the channel is done and the buffer is empty. + if itr.done && !itr.buf.filled { + return nil, nil + } + + // Always read from the buffer if it exists, even if the iterator + // is closed. This prevents the last value from being truncated by + // the parent iterator. + p := &itr.buf.points[itr.buf.i] + itr.buf.i = (itr.buf.i + 1) % len(itr.buf.points) + itr.buf.filled = false + itr.cond.Signal() + return p, nil +} + +// integerReduceFloatIterator executes a reducer for every interval and buffers the result. +type integerReduceFloatIterator struct { + input *bufIntegerIterator + create func() (IntegerPointAggregator, FloatPointEmitter) + opt IteratorOptions + points []FloatPoint +} + +// Stats returns stats from the input iterator. +func (itr *integerReduceFloatIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerReduceFloatIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *integerReduceFloatIterator) Next() (*FloatPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// integerReduceFloatPoint stores the reduced data for a name/tag combination. +type integerReduceFloatPoint struct { + Name string + Tags Tags + Aggregator IntegerPointAggregator + Emitter FloatPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *integerReduceFloatIterator) reduce() ([]FloatPoint, error) { + // Calculate next window. + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) + + // Create points by tags. + m := make(map[string]*integerReduceFloatPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. 
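The reduce loops in these iterators bucket points by a composite key of measurement name and tag identifier, separated by a NUL byte so the two parts cannot collide, and then run one aggregator per bucket. A simplified sketch of that keying and aggregation, with a plain sum standing in for the pluggable aggregator/emitter pair:

package main

import "fmt"

type point struct {
	name  string
	tags  string // stand-in for Tags.ID()
	value int64
}

// sumByNameAndTags aggregates values per (name, tags) bucket using a
// NUL-separated composite key, mirroring the id construction in reduce().
func sumByNameAndTags(points []point) map[string]int64 {
	sums := make(map[string]int64)
	for _, p := range points {
		id := p.name
		if p.tags != "" {
			id += "\x00" + p.tags
		}
		sums[id] += p.value
	}
	return sums
}

func main() {
	pts := []point{
		{"cpu", "host=a", 1},
		{"cpu", "host=a", 2},
		{"cpu", "host=b", 5},
	}
	sums := sumByNameAndTags(pts)
	fmt.Println(sums["cpu\x00host=a"], sums["cpu\x00host=b"]) // 3 5
}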
+ rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &integerReduceFloatPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateInteger(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 { + sort.Sort(reverseStringSlice(keys)) + } + + a := make([]FloatPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a, nil +} + +// integerStreamFloatIterator streams inputs into the iterator and emits points gradually. +type integerStreamFloatIterator struct { + input *bufIntegerIterator + create func() (IntegerPointAggregator, FloatPointEmitter) + opt IteratorOptions + m map[string]*integerReduceFloatPoint + points []FloatPoint +} + +// newIntegerStreamFloatIterator returns a new instance of integerStreamFloatIterator. +func newIntegerStreamFloatIterator(input IntegerIterator, createFn func() (IntegerPointAggregator, FloatPointEmitter), opt IteratorOptions) *integerStreamFloatIterator { + return &integerStreamFloatIterator{ + input: newBufIntegerIterator(input), + create: createFn, + opt: opt, + m: make(map[string]*integerReduceFloatPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *integerStreamFloatIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerStreamFloatIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *integerStreamFloatIterator) Next() (*FloatPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *integerStreamFloatIterator) reduce() ([]FloatPoint, error) { + for { + // Read next point. + curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &integerReduceFloatPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateInteger(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// integerFloatExprIterator executes a function to modify an existing point +// for every output of the input iterator. 
+type integerFloatExprIterator struct { + left *bufIntegerIterator + right *bufIntegerIterator + fn integerFloatExprFunc + points []IntegerPoint // must be size 2 + storePrev bool +} + +func newIntegerFloatExprIterator(left, right IntegerIterator, opt IteratorOptions, fn func(a, b int64) float64) *integerFloatExprIterator { + var points []IntegerPoint + switch opt.Fill { + case NullFill, PreviousFill: + points = []IntegerPoint{{Nil: true}, {Nil: true}} + case NumberFill: + value := castToInteger(opt.FillValue) + points = []IntegerPoint{{Value: value}, {Value: value}} + } + return &integerFloatExprIterator{ + left: newBufIntegerIterator(left), + right: newBufIntegerIterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *integerFloatExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *integerFloatExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *integerFloatExprIterator) Next() (*FloatPoint, error) { + for { + a, err := itr.left.Next() + if err != nil { + return nil, err + } + b, err := itr.right.Next() + if err != nil { + return nil, err + } + + if a == nil && b == nil { + return nil, nil + } else if itr.points == nil && (a == nil || b == nil) { + return nil, nil + } + + if a != nil && b != nil { + if a.Time > b.Time { + itr.left.unread(a) + a = nil + } else if a.Time < b.Time { + itr.right.unread(b) + b = nil + } + } + + if a == nil || a.Nil { + if itr.points == nil { + continue + } + p := *b + p.Value = itr.points[0].Value + p.Nil = itr.points[0].Nil + a = &p + } else if b == nil || b.Nil { + if itr.points == nil { + continue + } + p := *a + p.Value = itr.points[1].Value + p.Nil = itr.points[1].Nil + b = &p + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + + p := &FloatPoint{ + Name: a.Name, + Tags: a.Tags, + Time: a.Time, + Nil: a.Nil || b.Nil, + Aggregated: a.Aggregated, + } + if !p.Nil { + p.Value = itr.fn(a.Value, b.Value) + } + return p, nil + + } +} + +// integerFloatExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. +type integerFloatExprFunc func(a, b int64) float64 + +// integerReduceIntegerIterator executes a reducer for every interval and buffers the result. +type integerReduceIntegerIterator struct { + input *bufIntegerIterator + create func() (IntegerPointAggregator, IntegerPointEmitter) + opt IteratorOptions + points []IntegerPoint +} + +// Stats returns stats from the input iterator. +func (itr *integerReduceIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerReduceIntegerIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *integerReduceIntegerIterator) Next() (*IntegerPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// integerReduceIntegerPoint stores the reduced data for a name/tag combination. 
+type integerReduceIntegerPoint struct { + Name string + Tags Tags + Aggregator IntegerPointAggregator + Emitter IntegerPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *integerReduceIntegerIterator) reduce() ([]IntegerPoint, error) { + // Calculate next window. + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) + + // Create points by tags. + m := make(map[string]*integerReduceIntegerPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &integerReduceIntegerPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateInteger(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 { + sort.Sort(reverseStringSlice(keys)) + } + + a := make([]IntegerPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a, nil +} + +// integerStreamIntegerIterator streams inputs into the iterator and emits points gradually. +type integerStreamIntegerIterator struct { + input *bufIntegerIterator + create func() (IntegerPointAggregator, IntegerPointEmitter) + opt IteratorOptions + m map[string]*integerReduceIntegerPoint + points []IntegerPoint +} + +// newIntegerStreamIntegerIterator returns a new instance of integerStreamIntegerIterator. +func newIntegerStreamIntegerIterator(input IntegerIterator, createFn func() (IntegerPointAggregator, IntegerPointEmitter), opt IteratorOptions) *integerStreamIntegerIterator { + return &integerStreamIntegerIterator{ + input: newBufIntegerIterator(input), + create: createFn, + opt: opt, + m: make(map[string]*integerReduceIntegerPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *integerStreamIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerStreamIntegerIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *integerStreamIntegerIterator) Next() (*IntegerPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. 
+func (itr *integerStreamIntegerIterator) reduce() ([]IntegerPoint, error) { + for { + // Read next point. + curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &integerReduceIntegerPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateInteger(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// integerExprIterator executes a function to modify an existing point +// for every output of the input iterator. +type integerExprIterator struct { + left *bufIntegerIterator + right *bufIntegerIterator + fn integerExprFunc + points []IntegerPoint // must be size 2 + storePrev bool +} + +func newIntegerExprIterator(left, right IntegerIterator, opt IteratorOptions, fn func(a, b int64) int64) *integerExprIterator { + var points []IntegerPoint + switch opt.Fill { + case NullFill, PreviousFill: + points = []IntegerPoint{{Nil: true}, {Nil: true}} + case NumberFill: + value := castToInteger(opt.FillValue) + points = []IntegerPoint{{Value: value}, {Value: value}} + } + return &integerExprIterator{ + left: newBufIntegerIterator(left), + right: newBufIntegerIterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *integerExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *integerExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *integerExprIterator) Next() (*IntegerPoint, error) { + for { + a, err := itr.left.Next() + if err != nil { + return nil, err + } + b, err := itr.right.Next() + if err != nil { + return nil, err + } + + if a == nil && b == nil { + return nil, nil + } else if itr.points == nil && (a == nil || b == nil) { + return nil, nil + } + + if a != nil && b != nil { + if a.Time > b.Time { + itr.left.unread(a) + a = nil + } else if a.Time < b.Time { + itr.right.unread(b) + b = nil + } + } + + if a == nil || a.Nil { + if itr.points == nil { + continue + } + p := *b + p.Value = itr.points[0].Value + p.Nil = itr.points[0].Nil + a = &p + } else if b == nil || b.Nil { + if itr.points == nil { + continue + } + p := *a + p.Value = itr.points[1].Value + p.Nil = itr.points[1].Nil + b = &p + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + + if a.Nil { + return a, nil + } else if b.Nil { + return b, nil + } + a.Value = itr.fn(a.Value, b.Value) + return a, nil + + } +} + +// integerExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. +type integerExprFunc func(a, b int64) int64 + +// integerReduceStringIterator executes a reducer for every interval and buffers the result. 
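The binary-expression iterators above join two time-ordered streams point by point: when the timestamps differ, the later point is pushed back (unread) and the other side is either skipped or filled, and when they match the operator fn is applied. A sketch of that time alignment over two sorted slices, dropping unmatched timestamps (the no-fill case; types are illustrative stand-ins):

package main

import "fmt"

type sample struct {
	time  int64
	value int64
}

// joinOnTime walks two ascending, time-sorted series and applies fn to values
// whose timestamps match, skipping timestamps present on only one side.
func joinOnTime(a, b []sample, fn func(x, y int64) int64) []sample {
	var out []sample
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch {
		case a[i].time < b[j].time:
			i++ // only the left side has this timestamp
		case a[i].time > b[j].time:
			j++ // only the right side has this timestamp
		default:
			out = append(out, sample{a[i].time, fn(a[i].value, b[j].value)})
			i++
			j++
		}
	}
	return out
}

func main() {
	a := []sample{{1, 10}, {2, 20}, {4, 40}}
	b := []sample{{2, 3}, {3, 5}, {4, 7}}
	fmt.Println(joinOnTime(a, b, func(x, y int64) int64 { return x + y }))
	// [{2 23} {4 47}]
}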
+type integerReduceStringIterator struct { + input *bufIntegerIterator + create func() (IntegerPointAggregator, StringPointEmitter) + opt IteratorOptions + points []StringPoint +} + +// Stats returns stats from the input iterator. +func (itr *integerReduceStringIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerReduceStringIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *integerReduceStringIterator) Next() (*StringPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// integerReduceStringPoint stores the reduced data for a name/tag combination. +type integerReduceStringPoint struct { + Name string + Tags Tags + Aggregator IntegerPointAggregator + Emitter StringPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *integerReduceStringIterator) reduce() ([]StringPoint, error) { + // Calculate next window. + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) + + // Create points by tags. + m := make(map[string]*integerReduceStringPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &integerReduceStringPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateInteger(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 { + sort.Sort(reverseStringSlice(keys)) + } + + a := make([]StringPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a, nil +} + +// integerStreamStringIterator streams inputs into the iterator and emits points gradually. +type integerStreamStringIterator struct { + input *bufIntegerIterator + create func() (IntegerPointAggregator, StringPointEmitter) + opt IteratorOptions + m map[string]*integerReduceStringPoint + points []StringPoint +} + +// newIntegerStreamStringIterator returns a new instance of integerStreamStringIterator. 
+func newIntegerStreamStringIterator(input IntegerIterator, createFn func() (IntegerPointAggregator, StringPointEmitter), opt IteratorOptions) *integerStreamStringIterator { + return &integerStreamStringIterator{ + input: newBufIntegerIterator(input), + create: createFn, + opt: opt, + m: make(map[string]*integerReduceStringPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *integerStreamStringIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerStreamStringIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *integerStreamStringIterator) Next() (*StringPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *integerStreamStringIterator) reduce() ([]StringPoint, error) { + for { + // Read next point. + curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &integerReduceStringPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateInteger(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// integerStringExprIterator executes a function to modify an existing point +// for every output of the input iterator. 
+type integerStringExprIterator struct { + left *bufIntegerIterator + right *bufIntegerIterator + fn integerStringExprFunc + points []IntegerPoint // must be size 2 + storePrev bool +} + +func newIntegerStringExprIterator(left, right IntegerIterator, opt IteratorOptions, fn func(a, b int64) string) *integerStringExprIterator { + var points []IntegerPoint + switch opt.Fill { + case NullFill, PreviousFill: + points = []IntegerPoint{{Nil: true}, {Nil: true}} + case NumberFill: + value := castToInteger(opt.FillValue) + points = []IntegerPoint{{Value: value}, {Value: value}} + } + return &integerStringExprIterator{ + left: newBufIntegerIterator(left), + right: newBufIntegerIterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *integerStringExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *integerStringExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *integerStringExprIterator) Next() (*StringPoint, error) { + for { + a, err := itr.left.Next() + if err != nil { + return nil, err + } + b, err := itr.right.Next() + if err != nil { + return nil, err + } + + if a == nil && b == nil { + return nil, nil + } else if itr.points == nil && (a == nil || b == nil) { + return nil, nil + } + + if a != nil && b != nil { + if a.Time > b.Time { + itr.left.unread(a) + a = nil + } else if a.Time < b.Time { + itr.right.unread(b) + b = nil + } + } + + if a == nil || a.Nil { + if itr.points == nil { + continue + } + p := *b + p.Value = itr.points[0].Value + p.Nil = itr.points[0].Nil + a = &p + } else if b == nil || b.Nil { + if itr.points == nil { + continue + } + p := *a + p.Value = itr.points[1].Value + p.Nil = itr.points[1].Nil + b = &p + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + + p := &StringPoint{ + Name: a.Name, + Tags: a.Tags, + Time: a.Time, + Nil: a.Nil || b.Nil, + Aggregated: a.Aggregated, + } + if !p.Nil { + p.Value = itr.fn(a.Value, b.Value) + } + return p, nil + + } +} + +// integerStringExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. +type integerStringExprFunc func(a, b int64) string + +// integerReduceBooleanIterator executes a reducer for every interval and buffers the result. +type integerReduceBooleanIterator struct { + input *bufIntegerIterator + create func() (IntegerPointAggregator, BooleanPointEmitter) + opt IteratorOptions + points []BooleanPoint +} + +// Stats returns stats from the input iterator. +func (itr *integerReduceBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerReduceBooleanIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *integerReduceBooleanIterator) Next() (*BooleanPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// integerReduceBooleanPoint stores the reduced data for a name/tag combination. 
+type integerReduceBooleanPoint struct { + Name string + Tags Tags + Aggregator IntegerPointAggregator + Emitter BooleanPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *integerReduceBooleanIterator) reduce() ([]BooleanPoint, error) { + // Calculate next window. + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) + + // Create points by tags. + m := make(map[string]*integerReduceBooleanPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &integerReduceBooleanPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateInteger(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 { + sort.Sort(reverseStringSlice(keys)) + } + + a := make([]BooleanPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a, nil +} + +// integerStreamBooleanIterator streams inputs into the iterator and emits points gradually. +type integerStreamBooleanIterator struct { + input *bufIntegerIterator + create func() (IntegerPointAggregator, BooleanPointEmitter) + opt IteratorOptions + m map[string]*integerReduceBooleanPoint + points []BooleanPoint +} + +// newIntegerStreamBooleanIterator returns a new instance of integerStreamBooleanIterator. +func newIntegerStreamBooleanIterator(input IntegerIterator, createFn func() (IntegerPointAggregator, BooleanPointEmitter), opt IteratorOptions) *integerStreamBooleanIterator { + return &integerStreamBooleanIterator{ + input: newBufIntegerIterator(input), + create: createFn, + opt: opt, + m: make(map[string]*integerReduceBooleanPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *integerStreamBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerStreamBooleanIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *integerStreamBooleanIterator) Next() (*BooleanPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. 
+func (itr *integerStreamBooleanIterator) reduce() ([]BooleanPoint, error) { + for { + // Read next point. + curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &integerReduceBooleanPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateInteger(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// integerBooleanExprIterator executes a function to modify an existing point +// for every output of the input iterator. +type integerBooleanExprIterator struct { + left *bufIntegerIterator + right *bufIntegerIterator + fn integerBooleanExprFunc + points []IntegerPoint // must be size 2 + storePrev bool +} + +func newIntegerBooleanExprIterator(left, right IntegerIterator, opt IteratorOptions, fn func(a, b int64) bool) *integerBooleanExprIterator { + var points []IntegerPoint + switch opt.Fill { + case NullFill, PreviousFill: + points = []IntegerPoint{{Nil: true}, {Nil: true}} + case NumberFill: + value := castToInteger(opt.FillValue) + points = []IntegerPoint{{Value: value}, {Value: value}} + } + return &integerBooleanExprIterator{ + left: newBufIntegerIterator(left), + right: newBufIntegerIterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *integerBooleanExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *integerBooleanExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *integerBooleanExprIterator) Next() (*BooleanPoint, error) { + for { + a, err := itr.left.Next() + if err != nil { + return nil, err + } + b, err := itr.right.Next() + if err != nil { + return nil, err + } + + if a == nil && b == nil { + return nil, nil + } else if itr.points == nil && (a == nil || b == nil) { + return nil, nil + } + + if a != nil && b != nil { + if a.Time > b.Time { + itr.left.unread(a) + a = nil + } else if a.Time < b.Time { + itr.right.unread(b) + b = nil + } + } + + if a == nil || a.Nil { + if itr.points == nil { + continue + } + p := *b + p.Value = itr.points[0].Value + p.Nil = itr.points[0].Nil + a = &p + } else if b == nil || b.Nil { + if itr.points == nil { + continue + } + p := *a + p.Value = itr.points[1].Value + p.Nil = itr.points[1].Nil + b = &p + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + + p := &BooleanPoint{ + Name: a.Name, + Tags: a.Tags, + Time: a.Time, + Nil: a.Nil || b.Nil, + Aggregated: a.Aggregated, + } + if !p.Nil { + p.Value = itr.fn(a.Value, b.Value) + } + return p, nil + + } +} + +// integerBooleanExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. 
+type integerBooleanExprFunc func(a, b int64) bool + +// integerTransformIterator executes a function to modify an existing point for every +// output of the input iterator. +type integerTransformIterator struct { + input IntegerIterator + fn integerTransformFunc +} + +// Stats returns stats from the input iterator. +func (itr *integerTransformIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerTransformIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *integerTransformIterator) Next() (*IntegerPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { + p = itr.fn(p) + } + return p, nil +} + +// integerTransformFunc creates or modifies a point. +// The point passed in may be modified and returned rather than allocating a +// new point if possible. +type integerTransformFunc func(p *IntegerPoint) *IntegerPoint + +// integerBoolTransformIterator executes a function to modify an existing point for every +// output of the input iterator. +type integerBoolTransformIterator struct { + input IntegerIterator + fn integerBoolTransformFunc +} + +// Stats returns stats from the input iterator. +func (itr *integerBoolTransformIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerBoolTransformIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *integerBoolTransformIterator) Next() (*BooleanPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { + return itr.fn(p), nil + } + return nil, nil +} + +// integerBoolTransformFunc creates or modifies a point. +// The point passed in may be modified and returned rather than allocating a +// new point if possible. +type integerBoolTransformFunc func(p *IntegerPoint) *BooleanPoint + +// integerDedupeIterator only outputs unique points. +// This differs from the DistinctIterator in that it compares all aux fields too. +// This iterator is relatively inefficient and should only be used on small +// datasets such as meta query results. +type integerDedupeIterator struct { + input IntegerIterator + m map[string]struct{} // lookup of points already sent +} + +// newIntegerDedupeIterator returns a new instance of integerDedupeIterator. +func newIntegerDedupeIterator(input IntegerIterator) *integerDedupeIterator { + return &integerDedupeIterator{ + input: input, + m: make(map[string]struct{}), + } +} + +// Stats returns stats from the input iterator. +func (itr *integerDedupeIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerDedupeIterator) Close() error { return itr.input.Close() } + +// Next returns the next unique point from the input iterator. +func (itr *integerDedupeIterator) Next() (*IntegerPoint, error) { + for { + // Read next point. + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Serialize to bytes to store in lookup. + buf, err := proto.Marshal(encodeIntegerPoint(p)) + if err != nil { + return nil, err + } + + // If the point has already been output then move to the next point. + if _, ok := itr.m[string(buf)]; ok { + continue + } + + // Otherwise mark it as emitted and return point. 
+ itr.m[string(buf)] = struct{}{} + return p, nil + } +} + +// integerReaderIterator represents an iterator that streams from a reader. +type integerReaderIterator struct { + r io.Reader + dec *IntegerPointDecoder +} + +// newIntegerReaderIterator returns a new instance of integerReaderIterator. +func newIntegerReaderIterator(r io.Reader, stats IteratorStats) *integerReaderIterator { + dec := NewIntegerPointDecoder(r) + dec.stats = stats + + return &integerReaderIterator{ + r: r, + dec: dec, + } +} + +// Stats returns stats about points processed. +func (itr *integerReaderIterator) Stats() IteratorStats { return itr.dec.stats } + +// Close closes the underlying reader, if applicable. +func (itr *integerReaderIterator) Close() error { + if r, ok := itr.r.(io.ReadCloser); ok { + return r.Close() + } + return nil +} + +// Next returns the next point from the iterator. +func (itr *integerReaderIterator) Next() (*IntegerPoint, error) { + // OPTIMIZE(benbjohnson): Reuse point on iterator. + + // Unmarshal next point. + p := &IntegerPoint{} + if err := itr.dec.DecodeIntegerPoint(p); err == io.EOF { + return nil, nil + } else if err != nil { + return nil, err + } + return p, nil +} + +// StringIterator represents a stream of string points. +type StringIterator interface { + Iterator + Next() (*StringPoint, error) +} + +// newStringIterators converts a slice of Iterator to a slice of StringIterator. +// Drop and closes any iterator in itrs that is not a StringIterator and cannot +// be cast to a StringIterator. +func newStringIterators(itrs []Iterator) []StringIterator { + a := make([]StringIterator, 0, len(itrs)) + for _, itr := range itrs { + switch itr := itr.(type) { + case StringIterator: + a = append(a, itr) + + default: + itr.Close() + } + } + return a +} + +// bufStringIterator represents a buffered StringIterator. +type bufStringIterator struct { + itr StringIterator + buf *StringPoint +} + +// newBufStringIterator returns a buffered StringIterator. +func newBufStringIterator(itr StringIterator) *bufStringIterator { + return &bufStringIterator{itr: itr} +} + +// Stats returns statistics from the input iterator. +func (itr *bufStringIterator) Stats() IteratorStats { return itr.itr.Stats() } + +// Close closes the underlying iterator. +func (itr *bufStringIterator) Close() error { return itr.itr.Close() } + +// peek returns the next point without removing it from the iterator. +func (itr *bufStringIterator) peek() (*StringPoint, error) { + p, err := itr.Next() + if err != nil { + return nil, err + } + itr.unread(p) + return p, nil +} + +// peekTime returns the time of the next point. +// Returns zero time if no more points available. +func (itr *bufStringIterator) peekTime() (int64, error) { + p, err := itr.peek() + if p == nil || err != nil { + return ZeroTime, err + } + return p.Time, nil +} + +// Next returns the current buffer, if exists, or calls the underlying iterator. +func (itr *bufStringIterator) Next() (*StringPoint, error) { + buf := itr.buf + if buf != nil { + itr.buf = nil + return buf, nil + } + return itr.itr.Next() +} + +// NextInWindow returns the next value if it is between [startTime, endTime). +// If the next value is outside the range then it is moved to the buffer. 
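integerDedupeIterator above drops repeats by serializing each point through its protobuf encoding and using the resulting bytes as a set key, which also covers the aux fields; as its comment notes, this is only suitable for small result sets such as meta queries. A generic sketch of the same dedupe-by-serialized-key idea, using fmt.Sprintf in place of the protobuf encoding (illustrative only):

package main

import "fmt"

type point struct {
	name  string
	time  int64
	value int64
}

// dedupe returns points in order, keeping only the first occurrence of each
// fully identical point; the formatted string stands in for the encoded point.
func dedupe(points []point) []point {
	seen := make(map[string]struct{})
	var out []point
	for _, p := range points {
		key := fmt.Sprintf("%s\x00%d\x00%d", p.name, p.time, p.value)
		if _, ok := seen[key]; ok {
			continue
		}
		seen[key] = struct{}{}
		out = append(out, p)
	}
	return out
}

func main() {
	pts := []point{{"cpu", 1, 5}, {"cpu", 1, 5}, {"cpu", 2, 5}}
	fmt.Println(dedupe(pts)) // [{cpu 1 5} {cpu 2 5}]
}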
+func (itr *bufStringIterator) NextInWindow(startTime, endTime int64) (*StringPoint, error) { + v, err := itr.Next() + if v == nil || err != nil { + return nil, err + } else if t := v.Time; t >= endTime || t < startTime { + itr.unread(v) + return nil, nil + } + return v, nil +} + +// unread sets v to the buffer. It is read on the next call to Next(). +func (itr *bufStringIterator) unread(v *StringPoint) { itr.buf = v } + +// stringMergeIterator represents an iterator that combines multiple string iterators. +type stringMergeIterator struct { + inputs []StringIterator + heap *stringMergeHeap + init bool + + // Current iterator and window. + curr *stringMergeHeapItem + window struct { + name string + tags string + startTime int64 + endTime int64 + } +} + +// newStringMergeIterator returns a new instance of stringMergeIterator. +func newStringMergeIterator(inputs []StringIterator, opt IteratorOptions) *stringMergeIterator { + itr := &stringMergeIterator{ + inputs: inputs, + heap: &stringMergeHeap{ + items: make([]*stringMergeHeapItem, 0, len(inputs)), + opt: opt, + }, + } + + // Initialize heap items. + for _, input := range inputs { + // Wrap in buffer, ignore any inputs without anymore points. + bufInput := newBufStringIterator(input) + + // Append to the heap. + itr.heap.items = append(itr.heap.items, &stringMergeHeapItem{itr: bufInput}) + } + + return itr +} + +// Stats returns an aggregation of stats from the underlying iterators. +func (itr *stringMergeIterator) Stats() IteratorStats { + var stats IteratorStats + for _, input := range itr.inputs { + stats.Add(input.Stats()) + } + return stats +} + +// Close closes the underlying iterators. +func (itr *stringMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + itr.curr = nil + itr.inputs = nil + itr.heap.items = nil + return nil +} + +// Next returns the next point from the iterator. +func (itr *stringMergeIterator) Next() (*StringPoint, error) { + // Initialize the heap. This needs to be done lazily on the first call to this iterator + // so that iterator initialization done through the Select() call returns quickly. + // Queries can only be interrupted after the Select() call completes so any operations + // done during iterator creation cannot be interrupted, which is why we do it here + // instead so an interrupt can happen while initializing the heap. + if !itr.init { + items := itr.heap.items + itr.heap.items = make([]*stringMergeHeapItem, 0, len(items)) + for _, item := range items { + if p, err := item.itr.peek(); err != nil { + return nil, err + } else if p == nil { + continue + } + itr.heap.items = append(itr.heap.items, item) + } + heap.Init(itr.heap) + itr.init = true + } + + for { + // Retrieve the next iterator if we don't have one. + if itr.curr == nil { + if len(itr.heap.items) == 0 { + return nil, nil + } + itr.curr = heap.Pop(itr.heap).(*stringMergeHeapItem) + + // Read point and set current window. + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } + itr.window.name, itr.window.tags = p.Name, p.Tags.ID() + itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) + return p, nil + } + + // Read the next point from the current iterator. + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } + + // If there are no more points then remove iterator from heap and find next. + if p == nil { + itr.curr = nil + continue + } + + // Check if the point is inside of our current window. 
+ inWindow := true + if window := itr.window; window.name != p.Name { + inWindow = false + } else if window.tags != p.Tags.ID() { + inWindow = false + } else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime { + inWindow = false + } else if !opt.Ascending && p.Time < window.startTime { + inWindow = false + } + + // If it's outside our window then push iterator back on the heap and find new iterator. + if !inWindow { + itr.curr.itr.unread(p) + heap.Push(itr.heap, itr.curr) + itr.curr = nil + continue + } + + return p, nil + } +} + +// stringMergeHeap represents a heap of stringMergeHeapItems. +// Items are sorted by their next window and then by name/tags. +type stringMergeHeap struct { + opt IteratorOptions + items []*stringMergeHeapItem +} + +func (h stringMergeHeap) Len() int { return len(h.items) } +func (h stringMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h stringMergeHeap) Less(i, j int) bool { + x, err := h.items[i].itr.peek() + if err != nil { + return true + } + y, err := h.items[j].itr.peek() + if err != nil { + return false + } + + if h.opt.Ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if x.Tags.ID() != y.Tags.ID() { + return x.Tags.ID() < y.Tags.ID() + } + } else { + if x.Name != y.Name { + return x.Name > y.Name + } else if x.Tags.ID() != y.Tags.ID() { + return x.Tags.ID() > y.Tags.ID() + } + } + + xt, _ := h.opt.Window(x.Time) + yt, _ := h.opt.Window(y.Time) + + if h.opt.Ascending { + return xt < yt + } + return xt > yt +} + +func (h *stringMergeHeap) Push(x interface{}) { + h.items = append(h.items, x.(*stringMergeHeapItem)) +} + +func (h *stringMergeHeap) Pop() interface{} { + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} + +type stringMergeHeapItem struct { + itr *bufStringIterator +} + +// stringSortedMergeIterator is an iterator that sorts and merges multiple iterators into one. +type stringSortedMergeIterator struct { + inputs []StringIterator + opt IteratorOptions + heap stringSortedMergeHeap + init bool +} + +// newStringSortedMergeIterator returns an instance of stringSortedMergeIterator. +func newStringSortedMergeIterator(inputs []StringIterator, opt IteratorOptions) Iterator { + itr := &stringSortedMergeIterator{ + inputs: inputs, + heap: make(stringSortedMergeHeap, 0, len(inputs)), + opt: opt, + } + + // Initialize heap items. + for _, input := range inputs { + // Append to the heap. + itr.heap = append(itr.heap, &stringSortedMergeHeapItem{itr: input, ascending: opt.Ascending}) + } + + return itr +} + +// Stats returns an aggregation of stats from the underlying iterators. +func (itr *stringSortedMergeIterator) Stats() IteratorStats { + var stats IteratorStats + for _, input := range itr.inputs { + stats.Add(input.Stats()) + } + return stats +} + +// Close closes the underlying iterators. +func (itr *stringSortedMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + return nil +} + +// Next returns the next points from the iterator. +func (itr *stringSortedMergeIterator) Next() (*StringPoint, error) { return itr.pop() } + +// pop returns the next point from the heap. +// Reads the next point from item's cursor and puts it back on the heap. +func (itr *stringSortedMergeIterator) pop() (*StringPoint, error) { + // Initialize the heap. See the MergeIterator to see why this has to be done lazily. 
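+	// (Building the heap here, on the first call, rather than in the constructor
+	// keeps iterator creation through Select() fast and allows an interrupt to
+	// land while the heap is being initialized.)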
+ if !itr.init { + items := itr.heap + itr.heap = make([]*stringSortedMergeHeapItem, 0, len(items)) + for _, item := range items { + var err error + if item.point, err = item.itr.Next(); err != nil { + return nil, err + } else if item.point == nil { + continue + } + itr.heap = append(itr.heap, item) + } + heap.Init(&itr.heap) + itr.init = true + } + + if len(itr.heap) == 0 { + return nil, nil + } + + // Read the next item from the heap. + item := heap.Pop(&itr.heap).(*stringSortedMergeHeapItem) + if item.err != nil { + return nil, item.err + } else if item.point == nil { + return nil, nil + } + + // Copy the point for return. + p := item.point.Clone() + + // Read the next item from the cursor. Push back to heap if one exists. + if item.point, item.err = item.itr.Next(); item.point != nil { + heap.Push(&itr.heap, item) + } + + return p, nil +} + +// stringSortedMergeHeap represents a heap of stringSortedMergeHeapItems. +type stringSortedMergeHeap []*stringSortedMergeHeapItem + +func (h stringSortedMergeHeap) Len() int { return len(h) } +func (h stringSortedMergeHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } +func (h stringSortedMergeHeap) Less(i, j int) bool { + x, y := h[i].point, h[j].point + + if h[i].ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if !x.Tags.Equals(&y.Tags) { + return x.Tags.ID() < y.Tags.ID() + } + return x.Time < y.Time + } + + if x.Name != y.Name { + return x.Name > y.Name + } else if !x.Tags.Equals(&y.Tags) { + return x.Tags.ID() > y.Tags.ID() + } + return x.Time > y.Time +} + +func (h *stringSortedMergeHeap) Push(x interface{}) { + *h = append(*h, x.(*stringSortedMergeHeapItem)) +} + +func (h *stringSortedMergeHeap) Pop() interface{} { + old := *h + n := len(old) + item := old[n-1] + *h = old[0 : n-1] + return item +} + +type stringSortedMergeHeapItem struct { + point *StringPoint + err error + itr StringIterator + ascending bool +} + +// stringParallelIterator represents an iterator that pulls data in a separate goroutine. +type stringParallelIterator struct { + input StringIterator + ch chan stringPointError + + once sync.Once + closing chan struct{} + wg sync.WaitGroup +} + +// newStringParallelIterator returns a new instance of stringParallelIterator. +func newStringParallelIterator(input StringIterator) *stringParallelIterator { + itr := &stringParallelIterator{ + input: input, + ch: make(chan stringPointError, 1), + closing: make(chan struct{}), + } + itr.wg.Add(1) + go itr.monitor() + return itr +} + +// Stats returns stats from the underlying iterator. +func (itr *stringParallelIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the underlying iterators. +func (itr *stringParallelIterator) Close() error { + itr.once.Do(func() { close(itr.closing) }) + itr.wg.Wait() + return itr.input.Close() +} + +// Next returns the next point from the iterator. +func (itr *stringParallelIterator) Next() (*StringPoint, error) { + v, ok := <-itr.ch + if !ok { + return nil, io.EOF + } + return v.point, v.err +} + +// monitor runs in a separate goroutine and actively pulls the next point. +func (itr *stringParallelIterator) monitor() { + defer close(itr.ch) + defer itr.wg.Done() + + for { + // Read next point. + p, err := itr.input.Next() + + select { + case <-itr.closing: + return + case itr.ch <- stringPointError{point: p, err: err}: + } + } +} + +type stringPointError struct { + point *StringPoint + err error +} + +// stringLimitIterator represents an iterator that limits points per group. 
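// Illustrative sketch: stringMergeIterator and stringSortedMergeIterator above
// both keep their inputs on a container/heap heap ordered by whichever input
// sorts first, pop the smallest, and push the input back while it still has
// data. The standalone example below merges already-sorted int slices the same
// way; cursor, intHeap and mergeSorted are hypothetical names used only here.

package main

import (
	"container/heap"
	"fmt"
)

// cursor tracks the next unread position of one sorted input.
type cursor struct {
	vals []int
	pos  int
}

// intHeap orders cursors by their next value so Pop always yields the input
// with the smallest pending element.
type intHeap []*cursor

func (h intHeap) Len() int           { return len(h) }
func (h intHeap) Less(i, j int) bool { return h[i].vals[h[i].pos] < h[j].vals[h[j].pos] }
func (h intHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

func (h *intHeap) Push(x interface{}) { *h = append(*h, x.(*cursor)) }
func (h *intHeap) Pop() interface{} {
	old := *h
	n := len(old)
	item := old[n-1]
	*h = old[:n-1]
	return item
}

// mergeSorted merges already-sorted inputs into one sorted output.
func mergeSorted(inputs ...[]int) []int {
	h := &intHeap{}
	for _, in := range inputs {
		if len(in) > 0 {
			*h = append(*h, &cursor{vals: in})
		}
	}
	heap.Init(h) // like the iterators above, the heap is built in one pass

	var out []int
	for h.Len() > 0 {
		c := heap.Pop(h).(*cursor)
		out = append(out, c.vals[c.pos])
		if c.pos++; c.pos < len(c.vals) {
			heap.Push(h, c) // this input still has values; put it back
		}
	}
	return out
}

func main() {
	fmt.Println(mergeSorted([]int{1, 4, 7}, []int{2, 5}, []int{0, 9})) // [0 1 2 4 5 7 9]
}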
+type stringLimitIterator struct { + input StringIterator + opt IteratorOptions + n int + + prev struct { + name string + tags Tags + } +} + +// newStringLimitIterator returns a new instance of stringLimitIterator. +func newStringLimitIterator(input StringIterator, opt IteratorOptions) *stringLimitIterator { + return &stringLimitIterator{ + input: input, + opt: opt, + } +} + +// Stats returns stats from the underlying iterator. +func (itr *stringLimitIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the underlying iterators. +func (itr *stringLimitIterator) Close() error { return itr.input.Close() } + +// Next returns the next point from the iterator. +func (itr *stringLimitIterator) Next() (*StringPoint, error) { + for { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Reset window and counter if a new window is encountered. + if p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) { + itr.prev.name = p.Name + itr.prev.tags = p.Tags + itr.n = 0 + } + + // Increment counter. + itr.n++ + + // Read next point if not beyond the offset. + if itr.n <= itr.opt.Offset { + continue + } + + // Read next point if we're beyond the limit. + if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { + // If there's no interval, no groups, and a single source then simply exit. + if itr.opt.Interval.IsZero() && len(itr.opt.Dimensions) == 0 && len(itr.opt.Sources) == 1 { + return nil, nil + } + continue + } + + return p, nil + } +} + +type stringFillIterator struct { + input *bufStringIterator + prev StringPoint + startTime int64 + endTime int64 + auxFields []interface{} + init bool + opt IteratorOptions + + window struct { + name string + tags Tags + time int64 + } +} + +func newStringFillIterator(input StringIterator, expr Expr, opt IteratorOptions) *stringFillIterator { + if opt.Fill == NullFill { + if expr, ok := expr.(*Call); ok && expr.Name == "count" { + opt.Fill = NumberFill + opt.FillValue = "" + } + } + + var startTime, endTime int64 + if opt.Ascending { + startTime, _ = opt.Window(opt.StartTime) + endTime, _ = opt.Window(opt.EndTime) + } else { + startTime, _ = opt.Window(opt.EndTime) + endTime, _ = opt.Window(opt.StartTime) + } + + var auxFields []interface{} + if len(opt.Aux) > 0 { + auxFields = make([]interface{}, len(opt.Aux)) + } + + return &stringFillIterator{ + input: newBufStringIterator(input), + prev: StringPoint{Nil: true}, + startTime: startTime, + endTime: endTime, + auxFields: auxFields, + opt: opt, + } +} + +func (itr *stringFillIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *stringFillIterator) Close() error { return itr.input.Close() } + +func (itr *stringFillIterator) Next() (*StringPoint, error) { + if !itr.init { + p, err := itr.input.peek() + if p == nil || err != nil { + return nil, err + } + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + itr.init = true + } + + p, err := itr.input.Next() + if err != nil { + return nil, err + } + + // Check if the next point is outside of our window or is nil. + for p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { + // If we are inside of an interval, unread the point and continue below to + // constructing a new point. + if itr.opt.Ascending { + if itr.window.time <= itr.endTime { + itr.input.unread(p) + p = nil + break + } + } else { + if itr.window.time >= itr.endTime { + itr.input.unread(p) + p = nil + break + } + } + + // We are *not* in a current interval. 
If there is no next point, + // we are at the end of all intervals. + if p == nil { + return nil, nil + } + + // Set the new interval. + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + itr.prev = StringPoint{Nil: true} + break + } + + // Check if the point is our next expected point. + if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { + if p != nil { + itr.input.unread(p) + } + + p = &StringPoint{ + Name: itr.window.name, + Tags: itr.window.tags, + Time: itr.window.time, + Aux: itr.auxFields, + } + + switch itr.opt.Fill { + case LinearFill: + fallthrough + case NullFill: + p.Nil = true + case NumberFill: + p.Value = castToString(itr.opt.FillValue) + case PreviousFill: + if !itr.prev.Nil { + p.Value = itr.prev.Value + p.Nil = itr.prev.Nil + } else { + p.Nil = true + } + } + } else { + itr.prev = *p + } + + // Advance the expected time. Do not advance to a new window here + // as there may be lingering points with the same timestamp in the previous + // window. + if itr.opt.Ascending { + itr.window.time = p.Time + int64(itr.opt.Interval.Duration) + } else { + itr.window.time = p.Time - int64(itr.opt.Interval.Duration) + } + return p, nil +} + +// stringIntervalIterator represents a string implementation of IntervalIterator. +type stringIntervalIterator struct { + input StringIterator + opt IteratorOptions +} + +func newStringIntervalIterator(input StringIterator, opt IteratorOptions) *stringIntervalIterator { + return &stringIntervalIterator{input: input, opt: opt} +} + +func (itr *stringIntervalIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *stringIntervalIterator) Close() error { return itr.input.Close() } + +func (itr *stringIntervalIterator) Next() (*StringPoint, error) { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + p.Time, _ = itr.opt.Window(p.Time) + // If we see the minimum allowable time, set the time to zero so we don't + // break the default returned time for aggregate queries without times. + if p.Time == MinTime { + p.Time = 0 + } + return p, nil +} + +// stringInterruptIterator represents a string implementation of InterruptIterator. +type stringInterruptIterator struct { + input StringIterator + closing <-chan struct{} + count int +} + +func newStringInterruptIterator(input StringIterator, closing <-chan struct{}) *stringInterruptIterator { + return &stringInterruptIterator{input: input, closing: closing} +} + +func (itr *stringInterruptIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *stringInterruptIterator) Close() error { return itr.input.Close() } + +func (itr *stringInterruptIterator) Next() (*StringPoint, error) { + // Only check if the channel is closed every N points. This + // intentionally checks on both 0 and N so that if the iterator + // has been interrupted before the first point is emitted it will + // not emit any points. + if itr.count&0xFF == 0xFF { + select { + case <-itr.closing: + return nil, nil + default: + // Reset iterator count to zero and fall through to emit the next point. + itr.count = 0 + } + } + + // Increment the counter for every point read. + itr.count++ + return itr.input.Next() +} + +// stringCloseInterruptIterator represents a string implementation of CloseInterruptIterator. 
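// Illustrative sketch: stringInterruptIterator above only polls its closing
// channel once every 256 points (count&0xFF == 0xFF), so the common path is
// just a counter increment. A standalone version of that periodic-poll
// pattern; interruptReader is a hypothetical name used only for this example.

package main

import "fmt"

// interruptReader yields sequential ints until closing is closed, checking
// the channel only every 256 calls to keep the hot path cheap.
type interruptReader struct {
	closing <-chan struct{}
	count   int
	next    int
}

func (r *interruptReader) Next() (int, bool) {
	// A select with a default case is non-blocking but still costs more than a
	// counter increment, so only poll the closing channel every 256th call.
	if r.count&0xFF == 0xFF {
		select {
		case <-r.closing:
			return 0, false
		default:
			r.count = 0
		}
	}
	r.count++
	v := r.next
	r.next++
	return v, true
}

func main() {
	closing := make(chan struct{})
	r := &interruptReader{closing: closing}

	// Read a few values, then interrupt.
	for i := 0; i < 3; i++ {
		v, _ := r.Next()
		fmt.Println(v)
	}
	close(closing)

	// The reader stops within the next 256 reads once the channel is closed.
	for {
		if _, ok := r.Next(); !ok {
			break
		}
	}
	fmt.Println("interrupted")
}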
+type stringCloseInterruptIterator struct { + input StringIterator + closing <-chan struct{} + done chan struct{} + once sync.Once +} + +func newStringCloseInterruptIterator(input StringIterator, closing <-chan struct{}) *stringCloseInterruptIterator { + itr := &stringCloseInterruptIterator{ + input: input, + closing: closing, + done: make(chan struct{}), + } + go itr.monitor() + return itr +} + +func (itr *stringCloseInterruptIterator) monitor() { + select { + case <-itr.closing: + itr.Close() + case <-itr.done: + } +} + +func (itr *stringCloseInterruptIterator) Stats() IteratorStats { + return itr.input.Stats() +} + +func (itr *stringCloseInterruptIterator) Close() error { + itr.once.Do(func() { + close(itr.done) + itr.input.Close() + }) + return nil +} + +func (itr *stringCloseInterruptIterator) Next() (*StringPoint, error) { + p, err := itr.input.Next() + if err != nil { + // Check if the iterator was closed. + select { + case <-itr.done: + return nil, nil + default: + return nil, err + } + } + return p, nil +} + +// auxStringPoint represents a combination of a point and an error for the AuxIterator. +type auxStringPoint struct { + point *StringPoint + err error +} + +// stringAuxIterator represents a string implementation of AuxIterator. +type stringAuxIterator struct { + input *bufStringIterator + output chan auxStringPoint + fields auxIteratorFields + background bool +} + +func newStringAuxIterator(input StringIterator, opt IteratorOptions) *stringAuxIterator { + return &stringAuxIterator{ + input: newBufStringIterator(input), + output: make(chan auxStringPoint, 1), + fields: newAuxIteratorFields(opt), + } +} + +func (itr *stringAuxIterator) Background() { + itr.background = true + itr.Start() + go DrainIterator(itr) +} + +func (itr *stringAuxIterator) Start() { go itr.stream() } +func (itr *stringAuxIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *stringAuxIterator) Close() error { return itr.input.Close() } +func (itr *stringAuxIterator) Next() (*StringPoint, error) { + p := <-itr.output + return p.point, p.err +} +func (itr *stringAuxIterator) Iterator(name string, typ DataType) Iterator { + return itr.fields.iterator(name, typ) +} + +func (itr *stringAuxIterator) CreateIterator(opt IteratorOptions) (Iterator, error) { + expr := opt.Expr + if expr == nil { + panic("unable to create an iterator with no expression from an aux iterator") + } + + switch expr := expr.(type) { + case *VarRef: + return itr.Iterator(expr.Val, expr.Type), nil + default: + panic(fmt.Sprintf("invalid expression type for an aux iterator: %T", expr)) + } +} + +func (itr *stringAuxIterator) FieldDimensions(sources Sources) (fields map[string]DataType, dimensions map[string]struct{}, err error) { + return nil, nil, errors.New("not implemented") +} + +func (itr *stringAuxIterator) ExpandSources(sources Sources) (Sources, error) { + return nil, errors.New("not implemented") +} + +func (itr *stringAuxIterator) stream() { + for { + // Read next point. + p, err := itr.input.Next() + if err != nil { + itr.output <- auxStringPoint{err: err} + itr.fields.sendError(err) + break + } else if p == nil { + break + } + + // Send point to output and to each field iterator. + itr.output <- auxStringPoint{point: p} + if ok := itr.fields.send(p); !ok && itr.background { + break + } + } + + close(itr.output) + itr.fields.close() +} + +// stringChanIterator represents a new instance of stringChanIterator. 
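// Illustrative sketch: the chan iterators defined just below hand points from
// a writer to a reader through a condition-variable-guarded one-value buffer
// rather than a Go channel, so the writer can tell whether the reader has gone
// away (setBuf returns false once the iterator is closed). A minimal
// standalone version of that handoff; slot is a hypothetical name used only
// for this example.

package main

import (
	"fmt"
	"sync"
)

// slot is a one-value handoff between a single writer and a single reader.
type slot struct {
	mu     sync.Mutex
	cond   *sync.Cond
	val    int
	filled bool
	done   bool
}

func newSlot() *slot {
	s := &slot{}
	s.cond = sync.NewCond(&s.mu)
	return s
}

// put blocks until the previous value was consumed, then stores v.
// It returns false if the slot was closed while waiting.
func (s *slot) put(v int) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	for !s.done && s.filled {
		s.cond.Wait()
	}
	if s.done {
		return false
	}
	s.val, s.filled = v, true
	s.cond.Signal()
	return true
}

// get blocks until a value is available or the slot is closed.
// A value that is already buffered is still returned after close.
func (s *slot) get() (int, bool) {
	s.mu.Lock()
	defer s.mu.Unlock()
	for !s.done && !s.filled {
		s.cond.Wait()
	}
	if !s.filled {
		return 0, false
	}
	v := s.val
	s.filled = false
	s.cond.Signal()
	return v, true
}

// close marks the slot as done and wakes every waiter.
func (s *slot) close() {
	s.mu.Lock()
	s.done = true
	s.cond.Broadcast()
	s.mu.Unlock()
}

func main() {
	s := newSlot()
	go func() {
		for i := 0; i < 3; i++ {
			s.put(i)
		}
		s.close()
	}()
	for {
		v, ok := s.get()
		if !ok {
			break
		}
		fmt.Println(v) // prints 0, 1, 2
	}
}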
+type stringChanIterator struct { + buf struct { + i int + filled bool + points [2]StringPoint + } + err error + cond *sync.Cond + done bool +} + +func (itr *stringChanIterator) Stats() IteratorStats { return IteratorStats{} } + +func (itr *stringChanIterator) Close() error { + itr.cond.L.Lock() + // Mark the channel iterator as done and signal all waiting goroutines to start again. + itr.done = true + itr.cond.Broadcast() + // Do not defer the unlock so we don't create an unnecessary allocation. + itr.cond.L.Unlock() + return nil +} + +func (itr *stringChanIterator) setBuf(name string, tags Tags, time int64, value interface{}) bool { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + + // Wait for either the iterator to be done (so we don't have to set the value) + // or for the buffer to have been read and ready for another write. + for !itr.done && itr.buf.filled { + itr.cond.Wait() + } + + // Do not set the value and return false to signal that the iterator is closed. + // Do this after the above wait as the above for loop may have exited because + // the iterator was closed. + if itr.done { + return false + } + + switch v := value.(type) { + case string: + itr.buf.points[itr.buf.i] = StringPoint{Name: name, Tags: tags, Time: time, Value: v} + + default: + itr.buf.points[itr.buf.i] = StringPoint{Name: name, Tags: tags, Time: time, Nil: true} + } + itr.buf.filled = true + + // Signal to all waiting goroutines that a new value is ready to read. + itr.cond.Signal() + return true +} + +func (itr *stringChanIterator) setErr(err error) { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + itr.err = err + + // Signal to all waiting goroutines that a new value is ready to read. + itr.cond.Signal() +} + +func (itr *stringChanIterator) Next() (*StringPoint, error) { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + + // Check for an error and return one if there. + if itr.err != nil { + return nil, itr.err + } + + // Wait until either a value is available in the buffer or + // the iterator is closed. + for !itr.done && !itr.buf.filled { + itr.cond.Wait() + } + + // Return nil once the channel is done and the buffer is empty. + if itr.done && !itr.buf.filled { + return nil, nil + } + + // Always read from the buffer if it exists, even if the iterator + // is closed. This prevents the last value from being truncated by + // the parent iterator. + p := &itr.buf.points[itr.buf.i] + itr.buf.i = (itr.buf.i + 1) % len(itr.buf.points) + itr.buf.filled = false + itr.cond.Signal() + return p, nil +} + +// stringReduceFloatIterator executes a reducer for every interval and buffers the result. +type stringReduceFloatIterator struct { + input *bufStringIterator + create func() (StringPointAggregator, FloatPointEmitter) + opt IteratorOptions + points []FloatPoint +} + +// Stats returns stats from the input iterator. +func (itr *stringReduceFloatIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *stringReduceFloatIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *stringReduceFloatIterator) Next() (*FloatPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. 
+ p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// stringReduceFloatPoint stores the reduced data for a name/tag combination. +type stringReduceFloatPoint struct { + Name string + Tags Tags + Aggregator StringPointAggregator + Emitter FloatPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *stringReduceFloatIterator) reduce() ([]FloatPoint, error) { + // Calculate next window. + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) + + // Create points by tags. + m := make(map[string]*stringReduceFloatPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &stringReduceFloatPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateString(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 { + sort.Sort(reverseStringSlice(keys)) + } + + a := make([]FloatPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a, nil +} + +// stringStreamFloatIterator streams inputs into the iterator and emits points gradually. +type stringStreamFloatIterator struct { + input *bufStringIterator + create func() (StringPointAggregator, FloatPointEmitter) + opt IteratorOptions + m map[string]*stringReduceFloatPoint + points []FloatPoint +} + +// newStringStreamFloatIterator returns a new instance of stringStreamFloatIterator. +func newStringStreamFloatIterator(input StringIterator, createFn func() (StringPointAggregator, FloatPointEmitter), opt IteratorOptions) *stringStreamFloatIterator { + return &stringStreamFloatIterator{ + input: newBufStringIterator(input), + create: createFn, + opt: opt, + m: make(map[string]*stringReduceFloatPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *stringStreamFloatIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *stringStreamFloatIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *stringStreamFloatIterator) Next() (*FloatPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. 
+// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *stringStreamFloatIterator) reduce() ([]FloatPoint, error) { + for { + // Read next point. + curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &stringReduceFloatPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateString(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// stringFloatExprIterator executes a function to modify an existing point +// for every output of the input iterator. +type stringFloatExprIterator struct { + left *bufStringIterator + right *bufStringIterator + fn stringFloatExprFunc + points []StringPoint // must be size 2 + storePrev bool +} + +func newStringFloatExprIterator(left, right StringIterator, opt IteratorOptions, fn func(a, b string) float64) *stringFloatExprIterator { + var points []StringPoint + switch opt.Fill { + case NullFill, PreviousFill: + points = []StringPoint{{Nil: true}, {Nil: true}} + case NumberFill: + value := castToString(opt.FillValue) + points = []StringPoint{{Value: value}, {Value: value}} + } + return &stringFloatExprIterator{ + left: newBufStringIterator(left), + right: newBufStringIterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *stringFloatExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *stringFloatExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *stringFloatExprIterator) Next() (*FloatPoint, error) { + for { + a, err := itr.left.Next() + if err != nil { + return nil, err + } + b, err := itr.right.Next() + if err != nil { + return nil, err + } + + if a == nil && b == nil { + return nil, nil + } else if itr.points == nil && (a == nil || b == nil) { + return nil, nil + } + + if a != nil && b != nil { + if a.Time > b.Time { + itr.left.unread(a) + a = nil + } else if a.Time < b.Time { + itr.right.unread(b) + b = nil + } + } + + if a == nil || a.Nil { + if itr.points == nil { + continue + } + p := *b + p.Value = itr.points[0].Value + p.Nil = itr.points[0].Nil + a = &p + } else if b == nil || b.Nil { + if itr.points == nil { + continue + } + p := *a + p.Value = itr.points[1].Value + p.Nil = itr.points[1].Nil + b = &p + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + + p := &FloatPoint{ + Name: a.Name, + Tags: a.Tags, + Time: a.Time, + Nil: a.Nil || b.Nil, + Aggregated: a.Aggregated, + } + if !p.Nil { + p.Value = itr.fn(a.Value, b.Value) + } + return p, nil + + } +} + +// stringFloatExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. 
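// Illustrative sketch: the *ExprIterator types in this file join two
// time-ordered streams by reading one point from each side and, when the
// timestamps differ, unreading the later point so the two sides stay aligned.
// The standalone example below does the same alignment over two sorted
// (time, value) slices and only emits an output when both sides have a point
// for that timestamp, roughly the behaviour when no fill value is configured;
// sample and joinByTime are hypothetical names used only for this example.

package main

import "fmt"

type sample struct {
	Time  int64
	Value float64
}

// joinByTime walks both slices in time order and applies fn where the
// timestamps match, skipping samples that exist on only one side.
func joinByTime(a, b []sample, fn func(x, y float64) float64) []sample {
	var out []sample
	for len(a) > 0 && len(b) > 0 {
		switch {
		case a[0].Time < b[0].Time:
			a = a[1:] // b is "unread": only a advances
		case a[0].Time > b[0].Time:
			b = b[1:] // a is "unread": only b advances
		default:
			out = append(out, sample{Time: a[0].Time, Value: fn(a[0].Value, b[0].Value)})
			a, b = a[1:], b[1:]
		}
	}
	return out
}

func main() {
	a := []sample{{10, 1}, {20, 2}, {40, 4}}
	b := []sample{{10, 10}, {30, 30}, {40, 40}}
	// Only timestamps 10 and 40 exist on both sides.
	fmt.Println(joinByTime(a, b, func(x, y float64) float64 { return x + y })) // [{10 11} {40 44}]
}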
+type stringFloatExprFunc func(a, b string) float64 + +// stringReduceIntegerIterator executes a reducer for every interval and buffers the result. +type stringReduceIntegerIterator struct { + input *bufStringIterator + create func() (StringPointAggregator, IntegerPointEmitter) + opt IteratorOptions + points []IntegerPoint +} + +// Stats returns stats from the input iterator. +func (itr *stringReduceIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *stringReduceIntegerIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *stringReduceIntegerIterator) Next() (*IntegerPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// stringReduceIntegerPoint stores the reduced data for a name/tag combination. +type stringReduceIntegerPoint struct { + Name string + Tags Tags + Aggregator StringPointAggregator + Emitter IntegerPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *stringReduceIntegerIterator) reduce() ([]IntegerPoint, error) { + // Calculate next window. + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) + + // Create points by tags. + m := make(map[string]*stringReduceIntegerPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &stringReduceIntegerPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateString(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 { + sort.Sort(reverseStringSlice(keys)) + } + + a := make([]IntegerPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a, nil +} + +// stringStreamIntegerIterator streams inputs into the iterator and emits points gradually. +type stringStreamIntegerIterator struct { + input *bufStringIterator + create func() (StringPointAggregator, IntegerPointEmitter) + opt IteratorOptions + m map[string]*stringReduceIntegerPoint + points []IntegerPoint +} + +// newStringStreamIntegerIterator returns a new instance of stringStreamIntegerIterator. 
+func newStringStreamIntegerIterator(input StringIterator, createFn func() (StringPointAggregator, IntegerPointEmitter), opt IteratorOptions) *stringStreamIntegerIterator { + return &stringStreamIntegerIterator{ + input: newBufStringIterator(input), + create: createFn, + opt: opt, + m: make(map[string]*stringReduceIntegerPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *stringStreamIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *stringStreamIntegerIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *stringStreamIntegerIterator) Next() (*IntegerPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *stringStreamIntegerIterator) reduce() ([]IntegerPoint, error) { + for { + // Read next point. + curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &stringReduceIntegerPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateString(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// stringIntegerExprIterator executes a function to modify an existing point +// for every output of the input iterator. 
+type stringIntegerExprIterator struct { + left *bufStringIterator + right *bufStringIterator + fn stringIntegerExprFunc + points []StringPoint // must be size 2 + storePrev bool +} + +func newStringIntegerExprIterator(left, right StringIterator, opt IteratorOptions, fn func(a, b string) int64) *stringIntegerExprIterator { + var points []StringPoint + switch opt.Fill { + case NullFill, PreviousFill: + points = []StringPoint{{Nil: true}, {Nil: true}} + case NumberFill: + value := castToString(opt.FillValue) + points = []StringPoint{{Value: value}, {Value: value}} + } + return &stringIntegerExprIterator{ + left: newBufStringIterator(left), + right: newBufStringIterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *stringIntegerExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *stringIntegerExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *stringIntegerExprIterator) Next() (*IntegerPoint, error) { + for { + a, err := itr.left.Next() + if err != nil { + return nil, err + } + b, err := itr.right.Next() + if err != nil { + return nil, err + } + + if a == nil && b == nil { + return nil, nil + } else if itr.points == nil && (a == nil || b == nil) { + return nil, nil + } + + if a != nil && b != nil { + if a.Time > b.Time { + itr.left.unread(a) + a = nil + } else if a.Time < b.Time { + itr.right.unread(b) + b = nil + } + } + + if a == nil || a.Nil { + if itr.points == nil { + continue + } + p := *b + p.Value = itr.points[0].Value + p.Nil = itr.points[0].Nil + a = &p + } else if b == nil || b.Nil { + if itr.points == nil { + continue + } + p := *a + p.Value = itr.points[1].Value + p.Nil = itr.points[1].Nil + b = &p + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + + p := &IntegerPoint{ + Name: a.Name, + Tags: a.Tags, + Time: a.Time, + Nil: a.Nil || b.Nil, + Aggregated: a.Aggregated, + } + if !p.Nil { + p.Value = itr.fn(a.Value, b.Value) + } + return p, nil + + } +} + +// stringIntegerExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. +type stringIntegerExprFunc func(a, b string) int64 + +// stringReduceStringIterator executes a reducer for every interval and buffers the result. +type stringReduceStringIterator struct { + input *bufStringIterator + create func() (StringPointAggregator, StringPointEmitter) + opt IteratorOptions + points []StringPoint +} + +// Stats returns stats from the input iterator. +func (itr *stringReduceStringIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *stringReduceStringIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *stringReduceStringIterator) Next() (*StringPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// stringReduceStringPoint stores the reduced data for a name/tag combination. 
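// Illustrative sketch: the reduce() methods in this file consume every point
// inside one time window, bucket the points by a composite name + "\x00" +
// tags.ID() key, feed each bucket its own aggregator/emitter pair, and then
// emit one batch of results per bucket. The standalone example below applies
// the same shape to count values per series key inside a [start, end) window;
// point and countInWindow are hypothetical names used only for this example.

package main

import "fmt"

type point struct {
	Series string
	Time   int64
	Value  float64
}

// countInWindow counts points per series key whose time falls in [start, end).
func countInWindow(points []point, start, end int64) map[string]int {
	counts := make(map[string]int)
	for _, p := range points {
		if p.Time < start || p.Time >= end {
			continue // outside the window; a real iterator would unread it
		}
		counts[p.Series]++ // one aggregator per series key
	}
	return counts
}

func main() {
	points := []point{
		{"cpu,host=a", 10, 0.5},
		{"cpu,host=a", 20, 0.7},
		{"cpu,host=b", 15, 0.1},
		{"cpu,host=b", 70, 0.9}, // outside the window below
	}
	// counts: cpu,host=a -> 2, cpu,host=b -> 1
	fmt.Println(countInWindow(points, 0, 60))
}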
+type stringReduceStringPoint struct { + Name string + Tags Tags + Aggregator StringPointAggregator + Emitter StringPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *stringReduceStringIterator) reduce() ([]StringPoint, error) { + // Calculate next window. + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) + + // Create points by tags. + m := make(map[string]*stringReduceStringPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &stringReduceStringPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateString(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 { + sort.Sort(reverseStringSlice(keys)) + } + + a := make([]StringPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a, nil +} + +// stringStreamStringIterator streams inputs into the iterator and emits points gradually. +type stringStreamStringIterator struct { + input *bufStringIterator + create func() (StringPointAggregator, StringPointEmitter) + opt IteratorOptions + m map[string]*stringReduceStringPoint + points []StringPoint +} + +// newStringStreamStringIterator returns a new instance of stringStreamStringIterator. +func newStringStreamStringIterator(input StringIterator, createFn func() (StringPointAggregator, StringPointEmitter), opt IteratorOptions) *stringStreamStringIterator { + return &stringStreamStringIterator{ + input: newBufStringIterator(input), + create: createFn, + opt: opt, + m: make(map[string]*stringReduceStringPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *stringStreamStringIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *stringStreamStringIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *stringStreamStringIterator) Next() (*StringPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *stringStreamStringIterator) reduce() ([]StringPoint, error) { + for { + // Read next point. 
+ curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &stringReduceStringPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateString(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// stringExprIterator executes a function to modify an existing point +// for every output of the input iterator. +type stringExprIterator struct { + left *bufStringIterator + right *bufStringIterator + fn stringExprFunc + points []StringPoint // must be size 2 + storePrev bool +} + +func newStringExprIterator(left, right StringIterator, opt IteratorOptions, fn func(a, b string) string) *stringExprIterator { + var points []StringPoint + switch opt.Fill { + case NullFill, PreviousFill: + points = []StringPoint{{Nil: true}, {Nil: true}} + case NumberFill: + value := castToString(opt.FillValue) + points = []StringPoint{{Value: value}, {Value: value}} + } + return &stringExprIterator{ + left: newBufStringIterator(left), + right: newBufStringIterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *stringExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *stringExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *stringExprIterator) Next() (*StringPoint, error) { + for { + a, err := itr.left.Next() + if err != nil { + return nil, err + } + b, err := itr.right.Next() + if err != nil { + return nil, err + } + + if a == nil && b == nil { + return nil, nil + } else if itr.points == nil && (a == nil || b == nil) { + return nil, nil + } + + if a != nil && b != nil { + if a.Time > b.Time { + itr.left.unread(a) + a = nil + } else if a.Time < b.Time { + itr.right.unread(b) + b = nil + } + } + + if a == nil || a.Nil { + if itr.points == nil { + continue + } + p := *b + p.Value = itr.points[0].Value + p.Nil = itr.points[0].Nil + a = &p + } else if b == nil || b.Nil { + if itr.points == nil { + continue + } + p := *a + p.Value = itr.points[1].Value + p.Nil = itr.points[1].Nil + b = &p + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + + if a.Nil { + return a, nil + } else if b.Nil { + return b, nil + } + a.Value = itr.fn(a.Value, b.Value) + return a, nil + + } +} + +// stringExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. +type stringExprFunc func(a, b string) string + +// stringReduceBooleanIterator executes a reducer for every interval and buffers the result. +type stringReduceBooleanIterator struct { + input *bufStringIterator + create func() (StringPointAggregator, BooleanPointEmitter) + opt IteratorOptions + points []BooleanPoint +} + +// Stats returns stats from the input iterator. 
+func (itr *stringReduceBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *stringReduceBooleanIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *stringReduceBooleanIterator) Next() (*BooleanPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// stringReduceBooleanPoint stores the reduced data for a name/tag combination. +type stringReduceBooleanPoint struct { + Name string + Tags Tags + Aggregator StringPointAggregator + Emitter BooleanPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *stringReduceBooleanIterator) reduce() ([]BooleanPoint, error) { + // Calculate next window. + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) + + // Create points by tags. + m := make(map[string]*stringReduceBooleanPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &stringReduceBooleanPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateString(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 { + sort.Sort(reverseStringSlice(keys)) + } + + a := make([]BooleanPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a, nil +} + +// stringStreamBooleanIterator streams inputs into the iterator and emits points gradually. +type stringStreamBooleanIterator struct { + input *bufStringIterator + create func() (StringPointAggregator, BooleanPointEmitter) + opt IteratorOptions + m map[string]*stringReduceBooleanPoint + points []BooleanPoint +} + +// newStringStreamBooleanIterator returns a new instance of stringStreamBooleanIterator. +func newStringStreamBooleanIterator(input StringIterator, createFn func() (StringPointAggregator, BooleanPointEmitter), opt IteratorOptions) *stringStreamBooleanIterator { + return &stringStreamBooleanIterator{ + input: newBufStringIterator(input), + create: createFn, + opt: opt, + m: make(map[string]*stringReduceBooleanPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *stringStreamBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. 
+func (itr *stringStreamBooleanIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *stringStreamBooleanIterator) Next() (*BooleanPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *stringStreamBooleanIterator) reduce() ([]BooleanPoint, error) { + for { + // Read next point. + curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &stringReduceBooleanPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateString(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// stringBooleanExprIterator executes a function to modify an existing point +// for every output of the input iterator. +type stringBooleanExprIterator struct { + left *bufStringIterator + right *bufStringIterator + fn stringBooleanExprFunc + points []StringPoint // must be size 2 + storePrev bool +} + +func newStringBooleanExprIterator(left, right StringIterator, opt IteratorOptions, fn func(a, b string) bool) *stringBooleanExprIterator { + var points []StringPoint + switch opt.Fill { + case NullFill, PreviousFill: + points = []StringPoint{{Nil: true}, {Nil: true}} + case NumberFill: + value := castToString(opt.FillValue) + points = []StringPoint{{Value: value}, {Value: value}} + } + return &stringBooleanExprIterator{ + left: newBufStringIterator(left), + right: newBufStringIterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *stringBooleanExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *stringBooleanExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *stringBooleanExprIterator) Next() (*BooleanPoint, error) { + for { + a, err := itr.left.Next() + if err != nil { + return nil, err + } + b, err := itr.right.Next() + if err != nil { + return nil, err + } + + if a == nil && b == nil { + return nil, nil + } else if itr.points == nil && (a == nil || b == nil) { + return nil, nil + } + + if a != nil && b != nil { + if a.Time > b.Time { + itr.left.unread(a) + a = nil + } else if a.Time < b.Time { + itr.right.unread(b) + b = nil + } + } + + if a == nil || a.Nil { + if itr.points == nil { + continue + } + p := *b + p.Value = itr.points[0].Value + p.Nil = itr.points[0].Nil + a = &p + } else if b == nil || b.Nil { + if itr.points == nil { + continue + } + p := *a + p.Value = itr.points[1].Value + p.Nil = itr.points[1].Nil + b = &p + } + + if 
itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + + p := &BooleanPoint{ + Name: a.Name, + Tags: a.Tags, + Time: a.Time, + Nil: a.Nil || b.Nil, + Aggregated: a.Aggregated, + } + if !p.Nil { + p.Value = itr.fn(a.Value, b.Value) + } + return p, nil + + } +} + +// stringBooleanExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. +type stringBooleanExprFunc func(a, b string) bool + +// stringTransformIterator executes a function to modify an existing point for every +// output of the input iterator. +type stringTransformIterator struct { + input StringIterator + fn stringTransformFunc +} + +// Stats returns stats from the input iterator. +func (itr *stringTransformIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *stringTransformIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *stringTransformIterator) Next() (*StringPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { + p = itr.fn(p) + } + return p, nil +} + +// stringTransformFunc creates or modifies a point. +// The point passed in may be modified and returned rather than allocating a +// new point if possible. +type stringTransformFunc func(p *StringPoint) *StringPoint + +// stringBoolTransformIterator executes a function to modify an existing point for every +// output of the input iterator. +type stringBoolTransformIterator struct { + input StringIterator + fn stringBoolTransformFunc +} + +// Stats returns stats from the input iterator. +func (itr *stringBoolTransformIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *stringBoolTransformIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *stringBoolTransformIterator) Next() (*BooleanPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { + return itr.fn(p), nil + } + return nil, nil +} + +// stringBoolTransformFunc creates or modifies a point. +// The point passed in may be modified and returned rather than allocating a +// new point if possible. +type stringBoolTransformFunc func(p *StringPoint) *BooleanPoint + +// stringDedupeIterator only outputs unique points. +// This differs from the DistinctIterator in that it compares all aux fields too. +// This iterator is relatively inefficient and should only be used on small +// datasets such as meta query results. +type stringDedupeIterator struct { + input StringIterator + m map[string]struct{} // lookup of points already sent +} + +// newStringDedupeIterator returns a new instance of stringDedupeIterator. +func newStringDedupeIterator(input StringIterator) *stringDedupeIterator { + return &stringDedupeIterator{ + input: input, + m: make(map[string]struct{}), + } +} + +// Stats returns stats from the input iterator. +func (itr *stringDedupeIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *stringDedupeIterator) Close() error { return itr.input.Close() } + +// Next returns the next unique point from the input iterator. 
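// Illustrative sketch: the dedupe iterators in this file remember every point
// they have already emitted by serializing it (the real code uses
// proto.Marshal) and using the resulting bytes as a map[string]struct{} key.
// The standalone example below uses encoding/json instead of protobuf purely
// to stay dependency-free; row and dedupe are hypothetical names used only
// for this example.

package main

import (
	"encoding/json"
	"fmt"
)

type row struct {
	Name  string
	Value int
}

// dedupe returns rows in order, dropping any row identical to one already seen.
func dedupe(rows []row) ([]row, error) {
	seen := make(map[string]struct{})
	var out []row
	for _, r := range rows {
		key, err := json.Marshal(r)
		if err != nil {
			return nil, err
		}
		if _, ok := seen[string(key)]; ok {
			continue // already emitted
		}
		seen[string(key)] = struct{}{}
		out = append(out, r)
	}
	return out, nil
}

func main() {
	rows := []row{{"a", 1}, {"b", 2}, {"a", 1}, {"a", 3}}
	out, _ := dedupe(rows)
	fmt.Println(out) // [{a 1} {b 2} {a 3}]
}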
+func (itr *stringDedupeIterator) Next() (*StringPoint, error) { + for { + // Read next point. + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Serialize to bytes to store in lookup. + buf, err := proto.Marshal(encodeStringPoint(p)) + if err != nil { + return nil, err + } + + // If the point has already been output then move to the next point. + if _, ok := itr.m[string(buf)]; ok { + continue + } + + // Otherwise mark it as emitted and return point. + itr.m[string(buf)] = struct{}{} + return p, nil + } +} + +// stringReaderIterator represents an iterator that streams from a reader. +type stringReaderIterator struct { + r io.Reader + dec *StringPointDecoder +} + +// newStringReaderIterator returns a new instance of stringReaderIterator. +func newStringReaderIterator(r io.Reader, stats IteratorStats) *stringReaderIterator { + dec := NewStringPointDecoder(r) + dec.stats = stats + + return &stringReaderIterator{ + r: r, + dec: dec, + } +} + +// Stats returns stats about points processed. +func (itr *stringReaderIterator) Stats() IteratorStats { return itr.dec.stats } + +// Close closes the underlying reader, if applicable. +func (itr *stringReaderIterator) Close() error { + if r, ok := itr.r.(io.ReadCloser); ok { + return r.Close() + } + return nil +} + +// Next returns the next point from the iterator. +func (itr *stringReaderIterator) Next() (*StringPoint, error) { + // OPTIMIZE(benbjohnson): Reuse point on iterator. + + // Unmarshal next point. + p := &StringPoint{} + if err := itr.dec.DecodeStringPoint(p); err == io.EOF { + return nil, nil + } else if err != nil { + return nil, err + } + return p, nil +} + +// BooleanIterator represents a stream of boolean points. +type BooleanIterator interface { + Iterator + Next() (*BooleanPoint, error) +} + +// newBooleanIterators converts a slice of Iterator to a slice of BooleanIterator. +// Drop and closes any iterator in itrs that is not a BooleanIterator and cannot +// be cast to a BooleanIterator. +func newBooleanIterators(itrs []Iterator) []BooleanIterator { + a := make([]BooleanIterator, 0, len(itrs)) + for _, itr := range itrs { + switch itr := itr.(type) { + case BooleanIterator: + a = append(a, itr) + + default: + itr.Close() + } + } + return a +} + +// bufBooleanIterator represents a buffered BooleanIterator. +type bufBooleanIterator struct { + itr BooleanIterator + buf *BooleanPoint +} + +// newBufBooleanIterator returns a buffered BooleanIterator. +func newBufBooleanIterator(itr BooleanIterator) *bufBooleanIterator { + return &bufBooleanIterator{itr: itr} +} + +// Stats returns statistics from the input iterator. +func (itr *bufBooleanIterator) Stats() IteratorStats { return itr.itr.Stats() } + +// Close closes the underlying iterator. +func (itr *bufBooleanIterator) Close() error { return itr.itr.Close() } + +// peek returns the next point without removing it from the iterator. +func (itr *bufBooleanIterator) peek() (*BooleanPoint, error) { + p, err := itr.Next() + if err != nil { + return nil, err + } + itr.unread(p) + return p, nil +} + +// peekTime returns the time of the next point. +// Returns zero time if no more points available. +func (itr *bufBooleanIterator) peekTime() (int64, error) { + p, err := itr.peek() + if p == nil || err != nil { + return ZeroTime, err + } + return p.Time, nil +} + +// Next returns the current buffer, if exists, or calls the underlying iterator. 
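+// Only a single point of lookahead is kept; unread simply overwrites the
+// buffer with the most recently read point.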
+func (itr *bufBooleanIterator) Next() (*BooleanPoint, error) { + buf := itr.buf + if buf != nil { + itr.buf = nil + return buf, nil + } + return itr.itr.Next() +} + +// NextInWindow returns the next value if it is between [startTime, endTime). +// If the next value is outside the range then it is moved to the buffer. +func (itr *bufBooleanIterator) NextInWindow(startTime, endTime int64) (*BooleanPoint, error) { + v, err := itr.Next() + if v == nil || err != nil { + return nil, err + } else if t := v.Time; t >= endTime || t < startTime { + itr.unread(v) + return nil, nil + } + return v, nil +} + +// unread sets v to the buffer. It is read on the next call to Next(). +func (itr *bufBooleanIterator) unread(v *BooleanPoint) { itr.buf = v } + +// booleanMergeIterator represents an iterator that combines multiple boolean iterators. +type booleanMergeIterator struct { + inputs []BooleanIterator + heap *booleanMergeHeap + init bool + + // Current iterator and window. + curr *booleanMergeHeapItem + window struct { + name string + tags string + startTime int64 + endTime int64 + } +} + +// newBooleanMergeIterator returns a new instance of booleanMergeIterator. +func newBooleanMergeIterator(inputs []BooleanIterator, opt IteratorOptions) *booleanMergeIterator { + itr := &booleanMergeIterator{ + inputs: inputs, + heap: &booleanMergeHeap{ + items: make([]*booleanMergeHeapItem, 0, len(inputs)), + opt: opt, + }, + } + + // Initialize heap items. + for _, input := range inputs { + // Wrap in buffer, ignore any inputs without anymore points. + bufInput := newBufBooleanIterator(input) + + // Append to the heap. + itr.heap.items = append(itr.heap.items, &booleanMergeHeapItem{itr: bufInput}) + } + + return itr +} + +// Stats returns an aggregation of stats from the underlying iterators. +func (itr *booleanMergeIterator) Stats() IteratorStats { + var stats IteratorStats + for _, input := range itr.inputs { + stats.Add(input.Stats()) + } + return stats +} + +// Close closes the underlying iterators. +func (itr *booleanMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + itr.curr = nil + itr.inputs = nil + itr.heap.items = nil + return nil +} + +// Next returns the next point from the iterator. +func (itr *booleanMergeIterator) Next() (*BooleanPoint, error) { + // Initialize the heap. This needs to be done lazily on the first call to this iterator + // so that iterator initialization done through the Select() call returns quickly. + // Queries can only be interrupted after the Select() call completes so any operations + // done during iterator creation cannot be interrupted, which is why we do it here + // instead so an interrupt can happen while initializing the heap. + if !itr.init { + items := itr.heap.items + itr.heap.items = make([]*booleanMergeHeapItem, 0, len(items)) + for _, item := range items { + if p, err := item.itr.peek(); err != nil { + return nil, err + } else if p == nil { + continue + } + itr.heap.items = append(itr.heap.items, item) + } + heap.Init(itr.heap) + itr.init = true + } + + for { + // Retrieve the next iterator if we don't have one. + if itr.curr == nil { + if len(itr.heap.items) == 0 { + return nil, nil + } + itr.curr = heap.Pop(itr.heap).(*booleanMergeHeapItem) + + // Read point and set current window. 
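+			// The first point from a newly popped iterator is returned immediately;
+			// later points are checked against this window before being emitted.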
+ p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } + itr.window.name, itr.window.tags = p.Name, p.Tags.ID() + itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) + return p, nil + } + + // Read the next point from the current iterator. + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } + + // If there are no more points then remove iterator from heap and find next. + if p == nil { + itr.curr = nil + continue + } + + // Check if the point is inside of our current window. + inWindow := true + if window := itr.window; window.name != p.Name { + inWindow = false + } else if window.tags != p.Tags.ID() { + inWindow = false + } else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime { + inWindow = false + } else if !opt.Ascending && p.Time < window.startTime { + inWindow = false + } + + // If it's outside our window then push iterator back on the heap and find new iterator. + if !inWindow { + itr.curr.itr.unread(p) + heap.Push(itr.heap, itr.curr) + itr.curr = nil + continue + } + + return p, nil + } +} + +// booleanMergeHeap represents a heap of booleanMergeHeapItems. +// Items are sorted by their next window and then by name/tags. +type booleanMergeHeap struct { + opt IteratorOptions + items []*booleanMergeHeapItem +} + +func (h booleanMergeHeap) Len() int { return len(h.items) } +func (h booleanMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h booleanMergeHeap) Less(i, j int) bool { + x, err := h.items[i].itr.peek() + if err != nil { + return true + } + y, err := h.items[j].itr.peek() + if err != nil { + return false + } + + if h.opt.Ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if x.Tags.ID() != y.Tags.ID() { + return x.Tags.ID() < y.Tags.ID() + } + } else { + if x.Name != y.Name { + return x.Name > y.Name + } else if x.Tags.ID() != y.Tags.ID() { + return x.Tags.ID() > y.Tags.ID() + } + } + + xt, _ := h.opt.Window(x.Time) + yt, _ := h.opt.Window(y.Time) + + if h.opt.Ascending { + return xt < yt + } + return xt > yt +} + +func (h *booleanMergeHeap) Push(x interface{}) { + h.items = append(h.items, x.(*booleanMergeHeapItem)) +} + +func (h *booleanMergeHeap) Pop() interface{} { + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} + +type booleanMergeHeapItem struct { + itr *bufBooleanIterator +} + +// booleanSortedMergeIterator is an iterator that sorts and merges multiple iterators into one. +type booleanSortedMergeIterator struct { + inputs []BooleanIterator + opt IteratorOptions + heap booleanSortedMergeHeap + init bool +} + +// newBooleanSortedMergeIterator returns an instance of booleanSortedMergeIterator. +func newBooleanSortedMergeIterator(inputs []BooleanIterator, opt IteratorOptions) Iterator { + itr := &booleanSortedMergeIterator{ + inputs: inputs, + heap: make(booleanSortedMergeHeap, 0, len(inputs)), + opt: opt, + } + + // Initialize heap items. + for _, input := range inputs { + // Append to the heap. + itr.heap = append(itr.heap, &booleanSortedMergeHeapItem{itr: input, ascending: opt.Ascending}) + } + + return itr +} + +// Stats returns an aggregation of stats from the underlying iterators. +func (itr *booleanSortedMergeIterator) Stats() IteratorStats { + var stats IteratorStats + for _, input := range itr.inputs { + stats.Add(input.Stats()) + } + return stats +} + +// Close closes the underlying iterators. 
+func (itr *booleanSortedMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + return nil +} + +// Next returns the next points from the iterator. +func (itr *booleanSortedMergeIterator) Next() (*BooleanPoint, error) { return itr.pop() } + +// pop returns the next point from the heap. +// Reads the next point from item's cursor and puts it back on the heap. +func (itr *booleanSortedMergeIterator) pop() (*BooleanPoint, error) { + // Initialize the heap. See the MergeIterator to see why this has to be done lazily. + if !itr.init { + items := itr.heap + itr.heap = make([]*booleanSortedMergeHeapItem, 0, len(items)) + for _, item := range items { + var err error + if item.point, err = item.itr.Next(); err != nil { + return nil, err + } else if item.point == nil { + continue + } + itr.heap = append(itr.heap, item) + } + heap.Init(&itr.heap) + itr.init = true + } + + if len(itr.heap) == 0 { + return nil, nil + } + + // Read the next item from the heap. + item := heap.Pop(&itr.heap).(*booleanSortedMergeHeapItem) + if item.err != nil { + return nil, item.err + } else if item.point == nil { + return nil, nil + } + + // Copy the point for return. + p := item.point.Clone() + + // Read the next item from the cursor. Push back to heap if one exists. + if item.point, item.err = item.itr.Next(); item.point != nil { + heap.Push(&itr.heap, item) + } + + return p, nil +} + +// booleanSortedMergeHeap represents a heap of booleanSortedMergeHeapItems. +type booleanSortedMergeHeap []*booleanSortedMergeHeapItem + +func (h booleanSortedMergeHeap) Len() int { return len(h) } +func (h booleanSortedMergeHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } +func (h booleanSortedMergeHeap) Less(i, j int) bool { + x, y := h[i].point, h[j].point + + if h[i].ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if !x.Tags.Equals(&y.Tags) { + return x.Tags.ID() < y.Tags.ID() + } + return x.Time < y.Time + } + + if x.Name != y.Name { + return x.Name > y.Name + } else if !x.Tags.Equals(&y.Tags) { + return x.Tags.ID() > y.Tags.ID() + } + return x.Time > y.Time +} + +func (h *booleanSortedMergeHeap) Push(x interface{}) { + *h = append(*h, x.(*booleanSortedMergeHeapItem)) +} + +func (h *booleanSortedMergeHeap) Pop() interface{} { + old := *h + n := len(old) + item := old[n-1] + *h = old[0 : n-1] + return item +} + +type booleanSortedMergeHeapItem struct { + point *BooleanPoint + err error + itr BooleanIterator + ascending bool +} + +// booleanParallelIterator represents an iterator that pulls data in a separate goroutine. +type booleanParallelIterator struct { + input BooleanIterator + ch chan booleanPointError + + once sync.Once + closing chan struct{} + wg sync.WaitGroup +} + +// newBooleanParallelIterator returns a new instance of booleanParallelIterator. +func newBooleanParallelIterator(input BooleanIterator) *booleanParallelIterator { + itr := &booleanParallelIterator{ + input: input, + ch: make(chan booleanPointError, 1), + closing: make(chan struct{}), + } + itr.wg.Add(1) + go itr.monitor() + return itr +} + +// Stats returns stats from the underlying iterator. +func (itr *booleanParallelIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the underlying iterators. +func (itr *booleanParallelIterator) Close() error { + itr.once.Do(func() { close(itr.closing) }) + itr.wg.Wait() + return itr.input.Close() +} + +// Next returns the next point from the iterator. 
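+// The channel is closed only after Close is called, so io.EOF here signals an
+// early shutdown; normal exhaustion of the input is reported as a nil point
+// with a nil error.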
+func (itr *booleanParallelIterator) Next() (*BooleanPoint, error) { + v, ok := <-itr.ch + if !ok { + return nil, io.EOF + } + return v.point, v.err +} + +// monitor runs in a separate goroutine and actively pulls the next point. +func (itr *booleanParallelIterator) monitor() { + defer close(itr.ch) + defer itr.wg.Done() + + for { + // Read next point. + p, err := itr.input.Next() + + select { + case <-itr.closing: + return + case itr.ch <- booleanPointError{point: p, err: err}: + } + } +} + +type booleanPointError struct { + point *BooleanPoint + err error +} + +// booleanLimitIterator represents an iterator that limits points per group. +type booleanLimitIterator struct { + input BooleanIterator + opt IteratorOptions + n int + + prev struct { + name string + tags Tags + } +} + +// newBooleanLimitIterator returns a new instance of booleanLimitIterator. +func newBooleanLimitIterator(input BooleanIterator, opt IteratorOptions) *booleanLimitIterator { + return &booleanLimitIterator{ + input: input, + opt: opt, + } +} + +// Stats returns stats from the underlying iterator. +func (itr *booleanLimitIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the underlying iterators. +func (itr *booleanLimitIterator) Close() error { return itr.input.Close() } + +// Next returns the next point from the iterator. +func (itr *booleanLimitIterator) Next() (*BooleanPoint, error) { + for { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Reset window and counter if a new window is encountered. + if p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) { + itr.prev.name = p.Name + itr.prev.tags = p.Tags + itr.n = 0 + } + + // Increment counter. + itr.n++ + + // Read next point if not beyond the offset. + if itr.n <= itr.opt.Offset { + continue + } + + // Read next point if we're beyond the limit. + if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { + // If there's no interval, no groups, and a single source then simply exit. 
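+			// Otherwise the limit applies per series, so keep reading in case a
+			// new name/tag combination begins.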
+ if itr.opt.Interval.IsZero() && len(itr.opt.Dimensions) == 0 && len(itr.opt.Sources) == 1 { + return nil, nil + } + continue + } + + return p, nil + } +} + +type booleanFillIterator struct { + input *bufBooleanIterator + prev BooleanPoint + startTime int64 + endTime int64 + auxFields []interface{} + init bool + opt IteratorOptions + + window struct { + name string + tags Tags + time int64 + } +} + +func newBooleanFillIterator(input BooleanIterator, expr Expr, opt IteratorOptions) *booleanFillIterator { + if opt.Fill == NullFill { + if expr, ok := expr.(*Call); ok && expr.Name == "count" { + opt.Fill = NumberFill + opt.FillValue = false + } + } + + var startTime, endTime int64 + if opt.Ascending { + startTime, _ = opt.Window(opt.StartTime) + endTime, _ = opt.Window(opt.EndTime) + } else { + startTime, _ = opt.Window(opt.EndTime) + endTime, _ = opt.Window(opt.StartTime) + } + + var auxFields []interface{} + if len(opt.Aux) > 0 { + auxFields = make([]interface{}, len(opt.Aux)) + } + + return &booleanFillIterator{ + input: newBufBooleanIterator(input), + prev: BooleanPoint{Nil: true}, + startTime: startTime, + endTime: endTime, + auxFields: auxFields, + opt: opt, + } +} + +func (itr *booleanFillIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *booleanFillIterator) Close() error { return itr.input.Close() } + +func (itr *booleanFillIterator) Next() (*BooleanPoint, error) { + if !itr.init { + p, err := itr.input.peek() + if p == nil || err != nil { + return nil, err + } + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + itr.init = true + } + + p, err := itr.input.Next() + if err != nil { + return nil, err + } + + // Check if the next point is outside of our window or is nil. + for p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { + // If we are inside of an interval, unread the point and continue below to + // constructing a new point. + if itr.opt.Ascending { + if itr.window.time <= itr.endTime { + itr.input.unread(p) + p = nil + break + } + } else { + if itr.window.time >= itr.endTime { + itr.input.unread(p) + p = nil + break + } + } + + // We are *not* in a current interval. If there is no next point, + // we are at the end of all intervals. + if p == nil { + return nil, nil + } + + // Set the new interval. + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + itr.prev = BooleanPoint{Nil: true} + break + } + + // Check if the point is our next expected point. + if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { + if p != nil { + itr.input.unread(p) + } + + p = &BooleanPoint{ + Name: itr.window.name, + Tags: itr.window.tags, + Time: itr.window.time, + Aux: itr.auxFields, + } + + switch itr.opt.Fill { + case LinearFill: + fallthrough + case NullFill: + p.Nil = true + case NumberFill: + p.Value = castToBoolean(itr.opt.FillValue) + case PreviousFill: + if !itr.prev.Nil { + p.Value = itr.prev.Value + p.Nil = itr.prev.Nil + } else { + p.Nil = true + } + } + } else { + itr.prev = *p + } + + // Advance the expected time. Do not advance to a new window here + // as there may be lingering points with the same timestamp in the previous + // window. + if itr.opt.Ascending { + itr.window.time = p.Time + int64(itr.opt.Interval.Duration) + } else { + itr.window.time = p.Time - int64(itr.opt.Interval.Duration) + } + return p, nil +} + +// booleanIntervalIterator represents a boolean implementation of IntervalIterator. 
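+// It rewrites each point's timestamp to the start of the window that contains it.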
+type booleanIntervalIterator struct { + input BooleanIterator + opt IteratorOptions +} + +func newBooleanIntervalIterator(input BooleanIterator, opt IteratorOptions) *booleanIntervalIterator { + return &booleanIntervalIterator{input: input, opt: opt} +} + +func (itr *booleanIntervalIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *booleanIntervalIterator) Close() error { return itr.input.Close() } + +func (itr *booleanIntervalIterator) Next() (*BooleanPoint, error) { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + p.Time, _ = itr.opt.Window(p.Time) + // If we see the minimum allowable time, set the time to zero so we don't + // break the default returned time for aggregate queries without times. + if p.Time == MinTime { + p.Time = 0 + } + return p, nil +} + +// booleanInterruptIterator represents a boolean implementation of InterruptIterator. +type booleanInterruptIterator struct { + input BooleanIterator + closing <-chan struct{} + count int +} + +func newBooleanInterruptIterator(input BooleanIterator, closing <-chan struct{}) *booleanInterruptIterator { + return &booleanInterruptIterator{input: input, closing: closing} +} + +func (itr *booleanInterruptIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *booleanInterruptIterator) Close() error { return itr.input.Close() } + +func (itr *booleanInterruptIterator) Next() (*BooleanPoint, error) { + // Only check if the channel is closed every N points. This + // intentionally checks on both 0 and N so that if the iterator + // has been interrupted before the first point is emitted it will + // not emit any points. + if itr.count&0xFF == 0xFF { + select { + case <-itr.closing: + return nil, nil + default: + // Reset iterator count to zero and fall through to emit the next point. + itr.count = 0 + } + } + + // Increment the counter for every point read. + itr.count++ + return itr.input.Next() +} + +// booleanCloseInterruptIterator represents a boolean implementation of CloseInterruptIterator. +type booleanCloseInterruptIterator struct { + input BooleanIterator + closing <-chan struct{} + done chan struct{} + once sync.Once +} + +func newBooleanCloseInterruptIterator(input BooleanIterator, closing <-chan struct{}) *booleanCloseInterruptIterator { + itr := &booleanCloseInterruptIterator{ + input: input, + closing: closing, + done: make(chan struct{}), + } + go itr.monitor() + return itr +} + +func (itr *booleanCloseInterruptIterator) monitor() { + select { + case <-itr.closing: + itr.Close() + case <-itr.done: + } +} + +func (itr *booleanCloseInterruptIterator) Stats() IteratorStats { + return itr.input.Stats() +} + +func (itr *booleanCloseInterruptIterator) Close() error { + itr.once.Do(func() { + close(itr.done) + itr.input.Close() + }) + return nil +} + +func (itr *booleanCloseInterruptIterator) Next() (*BooleanPoint, error) { + p, err := itr.input.Next() + if err != nil { + // Check if the iterator was closed. + select { + case <-itr.done: + return nil, nil + default: + return nil, err + } + } + return p, nil +} + +// auxBooleanPoint represents a combination of a point and an error for the AuxIterator. +type auxBooleanPoint struct { + point *BooleanPoint + err error +} + +// booleanAuxIterator represents a boolean implementation of AuxIterator. 
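+// Each input point is forwarded on the iterator's own output and fanned out to
+// the per-field child iterators obtained through Iterator().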
+type booleanAuxIterator struct { + input *bufBooleanIterator + output chan auxBooleanPoint + fields auxIteratorFields + background bool +} + +func newBooleanAuxIterator(input BooleanIterator, opt IteratorOptions) *booleanAuxIterator { + return &booleanAuxIterator{ + input: newBufBooleanIterator(input), + output: make(chan auxBooleanPoint, 1), + fields: newAuxIteratorFields(opt), + } +} + +func (itr *booleanAuxIterator) Background() { + itr.background = true + itr.Start() + go DrainIterator(itr) +} + +func (itr *booleanAuxIterator) Start() { go itr.stream() } +func (itr *booleanAuxIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *booleanAuxIterator) Close() error { return itr.input.Close() } +func (itr *booleanAuxIterator) Next() (*BooleanPoint, error) { + p := <-itr.output + return p.point, p.err +} +func (itr *booleanAuxIterator) Iterator(name string, typ DataType) Iterator { + return itr.fields.iterator(name, typ) +} + +func (itr *booleanAuxIterator) CreateIterator(opt IteratorOptions) (Iterator, error) { + expr := opt.Expr + if expr == nil { + panic("unable to create an iterator with no expression from an aux iterator") + } + + switch expr := expr.(type) { + case *VarRef: + return itr.Iterator(expr.Val, expr.Type), nil + default: + panic(fmt.Sprintf("invalid expression type for an aux iterator: %T", expr)) + } +} + +func (itr *booleanAuxIterator) FieldDimensions(sources Sources) (fields map[string]DataType, dimensions map[string]struct{}, err error) { + return nil, nil, errors.New("not implemented") +} + +func (itr *booleanAuxIterator) ExpandSources(sources Sources) (Sources, error) { + return nil, errors.New("not implemented") +} + +func (itr *booleanAuxIterator) stream() { + for { + // Read next point. + p, err := itr.input.Next() + if err != nil { + itr.output <- auxBooleanPoint{err: err} + itr.fields.sendError(err) + break + } else if p == nil { + break + } + + // Send point to output and to each field iterator. + itr.output <- auxBooleanPoint{point: p} + if ok := itr.fields.send(p); !ok && itr.background { + break + } + } + + close(itr.output) + itr.fields.close() +} + +// booleanChanIterator represents a new instance of booleanChanIterator. +type booleanChanIterator struct { + buf struct { + i int + filled bool + points [2]BooleanPoint + } + err error + cond *sync.Cond + done bool +} + +func (itr *booleanChanIterator) Stats() IteratorStats { return IteratorStats{} } + +func (itr *booleanChanIterator) Close() error { + itr.cond.L.Lock() + // Mark the channel iterator as done and signal all waiting goroutines to start again. + itr.done = true + itr.cond.Broadcast() + // Do not defer the unlock so we don't create an unnecessary allocation. + itr.cond.L.Unlock() + return nil +} + +func (itr *booleanChanIterator) setBuf(name string, tags Tags, time int64, value interface{}) bool { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + + // Wait for either the iterator to be done (so we don't have to set the value) + // or for the buffer to have been read and ready for another write. + for !itr.done && itr.buf.filled { + itr.cond.Wait() + } + + // Do not set the value and return false to signal that the iterator is closed. + // Do this after the above wait as the above for loop may have exited because + // the iterator was closed. 
+ if itr.done { + return false + } + + switch v := value.(type) { + case bool: + itr.buf.points[itr.buf.i] = BooleanPoint{Name: name, Tags: tags, Time: time, Value: v} + + default: + itr.buf.points[itr.buf.i] = BooleanPoint{Name: name, Tags: tags, Time: time, Nil: true} + } + itr.buf.filled = true + + // Signal to all waiting goroutines that a new value is ready to read. + itr.cond.Signal() + return true +} + +func (itr *booleanChanIterator) setErr(err error) { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + itr.err = err + + // Signal to all waiting goroutines that a new value is ready to read. + itr.cond.Signal() +} + +func (itr *booleanChanIterator) Next() (*BooleanPoint, error) { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + + // Check for an error and return one if there. + if itr.err != nil { + return nil, itr.err + } + + // Wait until either a value is available in the buffer or + // the iterator is closed. + for !itr.done && !itr.buf.filled { + itr.cond.Wait() + } + + // Return nil once the channel is done and the buffer is empty. + if itr.done && !itr.buf.filled { + return nil, nil + } + + // Always read from the buffer if it exists, even if the iterator + // is closed. This prevents the last value from being truncated by + // the parent iterator. + p := &itr.buf.points[itr.buf.i] + itr.buf.i = (itr.buf.i + 1) % len(itr.buf.points) + itr.buf.filled = false + itr.cond.Signal() + return p, nil +} + +// booleanReduceFloatIterator executes a reducer for every interval and buffers the result. +type booleanReduceFloatIterator struct { + input *bufBooleanIterator + create func() (BooleanPointAggregator, FloatPointEmitter) + opt IteratorOptions + points []FloatPoint +} + +// Stats returns stats from the input iterator. +func (itr *booleanReduceFloatIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *booleanReduceFloatIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *booleanReduceFloatIterator) Next() (*FloatPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// booleanReduceFloatPoint stores the reduced data for a name/tag combination. +type booleanReduceFloatPoint struct { + Name string + Tags Tags + Aggregator BooleanPointAggregator + Emitter FloatPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *booleanReduceFloatIterator) reduce() ([]FloatPoint, error) { + // Calculate next window. + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) + + // Create points by tags. + m := make(map[string]*booleanReduceFloatPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. 
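+		// Aggregators are keyed by measurement name plus the NUL-separated tag ID,
+		// so each series in the window keeps its own state.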
+ rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &booleanReduceFloatPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateBoolean(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 { + sort.Sort(reverseStringSlice(keys)) + } + + a := make([]FloatPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a, nil +} + +// booleanStreamFloatIterator streams inputs into the iterator and emits points gradually. +type booleanStreamFloatIterator struct { + input *bufBooleanIterator + create func() (BooleanPointAggregator, FloatPointEmitter) + opt IteratorOptions + m map[string]*booleanReduceFloatPoint + points []FloatPoint +} + +// newBooleanStreamFloatIterator returns a new instance of booleanStreamFloatIterator. +func newBooleanStreamFloatIterator(input BooleanIterator, createFn func() (BooleanPointAggregator, FloatPointEmitter), opt IteratorOptions) *booleanStreamFloatIterator { + return &booleanStreamFloatIterator{ + input: newBufBooleanIterator(input), + create: createFn, + opt: opt, + m: make(map[string]*booleanReduceFloatPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *booleanStreamFloatIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *booleanStreamFloatIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *booleanStreamFloatIterator) Next() (*FloatPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *booleanStreamFloatIterator) reduce() ([]FloatPoint, error) { + for { + // Read next point. + curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &booleanReduceFloatPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateBoolean(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// booleanFloatExprIterator executes a function to modify an existing point +// for every output of the input iterator. 
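+// The left and right inputs are aligned by timestamp; when one side has no
+// value for a timestamp, the configured fill option decides whether the pair
+// is skipped or a fill value is substituted.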
+type booleanFloatExprIterator struct { + left *bufBooleanIterator + right *bufBooleanIterator + fn booleanFloatExprFunc + points []BooleanPoint // must be size 2 + storePrev bool +} + +func newBooleanFloatExprIterator(left, right BooleanIterator, opt IteratorOptions, fn func(a, b bool) float64) *booleanFloatExprIterator { + var points []BooleanPoint + switch opt.Fill { + case NullFill, PreviousFill: + points = []BooleanPoint{{Nil: true}, {Nil: true}} + case NumberFill: + value := castToBoolean(opt.FillValue) + points = []BooleanPoint{{Value: value}, {Value: value}} + } + return &booleanFloatExprIterator{ + left: newBufBooleanIterator(left), + right: newBufBooleanIterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *booleanFloatExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *booleanFloatExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *booleanFloatExprIterator) Next() (*FloatPoint, error) { + for { + a, err := itr.left.Next() + if err != nil { + return nil, err + } + b, err := itr.right.Next() + if err != nil { + return nil, err + } + + if a == nil && b == nil { + return nil, nil + } else if itr.points == nil && (a == nil || b == nil) { + return nil, nil + } + + if a != nil && b != nil { + if a.Time > b.Time { + itr.left.unread(a) + a = nil + } else if a.Time < b.Time { + itr.right.unread(b) + b = nil + } + } + + if a == nil || a.Nil { + if itr.points == nil { + continue + } + p := *b + p.Value = itr.points[0].Value + p.Nil = itr.points[0].Nil + a = &p + } else if b == nil || b.Nil { + if itr.points == nil { + continue + } + p := *a + p.Value = itr.points[1].Value + p.Nil = itr.points[1].Nil + b = &p + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + + p := &FloatPoint{ + Name: a.Name, + Tags: a.Tags, + Time: a.Time, + Nil: a.Nil || b.Nil, + Aggregated: a.Aggregated, + } + if !p.Nil { + p.Value = itr.fn(a.Value, b.Value) + } + return p, nil + + } +} + +// booleanFloatExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. +type booleanFloatExprFunc func(a, b bool) float64 + +// booleanReduceIntegerIterator executes a reducer for every interval and buffers the result. +type booleanReduceIntegerIterator struct { + input *bufBooleanIterator + create func() (BooleanPointAggregator, IntegerPointEmitter) + opt IteratorOptions + points []IntegerPoint +} + +// Stats returns stats from the input iterator. +func (itr *booleanReduceIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *booleanReduceIntegerIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *booleanReduceIntegerIterator) Next() (*IntegerPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// booleanReduceIntegerPoint stores the reduced data for a name/tag combination. 
+type booleanReduceIntegerPoint struct { + Name string + Tags Tags + Aggregator BooleanPointAggregator + Emitter IntegerPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *booleanReduceIntegerIterator) reduce() ([]IntegerPoint, error) { + // Calculate next window. + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) + + // Create points by tags. + m := make(map[string]*booleanReduceIntegerPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &booleanReduceIntegerPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateBoolean(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 { + sort.Sort(reverseStringSlice(keys)) + } + + a := make([]IntegerPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a, nil +} + +// booleanStreamIntegerIterator streams inputs into the iterator and emits points gradually. +type booleanStreamIntegerIterator struct { + input *bufBooleanIterator + create func() (BooleanPointAggregator, IntegerPointEmitter) + opt IteratorOptions + m map[string]*booleanReduceIntegerPoint + points []IntegerPoint +} + +// newBooleanStreamIntegerIterator returns a new instance of booleanStreamIntegerIterator. +func newBooleanStreamIntegerIterator(input BooleanIterator, createFn func() (BooleanPointAggregator, IntegerPointEmitter), opt IteratorOptions) *booleanStreamIntegerIterator { + return &booleanStreamIntegerIterator{ + input: newBufBooleanIterator(input), + create: createFn, + opt: opt, + m: make(map[string]*booleanReduceIntegerPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *booleanStreamIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *booleanStreamIntegerIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *booleanStreamIntegerIterator) Next() (*IntegerPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. 
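+// Unlike the windowed reduce above, the stream variant does not wait for an
+// interval boundary: points are returned as soon as the emitter produces them.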
+func (itr *booleanStreamIntegerIterator) reduce() ([]IntegerPoint, error) { + for { + // Read next point. + curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &booleanReduceIntegerPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateBoolean(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// booleanIntegerExprIterator executes a function to modify an existing point +// for every output of the input iterator. +type booleanIntegerExprIterator struct { + left *bufBooleanIterator + right *bufBooleanIterator + fn booleanIntegerExprFunc + points []BooleanPoint // must be size 2 + storePrev bool +} + +func newBooleanIntegerExprIterator(left, right BooleanIterator, opt IteratorOptions, fn func(a, b bool) int64) *booleanIntegerExprIterator { + var points []BooleanPoint + switch opt.Fill { + case NullFill, PreviousFill: + points = []BooleanPoint{{Nil: true}, {Nil: true}} + case NumberFill: + value := castToBoolean(opt.FillValue) + points = []BooleanPoint{{Value: value}, {Value: value}} + } + return &booleanIntegerExprIterator{ + left: newBufBooleanIterator(left), + right: newBufBooleanIterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *booleanIntegerExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *booleanIntegerExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *booleanIntegerExprIterator) Next() (*IntegerPoint, error) { + for { + a, err := itr.left.Next() + if err != nil { + return nil, err + } + b, err := itr.right.Next() + if err != nil { + return nil, err + } + + if a == nil && b == nil { + return nil, nil + } else if itr.points == nil && (a == nil || b == nil) { + return nil, nil + } + + if a != nil && b != nil { + if a.Time > b.Time { + itr.left.unread(a) + a = nil + } else if a.Time < b.Time { + itr.right.unread(b) + b = nil + } + } + + if a == nil || a.Nil { + if itr.points == nil { + continue + } + p := *b + p.Value = itr.points[0].Value + p.Nil = itr.points[0].Nil + a = &p + } else if b == nil || b.Nil { + if itr.points == nil { + continue + } + p := *a + p.Value = itr.points[1].Value + p.Nil = itr.points[1].Nil + b = &p + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + + p := &IntegerPoint{ + Name: a.Name, + Tags: a.Tags, + Time: a.Time, + Nil: a.Nil || b.Nil, + Aggregated: a.Aggregated, + } + if !p.Nil { + p.Value = itr.fn(a.Value, b.Value) + } + return p, nil + + } +} + +// booleanIntegerExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. +type booleanIntegerExprFunc func(a, b bool) int64 + +// booleanReduceStringIterator executes a reducer for every interval and buffers the result. 
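+// Buffered results are stored in reverse order so Next can pop them off the
+// end of the slice.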
+type booleanReduceStringIterator struct { + input *bufBooleanIterator + create func() (BooleanPointAggregator, StringPointEmitter) + opt IteratorOptions + points []StringPoint +} + +// Stats returns stats from the input iterator. +func (itr *booleanReduceStringIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *booleanReduceStringIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *booleanReduceStringIterator) Next() (*StringPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// booleanReduceStringPoint stores the reduced data for a name/tag combination. +type booleanReduceStringPoint struct { + Name string + Tags Tags + Aggregator BooleanPointAggregator + Emitter StringPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *booleanReduceStringIterator) reduce() ([]StringPoint, error) { + // Calculate next window. + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) + + // Create points by tags. + m := make(map[string]*booleanReduceStringPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &booleanReduceStringPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateBoolean(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 { + sort.Sort(reverseStringSlice(keys)) + } + + a := make([]StringPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a, nil +} + +// booleanStreamStringIterator streams inputs into the iterator and emits points gradually. +type booleanStreamStringIterator struct { + input *bufBooleanIterator + create func() (BooleanPointAggregator, StringPointEmitter) + opt IteratorOptions + m map[string]*booleanReduceStringPoint + points []StringPoint +} + +// newBooleanStreamStringIterator returns a new instance of booleanStreamStringIterator. 
+func newBooleanStreamStringIterator(input BooleanIterator, createFn func() (BooleanPointAggregator, StringPointEmitter), opt IteratorOptions) *booleanStreamStringIterator { + return &booleanStreamStringIterator{ + input: newBufBooleanIterator(input), + create: createFn, + opt: opt, + m: make(map[string]*booleanReduceStringPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *booleanStreamStringIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *booleanStreamStringIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *booleanStreamStringIterator) Next() (*StringPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *booleanStreamStringIterator) reduce() ([]StringPoint, error) { + for { + // Read next point. + curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &booleanReduceStringPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateBoolean(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// booleanStringExprIterator executes a function to modify an existing point +// for every output of the input iterator. 
+type booleanStringExprIterator struct { + left *bufBooleanIterator + right *bufBooleanIterator + fn booleanStringExprFunc + points []BooleanPoint // must be size 2 + storePrev bool +} + +func newBooleanStringExprIterator(left, right BooleanIterator, opt IteratorOptions, fn func(a, b bool) string) *booleanStringExprIterator { + var points []BooleanPoint + switch opt.Fill { + case NullFill, PreviousFill: + points = []BooleanPoint{{Nil: true}, {Nil: true}} + case NumberFill: + value := castToBoolean(opt.FillValue) + points = []BooleanPoint{{Value: value}, {Value: value}} + } + return &booleanStringExprIterator{ + left: newBufBooleanIterator(left), + right: newBufBooleanIterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *booleanStringExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *booleanStringExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *booleanStringExprIterator) Next() (*StringPoint, error) { + for { + a, err := itr.left.Next() + if err != nil { + return nil, err + } + b, err := itr.right.Next() + if err != nil { + return nil, err + } + + if a == nil && b == nil { + return nil, nil + } else if itr.points == nil && (a == nil || b == nil) { + return nil, nil + } + + if a != nil && b != nil { + if a.Time > b.Time { + itr.left.unread(a) + a = nil + } else if a.Time < b.Time { + itr.right.unread(b) + b = nil + } + } + + if a == nil || a.Nil { + if itr.points == nil { + continue + } + p := *b + p.Value = itr.points[0].Value + p.Nil = itr.points[0].Nil + a = &p + } else if b == nil || b.Nil { + if itr.points == nil { + continue + } + p := *a + p.Value = itr.points[1].Value + p.Nil = itr.points[1].Nil + b = &p + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + + p := &StringPoint{ + Name: a.Name, + Tags: a.Tags, + Time: a.Time, + Nil: a.Nil || b.Nil, + Aggregated: a.Aggregated, + } + if !p.Nil { + p.Value = itr.fn(a.Value, b.Value) + } + return p, nil + + } +} + +// booleanStringExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. +type booleanStringExprFunc func(a, b bool) string + +// booleanReduceBooleanIterator executes a reducer for every interval and buffers the result. +type booleanReduceBooleanIterator struct { + input *bufBooleanIterator + create func() (BooleanPointAggregator, BooleanPointEmitter) + opt IteratorOptions + points []BooleanPoint +} + +// Stats returns stats from the input iterator. +func (itr *booleanReduceBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *booleanReduceBooleanIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *booleanReduceBooleanIterator) Next() (*BooleanPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// booleanReduceBooleanPoint stores the reduced data for a name/tag combination. 
+type booleanReduceBooleanPoint struct { + Name string + Tags Tags + Aggregator BooleanPointAggregator + Emitter BooleanPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *booleanReduceBooleanIterator) reduce() ([]BooleanPoint, error) { + // Calculate next window. + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) + + // Create points by tags. + m := make(map[string]*booleanReduceBooleanPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &booleanReduceBooleanPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateBoolean(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 { + sort.Sort(reverseStringSlice(keys)) + } + + a := make([]BooleanPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a, nil +} + +// booleanStreamBooleanIterator streams inputs into the iterator and emits points gradually. +type booleanStreamBooleanIterator struct { + input *bufBooleanIterator + create func() (BooleanPointAggregator, BooleanPointEmitter) + opt IteratorOptions + m map[string]*booleanReduceBooleanPoint + points []BooleanPoint +} + +// newBooleanStreamBooleanIterator returns a new instance of booleanStreamBooleanIterator. +func newBooleanStreamBooleanIterator(input BooleanIterator, createFn func() (BooleanPointAggregator, BooleanPointEmitter), opt IteratorOptions) *booleanStreamBooleanIterator { + return &booleanStreamBooleanIterator{ + input: newBufBooleanIterator(input), + create: createFn, + opt: opt, + m: make(map[string]*booleanReduceBooleanPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *booleanStreamBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *booleanStreamBooleanIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *booleanStreamBooleanIterator) Next() (*BooleanPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. 
+func (itr *booleanStreamBooleanIterator) reduce() ([]BooleanPoint, error) { + for { + // Read next point. + curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &booleanReduceBooleanPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateBoolean(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// booleanExprIterator executes a function to modify an existing point +// for every output of the input iterator. +type booleanExprIterator struct { + left *bufBooleanIterator + right *bufBooleanIterator + fn booleanExprFunc + points []BooleanPoint // must be size 2 + storePrev bool +} + +func newBooleanExprIterator(left, right BooleanIterator, opt IteratorOptions, fn func(a, b bool) bool) *booleanExprIterator { + var points []BooleanPoint + switch opt.Fill { + case NullFill, PreviousFill: + points = []BooleanPoint{{Nil: true}, {Nil: true}} + case NumberFill: + value := castToBoolean(opt.FillValue) + points = []BooleanPoint{{Value: value}, {Value: value}} + } + return &booleanExprIterator{ + left: newBufBooleanIterator(left), + right: newBufBooleanIterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *booleanExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *booleanExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *booleanExprIterator) Next() (*BooleanPoint, error) { + for { + a, err := itr.left.Next() + if err != nil { + return nil, err + } + b, err := itr.right.Next() + if err != nil { + return nil, err + } + + if a == nil && b == nil { + return nil, nil + } else if itr.points == nil && (a == nil || b == nil) { + return nil, nil + } + + if a != nil && b != nil { + if a.Time > b.Time { + itr.left.unread(a) + a = nil + } else if a.Time < b.Time { + itr.right.unread(b) + b = nil + } + } + + if a == nil || a.Nil { + if itr.points == nil { + continue + } + p := *b + p.Value = itr.points[0].Value + p.Nil = itr.points[0].Nil + a = &p + } else if b == nil || b.Nil { + if itr.points == nil { + continue + } + p := *a + p.Value = itr.points[1].Value + p.Nil = itr.points[1].Nil + b = &p + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + + if a.Nil { + return a, nil + } else if b.Nil { + return b, nil + } + a.Value = itr.fn(a.Value, b.Value) + return a, nil + + } +} + +// booleanExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. +type booleanExprFunc func(a, b bool) bool + +// booleanTransformIterator executes a function to modify an existing point for every +// output of the input iterator. 
+type booleanTransformIterator struct { + input BooleanIterator + fn booleanTransformFunc +} + +// Stats returns stats from the input iterator. +func (itr *booleanTransformIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *booleanTransformIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *booleanTransformIterator) Next() (*BooleanPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { + p = itr.fn(p) + } + return p, nil +} + +// booleanTransformFunc creates or modifies a point. +// The point passed in may be modified and returned rather than allocating a +// new point if possible. +type booleanTransformFunc func(p *BooleanPoint) *BooleanPoint + +// booleanBoolTransformIterator executes a function to modify an existing point for every +// output of the input iterator. +type booleanBoolTransformIterator struct { + input BooleanIterator + fn booleanBoolTransformFunc +} + +// Stats returns stats from the input iterator. +func (itr *booleanBoolTransformIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *booleanBoolTransformIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *booleanBoolTransformIterator) Next() (*BooleanPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { + return itr.fn(p), nil + } + return nil, nil +} + +// booleanBoolTransformFunc creates or modifies a point. +// The point passed in may be modified and returned rather than allocating a +// new point if possible. +type booleanBoolTransformFunc func(p *BooleanPoint) *BooleanPoint + +// booleanDedupeIterator only outputs unique points. +// This differs from the DistinctIterator in that it compares all aux fields too. +// This iterator is relatively inefficient and should only be used on small +// datasets such as meta query results. +type booleanDedupeIterator struct { + input BooleanIterator + m map[string]struct{} // lookup of points already sent +} + +// newBooleanDedupeIterator returns a new instance of booleanDedupeIterator. +func newBooleanDedupeIterator(input BooleanIterator) *booleanDedupeIterator { + return &booleanDedupeIterator{ + input: input, + m: make(map[string]struct{}), + } +} + +// Stats returns stats from the input iterator. +func (itr *booleanDedupeIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *booleanDedupeIterator) Close() error { return itr.input.Close() } + +// Next returns the next unique point from the input iterator. +func (itr *booleanDedupeIterator) Next() (*BooleanPoint, error) { + for { + // Read next point. + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Serialize to bytes to store in lookup. + buf, err := proto.Marshal(encodeBooleanPoint(p)) + if err != nil { + return nil, err + } + + // If the point has already been output then move to the next point. + if _, ok := itr.m[string(buf)]; ok { + continue + } + + // Otherwise mark it as emitted and return point. + itr.m[string(buf)] = struct{}{} + return p, nil + } +} + +// booleanReaderIterator represents an iterator that streams from a reader. 
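+// Each call to Next decodes a single point from r; io.EOF from the decoder
+// marks the end of the stream rather than being returned as an error.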
+type booleanReaderIterator struct { + r io.Reader + dec *BooleanPointDecoder +} + +// newBooleanReaderIterator returns a new instance of booleanReaderIterator. +func newBooleanReaderIterator(r io.Reader, stats IteratorStats) *booleanReaderIterator { + dec := NewBooleanPointDecoder(r) + dec.stats = stats + + return &booleanReaderIterator{ + r: r, + dec: dec, + } +} + +// Stats returns stats about points processed. +func (itr *booleanReaderIterator) Stats() IteratorStats { return itr.dec.stats } + +// Close closes the underlying reader, if applicable. +func (itr *booleanReaderIterator) Close() error { + if r, ok := itr.r.(io.ReadCloser); ok { + return r.Close() + } + return nil +} + +// Next returns the next point from the iterator. +func (itr *booleanReaderIterator) Next() (*BooleanPoint, error) { + // OPTIMIZE(benbjohnson): Reuse point on iterator. + + // Unmarshal next point. + p := &BooleanPoint{} + if err := itr.dec.DecodeBooleanPoint(p); err == io.EOF { + return nil, nil + } else if err != nil { + return nil, err + } + return p, nil +} + +// IteratorEncoder is an encoder for encoding an iterator's points to w. +type IteratorEncoder struct { + w io.Writer + + // Frequency with which stats are emitted. + StatsInterval time.Duration +} + +// NewIteratorEncoder encodes an iterator's points to w. +func NewIteratorEncoder(w io.Writer) *IteratorEncoder { + return &IteratorEncoder{ + w: w, + + StatsInterval: DefaultStatsInterval, + } +} + +// EncodeIterator encodes and writes all of itr's points to the underlying writer. +func (enc *IteratorEncoder) EncodeIterator(itr Iterator) error { + switch itr := itr.(type) { + case FloatIterator: + return enc.encodeFloatIterator(itr) + case IntegerIterator: + return enc.encodeIntegerIterator(itr) + case StringIterator: + return enc.encodeStringIterator(itr) + case BooleanIterator: + return enc.encodeBooleanIterator(itr) + default: + panic(fmt.Sprintf("unsupported iterator for encoder: %T", itr)) + } +} + +// encodeFloatIterator encodes all points from itr to the underlying writer. +func (enc *IteratorEncoder) encodeFloatIterator(itr FloatIterator) error { + ticker := time.NewTicker(enc.StatsInterval) + defer ticker.Stop() + + // Emit initial stats. + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + + // Continually stream points from the iterator into the encoder. + penc := NewFloatPointEncoder(enc.w) + for { + // Emit stats periodically. + select { + case <-ticker.C: + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + default: + } + + // Retrieve the next point from the iterator. + p, err := itr.Next() + if err != nil { + return err + } else if p == nil { + break + } + + // Write the point to the point encoder. + if err := penc.EncodeFloatPoint(p); err != nil { + return err + } + } + + // Emit final stats. + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + return nil +} + +// encodeIntegerIterator encodes all points from itr to the underlying writer. +func (enc *IteratorEncoder) encodeIntegerIterator(itr IntegerIterator) error { + ticker := time.NewTicker(enc.StatsInterval) + defer ticker.Stop() + + // Emit initial stats. + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + + // Continually stream points from the iterator into the encoder. + penc := NewIntegerPointEncoder(enc.w) + for { + // Emit stats periodically. 
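+		// The default case makes this check non-blocking: stats are only
+		// written when the ticker has fired, otherwise streaming continues
+		// immediately.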
+ select { + case <-ticker.C: + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + default: + } + + // Retrieve the next point from the iterator. + p, err := itr.Next() + if err != nil { + return err + } else if p == nil { + break + } + + // Write the point to the point encoder. + if err := penc.EncodeIntegerPoint(p); err != nil { + return err + } + } + + // Emit final stats. + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + return nil +} + +// encodeStringIterator encodes all points from itr to the underlying writer. +func (enc *IteratorEncoder) encodeStringIterator(itr StringIterator) error { + ticker := time.NewTicker(enc.StatsInterval) + defer ticker.Stop() + + // Emit initial stats. + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + + // Continually stream points from the iterator into the encoder. + penc := NewStringPointEncoder(enc.w) + for { + // Emit stats periodically. + select { + case <-ticker.C: + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + default: + } + + // Retrieve the next point from the iterator. + p, err := itr.Next() + if err != nil { + return err + } else if p == nil { + break + } + + // Write the point to the point encoder. + if err := penc.EncodeStringPoint(p); err != nil { + return err + } + } + + // Emit final stats. + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + return nil +} + +// encodeBooleanIterator encodes all points from itr to the underlying writer. +func (enc *IteratorEncoder) encodeBooleanIterator(itr BooleanIterator) error { + ticker := time.NewTicker(enc.StatsInterval) + defer ticker.Stop() + + // Emit initial stats. + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + + // Continually stream points from the iterator into the encoder. + penc := NewBooleanPointEncoder(enc.w) + for { + // Emit stats periodically. + select { + case <-ticker.C: + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + default: + } + + // Retrieve the next point from the iterator. + p, err := itr.Next() + if err != nil { + return err + } else if p == nil { + break + } + + // Write the point to the point encoder. + if err := penc.EncodeBooleanPoint(p); err != nil { + return err + } + } + + // Emit final stats. + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + return nil +} + +// encode a stats object in the point stream. 
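+// The stats are wrapped in an otherwise-empty protobuf Point message and
+// written with a big-endian uint32 length prefix so that they travel inline
+// with the regular points.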
+func (enc *IteratorEncoder) encodeStats(stats IteratorStats) error { + buf, err := proto.Marshal(&internal.Point{ + Name: proto.String(""), + Tags: proto.String(""), + Time: proto.Int64(0), + Nil: proto.Bool(false), + + Stats: encodeIteratorStats(&stats), + }) + if err != nil { + return err + } + + if err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil { + return err + } + if _, err := enc.w.Write(buf); err != nil { + return err + } + return nil +} diff -Nru influxdb-0.10.0+dfsg1/influxql/iterator.gen.go.tmpl influxdb-1.1.1+dfsg1/influxql/iterator.gen.go.tmpl --- influxdb-0.10.0+dfsg1/influxql/iterator.gen.go.tmpl 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/iterator.gen.go.tmpl 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,1586 @@ +package influxql + +import ( + "container/heap" + "errors" + "encoding/binary" + "fmt" + "io" + "sort" + "sync" + "time" + + "github.com/gogo/protobuf/proto" + internal "github.com/influxdata/influxdb/influxql/internal" +) + +// DefaultStatsInterval is the default value for IteratorEncoder.StatsInterval. +const DefaultStatsInterval = 10 * time.Second + +{{with $types := .}}{{range $k := $types}} + +// {{$k.Name}}Iterator represents a stream of {{$k.name}} points. +type {{$k.Name}}Iterator interface { + Iterator + Next() (*{{$k.Name}}Point, error) +} + +// new{{$k.Name}}Iterators converts a slice of Iterator to a slice of {{$k.Name}}Iterator. +// Drop and closes any iterator in itrs that is not a {{$k.Name}}Iterator and cannot +// be cast to a {{$k.Name}}Iterator. +func new{{$k.Name}}Iterators(itrs []Iterator) []{{$k.Name}}Iterator { + a := make([]{{$k.Name}}Iterator, 0, len(itrs)) + for _, itr := range itrs { + switch itr := itr.(type) { + case {{$k.Name}}Iterator: + a = append(a, itr) +{{if eq .Name "Float"}} + case IntegerIterator: + a = append(a, &integerFloatCastIterator{input: itr}) +{{end}} + default: + itr.Close() + } + } + return a +} + + +// buf{{$k.Name}}Iterator represents a buffered {{$k.Name}}Iterator. +type buf{{$k.Name}}Iterator struct { + itr {{$k.Name}}Iterator + buf *{{$k.Name}}Point +} + +// newBuf{{$k.Name}}Iterator returns a buffered {{$k.Name}}Iterator. +func newBuf{{$k.Name}}Iterator(itr {{$k.Name}}Iterator) *buf{{$k.Name}}Iterator { + return &buf{{$k.Name}}Iterator{itr: itr} +} + +// Stats returns statistics from the input iterator. +func (itr *buf{{$k.Name}}Iterator) Stats() IteratorStats { return itr.itr.Stats() } + +// Close closes the underlying iterator. +func (itr *buf{{$k.Name}}Iterator) Close() error { return itr.itr.Close() } + +// peek returns the next point without removing it from the iterator. +func (itr *buf{{$k.Name}}Iterator) peek() (*{{$k.Name}}Point, error) { + p, err := itr.Next() + if err != nil { + return nil, err + } + itr.unread(p) + return p, nil +} + +// peekTime returns the time of the next point. +// Returns zero time if no more points available. +func (itr *buf{{$k.Name}}Iterator) peekTime() (int64, error) { + p, err := itr.peek() + if p == nil || err != nil { + return ZeroTime, err + } + return p.Time, nil +} + +// Next returns the current buffer, if exists, or calls the underlying iterator. +func (itr *buf{{$k.Name}}Iterator) Next() (*{{$k.Name}}Point, error) { + buf := itr.buf + if buf != nil { + itr.buf = nil + return buf, nil + } + return itr.itr.Next() +} + +// NextInWindow returns the next value if it is between [startTime, endTime). +// If the next value is outside the range then it is moved to the buffer. 
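+// In that case nil is returned without an error so callers can treat it as
+// the end of the current window.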
+func (itr *buf{{$k.Name}}Iterator) NextInWindow(startTime, endTime int64) (*{{$k.Name}}Point, error) { + v, err := itr.Next() + if v == nil || err != nil { + return nil, err + } else if t := v.Time; t >= endTime || t < startTime { + itr.unread(v) + return nil, nil + } + return v, nil +} + +// unread sets v to the buffer. It is read on the next call to Next(). +func (itr *buf{{$k.Name}}Iterator) unread(v *{{$k.Name}}Point) { itr.buf = v } + +// {{$k.name}}MergeIterator represents an iterator that combines multiple {{$k.name}} iterators. +type {{$k.name}}MergeIterator struct { + inputs []{{$k.Name}}Iterator + heap *{{$k.name}}MergeHeap + init bool + + // Current iterator and window. + curr *{{$k.name}}MergeHeapItem + window struct { + name string + tags string + startTime int64 + endTime int64 + } +} + +// new{{$k.Name}}MergeIterator returns a new instance of {{$k.name}}MergeIterator. +func new{{$k.Name}}MergeIterator(inputs []{{$k.Name}}Iterator, opt IteratorOptions) *{{$k.name}}MergeIterator { + itr := &{{$k.name}}MergeIterator{ + inputs: inputs, + heap: &{{$k.name}}MergeHeap{ + items: make([]*{{$k.name}}MergeHeapItem, 0, len(inputs)), + opt: opt, + }, + } + + // Initialize heap items. + for _, input := range inputs { + // Wrap in buffer, ignore any inputs without anymore points. + bufInput := newBuf{{$k.Name}}Iterator(input) + + // Append to the heap. + itr.heap.items = append(itr.heap.items, &{{$k.name}}MergeHeapItem{itr: bufInput}) + } + + return itr +} + +// Stats returns an aggregation of stats from the underlying iterators. +func (itr *{{$k.name}}MergeIterator) Stats() IteratorStats { + var stats IteratorStats + for _, input := range itr.inputs { + stats.Add(input.Stats()) + } + return stats +} + +// Close closes the underlying iterators. +func (itr *{{$k.name}}MergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + itr.curr = nil + itr.inputs = nil + itr.heap.items = nil + return nil +} + +// Next returns the next point from the iterator. +func (itr *{{$k.name}}MergeIterator) Next() (*{{$k.Name}}Point, error) { + // Initialize the heap. This needs to be done lazily on the first call to this iterator + // so that iterator initialization done through the Select() call returns quickly. + // Queries can only be interrupted after the Select() call completes so any operations + // done during iterator creation cannot be interrupted, which is why we do it here + // instead so an interrupt can happen while initializing the heap. + if !itr.init { + items := itr.heap.items + itr.heap.items = make([]*{{$k.name}}MergeHeapItem, 0, len(items)) + for _, item := range items { + if p, err := item.itr.peek(); err != nil { + return nil, err + } else if p == nil { + continue + } + itr.heap.items = append(itr.heap.items, item) + } + heap.Init(itr.heap) + itr.init = true + } + + for { + // Retrieve the next iterator if we don't have one. + if itr.curr == nil { + if len(itr.heap.items) == 0 { + return nil, nil + } + itr.curr = heap.Pop(itr.heap).(*{{$k.name}}MergeHeapItem) + + // Read point and set current window. + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } + itr.window.name, itr.window.tags = p.Name, p.Tags.ID() + itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) + return p, nil + } + + // Read the next point from the current iterator. + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } + + // If there are no more points then remove iterator from heap and find next. 
+ if p == nil { + itr.curr = nil + continue + } + + // Check if the point is inside of our current window. + inWindow := true + if window := itr.window; window.name != p.Name { + inWindow = false + } else if window.tags != p.Tags.ID() { + inWindow = false + } else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime { + inWindow = false + } else if !opt.Ascending && p.Time < window.startTime { + inWindow = false + } + + // If it's outside our window then push iterator back on the heap and find new iterator. + if !inWindow { + itr.curr.itr.unread(p) + heap.Push(itr.heap, itr.curr) + itr.curr = nil + continue + } + + return p, nil + } +} + +// {{$k.name}}MergeHeap represents a heap of {{$k.name}}MergeHeapItems. +// Items are sorted by their next window and then by name/tags. +type {{$k.name}}MergeHeap struct { + opt IteratorOptions + items []*{{$k.name}}MergeHeapItem +} + +func (h {{$k.name}}MergeHeap) Len() int { return len(h.items) } +func (h {{$k.name}}MergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h {{$k.name}}MergeHeap) Less(i, j int) bool { + x, err := h.items[i].itr.peek() + if err != nil { + return true + } + y, err := h.items[j].itr.peek() + if err != nil { + return false + } + + if h.opt.Ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if x.Tags.ID() != y.Tags.ID() { + return x.Tags.ID() < y.Tags.ID() + } + } else { + if x.Name != y.Name { + return x.Name > y.Name + } else if x.Tags.ID() != y.Tags.ID() { + return x.Tags.ID() > y.Tags.ID() + } + } + + xt, _ := h.opt.Window(x.Time) + yt, _ := h.opt.Window(y.Time) + + if h.opt.Ascending { + return xt < yt + } + return xt > yt +} + + +func (h *{{$k.name}}MergeHeap) Push(x interface{}) { + h.items = append(h.items, x.(*{{$k.name}}MergeHeapItem)) +} + +func (h *{{$k.name}}MergeHeap) Pop() interface{} { + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} + +type {{$k.name}}MergeHeapItem struct { + itr *buf{{$k.Name}}Iterator +} + +// {{$k.name}}SortedMergeIterator is an iterator that sorts and merges multiple iterators into one. +type {{$k.name}}SortedMergeIterator struct { + inputs []{{$k.Name}}Iterator + opt IteratorOptions + heap {{$k.name}}SortedMergeHeap + init bool +} + +// new{{$k.Name}}SortedMergeIterator returns an instance of {{$k.name}}SortedMergeIterator. +func new{{$k.Name}}SortedMergeIterator(inputs []{{$k.Name}}Iterator, opt IteratorOptions) Iterator { + itr := &{{$k.name}}SortedMergeIterator{ + inputs: inputs, + heap: make({{$k.name}}SortedMergeHeap, 0, len(inputs)), + opt: opt, + } + + // Initialize heap items. + for _, input := range inputs { + // Append to the heap. + itr.heap = append(itr.heap, &{{$k.name}}SortedMergeHeapItem{itr: input, ascending: opt.Ascending}) + } + + return itr +} + +// Stats returns an aggregation of stats from the underlying iterators. +func (itr *{{$k.name}}SortedMergeIterator) Stats() IteratorStats { + var stats IteratorStats + for _, input := range itr.inputs { + stats.Add(input.Stats()) + } + return stats +} + +// Close closes the underlying iterators. +func (itr *{{$k.name}}SortedMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + return nil +} + +// Next returns the next points from the iterator. +func (itr *{{$k.name}}SortedMergeIterator) Next() (*{{$k.Name}}Point, error) { return itr.pop() } + +// pop returns the next point from the heap. +// Reads the next point from item's cursor and puts it back on the heap. 
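+// Items whose cursors are exhausted are not pushed back, so the heap shrinks
+// as the inputs drain.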
+func (itr *{{$k.name}}SortedMergeIterator) pop() (*{{$k.Name}}Point, error) { + // Initialize the heap. See the MergeIterator to see why this has to be done lazily. + if !itr.init { + items := itr.heap + itr.heap = make([]*{{$k.name}}SortedMergeHeapItem, 0, len(items)) + for _, item := range items { + var err error + if item.point, err = item.itr.Next(); err != nil { + return nil, err + } else if item.point == nil { + continue + } + itr.heap = append(itr.heap, item) + } + heap.Init(&itr.heap) + itr.init = true + } + + if len(itr.heap) == 0 { + return nil, nil + } + + // Read the next item from the heap. + item := heap.Pop(&itr.heap).(*{{$k.name}}SortedMergeHeapItem) + if item.err != nil { + return nil, item.err + } else if item.point == nil { + return nil, nil + } + + // Copy the point for return. + p := item.point.Clone() + + // Read the next item from the cursor. Push back to heap if one exists. + if item.point, item.err = item.itr.Next(); item.point != nil { + heap.Push(&itr.heap, item) + } + + return p, nil +} + +// {{$k.name}}SortedMergeHeap represents a heap of {{$k.name}}SortedMergeHeapItems. +type {{$k.name}}SortedMergeHeap []*{{$k.name}}SortedMergeHeapItem + +func (h {{$k.name}}SortedMergeHeap) Len() int { return len(h) } +func (h {{$k.name}}SortedMergeHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } +func (h {{$k.name}}SortedMergeHeap) Less(i, j int) bool { + x, y := h[i].point, h[j].point + + if h[i].ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if !x.Tags.Equals(&y.Tags) { + return x.Tags.ID() < y.Tags.ID() + } + return x.Time < y.Time + } + + if x.Name != y.Name { + return x.Name > y.Name + } else if !x.Tags.Equals(&y.Tags) { + return x.Tags.ID() > y.Tags.ID() + } + return x.Time > y.Time +} + +func (h *{{$k.name}}SortedMergeHeap) Push(x interface{}) { + *h = append(*h, x.(*{{$k.name}}SortedMergeHeapItem)) +} + +func (h *{{$k.name}}SortedMergeHeap) Pop() interface{} { + old := *h + n := len(old) + item := old[n-1] + *h = old[0 : n-1] + return item +} + +type {{$k.name}}SortedMergeHeapItem struct { + point *{{$k.Name}}Point + err error + itr {{$k.Name}}Iterator + ascending bool +} + +// {{$k.name}}ParallelIterator represents an iterator that pulls data in a separate goroutine. +type {{$k.name}}ParallelIterator struct { + input {{$k.Name}}Iterator + ch chan {{$k.name}}PointError + + once sync.Once + closing chan struct{} + wg sync.WaitGroup +} + +// new{{$k.Name}}ParallelIterator returns a new instance of {{$k.name}}ParallelIterator. +func new{{$k.Name}}ParallelIterator(input {{$k.Name}}Iterator) *{{$k.name}}ParallelIterator { + itr := &{{$k.name}}ParallelIterator{ + input: input, + ch: make(chan {{$k.name}}PointError, 1), + closing: make(chan struct{}), + } + itr.wg.Add(1) + go itr.monitor() + return itr +} + +// Stats returns stats from the underlying iterator. +func (itr *{{$k.name}}ParallelIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the underlying iterators. +func (itr *{{$k.name}}ParallelIterator) Close() error { + itr.once.Do(func() { close(itr.closing) }) + itr.wg.Wait() + return itr.input.Close() +} + +// Next returns the next point from the iterator. +func (itr *{{$k.name}}ParallelIterator) Next() (*{{$k.Name}}Point, error) { + v, ok := <-itr.ch + if !ok { + return nil, io.EOF + } + return v.point, v.err +} + +// monitor runs in a separate goroutine and actively pulls the next point. +func (itr *{{$k.name}}ParallelIterator) monitor() { + defer close(itr.ch) + defer itr.wg.Done() + + for { + // Read next point. 
+ p, err := itr.input.Next() + + select { + case <-itr.closing: + return + case itr.ch <- {{$k.name}}PointError{point: p, err: err}: + } + } +} + +type {{$k.name}}PointError struct { + point *{{$k.Name}}Point + err error +} + +// {{$k.name}}LimitIterator represents an iterator that limits points per group. +type {{$k.name}}LimitIterator struct { + input {{$k.Name}}Iterator + opt IteratorOptions + n int + + prev struct { + name string + tags Tags + } +} + +// new{{$k.Name}}LimitIterator returns a new instance of {{$k.name}}LimitIterator. +func new{{$k.Name}}LimitIterator(input {{$k.Name}}Iterator, opt IteratorOptions) *{{$k.name}}LimitIterator { + return &{{$k.name}}LimitIterator{ + input: input, + opt: opt, + } +} + +// Stats returns stats from the underlying iterator. +func (itr *{{$k.name}}LimitIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the underlying iterators. +func (itr *{{$k.name}}LimitIterator) Close() error { return itr.input.Close() } + +// Next returns the next point from the iterator. +func (itr *{{$k.name}}LimitIterator) Next() (*{{$k.Name}}Point, error) { + for { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Reset window and counter if a new window is encountered. + if p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) { + itr.prev.name = p.Name + itr.prev.tags = p.Tags + itr.n = 0 + } + + // Increment counter. + itr.n++ + + // Read next point if not beyond the offset. + if itr.n <= itr.opt.Offset { + continue + } + + // Read next point if we're beyond the limit. + if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { + // If there's no interval, no groups, and a single source then simply exit. + if itr.opt.Interval.IsZero() && len(itr.opt.Dimensions) == 0 && len(itr.opt.Sources) == 1 { + return nil, nil + } + continue + } + + return p, nil + } +} + +type {{$k.name}}FillIterator struct { + input *buf{{$k.Name}}Iterator + prev {{$k.Name}}Point + startTime int64 + endTime int64 + auxFields []interface{} + init bool + opt IteratorOptions + + window struct { + name string + tags Tags + time int64 + } +} + +func new{{$k.Name}}FillIterator(input {{$k.Name}}Iterator, expr Expr, opt IteratorOptions) *{{$k.name}}FillIterator { + if opt.Fill == NullFill { + if expr, ok := expr.(*Call); ok && expr.Name == "count" { + opt.Fill = NumberFill + opt.FillValue = {{$k.Zero}} + } + } + + var startTime, endTime int64 + if opt.Ascending { + startTime, _ = opt.Window(opt.StartTime) + endTime, _ = opt.Window(opt.EndTime) + } else { + startTime, _ = opt.Window(opt.EndTime) + endTime, _ = opt.Window(opt.StartTime) + } + + var auxFields []interface{} + if len(opt.Aux) > 0 { + auxFields = make([]interface{}, len(opt.Aux)) + } + + return &{{$k.name}}FillIterator{ + input: newBuf{{$k.Name}}Iterator(input), + prev: {{$k.Name}}Point{Nil: true}, + startTime: startTime, + endTime: endTime, + auxFields: auxFields, + opt: opt, + } +} + +func (itr *{{$k.name}}FillIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *{{$k.name}}FillIterator) Close() error { return itr.input.Close() } + +func (itr *{{$k.name}}FillIterator) Next() (*{{$k.Name}}Point, error) { + if !itr.init { + p, err := itr.input.peek() + if p == nil || err != nil { + return nil, err + } + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + itr.init = true + } + + p, err := itr.input.Next() + if err != nil { + return nil, err + } + + // Check if the next point is outside of our window or is nil. 
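+	// A nil point, or a point from a different series, may simply mean the
+	// current interval still needs to be filled; a new window is only started
+	// once the expected time has moved past endTime.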
+ for p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { + // If we are inside of an interval, unread the point and continue below to + // constructing a new point. + if itr.opt.Ascending { + if itr.window.time <= itr.endTime { + itr.input.unread(p) + p = nil + break + } + } else { + if itr.window.time >= itr.endTime { + itr.input.unread(p) + p = nil + break + } + } + + // We are *not* in a current interval. If there is no next point, + // we are at the end of all intervals. + if p == nil { + return nil, nil + } + + // Set the new interval. + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + itr.prev = {{$k.Name}}Point{Nil: true} + break + } + + // Check if the point is our next expected point. + if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { + if p != nil { + itr.input.unread(p) + } + + p = &{{$k.Name}}Point{ + Name: itr.window.name, + Tags: itr.window.tags, + Time: itr.window.time, + Aux: itr.auxFields, + } + + switch itr.opt.Fill { + case LinearFill: + {{- if or (eq $k.Name "Float") (eq $k.Name "Integer")}} + if !itr.prev.Nil { + next, err := itr.input.peek() + if err != nil { + return nil, err + } + if next != nil { + interval := int64(itr.opt.Interval.Duration) + start := itr.window.time / interval + p.Value = linear{{$k.Name}}(start, itr.prev.Time/interval, next.Time/interval, itr.prev.Value, next.Value) + } else { + p.Nil = true + } + } else { + p.Nil = true + } + {{else}} + fallthrough + {{- end}} + case NullFill: + p.Nil = true + case NumberFill: + p.Value = castTo{{$k.Name}}(itr.opt.FillValue) + case PreviousFill: + if !itr.prev.Nil { + p.Value = itr.prev.Value + p.Nil = itr.prev.Nil + } else { + p.Nil = true + } + } + } else { + itr.prev = *p + } + + // Advance the expected time. Do not advance to a new window here + // as there may be lingering points with the same timestamp in the previous + // window. + if itr.opt.Ascending { + itr.window.time = p.Time + int64(itr.opt.Interval.Duration) + } else { + itr.window.time = p.Time - int64(itr.opt.Interval.Duration) + } + return p, nil +} + +// {{$k.name}}IntervalIterator represents a {{$k.name}} implementation of IntervalIterator. +type {{$k.name}}IntervalIterator struct { + input {{$k.Name}}Iterator + opt IteratorOptions +} + +func new{{$k.Name}}IntervalIterator(input {{$k.Name}}Iterator, opt IteratorOptions) *{{$k.name}}IntervalIterator { + return &{{$k.name}}IntervalIterator{input: input, opt: opt} +} + +func (itr *{{$k.name}}IntervalIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *{{$k.name}}IntervalIterator) Close() error { return itr.input.Close() } + +func (itr *{{$k.name}}IntervalIterator) Next() (*{{$k.Name}}Point, error) { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + p.Time, _ = itr.opt.Window(p.Time) + // If we see the minimum allowable time, set the time to zero so we don't + // break the default returned time for aggregate queries without times. + if p.Time == MinTime { + p.Time = 0 + } + return p, nil +} + +// {{$k.name}}InterruptIterator represents a {{$k.name}} implementation of InterruptIterator. 
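+// It polls the closing channel roughly every 256 points and stops returning
+// points once that channel has been closed.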
+type {{$k.name}}InterruptIterator struct { + input {{$k.Name}}Iterator + closing <-chan struct{} + count int +} + +func new{{$k.Name}}InterruptIterator(input {{$k.Name}}Iterator, closing <-chan struct{}) *{{$k.name}}InterruptIterator { + return &{{$k.name}}InterruptIterator{input: input, closing: closing} +} + +func (itr *{{$k.name}}InterruptIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *{{$k.name}}InterruptIterator) Close() error { return itr.input.Close() } + +func (itr *{{$k.name}}InterruptIterator) Next() (*{{$k.Name}}Point, error) { + // Only check if the channel is closed every N points. This + // intentionally checks on both 0 and N so that if the iterator + // has been interrupted before the first point is emitted it will + // not emit any points. + if itr.count & 0xFF == 0xFF { + select { + case <-itr.closing: + return nil, nil + default: + // Reset iterator count to zero and fall through to emit the next point. + itr.count = 0 + } + } + + // Increment the counter for every point read. + itr.count++ + return itr.input.Next() +} + +// {{$k.name}}CloseInterruptIterator represents a {{$k.name}} implementation of CloseInterruptIterator. +type {{$k.name}}CloseInterruptIterator struct { + input {{$k.Name}}Iterator + closing <-chan struct{} + done chan struct{} + once sync.Once +} + +func new{{$k.Name}}CloseInterruptIterator(input {{$k.Name}}Iterator, closing <-chan struct{}) *{{$k.name}}CloseInterruptIterator { + itr := &{{$k.name}}CloseInterruptIterator{ + input: input, + closing: closing, + done: make(chan struct{}), + } + go itr.monitor() + return itr +} + +func (itr *{{$k.name}}CloseInterruptIterator) monitor() { + select { + case <-itr.closing: + itr.Close() + case <-itr.done: + } +} + +func (itr *{{$k.name}}CloseInterruptIterator) Stats() IteratorStats { + return itr.input.Stats() +} + +func (itr *{{$k.name}}CloseInterruptIterator) Close() error { + itr.once.Do(func() { + close(itr.done) + itr.input.Close() + }) + return nil +} + +func (itr *{{$k.name}}CloseInterruptIterator) Next() (*{{$k.Name}}Point, error) { + p, err := itr.input.Next() + if err != nil { + // Check if the iterator was closed. + select { + case <-itr.done: + return nil, nil + default: + return nil, err + } + } + return p, nil +} + +// aux{{$k.Name}}Point represents a combination of a point and an error for the AuxIterator. +type aux{{$k.Name}}Point struct { + point *{{$k.Name}}Point + err error +} + +// {{$k.name}}AuxIterator represents a {{$k.name}} implementation of AuxIterator. 
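+// It forwards every input point to its own output channel while also feeding
+// the auxiliary field iterators obtained through Iterator().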
+type {{$k.name}}AuxIterator struct { + input *buf{{$k.Name}}Iterator + output chan aux{{$k.Name}}Point + fields auxIteratorFields + background bool +} + +func new{{$k.Name}}AuxIterator(input {{$k.Name}}Iterator, opt IteratorOptions) *{{$k.name}}AuxIterator { + return &{{$k.name}}AuxIterator{ + input: newBuf{{$k.Name}}Iterator(input), + output: make(chan aux{{$k.Name}}Point, 1), + fields: newAuxIteratorFields(opt), + } +} + +func (itr *{{$k.name}}AuxIterator) Background() { + itr.background = true + itr.Start() + go DrainIterator(itr) +} + +func (itr *{{$k.name}}AuxIterator) Start() { go itr.stream() } +func (itr *{{$k.name}}AuxIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *{{$k.name}}AuxIterator) Close() error { return itr.input.Close() } +func (itr *{{$k.name}}AuxIterator) Next() (*{{$k.Name}}Point, error) { + p := <-itr.output + return p.point, p.err +} +func (itr *{{$k.name}}AuxIterator) Iterator(name string, typ DataType) Iterator { return itr.fields.iterator(name, typ) } + +func (itr *{{$k.name}}AuxIterator) CreateIterator(opt IteratorOptions) (Iterator, error) { + expr := opt.Expr + if expr == nil { + panic("unable to create an iterator with no expression from an aux iterator") + } + + switch expr := expr.(type) { + case *VarRef: + return itr.Iterator(expr.Val, expr.Type), nil + default: + panic(fmt.Sprintf("invalid expression type for an aux iterator: %T", expr)) + } +} + +func (itr *{{$k.name}}AuxIterator) FieldDimensions(sources Sources) (fields map[string]DataType, dimensions map[string]struct{}, err error) { + return nil, nil, errors.New("not implemented") +} + +func (itr *{{$k.name}}AuxIterator) ExpandSources(sources Sources) (Sources, error) { + return nil, errors.New("not implemented") +} + +func (itr *{{.name}}AuxIterator) stream() { + for { + // Read next point. + p, err := itr.input.Next() + if err != nil { + itr.output <- aux{{$k.Name}}Point{err: err} + itr.fields.sendError(err) + break + } else if p == nil { + break + } + + // Send point to output and to each field iterator. + itr.output <- aux{{$k.Name}}Point{point: p} + if ok := itr.fields.send(p); !ok && itr.background { + break + } + } + + close(itr.output) + itr.fields.close() +} + +// {{$k.name}}ChanIterator represents a new instance of {{$k.name}}ChanIterator. +type {{$k.name}}ChanIterator struct { + buf struct { + i int + filled bool + points [2]{{$k.Name}}Point + } + err error + cond *sync.Cond + done bool +} + +func (itr *{{$k.name}}ChanIterator) Stats() IteratorStats { return IteratorStats{} } + +func (itr *{{$k.name}}ChanIterator) Close() error { + itr.cond.L.Lock() + // Mark the channel iterator as done and signal all waiting goroutines to start again. + itr.done = true + itr.cond.Broadcast() + // Do not defer the unlock so we don't create an unnecessary allocation. + itr.cond.L.Unlock() + return nil +} + +func (itr *{{$k.name}}ChanIterator) setBuf(name string, tags Tags, time int64, value interface{}) bool { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + + // Wait for either the iterator to be done (so we don't have to set the value) + // or for the buffer to have been read and ready for another write. + for !itr.done && itr.buf.filled { + itr.cond.Wait() + } + + // Do not set the value and return false to signal that the iterator is closed. + // Do this after the above wait as the above for loop may have exited because + // the iterator was closed. 
+ if itr.done { + return false + } + + switch v := value.(type) { + case {{$k.Type}}: + itr.buf.points[itr.buf.i] = {{$k.Name}}Point{Name: name, Tags: tags, Time: time, Value: v} +{{if eq $k.Name "Float"}} + case int64: + itr.buf.points[itr.buf.i] = {{$k.Name}}Point{Name: name, Tags: tags, Time: time, Value: float64(v)} +{{end}} + default: + itr.buf.points[itr.buf.i] = {{$k.Name}}Point{Name: name, Tags: tags, Time: time, Nil: true} + } + itr.buf.filled = true + + // Signal to all waiting goroutines that a new value is ready to read. + itr.cond.Signal() + return true +} + +func (itr *{{$k.name}}ChanIterator) setErr(err error) { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + itr.err = err + + // Signal to all waiting goroutines that a new value is ready to read. + itr.cond.Signal() +} + +func (itr *{{$k.name}}ChanIterator) Next() (*{{$k.Name}}Point, error) { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + + // Check for an error and return one if there. + if itr.err != nil { + return nil, itr.err + } + + // Wait until either a value is available in the buffer or + // the iterator is closed. + for !itr.done && !itr.buf.filled { + itr.cond.Wait() + } + + // Return nil once the channel is done and the buffer is empty. + if itr.done && !itr.buf.filled { + return nil, nil + } + + // Always read from the buffer if it exists, even if the iterator + // is closed. This prevents the last value from being truncated by + // the parent iterator. + p := &itr.buf.points[itr.buf.i] + itr.buf.i = (itr.buf.i + 1) % len(itr.buf.points) + itr.buf.filled = false + itr.cond.Signal() + return p, nil +} + +{{range $v := $types}} + +// {{$k.name}}Reduce{{$v.Name}}Iterator executes a reducer for every interval and buffers the result. +type {{$k.name}}Reduce{{$v.Name}}Iterator struct { + input *buf{{$k.Name}}Iterator + create func() ({{$k.Name}}PointAggregator, {{$v.Name}}PointEmitter) + opt IteratorOptions + points []{{$v.Name}}Point +} + +// Stats returns stats from the input iterator. +func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) Next() (*{{$v.Name}}Point, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// {{$k.name}}Reduce{{$v.Name}}Point stores the reduced data for a name/tag combination. +type {{$k.name}}Reduce{{$v.Name}}Point struct { + Name string + Tags Tags + Aggregator {{$k.Name}}PointAggregator + Emitter {{$v.Name}}PointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) reduce() ([]{{$v.Name}}Point, error) { + // Calculate next window. + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) + + // Create points by tags. + m := make(map[string]*{{$k.name}}Reduce{{$v.Name}}Point) + for { + // Read next point. 
+ curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &{{$k.name}}Reduce{{.Name}}Point{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.Aggregate{{$k.Name}}(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 { + sort.Sort(reverseStringSlice(keys)) + } + + a := make([]{{$v.Name}}Point, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points)-1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a, nil +} + +// {{$k.name}}Stream{{$v.Name}}Iterator streams inputs into the iterator and emits points gradually. +type {{$k.name}}Stream{{$v.Name}}Iterator struct { + input *buf{{$k.Name}}Iterator + create func() ({{$k.Name}}PointAggregator, {{$v.Name}}PointEmitter) + opt IteratorOptions + m map[string]*{{$k.name}}Reduce{{$v.Name}}Point + points []{{$v.Name}}Point +} + +// new{{$k.Name}}Stream{{$v.Name}}Iterator returns a new instance of {{$k.name}}Stream{{$v.Name}}Iterator. +func new{{$k.Name}}Stream{{$v.Name}}Iterator(input {{$k.Name}}Iterator, createFn func() ({{$k.Name}}PointAggregator, {{$v.Name}}PointEmitter), opt IteratorOptions) *{{$k.name}}Stream{{$v.Name}}Iterator { + return &{{$k.name}}Stream{{$v.Name}}Iterator{ + input: newBuf{{$k.Name}}Iterator(input), + create: createFn, + opt: opt, + m: make(map[string]*{{$k.name}}Reduce{{$v.Name}}Point), + } +} + +// Stats returns stats from the input iterator. +func (itr *{{$k.name}}Stream{{$v.Name}}Iterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *{{$k.name}}Stream{{$v.Name}}Iterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *{{$k.name}}Stream{{$v.Name}}Iterator) Next() (*{{$v.Name}}Point, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *{{$k.name}}Stream{{$v.Name}}Iterator) reduce() ([]{{$v.Name}}Point, error) { + for { + // Read next point. + curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. 
+ rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &{{$k.name}}Reduce{{.Name}}Point{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.Aggregate{{$k.Name}}(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// {{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprIterator executes a function to modify an existing point +// for every output of the input iterator. +type {{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprIterator struct { + left *buf{{$k.Name}}Iterator + right *buf{{$k.Name}}Iterator + fn {{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprFunc + points []{{$k.Name}}Point // must be size 2 + storePrev bool +} + +func new{{$k.Name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprIterator(left, right {{$k.Name}}Iterator, opt IteratorOptions, fn func(a, b {{$k.Type}}) {{$v.Type}}) *{{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprIterator { + var points []{{$k.Name}}Point + switch opt.Fill { + case NullFill, PreviousFill: + points = []{{$k.Name}}Point{ {Nil: true}, {Nil: true} } + case NumberFill: + value := castTo{{$k.Name}}(opt.FillValue) + points = []{{$k.Name}}Point{ {Value: value}, {Value: value} } + } + return &{{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprIterator{ + left: newBuf{{$k.Name}}Iterator(left), + right: newBuf{{$k.Name}}Iterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *{{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *{{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *{{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprIterator) Next() (*{{$v.Name}}Point, error) { + for { + a, err := itr.left.Next() + if err != nil { + return nil, err + } + b, err := itr.right.Next() + if err != nil { + return nil, err + } + + if a == nil && b == nil { + return nil, nil + } else if itr.points == nil && (a == nil || b == nil ) { + return nil, nil + } + + if a != nil && b != nil { + if a.Time > b.Time { + itr.left.unread(a) + a = nil + } else if a.Time < b.Time { + itr.right.unread(b) + b = nil + } + } + + if a == nil || a.Nil { + if itr.points == nil { + continue + } + p := *b + p.Value = itr.points[0].Value + p.Nil = itr.points[0].Nil + a = &p + } else if b == nil || b.Nil { + if itr.points == nil { + continue + } + p := *a + p.Value = itr.points[1].Value + p.Nil = itr.points[1].Nil + b = &p + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + +{{if eq $k.Name $v.Name}} + if a.Nil { + return a, nil + } else if b.Nil { + return b, nil + } + a.Value = itr.fn(a.Value, b.Value) + return a, nil +{{else}} + p := &{{$v.Name}}Point{ + Name: a.Name, + Tags: a.Tags, + Time: a.Time, + Nil: a.Nil || b.Nil, + Aggregated: a.Aggregated, + } + if !p.Nil { + p.Value = itr.fn(a.Value, b.Value) + } + return p, nil +{{end}} + } +} + +// {{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. 
One of the points may be nil, but at +// least one of the points will be non-nil. +type {{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprFunc func(a, b {{$k.Type}}) {{$v.Type}} +{{end}} + +// {{$k.name}}TransformIterator executes a function to modify an existing point for every +// output of the input iterator. +type {{$k.name}}TransformIterator struct { + input {{$k.Name}}Iterator + fn {{$k.name}}TransformFunc +} + +// Stats returns stats from the input iterator. +func (itr *{{$k.name}}TransformIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *{{$k.name}}TransformIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *{{$k.name}}TransformIterator) Next() (*{{$k.Name}}Point, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { + p = itr.fn(p) + } + return p, nil +} + +// {{$k.name}}TransformFunc creates or modifies a point. +// The point passed in may be modified and returned rather than allocating a +// new point if possible. +type {{$k.name}}TransformFunc func(p *{{$k.Name}}Point) *{{$k.Name}}Point + +// {{$k.name}}BoolTransformIterator executes a function to modify an existing point for every +// output of the input iterator. +type {{$k.name}}BoolTransformIterator struct { + input {{$k.Name}}Iterator + fn {{$k.name}}BoolTransformFunc +} + +// Stats returns stats from the input iterator. +func (itr *{{$k.name}}BoolTransformIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *{{$k.name}}BoolTransformIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *{{$k.name}}BoolTransformIterator) Next() (*BooleanPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { + return itr.fn(p), nil + } + return nil, nil +} + +// {{$k.name}}BoolTransformFunc creates or modifies a point. +// The point passed in may be modified and returned rather than allocating a +// new point if possible. +type {{$k.name}}BoolTransformFunc func(p *{{$k.Name}}Point) *BooleanPoint + +// {{$k.name}}DedupeIterator only outputs unique points. +// This differs from the DistinctIterator in that it compares all aux fields too. +// This iterator is relatively inefficient and should only be used on small +// datasets such as meta query results. +type {{$k.name}}DedupeIterator struct { + input {{$k.Name}}Iterator + m map[string]struct{} // lookup of points already sent +} + +// new{{$k.Name}}DedupeIterator returns a new instance of {{$k.name}}DedupeIterator. +func new{{$k.Name}}DedupeIterator(input {{$k.Name}}Iterator) *{{$k.name}}DedupeIterator { + return &{{$k.name}}DedupeIterator{ + input: input, + m: make(map[string]struct{}), + } +} + +// Stats returns stats from the input iterator. +func (itr *{{$k.name}}DedupeIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *{{$k.name}}DedupeIterator) Close() error { return itr.input.Close() } + +// Next returns the next unique point from the input iterator. +func (itr *{{$k.name}}DedupeIterator) Next() (*{{$k.Name}}Point, error) { + for { + // Read next point. + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Serialize to bytes to store in lookup. 
+ buf, err := proto.Marshal(encode{{$k.Name}}Point(p)) + if err != nil { + return nil, err + } + + // If the point has already been output then move to the next point. + if _, ok := itr.m[string(buf)]; ok { + continue + } + + // Otherwise mark it as emitted and return point. + itr.m[string(buf)] = struct{}{} + return p, nil + } +} + +// {{$k.name}}ReaderIterator represents an iterator that streams from a reader. +type {{$k.name}}ReaderIterator struct { + r io.Reader + dec *{{$k.Name}}PointDecoder +} + +// new{{$k.Name}}ReaderIterator returns a new instance of {{$k.name}}ReaderIterator. +func new{{$k.Name}}ReaderIterator(r io.Reader, stats IteratorStats) *{{$k.name}}ReaderIterator { + dec := New{{$k.Name}}PointDecoder(r) + dec.stats = stats + + return &{{$k.name}}ReaderIterator{ + r: r, + dec: dec, + } +} + +// Stats returns stats about points processed. +func (itr *{{$k.name}}ReaderIterator) Stats() IteratorStats { return itr.dec.stats } + +// Close closes the underlying reader, if applicable. +func (itr *{{$k.name}}ReaderIterator) Close() error { + if r, ok := itr.r.(io.ReadCloser); ok { + return r.Close() + } + return nil +} + +// Next returns the next point from the iterator. +func (itr *{{$k.name}}ReaderIterator) Next() (*{{$k.Name}}Point, error) { + // OPTIMIZE(benbjohnson): Reuse point on iterator. + + // Unmarshal next point. + p := &{{$k.Name}}Point{} + if err := itr.dec.Decode{{$k.Name}}Point(p); err == io.EOF { + return nil, nil + } else if err != nil { + return nil, err + } + return p, nil +} +{{end}} + + +// IteratorEncoder is an encoder for encoding an iterator's points to w. +type IteratorEncoder struct { + w io.Writer + + // Frequency with which stats are emitted. + StatsInterval time.Duration +} + +// NewIteratorEncoder encodes an iterator's points to w. +func NewIteratorEncoder(w io.Writer) *IteratorEncoder { + return &IteratorEncoder{ + w: w, + + StatsInterval: DefaultStatsInterval, + } +} + +// EncodeIterator encodes and writes all of itr's points to the underlying writer. +func (enc *IteratorEncoder) EncodeIterator(itr Iterator) error { + switch itr := itr.(type) { + case FloatIterator: + return enc.encodeFloatIterator(itr) + case IntegerIterator: + return enc.encodeIntegerIterator(itr) + case StringIterator: + return enc.encodeStringIterator(itr) + case BooleanIterator: + return enc.encodeBooleanIterator(itr) + default: + panic(fmt.Sprintf("unsupported iterator for encoder: %T", itr)) + } +} + +{{range .}} +// encode{{.Name}}Iterator encodes all points from itr to the underlying writer. +func (enc *IteratorEncoder) encode{{.Name}}Iterator(itr {{.Name}}Iterator) error { + ticker := time.NewTicker(enc.StatsInterval) + defer ticker.Stop() + + // Emit initial stats. + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + + // Continually stream points from the iterator into the encoder. + penc := New{{.Name}}PointEncoder(enc.w) + for { + // Emit stats periodically. + select { + case <-ticker.C: + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + default: + } + + // Retrieve the next point from the iterator. + p, err := itr.Next() + if err != nil { + return err + } else if p == nil { + break + } + + // Write the point to the point encoder. + if err := penc.Encode{{.Name}}Point(p); err != nil { + return err + } + } + + // Emit final stats. + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + return nil +} + +{{end}} + +// encode a stats object in the point stream. 
+func (enc *IteratorEncoder) encodeStats(stats IteratorStats) error { + buf, err := proto.Marshal(&internal.Point{ + Name: proto.String(""), + Tags: proto.String(""), + Time: proto.Int64(0), + Nil: proto.Bool(false), + + Stats: encodeIteratorStats(&stats), + }) + if err != nil { + return err + } + + if err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil { + return err + } + if _, err := enc.w.Write(buf); err != nil { + return err + } + return nil +} + +{{end}} diff -Nru influxdb-0.10.0+dfsg1/influxql/iterator.go influxdb-1.1.1+dfsg1/influxql/iterator.go --- influxdb-0.10.0+dfsg1/influxql/iterator.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/iterator.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,1223 @@ +package influxql + +import ( + "errors" + "fmt" + "io" + "sort" + "sync" + "time" + + "github.com/influxdata/influxdb/models" + + "github.com/gogo/protobuf/proto" + internal "github.com/influxdata/influxdb/influxql/internal" +) + +// ErrUnknownCall is returned when operating on an unknown function call. +var ErrUnknownCall = errors.New("unknown call") + +const ( + // MinTime is used as the minimum time value when computing an unbounded range. + // This time is one less than the MinNanoTime so that the first minimum + // time can be used as a sentinel value to signify that it is the default + // value rather than explicitly set by the user. + MinTime = models.MinNanoTime - 1 + + // MaxTime is used as the maximum time value when computing an unbounded range. + // This time is 2262-04-11 23:47:16.854775806 +0000 UTC + MaxTime = models.MaxNanoTime +) + +// Iterator represents a generic interface for all Iterators. +// Most iterator operations are done on the typed sub-interfaces. +type Iterator interface { + Stats() IteratorStats + Close() error +} + +// Iterators represents a list of iterators. +type Iterators []Iterator + +// Stats returns the aggregation of all iterator stats. +func (a Iterators) Stats() IteratorStats { + var stats IteratorStats + for _, itr := range a { + stats.Add(itr.Stats()) + } + return stats +} + +// Close closes all iterators. +func (a Iterators) Close() error { + for _, itr := range a { + itr.Close() + } + return nil +} + +// filterNonNil returns a slice of iterators that removes all nil iterators. +func (a Iterators) filterNonNil() []Iterator { + other := make([]Iterator, 0, len(a)) + for _, itr := range a { + if itr == nil { + continue + } + other = append(other, itr) + } + return other +} + +// castType determines what type to cast the set of iterators to. +// An iterator type is chosen using this hierarchy: +// float > integer > string > boolean +func (a Iterators) castType() DataType { + if len(a) == 0 { + return Unknown + } + + typ := DataType(Boolean) + for _, input := range a { + switch input.(type) { + case FloatIterator: + // Once a float iterator is found, short circuit the end. + return Float + case IntegerIterator: + if typ > Integer { + typ = Integer + } + case StringIterator: + if typ > String { + typ = String + } + case BooleanIterator: + // Boolean is the lowest type. + } + } + return typ +} + +// cast casts an array of iterators to a single type. +// Iterators that are not compatible or cannot be cast to the +// chosen iterator type are closed and dropped. 
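+// The only implicit conversion is from integer to float; all other mismatched
+// iterators are simply closed.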
+func (a Iterators) cast() interface{} { + typ := a.castType() + switch typ { + case Float: + return newFloatIterators(a) + case Integer: + return newIntegerIterators(a) + case String: + return newStringIterators(a) + case Boolean: + return newBooleanIterators(a) + } + return a +} + +// Merge combines all iterators into a single iterator. +// A sorted merge iterator or a merge iterator can be used based on opt. +func (a Iterators) Merge(opt IteratorOptions) (Iterator, error) { + // Merge into a single iterator. + if opt.MergeSorted() { + itr := NewSortedMergeIterator(a, opt) + if itr != nil && opt.InterruptCh != nil { + itr = NewInterruptIterator(itr, opt.InterruptCh) + } + return itr, nil + } + + itr := NewMergeIterator(a, opt) + if itr == nil { + return nil, nil + } + + if opt.Expr != nil { + if expr, ok := opt.Expr.(*Call); ok && expr.Name == "count" { + opt.Expr = &Call{ + Name: "sum", + Args: expr.Args, + } + } + } + + if opt.InterruptCh != nil { + itr = NewInterruptIterator(itr, opt.InterruptCh) + } + return NewCallIterator(itr, opt) +} + +// NewMergeIterator returns an iterator to merge itrs into one. +// Inputs must either be merge iterators or only contain a single name/tag in +// sorted order. The iterator will output all points by window, name/tag, then +// time. This iterator is useful when you need all of the points for an +// interval. +func NewMergeIterator(inputs []Iterator, opt IteratorOptions) Iterator { + inputs = Iterators(inputs).filterNonNil() + if n := len(inputs); n == 0 { + return nil + } else if n == 1 { + return inputs[0] + } + + // Aggregate functions can use a more relaxed sorting so that points + // within a window are grouped. This is much more efficient. + switch inputs := Iterators(inputs).cast().(type) { + case []FloatIterator: + return newFloatMergeIterator(inputs, opt) + case []IntegerIterator: + return newIntegerMergeIterator(inputs, opt) + case []StringIterator: + return newStringMergeIterator(inputs, opt) + case []BooleanIterator: + return newBooleanMergeIterator(inputs, opt) + default: + panic(fmt.Sprintf("unsupported merge iterator type: %T", inputs)) + } +} + +// NewParallelMergeIterator returns an iterator that breaks input iterators +// into groups and processes them in parallel. +func NewParallelMergeIterator(inputs []Iterator, opt IteratorOptions, parallelism int) Iterator { + inputs = Iterators(inputs).filterNonNil() + if len(inputs) == 0 { + return nil + } else if len(inputs) == 1 { + return inputs[0] + } + + // Limit parallelism to the number of inputs. + if len(inputs) < parallelism { + parallelism = len(inputs) + } + + // Determine the number of inputs per output iterator. + n := len(inputs) / parallelism + + // Group iterators together. + outputs := make([]Iterator, parallelism) + for i := range outputs { + var slice []Iterator + if i < len(outputs)-1 { + slice = inputs[i*n : (i+1)*n] + } else { + slice = inputs[i*n:] + } + + outputs[i] = newParallelIterator(NewMergeIterator(slice, opt)) + } + + // Merge all groups together. + return NewMergeIterator(outputs, opt) +} + +// NewSortedMergeIterator returns an iterator to merge itrs into one. +// Inputs must either be sorted merge iterators or only contain a single +// name/tag in sorted order. The iterator will output all points by name/tag, +// then time. This iterator is useful when you need all points for a name/tag +// to be in order. 
+func NewSortedMergeIterator(inputs []Iterator, opt IteratorOptions) Iterator { + inputs = Iterators(inputs).filterNonNil() + if len(inputs) == 0 { + return nil + } + + switch inputs := Iterators(inputs).cast().(type) { + case []FloatIterator: + return newFloatSortedMergeIterator(inputs, opt) + case []IntegerIterator: + return newIntegerSortedMergeIterator(inputs, opt) + case []StringIterator: + return newStringSortedMergeIterator(inputs, opt) + case []BooleanIterator: + return newBooleanSortedMergeIterator(inputs, opt) + default: + panic(fmt.Sprintf("unsupported sorted merge iterator type: %T", inputs)) + } +} + +// newParallelIterator returns an iterator that runs in a separate goroutine. +func newParallelIterator(input Iterator) Iterator { + if input == nil { + return nil + } + + switch itr := input.(type) { + case FloatIterator: + return newFloatParallelIterator(itr) + case IntegerIterator: + return newIntegerParallelIterator(itr) + case StringIterator: + return newStringParallelIterator(itr) + case BooleanIterator: + return newBooleanParallelIterator(itr) + default: + panic(fmt.Sprintf("unsupported parallel iterator type: %T", itr)) + } +} + +// NewLimitIterator returns an iterator that limits the number of points per grouping. +func NewLimitIterator(input Iterator, opt IteratorOptions) Iterator { + switch input := input.(type) { + case FloatIterator: + return newFloatLimitIterator(input, opt) + case IntegerIterator: + return newIntegerLimitIterator(input, opt) + case StringIterator: + return newStringLimitIterator(input, opt) + case BooleanIterator: + return newBooleanLimitIterator(input, opt) + default: + panic(fmt.Sprintf("unsupported limit iterator type: %T", input)) + } +} + +// NewDedupeIterator returns an iterator that only outputs unique points. +// This iterator maintains a serialized copy of each row so it is inefficient +// to use on large datasets. It is intended for small datasets such as meta queries. +func NewDedupeIterator(input Iterator) Iterator { + if input == nil { + return nil + } + + switch input := input.(type) { + case FloatIterator: + return newFloatDedupeIterator(input) + case IntegerIterator: + return newIntegerDedupeIterator(input) + case StringIterator: + return newStringDedupeIterator(input) + case BooleanIterator: + return newBooleanDedupeIterator(input) + default: + panic(fmt.Sprintf("unsupported dedupe iterator type: %T", input)) + } +} + +// NewFillIterator returns an iterator that fills in missing points in an aggregate. +func NewFillIterator(input Iterator, expr Expr, opt IteratorOptions) Iterator { + switch input := input.(type) { + case FloatIterator: + return newFloatFillIterator(input, expr, opt) + case IntegerIterator: + return newIntegerFillIterator(input, expr, opt) + case StringIterator: + return newStringFillIterator(input, expr, opt) + case BooleanIterator: + return newBooleanFillIterator(input, expr, opt) + default: + panic(fmt.Sprintf("unsupported fill iterator type: %T", input)) + } +} + +// NewIntervalIterator returns an iterator that sets the time on each point to the interval. 
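A rough sketch of how these constructors compose from outside the package: any value satisfying one of the typed iterator interfaces can be wrapped by NewLimitIterator (and, in the same way, NewDedupeIterator or NewFillIterator). The sliceFloatIterator helper, the main wrapper and the sample points are hypothetical, not upstream code.

package main

import (
    "fmt"

    "github.com/influxdata/influxdb/influxql"
)

// sliceFloatIterator is a minimal influxql.FloatIterator over an in-memory slice.
type sliceFloatIterator struct {
    points []influxql.FloatPoint
    i      int
}

func (itr *sliceFloatIterator) Stats() influxql.IteratorStats { return influxql.IteratorStats{} }
func (itr *sliceFloatIterator) Close() error                  { return nil }
func (itr *sliceFloatIterator) Next() (*influxql.FloatPoint, error) {
    if itr.i >= len(itr.points) {
        return nil, nil // a nil point signals the end of the stream
    }
    p := &itr.points[itr.i]
    itr.i++
    return p, nil
}

func main() {
    input := &sliceFloatIterator{points: []influxql.FloatPoint{
        {Name: "cpu", Time: 0, Value: 1},
        {Name: "cpu", Time: 1, Value: 2},
        {Name: "cpu", Time: 2, Value: 3},
    }}

    // Skip the first point of each series and keep one point after that.
    itr := influxql.NewLimitIterator(input, influxql.IteratorOptions{Limit: 1, Offset: 1})
    defer itr.Close()

    fitr := itr.(influxql.FloatIterator)
    for {
        p, err := fitr.Next()
        if err != nil || p == nil {
            break
        }
        fmt.Println(p.Name, p.Time, p.Value) // cpu 1 2
    }
}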
+func NewIntervalIterator(input Iterator, opt IteratorOptions) Iterator {
+ switch input := input.(type) {
+ case FloatIterator:
+ return newFloatIntervalIterator(input, opt)
+ case IntegerIterator:
+ return newIntegerIntervalIterator(input, opt)
+ case StringIterator:
+ return newStringIntervalIterator(input, opt)
+ case BooleanIterator:
+ return newBooleanIntervalIterator(input, opt)
+ default:
+ panic(fmt.Sprintf("unsupported interval iterator type: %T", input))
+ }
+}
+
+// NewInterruptIterator returns an iterator that will stop producing output when
+// the passed-in channel has been closed.
+func NewInterruptIterator(input Iterator, closing <-chan struct{}) Iterator {
+ switch input := input.(type) {
+ case FloatIterator:
+ return newFloatInterruptIterator(input, closing)
+ case IntegerIterator:
+ return newIntegerInterruptIterator(input, closing)
+ case StringIterator:
+ return newStringInterruptIterator(input, closing)
+ case BooleanIterator:
+ return newBooleanInterruptIterator(input, closing)
+ default:
+ panic(fmt.Sprintf("unsupported interrupt iterator type: %T", input))
+ }
+}
+
+// NewCloseInterruptIterator returns an iterator that will invoke the Close() method
+// on the input iterator when the passed-in channel has been closed.
+func NewCloseInterruptIterator(input Iterator, closing <-chan struct{}) Iterator {
+ switch input := input.(type) {
+ case FloatIterator:
+ return newFloatCloseInterruptIterator(input, closing)
+ case IntegerIterator:
+ return newIntegerCloseInterruptIterator(input, closing)
+ case StringIterator:
+ return newStringCloseInterruptIterator(input, closing)
+ case BooleanIterator:
+ return newBooleanCloseInterruptIterator(input, closing)
+ default:
+ panic(fmt.Sprintf("unsupported close interrupt iterator type: %T", input))
+ }
+}
+
+// AuxIterator represents an iterator that can split off separate auxiliary iterators.
+type AuxIterator interface {
+ Iterator
+ IteratorCreator
+
+ // Auxiliary iterator
+ Iterator(name string, typ DataType) Iterator
+
+ // Start starts writing to the created iterators.
+ Start()
+
+ // Backgrounds the iterator so that, when start is called, it will
+ // continuously read from the iterator.
+ Background()
+}
+
+// NewAuxIterator returns a new instance of AuxIterator.
+func NewAuxIterator(input Iterator, opt IteratorOptions) AuxIterator {
+ switch input := input.(type) {
+ case FloatIterator:
+ return newFloatAuxIterator(input, opt)
+ case IntegerIterator:
+ return newIntegerAuxIterator(input, opt)
+ case StringIterator:
+ return newStringAuxIterator(input, opt)
+ case BooleanIterator:
+ return newBooleanAuxIterator(input, opt)
+ default:
+ panic(fmt.Sprintf("unsupported aux iterator type: %T", input))
+ }
+}
+
+// auxIteratorField represents an auxiliary field within an AuxIterator.
+type auxIteratorField struct {
+ name string // field name
+ typ DataType // detected data type
+ itrs []Iterator // auxiliary iterators
+ mu sync.Mutex
+ opt IteratorOptions
+}
+
+func (f *auxIteratorField) append(itr Iterator) {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ f.itrs = append(f.itrs, itr)
+}
+
+func (f *auxIteratorField) close() {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ for _, itr := range f.itrs {
+ itr.Close()
+ }
+}
+
+type auxIteratorFields []*auxIteratorField
+
+// newAuxIteratorFields returns a new instance of auxIteratorFields from a list of field names.
+func newAuxIteratorFields(opt IteratorOptions) auxIteratorFields {
+ fields := make(auxIteratorFields, len(opt.Aux))
+ for i, ref := range opt.Aux {
+ fields[i] = &auxIteratorField{name: ref.Val, typ: ref.Type, opt: opt}
+ }
+ return fields
+}
+
+func (a auxIteratorFields) close() {
+ for _, f := range a {
+ f.close()
+ }
+}
+
+// iterator creates a new iterator for a named auxiliary field.
+func (a auxIteratorFields) iterator(name string, typ DataType) Iterator {
+ for _, f := range a {
+ // Skip field if its name doesn't match.
+ // Exit if no points were received by the iterator.
+ if f.name != name || (typ != Unknown && f.typ != typ) {
+ continue
+ }
+
+ // Create channel iterator by data type.
+ switch f.typ {
+ case Float:
+ itr := &floatChanIterator{cond: sync.NewCond(&sync.Mutex{})}
+ f.append(itr)
+ return itr
+ case Integer:
+ itr := &integerChanIterator{cond: sync.NewCond(&sync.Mutex{})}
+ f.append(itr)
+ return itr
+ case String, Tag:
+ itr := &stringChanIterator{cond: sync.NewCond(&sync.Mutex{})}
+ f.append(itr)
+ return itr
+ case Boolean:
+ itr := &booleanChanIterator{cond: sync.NewCond(&sync.Mutex{})}
+ f.append(itr)
+ return itr
+ default:
+ break
+ }
+ }
+
+ return &nilFloatIterator{}
+}
+
+// send sends a point to all field iterators.
+func (a auxIteratorFields) send(p Point) (ok bool) {
+ values := p.aux()
+ for i, f := range a {
+ v := values[i]
+
+ tags := p.tags()
+ tags = tags.Subset(f.opt.Dimensions)
+
+ // Send new point for each aux iterator.
+ // Primitive pointers represent nil values.
+ for _, itr := range f.itrs {
+ switch itr := itr.(type) {
+ case *floatChanIterator:
+ ok = itr.setBuf(p.name(), tags, p.time(), v) || ok
+ case *integerChanIterator:
+ ok = itr.setBuf(p.name(), tags, p.time(), v) || ok
+ case *stringChanIterator:
+ ok = itr.setBuf(p.name(), tags, p.time(), v) || ok
+ case *booleanChanIterator:
+ ok = itr.setBuf(p.name(), tags, p.time(), v) || ok
+ default:
+ panic(fmt.Sprintf("invalid aux itr type: %T", itr))
+ }
+ }
+ }
+ return ok
+}
+
+func (a auxIteratorFields) sendError(err error) {
+ for _, f := range a {
+ for _, itr := range f.itrs {
+ switch itr := itr.(type) {
+ case *floatChanIterator:
+ itr.setErr(err)
+ case *integerChanIterator:
+ itr.setErr(err)
+ case *stringChanIterator:
+ itr.setErr(err)
+ case *booleanChanIterator:
+ itr.setErr(err)
+ default:
+ panic(fmt.Sprintf("invalid aux itr type: %T", itr))
+ }
+ }
+ }
+}
+
+// DrainIterator reads all points from an iterator.
+func DrainIterator(itr Iterator) {
+ defer itr.Close()
+ switch itr := itr.(type) {
+ case FloatIterator:
+ for p, _ := itr.Next(); p != nil; p, _ = itr.Next() {
+ }
+ case IntegerIterator:
+ for p, _ := itr.Next(); p != nil; p, _ = itr.Next() {
+ }
+ case StringIterator:
+ for p, _ := itr.Next(); p != nil; p, _ = itr.Next() {
+ }
+ case BooleanIterator:
+ for p, _ := itr.Next(); p != nil; p, _ = itr.Next() {
+ }
+ default:
+ panic(fmt.Sprintf("unsupported iterator type for draining: %T", itr))
+ }
+}
+
+// DrainIterators reads all points from all iterators.
+func DrainIterators(itrs []Iterator) { + defer Iterators(itrs).Close() + for { + var hasData bool + + for _, itr := range itrs { + switch itr := itr.(type) { + case FloatIterator: + if p, _ := itr.Next(); p != nil { + hasData = true + } + case IntegerIterator: + if p, _ := itr.Next(); p != nil { + hasData = true + } + case StringIterator: + if p, _ := itr.Next(); p != nil { + hasData = true + } + case BooleanIterator: + if p, _ := itr.Next(); p != nil { + hasData = true + } + default: + panic(fmt.Sprintf("unsupported iterator type for draining: %T", itr)) + } + } + + // Exit once all iterators return a nil point. + if !hasData { + break + } + } +} + +// NewReaderIterator returns an iterator that streams from a reader. +func NewReaderIterator(r io.Reader, typ DataType, stats IteratorStats) Iterator { + switch typ { + case Float: + return newFloatReaderIterator(r, stats) + case Integer: + return newIntegerReaderIterator(r, stats) + case String: + return newStringReaderIterator(r, stats) + case Boolean: + return newBooleanReaderIterator(r, stats) + default: + return &nilFloatIterator{} + } +} + +// IteratorCreator represents an interface for objects that can create Iterators. +type IteratorCreator interface { + // Creates a simple iterator for use in an InfluxQL query. + CreateIterator(opt IteratorOptions) (Iterator, error) + + // Returns the unique fields and dimensions across a list of sources. + FieldDimensions(sources Sources) (fields map[string]DataType, dimensions map[string]struct{}, err error) + + // Expands regex sources to all matching sources. + ExpandSources(sources Sources) (Sources, error) +} + +// IteratorCreators represents a list of iterator creators. +type IteratorCreators []IteratorCreator + +// Close closes all iterator creators that implement io.Closer. +func (a IteratorCreators) Close() error { + for _, ic := range a { + if ic, ok := ic.(io.Closer); ok { + ic.Close() + } + } + return nil +} + +// CreateIterator returns a single combined iterator from multiple iterator creators. +func (a IteratorCreators) CreateIterator(opt IteratorOptions) (Iterator, error) { + // Create iterators for each shard. + // Ensure that they are closed if an error occurs. + itrs := make([]Iterator, 0, len(a)) + if err := func() error { + for _, ic := range a { + itr, err := ic.CreateIterator(opt) + if err != nil { + return err + } else if itr == nil { + continue + } + itrs = append(itrs, itr) + } + return nil + }(); err != nil { + Iterators(itrs).Close() + return nil, err + } + + if len(itrs) == 0 { + return nil, nil + } + + return Iterators(itrs).Merge(opt) +} + +// FieldDimensions returns unique fields and dimensions from multiple iterator creators. +func (a IteratorCreators) FieldDimensions(sources Sources) (fields map[string]DataType, dimensions map[string]struct{}, err error) { + fields = make(map[string]DataType) + dimensions = make(map[string]struct{}) + + for _, ic := range a { + f, d, err := ic.FieldDimensions(sources) + if err != nil { + return nil, nil, err + } + for k, typ := range f { + if _, ok := fields[k]; typ != Unknown && (!ok || typ < fields[k]) { + fields[k] = typ + } + } + for k := range d { + dimensions[k] = struct{}{} + } + } + return +} + +// ExpandSources expands sources across all iterator creators and returns a unique result. 
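A rough sketch of the streaming surface defined above: points written with the exported point encoder can be read back through NewReaderIterator, since both sides share the same length-prefixed protobuf framing that the IteratorEncoder emits. The buffer, sample points and main wrapper are hypothetical, not upstream code.

package main

import (
    "bytes"
    "fmt"

    "github.com/influxdata/influxdb/influxql"
)

func main() {
    var buf bytes.Buffer

    // Encode two float points into an in-memory buffer.
    enc := influxql.NewFloatPointEncoder(&buf)
    for _, p := range []influxql.FloatPoint{
        {Name: "cpu", Time: 0, Value: 1},
        {Name: "cpu", Time: 10, Value: 2},
    } {
        if err := enc.EncodeFloatPoint(&p); err != nil {
            panic(err)
        }
    }

    // Stream them back out as a FloatIterator.
    itr := influxql.NewReaderIterator(&buf, influxql.Float, influxql.IteratorStats{})
    defer itr.Close()

    fitr := itr.(influxql.FloatIterator)
    for {
        p, err := fitr.Next()
        if err != nil {
            panic(err)
        } else if p == nil {
            break // io.EOF is translated into a nil point
        }
        fmt.Println(p.Name, p.Time, p.Value)
    }
}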
+func (a IteratorCreators) ExpandSources(sources Sources) (Sources, error) {
+ m := make(map[string]Source)
+
+ for _, ic := range a {
+ expanded, err := ic.ExpandSources(sources)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, src := range expanded {
+ switch src := src.(type) {
+ case *Measurement:
+ m[src.String()] = src
+ default:
+ return nil, fmt.Errorf("IteratorCreators.ExpandSources: unsupported source type: %T", src)
+ }
+ }
+ }
+
+ // Convert set to sorted slice.
+ names := make([]string, 0, len(m))
+ for name := range m {
+ names = append(names, name)
+ }
+ sort.Strings(names)
+
+ // Convert set to a list of Sources.
+ sorted := make(Sources, 0, len(m))
+ for _, name := range names {
+ sorted = append(sorted, m[name])
+ }
+
+ return sorted, nil
+}
+
+// IteratorOptions is an object passed to CreateIterator to specify creation options.
+type IteratorOptions struct {
+ // Expression to iterate for.
+ // This can be a VarRef or a Call.
+ Expr Expr
+
+ // Auxiliary tags or values to also retrieve for the point.
+ Aux []VarRef
+
+ // Data sources from which to retrieve data.
+ Sources []Source
+
+ // Group by interval and tags.
+ Interval Interval
+ Dimensions []string
+
+ // Fill options.
+ Fill FillOption
+ FillValue interface{}
+
+ // Condition to filter by.
+ Condition Expr
+
+ // Time range for the iterator.
+ StartTime int64
+ EndTime int64
+
+ // Sorted in time ascending order if true.
+ Ascending bool
+
+ // Limits the number of points per series.
+ Limit, Offset int
+
+ // Limits the number of series.
+ SLimit, SOffset int
+
+ // Removes duplicate rows from raw queries.
+ Dedupe bool
+
+ // If this channel is set and is closed, the iterator should try to exit
+ // and close as soon as possible.
+ InterruptCh <-chan struct{}
+}
+
+// newIteratorOptionsStmt creates the iterator options from stmt.
+func newIteratorOptionsStmt(stmt *SelectStatement, sopt *SelectOptions) (opt IteratorOptions, err error) {
+ // Determine time range from the condition.
+ startTime, endTime, err := TimeRange(stmt.Condition)
+ if err != nil {
+ return IteratorOptions{}, err
+ }
+
+ if !startTime.IsZero() {
+ opt.StartTime = startTime.UnixNano()
+ } else {
+ if sopt != nil {
+ opt.StartTime = sopt.MinTime.UnixNano()
+ } else {
+ opt.StartTime = MinTime
+ }
+ }
+ if !endTime.IsZero() {
+ opt.EndTime = endTime.UnixNano()
+ } else {
+ if sopt != nil {
+ opt.EndTime = sopt.MaxTime.UnixNano()
+ } else {
+ opt.EndTime = MaxTime
+ }
+ }
+
+ // Determine group by interval.
+ interval, err := stmt.GroupByInterval()
+ if err != nil {
+ return opt, err
+ }
+ // Set duration to zero if a negative interval has been used.
+ if interval < 0 {
+ interval = 0
+ } else if interval > 0 {
+ opt.Interval.Offset, err = stmt.GroupByOffset()
+ if err != nil {
+ return opt, err
+ }
+ }
+ opt.Interval.Duration = interval
+
+ // Determine dimensions.
+ for _, d := range stmt.Dimensions {
+ if d, ok := d.Expr.(*VarRef); ok {
+ opt.Dimensions = append(opt.Dimensions, d.Val)
+ }
+ }
+
+ opt.Sources = stmt.Sources
+ opt.Condition = stmt.Condition
+ opt.Ascending = stmt.TimeAscending()
+ opt.Dedupe = stmt.Dedupe
+
+ opt.Fill, opt.FillValue = stmt.Fill, stmt.FillValue
+ if opt.Fill == NullFill && stmt.Target != nil {
+ // Set the fill option to none if a target has been given.
+ // Null values will get ignored when being written to the target
+ // so fill(null) wouldn't write any null values to begin with.
+ opt.Fill = NoFill + } + opt.Limit, opt.Offset = stmt.Limit, stmt.Offset + opt.SLimit, opt.SOffset = stmt.SLimit, stmt.SOffset + if sopt != nil { + opt.InterruptCh = sopt.InterruptCh + } + + return opt, nil +} + +// MergeSorted returns true if the options require a sorted merge. +// This is only needed when the expression is a variable reference or there is no expr. +func (opt IteratorOptions) MergeSorted() bool { + if opt.Expr == nil { + return true + } + _, ok := opt.Expr.(*VarRef) + return ok +} + +// SeekTime returns the time the iterator should start from. +// For ascending iterators this is the start time, for descending iterators it's the end time. +func (opt IteratorOptions) SeekTime() int64 { + if opt.Ascending { + return opt.StartTime + } + return opt.EndTime +} + +// Window returns the time window [start,end) that t falls within. +func (opt IteratorOptions) Window(t int64) (start, end int64) { + if opt.Interval.IsZero() { + return opt.StartTime, opt.EndTime + 1 + } + + // Subtract the offset to the time so we calculate the correct base interval. + t -= int64(opt.Interval.Offset) + + // Truncate time by duration. + dt := t % int64(opt.Interval.Duration) + if dt < 0 { + // Negative modulo rounds up instead of down, so offset + // with the duration. + dt += int64(opt.Interval.Duration) + } + t -= dt + + // Apply the offset. + start = t + int64(opt.Interval.Offset) + end = start + int64(opt.Interval.Duration) + return +} + +// DerivativeInterval returns the time interval for the derivative function. +func (opt IteratorOptions) DerivativeInterval() Interval { + // Use the interval on the derivative() call, if specified. + if expr, ok := opt.Expr.(*Call); ok && len(expr.Args) == 2 { + return Interval{Duration: expr.Args[1].(*DurationLiteral).Val} + } + + // Otherwise use the group by interval, if specified. + if opt.Interval.Duration > 0 { + return Interval{Duration: opt.Interval.Duration} + } + + return Interval{Duration: time.Second} +} + +// ElapsedInterval returns the time interval for the elapsed function. +func (opt IteratorOptions) ElapsedInterval() Interval { + // Use the interval on the elapsed() call, if specified. + if expr, ok := opt.Expr.(*Call); ok && len(expr.Args) == 2 { + return Interval{Duration: expr.Args[1].(*DurationLiteral).Val} + } + + return Interval{Duration: time.Nanosecond} +} + +// MarshalBinary encodes opt into a binary format. +func (opt *IteratorOptions) MarshalBinary() ([]byte, error) { + return proto.Marshal(encodeIteratorOptions(opt)) +} + +// UnmarshalBinary decodes from a binary format in to opt. +func (opt *IteratorOptions) UnmarshalBinary(buf []byte) error { + var pb internal.IteratorOptions + if err := proto.Unmarshal(buf, &pb); err != nil { + return err + } + + other, err := decodeIteratorOptions(&pb) + if err != nil { + return err + } + *opt = *other + + return nil +} + +func encodeIteratorOptions(opt *IteratorOptions) *internal.IteratorOptions { + pb := &internal.IteratorOptions{ + Interval: encodeInterval(opt.Interval), + Dimensions: opt.Dimensions, + Fill: proto.Int32(int32(opt.Fill)), + StartTime: proto.Int64(opt.StartTime), + EndTime: proto.Int64(opt.EndTime), + Ascending: proto.Bool(opt.Ascending), + Limit: proto.Int64(int64(opt.Limit)), + Offset: proto.Int64(int64(opt.Offset)), + SLimit: proto.Int64(int64(opt.SLimit)), + SOffset: proto.Int64(int64(opt.SOffset)), + Dedupe: proto.Bool(opt.Dedupe), + } + + // Set expression, if set. 
+ if opt.Expr != nil {
+ pb.Expr = proto.String(opt.Expr.String())
+ }
+
+ // Convert and encode aux fields as variable references.
+ pb.Fields = make([]*internal.VarRef, len(opt.Aux))
+ pb.Aux = make([]string, len(opt.Aux))
+ for i, ref := range opt.Aux {
+ pb.Fields[i] = encodeVarRef(ref)
+ pb.Aux[i] = ref.Val
+ }
+
+ // Convert and encode sources to measurements.
+ sources := make([]*internal.Measurement, len(opt.Sources))
+ for i, source := range opt.Sources {
+ mm := source.(*Measurement)
+ sources[i] = encodeMeasurement(mm)
+ }
+ pb.Sources = sources
+
+ // Fill value can only be a number. Set it if available.
+ if v, ok := opt.FillValue.(float64); ok {
+ pb.FillValue = proto.Float64(v)
+ }
+
+ // Set condition, if set.
+ if opt.Condition != nil {
+ pb.Condition = proto.String(opt.Condition.String())
+ }
+
+ return pb
+}
+
+func decodeIteratorOptions(pb *internal.IteratorOptions) (*IteratorOptions, error) {
+ opt := &IteratorOptions{
+ Interval: decodeInterval(pb.GetInterval()),
+ Dimensions: pb.GetDimensions(),
+ Fill: FillOption(pb.GetFill()),
+ FillValue: pb.GetFillValue(),
+ StartTime: pb.GetStartTime(),
+ EndTime: pb.GetEndTime(),
+ Ascending: pb.GetAscending(),
+ Limit: int(pb.GetLimit()),
+ Offset: int(pb.GetOffset()),
+ SLimit: int(pb.GetSLimit()),
+ SOffset: int(pb.GetSOffset()),
+ Dedupe: pb.GetDedupe(),
+ }
+
+ // Set expression, if set.
+ if pb.Expr != nil {
+ expr, err := ParseExpr(pb.GetExpr())
+ if err != nil {
+ return nil, err
+ }
+ opt.Expr = expr
+ }
+
+ // Convert and decode variable references.
+ if fields := pb.GetFields(); fields != nil {
+ opt.Aux = make([]VarRef, len(fields))
+ for i, ref := range fields {
+ opt.Aux[i] = decodeVarRef(ref)
+ }
+ } else {
+ opt.Aux = make([]VarRef, len(pb.GetAux()))
+ for i, name := range pb.GetAux() {
+ opt.Aux[i] = VarRef{Val: name}
+ }
+ }
+
+ // Convert and decode sources to measurements.
+ sources := make([]Source, len(pb.GetSources()))
+ for i, source := range pb.GetSources() {
+ mm, err := decodeMeasurement(source)
+ if err != nil {
+ return nil, err
+ }
+ sources[i] = mm
+ }
+ opt.Sources = sources
+
+ // Set condition, if set.
+ if pb.Condition != nil {
+ expr, err := ParseExpr(pb.GetCondition())
+ if err != nil {
+ return nil, err
+ }
+ opt.Condition = expr
+ }
+
+ return opt, nil
+}
+
+// selectInfo represents an object that stores info about select fields.
+type selectInfo struct {
+ calls map[*Call]struct{}
+ refs map[*VarRef]struct{}
+}
+
+// newSelectInfo creates an object with call and var ref info from stmt.
+func newSelectInfo(stmt *SelectStatement) *selectInfo {
+ info := &selectInfo{
+ calls: make(map[*Call]struct{}),
+ refs: make(map[*VarRef]struct{}),
+ }
+ Walk(info, stmt.Fields)
+ return info
+}
+
+func (v *selectInfo) Visit(n Node) Visitor {
+ switch n := n.(type) {
+ case *Call:
+ v.calls[n] = struct{}{}
+ return nil
+ case *VarRef:
+ v.refs[n] = struct{}{}
+ return nil
+ }
+ return v
+}
+
+// Interval represents a repeating interval for a query.
+type Interval struct {
+ Duration time.Duration
+ Offset time.Duration
+}
+
+// IsZero returns true if the interval has no duration.
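A rough worked example of the Window logic defined above, using raw nanosecond durations the same way the unit tests further down do; the option values and the main wrapper are arbitrary choices for illustration, not upstream code.

package main

import (
    "fmt"

    "github.com/influxdata/influxdb/influxql"
)

func main() {
    opt := influxql.IteratorOptions{
        Interval: influxql.Interval{Duration: 10, Offset: 8},
    }

    // 14 lands in the window anchored at the offset: [8, 18).
    start, end := opt.Window(14)
    fmt.Println(start, end) // 8 18

    // A negative timestamp still rounds down to its window start rather than
    // toward zero, thanks to the negative-modulo correction: [-12, -2).
    start, end = opt.Window(-3)
    fmt.Println(start, end) // -12 -2
}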
+func (i Interval) IsZero() bool { return i.Duration == 0 } + +func encodeInterval(i Interval) *internal.Interval { + return &internal.Interval{ + Duration: proto.Int64(i.Duration.Nanoseconds()), + Offset: proto.Int64(i.Offset.Nanoseconds()), + } +} + +func decodeInterval(pb *internal.Interval) Interval { + return Interval{ + Duration: time.Duration(pb.GetDuration()), + Offset: time.Duration(pb.GetOffset()), + } +} + +func encodeVarRef(ref VarRef) *internal.VarRef { + return &internal.VarRef{ + Val: proto.String(ref.Val), + Type: proto.Int32(int32(ref.Type)), + } +} + +func decodeVarRef(pb *internal.VarRef) VarRef { + return VarRef{ + Val: pb.GetVal(), + Type: DataType(pb.GetType()), + } +} + +type nilFloatIterator struct{} + +func (*nilFloatIterator) Stats() IteratorStats { return IteratorStats{} } +func (*nilFloatIterator) Close() error { return nil } +func (*nilFloatIterator) Next() (*FloatPoint, error) { return nil, nil } + +// integerFloatTransformIterator executes a function to modify an existing point for every +// output of the input iterator. +type integerFloatTransformIterator struct { + input IntegerIterator + fn integerFloatTransformFunc +} + +// Stats returns stats from the input iterator. +func (itr *integerFloatTransformIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerFloatTransformIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *integerFloatTransformIterator) Next() (*FloatPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { + return itr.fn(p), nil + } + return nil, nil +} + +// integerFloatTransformFunc creates or modifies a point. +// The point passed in may be modified and returned rather than allocating a +// new point if possible. +type integerFloatTransformFunc func(p *IntegerPoint) *FloatPoint + +type integerFloatCastIterator struct { + input IntegerIterator +} + +func (itr *integerFloatCastIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *integerFloatCastIterator) Close() error { return itr.input.Close() } +func (itr *integerFloatCastIterator) Next() (*FloatPoint, error) { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + return &FloatPoint{ + Name: p.Name, + Tags: p.Tags, + Time: p.Time, + Nil: p.Nil, + Value: float64(p.Value), + Aux: p.Aux, + }, nil +} + +// IteratorStats represents statistics about an iterator. +// Some statistics are available immediately upon iterator creation while +// some are derived as the iterator processes data. +type IteratorStats struct { + SeriesN int // series represented + PointN int // points returned +} + +// Add aggregates fields from s and other together. Overwrites s. +func (s *IteratorStats) Add(other IteratorStats) { + s.SeriesN += other.SeriesN + s.PointN += other.PointN +} + +func encodeIteratorStats(stats *IteratorStats) *internal.IteratorStats { + return &internal.IteratorStats{ + SeriesN: proto.Int64(int64(stats.SeriesN)), + PointN: proto.Int64(int64(stats.PointN)), + } +} + +func decodeIteratorStats(pb *internal.IteratorStats) IteratorStats { + return IteratorStats{ + SeriesN: int(pb.GetSeriesN()), + PointN: int(pb.GetPointN()), + } +} + +// floatFastDedupeIterator outputs unique points where the point has a single aux field. 
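A rough sketch of the options round trip through the protobuf encoding defined above, showing that the scalar fields survive MarshalBinary and UnmarshalBinary; the field values and the main wrapper are arbitrary illustrations, not upstream code.

package main

import (
    "fmt"

    "github.com/influxdata/influxdb/influxql"
)

func main() {
    opt := influxql.IteratorOptions{
        StartTime: 0,
        EndTime:   60,
        Limit:     10,
        Ascending: true,
    }

    buf, err := opt.MarshalBinary()
    if err != nil {
        panic(err)
    }

    var other influxql.IteratorOptions
    if err := other.UnmarshalBinary(buf); err != nil {
        panic(err)
    }

    fmt.Println(other.EndTime, other.Limit, other.Ascending) // 60 10 true
}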
+type floatFastDedupeIterator struct { + input FloatIterator + m map[fastDedupeKey]struct{} // lookup of points already sent +} + +// newFloatFastDedupeIterator returns a new instance of floatFastDedupeIterator. +func newFloatFastDedupeIterator(input FloatIterator) *floatFastDedupeIterator { + return &floatFastDedupeIterator{ + input: input, + m: make(map[fastDedupeKey]struct{}), + } +} + +// Stats returns stats from the input iterator. +func (itr *floatFastDedupeIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatFastDedupeIterator) Close() error { return itr.input.Close() } + +// Next returns the next unique point from the input iterator. +func (itr *floatFastDedupeIterator) Next() (*FloatPoint, error) { + for { + // Read next point. + // Skip if there are not any aux fields. + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } else if len(p.Aux) == 0 { + continue + } + + // If the point has already been output then move to the next point. + key := fastDedupeKey{name: p.Name} + key.values[0] = p.Aux[0] + if len(p.Aux) > 1 { + key.values[1] = p.Aux[1] + } + if _, ok := itr.m[key]; ok { + continue + } + + // Otherwise mark it as emitted and return point. + itr.m[key] = struct{}{} + return p, nil + } +} + +type fastDedupeKey struct { + name string + values [2]interface{} +} + +type reverseStringSlice []string + +func (p reverseStringSlice) Len() int { return len(p) } +func (p reverseStringSlice) Less(i, j int) bool { return p[i] > p[j] } +func (p reverseStringSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff -Nru influxdb-0.10.0+dfsg1/influxql/iterator_test.go influxdb-1.1.1+dfsg1/influxql/iterator_test.go --- influxdb-0.10.0+dfsg1/influxql/iterator_test.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/iterator_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,1213 @@ +package influxql_test + +import ( + "bytes" + "fmt" + "math" + "math/rand" + "reflect" + "regexp" + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/pkg/deep" +) + +// Ensure that a set of iterators can be merged together, sorted by window and name/tag. 
+func TestMergeIterator_Float(t *testing.T) { + inputs := []*FloatIterator{ + {Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}, + }}, + {Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, + }}, + {Points: []influxql.FloatPoint{}}, + {Points: []influxql.FloatPoint{}}, + } + + itr := influxql.NewMergeIterator(FloatIterators(inputs), influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10 * time.Nanosecond, + }, + Ascending: true, + }) + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, + {&influxql.FloatPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, + {&influxql.FloatPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +// Ensure that a set of iterators can be merged together, sorted by window and name/tag. 
+func TestMergeIterator_Integer(t *testing.T) { + inputs := []*IntegerIterator{ + {Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}, + }}, + {Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, + }}, + {Points: []influxql.IntegerPoint{}}, + } + itr := influxql.NewMergeIterator(IntegerIterators(inputs), influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10 * time.Nanosecond, + }, + Ascending: true, + }) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, + {&influxql.IntegerPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, + {&influxql.IntegerPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +// Ensure that a set of iterators can be merged together, sorted by window and name/tag. 
+func TestMergeIterator_String(t *testing.T) { + inputs := []*StringIterator{ + {Points: []influxql.StringPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: "a"}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: "c"}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: "d"}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: "b"}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: "h"}, + }}, + {Points: []influxql.StringPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: "g"}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: "e"}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: "f"}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: "i"}, + }}, + {Points: []influxql.StringPoint{}}, + } + itr := influxql.NewMergeIterator(StringIterators(inputs), influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10 * time.Nanosecond, + }, + Ascending: true, + }) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: "a"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: "c"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: "g"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: "d"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: "b"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: "e"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: "f"}}, + {&influxql.StringPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: "i"}}, + {&influxql.StringPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: "h"}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +// Ensure that a set of iterators can be merged together, sorted by window and name/tag. 
+func TestMergeIterator_Boolean(t *testing.T) { + inputs := []*BooleanIterator{ + {Points: []influxql.BooleanPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: true}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: true}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: false}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: false}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: true}, + }}, + {Points: []influxql.BooleanPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: true}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: true}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: false}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: false}, + }}, + {Points: []influxql.BooleanPoint{}}, + } + itr := influxql.NewMergeIterator(BooleanIterators(inputs), influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10 * time.Nanosecond, + }, + Ascending: true, + }) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: true}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: true}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: true}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: false}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: false}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: true}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: false}}, + {&influxql.BooleanPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: false}}, + {&influxql.BooleanPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: true}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +func TestMergeIterator_Nil(t *testing.T) { + itr := influxql.NewMergeIterator([]influxql.Iterator{nil}, influxql.IteratorOptions{}) + if itr != nil { + t.Fatalf("unexpected iterator: %#v", itr) + } +} + +func TestMergeIterator_Cast_Float(t *testing.T) { + inputs := []influxql.Iterator{ + &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}, + }}, + &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, + }}, + } + + itr := influxql.NewMergeIterator(inputs, influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10 * time.Nanosecond, + }, + Ascending: true, + }) + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}}, + 
{&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, + {&influxql.FloatPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, + {&influxql.FloatPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + switch input := input.(type) { + case *FloatIterator: + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + case *IntegerIterator: + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } + } +} + +// Ensure that a set of iterators can be merged together, sorted by name/tag. +func TestSortedMergeIterator_Float(t *testing.T) { + inputs := []*FloatIterator{ + {Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}, + }}, + {Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, + }}, + {Points: []influxql.FloatPoint{}}, + } + itr := influxql.NewSortedMergeIterator(FloatIterators(inputs), influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10 * time.Nanosecond, + }, + Ascending: true, + }) + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, + {&influxql.FloatPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, + {&influxql.FloatPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +// Ensure that a set of iterators can be merged together, sorted by name/tag. 
+func TestSortedMergeIterator_Integer(t *testing.T) { + inputs := []*IntegerIterator{ + {Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}, + }}, + {Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, + }}, + {Points: []influxql.IntegerPoint{}}, + } + itr := influxql.NewSortedMergeIterator(IntegerIterators(inputs), influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10 * time.Nanosecond, + }, + Ascending: true, + }) + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, + {&influxql.IntegerPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, + {&influxql.IntegerPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +// Ensure that a set of iterators can be merged together, sorted by name/tag. 
+func TestSortedMergeIterator_String(t *testing.T) { + inputs := []*StringIterator{ + {Points: []influxql.StringPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: "a"}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: "c"}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: "d"}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: "b"}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: "h"}, + }}, + {Points: []influxql.StringPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: "g"}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: "e"}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: "f"}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: "i"}, + }}, + {Points: []influxql.StringPoint{}}, + } + itr := influxql.NewSortedMergeIterator(StringIterators(inputs), influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10 * time.Nanosecond, + }, + Ascending: true, + }) + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: "a"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: "c"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: "g"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: "d"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: "b"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: "e"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: "f"}}, + {&influxql.StringPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: "i"}}, + {&influxql.StringPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: "h"}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +// Ensure that a set of iterators can be merged together, sorted by name/tag. 
+func TestSortedMergeIterator_Boolean(t *testing.T) { + inputs := []*BooleanIterator{ + {Points: []influxql.BooleanPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: true}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: true}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: false}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: false}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: true}, + }}, + {Points: []influxql.BooleanPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: true}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: true}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: false}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: true}, + }}, + {Points: []influxql.BooleanPoint{}}, + } + itr := influxql.NewSortedMergeIterator(BooleanIterators(inputs), influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10 * time.Nanosecond, + }, + Ascending: true, + }) + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: true}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: true}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: true}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: false}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: false}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: true}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: false}}, + {&influxql.BooleanPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: true}}, + {&influxql.BooleanPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: true}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +func TestSortedMergeIterator_Nil(t *testing.T) { + itr := influxql.NewSortedMergeIterator([]influxql.Iterator{nil}, influxql.IteratorOptions{}) + if itr != nil { + t.Fatalf("unexpected iterator: %#v", itr) + } +} + +func TestSortedMergeIterator_Cast_Float(t *testing.T) { + inputs := []influxql.Iterator{ + &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}, + }}, + &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, + }}, + } + + itr := influxql.NewSortedMergeIterator(inputs, influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10 * time.Nanosecond, + }, + Ascending: true, + }) + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 
0, Value: 1}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, + {&influxql.FloatPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, + {&influxql.FloatPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + switch input := input.(type) { + case *FloatIterator: + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + case *IntegerIterator: + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } + } +} + +// Ensure limit iterators work with limit and offset. +func TestLimitIterator_Float(t *testing.T) { + input := &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Time: 0, Value: 1}, + {Name: "cpu", Time: 5, Value: 3}, + {Name: "cpu", Time: 10, Value: 5}, + {Name: "mem", Time: 5, Value: 3}, + {Name: "mem", Time: 7, Value: 8}, + }} + + itr := influxql.NewLimitIterator(input, influxql.IteratorOptions{ + Limit: 1, + Offset: 1, + }) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 5, Value: 3}}, + {&influxql.FloatPoint{Name: "mem", Time: 7, Value: 8}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } + + if !input.Closed { + t.Error("iterator not closed") + } +} + +// Ensure limit iterators work with limit and offset. +func TestLimitIterator_Integer(t *testing.T) { + input := &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Time: 0, Value: 1}, + {Name: "cpu", Time: 5, Value: 3}, + {Name: "cpu", Time: 10, Value: 5}, + {Name: "mem", Time: 5, Value: 3}, + {Name: "mem", Time: 7, Value: 8}, + }} + + itr := influxql.NewLimitIterator(input, influxql.IteratorOptions{ + Limit: 1, + Offset: 1, + }) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Time: 5, Value: 3}}, + {&influxql.IntegerPoint{Name: "mem", Time: 7, Value: 8}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } + + if !input.Closed { + t.Error("iterator not closed") + } +} + +// Ensure limit iterators work with limit and offset. 
+func TestLimitIterator_String(t *testing.T) {
+ input := &StringIterator{Points: []influxql.StringPoint{
+ {Name: "cpu", Time: 0, Value: "a"},
+ {Name: "cpu", Time: 5, Value: "b"},
+ {Name: "cpu", Time: 10, Value: "c"},
+ {Name: "mem", Time: 5, Value: "d"},
+ {Name: "mem", Time: 7, Value: "e"},
+ }}
+
+ itr := influxql.NewLimitIterator(input, influxql.IteratorOptions{
+ Limit: 1,
+ Offset: 1,
+ })
+
+ if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ } else if !deep.Equal(a, [][]influxql.Point{
+ {&influxql.StringPoint{Name: "cpu", Time: 5, Value: "b"}},
+ {&influxql.StringPoint{Name: "mem", Time: 7, Value: "e"}},
+ }) {
+ t.Fatalf("unexpected points: %s", spew.Sdump(a))
+ }
+
+ if !input.Closed {
+ t.Error("iterator not closed")
+ }
+}
+
+// Ensure limit iterators work with limit and offset.
+func TestLimitIterator_Boolean(t *testing.T) {
+ input := &BooleanIterator{Points: []influxql.BooleanPoint{
+ {Name: "cpu", Time: 0, Value: true},
+ {Name: "cpu", Time: 5, Value: false},
+ {Name: "cpu", Time: 10, Value: true},
+ {Name: "mem", Time: 5, Value: false},
+ {Name: "mem", Time: 7, Value: true},
+ }}
+
+ itr := influxql.NewLimitIterator(input, influxql.IteratorOptions{
+ Limit: 1,
+ Offset: 1,
+ })
+
+ if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ } else if !deep.Equal(a, [][]influxql.Point{
+ {&influxql.BooleanPoint{Name: "cpu", Time: 5, Value: false}},
+ {&influxql.BooleanPoint{Name: "mem", Time: 7, Value: true}},
+ }) {
+ t.Fatalf("unexpected points: %s", spew.Sdump(a))
+ }
+
+ if !input.Closed {
+ t.Error("iterator not closed")
+ }
+}
+
+// Ensure auxiliary iterators can be created for auxiliary fields.
+func TestFloatAuxIterator(t *testing.T) {
+ itr := influxql.NewAuxIterator(
+ &FloatIterator{Points: []influxql.FloatPoint{
+ {Time: 0, Value: 1, Aux: []interface{}{float64(100), float64(200)}},
+ {Time: 1, Value: 2, Aux: []interface{}{float64(500), math.NaN()}},
+ }},
+ influxql.IteratorOptions{Aux: []influxql.VarRef{{Val: "f0", Type: influxql.Float}, {Val: "f1", Type: influxql.Float}}},
+ )
+
+ itrs := []influxql.Iterator{
+ itr,
+ itr.Iterator("f0", influxql.Unknown),
+ itr.Iterator("f1", influxql.Unknown),
+ itr.Iterator("f0", influxql.Unknown),
+ }
+ itr.Start()
+
+ if a, err := Iterators(itrs).ReadAll(); err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ } else if !deep.Equal(a, [][]influxql.Point{
+ {
+ &influxql.FloatPoint{Time: 0, Value: 1, Aux: []interface{}{float64(100), float64(200)}},
+ &influxql.FloatPoint{Time: 0, Value: float64(100)},
+ &influxql.FloatPoint{Time: 0, Value: float64(200)},
+ &influxql.FloatPoint{Time: 0, Value: float64(100)},
+ },
+ {
+ &influxql.FloatPoint{Time: 1, Value: 2, Aux: []interface{}{float64(500), math.NaN()}},
+ &influxql.FloatPoint{Time: 1, Value: float64(500)},
+ &influxql.FloatPoint{Time: 1, Value: math.NaN()},
+ &influxql.FloatPoint{Time: 1, Value: float64(500)},
+ },
+ }) {
+ t.Fatalf("unexpected points: %s", spew.Sdump(a))
+ }
+}
+
+// Ensure limit iterator returns a subset of points.
+func TestLimitIterator(t *testing.T) { + itr := influxql.NewLimitIterator( + &FloatIterator{Points: []influxql.FloatPoint{ + {Time: 0, Value: 0}, + {Time: 1, Value: 1}, + {Time: 2, Value: 2}, + {Time: 3, Value: 3}, + }}, + influxql.IteratorOptions{ + Limit: 2, + Offset: 1, + StartTime: influxql.MinTime, + EndTime: influxql.MaxTime, + }, + ) + + if a, err := (Iterators{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Time: 1, Value: 1}}, + {&influxql.FloatPoint{Time: 2, Value: 2}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Iterators is a test wrapper for iterators. +type Iterators []influxql.Iterator + +// Next returns the next value from each iterator. +// Returns nil if any iterator returns a nil. +func (itrs Iterators) Next() ([]influxql.Point, error) { + a := make([]influxql.Point, len(itrs)) + for i, itr := range itrs { + switch itr := itr.(type) { + case influxql.FloatIterator: + fp, err := itr.Next() + if fp == nil || err != nil { + return nil, err + } + a[i] = fp + case influxql.IntegerIterator: + ip, err := itr.Next() + if ip == nil || err != nil { + return nil, err + } + a[i] = ip + case influxql.StringIterator: + sp, err := itr.Next() + if sp == nil || err != nil { + return nil, err + } + a[i] = sp + case influxql.BooleanIterator: + bp, err := itr.Next() + if bp == nil || err != nil { + return nil, err + } + a[i] = bp + default: + panic(fmt.Sprintf("iterator type not supported: %T", itr)) + } + } + return a, nil +} + +// ReadAll reads all points from all iterators. +func (itrs Iterators) ReadAll() ([][]influxql.Point, error) { + var a [][]influxql.Point + + // Read from every iterator until a nil is encountered. + for { + points, err := itrs.Next() + if err != nil { + return nil, err + } else if points == nil { + break + } + a = append(a, influxql.Points(points).Clone()) + } + + // Close all iterators. 
+ influxql.Iterators(itrs).Close() + + return a, nil +} + +func TestIteratorOptions_Window_Interval(t *testing.T) { + opt := influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10, + }, + } + + start, end := opt.Window(4) + if start != 0 { + t.Errorf("expected start to be 0, got %d", start) + } + if end != 10 { + t.Errorf("expected end to be 10, got %d", end) + } +} + +func TestIteratorOptions_Window_Offset(t *testing.T) { + opt := influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10, + Offset: 8, + }, + } + + start, end := opt.Window(14) + if start != 8 { + t.Errorf("expected start to be 8, got %d", start) + } + if end != 18 { + t.Errorf("expected end to be 18, got %d", end) + } +} + +func TestIteratorOptions_Window_Default(t *testing.T) { + opt := influxql.IteratorOptions{ + StartTime: 0, + EndTime: 60, + } + + start, end := opt.Window(34) + if start != 0 { + t.Errorf("expected start to be 0, got %d", start) + } + if end != 61 { + t.Errorf("expected end to be 61, got %d", end) + } +} + +func TestIteratorOptions_SeekTime_Ascending(t *testing.T) { + opt := influxql.IteratorOptions{ + StartTime: 30, + EndTime: 60, + Ascending: true, + } + + time := opt.SeekTime() + if time != 30 { + t.Errorf("expected time to be 30, got %d", time) + } +} + +func TestIteratorOptions_SeekTime_Descending(t *testing.T) { + opt := influxql.IteratorOptions{ + StartTime: 30, + EndTime: 60, + Ascending: false, + } + + time := opt.SeekTime() + if time != 60 { + t.Errorf("expected time to be 60, got %d", time) + } +} + +func TestIteratorOptions_MergeSorted(t *testing.T) { + opt := influxql.IteratorOptions{} + sorted := opt.MergeSorted() + if !sorted { + t.Error("expected no expression to be sorted, got unsorted") + } + + opt.Expr = &influxql.VarRef{} + sorted = opt.MergeSorted() + if !sorted { + t.Error("expected expression with varref to be sorted, got unsorted") + } + + opt.Expr = &influxql.Call{} + sorted = opt.MergeSorted() + if sorted { + t.Error("expected expression without varref to be unsorted, got sorted") + } +} + +func TestIteratorOptions_DerivativeInterval_Default(t *testing.T) { + opt := influxql.IteratorOptions{} + expected := influxql.Interval{Duration: time.Second} + actual := opt.DerivativeInterval() + if actual != expected { + t.Errorf("expected derivative interval to be %v, got %v", expected, actual) + } +} + +func TestIteratorOptions_DerivativeInterval_GroupBy(t *testing.T) { + opt := influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10, + Offset: 2, + }, + } + expected := influxql.Interval{Duration: 10} + actual := opt.DerivativeInterval() + if actual != expected { + t.Errorf("expected derivative interval to be %v, got %v", expected, actual) + } +} + +func TestIteratorOptions_DerivativeInterval_Call(t *testing.T) { + opt := influxql.IteratorOptions{ + Expr: &influxql.Call{ + Name: "mean", + Args: []influxql.Expr{ + &influxql.VarRef{Val: "value"}, + &influxql.DurationLiteral{Val: 2 * time.Second}, + }, + }, + Interval: influxql.Interval{ + Duration: 10, + Offset: 2, + }, + } + expected := influxql.Interval{Duration: 2 * time.Second} + actual := opt.DerivativeInterval() + if actual != expected { + t.Errorf("expected derivative interval to be %v, got %v", expected, actual) + } +} + +func TestIteratorOptions_ElapsedInterval_Default(t *testing.T) { + opt := influxql.IteratorOptions{} + expected := influxql.Interval{Duration: time.Nanosecond} + actual := opt.ElapsedInterval() + if actual != expected { + t.Errorf("expected elapsed interval to be %v, 
got %v", expected, actual) + } +} + +func TestIteratorOptions_ElapsedInterval_GroupBy(t *testing.T) { + opt := influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10, + Offset: 2, + }, + } + expected := influxql.Interval{Duration: time.Nanosecond} + actual := opt.ElapsedInterval() + if actual != expected { + t.Errorf("expected elapsed interval to be %v, got %v", expected, actual) + } +} + +func TestIteratorOptions_ElapsedInterval_Call(t *testing.T) { + opt := influxql.IteratorOptions{ + Expr: &influxql.Call{ + Name: "mean", + Args: []influxql.Expr{ + &influxql.VarRef{Val: "value"}, + &influxql.DurationLiteral{Val: 2 * time.Second}, + }, + }, + Interval: influxql.Interval{ + Duration: 10, + Offset: 2, + }, + } + expected := influxql.Interval{Duration: 2 * time.Second} + actual := opt.ElapsedInterval() + if actual != expected { + t.Errorf("expected elapsed interval to be %v, got %v", expected, actual) + } +} + +// Ensure iterator options can be marshaled to and from a binary format. +func TestIteratorOptions_MarshalBinary(t *testing.T) { + opt := &influxql.IteratorOptions{ + Expr: MustParseExpr("count(value)"), + Aux: []influxql.VarRef{{Val: "a"}, {Val: "b"}, {Val: "c"}}, + Sources: []influxql.Source{ + &influxql.Measurement{Database: "db0", RetentionPolicy: "rp0", Name: "mm0"}, + }, + Interval: influxql.Interval{ + Duration: 1 * time.Hour, + Offset: 20 * time.Minute, + }, + Dimensions: []string{"region", "host"}, + Fill: influxql.NumberFill, + FillValue: float64(100), + Condition: MustParseExpr(`foo = 'bar'`), + StartTime: 1000, + EndTime: 2000, + Ascending: true, + Limit: 100, + Offset: 200, + SLimit: 300, + SOffset: 400, + Dedupe: true, + } + + // Marshal to binary. + buf, err := opt.MarshalBinary() + if err != nil { + t.Fatal(err) + } + + // Unmarshal back to an object. + var other influxql.IteratorOptions + if err := other.UnmarshalBinary(buf); err != nil { + t.Fatal(err) + } else if !reflect.DeepEqual(&other, opt) { + t.Fatalf("unexpected options: %s", spew.Sdump(other)) + } +} + +// Ensure iterator options with a regex measurement can be marshaled. +func TestIteratorOptions_MarshalBinary_Measurement_Regex(t *testing.T) { + opt := &influxql.IteratorOptions{ + Sources: []influxql.Source{ + &influxql.Measurement{Database: "db1", RetentionPolicy: "rp2", Regex: &influxql.RegexLiteral{Val: regexp.MustCompile(`series.+`)}}, + }, + } + + // Marshal to binary. + buf, err := opt.MarshalBinary() + if err != nil { + t.Fatal(err) + } + + // Unmarshal back to an object. + var other influxql.IteratorOptions + if err := other.UnmarshalBinary(buf); err != nil { + t.Fatal(err) + } else if v := other.Sources[0].(*influxql.Measurement).Regex.Val.String(); v != `series.+` { + t.Fatalf("unexpected measurement regex: %s", v) + } +} + +// Ensure iterator can be encoded and decoded over a byte stream. +func TestIterator_EncodeDecode(t *testing.T) { + var buf bytes.Buffer + + // Create an iterator with several points & stats. + itr := &FloatIterator{ + Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 0}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 1, Value: 10}, + }, + stats: influxql.IteratorStats{ + SeriesN: 2, + PointN: 0, + }, + } + + // Encode to the buffer. + enc := influxql.NewIteratorEncoder(&buf) + enc.StatsInterval = 100 * time.Millisecond + if err := enc.EncodeIterator(itr); err != nil { + t.Fatal(err) + } + + // Decode from the buffer. 
+ dec := influxql.NewReaderIterator(&buf, influxql.Float, itr.Stats()) + + // Initial stats should exist immediately. + fdec := dec.(influxql.FloatIterator) + if stats := fdec.Stats(); !reflect.DeepEqual(stats, influxql.IteratorStats{SeriesN: 2, PointN: 0}) { + t.Fatalf("unexpected stats(initial): %#v", stats) + } + + // Read both points. + if p, err := fdec.Next(); err != nil { + t.Fatalf("unexpected error(0): %#v", err) + } else if !reflect.DeepEqual(p, &influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 0}) { + t.Fatalf("unexpected point(0); %#v", p) + } + if p, err := fdec.Next(); err != nil { + t.Fatalf("unexpected error(1): %#v", err) + } else if !reflect.DeepEqual(p, &influxql.FloatPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 1, Value: 10}) { + t.Fatalf("unexpected point(1); %#v", p) + } + if p, err := fdec.Next(); err != nil { + t.Fatalf("unexpected error(eof): %#v", err) + } else if p != nil { + t.Fatalf("unexpected point(eof); %#v", p) + } +} + +// IteratorCreator is a mockable implementation of SelectStatementExecutor.IteratorCreator. +type IteratorCreator struct { + CreateIteratorFn func(opt influxql.IteratorOptions) (influxql.Iterator, error) + FieldDimensionsFn func(sources influxql.Sources) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) + ExpandSourcesFn func(sources influxql.Sources) (influxql.Sources, error) +} + +func (ic *IteratorCreator) CreateIterator(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return ic.CreateIteratorFn(opt) +} + +func (ic *IteratorCreator) FieldDimensions(sources influxql.Sources) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { + return ic.FieldDimensionsFn(sources) +} + +func (ic *IteratorCreator) ExpandSources(sources influxql.Sources) (influxql.Sources, error) { + return ic.ExpandSourcesFn(sources) +} + +// Test implementation of influxql.FloatIterator +type FloatIterator struct { + Points []influxql.FloatPoint + Closed bool + stats influxql.IteratorStats +} + +func (itr *FloatIterator) Stats() influxql.IteratorStats { return itr.stats } +func (itr *FloatIterator) Close() error { itr.Closed = true; return nil } + +// Next returns the next value and shifts it off the beginning of the points slice. +func (itr *FloatIterator) Next() (*influxql.FloatPoint, error) { + if len(itr.Points) == 0 || itr.Closed { + return nil, nil + } + + v := &itr.Points[0] + itr.Points = itr.Points[1:] + return v, nil +} + +func FloatIterators(inputs []*FloatIterator) []influxql.Iterator { + itrs := make([]influxql.Iterator, len(inputs)) + for i := range itrs { + itrs[i] = influxql.Iterator(inputs[i]) + } + return itrs +} + +// GenerateFloatIterator creates a FloatIterator with random data. +func GenerateFloatIterator(rand *rand.Rand, valueN int) *FloatIterator { + const interval = 10 * time.Second + + itr := &FloatIterator{ + Points: make([]influxql.FloatPoint, valueN), + } + + for i := 0; i < valueN; i++ { + // Generate incrementing timestamp with some jitter (1s). 
+ jitter := (rand.Int63n(2) * int64(time.Second)) + timestamp := int64(i)*int64(10*time.Second) + jitter + + itr.Points[i] = influxql.FloatPoint{ + Name: "cpu", + Time: timestamp, + Value: rand.Float64(), + } + } + + return itr +} + +// Test implementation of influxql.IntegerIterator +type IntegerIterator struct { + Points []influxql.IntegerPoint + Closed bool + stats influxql.IteratorStats +} + +func (itr *IntegerIterator) Stats() influxql.IteratorStats { return itr.stats } +func (itr *IntegerIterator) Close() error { itr.Closed = true; return nil } + +// Next returns the next value and shifts it off the beginning of the points slice. +func (itr *IntegerIterator) Next() (*influxql.IntegerPoint, error) { + if len(itr.Points) == 0 || itr.Closed { + return nil, nil + } + + v := &itr.Points[0] + itr.Points = itr.Points[1:] + return v, nil +} + +func IntegerIterators(inputs []*IntegerIterator) []influxql.Iterator { + itrs := make([]influxql.Iterator, len(inputs)) + for i := range itrs { + itrs[i] = influxql.Iterator(inputs[i]) + } + return itrs +} + +// Test implementation of influxql.StringIterator +type StringIterator struct { + Points []influxql.StringPoint + Closed bool + stats influxql.IteratorStats +} + +func (itr *StringIterator) Stats() influxql.IteratorStats { return itr.stats } +func (itr *StringIterator) Close() error { itr.Closed = true; return nil } + +// Next returns the next value and shifts it off the beginning of the points slice. +func (itr *StringIterator) Next() (*influxql.StringPoint, error) { + if len(itr.Points) == 0 || itr.Closed { + return nil, nil + } + + v := &itr.Points[0] + itr.Points = itr.Points[1:] + return v, nil +} + +func StringIterators(inputs []*StringIterator) []influxql.Iterator { + itrs := make([]influxql.Iterator, len(inputs)) + for i := range itrs { + itrs[i] = influxql.Iterator(inputs[i]) + } + return itrs +} + +// Test implementation of influxql.BooleanIterator +type BooleanIterator struct { + Points []influxql.BooleanPoint + Closed bool + stats influxql.IteratorStats +} + +func (itr *BooleanIterator) Stats() influxql.IteratorStats { return itr.stats } +func (itr *BooleanIterator) Close() error { itr.Closed = true; return nil } + +// Next returns the next value and shifts it off the beginning of the points slice. 
+func (itr *BooleanIterator) Next() (*influxql.BooleanPoint, error) {
+	if len(itr.Points) == 0 || itr.Closed {
+		return nil, nil
+	}
+
+	v := &itr.Points[0]
+	itr.Points = itr.Points[1:]
+	return v, nil
+}
+
+func BooleanIterators(inputs []*BooleanIterator) []influxql.Iterator {
+	itrs := make([]influxql.Iterator, len(inputs))
+	for i := range itrs {
+		itrs[i] = influxql.Iterator(inputs[i])
+	}
+	return itrs
+}
diff -Nru influxdb-0.10.0+dfsg1/influxql/linear.go influxdb-1.1.1+dfsg1/influxql/linear.go
--- influxdb-0.10.0+dfsg1/influxql/linear.go 1970-01-01 00:00:00.000000000 +0000
+++ influxdb-1.1.1+dfsg1/influxql/linear.go 2016-12-06 21:36:15.000000000 +0000
@@ -0,0 +1,21 @@
+package influxql
+
+// linearFloat computes the slope of the line between the points (previousTime, previousValue) and (nextTime, nextValue)
+// and returns the value of the point on the line with time windowTime
+// y = mx + b
+func linearFloat(windowTime, previousTime, nextTime int64, previousValue, nextValue float64) float64 {
+	m := (nextValue - previousValue) / float64(nextTime-previousTime) // the slope of the line
+	x := float64(windowTime - previousTime) // how far into the interval we are
+	b := previousValue
+	return m*x + b
+}
+
+// linearInteger computes the slope of the line between the points (previousTime, previousValue) and (nextTime, nextValue)
+// and returns the value of the point on the line with time windowTime
+// y = mx + b
+func linearInteger(windowTime, previousTime, nextTime int64, previousValue, nextValue int64) int64 {
+	m := float64(nextValue-previousValue) / float64(nextTime-previousTime) // the slope of the line
+	x := float64(windowTime - previousTime) // how far into the interval we are
+	b := float64(previousValue)
+	return int64(m*x + b)
+}
diff -Nru influxdb-0.10.0+dfsg1/influxql/monitor.go influxdb-1.1.1+dfsg1/influxql/monitor.go
--- influxdb-0.10.0+dfsg1/influxql/monitor.go 1970-01-01 00:00:00.000000000 +0000
+++ influxdb-1.1.1+dfsg1/influxql/monitor.go 2016-12-06 21:36:15.000000000 +0000
@@ -0,0 +1,23 @@
+package influxql
+
+import "time"
+
+// PointLimitMonitor is a query monitor that exits when the number of points
+// emitted exceeds a threshold.
+func PointLimitMonitor(itrs Iterators, interval time.Duration, limit int) QueryMonitorFunc {
+	return func(closing <-chan struct{}) error {
+		ticker := time.NewTicker(interval)
+		defer ticker.Stop()
+		for {
+			select {
+			case <-ticker.C:
+				stats := itrs.Stats()
+				if stats.PointN >= limit {
+					return ErrMaxSelectPointsLimitExceeded(stats.PointN, limit)
+				}
+			case <-closing:
+				return nil
+			}
+		}
+	}
+}
diff -Nru influxdb-0.10.0+dfsg1/influxql/neldermead/neldermead.go influxdb-1.1.1+dfsg1/influxql/neldermead/neldermead.go
--- influxdb-0.10.0+dfsg1/influxql/neldermead/neldermead.go 1970-01-01 00:00:00.000000000 +0000
+++ influxdb-1.1.1+dfsg1/influxql/neldermead/neldermead.go 2016-12-06 21:36:15.000000000 +0000
@@ -0,0 +1,235 @@
+// This is an implementation of the Nelder-Mead optimization method
+// Based on work by Michael F.
Hutt http://www.mikehutt.com/neldermead.html +package neldermead + +import "math" + +const ( + defaultMaxIterations = 1000 + // reflection coefficient + defaultAlpha = 1.0 + // contraction coefficient + defaultBeta = 0.5 + // expansion coefficient + defaultGamma = 2.0 +) + +type Optimizer struct { + MaxIterations int + // reflection coefficient + Alpha, + // contraction coefficient + Beta, + // expansion coefficient + Gamma float64 +} + +func New() *Optimizer { + return &Optimizer{ + MaxIterations: defaultMaxIterations, + Alpha: defaultAlpha, + Beta: defaultBeta, + Gamma: defaultGamma, + } +} + +func (o *Optimizer) Optimize( + objfunc func([]float64) float64, + start []float64, + epsilon, + scale float64, +) (float64, []float64) { + n := len(start) + + //holds vertices of simplex + v := make([][]float64, n+1) + for i := range v { + v[i] = make([]float64, n) + } + + //value of function at each vertex + f := make([]float64, n+1) + + //reflection - coordinates + vr := make([]float64, n) + + //expansion - coordinates + ve := make([]float64, n) + + //contraction - coordinates + vc := make([]float64, n) + + //centroid - coordinates + vm := make([]float64, n) + + // create the initial simplex + // assume one of the vertices is 0,0 + + pn := scale * (math.Sqrt(float64(n+1)) - 1 + float64(n)) / (float64(n) * math.Sqrt(2)) + qn := scale * (math.Sqrt(float64(n+1)) - 1) / (float64(n) * math.Sqrt(2)) + + for i := 0; i < n; i++ { + v[0][i] = start[i] + } + + for i := 1; i <= n; i++ { + for j := 0; j < n; j++ { + if i-1 == j { + v[i][j] = pn + start[j] + } else { + v[i][j] = qn + start[j] + } + } + } + + // find the initial function values + for j := 0; j <= n; j++ { + f[j] = objfunc(v[j]) + } + + // begin the main loop of the minimization + for itr := 1; itr <= o.MaxIterations; itr++ { + + // find the indexes of the largest and smallest values + vg := 0 + vs := 0 + for i := 0; i <= n; i++ { + if f[i] > f[vg] { + vg = i + } + if f[i] < f[vs] { + vs = i + } + } + // find the index of the second largest value + vh := vs + for i := 0; i <= n; i++ { + if f[i] > f[vh] && f[i] < f[vg] { + vh = i + } + } + + // calculate the centroid + for i := 0; i <= n-1; i++ { + cent := 0.0 + for m := 0; m <= n; m++ { + if m != vg { + cent += v[m][i] + } + } + vm[i] = cent / float64(n) + } + + // reflect vg to new vertex vr + for i := 0; i <= n-1; i++ { + vr[i] = vm[i] + o.Alpha*(vm[i]-v[vg][i]) + } + + // value of function at reflection point + fr := objfunc(vr) + + if fr < f[vh] && fr >= f[vs] { + for i := 0; i <= n-1; i++ { + v[vg][i] = vr[i] + } + f[vg] = fr + } + + // investigate a step further in this direction + if fr < f[vs] { + for i := 0; i <= n-1; i++ { + ve[i] = vm[i] + o.Gamma*(vr[i]-vm[i]) + } + + // value of function at expansion point + fe := objfunc(ve) + + // by making fe < fr as opposed to fe < f[vs], + // Rosenbrocks function takes 63 iterations as opposed + // to 64 when using double variables. 
+ + if fe < fr { + for i := 0; i <= n-1; i++ { + v[vg][i] = ve[i] + } + f[vg] = fe + } else { + for i := 0; i <= n-1; i++ { + v[vg][i] = vr[i] + } + f[vg] = fr + } + } + + // check to see if a contraction is necessary + if fr >= f[vh] { + if fr < f[vg] && fr >= f[vh] { + // perform outside contraction + for i := 0; i <= n-1; i++ { + vc[i] = vm[i] + o.Beta*(vr[i]-vm[i]) + } + } else { + // perform inside contraction + for i := 0; i <= n-1; i++ { + vc[i] = vm[i] - o.Beta*(vm[i]-v[vg][i]) + } + } + + // value of function at contraction point + fc := objfunc(vc) + + if fc < f[vg] { + for i := 0; i <= n-1; i++ { + v[vg][i] = vc[i] + } + f[vg] = fc + } else { + // at this point the contraction is not successful, + // we must halve the distance from vs to all the + // vertices of the simplex and then continue. + + for row := 0; row <= n; row++ { + if row != vs { + for i := 0; i <= n-1; i++ { + v[row][i] = v[vs][i] + (v[row][i]-v[vs][i])/2.0 + } + } + } + f[vg] = objfunc(v[vg]) + f[vh] = objfunc(v[vh]) + } + } + + // test for convergence + fsum := 0.0 + for i := 0; i <= n; i++ { + fsum += f[i] + } + favg := fsum / float64(n+1) + s := 0.0 + for i := 0; i <= n; i++ { + s += math.Pow((f[i]-favg), 2.0) / float64(n) + } + s = math.Sqrt(s) + if s < epsilon { + break + } + } + + // find the index of the smallest value + vs := 0 + for i := 0; i <= n; i++ { + if f[i] < f[vs] { + vs = i + } + } + + parameters := make([]float64, n) + for i := 0; i < n; i++ { + parameters[i] = v[vs][i] + } + + min := objfunc(v[vs]) + + return min, parameters +} diff -Nru influxdb-0.10.0+dfsg1/influxql/neldermead/neldermead_test.go influxdb-1.1.1+dfsg1/influxql/neldermead/neldermead_test.go --- influxdb-0.10.0+dfsg1/influxql/neldermead/neldermead_test.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/neldermead/neldermead_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,64 @@ +package neldermead_test + +import ( + "math" + "testing" + + "github.com/influxdata/influxdb/influxql/neldermead" +) + +func round(num float64, precision float64) float64 { + rnum := num * math.Pow(10, precision) + var tnum float64 + if rnum < 0 { + tnum = math.Floor(rnum - 0.5) + } else { + tnum = math.Floor(rnum + 0.5) + } + rnum = tnum / math.Pow(10, precision) + return rnum +} + +func almostEqual(a, b, e float64) bool { + return math.Abs(a-b) < e +} + +func Test_Optimize(t *testing.T) { + + constraints := func(x []float64) { + for i := range x { + x[i] = round(x[i], 5) + } + } + // 100*(b-a^2)^2 + (1-a)^2 + // + // Obvious global minimum at (a,b) = (1,1) + // + // Useful visualization: + // https://www.wolframalpha.com/input/?i=minimize(100*(b-a%5E2)%5E2+%2B+(1-a)%5E2) + f := func(x []float64) float64 { + constraints(x) + // a = x[0] + // b = x[1] + return 100*(x[1]-x[0]*x[0])*(x[1]-x[0]*x[0]) + (1.0-x[0])*(1.0-x[0]) + } + + start := []float64{-1.2, 1.0} + + opt := neldermead.New() + epsilon := 1e-5 + min, parameters := opt.Optimize(f, start, epsilon, 1) + + if !almostEqual(min, 0, epsilon) { + t.Errorf("unexpected min: got %f exp 0", min) + } + + if !almostEqual(parameters[0], 1, 1e-2) { + t.Errorf("unexpected parameters[0]: got %f exp 1", parameters[0]) + } + + if !almostEqual(parameters[1], 1, 1e-2) { + t.Errorf("unexpected parameters[1]: got %f exp 1", parameters[1]) + } + +} diff -Nru influxdb-0.10.0+dfsg1/influxql/NOTES influxdb-1.1.1+dfsg1/influxql/NOTES --- influxdb-0.10.0+dfsg1/influxql/NOTES 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/NOTES 1970-01-01 00:00:00.000000000 +0000 @@ -1,682 
+0,0 @@ -SELECT mean(value) FROM cpu -WHERE service = 'redis' -GROUP BY region, time(10m) - - -based on group by, get unique tag sets for region -cpu region=uswest -> get series ids from cpu where and -cpu region=useast -> get series ids from cpu where and - -for each shard group in time range { - for each group by tagset { - shardItrs := map[shard]itr - for id := range seriesIds { - shard := group.shardForId(id) - shardItrs[shard].addId(id) - } - for _, itr := range shardItrs { - itr.tags = tagset - itr.name = cpu - } - } -} - -(host = 'serverA' AND value > 100) OR (region = 'uswest' AND value < 10) - -value > 100 OR value < 10 (host=serverA, region=uswest) -value < 10 (host!=serverA, region=uswest) -value > 100 - - -filters := make(map[whereCond]seriesIds) -filters := make(map[uint32]whereCond) -seriesIds - - -select mean(value) from foo WHERE someField = 'important' group by time(5m) - -=================== - - - -select derivative(mean(value)) -from cpu -group by time(5m) - -select mean(value) from cpu group by time(5m) -select top(10, value) from cpu group by host where time > now() - 1h - -this query uses this type of cycle --------REMOTE HOST ------------- -----HOST THAT GOT QUERY --- -map -> reduce -> combine -> map -> reduce -> combine -> user - -select mean(value) cpu group by time(5m), host where time > now() -4h -map -> reduce -> combine -> user -map -> reduce -> map -> reduce -> combine -> user -map -> reduce -> combine -> map -> reduce -> combine -> user - - -select value from -( - select mean(value) AS value FROM cpu GROUP BY time(5m) -) - -[ -{ - name: cpu, - tags: { - host: servera, - }, - columns: [time, mean], - values : [ - [23423423, 88.8] - ] -}, -{ - name: cpu, - tags: { - host: serverb, - } -} -] - - -================================================================================ - -// list series -> -/* -[ - { - "name": "cpu", - "columns": ["id", "region", "host"], - "values": [ - 1, "uswest", "servera", - 2, "uswest", "serverb" - ] - }, - { - "" - } -] - -list series where region = 'uswest' - -list tags where name = 'cpu' - -list tagKeys where name = 'cpu' - -list series where name = 'cpu' and region = 'uswest' - -select distinct(region) from cpu - -list names -list tagKeys - -list tagValeus where tagKey = 'region' and time > now() -1h - -select a.value, b.value from a join b where a.user_id == 100 - select a.value from a where a.user_id == 100 - select b.value from b - - 3 1 2 -select sum(a.value) + (sum(b.value) / min(b.value)) from a join b group by region - - select suM(a.value) from a group by time(5m) - select sum(b.value) from b group by time(5m) - -execute sum MR on series [23, 65, 88, 99, 101, 232] - -map -> 1 tick per 5m -reduce -> combines ticks per 5m interval -> outputs - -planner -> take reduce output per 5m interval from the two reducers - and combine with the join function, which is + - -[1,/,2,+,3] - - - -for v := s[0].Next(); v != nil; v = 2[0].Next() { - var result interface{} - for i := 1; i < len(s); i += 2 { - / it's an operator - if i % 2 == 1 { - - } - } -} - -select count(distinct(host)) from cpu where time > now() - 5m - -type mapper interface { - Map(iterator) -} - -type floatCountMapper struct {} -func(m *floatCountMapper) Map(i Iterator) { - itr := i.(*floatIterator) -} - -type Iterator interface { - itr() -} - -type iterator struct { - cursor *bolt.Cursor - timeBucket time.Time - name string - seriesID uint32 - tags map[string]string - fieldID uint8 - where *WhereClause -} - -func (i *intIterator) itr() {} -func (i *intIterator) 
Next() (k int64, v float64) { - // loop through bolt cursor applying where clause and yield next point - // if cursor is at end or time is out of range, yield nil -} - -*/ - - - - - -field: ipaddress - -select top(10, count, ipaddress) from hits group by time(5m), host - -map -> 10 records, , - -reducer -> take in all map outputs for each 5m bucket - combine them, sort, take out the top 10 - output -> 10 records, count, ipaddresses, time - - -========== - -select top(10, count, host) from hits group by time(5m) - -select host, value from cpu where time > now() - 1h - -select last(value) from cpu group by time(auto), host fill(previous) where time > now() - 1h - -select sum(value) from cpu group by host where time > now() - 1h - - - - - - -select sum(value) from cpu where time > now() - 1h - -select * from a; - -[ -{ - "name": "cpu", - "tags": { - "host": "servera" - }, - "fields": [ - "time", - "count", - "ipaddress" - ] - "values": [ - [t, v, "123.23.22.2"], - [t, v, "192.232.2.2"], - - ] -}, -{ - "name": "cpu", - "tags": { - "host": "serverb" - }, - "values": [ - [t, v], - [t + 1, v], - - ] -}, -] - -[t, v, "servera"] -[t, v, "serverb"] -[t+1, v, "servera"] -[t+1, v, "serverb"] - -====== - -a INNER JOIN b - -- planner always has "group by" - -select count(errors.value) / count(requests.value) as error_rate -from errors join requests as "mysuperseries" -group by time(5m) -fill(previous) -where time > now() - 4h - -select mean(value) as cpu_mean from cpu group by time(5m) where host = 'servera' - -select count(value) from errors group by time(5m) fill(previous) where.. -select count(value) from requests group by time(5m) fill(previ... - -{ - "name": "errors.requests", - "tags": {}, - "fields": ["time", "errors.count", "requests.count"], - "values": [ - [t, n, m] - ] -} - - -a MERGE b - -a - t -b - t -a - t + 1 -b - t + 1 -b - t + 2 -a - t + 3 - - - -select value from cpu -select mean(value) from cpu group by time(5m) - -select first(value) from cpu - - -===== - -1. Group by time -2. Group by -3. 
Raw - -====== - -SELECT sum(value) FROM myseries - -host=servera -host=serverb - -{"host":"servera", "value":100} -{"host":"serverb", "value":"hello!"} - - -series = -series = seriesID - -seriesID -> name - -name has_many seriesIDs -name has_many fields - -field -> (type, id) - - -> (type, id) - - - -> fieldValues - - -field - -type topMapper struct { - count int -} - -func newTopMaper(count int) { - -} - -func (t *topCountMapper) Map(i Iterator) { - topValues := make(map[string]int) - for p := i.Next(); p != nil; p = i.Next() { - topValues[p.String()] += 1 - } - for k, v := range topValues { - t.job.Emit(k, v) - } -} - -type topCountReducer struct { - count int -} - -func (r *topCountReducer) Reduce(i Iterator) { - realzTop10 := make(map[string]int) - for v := i.Next(); v != nil; v = i.Next() { - top10 := v.(map[string]int) - for k, n := range top10 { - realzTop10[k] += n - } - } - realyrealTop10 := make(map[string]int) - // do sorty magic on reazTop10 and set realyreal - r.job.Emit(realyrealTop10) -} - -type Transformer interface { - Transform(interface{}) Series -} - -type ReduceOutput struct { - values [][]interface{} - fieldIDs [] -} - -// for topCountReducer ReduceOutput would look like -// values = [t, c, "some string"] -// fieldIDs = [0, 0, 3] - -SELECT val1, val2 FROM abc - - -select mean(value) from cpu where region='uswest' group by time(5m), host - -2000 series - -200 series to each machine - - - -================================================================================ - - - -type Mapper interface { - Map(Iterator) -} - - -type countMapper struct {} - -// Iterator is the entire series if not an aggregate query -// or iterator is the entire time bucket if an aggregate query -func (m *sumMapper) Map(i Iterator) { - var sum int - for p := i.Next(); p != nil; p = i.Next() { - sum += p.Float() - } - m.Emitter.Emit(k, sum) -} - -type Point interface { - String(name) - Int(name) -} - -type cursorIterator struct { - Cursor *bolt.Cursor - FieldID uint8 - Value []byte -} - -func (i cursorIterator) Next() Point { - _, i.Value = i.Cursor.Next() - return byteSlicePoint(i.Value) -} - -type byteSlicePoint []byte - -func (p byteSlicePoint) String() string { - // unmarshal from byte slice. 
-} - -/* -{ - "name": "foo", - "fields": { - "value": 23.2, - "user_id": 23 - }, - "tags": { - - } -} -*/ - - -CNT ID0 VALUEVALUEVALUEVALUEVALUEVALUEVALUEVALU -0001 0000 0000 0000 0000 0000 0000 0000 0000 0000 - -CNT ID0 ID1 ID2 FLOATFLOA STRINGSTR STRINGSTR -0002 0001 0002 0003 0000 0000 0000 0000 0000 0000 - - - -// SELECT count() FROM cpu GROUP BY host - -// SELECT mean(value) from cpu where region = 'uswest' - -// SELECT derivative(value) from redis_key_count GROUP BY time(5m) - - -// SELECT host, mean(value) -// FROM cpu -// GROUP BY host -// HAVING top(20, mean) -// WHERE time > now() - 1h -// AND region = 'uswest' - -// SELECT ipaddress, count(ipaddress) -// FROM hits -// GROUP BY ipaddress -// HAVING top(10, count) -// WHERE time > now() - 1h - - -series := meta.DistinctTagValues("cpu", "host") - -tye Series struct { - name string - fields map[uint8]string -} - -type SeriesData struct { - ID - tags map[string]string -} - - - -mrJobs := make([]*MRJob, 0, len(series)) -for _, s := range series { - j := NewMRJob(s) - mrJobs = append(mrJobs, j) - j.Execute() -} - -for _, j := range mrJobs { - // pull in results - // construct series object with same tags as series -} - - -================================================================================ - - - -type Mapper interface { - Map(Iterator) -} - - -type countMapper struct {} - -// Iterator is the entire series if not an aggregate query -// or iterator is the entire time bucket if an aggregate query -func (m *sumMapper) Map(i Iterator) { - var sum int - for p := i.Next(); p != nil; p = i.Next() { - sum += p.Float() - } - m.Emitter.Emit(k, sum) -} - -type Point interface { - String(name) - Int(name) -} - -type cursorIterator struct { - Cursor *bolt.Cursor - FieldID uint8 - Value []byte -} - -func (i cursorIterator) Next() Point { - _, i.Value = i.Cursor.Next() - return byteSlicePoint(i.Value) -} - -type byteSlicePoint []byte - -func (p byteSlicePoint) String() string { - // unmarshal from byte slice. 
-} - -/* -{ - "name": "foo", - "fields": { - "value": 23.2, - "user_id": 23 - }, - "tags": { - - } -} -*/ - - -CNT ID0 VALUEVALUEVALUEVALUEVALUEVALUEVALUEVALU -0001 0000 0000 0000 0000 0000 0000 0000 0000 0000 - -CNT ID0 ID1 ID2 FLOATFLOA STRINGSTR STRINGSTR -0002 0001 0002 0003 0000 0000 0000 0000 0000 0000 - - - -// SELECT count() FROM cpu GROUP BY host - -// SELECT mean(value) from cpu where region = 'uswest' - -// SELECT derivative(value) from redis_key_count GROUP BY time(5m) - - -// SELECT host, mean(value) -// FROM cpu -// GROUP BY host -// HAVING top(20, mean) -// WHERE time > now() - 1h -// AND region = 'uswest' - -// SELECT ipaddress, count(ipaddress) -// FROM hits -// GROUP BY ipaddress -// HAVING top(10, count) -// WHERE time > now() - 1h - - -series := meta.DistinctTagValues("cpu", "host") - -mrJobs := make([]*MRJob, 0, len(series)) -for _, s := range series { - j := NewMRJob(s) - mrJobs = append(mrJobs, j) - j.Execute() -} - -for _, j := range mrJobs { - // pull in results - // construct series object with same tags as series -} - - -================================================================================ - - -type Iterator interface { - Next() (interface{}, bool) -} - -type iteratorCounter struct { - iterator Iterator -} - -func (iteratorCounter) Next() { - -} - - -SELECT max(a.value), min(a.value), max(b.value) -FROM a, b -WHERE a.host = 'influxdb.org' - - -grouper { - []Iterator -} - - -SELECT max(a.value) FROM a WHERE a.host = 'influxdb.org' --> 1 value -SELECT min(a.value) FROM a WHERE a.host = 'influxdb.org' --> 1 value -SELECT max(b.value) FROM b --> 1 value - - -SELECT max(a.value) FROM a GROUP BY time WHERE a.host = 'influxdb.org' --> key,value - - -timeGrouper { - []Iterator -} - - -type maxMapper struct { -} - -IntervalIterator { -} - - - -maxMapper.Map(Iterator) - - - -- GROUP BY time -- GROUP BY time, -- GROUP BY - - - - - -COUNT(field) -MIN(field) -MAX(field) -MEAN(field) -MODE(field) -MEDIAN(field) -COUNT(DISTINCT field) -PERCENTILE(field, N) -HISTOGRAM(field [, bucketSize]) -DERIVATIVE(field) -SUM(field) -STDDEV(field) -FIRST(field) -LAST(field) -DIFFERENCE(field) -TOP(field, N) -BOTTOM(field, N) <----- multivalue - - - -================================================================================ diff -Nru influxdb-0.10.0+dfsg1/influxql/parser.go influxdb-1.1.1+dfsg1/influxql/parser.go --- influxdb-0.10.0+dfsg1/influxql/parser.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/parser.go 2016-12-06 21:36:15.000000000 +0000 @@ -23,7 +23,8 @@ // Parser represents an InfluxQL parser. type Parser struct { - s *bufScanner + s *bufScanner + params map[string]interface{} } // NewParser returns a new instance of Parser. @@ -31,6 +32,11 @@ return &Parser{s: newBufScanner(r)} } +// SetParams sets the parameters that will be used for any bound parameter substitutions. +func (p *Parser) SetParams(params map[string]interface{}) { + p.params = params +} + // ParseQuery parses a query string and returns its AST representation. func ParseQuery(s string) (*Query, error) { return NewParser(strings.NewReader(s)).ParseQuery() } @@ -51,17 +57,29 @@ // ParseExpr parses an expression string and returns its AST representation. func ParseExpr(s string) (Expr, error) { return NewParser(strings.NewReader(s)).ParseExpr() } +// MustParseExpr parses an expression string and returns its AST. Panic on error. 
+func MustParseExpr(s string) Expr { + expr, err := ParseExpr(s) + if err != nil { + panic(err.Error()) + } + return expr +} + // ParseQuery parses an InfluxQL string and returns a Query AST object. func (p *Parser) ParseQuery() (*Query, error) { var statements Statements - var semi bool + semi := true for { - if tok, _, _ := p.scanIgnoreWhitespace(); tok == EOF { + if tok, pos, lit := p.scanIgnoreWhitespace(); tok == EOF { return &Query{Statements: statements}, nil - } else if !semi && tok == SEMICOLON { + } else if tok == SEMICOLON { semi = true } else { + if !semi { + return nil, newParseError(tokstr(tok, lit), []string{";"}, pos) + } p.unscan() s, err := p.ParseStatement() if err != nil { @@ -96,8 +114,10 @@ return p.parseAlterStatement() case SET: return p.parseSetPasswordUserStatement() + case KILL: + return p.parseKillQueryStatement() default: - return nil, newParseError(tokstr(tok, lit), []string{"SELECT", "DELETE", "SHOW", "CREATE", "DROP", "GRANT", "REVOKE", "ALTER", "SET"}, pos) + return nil, newParseError(tokstr(tok, lit), []string{"SELECT", "DELETE", "SHOW", "CREATE", "DROP", "GRANT", "REVOKE", "ALTER", "SET", "KILL"}, pos) } } @@ -112,8 +132,6 @@ return p.parseGrantsForUserStatement() case DATABASES: return p.parseShowDatabasesStatement() - case SERVERS: - return p.parseShowServersStatement() case FIELD: tok, pos, lit := p.scanIgnoreWhitespace() if tok == KEYS { @@ -122,6 +140,8 @@ return nil, newParseError(tokstr(tok, lit), []string{"KEYS"}, pos) case MEASUREMENTS: return p.parseShowMeasurementsStatement() + case QUERIES: + return p.parseShowQueriesStatement() case RETENTION: tok, pos, lit := p.scanIgnoreWhitespace() if tok == POLICIES { @@ -162,9 +182,9 @@ "FIELD", "GRANTS", "MEASUREMENTS", + "QUERIES", "RETENTION", "SERIES", - "SERVERS", "TAG", "USERS", "STATS", @@ -205,28 +225,29 @@ // This function assumes the DROP token has already been consumed. func (p *Parser) parseDropStatement() (Statement, error) { tok, pos, lit := p.scanIgnoreWhitespace() - if tok == SERIES { - return p.parseDropSeriesStatement() - } else if tok == MEASUREMENT { - return p.parseDropMeasurementStatement() - } else if tok == CONTINUOUS { + switch tok { + case CONTINUOUS: return p.parseDropContinuousQueryStatement() - } else if tok == DATABASE { + case DATABASE: return p.parseDropDatabaseStatement() - } else if tok == RETENTION { + case MEASUREMENT: + return p.parseDropMeasurementStatement() + case RETENTION: if tok, pos, lit := p.scanIgnoreWhitespace(); tok != POLICY { return nil, newParseError(tokstr(tok, lit), []string{"POLICY"}, pos) } return p.parseDropRetentionPolicyStatement() - } else if tok == USER { - return p.parseDropUserStatement() - } else if tok == META || tok == DATA { - return p.parseDropServerStatement(tok) - } else if tok == SUBSCRIPTION { + case SERIES: + return p.parseDropSeriesStatement() + case SHARD: + return p.parseDropShardStatement() + case SUBSCRIPTION: return p.parseDropSubscriptionStatement() + case USER: + return p.parseDropUserStatement() + default: + return nil, newParseError(tokstr(tok, lit), []string{"CONTINUOUS", "MEASUREMENT", "RETENTION", "SERIES", "SHARD", "SUBSCRIPTION", "USER"}, pos) } - - return nil, newParseError(tokstr(tok, lit), []string{"SERIES", "CONTINUOUS", "MEASUREMENT", "SERVER", "SUBSCRIPTION"}, pos) } // parseAlterStatement parses a string and returns an alter statement. @@ -275,6 +296,30 @@ return stmt, nil } +// parseKillQueryStatement parses a string and returns a kill statement. 
+// This function assumes the KILL token has already been consumed. +func (p *Parser) parseKillQueryStatement() (*KillQueryStatement, error) { + if err := p.parseTokens([]Token{QUERY}); err != nil { + return nil, err + } + + qid, err := p.parseUInt64() + if err != nil { + return nil, err + } + + var host string + if tok, _, _ := p.scanIgnoreWhitespace(); tok == ON { + host, err = p.parseIdent() + if err != nil { + return nil, err + } + } else { + p.unscan() + } + return &KillQueryStatement{QueryID: qid, Host: host}, nil +} + // parseCreateSubscriptionStatement parses a string and returns a CreatesubScriptionStatement. // This function assumes the "CREATE SUBSCRIPTION" tokens have already been consumed. func (p *Parser) parseCreateSubscriptionStatement() (*CreateSubscriptionStatement, error) { @@ -355,8 +400,7 @@ stmt.Database = ident // Parse required DURATION token. - tok, pos, lit := p.scanIgnoreWhitespace() - if tok != DURATION { + if tok, pos, lit := p.scanIgnoreWhitespace(); tok != DURATION { return nil, newParseError(tokstr(tok, lit), []string{"DURATION"}, pos) } @@ -368,7 +412,7 @@ stmt.Duration = d // Parse required REPLICATION token. - if tok, pos, lit = p.scanIgnoreWhitespace(); tok != REPLICATION { + if tok, pos, lit := p.scanIgnoreWhitespace(); tok != REPLICATION { return nil, newParseError(tokstr(tok, lit), []string{"REPLICATION"}, pos) } @@ -379,11 +423,25 @@ } stmt.Replication = n + // Parse optional SHARD token. + if tok, _, _ := p.scanIgnoreWhitespace(); tok == SHARD { + if tok, pos, lit := p.scanIgnoreWhitespace(); tok != DURATION { + return nil, newParseError(tokstr(tok, lit), []string{"DURATION"}, pos) + } + d, err := p.parseDuration() + if err != nil { + return nil, err + } + stmt.ShardGroupDuration = d + } else { + p.unscan() + } + // Parse optional DEFAULT token. - if tok, pos, lit = p.scanIgnoreWhitespace(); tok == DEFAULT { + if tok, _, _ := p.scanIgnoreWhitespace(); tok == DEFAULT { stmt.Default = true - } else if tok != EOF && tok != SEMICOLON { - return nil, newParseError(tokstr(tok, lit), []string{"DEFAULT"}, pos) + } else { + p.unscan() } return stmt, nil @@ -416,11 +474,18 @@ } stmt.Database = ident - // Loop through option tokens (DURATION, REPLICATION, DEFAULT, etc.). - maxNumOptions := 3 + // Loop through option tokens (DURATION, REPLICATION, SHARD DURATION, DEFAULT, etc.). + found := make(map[Token]struct{}) Loop: - for i := 0; i < maxNumOptions; i++ { + for { tok, pos, lit := p.scanIgnoreWhitespace() + if _, ok := found[tok]; ok { + return nil, &ParseError{ + Message: fmt.Sprintf("found duplicate %s option", tok), + Pos: pos, + } + } + switch tok { case DURATION: d, err := p.parseDuration() @@ -434,15 +499,27 @@ return nil, err } stmt.Replication = &n + case SHARD: + tok, pos, lit := p.scanIgnoreWhitespace() + if tok == DURATION { + d, err := p.parseDuration() + if err != nil { + return nil, err + } + stmt.ShardGroupDuration = &d + } else { + return nil, newParseError(tokstr(tok, lit), []string{"DURATION"}, pos) + } case DEFAULT: stmt.Default = true default: - if i < 1 { - return nil, newParseError(tokstr(tok, lit), []string{"DURATION", "RETENTION", "DEFAULT"}, pos) + if len(found) == 0 { + return nil, newParseError(tokstr(tok, lit), []string{"DURATION", "REPLICATION", "SHARD", "DEFAULT"}, pos) } p.unscan() break Loop } + found[tok] = struct{}{} } return stmt, nil @@ -451,13 +528,8 @@ // parseInt parses a string and returns an integer literal. 
func (p *Parser) parseInt(min, max int) (int, error) { tok, pos, lit := p.scanIgnoreWhitespace() - if tok != NUMBER { - return 0, newParseError(tokstr(tok, lit), []string{"number"}, pos) - } - - // Return an error if the number has a fractional part. - if strings.Contains(lit, ".") { - return 0, &ParseError{Message: "number must be an integer", Pos: pos} + if tok != INTEGER { + return 0, newParseError(tokstr(tok, lit), []string{"integer"}, pos) } // Convert string to int. @@ -477,8 +549,8 @@ // parseUInt32 parses a string and returns a 32-bit unsigned integer literal. func (p *Parser) parseUInt32() (uint32, error) { tok, pos, lit := p.scanIgnoreWhitespace() - if tok != NUMBER { - return 0, newParseError(tokstr(tok, lit), []string{"number"}, pos) + if tok != INTEGER { + return 0, newParseError(tokstr(tok, lit), []string{"integer"}, pos) } // Convert string to unsigned 32-bit integer @@ -493,8 +565,8 @@ // parseUInt64 parses a string and returns a 64-bit unsigned integer literal. func (p *Parser) parseUInt64() (uint64, error) { tok, pos, lit := p.scanIgnoreWhitespace() - if tok != NUMBER { - return 0, newParseError(tokstr(tok, lit), []string{"number"}, pos) + if tok != INTEGER { + return 0, newParseError(tokstr(tok, lit), []string{"integer"}, pos) } // Convert string to unsigned 64-bit integer @@ -510,7 +582,7 @@ // This function assumes the DURATION token has already been consumed. func (p *Parser) parseDuration() (time.Duration, error) { tok, pos, lit := p.scanIgnoreWhitespace() - if tok != DURATION_VAL && tok != INF { + if tok != DURATIONVAL && tok != INF { return 0, newParseError(tokstr(tok, lit), []string{"duration"}, pos) } @@ -945,33 +1017,53 @@ return t, nil } -// parseDeleteStatement parses a delete string and returns a DeleteStatement. +// parseDeleteStatement parses a string and returns a delete statement. // This function assumes the DELETE token has already been consumed. -func (p *Parser) parseDeleteStatement() (*DeleteStatement, error) { - // TODO remove and do not skip test once we wire up DELETE FROM. - // See issues https://github.com/influxdb/influxdb/issues/1647 - // and https://github.com/influxdb/influxdb/issues/4404 - return nil, errors.New("DELETE FROM is currently not supported. Use DROP SERIES or DROP MEASUREMENT instead") - //stmt := &DeleteStatement{} - - //// Parse source - //if tok, pos, lit := p.scanIgnoreWhitespace(); tok != FROM { - // return nil, newParseError(tokstr(tok, lit), []string{"FROM"}, pos) - //} - //source, err := p.parseSource() - //if err != nil { - // return nil, err - //} - //stmt.Source = source - - //// Parse condition: "WHERE EXPR". - //condition, err := p.parseCondition() - //if err != nil { - // return nil, err - //} - //stmt.Condition = condition +func (p *Parser) parseDeleteStatement() (Statement, error) { + stmt := &DeleteSeriesStatement{} + var err error + + tok, pos, lit := p.scanIgnoreWhitespace() - //return stmt, nil + if tok == FROM { + // Parse source. + if stmt.Sources, err = p.parseSources(); err != nil { + return nil, err + } + + var err error + WalkFunc(stmt.Sources, func(n Node) { + if t, ok := n.(*Measurement); ok { + // Don't allow database or retention policy in from clause for delete + // statement. They apply to the selected database across all retention + // policies. 
+ if t.Database != "" { + err = &ParseError{Message: "database not supported"} + } + if t.RetentionPolicy != "" { + err = &ParseError{Message: "retention policy not supported"} + } + } + }) + if err != nil { + return nil, err + } + + } else { + p.unscan() + } + + // Parse condition: "WHERE EXPR". + if stmt.Condition, err = p.parseCondition(); err != nil { + return nil, err + } + + // If they didn't provide a FROM or a WHERE, this query is invalid + if stmt.Condition == nil && stmt.Sources == nil { + return nil, newParseError(tokstr(tok, lit), []string{"FROM", "WHERE"}, pos) + } + + return stmt, nil } // parseShowSeriesStatement parses a string and returns a ShowSeriesStatement. @@ -980,6 +1072,17 @@ stmt := &ShowSeriesStatement{} var err error + // Parse optional ON clause. + if tok, _, _ := p.scanIgnoreWhitespace(); tok == ON { + // Parse the database. + stmt.Database, err = p.parseIdent() + if err != nil { + return nil, err + } + } else { + p.unscan() + } + // Parse optional FROM. if tok, _, _ := p.scanIgnoreWhitespace(); tok == FROM { if stmt.Sources, err = p.parseSources(); err != nil { @@ -1018,6 +1121,17 @@ stmt := &ShowMeasurementsStatement{} var err error + // Parse optional ON clause. + if tok, _, _ := p.scanIgnoreWhitespace(); tok == ON { + // Parse the database. + stmt.Database, err = p.parseIdent() + if err != nil { + return nil, err + } + } else { + p.unscan() + } + // Parse optional WITH clause. if tok, _, _ := p.scanIgnoreWhitespace(); tok == WITH { // Parse required MEASUREMENT token. @@ -1064,22 +1178,28 @@ return stmt, nil } +// parseShowQueriesStatement parses a string and returns a ShowQueriesStatement. +// This function assumes the "SHOW QUERIES" tokens have been consumed. +func (p *Parser) parseShowQueriesStatement() (*ShowQueriesStatement, error) { + return &ShowQueriesStatement{}, nil +} + // parseShowRetentionPoliciesStatement parses a string and returns a ShowRetentionPoliciesStatement. // This function assumes the "SHOW RETENTION POLICIES" tokens have been consumed. func (p *Parser) parseShowRetentionPoliciesStatement() (*ShowRetentionPoliciesStatement, error) { stmt := &ShowRetentionPoliciesStatement{} // Expect an "ON" keyword. - if tok, pos, lit := p.scanIgnoreWhitespace(); tok != ON { - return nil, newParseError(tokstr(tok, lit), []string{"ON"}, pos) - } - - // Parse the database. - ident, err := p.parseIdent() - if err != nil { - return nil, err + if tok, _, _ := p.scanIgnoreWhitespace(); tok == ON { + // Parse the database. + ident, err := p.parseIdent() + if err != nil { + return nil, err + } + stmt.Database = ident + } else { + p.unscan() } - stmt.Database = ident return stmt, nil } @@ -1090,6 +1210,17 @@ stmt := &ShowTagKeysStatement{} var err error + // Parse optional ON clause. + if tok, _, _ := p.scanIgnoreWhitespace(); tok == ON { + // Parse the database. + stmt.Database, err = p.parseIdent() + if err != nil { + return nil, err + } + } else { + p.unscan() + } + // Parse optional source. if tok, _, _ := p.scanIgnoreWhitespace(); tok == FROM { if stmt.Sources, err = p.parseSources(); err != nil { @@ -1138,6 +1269,17 @@ stmt := &ShowTagValuesStatement{} var err error + // Parse optional ON clause. + if tok, _, _ := p.scanIgnoreWhitespace(); tok == ON { + // Parse the database. + stmt.Database, err = p.parseIdent() + if err != nil { + return nil, err + } + } else { + p.unscan() + } + // Parse optional source. 
if tok, _, _ := p.scanIgnoreWhitespace(); tok == FROM { if stmt.Sources, err = p.parseSources(); err != nil { @@ -1148,7 +1290,7 @@ } // Parse required WITH KEY. - if stmt.TagKeys, err = p.parseTagKeys(); err != nil { + if stmt.Op, stmt.TagKeyExpr, err = p.parseTagKeyExpr(); err != nil { return nil, err } @@ -1176,44 +1318,52 @@ } // parseTagKeys parses a string and returns a list of tag keys. -func (p *Parser) parseTagKeys() ([]string, error) { +func (p *Parser) parseTagKeyExpr() (Token, Literal, error) { var err error // Parse required WITH KEY tokens. if err := p.parseTokens([]Token{WITH, KEY}); err != nil { - return nil, err + return 0, nil, err } - var tagKeys []string - - // Parse required IN or EQ token. - if tok, pos, lit := p.scanIgnoreWhitespace(); tok == IN { + // Parse required IN, EQ, or EQREGEX token. + tok, pos, lit := p.scanIgnoreWhitespace() + if tok == IN { // Parse required ( token. if tok, pos, lit = p.scanIgnoreWhitespace(); tok != LPAREN { - return nil, newParseError(tokstr(tok, lit), []string{"("}, pos) + return 0, nil, newParseError(tokstr(tok, lit), []string{"("}, pos) } // Parse tag key list. + var tagKeys []string if tagKeys, err = p.parseIdentList(); err != nil { - return nil, err + return 0, nil, err } // Parse required ) token. if tok, pos, lit = p.scanIgnoreWhitespace(); tok != RPAREN { - return nil, newParseError(tokstr(tok, lit), []string{")"}, pos) + return 0, nil, newParseError(tokstr(tok, lit), []string{")"}, pos) } - } else if tok == EQ { + return IN, &ListLiteral{Vals: tagKeys}, nil + } else if tok == EQ || tok == NEQ { // Parse required tag key. ident, err := p.parseIdent() if err != nil { - return nil, err + return 0, nil, err } - tagKeys = append(tagKeys, ident) - } else { - return nil, newParseError(tokstr(tok, lit), []string{"IN", "="}, pos) + return tok, &StringLiteral{Val: ident}, nil + } else if tok == EQREGEX || tok == NEQREGEX { + re, err := p.parseRegex() + if err != nil { + return 0, nil, err + } else if re == nil { + // parseRegex can return an empty type, but we need it to be present + tok, pos, lit := p.scanIgnoreWhitespace() + return 0, nil, newParseError(tokstr(tok, lit), []string{"regex"}, pos) + } + return tok, re, nil } - - return tagKeys, nil + return 0, nil, newParseError(tokstr(tok, lit), []string{"IN", "=", "=~"}, pos) } // parseShowUsersStatement parses a string and returns a ShowUsersStatement. @@ -1235,6 +1385,17 @@ stmt := &ShowFieldKeysStatement{} var err error + // Parse optional ON clause. + if tok, _, _ := p.scanIgnoreWhitespace(); tok == ON { + // Parse the database. + stmt.Database, err = p.parseIdent() + if err != nil { + return nil, err + } + } else { + p.unscan() + } + // Parse optional source. if tok, _, _ := p.scanIgnoreWhitespace(); tok == FROM { if stmt.Sources, err = p.parseSources(); err != nil { @@ -1290,6 +1451,24 @@ if stmt.Sources, err = p.parseSources(); err != nil { return nil, err } + + var err error + WalkFunc(stmt.Sources, func(n Node) { + if t, ok := n.(*Measurement); ok { + // Don't allow database or retention policy in from clause for delete + // statement. They apply to the selected database across all retention + // policies. + if t.Database != "" { + err = &ParseError{Message: "database not supported"} + } + if t.RetentionPolicy != "" { + err = &ParseError{Message: "retention policy not supported"} + } + } + }) + if err != nil { + return nil, err + } } else { p.unscan() } @@ -1307,27 +1486,18 @@ return stmt, nil } -// parseDropServerStatement parses a string and returns a DropServerStatement. 
-// This function assumes the "DROP " tokens have already been consumed. -func (p *Parser) parseDropServerStatement(tok Token) (*DropServerStatement, error) { - // Parse the SERVER token - if tok, pos, lit := p.scanIgnoreWhitespace(); tok != SERVER { - return nil, newParseError(tokstr(tok, lit), []string{"SERVER"}, pos) - } - - s := &DropServerStatement{} +// parseDropShardStatement parses a string and returns a +// DropShardStatement. This function assumes the "DROP SHARD" tokens +// have already been consumed. +func (p *Parser) parseDropShardStatement() (*DropShardStatement, error) { var err error + stmt := &DropShardStatement{} - if tok == META { - s.Meta = true - } - - // Parse the server's ID. - if s.NodeID, err = p.parseUInt64(); err != nil { + // Parse the ID of the shard to be dropped. + if stmt.ID, err = p.parseUInt64(); err != nil { return nil, err } - - return s, nil + return stmt, nil } // parseShowContinuousQueriesStatement parses a string and returns a ShowContinuousQueriesStatement. @@ -1343,13 +1513,6 @@ return stmt, nil } -// parseShowServersStatement parses a string and returns a ShowServersStatement. -// This function assumes the "SHOW SERVERS" tokens have already been consumed. -func (p *Parser) parseShowServersStatement() (*ShowServersStatement, error) { - stmt := &ShowServersStatement{} - return stmt, nil -} - // parseGrantsForUserStatement parses a string and returns a ShowGrantsForUserStatement. // This function assumes the "SHOW GRANTS" tokens have already been consumed. func (p *Parser) parseGrantsForUserStatement() (*ShowGrantsForUserStatement, error) { @@ -1457,16 +1620,6 @@ func (p *Parser) parseCreateDatabaseStatement() (*CreateDatabaseStatement, error) { stmt := &CreateDatabaseStatement{} - // Look for "IF NOT EXISTS" - if tok, _, _ := p.scanIgnoreWhitespace(); tok == IF { - if err := p.parseTokens([]Token{NOT, EXISTS}); err != nil { - return nil, err - } - stmt.IfNotExists = true - } else { - p.unscan() - } - // Parse the name of the database to be created. 
lit, err := p.parseIdent() if err != nil { @@ -1476,10 +1629,10 @@ // Look for "WITH" if tok, _, _ := p.scanIgnoreWhitespace(); tok == WITH { - // validate that at least one of DURATION, REPLICATION or NAME is provided + // validate that at least one of DURATION, NAME, REPLICATION or SHARD is provided tok, pos, lit := p.scanIgnoreWhitespace() - if tok != DURATION && tok != REPLICATION && tok != NAME { - return nil, newParseError(tokstr(tok, lit), []string{"DURATION", "REPLICATION", "NAME"}, pos) + if tok != DURATION && tok != NAME && tok != REPLICATION && tok != SHARD { + return nil, newParseError(tokstr(tok, lit), []string{"DURATION", "NAME", "REPLICATION", "SHARD"}, pos) } // rewind p.unscan() @@ -1488,44 +1641,54 @@ stmt.RetentionPolicyCreate = true // Look for "DURATION" - var rpDuration time.Duration // default is forever if err := p.parseTokens([]Token{DURATION}); err != nil { p.unscan() } else { - rpDuration, err = p.parseDuration() + rpDuration, err := p.parseDuration() if err != nil { return nil, err } + stmt.RetentionPolicyDuration = &rpDuration } - stmt.RetentionPolicyDuration = rpDuration // Look for "REPLICATION" - var rpReplication int = 1 // default is 1 if err := p.parseTokens([]Token{REPLICATION}); err != nil { p.unscan() } else { - rpReplication, err = p.parseInt(1, math.MaxInt32) + rpReplication, err := p.parseInt(1, math.MaxInt32) + if err != nil { + return nil, err + } + stmt.RetentionPolicyReplication = &rpReplication + } + + // Look for "SHARD" + if err := p.parseTokens([]Token{SHARD}); err != nil { + p.unscan() + } else { + // Look for "DURATION" + tok, pos, lit := p.scanIgnoreWhitespace() + if tok != DURATION { + return nil, newParseError(tokstr(tok, lit), []string{"DURATION"}, pos) + } + stmt.RetentionPolicyShardGroupDuration, err = p.parseDuration() if err != nil { return nil, err } } - stmt.RetentionPolicyReplication = rpReplication // Look for "NAME" - var rpName string = "default" // default is default if err := p.parseTokens([]Token{NAME}); err != nil { p.unscan() } else { - rpName, err = p.parseIdent() + stmt.RetentionPolicyName, err = p.parseIdent() if err != nil { return nil, err } } - stmt.RetentionPolicyName = rpName } else { p.unscan() } - return stmt, nil } @@ -1534,16 +1697,6 @@ func (p *Parser) parseDropDatabaseStatement() (*DropDatabaseStatement, error) { stmt := &DropDatabaseStatement{} - // Look for "IF EXISTS" - if tok, _, _ := p.scanIgnoreWhitespace(); tok == IF { - if err := p.parseTokens([]Token{EXISTS}); err != nil { - return nil, err - } - stmt.IfExists = true - } else { - p.unscan() - } - // Parse the name of the database to be dropped. lit, err := p.parseIdent() if err != nil { @@ -1799,19 +1952,27 @@ func (p *Parser) parseField() (*Field, error) { f := &Field{} - _, pos, _ := p.scanIgnoreWhitespace() - p.unscan() - // Parse the expression first. - expr, err := p.ParseExpr() + // Attempt to parse a regex. + re, err := p.parseRegex() if err != nil { return nil, err + } else if re != nil { + f.Expr = re + } else { + _, pos, _ := p.scanIgnoreWhitespace() + p.unscan() + // Parse the expression first. 
+ expr, err := p.ParseExpr() + if err != nil { + return nil, err + } + var c validateField + Walk(&c, expr) + if c.foundInvalid { + return nil, fmt.Errorf("invalid operator %s in SELECT clause at line %d, char %d; operator is intended for WHERE clause", c.badToken, pos.Line+1, pos.Char+1) + } + f.Expr = expr } - var c validateField - Walk(&c, expr) - if c.foundInvalid { - return nil, fmt.Errorf("invalid operator %s in SELECT clause at line %d, char %d; operator is intended for WHERE clause", c.badToken, pos.Line+1, pos.Char+1) - } - f.Expr = expr // Parse the alias if the current and next tokens are "WS AS". alias, err := p.parseAlias() @@ -1999,6 +2160,13 @@ // parseDimension parses a single dimension. func (p *Parser) parseDimension() (*Dimension, error) { + re, err := p.parseRegex() + if err != nil { + return nil, err + } else if re != nil { + return &Dimension{Expr: re}, nil + } + // Parse the expression first. expr, err := p.ParseExpr() if err != nil { @@ -2014,36 +2182,40 @@ // parseFill parses the fill call and its options. func (p *Parser) parseFill() (FillOption, interface{}, error) { // Parse the expression first. + tok, _, lit := p.scanIgnoreWhitespace() + p.unscan() + if tok != IDENT || strings.ToLower(lit) != "fill" { + return NullFill, nil, nil + } + expr, err := p.ParseExpr() if err != nil { - p.unscan() - return NullFill, nil, nil + return NullFill, nil, err } - lit, ok := expr.(*Call) + fill, ok := expr.(*Call) if !ok { - p.unscan() - return NullFill, nil, nil - } - if strings.ToLower(lit.Name) != "fill" { - p.unscan() - return NullFill, nil, nil + return NullFill, nil, errors.New("fill must be a function call") + } else if len(fill.Args) != 1 { + return NullFill, nil, errors.New("fill requires an argument, e.g.: 0, null, none, previous, linear") } - if len(lit.Args) != 1 { - return NullFill, nil, errors.New("fill requires an argument, e.g.: 0, null, none, previous") - } - switch lit.Args[0].String() { + switch fill.Args[0].String() { case "null": return NullFill, nil, nil case "none": return NoFill, nil, nil case "previous": return PreviousFill, nil, nil + case "linear": + return LinearFill, nil, nil default: - num, ok := lit.Args[0].(*NumberLiteral) - if !ok { + switch num := fill.Args[0].(type) { + case *IntegerLiteral: + return NumberFill, num.Val, nil + case *NumberLiteral: + return NumberFill, num.Val, nil + default: return NullFill, nil, fmt.Errorf("expected number argument in fill()") } - return NumberFill, num.Val, nil } } @@ -2058,19 +2230,12 @@ // Scan the number. tok, pos, lit := p.scanIgnoreWhitespace() - if tok != NUMBER { - return 0, newParseError(tokstr(tok, lit), []string{"number"}, pos) - } - - // Return an error if the number has a fractional part. - if strings.Contains(lit, ".") { - msg := fmt.Sprintf("fractional parts not allowed in %s", t.String()) - return 0, &ParseError{Message: msg, Pos: pos} + if tok != INTEGER { + return 0, newParseError(tokstr(tok, lit), []string{"integer"}, pos) } // Parse number. 
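// Illustrative sketch: the rewritten fill handling accepts fill(linear) as well
// as integer fill values, and rejects an empty fill(). A minimal check through
// the public parser API, assuming the github.com/influxdata/influxdb/influxql
// import path; the query mirrors a test case added later in this diff.
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	q := `SELECT mean(value) FROM cpu WHERE time < now() GROUP BY time(5m) FILL(linear)`
	stmt, err := influxql.ParseStatement(q)
	if err != nil {
		panic(err)
	}
	sel := stmt.(*influxql.SelectStatement)
	fmt.Println(sel.Fill == influxql.LinearFill) // true
}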
n, _ := strconv.ParseInt(lit, 10, 64) - if n < 0 { msg := fmt.Sprintf("%s must be >= 0", t.String()) return 0, &ParseError{Message: msg, Pos: pos} @@ -2183,7 +2348,35 @@ return nil, err } - vr := &VarRef{Val: strings.Join(segments, ".")} + var dtype DataType + if tok, _, _ := p.scan(); tok == DOUBLECOLON { + tok, pos, lit := p.scan() + switch tok { + case IDENT: + switch strings.ToLower(lit) { + case "float": + dtype = Float + case "integer": + dtype = Integer + case "string": + dtype = String + case "boolean": + dtype = Boolean + default: + return nil, newParseError(tokstr(tok, lit), []string{"float", "integer", "string", "boolean", "field", "tag"}, pos) + } + case FIELD: + dtype = AnyField + case TAG: + dtype = Tag + default: + return nil, newParseError(tokstr(tok, lit), []string{"float", "integer", "string", "boolean", "field", "tag"}, pos) + } + } else { + p.unscan() + } + + vr := &VarRef{Val: strings.Join(segments, "."), Type: dtype} return vr, nil } @@ -2294,25 +2487,6 @@ return nil, newParseError(tokstr(tok0, lit), []string{"(", "identifier"}, pos) case STRING: - // If literal looks like a date time then parse it as a time literal. - if isDateTimeString(lit) { - t, err := time.Parse(DateTimeFormat, lit) - if err != nil { - // try to parse it as an RFCNano time - t, err := time.Parse(time.RFC3339Nano, lit) - if err != nil { - return nil, &ParseError{Message: "unable to parse datetime", Pos: pos} - } - return &TimeLiteral{Val: t}, nil - } - return &TimeLiteral{Val: t}, nil - } else if isDateString(lit) { - t, err := time.Parse(DateFormat, lit) - if err != nil { - return nil, &ParseError{Message: "unable to parse date", Pos: pos} - } - return &TimeLiteral{Val: t}, nil - } return &StringLiteral{Val: lit}, nil case NUMBER: v, err := strconv.ParseFloat(lit, 64) @@ -2320,19 +2494,60 @@ return nil, &ParseError{Message: "unable to parse number", Pos: pos} } return &NumberLiteral{Val: v}, nil + case INTEGER: + v, err := strconv.ParseInt(lit, 10, 64) + if err != nil { + return nil, &ParseError{Message: "unable to parse integer", Pos: pos} + } + return &IntegerLiteral{Val: v}, nil case TRUE, FALSE: return &BooleanLiteral{Val: (tok == TRUE)}, nil - case DURATION_VAL: + case DURATIONVAL: v, _ := ParseDuration(lit) return &DurationLiteral{Val: v}, nil case MUL: - return &Wildcard{}, nil + wc := &Wildcard{} + if tok, _, _ := p.scan(); tok == DOUBLECOLON { + tok, pos, lit := p.scan() + switch tok { + case FIELD, TAG: + wc.Type = tok + default: + return nil, newParseError(tokstr(tok, lit), []string{"field", "tag"}, pos) + } + } else { + p.unscan() + } + return wc, nil case REGEX: re, err := regexp.Compile(lit) if err != nil { return nil, &ParseError{Message: err.Error(), Pos: pos} } return &RegexLiteral{Val: re}, nil + case BOUNDPARAM: + k := strings.TrimPrefix(lit, "$") + if len(k) == 0 { + return nil, errors.New("empty bound parameter") + } + + v, ok := p.params[k] + if !ok { + return nil, fmt.Errorf("missing parameter: %s", k) + } + + switch v := v.(type) { + case float64: + return &NumberLiteral{Val: v}, nil + case int64: + return &IntegerLiteral{Val: v}, nil + case string: + return &StringLiteral{Val: v}, nil + case bool: + return &BooleanLiteral{Val: v}, nil + default: + return nil, fmt.Errorf("unable to bind parameter with type %T", v) + } default: return nil, newParseError(tokstr(tok, lit), []string{"identifier", "string", "number", "bool"}, pos) } @@ -2375,27 +2590,50 @@ // This function assumes the function name and LPAREN have been consumed. 
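// Illustrative sketch: the ::type suffix on variable references and the $name
// bound parameters parsed in the hunks above, exercised through the public
// parser API. Assumes the github.com/influxdata/influxdb/influxql import path
// and that the parser's params map is populated via a SetParams-style setter.
package main

import (
	"fmt"
	"strings"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	// Cast suffixes set VarRef.Type (float, integer, string, boolean, field, tag).
	stmt, err := influxql.ParseStatement(`SELECT field1::float, tag1::tag FROM cpu`)
	if err != nil {
		panic(err)
	}
	fmt.Println(stmt.String())

	// Bound parameters are looked up in the parser's params map at parse time.
	p := influxql.NewParser(strings.NewReader(`SELECT value FROM cpu WHERE value > $value`))
	p.SetParams(map[string]interface{}{"value": int64(2)}) // assumed setter
	bound, err := p.ParseStatement()
	if err != nil {
		panic(err)
	}
	fmt.Println(bound.String())
}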
func (p *Parser) parseCall(name string) (*Call, error) { name = strings.ToLower(name) - // If there's a right paren then just return immediately. - if tok, _, _ := p.scan(); tok == RPAREN { - return &Call{Name: name}, nil - } - p.unscan() - // Otherwise parse function call arguments. + // Parse first function argument if one exists. var args []Expr - for { - // Parse an expression argument. + re, err := p.parseRegex() + if err != nil { + return nil, err + } else if re != nil { + args = append(args, re) + } else { + // If there's a right paren then just return immediately. + if tok, _, _ := p.scan(); tok == RPAREN { + return &Call{Name: name}, nil + } + p.unscan() + arg, err := p.ParseExpr() if err != nil { return nil, err } args = append(args, arg) + } - // If there's not a comma next then stop parsing arguments. - if tok, _, _ := p.scan(); tok != COMMA { + // Parse additional function arguments if there is a comma. + for { + // If there's not a comma, stop parsing arguments. + if tok, _, _ := p.scanIgnoreWhitespace(); tok != COMMA { p.unscan() break } + + re, err := p.parseRegex() + if err != nil { + return nil, err + } else if re != nil { + args = append(args, re) + continue + } + + // Parse an expression argument. + arg, err := p.ParseExpr() + if err != nil { + return nil, err + } + args = append(args, arg) } // There should be a right parentheses at the end. @@ -2413,7 +2651,7 @@ var interval time.Duration if p.parseTokenMaybe(EVERY) { tok, pos, lit := p.scanIgnoreWhitespace() - if tok != DURATION_VAL { + if tok != DURATIONVAL { return 0, 0, newParseError(tokstr(tok, lit), []string{"duration"}, pos) } @@ -2427,7 +2665,7 @@ var maxDuration time.Duration if p.parseTokenMaybe(FOR) { tok, pos, lit := p.scanIgnoreWhitespace() - if tok != DURATION_VAL { + if tok != DURATIONVAL { return 0, 0, newParseError(tokstr(tok, lit), []string{"duration"}, pos) } @@ -2470,6 +2708,9 @@ func (p *Parser) unscan() { p.s.Unscan() } // ParseDuration parses a time duration from a string. +// This is needed instead of time.ParseDuration because this will support +// the full syntax that InfluxQL supports for specifying durations +// including weeks and days. func ParseDuration(s string) (time.Duration, error) { // Return an error if the string is blank or one character if len(s) < 2 { @@ -2479,41 +2720,78 @@ // Split string into individual runes. a := split(s) - // Extract the unit of measure. - // If the last two characters are "ms" then parse as milliseconds. - // Otherwise just use the last character as the unit of measure. - var num, uom string - if len(s) > 2 && s[len(s)-2:] == "ms" { - num, uom = string(a[:len(a)-2]), "ms" - } else { - num, uom = string(a[:len(a)-1]), string(a[len(a)-1:]) + // Start with a zero duration. + var d time.Duration + i := 0 + + // Check for a negative. + isNegative := false + if a[i] == '-' { + isNegative = true + i++ } - // Parse the numeric part. - n, err := strconv.ParseInt(num, 10, 64) - if err != nil { - return 0, ErrInvalidDuration + var measure int64 + var unit string + + // Parsing loop. + for i < len(a) { + // Find the number portion. + start := i + for ; i < len(a) && isDigit(a[i]); i++ { + // Scan for the digits. + } + + // Check if we reached the end of the string prematurely. + if i >= len(a) || i == start { + return 0, ErrInvalidDuration + } + + // Parse the numeric part. + n, err := strconv.ParseInt(string(a[start:i]), 10, 64) + if err != nil { + return 0, ErrInvalidDuration + } + measure = n + + // Extract the unit of measure. 
+ // If the last two characters are "ms" then parse as milliseconds. + // Otherwise just use the last character as the unit of measure. + unit = string(a[i]) + switch a[i] { + case 'u', 'µ': + d += time.Duration(n) * time.Microsecond + case 'm': + if i+1 < len(a) && a[i+1] == 's' { + unit = string(a[i : i+2]) + d += time.Duration(n) * time.Millisecond + i += 2 + continue + } + d += time.Duration(n) * time.Minute + case 's': + d += time.Duration(n) * time.Second + case 'h': + d += time.Duration(n) * time.Hour + case 'd': + d += time.Duration(n) * 24 * time.Hour + case 'w': + d += time.Duration(n) * 7 * 24 * time.Hour + default: + return 0, ErrInvalidDuration + } + i++ } - // Multiply by the unit of measure. - switch uom { - case "u", "µ": - return time.Duration(n) * time.Microsecond, nil - case "ms": - return time.Duration(n) * time.Millisecond, nil - case "s": - return time.Duration(n) * time.Second, nil - case "m": - return time.Duration(n) * time.Minute, nil - case "h": - return time.Duration(n) * time.Hour, nil - case "d": - return time.Duration(n) * 24 * time.Hour, nil - case "w": - return time.Duration(n) * 7 * 24 * time.Hour, nil - default: - return 0, ErrInvalidDuration + // Check to see if we overflowed a duration + if d < 0 && !isNegative { + return 0, fmt.Errorf("overflowed duration %d%s: choose a smaller duration or INF", measure, unit) + } + + if isNegative { + d = -d } + return d, nil } // FormatDuration formats a duration to a string. @@ -2560,25 +2838,29 @@ return true } +var ( + qsReplacer = strings.NewReplacer("\n", `\n`, `\`, `\\`, `'`, `\'`) + qiReplacer = strings.NewReplacer("\n", `\n`, `\`, `\\`, `"`, `\"`) +) + // QuoteString returns a quoted string. func QuoteString(s string) string { - return `'` + strings.NewReplacer("\n", `\n`, `\`, `\\`, `'`, `\'`).Replace(s) + `'` + return `'` + qsReplacer.Replace(s) + `'` } // QuoteIdent returns a quoted identifier from multiple bare identifiers. func QuoteIdent(segments ...string) string { - r := strings.NewReplacer("\n", `\n`, `\`, `\\`, `"`, `\"`) - var buf bytes.Buffer for i, segment := range segments { needQuote := IdentNeedsQuotes(segment) || - ((i < len(segments)-1) && segment != "") // not last segment && not "" + ((i < len(segments)-1) && segment != "") || // not last segment && not "" + ((i == 0 || i == len(segments)-1) && segment == "") // the first or last segment and an empty string if needQuote { _ = buf.WriteByte('"') } - _, _ = buf.WriteString(r.Replace(segment)) + _, _ = buf.WriteString(qiReplacer.Replace(segment)) if needQuote { _ = buf.WriteByte('"') diff -Nru influxdb-0.10.0+dfsg1/influxql/parser_test.go influxdb-1.1.1+dfsg1/influxql/parser_test.go --- influxdb-0.10.0+dfsg1/influxql/parser_test.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/parser_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -9,7 +9,7 @@ "testing" "time" - "github.com/influxdb/influxdb/influxql" + "github.com/influxdata/influxdb/influxql" ) // Ensure the parser can parse a multi-statement query. @@ -51,16 +51,24 @@ } } +func TestParser_ParseQuery_NoSemicolon(t *testing.T) { + _, err := influxql.NewParser(strings.NewReader(`CREATE DATABASE foo CREATE DATABASE bar`)).ParseQuery() + if err == nil || err.Error() != `found CREATE, expected ; at line 1, char 21` { + t.Fatalf("unexpected error: %s", err) + } +} + // Ensure the parser can parse strings into Statement ASTs. func TestParser_ParseStatement(t *testing.T) { // For use in various tests. 
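// Illustrative sketch: unlike time.ParseDuration, the rewritten
// influxql.ParseDuration above accepts the d and w units and compound durations,
// summing number/unit segments left to right. Assumes the
// github.com/influxdata/influxdb/influxql import path.
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	for _, s := range []string{"500ms", "10m", "1h30m", "3d", "2w"} {
		d, err := influxql.ParseDuration(s)
		if err != nil {
			panic(err)
		}
		fmt.Println(s, "=", d)
	}
}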
now := time.Now() var tests = []struct { - skip bool - s string - stmt influxql.Statement - err string + skip bool + s string + params map[string]interface{} + stmt influxql.Statement + err string }{ // SELECT * statement { @@ -130,7 +138,7 @@ RHS: &influxql.BinaryExpr{ Op: influxql.GT, LHS: &influxql.VarRef{Val: "time"}, - RHS: &influxql.TimeLiteral{Val: now.UTC()}, + RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)}, }, }, Dimensions: []*influxql.Dimension{{Expr: &influxql.Call{Name: "time", Args: []influxql.Expr{&influxql.DurationLiteral{Val: 10 * time.Hour}}}}}, @@ -162,6 +170,18 @@ }, }, + // sample + { + s: `SELECT sample(field1, 100) FROM myseries;`, + stmt: &influxql.SelectStatement{ + IsRawQuery: false, + Fields: []*influxql.Field{ + {Expr: &influxql.Call{Name: "sample", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.IntegerLiteral{Val: 100}}}}, + }, + Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}}, + }, + }, + // derivative { s: `SELECT derivative(field1, 1h) FROM myseries;`, @@ -185,30 +205,355 @@ Condition: &influxql.BinaryExpr{ Op: influxql.GT, LHS: &influxql.VarRef{Val: "time"}, - RHS: &influxql.TimeLiteral{Val: now.UTC()}, + RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)}, + }, + }, + }, + + { + s: `SELECT derivative(field1, 1h) / derivative(field2, 1h) FROM myseries`, + stmt: &influxql.SelectStatement{ + IsRawQuery: false, + Fields: []*influxql.Field{ + { + Expr: &influxql.BinaryExpr{ + LHS: &influxql.Call{ + Name: "derivative", + Args: []influxql.Expr{ + &influxql.VarRef{Val: "field1"}, + &influxql.DurationLiteral{Val: time.Hour}, + }, + }, + RHS: &influxql.Call{ + Name: "derivative", + Args: []influxql.Expr{ + &influxql.VarRef{Val: "field2"}, + &influxql.DurationLiteral{Val: time.Hour}, + }, + }, + Op: influxql.DIV, + }, + }, + }, + Sources: []influxql.Source{ + &influxql.Measurement{Name: "myseries"}, + }, + }, + }, + + // difference + { + s: `SELECT difference(field1) FROM myseries;`, + stmt: &influxql.SelectStatement{ + IsRawQuery: false, + Fields: []*influxql.Field{ + {Expr: &influxql.Call{Name: "difference", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}}}}, + }, + Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}}, + }, + }, + + { + s: fmt.Sprintf(`SELECT difference(max(field1)) FROM myseries WHERE time > '%s' GROUP BY time(1m)`, now.UTC().Format(time.RFC3339Nano)), + stmt: &influxql.SelectStatement{ + IsRawQuery: false, + Fields: []*influxql.Field{ + { + Expr: &influxql.Call{ + Name: "difference", + Args: []influxql.Expr{ + &influxql.Call{ + Name: "max", + Args: []influxql.Expr{ + &influxql.VarRef{Val: "field1"}, + }, + }, + }, + }, + }, + }, + Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}}, + Dimensions: []*influxql.Dimension{ + { + Expr: &influxql.Call{ + Name: "time", + Args: []influxql.Expr{ + &influxql.DurationLiteral{Val: time.Minute}, + }, + }, + }, + }, + Condition: &influxql.BinaryExpr{ + Op: influxql.GT, + LHS: &influxql.VarRef{Val: "time"}, + RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)}, }, }, }, + // moving_average { - s: `SELECT derivative(mean(field1), 1h) FROM myseries;`, + s: `SELECT moving_average(field1, 3) FROM myseries;`, stmt: &influxql.SelectStatement{ IsRawQuery: false, Fields: []*influxql.Field{ - {Expr: &influxql.Call{Name: "derivative", Args: []influxql.Expr{&influxql.Call{Name: "mean", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}}}, &influxql.DurationLiteral{Val: time.Hour}}}}, + 
{Expr: &influxql.Call{Name: "moving_average", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.IntegerLiteral{Val: 3}}}}, }, Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}}, }, }, { - s: `SELECT derivative(mean(field1)) FROM myseries;`, + s: fmt.Sprintf(`SELECT moving_average(max(field1), 3) FROM myseries WHERE time > '%s' GROUP BY time(1m)`, now.UTC().Format(time.RFC3339Nano)), stmt: &influxql.SelectStatement{ IsRawQuery: false, Fields: []*influxql.Field{ - {Expr: &influxql.Call{Name: "derivative", Args: []influxql.Expr{&influxql.Call{Name: "mean", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}}}}}}, + { + Expr: &influxql.Call{ + Name: "moving_average", + Args: []influxql.Expr{ + &influxql.Call{ + Name: "max", + Args: []influxql.Expr{ + &influxql.VarRef{Val: "field1"}, + }, + }, + &influxql.IntegerLiteral{Val: 3}, + }, + }, + }, }, Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}}, + Dimensions: []*influxql.Dimension{ + { + Expr: &influxql.Call{ + Name: "time", + Args: []influxql.Expr{ + &influxql.DurationLiteral{Val: time.Minute}, + }, + }, + }, + }, + Condition: &influxql.BinaryExpr{ + Op: influxql.GT, + LHS: &influxql.VarRef{Val: "time"}, + RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)}, + }, + }, + }, + + // cumulative_sum + { + s: fmt.Sprintf(`SELECT cumulative_sum(field1) FROM myseries WHERE time > '%s'`, now.UTC().Format(time.RFC3339Nano)), + stmt: &influxql.SelectStatement{ + Fields: []*influxql.Field{ + { + Expr: &influxql.Call{ + Name: "cumulative_sum", + Args: []influxql.Expr{ + &influxql.VarRef{Val: "field1"}, + }, + }, + }, + }, + Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}}, + Condition: &influxql.BinaryExpr{ + Op: influxql.GT, + LHS: &influxql.VarRef{Val: "time"}, + RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)}, + }, + }, + }, + + { + s: fmt.Sprintf(`SELECT cumulative_sum(mean(field1)) FROM myseries WHERE time > '%s' GROUP BY time(1m)`, now.UTC().Format(time.RFC3339Nano)), + stmt: &influxql.SelectStatement{ + Fields: []*influxql.Field{ + { + Expr: &influxql.Call{ + Name: "cumulative_sum", + Args: []influxql.Expr{ + &influxql.Call{ + Name: "mean", + Args: []influxql.Expr{ + &influxql.VarRef{Val: "field1"}, + }, + }, + }, + }, + }, + }, + Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}}, + Dimensions: []*influxql.Dimension{ + { + Expr: &influxql.Call{ + Name: "time", + Args: []influxql.Expr{ + &influxql.DurationLiteral{Val: time.Minute}, + }, + }, + }, + }, + Condition: &influxql.BinaryExpr{ + Op: influxql.GT, + LHS: &influxql.VarRef{Val: "time"}, + RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)}, + }, + }, + }, + + // holt_winters + { + s: fmt.Sprintf(`SELECT holt_winters(first(field1), 3, 1) FROM myseries WHERE time > '%s' GROUP BY time(1h);`, now.UTC().Format(time.RFC3339Nano)), + stmt: &influxql.SelectStatement{ + IsRawQuery: false, + Fields: []*influxql.Field{ + {Expr: &influxql.Call{ + Name: "holt_winters", + Args: []influxql.Expr{ + &influxql.Call{ + Name: "first", + Args: []influxql.Expr{ + &influxql.VarRef{Val: "field1"}, + }, + }, + &influxql.IntegerLiteral{Val: 3}, + &influxql.IntegerLiteral{Val: 1}, + }, + }}, + }, + Dimensions: []*influxql.Dimension{ + { + Expr: &influxql.Call{ + Name: "time", + Args: []influxql.Expr{ + &influxql.DurationLiteral{Val: 1 * time.Hour}, + }, + }, + }, + }, + Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}}, + Condition: &influxql.BinaryExpr{ + 
Op: influxql.GT, + LHS: &influxql.VarRef{Val: "time"}, + RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)}, + }, + }, + }, + + { + s: fmt.Sprintf(`SELECT holt_winters_with_fit(first(field1), 3, 1) FROM myseries WHERE time > '%s' GROUP BY time(1h);`, now.UTC().Format(time.RFC3339Nano)), + stmt: &influxql.SelectStatement{ + IsRawQuery: false, + Fields: []*influxql.Field{ + {Expr: &influxql.Call{ + Name: "holt_winters_with_fit", + Args: []influxql.Expr{ + &influxql.Call{ + Name: "first", + Args: []influxql.Expr{ + &influxql.VarRef{Val: "field1"}, + }, + }, + &influxql.IntegerLiteral{Val: 3}, + &influxql.IntegerLiteral{Val: 1}, + }}}, + }, + Dimensions: []*influxql.Dimension{ + { + Expr: &influxql.Call{ + Name: "time", + Args: []influxql.Expr{ + &influxql.DurationLiteral{Val: 1 * time.Hour}, + }, + }, + }, + }, + Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}}, + Condition: &influxql.BinaryExpr{ + Op: influxql.GT, + LHS: &influxql.VarRef{Val: "time"}, + RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)}, + }, + }, + }, + { + s: fmt.Sprintf(`SELECT holt_winters(max(field1), 4, 5) FROM myseries WHERE time > '%s' GROUP BY time(1m)`, now.UTC().Format(time.RFC3339Nano)), + stmt: &influxql.SelectStatement{ + IsRawQuery: false, + Fields: []*influxql.Field{ + { + Expr: &influxql.Call{ + Name: "holt_winters", + Args: []influxql.Expr{ + &influxql.Call{ + Name: "max", + Args: []influxql.Expr{ + &influxql.VarRef{Val: "field1"}, + }, + }, + &influxql.IntegerLiteral{Val: 4}, + &influxql.IntegerLiteral{Val: 5}, + }, + }, + }, + }, + Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}}, + Dimensions: []*influxql.Dimension{ + { + Expr: &influxql.Call{ + Name: "time", + Args: []influxql.Expr{ + &influxql.DurationLiteral{Val: time.Minute}, + }, + }, + }, + }, + Condition: &influxql.BinaryExpr{ + Op: influxql.GT, + LHS: &influxql.VarRef{Val: "time"}, + RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)}, + }, + }, + }, + + { + s: fmt.Sprintf(`SELECT holt_winters_with_fit(max(field1), 4, 5) FROM myseries WHERE time > '%s' GROUP BY time(1m)`, now.UTC().Format(time.RFC3339Nano)), + stmt: &influxql.SelectStatement{ + IsRawQuery: false, + Fields: []*influxql.Field{ + { + Expr: &influxql.Call{ + Name: "holt_winters_with_fit", + Args: []influxql.Expr{ + &influxql.Call{ + Name: "max", + Args: []influxql.Expr{ + &influxql.VarRef{Val: "field1"}, + }, + }, + &influxql.IntegerLiteral{Val: 4}, + &influxql.IntegerLiteral{Val: 5}, + }, + }, + }, + }, + Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}}, + Dimensions: []*influxql.Dimension{ + { + Expr: &influxql.Call{ + Name: "time", + Args: []influxql.Expr{ + &influxql.DurationLiteral{Val: time.Minute}, + }, + }, + }, + }, + Condition: &influxql.BinaryExpr{ + Op: influxql.GT, + LHS: &influxql.VarRef{Val: "time"}, + RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)}, + }, }, }, @@ -296,13 +641,25 @@ }, }, + { + s: `select percentile("field1", 2.0), field2 from cpu`, + stmt: &influxql.SelectStatement{ + IsRawQuery: false, + Fields: []*influxql.Field{ + {Expr: &influxql.Call{Name: "percentile", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2.0}}}}, + {Expr: &influxql.VarRef{Val: "field2"}}, + }, + Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, + }, + }, + // select top statements { s: `select top("field1", 2) from cpu`, stmt: &influxql.SelectStatement{ IsRawQuery: false, Fields: []*influxql.Field{ - 
{Expr: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2}}}}, + {Expr: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.IntegerLiteral{Val: 2}}}}, }, Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, }, @@ -313,7 +670,7 @@ stmt: &influxql.SelectStatement{ IsRawQuery: false, Fields: []*influxql.Field{ - {Expr: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2}}}}, + {Expr: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.IntegerLiteral{Val: 2}}}}, }, Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, }, @@ -324,7 +681,7 @@ stmt: &influxql.SelectStatement{ IsRawQuery: false, Fields: []*influxql.Field{ - {Expr: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2}}}}, + {Expr: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.IntegerLiteral{Val: 2}}}}, {Expr: &influxql.VarRef{Val: "tag1"}}, }, Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, @@ -336,7 +693,7 @@ stmt: &influxql.SelectStatement{ IsRawQuery: false, Fields: []*influxql.Field{ - {Expr: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.VarRef{Val: "tag1"}, &influxql.NumberLiteral{Val: 2}}}}, + {Expr: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.VarRef{Val: "tag1"}, &influxql.IntegerLiteral{Val: 2}}}}, {Expr: &influxql.VarRef{Val: "tag1"}}, }, Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, @@ -411,7 +768,7 @@ Condition: &influxql.BinaryExpr{ Op: influxql.GT, LHS: &influxql.VarRef{Val: "time"}, - RHS: &influxql.TimeLiteral{Val: now.UTC()}, + RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)}, }, }, }, @@ -426,7 +783,7 @@ Condition: &influxql.BinaryExpr{ Op: influxql.GT, LHS: &influxql.VarRef{Val: "load"}, - RHS: &influxql.NumberLiteral{Val: 100}, + RHS: &influxql.IntegerLiteral{Val: 100}, }, }, }, @@ -439,7 +796,7 @@ Condition: &influxql.BinaryExpr{ Op: influxql.GTE, LHS: &influxql.VarRef{Val: "load"}, - RHS: &influxql.NumberLiteral{Val: 100}, + RHS: &influxql.IntegerLiteral{Val: 100}, }, }, }, @@ -452,7 +809,7 @@ Condition: &influxql.BinaryExpr{ Op: influxql.EQ, LHS: &influxql.VarRef{Val: "load"}, - RHS: &influxql.NumberLiteral{Val: 100}, + RHS: &influxql.IntegerLiteral{Val: 100}, }, }, }, @@ -465,7 +822,7 @@ Condition: &influxql.BinaryExpr{ Op: influxql.LTE, LHS: &influxql.VarRef{Val: "load"}, - RHS: &influxql.NumberLiteral{Val: 100}, + RHS: &influxql.IntegerLiteral{Val: 100}, }, }, }, @@ -478,7 +835,7 @@ Condition: &influxql.BinaryExpr{ Op: influxql.LT, LHS: &influxql.VarRef{Val: "load"}, - RHS: &influxql.NumberLiteral{Val: 100}, + RHS: &influxql.IntegerLiteral{Val: 100}, }, }, }, @@ -491,7 +848,7 @@ Condition: &influxql.BinaryExpr{ Op: influxql.NEQ, LHS: &influxql.VarRef{Val: "load"}, - RHS: &influxql.NumberLiteral{Val: 100}, + RHS: &influxql.IntegerLiteral{Val: 100}, }, }, }, @@ -585,6 +942,23 @@ }, }, }, + // SELECT statement with group by and multi digit duration (prevent regression from #731://github.com/influxdata/influxdb/pull/7316) + { + s: fmt.Sprintf(`SELECT count(value) FROM cpu where time < '%s' group by time(500ms)`, now.UTC().Format(time.RFC3339Nano)), + stmt: &influxql.SelectStatement{ + Fields: []*influxql.Field{{ + Expr: 
&influxql.Call{ + Name: "count", + Args: []influxql.Expr{&influxql.VarRef{Val: "value"}}}}}, + Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, + Condition: &influxql.BinaryExpr{ + Op: influxql.LT, + LHS: &influxql.VarRef{Val: "time"}, + RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)}, + }, + Dimensions: []*influxql.Dimension{{Expr: &influxql.Call{Name: "time", Args: []influxql.Expr{&influxql.DurationLiteral{Val: 500 * time.Millisecond}}}}}, + }, + }, // SELECT statement with fill { @@ -598,11 +972,11 @@ Condition: &influxql.BinaryExpr{ Op: influxql.LT, LHS: &influxql.VarRef{Val: "time"}, - RHS: &influxql.TimeLiteral{Val: now.UTC()}, + RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)}, }, Dimensions: []*influxql.Dimension{{Expr: &influxql.Call{Name: "time", Args: []influxql.Expr{&influxql.DurationLiteral{Val: 5 * time.Minute}}}}}, Fill: influxql.NumberFill, - FillValue: float64(1), + FillValue: int64(1), }, }, @@ -618,7 +992,7 @@ Condition: &influxql.BinaryExpr{ Op: influxql.LT, LHS: &influxql.VarRef{Val: "time"}, - RHS: &influxql.TimeLiteral{Val: now.UTC()}, + RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)}, }, Dimensions: []*influxql.Dimension{{Expr: &influxql.Call{Name: "time", Args: []influxql.Expr{&influxql.DurationLiteral{Val: 5 * time.Minute}}}}}, Fill: influxql.NoFill, @@ -637,15 +1011,100 @@ Condition: &influxql.BinaryExpr{ Op: influxql.LT, LHS: &influxql.VarRef{Val: "time"}, - RHS: &influxql.TimeLiteral{Val: now.UTC()}, + RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)}, }, Dimensions: []*influxql.Dimension{{Expr: &influxql.Call{Name: "time", Args: []influxql.Expr{&influxql.DurationLiteral{Val: 5 * time.Minute}}}}}, Fill: influxql.PreviousFill, }, }, - // See issues https://github.com/influxdb/influxdb/issues/1647 - // and https://github.com/influxdb/influxdb/issues/4404 + // SELECT statement with average fill + { + s: fmt.Sprintf(`SELECT mean(value) FROM cpu where time < '%s' GROUP BY time(5m) FILL(linear)`, now.UTC().Format(time.RFC3339Nano)), + stmt: &influxql.SelectStatement{ + Fields: []*influxql.Field{{ + Expr: &influxql.Call{ + Name: "mean", + Args: []influxql.Expr{&influxql.VarRef{Val: "value"}}}}}, + Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, + Condition: &influxql.BinaryExpr{ + Op: influxql.LT, + LHS: &influxql.VarRef{Val: "time"}, + RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)}, + }, + Dimensions: []*influxql.Dimension{{Expr: &influxql.Call{Name: "time", Args: []influxql.Expr{&influxql.DurationLiteral{Val: 5 * time.Minute}}}}}, + Fill: influxql.LinearFill, + }, + }, + + // SELECT casts + { + s: `SELECT field1::float, field2::integer, field3::string, field4::boolean, field5::field, tag1::tag FROM cpu`, + stmt: &influxql.SelectStatement{ + IsRawQuery: true, + Fields: []*influxql.Field{ + { + Expr: &influxql.VarRef{ + Val: "field1", + Type: influxql.Float, + }, + }, + { + Expr: &influxql.VarRef{ + Val: "field2", + Type: influxql.Integer, + }, + }, + { + Expr: &influxql.VarRef{ + Val: "field3", + Type: influxql.String, + }, + }, + { + Expr: &influxql.VarRef{ + Val: "field4", + Type: influxql.Boolean, + }, + }, + { + Expr: &influxql.VarRef{ + Val: "field5", + Type: influxql.AnyField, + }, + }, + { + Expr: &influxql.VarRef{ + Val: "tag1", + Type: influxql.Tag, + }, + }, + }, + Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, + }, + }, + + // SELECT statement with a bound parameter + { + s: `SELECT value FROM cpu WHERE 
value > $value`, + params: map[string]interface{}{ + "value": int64(2), + }, + stmt: &influxql.SelectStatement{ + IsRawQuery: true, + Fields: []*influxql.Field{{ + Expr: &influxql.VarRef{Val: "value"}}}, + Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, + Condition: &influxql.BinaryExpr{ + Op: influxql.GT, + LHS: &influxql.VarRef{Val: "value"}, + RHS: &influxql.IntegerLiteral{Val: 2}, + }, + }, + }, + + // See issues https://github.com/influxdata/influxdb/issues/1647 + // and https://github.com/influxdata/influxdb/issues/4404 // DELETE statement //{ // s: `DELETE FROM myseries WHERE host = 'hosta.influxdb.org'`, @@ -659,12 +1118,6 @@ // }, //}, - // SHOW SERVERS - { - s: `SHOW SERVERS`, - stmt: &influxql.ShowServersStatement{}, - }, - // SHOW GRANTS { s: `SHOW GRANTS FOR jdoe`, @@ -691,6 +1144,14 @@ }, }, + // SHOW SERIES ON db0 + { + s: `SHOW SERIES ON db0`, + stmt: &influxql.ShowSeriesStatement{ + Database: "db0", + }, + }, + // SHOW SERIES FROM // { s: `SHOW SERIES FROM /[cg]pu/`, @@ -753,6 +1214,14 @@ }, }, + // SHOW MEASUREMENTS ON db0 + { + s: `SHOW MEASUREMENTS ON db0`, + stmt: &influxql.ShowMeasurementsStatement{ + Database: "db0", + }, + }, + // SHOW MEASUREMENTS WITH MEASUREMENT = cpu { s: `SHOW MEASUREMENTS WITH MEASUREMENT = cpu`, @@ -771,11 +1240,40 @@ }, }, + // SHOW QUERIES + { + s: `SHOW QUERIES`, + stmt: &influxql.ShowQueriesStatement{}, + }, + + // KILL QUERY 4 + { + s: `KILL QUERY 4`, + stmt: &influxql.KillQueryStatement{ + QueryID: 4, + }, + }, + + // KILL QUERY 4 ON localhost + { + s: `KILL QUERY 4 ON localhost`, + stmt: &influxql.KillQueryStatement{ + QueryID: 4, + Host: "localhost", + }, + }, + // SHOW RETENTION POLICIES { - s: `SHOW RETENTION POLICIES ON mydb`, + s: `SHOW RETENTION POLICIES`, + stmt: &influxql.ShowRetentionPoliciesStatement{}, + }, + + // SHOW RETENTION POLICIES ON db0 + { + s: `SHOW RETENTION POLICIES ON db0`, stmt: &influxql.ShowRetentionPoliciesStatement{ - Database: "mydb", + Database: "db0", }, }, @@ -787,6 +1285,14 @@ }, }, + // SHOW TAG KEYS ON db0 + { + s: `SHOW TAG KEYS ON db0`, + stmt: &influxql.ShowTagKeysStatement{ + Database: "db0", + }, + }, + // SHOW TAG KEYS with LIMIT { s: `SHOW TAG KEYS FROM src LIMIT 2`, @@ -892,8 +1398,9 @@ skip: true, s: `SHOW TAG VALUES FROM src WITH KEY = region WHERE region = 'uswest' ORDER BY ASC, field1, field2 DESC LIMIT 10`, stmt: &influxql.ShowTagValuesStatement{ - Sources: []influxql.Source{&influxql.Measurement{Name: "src"}}, - TagKeys: []string{"region"}, + Sources: []influxql.Source{&influxql.Measurement{Name: "src"}}, + Op: influxql.EQ, + TagKeyExpr: &influxql.StringLiteral{Val: "region"}, Condition: &influxql.BinaryExpr{ Op: influxql.EQ, LHS: &influxql.VarRef{Val: "region"}, @@ -912,8 +1419,9 @@ { s: `SHOW TAG VALUES FROM cpu WITH KEY IN (region, host) WHERE region = 'uswest'`, stmt: &influxql.ShowTagValuesStatement{ - Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, - TagKeys: []string{"region", "host"}, + Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, + Op: influxql.IN, + TagKeyExpr: &influxql.ListLiteral{Vals: []string{"region", "host"}}, Condition: &influxql.BinaryExpr{ Op: influxql.EQ, LHS: &influxql.VarRef{Val: "region"}, @@ -926,8 +1434,9 @@ { s: `SHOW TAG VALUES FROM cpu WITH KEY IN (region,service,host)WHERE region = 'uswest'`, stmt: &influxql.ShowTagValuesStatement{ - Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, - TagKeys: []string{"region", "service", "host"}, + Sources: []influxql.Source{&influxql.Measurement{Name: 
"cpu"}}, + Op: influxql.IN, + TagKeyExpr: &influxql.ListLiteral{Vals: []string{"region", "service", "host"}}, Condition: &influxql.BinaryExpr{ Op: influxql.EQ, LHS: &influxql.VarRef{Val: "region"}, @@ -940,7 +1449,8 @@ { s: `SHOW TAG VALUES WITH KEY = host WHERE region = 'uswest'`, stmt: &influxql.ShowTagValuesStatement{ - TagKeys: []string{"host"}, + Op: influxql.EQ, + TagKeyExpr: &influxql.StringLiteral{Val: "host"}, Condition: &influxql.BinaryExpr{ Op: influxql.EQ, LHS: &influxql.VarRef{Val: "region"}, @@ -958,7 +1468,8 @@ Regex: &influxql.RegexLiteral{Val: regexp.MustCompile(`[cg]pu`)}, }, }, - TagKeys: []string{"host"}, + Op: influxql.EQ, + TagKeyExpr: &influxql.StringLiteral{Val: "host"}, }, }, @@ -966,7 +1477,8 @@ { s: `SHOW TAG VALUES WITH KEY = "host" WHERE region = 'uswest'`, stmt: &influxql.ShowTagValuesStatement{ - TagKeys: []string{`host`}, + Op: influxql.EQ, + TagKeyExpr: &influxql.StringLiteral{Val: `host`}, Condition: &influxql.BinaryExpr{ Op: influxql.EQ, LHS: &influxql.VarRef{Val: "region"}, @@ -975,6 +1487,25 @@ }, }, + // SHOW TAG VALUES WITH KEY =~ // + { + s: `SHOW TAG VALUES WITH KEY =~ /(host|region)/`, + stmt: &influxql.ShowTagValuesStatement{ + Op: influxql.EQREGEX, + TagKeyExpr: &influxql.RegexLiteral{Val: regexp.MustCompile(`(host|region)`)}, + }, + }, + + // SHOW TAG VALUES ON db0 + { + s: `SHOW TAG VALUES ON db0 WITH KEY = "host"`, + stmt: &influxql.ShowTagValuesStatement{ + Database: "db0", + Op: influxql.EQ, + TagKeyExpr: &influxql.StringLiteral{Val: "host"}, + }, + }, + // SHOW USERS { s: `SHOW USERS`, @@ -1005,6 +1536,39 @@ }, }, }, + { + s: `SHOW FIELD KEYS ON db0`, + stmt: &influxql.ShowFieldKeysStatement{ + Database: "db0", + }, + }, + + // DELETE statement + { + s: `DELETE FROM src`, + stmt: &influxql.DeleteSeriesStatement{Sources: []influxql.Source{&influxql.Measurement{Name: "src"}}}, + }, + { + s: `DELETE WHERE host = 'hosta.influxdb.org'`, + stmt: &influxql.DeleteSeriesStatement{ + Condition: &influxql.BinaryExpr{ + Op: influxql.EQ, + LHS: &influxql.VarRef{Val: "host"}, + RHS: &influxql.StringLiteral{Val: "hosta.influxdb.org"}, + }, + }, + }, + { + s: `DELETE FROM src WHERE host = 'hosta.influxdb.org'`, + stmt: &influxql.DeleteSeriesStatement{ + Sources: []influxql.Source{&influxql.Measurement{Name: "src"}}, + Condition: &influxql.BinaryExpr{ + Op: influxql.EQ, + LHS: &influxql.VarRef{Val: "host"}, + RHS: &influxql.StringLiteral{Val: "hosta.influxdb.org"}, + }, + }, + }, // DROP SERIES statement { @@ -1033,16 +1597,6 @@ }, }, - // DROP SERVER statement - { - s: `DROP META SERVER 123`, - stmt: &influxql.DropServerStatement{NodeID: 123, Meta: true}, - }, - { - s: `DROP DATA SERVER 123`, - stmt: &influxql.DropServerStatement{NodeID: 123, Meta: false}, - }, - // SHOW CONTINUOUS QUERIES statement { s: `SHOW CONTINUOUS QUERIES`, @@ -1228,105 +1782,61 @@ { s: `CREATE DATABASE testdb`, stmt: &influxql.CreateDatabaseStatement{ - Name: "testdb", - IfNotExists: false, - RetentionPolicyCreate: false, - }, - }, - { - s: `CREATE DATABASE IF NOT EXISTS testdb`, - stmt: &influxql.CreateDatabaseStatement{ - Name: "testdb", - IfNotExists: true, + Name: "testdb", RetentionPolicyCreate: false, }, }, { s: `CREATE DATABASE testdb WITH DURATION 24h`, stmt: &influxql.CreateDatabaseStatement{ - Name: "testdb", - IfNotExists: false, - RetentionPolicyCreate: true, - RetentionPolicyDuration: 24 * time.Hour, - RetentionPolicyReplication: 1, - RetentionPolicyName: "default", + Name: "testdb", + RetentionPolicyCreate: true, + RetentionPolicyDuration: duration(24 * time.Hour), }, 
}, { - s: `CREATE DATABASE IF NOT EXISTS testdb WITH DURATION 24h`, + s: `CREATE DATABASE testdb WITH SHARD DURATION 30m`, stmt: &influxql.CreateDatabaseStatement{ - Name: "testdb", - IfNotExists: true, - RetentionPolicyCreate: true, - RetentionPolicyDuration: 24 * time.Hour, - RetentionPolicyReplication: 1, - RetentionPolicyName: "default", + Name: "testdb", + RetentionPolicyCreate: true, + RetentionPolicyShardGroupDuration: 30 * time.Minute, }, }, { s: `CREATE DATABASE testdb WITH REPLICATION 2`, stmt: &influxql.CreateDatabaseStatement{ - Name: "testdb", - IfNotExists: false, - RetentionPolicyCreate: true, - RetentionPolicyDuration: 0, - RetentionPolicyReplication: 2, - RetentionPolicyName: "default", - }, - }, - { - s: `CREATE DATABASE IF NOT EXISTS testdb WITH REPLICATION 2`, - stmt: &influxql.CreateDatabaseStatement{ - Name: "testdb", - IfNotExists: true, + Name: "testdb", RetentionPolicyCreate: true, - RetentionPolicyDuration: 0, - RetentionPolicyReplication: 2, - RetentionPolicyName: "default", + RetentionPolicyReplication: intptr(2), }, }, { s: `CREATE DATABASE testdb WITH NAME test_name`, stmt: &influxql.CreateDatabaseStatement{ - Name: "testdb", - IfNotExists: false, - RetentionPolicyCreate: true, - RetentionPolicyDuration: 0, - RetentionPolicyReplication: 1, - RetentionPolicyName: "test_name", - }, - }, - { - s: `CREATE DATABASE IF NOT EXISTS testdb WITH NAME test_name`, - stmt: &influxql.CreateDatabaseStatement{ - Name: "testdb", - IfNotExists: true, - RetentionPolicyCreate: true, - RetentionPolicyDuration: 0, - RetentionPolicyReplication: 1, - RetentionPolicyName: "test_name", + Name: "testdb", + RetentionPolicyCreate: true, + RetentionPolicyName: "test_name", }, }, { s: `CREATE DATABASE testdb WITH DURATION 24h REPLICATION 2 NAME test_name`, stmt: &influxql.CreateDatabaseStatement{ - Name: "testdb", - IfNotExists: false, + Name: "testdb", RetentionPolicyCreate: true, - RetentionPolicyDuration: 24 * time.Hour, - RetentionPolicyReplication: 2, + RetentionPolicyDuration: duration(24 * time.Hour), + RetentionPolicyReplication: intptr(2), RetentionPolicyName: "test_name", }, }, { - s: `CREATE DATABASE IF NOT EXISTS testdb WITH DURATION 24h REPLICATION 2 NAME test_name`, + s: `CREATE DATABASE testdb WITH DURATION 24h REPLICATION 2 SHARD DURATION 10m NAME test_name `, stmt: &influxql.CreateDatabaseStatement{ - Name: "testdb", - IfNotExists: true, - RetentionPolicyCreate: true, - RetentionPolicyDuration: 24 * time.Hour, - RetentionPolicyReplication: 2, - RetentionPolicyName: "test_name", + Name: "testdb", + RetentionPolicyCreate: true, + RetentionPolicyDuration: duration(24 * time.Hour), + RetentionPolicyReplication: intptr(2), + RetentionPolicyName: "test_name", + RetentionPolicyShardGroupDuration: 10 * time.Minute, }, }, @@ -1368,15 +1878,7 @@ { s: `DROP DATABASE testdb`, stmt: &influxql.DropDatabaseStatement{ - Name: "testdb", - IfExists: false, - }, - }, - { - s: `DROP DATABASE IF EXISTS testdb`, - stmt: &influxql.DropDatabaseStatement{ - Name: "testdb", - IfExists: true, + Name: "testdb", }, }, @@ -1546,46 +2048,67 @@ Default: true, }, }, + // CREATE RETENTION POLICY + { + s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION 2 SHARD DURATION 30m`, + stmt: &influxql.CreateRetentionPolicyStatement{ + Name: "policy1", + Database: "testdb", + Duration: time.Hour, + Replication: 2, + ShardGroupDuration: 30 * time.Minute, + }, + }, // ALTER RETENTION POLICY { s: `ALTER RETENTION POLICY policy1 ON testdb DURATION 1m REPLICATION 4 DEFAULT`, - stmt: 
newAlterRetentionPolicyStatement("policy1", "testdb", time.Minute, 4, true), + stmt: newAlterRetentionPolicyStatement("policy1", "testdb", time.Minute, -1, 4, true), }, // ALTER RETENTION POLICY with options in reverse order { s: `ALTER RETENTION POLICY policy1 ON testdb DEFAULT REPLICATION 4 DURATION 1m`, - stmt: newAlterRetentionPolicyStatement("policy1", "testdb", time.Minute, 4, true), + stmt: newAlterRetentionPolicyStatement("policy1", "testdb", time.Minute, -1, 4, true), }, // ALTER RETENTION POLICY with infinite retention { s: `ALTER RETENTION POLICY policy1 ON testdb DEFAULT REPLICATION 4 DURATION INF`, - stmt: newAlterRetentionPolicyStatement("policy1", "testdb", 0, 4, true), + stmt: newAlterRetentionPolicyStatement("policy1", "testdb", 0, -1, 4, true), }, // ALTER RETENTION POLICY without optional DURATION { s: `ALTER RETENTION POLICY policy1 ON testdb DEFAULT REPLICATION 4`, - stmt: newAlterRetentionPolicyStatement("policy1", "testdb", -1, 4, true), + stmt: newAlterRetentionPolicyStatement("policy1", "testdb", -1, -1, 4, true), }, // ALTER RETENTION POLICY without optional REPLICATION { s: `ALTER RETENTION POLICY policy1 ON testdb DEFAULT`, - stmt: newAlterRetentionPolicyStatement("policy1", "testdb", -1, -1, true), + stmt: newAlterRetentionPolicyStatement("policy1", "testdb", -1, -1, -1, true), }, // ALTER RETENTION POLICY without optional DEFAULT { s: `ALTER RETENTION POLICY policy1 ON testdb REPLICATION 4`, - stmt: newAlterRetentionPolicyStatement("policy1", "testdb", -1, 4, false), + stmt: newAlterRetentionPolicyStatement("policy1", "testdb", -1, -1, 4, false), }, // ALTER default retention policy unquoted { s: `ALTER RETENTION POLICY default ON testdb REPLICATION 4`, - stmt: newAlterRetentionPolicyStatement("default", "testdb", -1, 4, false), + stmt: newAlterRetentionPolicyStatement("default", "testdb", -1, -1, 4, false), + }, + // ALTER RETENTION POLICY with SHARD duration + { + s: `ALTER RETENTION POLICY policy1 ON testdb REPLICATION 4 SHARD DURATION 10m`, + stmt: newAlterRetentionPolicyStatement("policy1", "testdb", -1, 10*time.Minute, 4, false), + }, + // ALTER RETENTION POLICY with all options + { + s: `ALTER RETENTION POLICY default ON testdb DURATION 0s REPLICATION 4 SHARD DURATION 10m DEFAULT`, + stmt: newAlterRetentionPolicyStatement("default", "testdb", time.Duration(0), 10*time.Minute, 4, true), }, // SHOW STATS @@ -1655,32 +2178,51 @@ }, // Errors - {s: ``, err: `found EOF, expected SELECT, DELETE, SHOW, CREATE, DROP, GRANT, REVOKE, ALTER, SET at line 1, char 1`}, + {s: ``, err: `found EOF, expected SELECT, DELETE, SHOW, CREATE, DROP, GRANT, REVOKE, ALTER, SET, KILL at line 1, char 1`}, {s: `SELECT`, err: `found EOF, expected identifier, string, number, bool at line 1, char 8`}, {s: `SELECT time FROM myseries`, err: `at least 1 non-time field must be queried`}, - {s: `blah blah`, err: `found blah, expected SELECT, DELETE, SHOW, CREATE, DROP, GRANT, REVOKE, ALTER, SET at line 1, char 1`}, + {s: `blah blah`, err: `found blah, expected SELECT, DELETE, SHOW, CREATE, DROP, GRANT, REVOKE, ALTER, SET, KILL at line 1, char 1`}, {s: `SELECT field1 X`, err: `found X, expected FROM at line 1, char 15`}, {s: `SELECT field1 FROM "series" WHERE X +;`, err: `found ;, expected identifier, string, number, bool at line 1, char 38`}, {s: `SELECT field1 FROM myseries GROUP`, err: `found EOF, expected BY at line 1, char 35`}, - {s: `SELECT field1 FROM myseries LIMIT`, err: `found EOF, expected number at line 1, char 35`}, - {s: `SELECT field1 FROM myseries LIMIT 10.5`, err: 
`fractional parts not allowed in LIMIT at line 1, char 35`}, + {s: `SELECT field1 FROM myseries LIMIT`, err: `found EOF, expected integer at line 1, char 35`}, + {s: `SELECT field1 FROM myseries LIMIT 10.5`, err: `found 10.5, expected integer at line 1, char 35`}, + {s: `SELECT count(max(value)) FROM myseries`, err: `expected field argument in count()`}, + {s: `SELECT count(distinct('value')) FROM myseries`, err: `expected field argument in distinct()`}, + {s: `SELECT distinct('value') FROM myseries`, err: `expected field argument in distinct()`}, + {s: `SELECT min(max(value)) FROM myseries`, err: `expected field argument in min()`}, + {s: `SELECT min(distinct(value)) FROM myseries`, err: `expected field argument in min()`}, + {s: `SELECT max(max(value)) FROM myseries`, err: `expected field argument in max()`}, + {s: `SELECT sum(max(value)) FROM myseries`, err: `expected field argument in sum()`}, + {s: `SELECT first(max(value)) FROM myseries`, err: `expected field argument in first()`}, + {s: `SELECT last(max(value)) FROM myseries`, err: `expected field argument in last()`}, + {s: `SELECT mean(max(value)) FROM myseries`, err: `expected field argument in mean()`}, + {s: `SELECT median(max(value)) FROM myseries`, err: `expected field argument in median()`}, + {s: `SELECT mode(max(value)) FROM myseries`, err: `expected field argument in mode()`}, + {s: `SELECT stddev(max(value)) FROM myseries`, err: `expected field argument in stddev()`}, + {s: `SELECT spread(max(value)) FROM myseries`, err: `expected field argument in spread()`}, {s: `SELECT top() FROM myseries`, err: `invalid number of arguments for top, expected at least 2, got 0`}, {s: `SELECT top(field1) FROM myseries`, err: `invalid number of arguments for top, expected at least 2, got 1`}, {s: `SELECT top(field1,foo) FROM myseries`, err: `expected integer as last argument in top(), found foo`}, {s: `SELECT top(field1,host,'server',foo) FROM myseries`, err: `expected integer as last argument in top(), found foo`}, - {s: `SELECT top(field1,5,'server',2) FROM myseries`, err: `only fields or tags are allowed in top(), found 5.000`}, + {s: `SELECT top(field1,5,'server',2) FROM myseries`, err: `only fields or tags are allowed in top(), found 5`}, {s: `SELECT top(field1,max(foo),'server',2) FROM myseries`, err: `only fields or tags are allowed in top(), found max(foo)`}, + {s: `SELECT top(value, 10) + count(value) FROM myseries`, err: `cannot use top() inside of a binary expression`}, + {s: `SELECT top(max(value), 10) FROM myseries`, err: `only fields or tags are allowed in top(), found max(value)`}, {s: `SELECT bottom() FROM myseries`, err: `invalid number of arguments for bottom, expected at least 2, got 0`}, {s: `SELECT bottom(field1) FROM myseries`, err: `invalid number of arguments for bottom, expected at least 2, got 1`}, {s: `SELECT bottom(field1,foo) FROM myseries`, err: `expected integer as last argument in bottom(), found foo`}, {s: `SELECT bottom(field1,host,'server',foo) FROM myseries`, err: `expected integer as last argument in bottom(), found foo`}, - {s: `SELECT bottom(field1,5,'server',2) FROM myseries`, err: `only fields or tags are allowed in bottom(), found 5.000`}, + {s: `SELECT bottom(field1,5,'server',2) FROM myseries`, err: `only fields or tags are allowed in bottom(), found 5`}, {s: `SELECT bottom(field1,max(foo),'server',2) FROM myseries`, err: `only fields or tags are allowed in bottom(), found max(foo)`}, + {s: `SELECT bottom(value, 10) + count(value) FROM myseries`, err: `cannot use bottom() inside of a binary 
expression`}, + {s: `SELECT bottom(max(value), 10) FROM myseries`, err: `only fields or tags are allowed in bottom(), found max(value)`}, {s: `SELECT percentile() FROM myseries`, err: `invalid number of arguments for percentile, expected 2, got 0`}, {s: `SELECT percentile(field1) FROM myseries`, err: `invalid number of arguments for percentile, expected 2, got 1`}, {s: `SELECT percentile(field1, foo) FROM myseries`, err: `expected float argument in percentile()`}, - {s: `SELECT field1 FROM myseries OFFSET`, err: `found EOF, expected number at line 1, char 36`}, - {s: `SELECT field1 FROM myseries OFFSET 10.5`, err: `fractional parts not allowed in OFFSET at line 1, char 36`}, + {s: `SELECT percentile(max(field1), 75) FROM myseries`, err: `expected field argument in percentile()`}, + {s: `SELECT field1 FROM myseries OFFSET`, err: `found EOF, expected integer at line 1, char 36`}, + {s: `SELECT field1 FROM myseries OFFSET 10.5`, err: `found 10.5, expected integer at line 1, char 36`}, {s: `SELECT field1 FROM myseries ORDER`, err: `found EOF, expected BY at line 1, char 35`}, {s: `SELECT field1 FROM myseries ORDER BY`, err: `found EOF, expected identifier, ASC, DESC at line 1, char 38`}, {s: `SELECT field1 FROM myseries ORDER BY /`, err: `found /, expected identifier, ASC, DESC at line 1, char 38`}, @@ -1692,14 +2234,16 @@ {s: `SELECT count(value), value FROM foo`, err: `mixing aggregate and non-aggregate queries is not supported`}, {s: `SELECT count(value)/10, value FROM foo`, err: `mixing aggregate and non-aggregate queries is not supported`}, {s: `SELECT count(value) FROM foo group by time(1s)`, err: `aggregate functions with GROUP BY time require a WHERE time clause`}, + {s: `SELECT count(value) FROM foo group by time(500ms)`, err: `aggregate functions with GROUP BY time require a WHERE time clause`}, {s: `SELECT count(value) FROM foo group by time(1s) where host = 'hosta.influxdb.org'`, err: `aggregate functions with GROUP BY time require a WHERE time clause`}, {s: `SELECT count(value) FROM foo group by time`, err: `time() is a function and expects at least one argument`}, {s: `SELECT count(value) FROM foo group by 'time'`, err: `only time and tag dimensions allowed`}, - {s: `SELECT count(value) FROM foo where time > now() and time < now() group by time()`, err: `time dimension expected one argument`}, - {s: `SELECT count(value) FROM foo where time > now() and time < now() group by time(b)`, err: `time dimension must have one duration argument`}, + {s: `SELECT count(value) FROM foo where time > now() and time < now() group by time()`, err: `time dimension expected 1 or 2 arguments`}, + {s: `SELECT count(value) FROM foo where time > now() and time < now() group by time(b)`, err: `time dimension must have duration argument`}, {s: `SELECT count(value) FROM foo where time > now() and time < now() group by time(1s), time(2s)`, err: `multiple time dimensions not allowed`}, + {s: `SELECT count(value) FROM foo where time > now() and time < now() group by time(1s, b)`, err: `time dimension offset must be duration or now()`}, {s: `SELECT field1 FROM 12`, err: `found 12, expected identifier at line 1, char 20`}, - {s: `SELECT 
1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 FROM myseries`, err: `unable to parse number at line 1, char 8`}, + {s: `SELECT 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 FROM myseries`, err: `unable to parse integer at line 1, char 8`}, {s: `SELECT 10.5h FROM myseries`, err: `found h, expected FROM at line 1, char 12`}, {s: `SELECT distinct(field1), sum(field1) FROM myseries`, err: `aggregate function distinct() can not be combined with other functions or fields`}, {s: `SELECT distinct(field1), field2 FROM myseries`, err: `aggregate function distinct() can not be combined with other functions or fields`}, @@ -1719,6 +2263,7 @@ {s: `SELECT derivative(bottom(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for bottom, expected at least 2, got 1`}, {s: `SELECT derivative(max()) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for max, expected 1, got 0`}, {s: `SELECT derivative(percentile(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for percentile, expected 2, got 1`}, + {s: `SELECT derivative(mean(value), 1h) FROM myseries where time < now() and time > now() - 1d`, err: `derivative aggregate requires a GROUP BY interval`}, {s: `SELECT non_negative_derivative(), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`}, {s: `select non_negative_derivative() from myseries`, err: `invalid number of arguments for non_negative_derivative, expected at least 1 but no more than 2, got 0`}, {s: `select non_negative_derivative(mean(value), 1h, 3) from myseries`, err: `invalid number of arguments for non_negative_derivative, expected at least 1 but no more than 2, got 3`}, @@ -1726,33 +2271,74 @@ {s: `SELECT non_negative_derivative(top(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for top, expected at least 2, got 1`}, {s: `SELECT non_negative_derivative(bottom(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for bottom, expected at least 2, got 1`}, {s: `SELECT non_negative_derivative(max()) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for max, expected 1, got 0`}, + {s: `SELECT non_negative_derivative(mean(value), 1h) FROM myseries where time < now() and time > now() - 1d`, err: `non_negative_derivative aggregate requires a GROUP BY interval`}, {s: `SELECT 
non_negative_derivative(percentile(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for percentile, expected 2, got 1`}, + {s: `SELECT difference(), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`}, + {s: `SELECT difference() from myseries`, err: `invalid number of arguments for difference, expected 1, got 0`}, + {s: `SELECT difference(value) FROM myseries group by time(1h)`, err: `aggregate function required inside the call to difference`}, + {s: `SELECT difference(top(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for top, expected at least 2, got 1`}, + {s: `SELECT difference(bottom(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for bottom, expected at least 2, got 1`}, + {s: `SELECT difference(max()) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for max, expected 1, got 0`}, + {s: `SELECT difference(percentile(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for percentile, expected 2, got 1`}, + {s: `SELECT difference(mean(value)) FROM myseries where time < now() and time > now() - 1d`, err: `difference aggregate requires a GROUP BY interval`}, + {s: `SELECT moving_average(), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`}, + {s: `SELECT moving_average() from myseries`, err: `invalid number of arguments for moving_average, expected 2, got 0`}, + {s: `SELECT moving_average(value) FROM myseries`, err: `invalid number of arguments for moving_average, expected 2, got 1`}, + {s: `SELECT moving_average(value, 2) FROM myseries group by time(1h)`, err: `aggregate function required inside the call to moving_average`}, + {s: `SELECT moving_average(top(value), 2) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for top, expected at least 2, got 1`}, + {s: `SELECT moving_average(bottom(value), 2) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for bottom, expected at least 2, got 1`}, + {s: `SELECT moving_average(max(), 2) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for max, expected 1, got 0`}, + {s: `SELECT moving_average(percentile(value), 2) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for percentile, expected 2, got 1`}, + {s: `SELECT moving_average(mean(value), 2) FROM myseries where time < now() and time > now() - 1d`, err: `moving_average aggregate requires a GROUP BY interval`}, + {s: `SELECT cumulative_sum(), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`}, + {s: `SELECT cumulative_sum() from myseries`, err: `invalid number of arguments for cumulative_sum, expected 1, got 0`}, + {s: `SELECT cumulative_sum(value) FROM myseries group by time(1h)`, err: `aggregate function required inside the call to cumulative_sum`}, + {s: `SELECT cumulative_sum(top(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for top, expected at least 2, got 1`}, + {s: `SELECT cumulative_sum(bottom(value)) FROM myseries where time < now() and 
time > now() - 1d group by time(1h)`, err: `invalid number of arguments for bottom, expected at least 2, got 1`}, + {s: `SELECT cumulative_sum(max()) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for max, expected 1, got 0`}, + {s: `SELECT cumulative_sum(percentile(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for percentile, expected 2, got 1`}, + {s: `SELECT cumulative_sum(mean(value)) FROM myseries where time < now() and time > now() - 1d`, err: `cumulative_sum aggregate requires a GROUP BY interval`}, + {s: `SELECT holt_winters(value) FROM myseries where time < now() and time > now() - 1d`, err: `invalid number of arguments for holt_winters, expected 3, got 1`}, + {s: `SELECT holt_winters(value, 10, 2) FROM myseries where time < now() and time > now() - 1d`, err: `must use aggregate function with holt_winters`}, + {s: `SELECT holt_winters(min(value), 10, 2) FROM myseries where time < now() and time > now() - 1d`, err: `holt_winters aggregate requires a GROUP BY interval`}, + {s: `SELECT holt_winters(min(value), 0, 2) FROM myseries where time < now() and time > now() - 1d GROUP BY time(1d)`, err: `second arg to holt_winters must be greater than 0, got 0`}, + {s: `SELECT holt_winters(min(value), false, 2) FROM myseries where time < now() and time > now() - 1d GROUP BY time(1d)`, err: `expected integer argument as second arg in holt_winters`}, + {s: `SELECT holt_winters(min(value), 10, 'string') FROM myseries where time < now() and time > now() - 1d GROUP BY time(1d)`, err: `expected integer argument as third arg in holt_winters`}, {s: `SELECT field1 from myseries WHERE host =~ 'asd' LIMIT 1`, err: `found asd, expected regex at line 1, char 42`}, {s: `SELECT value > 2 FROM cpu`, err: `invalid operator > in SELECT clause at line 1, char 8; operator is intended for WHERE clause`}, {s: `SELECT value = 2 FROM cpu`, err: `invalid operator = in SELECT clause at line 1, char 8; operator is intended for WHERE clause`}, {s: `SELECT s =~ /foo/ FROM cpu`, err: `invalid operator =~ in SELECT clause at line 1, char 8; operator is intended for WHERE clause`}, - // See issues https://github.com/influxdb/influxdb/issues/1647 - // and https://github.com/influxdb/influxdb/issues/4404 + {s: `SELECT mean(value) + value FROM cpu WHERE time < now() and time > now() - 1h GROUP BY time(10m)`, err: `binary expressions cannot mix aggregates and raw fields`}, + // TODO: Remove this restriction in the future: https://github.com/influxdata/influxdb/issues/5968 + {s: `SELECT mean(cpu_total - cpu_idle) FROM cpu`, err: `expected field argument in mean()`}, + {s: `SELECT derivative(mean(cpu_total - cpu_idle), 1s) FROM cpu WHERE time < now() AND time > now() - 1d GROUP BY time(1h)`, err: `expected field argument in mean()`}, + // TODO: The error message will change when math is allowed inside an aggregate: https://github.com/influxdata/influxdb/pull/5990#issuecomment-195565870 + {s: `SELECT count(foo + sum(bar)) FROM cpu`, err: `expected field argument in count()`}, + {s: `SELECT (count(foo + sum(bar))) FROM cpu`, err: `expected field argument in count()`}, + {s: `SELECT sum(value) + count(foo + sum(bar)) FROM cpu`, err: `binary expressions cannot mix aggregates and raw fields`}, + {s: `SELECT mean(value) FROM cpu FILL + value`, err: `fill must be a function call`}, + // See issues https://github.com/influxdata/influxdb/issues/1647 + // and https://github.com/influxdata/influxdb/issues/4404 
//{s: `DELETE`, err: `found EOF, expected FROM at line 1, char 8`}, //{s: `DELETE FROM`, err: `found EOF, expected identifier at line 1, char 13`}, //{s: `DELETE FROM myseries WHERE`, err: `found EOF, expected identifier, string, number, bool at line 1, char 28`}, - {s: `DELETE`, err: `DELETE FROM is currently not supported. Use DROP SERIES or DROP MEASUREMENT instead`}, - {s: `DELETE FROM`, err: `DELETE FROM is currently not supported. Use DROP SERIES or DROP MEASUREMENT instead`}, - {s: `DELETE FROM myseries WHERE`, err: `DELETE FROM is currently not supported. Use DROP SERIES or DROP MEASUREMENT instead`}, + {s: `DELETE`, err: `found EOF, expected FROM, WHERE at line 1, char 8`}, + {s: `DELETE FROM`, err: `found EOF, expected identifier at line 1, char 13`}, + {s: `DELETE FROM myseries WHERE`, err: `found EOF, expected identifier, string, number, bool at line 1, char 28`}, + {s: `DELETE FROM "foo".myseries`, err: `retention policy not supported at line 1, char 1`}, + {s: `DELETE FROM foo..myseries`, err: `database not supported at line 1, char 1`}, {s: `DROP MEASUREMENT`, err: `found EOF, expected identifier at line 1, char 18`}, {s: `DROP SERIES`, err: `found EOF, expected FROM, WHERE at line 1, char 13`}, {s: `DROP SERIES FROM`, err: `found EOF, expected identifier at line 1, char 18`}, {s: `DROP SERIES FROM src WHERE`, err: `found EOF, expected identifier, string, number, bool at line 1, char 28`}, - {s: `DROP META SERVER`, err: `found EOF, expected number at line 1, char 18`}, - {s: `DROP DATA SERVER abc`, err: `found abc, expected number at line 1, char 18`}, + {s: `DROP SERIES FROM "foo".myseries`, err: `retention policy not supported at line 1, char 1`}, + {s: `DROP SERIES FROM foo..myseries`, err: `database not supported at line 1, char 1`}, {s: `SHOW CONTINUOUS`, err: `found EOF, expected QUERIES at line 1, char 17`}, {s: `SHOW RETENTION`, err: `found EOF, expected POLICIES at line 1, char 16`}, {s: `SHOW RETENTION ON`, err: `found ON, expected POLICIES at line 1, char 16`}, - {s: `SHOW RETENTION POLICIES`, err: `found EOF, expected ON at line 1, char 25`}, - {s: `SHOW RETENTION POLICIES mydb`, err: `found mydb, expected ON at line 1, char 25`}, {s: `SHOW RETENTION POLICIES ON`, err: `found EOF, expected identifier at line 1, char 28`}, {s: `SHOW SHARD`, err: `found EOF, expected GROUPS at line 1, char 12`}, - {s: `SHOW FOO`, err: `found FOO, expected CONTINUOUS, DATABASES, DIAGNOSTICS, FIELD, GRANTS, MEASUREMENTS, RETENTION, SERIES, SERVERS, SHARD, SHARDS, STATS, SUBSCRIPTIONS, TAG, USERS at line 1, char 6`}, + {s: `SHOW FOO`, err: `found FOO, expected CONTINUOUS, DATABASES, DIAGNOSTICS, FIELD, GRANTS, MEASUREMENTS, QUERIES, RETENTION, SERIES, SHARD, SHARDS, STATS, SUBSCRIPTIONS, TAG, USERS at line 1, char 6`}, {s: `SHOW STATS FOR`, err: `found EOF, expected string at line 1, char 16`}, {s: `SHOW DIAGNOSTICS FOR`, err: `found EOF, expected string at line 1, char 22`}, {s: `SHOW GRANTS`, err: `found EOF, expected FOR at line 1, char 13`}, @@ -1765,23 +2351,15 @@ {s: `CREATE CONTINUOUS QUERY`, err: `found EOF, expected identifier at line 1, char 25`}, {s: `CREATE CONTINUOUS QUERY cq ON db RESAMPLE FOR 5s BEGIN SELECT mean(value) INTO cpu_mean FROM cpu GROUP BY time(10s) END`, err: `FOR duration must be >= GROUP BY time duration: must be a minimum of 10s, got 5s`}, {s: `CREATE CONTINUOUS QUERY cq ON db RESAMPLE EVERY 10s FOR 5s BEGIN SELECT mean(value) INTO cpu_mean FROM cpu GROUP BY time(5s) END`, err: `FOR duration must be >= GROUP BY time duration: must be a minimum of 10s, 
got 5s`}, - {s: `DROP FOO`, err: `found FOO, expected SERIES, CONTINUOUS, MEASUREMENT, SERVER, SUBSCRIPTION at line 1, char 6`}, + {s: `DROP FOO`, err: `found FOO, expected CONTINUOUS, MEASUREMENT, RETENTION, SERIES, SHARD, SUBSCRIPTION, USER at line 1, char 6`}, {s: `CREATE FOO`, err: `found FOO, expected CONTINUOUS, DATABASE, USER, RETENTION, SUBSCRIPTION at line 1, char 8`}, {s: `CREATE DATABASE`, err: `found EOF, expected identifier at line 1, char 17`}, - {s: `CREATE DATABASE "testdb" WITH`, err: `found EOF, expected DURATION, REPLICATION, NAME at line 1, char 31`}, + {s: `CREATE DATABASE "testdb" WITH`, err: `found EOF, expected DURATION, NAME, REPLICATION, SHARD at line 1, char 31`}, {s: `CREATE DATABASE "testdb" WITH DURATION`, err: `found EOF, expected duration at line 1, char 40`}, - {s: `CREATE DATABASE "testdb" WITH REPLICATION`, err: `found EOF, expected number at line 1, char 43`}, + {s: `CREATE DATABASE "testdb" WITH REPLICATION`, err: `found EOF, expected integer at line 1, char 43`}, {s: `CREATE DATABASE "testdb" WITH NAME`, err: `found EOF, expected identifier at line 1, char 36`}, - {s: `CREATE DATABASE IF`, err: `found EOF, expected NOT at line 1, char 20`}, - {s: `CREATE DATABASE IF NOT`, err: `found EOF, expected EXISTS at line 1, char 24`}, - {s: `CREATE DATABASE IF NOT EXISTS`, err: `found EOF, expected identifier at line 1, char 31`}, - {s: `CREATE DATABASE IF NOT EXISTS "testdb" WITH`, err: `found EOF, expected DURATION, REPLICATION, NAME at line 1, char 45`}, - {s: `CREATE DATABASE IF NOT EXISTS "testdb" WITH DURATION`, err: `found EOF, expected duration at line 1, char 54`}, - {s: `CREATE DATABASE IF NOT EXISTS "testdb" WITH REPLICATION`, err: `found EOF, expected number at line 1, char 57`}, - {s: `CREATE DATABASE IF NOT EXISTS "testdb" WITH NAME`, err: `found EOF, expected identifier at line 1, char 50`}, + {s: `CREATE DATABASE "testdb" WITH SHARD`, err: `found EOF, expected DURATION at line 1, char 37`}, {s: `DROP DATABASE`, err: `found EOF, expected identifier at line 1, char 15`}, - {s: `DROP DATABASE IF`, err: `found EOF, expected EXISTS at line 1, char 18`}, - {s: `DROP DATABASE IF EXISTS`, err: `found EOF, expected identifier at line 1, char 25`}, {s: `DROP RETENTION`, err: `found EOF, expected POLICY at line 1, char 16`}, {s: `DROP RETENTION POLICY`, err: `found EOF, expected identifier at line 1, char 23`}, {s: `DROP RETENTION POLICY "1h.cpu"`, err: `found EOF, expected ON at line 1, char 31`}, @@ -1837,6 +2415,9 @@ {s: `GRANT ALL PRIVILEGES ON testdb TO`, err: `found EOF, expected identifier at line 1, char 35`}, {s: `GRANT ALL TO`, err: `found EOF, expected identifier at line 1, char 14`}, {s: `GRANT ALL PRIVILEGES TO`, err: `found EOF, expected identifier at line 1, char 25`}, + {s: `KILL`, err: `found EOF, expected QUERY at line 1, char 6`}, + {s: `KILL QUERY 10s`, err: `found 10s, expected integer at line 1, char 12`}, + {s: `KILL QUERY 4 ON 'host'`, err: `found host, expected identifier at line 1, char 16`}, {s: `REVOKE`, err: `found EOF, expected READ, WRITE, ALL [PRIVILEGES] at line 1, char 8`}, {s: `REVOKE BOGUS`, err: `found BOGUS, expected READ, WRITE, ALL [PRIVILEGES] at line 1, char 8`}, {s: `REVOKE READ`, err: `found EOF, expected ON at line 1, char 13`}, @@ -1877,16 +2458,17 @@ {s: `CREATE RETENTION POLICY policy1 ON testdb DURATION`, err: `found EOF, expected duration at line 1, char 52`}, {s: `CREATE RETENTION POLICY policy1 ON testdb DURATION bad`, err: `found bad, expected duration at line 1, char 52`}, {s: `CREATE RETENTION POLICY 
policy1 ON testdb DURATION 1h`, err: `found EOF, expected REPLICATION at line 1, char 54`}, - {s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION`, err: `found EOF, expected number at line 1, char 67`}, - {s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION 3.14`, err: `number must be an integer at line 1, char 67`}, + {s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION`, err: `found EOF, expected integer at line 1, char 67`}, + {s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION 3.14`, err: `found 3.14, expected integer at line 1, char 67`}, {s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION 0`, err: `invalid value 0: must be 1 <= n <= 2147483647 at line 1, char 67`}, - {s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION bad`, err: `found bad, expected number at line 1, char 67`}, - {s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION 1 foo`, err: `found foo, expected DEFAULT at line 1, char 69`}, + {s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION bad`, err: `found bad, expected integer at line 1, char 67`}, {s: `ALTER`, err: `found EOF, expected RETENTION at line 1, char 7`}, {s: `ALTER RETENTION`, err: `found EOF, expected POLICY at line 1, char 17`}, {s: `ALTER RETENTION POLICY`, err: `found EOF, expected identifier at line 1, char 24`}, {s: `ALTER RETENTION POLICY policy1`, err: `found EOF, expected ON at line 1, char 32`}, {s: `ALTER RETENTION POLICY policy1 ON`, err: `found EOF, expected identifier at line 1, char 35`}, - {s: `ALTER RETENTION POLICY policy1 ON testdb`, err: `found EOF, expected DURATION, RETENTION, DEFAULT at line 1, char 42`}, + {s: `ALTER RETENTION POLICY policy1 ON testdb`, err: `found EOF, expected DURATION, REPLICATION, SHARD, DEFAULT at line 1, char 42`}, + {s: `ALTER RETENTION POLICY policy1 ON testdb REPLICATION 1 REPLICATION 2`, err: `found duplicate REPLICATION option at line 1, char 56`}, + {s: `ALTER RETENTION POLICY policy1 ON testdb DURATION 15251w`, err: `overflowed duration 15251w: choose a smaller duration or INF at line 1, char 51`}, {s: `SET`, err: `found EOF, expected PASSWORD at line 1, char 5`}, {s: `SET PASSWORD`, err: `found EOF, expected FOR at line 1, char 14`}, {s: `SET PASSWORD something`, err: `found something, expected FOR at line 1, char 14`}, @@ -1894,14 +2476,19 @@ {s: `SET PASSWORD FOR dejan`, err: `found EOF, expected = at line 1, char 24`}, {s: `SET PASSWORD FOR dejan =`, err: `found EOF, expected string at line 1, char 25`}, {s: `SET PASSWORD FOR dejan = bla`, err: `found bla, expected string at line 1, char 26`}, + {s: `$SHOW$DATABASES`, err: `found $SHOW, expected SELECT, DELETE, SHOW, CREATE, DROP, GRANT, REVOKE, ALTER, SET, KILL at line 1, char 1`}, + {s: `SELECT * FROM cpu WHERE "tagkey" = $$`, err: `empty bound parameter`}, } for i, tt := range tests { if tt.skip { - t.Logf("skipping test of '%s'", tt.s) continue } - stmt, err := influxql.NewParser(strings.NewReader(tt.s)).ParseStatement() + p := influxql.NewParser(strings.NewReader(tt.s)) + if tt.params != nil { + p.SetParams(tt.params) + } + stmt, err := p.ParseStatement() // We are memoizing a field so for testing we need to... if s, ok := tt.stmt.(*influxql.SelectStatement); ok { @@ -1914,10 +2501,28 @@ if !reflect.DeepEqual(tt.err, errstring(err)) { t.Errorf("%d. 
%q: error mismatch:\n exp=%s\n got=%s\n\n", i, tt.s, tt.err, err) - } else if tt.err == "" && !reflect.DeepEqual(tt.stmt, stmt) { - t.Logf("\n# %s\nexp=%s\ngot=%s\n", tt.s, mustMarshalJSON(tt.stmt), mustMarshalJSON(stmt)) - t.Logf("\nSQL exp=%s\nSQL got=%s\n", tt.stmt.String(), stmt.String()) - t.Errorf("%d. %q\n\nstmt mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.s, tt.stmt, stmt) + } else if tt.err == "" { + if !reflect.DeepEqual(tt.stmt, stmt) { + t.Logf("\n# %s\nexp=%s\ngot=%s\n", tt.s, mustMarshalJSON(tt.stmt), mustMarshalJSON(stmt)) + t.Logf("\nSQL exp=%s\nSQL got=%s\n", tt.stmt.String(), stmt.String()) + t.Errorf("%d. %q\n\nstmt mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.s, tt.stmt, stmt) + } else { + // Attempt to reparse the statement as a string and confirm it parses the same. + // Skip this if we have some kind of statement with a password since those will never be reparsed. + switch stmt.(type) { + case *influxql.CreateUserStatement, *influxql.SetPasswordUserStatement: + continue + } + + stmt2, err := influxql.ParseStatement(stmt.String()) + if err != nil { + t.Errorf("%d. %q: unable to parse statement string: %s", i, stmt.String(), err) + } else if !reflect.DeepEqual(tt.stmt, stmt2) { + t.Logf("\n# %s\nexp=%s\ngot=%s\n", tt.s, mustMarshalJSON(tt.stmt), mustMarshalJSON(stmt2)) + t.Logf("\nSQL exp=%s\nSQL got=%s\n", tt.stmt.String(), stmt2.String()) + t.Errorf("%d. %q\n\nstmt reparse mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.s, tt.stmt, stmt2) + } + } } } } @@ -1930,24 +2535,22 @@ err string }{ // Primitives - {s: `100`, expr: &influxql.NumberLiteral{Val: 100}}, + {s: `100.0`, expr: &influxql.NumberLiteral{Val: 100}}, + {s: `100`, expr: &influxql.IntegerLiteral{Val: 100}}, {s: `'foo bar'`, expr: &influxql.StringLiteral{Val: "foo bar"}}, {s: `true`, expr: &influxql.BooleanLiteral{Val: true}}, {s: `false`, expr: &influxql.BooleanLiteral{Val: false}}, {s: `my_ident`, expr: &influxql.VarRef{Val: "my_ident"}}, - {s: `'2000-01-01 00:00:00'`, expr: &influxql.TimeLiteral{Val: mustParseTime("2000-01-01T00:00:00Z")}}, - {s: `'2000-01-01 00:00:00.232'`, expr: &influxql.TimeLiteral{Val: mustParseTime("2000-01-01T00:00:00.232Z")}}, - {s: `'2000-01-32 00:00:00'`, err: `unable to parse datetime at line 1, char 1`}, - {s: `'2000-01-01'`, expr: &influxql.TimeLiteral{Val: mustParseTime("2000-01-01T00:00:00Z")}}, - {s: `'2000-01-99'`, err: `unable to parse date at line 1, char 1`}, + {s: `'2000-01-01 00:00:00'`, expr: &influxql.StringLiteral{Val: "2000-01-01 00:00:00"}}, + {s: `'2000-01-01'`, expr: &influxql.StringLiteral{Val: "2000-01-01"}}, // Simple binary expression { s: `1 + 2`, expr: &influxql.BinaryExpr{ Op: influxql.ADD, - LHS: &influxql.NumberLiteral{Val: 1}, - RHS: &influxql.NumberLiteral{Val: 2}, + LHS: &influxql.IntegerLiteral{Val: 1}, + RHS: &influxql.IntegerLiteral{Val: 2}, }, }, @@ -1958,10 +2561,10 @@ Op: influxql.ADD, LHS: &influxql.BinaryExpr{ Op: influxql.MUL, - LHS: &influxql.NumberLiteral{Val: 1}, - RHS: &influxql.NumberLiteral{Val: 2}, + LHS: &influxql.IntegerLiteral{Val: 1}, + RHS: &influxql.IntegerLiteral{Val: 2}, }, - RHS: &influxql.NumberLiteral{Val: 3}, + RHS: &influxql.IntegerLiteral{Val: 3}, }, }, @@ -1970,11 +2573,11 @@ s: `1 + 2 * 3`, expr: &influxql.BinaryExpr{ Op: influxql.ADD, - LHS: &influxql.NumberLiteral{Val: 1}, + LHS: &influxql.IntegerLiteral{Val: 1}, RHS: &influxql.BinaryExpr{ Op: influxql.MUL, - LHS: &influxql.NumberLiteral{Val: 2}, - RHS: &influxql.NumberLiteral{Val: 3}, + LHS: &influxql.IntegerLiteral{Val: 2}, + RHS: &influxql.IntegerLiteral{Val: 3}, }, 
}, }, @@ -1987,11 +2590,11 @@ LHS: &influxql.ParenExpr{ Expr: &influxql.BinaryExpr{ Op: influxql.ADD, - LHS: &influxql.NumberLiteral{Val: 1}, - RHS: &influxql.NumberLiteral{Val: 2}, + LHS: &influxql.IntegerLiteral{Val: 1}, + RHS: &influxql.IntegerLiteral{Val: 2}, }, }, - RHS: &influxql.NumberLiteral{Val: 3}, + RHS: &influxql.IntegerLiteral{Val: 3}, }, }, @@ -2002,10 +2605,10 @@ Op: influxql.MUL, LHS: &influxql.BinaryExpr{ Op: influxql.MUL, - LHS: &influxql.NumberLiteral{Val: 1}, - RHS: &influxql.NumberLiteral{Val: 2}, + LHS: &influxql.IntegerLiteral{Val: 1}, + RHS: &influxql.IntegerLiteral{Val: 2}, }, - RHS: &influxql.NumberLiteral{Val: 3}, + RHS: &influxql.IntegerLiteral{Val: 3}, }, }, @@ -2041,14 +2644,14 @@ LHS: &influxql.BinaryExpr{ Op: influxql.ADD, LHS: &influxql.VarRef{Val: "value"}, - RHS: &influxql.NumberLiteral{Val: 3}, + RHS: &influxql.IntegerLiteral{Val: 3}, }, - RHS: &influxql.NumberLiteral{Val: 30}, + RHS: &influxql.IntegerLiteral{Val: 30}, }, RHS: &influxql.BinaryExpr{ Op: influxql.ADD, - LHS: &influxql.NumberLiteral{Val: 1}, - RHS: &influxql.NumberLiteral{Val: 2}, + LHS: &influxql.IntegerLiteral{Val: 1}, + RHS: &influxql.IntegerLiteral{Val: 2}, }, }, RHS: &influxql.BooleanLiteral{Val: true}, @@ -2095,11 +2698,11 @@ expr: &influxql.Call{ Name: "my_func", Args: []influxql.Expr{ - &influxql.NumberLiteral{Val: 1}, + &influxql.IntegerLiteral{Val: 1}, &influxql.BinaryExpr{ Op: influxql.ADD, - LHS: &influxql.NumberLiteral{Val: 2}, - RHS: &influxql.NumberLiteral{Val: 3}, + LHS: &influxql.IntegerLiteral{Val: 2}, + RHS: &influxql.IntegerLiteral{Val: 3}, }, }, }, @@ -2131,6 +2734,10 @@ {s: `2h`, d: 2 * time.Hour}, {s: `2d`, d: 2 * 24 * time.Hour}, {s: `2w`, d: 2 * 7 * 24 * time.Hour}, + {s: `1h30m`, d: time.Hour + 30*time.Minute}, + {s: `30ms3000u`, d: 30*time.Millisecond + 3000*time.Microsecond}, + {s: `-5s`, d: -5 * time.Second}, + {s: `-5m30s`, d: -5*time.Minute - 30*time.Second}, {s: ``, err: "invalid duration"}, {s: `3`, err: "invalid duration"}, @@ -2199,7 +2806,7 @@ ident []string s string }{ - {[]string{``}, ``}, + {[]string{``}, `""`}, {[]string{`select`}, `"select"`}, {[]string{`in-bytes`}, `"in-bytes"`}, {[]string{`foo`, `bar`}, `"foo".bar`}, @@ -2215,6 +2822,58 @@ } } +// Ensure DeleteSeriesStatement can convert to a string +func TestDeleteSeriesStatement_String(t *testing.T) { + var tests = []struct { + s string + stmt influxql.Statement + }{ + { + s: `DELETE FROM src`, + stmt: &influxql.DeleteSeriesStatement{Sources: []influxql.Source{&influxql.Measurement{Name: "src"}}}, + }, + { + s: `DELETE FROM src WHERE host = 'hosta.influxdb.org'`, + stmt: &influxql.DeleteSeriesStatement{ + Sources: []influxql.Source{&influxql.Measurement{Name: "src"}}, + Condition: &influxql.BinaryExpr{ + Op: influxql.EQ, + LHS: &influxql.VarRef{Val: "host"}, + RHS: &influxql.StringLiteral{Val: "hosta.influxdb.org"}, + }, + }, + }, + { + s: `DELETE FROM src WHERE host = 'hosta.influxdb.org'`, + stmt: &influxql.DeleteSeriesStatement{ + Sources: []influxql.Source{&influxql.Measurement{Name: "src"}}, + Condition: &influxql.BinaryExpr{ + Op: influxql.EQ, + LHS: &influxql.VarRef{Val: "host"}, + RHS: &influxql.StringLiteral{Val: "hosta.influxdb.org"}, + }, + }, + }, + { + s: `DELETE WHERE host = 'hosta.influxdb.org'`, + stmt: &influxql.DeleteSeriesStatement{ + Condition: &influxql.BinaryExpr{ + Op: influxql.EQ, + LHS: &influxql.VarRef{Val: "host"}, + RHS: &influxql.StringLiteral{Val: "hosta.influxdb.org"}, + }, + }, + }, + } + + for _, test := range tests { + s := test.stmt.String() + if s != test.s { + 
t.Errorf("error rendering string. expected %s, actual: %s", test.s, s) + } + } +} + // Ensure DropSeriesStatement can convert to a string func TestDropSeriesStatement_String(t *testing.T) { var tests = []struct { @@ -2269,7 +2928,7 @@ func BenchmarkParserParseStatement(b *testing.B) { b.ReportAllocs() - s := `SELECT field FROM "series" WHERE value > 10` + s := `SELECT "field" FROM "series" WHERE value > 10` for i := 0; i < b.N; i++ { if stmt, err := influxql.NewParser(strings.NewReader(s)).ParseStatement(); err != nil { b.Fatalf("unexpected error: %s", err) @@ -2283,14 +2942,18 @@ // MustParseSelectStatement parses a select statement. Panic on error. func MustParseSelectStatement(s string) *influxql.SelectStatement { stmt, err := influxql.NewParser(strings.NewReader(s)).ParseStatement() - panicIfErr(err) + if err != nil { + panic(err) + } return stmt.(*influxql.SelectStatement) } // MustParseExpr parses an expression. Panic on error. func MustParseExpr(s string) influxql.Expr { expr, err := influxql.NewParser(strings.NewReader(s)).ParseExpr() - panicIfErr(err) + if err != nil { + panic(err) + } return expr } @@ -2303,7 +2966,7 @@ } // newAlterRetentionPolicyStatement creates an initialized AlterRetentionPolicyStatement. -func newAlterRetentionPolicyStatement(name string, DB string, d time.Duration, replication int, dfault bool) *influxql.AlterRetentionPolicyStatement { +func newAlterRetentionPolicyStatement(name string, DB string, d, sd time.Duration, replication int, dfault bool) *influxql.AlterRetentionPolicyStatement { stmt := &influxql.AlterRetentionPolicyStatement{ Name: name, Database: DB, @@ -2314,6 +2977,10 @@ stmt.Duration = &d } + if sd > -1 { + stmt.ShardGroupDuration = &sd + } + if replication > -1 { stmt.Replication = &replication } @@ -2323,19 +2990,25 @@ // mustMarshalJSON encodes a value to JSON. func mustMarshalJSON(v interface{}) []byte { - b, err := json.Marshal(v) - panicIfErr(err) + b, err := json.MarshalIndent(v, "", " ") + if err != nil { + panic(err) + } return b } func mustParseDuration(s string) time.Duration { d, err := influxql.ParseDuration(s) - panicIfErr(err) - return d -} - -func panicIfErr(err error) { if err != nil { panic(err) } + return d +} + +func duration(v time.Duration) *time.Duration { + return &v +} + +func intptr(v int) *int { + return &v } diff -Nru influxdb-0.10.0+dfsg1/influxql/point.gen.go influxdb-1.1.1+dfsg1/influxql/point.gen.go --- influxdb-0.10.0+dfsg1/influxql/point.gen.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/point.gen.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,823 @@ +// Generated by tmpl +// https://github.com/benbjohnson/tmpl +// +// DO NOT EDIT! +// Source: point.gen.go.tmpl + +package influxql + +import ( + "encoding/binary" + "io" + + "github.com/gogo/protobuf/proto" + internal "github.com/influxdata/influxdb/influxql/internal" +) + +// FloatPoint represents a point with a float64 value. +// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT. +// See TestPoint_Fields in influxql/point_test.go for more details. +type FloatPoint struct { + Name string + Tags Tags + + Time int64 + Nil bool + Value float64 + Aux []interface{} + + // Total number of points that were combined into this point from an aggregate. + // If this is zero, the point is not the result of an aggregate function. 
+ Aggregated uint32 +} + +func (v *FloatPoint) name() string { return v.Name } +func (v *FloatPoint) tags() Tags { return v.Tags } +func (v *FloatPoint) time() int64 { return v.Time } +func (v *FloatPoint) nil() bool { return v.Nil } +func (v *FloatPoint) value() interface{} { + if v.Nil { + return nil + } + return v.Value +} +func (v *FloatPoint) aux() []interface{} { return v.Aux } + +// Clone returns a copy of v. +func (v *FloatPoint) Clone() *FloatPoint { + if v == nil { + return nil + } + + other := *v + if v.Aux != nil { + other.Aux = make([]interface{}, len(v.Aux)) + copy(other.Aux, v.Aux) + } + + return &other +} + +func encodeFloatPoint(p *FloatPoint) *internal.Point { + return &internal.Point{ + Name: proto.String(p.Name), + Tags: proto.String(p.Tags.ID()), + Time: proto.Int64(p.Time), + Nil: proto.Bool(p.Nil), + Aux: encodeAux(p.Aux), + Aggregated: proto.Uint32(p.Aggregated), + + FloatValue: proto.Float64(p.Value), + } +} + +func decodeFloatPoint(pb *internal.Point) *FloatPoint { + return &FloatPoint{ + Name: pb.GetName(), + Tags: newTagsID(pb.GetTags()), + Time: pb.GetTime(), + Nil: pb.GetNil(), + Aux: decodeAux(pb.Aux), + Aggregated: pb.GetAggregated(), + Value: pb.GetFloatValue(), + } +} + +// floatPoints represents a slice of points sortable by value. +type floatPoints []FloatPoint + +func (a floatPoints) Len() int { return len(a) } +func (a floatPoints) Less(i, j int) bool { + if a[i].Time != a[j].Time { + return a[i].Time < a[j].Time + } + return a[i].Value < a[j].Value +} +func (a floatPoints) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// floatPointsByValue represents a slice of points sortable by value. +type floatPointsByValue []FloatPoint + +func (a floatPointsByValue) Len() int { return len(a) } + +func (a floatPointsByValue) Less(i, j int) bool { return a[i].Value < a[j].Value } + +func (a floatPointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// floatPointsByTime represents a slice of points sortable by value. +type floatPointsByTime []FloatPoint + +func (a floatPointsByTime) Len() int { return len(a) } +func (a floatPointsByTime) Less(i, j int) bool { return a[i].Time < a[j].Time } +func (a floatPointsByTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// floatPointByFunc represents a slice of points sortable by a function. +type floatPointsByFunc struct { + points []FloatPoint + cmp func(a, b *FloatPoint) bool +} + +func (a *floatPointsByFunc) Len() int { return len(a.points) } +func (a *floatPointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) } +func (a *floatPointsByFunc) Swap(i, j int) { a.points[i], a.points[j] = a.points[j], a.points[i] } + +func (a *floatPointsByFunc) Push(x interface{}) { + a.points = append(a.points, x.(FloatPoint)) +} + +func (a *floatPointsByFunc) Pop() interface{} { + p := a.points[len(a.points)-1] + a.points = a.points[:len(a.points)-1] + return p +} + +func floatPointsSortBy(points []FloatPoint, cmp func(a, b *FloatPoint) bool) *floatPointsByFunc { + return &floatPointsByFunc{ + points: points, + cmp: cmp, + } +} + +// FloatPointEncoder encodes FloatPoint points to a writer. +type FloatPointEncoder struct { + w io.Writer +} + +// NewFloatPointEncoder returns a new instance of FloatPointEncoder that writes to w. +func NewFloatPointEncoder(w io.Writer) *FloatPointEncoder { + return &FloatPointEncoder{w: w} +} + +// EncodeFloatPoint marshals and writes p to the underlying writer. +func (enc *FloatPointEncoder) EncodeFloatPoint(p *FloatPoint) error { + // Marshal to bytes. 
+ buf, err := proto.Marshal(encodeFloatPoint(p)) + if err != nil { + return err + } + + // Write the length. + if err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil { + return err + } + + // Write the encoded point. + if _, err := enc.w.Write(buf); err != nil { + return err + } + return nil +} + +// FloatPointDecoder decodes FloatPoint points from a reader. +type FloatPointDecoder struct { + r io.Reader + stats IteratorStats +} + +// NewFloatPointDecoder returns a new instance of FloatPointDecoder that reads from r. +func NewFloatPointDecoder(r io.Reader) *FloatPointDecoder { + return &FloatPointDecoder{r: r} +} + +// Stats returns iterator stats embedded within the stream. +func (dec *FloatPointDecoder) Stats() IteratorStats { return dec.stats } + +// DecodeFloatPoint reads from the underlying reader and unmarshals into p. +func (dec *FloatPointDecoder) DecodeFloatPoint(p *FloatPoint) error { + for { + // Read length. + var sz uint32 + if err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil { + return err + } + + // Read point data. + buf := make([]byte, sz) + if _, err := io.ReadFull(dec.r, buf); err != nil { + return err + } + + // Unmarshal into point. + var pb internal.Point + if err := proto.Unmarshal(buf, &pb); err != nil { + return err + } + + // If the point contains stats then read stats and retry. + if pb.Stats != nil { + dec.stats = decodeIteratorStats(pb.Stats) + continue + } + + // Decode into point object. + *p = *decodeFloatPoint(&pb) + + return nil + } +} + +// IntegerPoint represents a point with a int64 value. +// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT. +// See TestPoint_Fields in influxql/point_test.go for more details. +type IntegerPoint struct { + Name string + Tags Tags + + Time int64 + Nil bool + Value int64 + Aux []interface{} + + // Total number of points that were combined into this point from an aggregate. + // If this is zero, the point is not the result of an aggregate function. + Aggregated uint32 +} + +func (v *IntegerPoint) name() string { return v.Name } +func (v *IntegerPoint) tags() Tags { return v.Tags } +func (v *IntegerPoint) time() int64 { return v.Time } +func (v *IntegerPoint) nil() bool { return v.Nil } +func (v *IntegerPoint) value() interface{} { + if v.Nil { + return nil + } + return v.Value +} +func (v *IntegerPoint) aux() []interface{} { return v.Aux } + +// Clone returns a copy of v. +func (v *IntegerPoint) Clone() *IntegerPoint { + if v == nil { + return nil + } + + other := *v + if v.Aux != nil { + other.Aux = make([]interface{}, len(v.Aux)) + copy(other.Aux, v.Aux) + } + + return &other +} + +func encodeIntegerPoint(p *IntegerPoint) *internal.Point { + return &internal.Point{ + Name: proto.String(p.Name), + Tags: proto.String(p.Tags.ID()), + Time: proto.Int64(p.Time), + Nil: proto.Bool(p.Nil), + Aux: encodeAux(p.Aux), + Aggregated: proto.Uint32(p.Aggregated), + + IntegerValue: proto.Int64(p.Value), + } +} + +func decodeIntegerPoint(pb *internal.Point) *IntegerPoint { + return &IntegerPoint{ + Name: pb.GetName(), + Tags: newTagsID(pb.GetTags()), + Time: pb.GetTime(), + Nil: pb.GetNil(), + Aux: decodeAux(pb.Aux), + Aggregated: pb.GetAggregated(), + Value: pb.GetIntegerValue(), + } +} + +// integerPoints represents a slice of points sortable by value. 
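The generated encoder/decoder pairs in this file all share one wire format: each point is protobuf-marshaled, prefixed with its length as a big-endian uint32, and frames that only carry IteratorStats are interleaved in the same stream and skipped by the decoders. A minimal sketch of that framing using only the Go standard library; writeFrame and readFrame are illustrative names, not part of the influxql package:

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"
        "io"
    )

    // writeFrame prefixes payload with its length as a big-endian uint32,
    // mirroring the framing used by the point encoders above.
    func writeFrame(w io.Writer, payload []byte) error {
        if err := binary.Write(w, binary.BigEndian, uint32(len(payload))); err != nil {
            return err
        }
        _, err := w.Write(payload)
        return err
    }

    // readFrame reads one length-prefixed frame from the stream.
    func readFrame(r io.Reader) ([]byte, error) {
        var sz uint32
        if err := binary.Read(r, binary.BigEndian, &sz); err != nil {
            return nil, err
        }
        buf := make([]byte, sz)
        if _, err := io.ReadFull(r, buf); err != nil {
            return nil, err
        }
        return buf, nil
    }

    func main() {
        var stream bytes.Buffer
        writeFrame(&stream, []byte("point #1"))
        writeFrame(&stream, []byte("point #2"))
        for {
            frame, err := readFrame(&stream)
            if err == io.EOF {
                break // clean end of stream
            } else if err != nil {
                panic(err)
            }
            fmt.Printf("%s\n", frame)
        }
    }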
+type integerPoints []IntegerPoint + +func (a integerPoints) Len() int { return len(a) } +func (a integerPoints) Less(i, j int) bool { + if a[i].Time != a[j].Time { + return a[i].Time < a[j].Time + } + return a[i].Value < a[j].Value +} +func (a integerPoints) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// integerPointsByValue represents a slice of points sortable by value. +type integerPointsByValue []IntegerPoint + +func (a integerPointsByValue) Len() int { return len(a) } + +func (a integerPointsByValue) Less(i, j int) bool { return a[i].Value < a[j].Value } + +func (a integerPointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// integerPointsByTime represents a slice of points sortable by value. +type integerPointsByTime []IntegerPoint + +func (a integerPointsByTime) Len() int { return len(a) } +func (a integerPointsByTime) Less(i, j int) bool { return a[i].Time < a[j].Time } +func (a integerPointsByTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// integerPointByFunc represents a slice of points sortable by a function. +type integerPointsByFunc struct { + points []IntegerPoint + cmp func(a, b *IntegerPoint) bool +} + +func (a *integerPointsByFunc) Len() int { return len(a.points) } +func (a *integerPointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) } +func (a *integerPointsByFunc) Swap(i, j int) { a.points[i], a.points[j] = a.points[j], a.points[i] } + +func (a *integerPointsByFunc) Push(x interface{}) { + a.points = append(a.points, x.(IntegerPoint)) +} + +func (a *integerPointsByFunc) Pop() interface{} { + p := a.points[len(a.points)-1] + a.points = a.points[:len(a.points)-1] + return p +} + +func integerPointsSortBy(points []IntegerPoint, cmp func(a, b *IntegerPoint) bool) *integerPointsByFunc { + return &integerPointsByFunc{ + points: points, + cmp: cmp, + } +} + +// IntegerPointEncoder encodes IntegerPoint points to a writer. +type IntegerPointEncoder struct { + w io.Writer +} + +// NewIntegerPointEncoder returns a new instance of IntegerPointEncoder that writes to w. +func NewIntegerPointEncoder(w io.Writer) *IntegerPointEncoder { + return &IntegerPointEncoder{w: w} +} + +// EncodeIntegerPoint marshals and writes p to the underlying writer. +func (enc *IntegerPointEncoder) EncodeIntegerPoint(p *IntegerPoint) error { + // Marshal to bytes. + buf, err := proto.Marshal(encodeIntegerPoint(p)) + if err != nil { + return err + } + + // Write the length. + if err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil { + return err + } + + // Write the encoded point. + if _, err := enc.w.Write(buf); err != nil { + return err + } + return nil +} + +// IntegerPointDecoder decodes IntegerPoint points from a reader. +type IntegerPointDecoder struct { + r io.Reader + stats IteratorStats +} + +// NewIntegerPointDecoder returns a new instance of IntegerPointDecoder that reads from r. +func NewIntegerPointDecoder(r io.Reader) *IntegerPointDecoder { + return &IntegerPointDecoder{r: r} +} + +// Stats returns iterator stats embedded within the stream. +func (dec *IntegerPointDecoder) Stats() IteratorStats { return dec.stats } + +// DecodeIntegerPoint reads from the underlying reader and unmarshals into p. +func (dec *IntegerPointDecoder) DecodeIntegerPoint(p *IntegerPoint) error { + for { + // Read length. + var sz uint32 + if err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil { + return err + } + + // Read point data. 
+ buf := make([]byte, sz) + if _, err := io.ReadFull(dec.r, buf); err != nil { + return err + } + + // Unmarshal into point. + var pb internal.Point + if err := proto.Unmarshal(buf, &pb); err != nil { + return err + } + + // If the point contains stats then read stats and retry. + if pb.Stats != nil { + dec.stats = decodeIteratorStats(pb.Stats) + continue + } + + // Decode into point object. + *p = *decodeIntegerPoint(&pb) + + return nil + } +} + +// StringPoint represents a point with a string value. +// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT. +// See TestPoint_Fields in influxql/point_test.go for more details. +type StringPoint struct { + Name string + Tags Tags + + Time int64 + Nil bool + Value string + Aux []interface{} + + // Total number of points that were combined into this point from an aggregate. + // If this is zero, the point is not the result of an aggregate function. + Aggregated uint32 +} + +func (v *StringPoint) name() string { return v.Name } +func (v *StringPoint) tags() Tags { return v.Tags } +func (v *StringPoint) time() int64 { return v.Time } +func (v *StringPoint) nil() bool { return v.Nil } +func (v *StringPoint) value() interface{} { + if v.Nil { + return nil + } + return v.Value +} +func (v *StringPoint) aux() []interface{} { return v.Aux } + +// Clone returns a copy of v. +func (v *StringPoint) Clone() *StringPoint { + if v == nil { + return nil + } + + other := *v + if v.Aux != nil { + other.Aux = make([]interface{}, len(v.Aux)) + copy(other.Aux, v.Aux) + } + + return &other +} + +func encodeStringPoint(p *StringPoint) *internal.Point { + return &internal.Point{ + Name: proto.String(p.Name), + Tags: proto.String(p.Tags.ID()), + Time: proto.Int64(p.Time), + Nil: proto.Bool(p.Nil), + Aux: encodeAux(p.Aux), + Aggregated: proto.Uint32(p.Aggregated), + + StringValue: proto.String(p.Value), + } +} + +func decodeStringPoint(pb *internal.Point) *StringPoint { + return &StringPoint{ + Name: pb.GetName(), + Tags: newTagsID(pb.GetTags()), + Time: pb.GetTime(), + Nil: pb.GetNil(), + Aux: decodeAux(pb.Aux), + Aggregated: pb.GetAggregated(), + Value: pb.GetStringValue(), + } +} + +// stringPoints represents a slice of points sortable by value. +type stringPoints []StringPoint + +func (a stringPoints) Len() int { return len(a) } +func (a stringPoints) Less(i, j int) bool { + if a[i].Time != a[j].Time { + return a[i].Time < a[j].Time + } + return a[i].Value < a[j].Value +} +func (a stringPoints) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// stringPointsByValue represents a slice of points sortable by value. +type stringPointsByValue []StringPoint + +func (a stringPointsByValue) Len() int { return len(a) } + +func (a stringPointsByValue) Less(i, j int) bool { return a[i].Value < a[j].Value } + +func (a stringPointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// stringPointsByTime represents a slice of points sortable by value. +type stringPointsByTime []StringPoint + +func (a stringPointsByTime) Len() int { return len(a) } +func (a stringPointsByTime) Less(i, j int) bool { return a[i].Time < a[j].Time } +func (a stringPointsByTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// stringPointByFunc represents a slice of points sortable by a function. 
+type stringPointsByFunc struct { + points []StringPoint + cmp func(a, b *StringPoint) bool +} + +func (a *stringPointsByFunc) Len() int { return len(a.points) } +func (a *stringPointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) } +func (a *stringPointsByFunc) Swap(i, j int) { a.points[i], a.points[j] = a.points[j], a.points[i] } + +func (a *stringPointsByFunc) Push(x interface{}) { + a.points = append(a.points, x.(StringPoint)) +} + +func (a *stringPointsByFunc) Pop() interface{} { + p := a.points[len(a.points)-1] + a.points = a.points[:len(a.points)-1] + return p +} + +func stringPointsSortBy(points []StringPoint, cmp func(a, b *StringPoint) bool) *stringPointsByFunc { + return &stringPointsByFunc{ + points: points, + cmp: cmp, + } +} + +// StringPointEncoder encodes StringPoint points to a writer. +type StringPointEncoder struct { + w io.Writer +} + +// NewStringPointEncoder returns a new instance of StringPointEncoder that writes to w. +func NewStringPointEncoder(w io.Writer) *StringPointEncoder { + return &StringPointEncoder{w: w} +} + +// EncodeStringPoint marshals and writes p to the underlying writer. +func (enc *StringPointEncoder) EncodeStringPoint(p *StringPoint) error { + // Marshal to bytes. + buf, err := proto.Marshal(encodeStringPoint(p)) + if err != nil { + return err + } + + // Write the length. + if err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil { + return err + } + + // Write the encoded point. + if _, err := enc.w.Write(buf); err != nil { + return err + } + return nil +} + +// StringPointDecoder decodes StringPoint points from a reader. +type StringPointDecoder struct { + r io.Reader + stats IteratorStats +} + +// NewStringPointDecoder returns a new instance of StringPointDecoder that reads from r. +func NewStringPointDecoder(r io.Reader) *StringPointDecoder { + return &StringPointDecoder{r: r} +} + +// Stats returns iterator stats embedded within the stream. +func (dec *StringPointDecoder) Stats() IteratorStats { return dec.stats } + +// DecodeStringPoint reads from the underlying reader and unmarshals into p. +func (dec *StringPointDecoder) DecodeStringPoint(p *StringPoint) error { + for { + // Read length. + var sz uint32 + if err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil { + return err + } + + // Read point data. + buf := make([]byte, sz) + if _, err := io.ReadFull(dec.r, buf); err != nil { + return err + } + + // Unmarshal into point. + var pb internal.Point + if err := proto.Unmarshal(buf, &pb); err != nil { + return err + } + + // If the point contains stats then read stats and retry. + if pb.Stats != nil { + dec.stats = decodeIteratorStats(pb.Stats) + continue + } + + // Decode into point object. + *p = *decodeStringPoint(&pb) + + return nil + } +} + +// BooleanPoint represents a point with a bool value. +// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT. +// See TestPoint_Fields in influxql/point_test.go for more details. +type BooleanPoint struct { + Name string + Tags Tags + + Time int64 + Nil bool + Value bool + Aux []interface{} + + // Total number of points that were combined into this point from an aggregate. + // If this is zero, the point is not the result of an aggregate function. 
+ Aggregated uint32 +} + +func (v *BooleanPoint) name() string { return v.Name } +func (v *BooleanPoint) tags() Tags { return v.Tags } +func (v *BooleanPoint) time() int64 { return v.Time } +func (v *BooleanPoint) nil() bool { return v.Nil } +func (v *BooleanPoint) value() interface{} { + if v.Nil { + return nil + } + return v.Value +} +func (v *BooleanPoint) aux() []interface{} { return v.Aux } + +// Clone returns a copy of v. +func (v *BooleanPoint) Clone() *BooleanPoint { + if v == nil { + return nil + } + + other := *v + if v.Aux != nil { + other.Aux = make([]interface{}, len(v.Aux)) + copy(other.Aux, v.Aux) + } + + return &other +} + +func encodeBooleanPoint(p *BooleanPoint) *internal.Point { + return &internal.Point{ + Name: proto.String(p.Name), + Tags: proto.String(p.Tags.ID()), + Time: proto.Int64(p.Time), + Nil: proto.Bool(p.Nil), + Aux: encodeAux(p.Aux), + Aggregated: proto.Uint32(p.Aggregated), + + BooleanValue: proto.Bool(p.Value), + } +} + +func decodeBooleanPoint(pb *internal.Point) *BooleanPoint { + return &BooleanPoint{ + Name: pb.GetName(), + Tags: newTagsID(pb.GetTags()), + Time: pb.GetTime(), + Nil: pb.GetNil(), + Aux: decodeAux(pb.Aux), + Aggregated: pb.GetAggregated(), + Value: pb.GetBooleanValue(), + } +} + +// booleanPoints represents a slice of points sortable by value. +type booleanPoints []BooleanPoint + +func (a booleanPoints) Len() int { return len(a) } +func (a booleanPoints) Less(i, j int) bool { + if a[i].Time != a[j].Time { + return a[i].Time < a[j].Time + } + return !a[i].Value +} +func (a booleanPoints) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// booleanPointsByValue represents a slice of points sortable by value. +type booleanPointsByValue []BooleanPoint + +func (a booleanPointsByValue) Len() int { return len(a) } + +func (a booleanPointsByValue) Less(i, j int) bool { return !a[i].Value } + +func (a booleanPointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// booleanPointsByTime represents a slice of points sortable by value. +type booleanPointsByTime []BooleanPoint + +func (a booleanPointsByTime) Len() int { return len(a) } +func (a booleanPointsByTime) Less(i, j int) bool { return a[i].Time < a[j].Time } +func (a booleanPointsByTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// booleanPointByFunc represents a slice of points sortable by a function. +type booleanPointsByFunc struct { + points []BooleanPoint + cmp func(a, b *BooleanPoint) bool +} + +func (a *booleanPointsByFunc) Len() int { return len(a.points) } +func (a *booleanPointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) } +func (a *booleanPointsByFunc) Swap(i, j int) { a.points[i], a.points[j] = a.points[j], a.points[i] } + +func (a *booleanPointsByFunc) Push(x interface{}) { + a.points = append(a.points, x.(BooleanPoint)) +} + +func (a *booleanPointsByFunc) Pop() interface{} { + p := a.points[len(a.points)-1] + a.points = a.points[:len(a.points)-1] + return p +} + +func booleanPointsSortBy(points []BooleanPoint, cmp func(a, b *BooleanPoint) bool) *booleanPointsByFunc { + return &booleanPointsByFunc{ + points: points, + cmp: cmp, + } +} + +// BooleanPointEncoder encodes BooleanPoint points to a writer. +type BooleanPointEncoder struct { + w io.Writer +} + +// NewBooleanPointEncoder returns a new instance of BooleanPointEncoder that writes to w. +func NewBooleanPointEncoder(w io.Writer) *BooleanPointEncoder { + return &BooleanPointEncoder{w: w} +} + +// EncodeBooleanPoint marshals and writes p to the underlying writer. 
+func (enc *BooleanPointEncoder) EncodeBooleanPoint(p *BooleanPoint) error { + // Marshal to bytes. + buf, err := proto.Marshal(encodeBooleanPoint(p)) + if err != nil { + return err + } + + // Write the length. + if err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil { + return err + } + + // Write the encoded point. + if _, err := enc.w.Write(buf); err != nil { + return err + } + return nil +} + +// BooleanPointDecoder decodes BooleanPoint points from a reader. +type BooleanPointDecoder struct { + r io.Reader + stats IteratorStats +} + +// NewBooleanPointDecoder returns a new instance of BooleanPointDecoder that reads from r. +func NewBooleanPointDecoder(r io.Reader) *BooleanPointDecoder { + return &BooleanPointDecoder{r: r} +} + +// Stats returns iterator stats embedded within the stream. +func (dec *BooleanPointDecoder) Stats() IteratorStats { return dec.stats } + +// DecodeBooleanPoint reads from the underlying reader and unmarshals into p. +func (dec *BooleanPointDecoder) DecodeBooleanPoint(p *BooleanPoint) error { + for { + // Read length. + var sz uint32 + if err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil { + return err + } + + // Read point data. + buf := make([]byte, sz) + if _, err := io.ReadFull(dec.r, buf); err != nil { + return err + } + + // Unmarshal into point. + var pb internal.Point + if err := proto.Unmarshal(buf, &pb); err != nil { + return err + } + + // If the point contains stats then read stats and retry. + if pb.Stats != nil { + dec.stats = decodeIteratorStats(pb.Stats) + continue + } + + // Decode into point object. + *p = *decodeBooleanPoint(&pb) + + return nil + } +} diff -Nru influxdb-0.10.0+dfsg1/influxql/point.gen.go.tmpl influxdb-1.1.1+dfsg1/influxql/point.gen.go.tmpl --- influxdb-0.10.0+dfsg1/influxql/point.gen.go.tmpl 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/point.gen.go.tmpl 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,226 @@ +package influxql + +import ( + "encoding/binary" + "io" + + "github.com/gogo/protobuf/proto" + internal "github.com/influxdata/influxdb/influxql/internal" +) + +{{range .}} + +// {{.Name}}Point represents a point with a {{.Type}} value. +// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT. +// See TestPoint_Fields in influxql/point_test.go for more details. +type {{.Name}}Point struct { + Name string + Tags Tags + + Time int64 + Nil bool + Value {{.Type}} + Aux []interface{} + + // Total number of points that were combined into this point from an aggregate. + // If this is zero, the point is not the result of an aggregate function. + Aggregated uint32 +} + +func (v *{{.Name}}Point) name() string { return v.Name } +func (v *{{.Name}}Point) tags() Tags { return v.Tags } +func (v *{{.Name}}Point) time() int64 { return v.Time } +func (v *{{.Name}}Point) nil() bool { return v.Nil } +func (v *{{.Name}}Point) value() interface{} { + if v.Nil { + return nil + } + return v.Value +} +func (v *{{.Name}}Point) aux() []interface{} { return v.Aux } + +// Clone returns a copy of v. 
+func (v *{{.Name}}Point) Clone() *{{.Name}}Point { + if v == nil { + return nil + } + + other := *v + if v.Aux != nil { + other.Aux = make([]interface{}, len(v.Aux)) + copy(other.Aux, v.Aux) + } + + return &other +} + +func encode{{.Name}}Point(p *{{.Name}}Point) *internal.Point { + return &internal.Point{ + Name: proto.String(p.Name), + Tags: proto.String(p.Tags.ID()), + Time: proto.Int64(p.Time), + Nil: proto.Bool(p.Nil), + Aux: encodeAux(p.Aux), + Aggregated: proto.Uint32(p.Aggregated), + + {{if eq .Name "Float"}} + FloatValue: proto.Float64(p.Value), + {{else if eq .Name "Integer"}} + IntegerValue: proto.Int64(p.Value), + {{else if eq .Name "String"}} + StringValue: proto.String(p.Value), + {{else if eq .Name "Boolean"}} + BooleanValue: proto.Bool(p.Value), + {{end}} + } +} + +func decode{{.Name}}Point(pb *internal.Point) *{{.Name}}Point { + return &{{.Name}}Point{ + Name: pb.GetName(), + Tags: newTagsID(pb.GetTags()), + Time: pb.GetTime(), + Nil: pb.GetNil(), + Aux: decodeAux(pb.Aux), + Aggregated: pb.GetAggregated(), + Value: pb.Get{{.Name}}Value(), + } +} + +// {{.name}}Points represents a slice of points sortable by value. +type {{.name}}Points []{{.Name}}Point + +func (a {{.name}}Points) Len() int { return len(a) } +func (a {{.name}}Points) Less(i, j int) bool { + if a[i].Time != a[j].Time { + return a[i].Time < a[j].Time + } + return {{if ne .Name "Boolean"}}a[i].Value < a[j].Value{{else}}!a[i].Value{{end}} +} +func (a {{.name}}Points) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// {{.name}}PointsByValue represents a slice of points sortable by value. +type {{.name}}PointsByValue []{{.Name}}Point + +func (a {{.name}}PointsByValue) Len() int { return len(a) } +{{if eq .Name "Boolean"}} +func (a {{.name}}PointsByValue) Less(i, j int) bool { return !a[i].Value } +{{else}} +func (a {{.name}}PointsByValue) Less(i, j int) bool { return a[i].Value < a[j].Value } +{{end}} +func (a {{.name}}PointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// {{.name}}PointsByTime represents a slice of points sortable by value. +type {{.name}}PointsByTime []{{.Name}}Point + +func (a {{.name}}PointsByTime) Len() int { return len(a) } +func (a {{.name}}PointsByTime) Less(i, j int) bool { return a[i].Time < a[j].Time } +func (a {{.name}}PointsByTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// {{.name}}PointByFunc represents a slice of points sortable by a function. +type {{.name}}PointsByFunc struct { + points []{{.Name}}Point + cmp func(a, b *{{.Name}}Point) bool +} + +func (a *{{.name}}PointsByFunc) Len() int { return len(a.points) } +func (a *{{.name}}PointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) } +func (a *{{.name}}PointsByFunc) Swap(i, j int) { a.points[i], a.points[j] = a.points[j], a.points[i] } + +func (a *{{.name}}PointsByFunc) Push(x interface{}) { + a.points = append(a.points, x.({{.Name}}Point)) +} + +func (a *{{.name}}PointsByFunc) Pop() interface{} { + p := a.points[len(a.points)-1] + a.points = a.points[:len(a.points)-1] + return p +} + +func {{.name}}PointsSortBy(points []{{.Name}}Point, cmp func(a, b *{{.Name}}Point) bool) *{{.name}}PointsByFunc { + return &{{.name}}PointsByFunc{ + points: points, + cmp: cmp, + } +} + +// {{.Name}}PointEncoder encodes {{.Name}}Point points to a writer. +type {{.Name}}PointEncoder struct { + w io.Writer +} + +// New{{.Name}}PointEncoder returns a new instance of {{.Name}}PointEncoder that writes to w. 
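point.gen.go above is rendered from this template once per value type (Float, Integer, String, Boolean) by the benbjohnson/tmpl tool named in its header. A rough sketch of the same render-per-type idea using the standard library's text/template; the data shape below is assumed purely for illustration and is not the project's actual template data:

    package main

    import (
        "os"
        "text/template"
    )

    func main() {
        // Hypothetical data with the fields the template references.
        types := []map[string]string{
            {"Name": "Float", "name": "float", "Type": "float64"},
            {"Name": "Integer", "name": "integer", "Type": "int64"},
        }

        // A tiny excerpt of the template, executed once per type.
        tmpl := template.Must(template.New("points").Parse(
            "// {{.name}}Points represents a slice of points sortable by value.\n" +
                "type {{.name}}Points []{{.Name}}Point\n\n"))

        for _, t := range types {
            if err := tmpl.Execute(os.Stdout, t); err != nil {
                panic(err)
            }
        }
    }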
+func New{{.Name}}PointEncoder(w io.Writer) *{{.Name}}PointEncoder { + return &{{.Name}}PointEncoder{w: w} +} + +// Encode{{.Name}}Point marshals and writes p to the underlying writer. +func (enc *{{.Name}}PointEncoder) Encode{{.Name}}Point(p *{{.Name}}Point) error { + // Marshal to bytes. + buf, err := proto.Marshal(encode{{.Name}}Point(p)) + if err != nil { + return err + } + + // Write the length. + if err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil { + return err + } + + // Write the encoded point. + if _, err := enc.w.Write(buf); err != nil { + return err + } + return nil +} + + +// {{.Name}}PointDecoder decodes {{.Name}}Point points from a reader. +type {{.Name}}PointDecoder struct { + r io.Reader + stats IteratorStats +} + +// New{{.Name}}PointDecoder returns a new instance of {{.Name}}PointDecoder that reads from r. +func New{{.Name}}PointDecoder(r io.Reader) *{{.Name}}PointDecoder { + return &{{.Name}}PointDecoder{r: r} +} + +// Stats returns iterator stats embedded within the stream. +func (dec *{{.Name}}PointDecoder) Stats() IteratorStats { return dec.stats } + +// Decode{{.Name}}Point reads from the underlying reader and unmarshals into p. +func (dec *{{.Name}}PointDecoder) Decode{{.Name}}Point(p *{{.Name}}Point) error { + for { + // Read length. + var sz uint32 + if err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil { + return err + } + + // Read point data. + buf := make([]byte, sz) + if _, err := io.ReadFull(dec.r, buf); err != nil { + return err + } + + // Unmarshal into point. + var pb internal.Point + if err := proto.Unmarshal(buf, &pb); err != nil { + return err + } + + // If the point contains stats then read stats and retry. + if pb.Stats != nil { + dec.stats = decodeIteratorStats(pb.Stats) + continue + } + + // Decode into point object. + *p = *decode{{.Name}}Point(&pb) + + return nil + } +} + +{{end}} diff -Nru influxdb-0.10.0+dfsg1/influxql/point.go influxdb-1.1.1+dfsg1/influxql/point.go --- influxdb-0.10.0+dfsg1/influxql/point.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/point.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,339 @@ +package influxql + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "math" + "sort" + + "github.com/gogo/protobuf/proto" + internal "github.com/influxdata/influxdb/influxql/internal" +) + +// ZeroTime is the Unix nanosecond timestamp for no time. +// This time is not used by the query engine or the storage engine as a valid time. +const ZeroTime = int64(math.MinInt64) + +// Point represents a value in a series that occurred at a given time. +type Point interface { + // Name and tags uniquely identify the series the value belongs to. + name() string + tags() Tags + + // The time that the value occurred at. + time() int64 + + // The value at the given time. + value() interface{} + + // Auxillary values passed along with the value. + aux() []interface{} +} + +// Points represents a list of points. +type Points []Point + +// Clone returns a deep copy of a. +func (a Points) Clone() []Point { + other := make([]Point, len(a)) + for i, p := range a { + if p == nil { + other[i] = nil + continue + } + + switch p := p.(type) { + case *FloatPoint: + other[i] = p.Clone() + case *IntegerPoint: + other[i] = p.Clone() + case *StringPoint: + other[i] = p.Clone() + case *BooleanPoint: + other[i] = p.Clone() + default: + panic(fmt.Sprintf("unable to clone point: %T", p)) + } + } + return other +} + +// Tags represent a map of keys and values. 
+// It memoizes its key so it can be used efficiently during query execution. +type Tags struct { + id string + m map[string]string +} + +// NewTags returns a new instance of Tags. +func NewTags(m map[string]string) Tags { + if len(m) == 0 { + return Tags{} + } + return Tags{ + id: string(encodeTags(m)), + m: m, + } +} + +// newTagsID returns a new instance of Tags parses from a tag id. +func newTagsID(id string) Tags { + m := decodeTags([]byte(id)) + if len(m) == 0 { + return Tags{} + } + return Tags{id: id, m: m} +} + +// ID returns the string identifier for the tags. +func (t Tags) ID() string { return t.id } + +// KeyValues returns the underlying map for the tags. +func (t Tags) KeyValues() map[string]string { return t.m } + +// Keys returns a sorted list of all keys on the tag. +func (t *Tags) Keys() []string { + if t == nil { + return nil + } + + var a []string + for k := range t.m { + a = append(a, k) + } + sort.Strings(a) + return a +} + +// Value returns the value for a given key. +func (t *Tags) Value(k string) string { + if t == nil { + return "" + } + return t.m[k] +} + +// Subset returns a new tags object with a subset of the keys. +func (t *Tags) Subset(keys []string) Tags { + if len(keys) == 0 { + return Tags{} + } + + // If keys match existing keys, simply return this tagset. + if keysMatch(t.m, keys) { + return *t + } + + // Otherwise create new tag set. + m := make(map[string]string, len(keys)) + for _, k := range keys { + m[k] = t.m[k] + } + return NewTags(m) +} + +// Equals returns true if t equals other. +func (t *Tags) Equals(other *Tags) bool { + if t == nil && other == nil { + return true + } else if t == nil || other == nil { + return false + } + return t.id == other.id +} + +// keysMatch returns true if m has exactly the same keys as listed in keys. +func keysMatch(m map[string]string, keys []string) bool { + if len(keys) != len(m) { + return false + } + + for _, k := range keys { + if _, ok := m[k]; !ok { + return false + } + } + + return true +} + +// encodeTags converts a map of strings to an identifier. +func encodeTags(m map[string]string) []byte { + // Empty maps marshal to empty bytes. + if len(m) == 0 { + return nil + } + + // Extract keys and determine final size. + sz := (len(m) * 2) - 1 // separators + keys := make([]string, 0, len(m)) + for k, v := range m { + keys = append(keys, k) + sz += len(k) + len(v) + } + sort.Strings(keys) + + // Generate marshaled bytes. + b := make([]byte, sz) + buf := b + for _, k := range keys { + copy(buf, k) + buf[len(k)] = '\x00' + buf = buf[len(k)+1:] + } + for i, k := range keys { + v := m[k] + copy(buf, v) + if i < len(keys)-1 { + buf[len(v)] = '\x00' + buf = buf[len(v)+1:] + } + } + return b +} + +// decodeTags parses an identifier into a map of tags. +func decodeTags(id []byte) map[string]string { + a := bytes.Split(id, []byte{'\x00'}) + + // There must be an even number of segments. + if len(a) > 0 && len(a)%2 == 1 { + a = a[:len(a)-1] + } + + // Return nil if there are no segments. + if len(a) == 0 { + return nil + } + mid := len(a) / 2 + + // Decode key/value tags. 
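encodeTags packs the sorted tag keys first, NUL-separated, then the values in the same key order, so {"host": "server01", "region": "us-west"} becomes the id "host\x00region\x00server01\x00us-west"; decodeTags splits on NUL and pairs segment i with segment i+mid. A standalone sketch of the same layout built with sort and strings (the package's own helpers are unexported, so this does not call them):

    package main

    import (
        "fmt"
        "sort"
        "strings"
    )

    func main() {
        tags := map[string]string{"region": "us-west", "host": "server01"}

        // Sorted keys first, then the values in the same order,
        // all NUL-separated: "host\x00region\x00server01\x00us-west".
        keys := make([]string, 0, len(tags))
        for k := range tags {
            keys = append(keys, k)
        }
        sort.Strings(keys)

        parts := make([]string, 0, len(keys)*2)
        parts = append(parts, keys...)
        for _, k := range keys {
            parts = append(parts, tags[k])
        }
        id := strings.Join(parts, "\x00")

        // Decoding pairs the i-th segment with the (i+mid)-th segment.
        segs := strings.Split(id, "\x00")
        mid := len(segs) / 2
        for i := 0; i < mid; i++ {
            fmt.Printf("%s=%s\n", segs[i], segs[i+mid])
        }
    }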
+ m := make(map[string]string) + for i := 0; i < mid; i++ { + m[string(a[i])] = string(a[i+mid]) + } + return m +} + +func encodeAux(aux []interface{}) []*internal.Aux { + pb := make([]*internal.Aux, len(aux)) + for i := range aux { + switch v := aux[i].(type) { + case float64: + pb[i] = &internal.Aux{DataType: proto.Int32(Float), FloatValue: proto.Float64(v)} + case *float64: + pb[i] = &internal.Aux{DataType: proto.Int32(Float)} + case int64: + pb[i] = &internal.Aux{DataType: proto.Int32(Integer), IntegerValue: proto.Int64(v)} + case *int64: + pb[i] = &internal.Aux{DataType: proto.Int32(Integer)} + case string: + pb[i] = &internal.Aux{DataType: proto.Int32(String), StringValue: proto.String(v)} + case *string: + pb[i] = &internal.Aux{DataType: proto.Int32(String)} + case bool: + pb[i] = &internal.Aux{DataType: proto.Int32(Boolean), BooleanValue: proto.Bool(v)} + case *bool: + pb[i] = &internal.Aux{DataType: proto.Int32(Boolean)} + default: + pb[i] = &internal.Aux{DataType: proto.Int32(int32(Unknown))} + } + } + return pb +} + +func decodeAux(pb []*internal.Aux) []interface{} { + if len(pb) == 0 { + return nil + } + + aux := make([]interface{}, len(pb)) + for i := range pb { + switch pb[i].GetDataType() { + case Float: + if pb[i].FloatValue != nil { + aux[i] = *pb[i].FloatValue + } else { + aux[i] = (*float64)(nil) + } + case Integer: + if pb[i].IntegerValue != nil { + aux[i] = *pb[i].IntegerValue + } else { + aux[i] = (*int64)(nil) + } + case String: + if pb[i].StringValue != nil { + aux[i] = *pb[i].StringValue + } else { + aux[i] = (*string)(nil) + } + case Boolean: + if pb[i].BooleanValue != nil { + aux[i] = *pb[i].BooleanValue + } else { + aux[i] = (*bool)(nil) + } + default: + aux[i] = nil + } + } + return aux +} + +// PointDecoder decodes generic points from a reader. +type PointDecoder struct { + r io.Reader + stats IteratorStats +} + +// NewPointDecoder returns a new instance of PointDecoder that reads from r. +func NewPointDecoder(r io.Reader) *PointDecoder { + return &PointDecoder{r: r} +} + +// Stats returns iterator stats embedded within the stream. +func (dec *PointDecoder) Stats() IteratorStats { return dec.stats } + +// DecodePoint reads from the underlying reader and unmarshals into p. +func (dec *PointDecoder) DecodePoint(p *Point) error { + for { + // Read length. + var sz uint32 + if err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil { + return err + } + + // Read point data. + buf := make([]byte, sz) + if _, err := io.ReadFull(dec.r, buf); err != nil { + return err + } + + // Unmarshal into point. + var pb internal.Point + if err := proto.Unmarshal(buf, &pb); err != nil { + return err + } + + // If the point contains stats then read stats and retry. 
+ if pb.Stats != nil { + dec.stats = decodeIteratorStats(pb.Stats) + continue + } + + if pb.IntegerValue != nil { + *p = decodeIntegerPoint(&pb) + } else if pb.StringValue != nil { + *p = decodeStringPoint(&pb) + } else if pb.BooleanValue != nil { + *p = decodeBooleanPoint(&pb) + } else { + *p = decodeFloatPoint(&pb) + } + + return nil + } +} diff -Nru influxdb-0.10.0+dfsg1/influxql/point_test.go influxdb-1.1.1+dfsg1/influxql/point_test.go --- influxdb-0.10.0+dfsg1/influxql/point_test.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/point_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,187 @@ +package influxql_test + +import ( + "reflect" + "strings" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/pkg/deep" +) + +func TestPoint_Clone_Float(t *testing.T) { + p := &influxql.FloatPoint{ + Name: "cpu", + Tags: ParseTags("host=server01"), + Time: 5, + Value: 2, + Aux: []interface{}{float64(45)}, + } + c := p.Clone() + if p == c { + t.Errorf("clone has the same address as the original: %v == %v", p, c) + } + if !deep.Equal(p, c) { + t.Errorf("mismatched point: %s", spew.Sdump(c)) + } + if &p.Aux[0] == &c.Aux[0] { + t.Errorf("aux values share the same address: %v == %v", p.Aux, c.Aux) + } else if !deep.Equal(p.Aux, c.Aux) { + t.Errorf("mismatched aux fields: %v != %v", p.Aux, c.Aux) + } +} + +func TestPoint_Clone_Integer(t *testing.T) { + p := &influxql.IntegerPoint{ + Name: "cpu", + Tags: ParseTags("host=server01"), + Time: 5, + Value: 2, + Aux: []interface{}{float64(45)}, + } + c := p.Clone() + if p == c { + t.Errorf("clone has the same address as the original: %v == %v", p, c) + } + if !deep.Equal(p, c) { + t.Errorf("mismatched point: %s", spew.Sdump(c)) + } + if &p.Aux[0] == &c.Aux[0] { + t.Errorf("aux values share the same address: %v == %v", p.Aux, c.Aux) + } else if !deep.Equal(p.Aux, c.Aux) { + t.Errorf("mismatched aux fields: %v != %v", p.Aux, c.Aux) + } +} + +func TestPoint_Clone_String(t *testing.T) { + p := &influxql.StringPoint{ + Name: "cpu", + Tags: ParseTags("host=server01"), + Time: 5, + Value: "clone", + Aux: []interface{}{float64(45)}, + } + c := p.Clone() + if p == c { + t.Errorf("clone has the same address as the original: %v == %v", p, c) + } + if !deep.Equal(p, c) { + t.Errorf("mismatched point: %s", spew.Sdump(c)) + } + if &p.Aux[0] == &c.Aux[0] { + t.Errorf("aux values share the same address: %v == %v", p.Aux, c.Aux) + } else if !deep.Equal(p.Aux, c.Aux) { + t.Errorf("mismatched aux fields: %v != %v", p.Aux, c.Aux) + } +} + +func TestPoint_Clone_Boolean(t *testing.T) { + p := &influxql.BooleanPoint{ + Name: "cpu", + Tags: ParseTags("host=server01"), + Time: 5, + Value: true, + Aux: []interface{}{float64(45)}, + } + c := p.Clone() + if p == c { + t.Errorf("clone has the same address as the original: %v == %v", p, c) + } + if !deep.Equal(p, c) { + t.Errorf("mismatched point: %s", spew.Sdump(c)) + } + if &p.Aux[0] == &c.Aux[0] { + t.Errorf("aux values share the same address: %v == %v", p.Aux, c.Aux) + } else if !deep.Equal(p.Aux, c.Aux) { + t.Errorf("mismatched aux fields: %v != %v", p.Aux, c.Aux) + } +} + +func TestPoint_Clone_Nil(t *testing.T) { + var fp *influxql.FloatPoint + if p := fp.Clone(); p != nil { + t.Errorf("expected nil, got %v", p) + } + + var ip *influxql.IntegerPoint + if p := ip.Clone(); p != nil { + t.Errorf("expected nil, got %v", p) + } + + var sp *influxql.StringPoint + if p := sp.Clone(); p != nil { + t.Errorf("expected nil, got %v", p) + } + 
+ var bp *influxql.BooleanPoint + if p := bp.Clone(); p != nil { + t.Errorf("expected nil, got %v", p) + } +} + +// TestPoint_Fields ensures that no additional fields are added to the point structs. +// This struct is very sensitive and can effect performance unless handled carefully. +// To avoid the struct becoming a dumping ground for every function that needs to store +// miscellaneous information, this test is meant to ensure that new fields don't slip +// into the struct. +func TestPoint_Fields(t *testing.T) { + allowedFields := map[string]bool{ + "Name": true, + "Tags": true, + "Time": true, + "Nil": true, + "Value": true, + "Aux": true, + "Aggregated": true, + } + + for _, typ := range []reflect.Type{ + reflect.TypeOf(influxql.FloatPoint{}), + reflect.TypeOf(influxql.IntegerPoint{}), + reflect.TypeOf(influxql.StringPoint{}), + reflect.TypeOf(influxql.BooleanPoint{}), + } { + f, ok := typ.FieldByNameFunc(func(name string) bool { + return !allowedFields[name] + }) + if ok { + t.Errorf("found an unallowed field in %s: %s %s", typ, f.Name, f.Type) + } + } +} + +// Ensure that tags can return a unique id. +func TestTags_ID(t *testing.T) { + tags := influxql.NewTags(map[string]string{"foo": "bar", "baz": "bat"}) + if id := tags.ID(); id != "baz\x00foo\x00bat\x00bar" { + t.Fatalf("unexpected id: %q", id) + } +} + +// Ensure that a subset can be created from a tag set. +func TestTags_Subset(t *testing.T) { + tags := influxql.NewTags(map[string]string{"a": "0", "b": "1", "c": "2"}) + subset := tags.Subset([]string{"b", "c", "d"}) + if keys := subset.Keys(); !reflect.DeepEqual(keys, []string{"b", "c", "d"}) { + t.Fatalf("unexpected keys: %+v", keys) + } else if v := subset.Value("a"); v != "" { + t.Fatalf("unexpected 'a' value: %s", v) + } else if v := subset.Value("b"); v != "1" { + t.Fatalf("unexpected 'b' value: %s", v) + } else if v := subset.Value("c"); v != "2" { + t.Fatalf("unexpected 'c' value: %s", v) + } else if v := subset.Value("d"); v != "" { + t.Fatalf("unexpected 'd' value: %s", v) + } +} + +// ParseTags returns an instance of Tags for a comma-delimited list of key/values. +func ParseTags(s string) influxql.Tags { + m := make(map[string]string) + for _, kv := range strings.Split(s, ",") { + a := strings.Split(kv, "=") + m[a[0]] = a[1] + } + return influxql.NewTags(m) +} diff -Nru influxdb-0.10.0+dfsg1/influxql/query_executor.go influxdb-1.1.1+dfsg1/influxql/query_executor.go --- influxdb-0.10.0+dfsg1/influxql/query_executor.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/query_executor.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,418 @@ +package influxql + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "runtime/debug" + "sync" + "sync/atomic" + "time" + + "github.com/influxdata/influxdb/models" +) + +var ( + // ErrInvalidQuery is returned when executing an unknown query type. + ErrInvalidQuery = errors.New("invalid query") + + // ErrNotExecuted is returned when a statement is not executed in a query. + // This can occur when a previous statement in the same query has errored. + ErrNotExecuted = errors.New("not executed") + + // ErrQueryInterrupted is an error returned when the query is interrupted. + ErrQueryInterrupted = errors.New("query interrupted") + + // ErrQueryAborted is an error returned when the query is aborted. + ErrQueryAborted = errors.New("query aborted") + + // ErrQueryEngineShutdown is an error sent when the query cannot be + // created because the query engine was shutdown. 
+ ErrQueryEngineShutdown = errors.New("query engine shutdown") + + // ErrQueryTimeoutLimitExceeded is an error when a query hits the max time allowed to run. + ErrQueryTimeoutLimitExceeded = errors.New("query-timeout limit exceeded") +) + +// Statistics for the QueryExecutor +const ( + statQueriesActive = "queriesActive" // Number of queries currently being executed + statQueriesExecuted = "queriesExecuted" // Number of queries that have been executed (started). + statQueriesFinished = "queriesFinished" // Number of queries that have finished. + statQueryExecutionDuration = "queryDurationNs" // Total (wall) time spent executing queries +) + +// ErrDatabaseNotFound returns a database not found error for the given database name. +func ErrDatabaseNotFound(name string) error { return fmt.Errorf("database not found: %s", name) } + +// ErrMeasurementNotFound returns a measurement not found error for the given measurement name. +func ErrMeasurementNotFound(name string) error { return fmt.Errorf("measurement not found: %s", name) } + +// ErrMaxSelectPointsLimitExceeded is an error when a query hits the maximum number of points. +func ErrMaxSelectPointsLimitExceeded(n, limit int) error { + return fmt.Errorf("max-select-point limit exceeed: (%d/%d)", n, limit) +} + +// ErrMaxConcurrentQueriesLimitExceeded is an error when a query cannot be run +// because the maximum number of queries has been reached. +func ErrMaxConcurrentQueriesLimitExceeded(n, limit int) error { + return fmt.Errorf("max-concurrent-queries limit exceeded(%d, %d)", n, limit) +} + +// ExecutionOptions contains the options for executing a query. +type ExecutionOptions struct { + // The database the query is running against. + Database string + + // The requested maximum number of points to return in each result. + ChunkSize int + + // If this query is being executed in a read-only context. + ReadOnly bool + + // Node to execute on. + NodeID uint64 + + // Quiet suppresses non-essential output from the query executor. + Quiet bool + + // AbortCh is a channel that signals when results are no longer desired by the caller. + AbortCh <-chan struct{} +} + +// ExecutionContext contains state that the query is currently executing with. +type ExecutionContext struct { + // The statement ID of the executing query. + StatementID int + + // The query ID of the executing query. + QueryID uint64 + + // The query task information available to the StatementExecutor. + Query *QueryTask + + // Output channel where results and errors should be sent. + Results chan *Result + + // Hold the query executor's logger. + Log *log.Logger + + // A channel that is closed when the query is interrupted. + InterruptCh <-chan struct{} + + // Options used to start this query. + ExecutionOptions +} + +// send sends a Result to the Results channel and will exit if the query has +// been aborted. +func (ctx *ExecutionContext) send(result *Result) error { + select { + case <-ctx.AbortCh: + return ErrQueryAborted + case ctx.Results <- result: + } + return nil +} + +// Send sends a Result to the Results channel and will exit if the query has +// been interrupted or aborted. +func (ctx *ExecutionContext) Send(result *Result) error { + select { + case <-ctx.InterruptCh: + return ErrQueryInterrupted + case <-ctx.AbortCh: + return ErrQueryAborted + case ctx.Results <- result: + } + return nil +} + +// StatementExecutor executes a statement within the QueryExecutor. +type StatementExecutor interface { + // ExecuteStatement executes a statement. 
Results should be sent to the + // results channel in the ExecutionContext. + ExecuteStatement(stmt Statement, ctx ExecutionContext) error +} + +// StatementNormalizer normalizes a statement before it is executed. +type StatementNormalizer interface { + // NormalizeStatement adds a default database and policy to the + // measurements in the statement. + NormalizeStatement(stmt Statement, database string) error +} + +// QueryExecutor executes every statement in an Query. +type QueryExecutor struct { + // Used for executing a statement in the query. + StatementExecutor StatementExecutor + + // Used for tracking running queries. + TaskManager *TaskManager + + // Logger to use for all logging. + // Defaults to discarding all log output. + Logger *log.Logger + + // expvar-based stats. + stats *QueryStatistics +} + +// NewQueryExecutor returns a new instance of QueryExecutor. +func NewQueryExecutor() *QueryExecutor { + return &QueryExecutor{ + TaskManager: NewTaskManager(), + Logger: log.New(ioutil.Discard, "[query] ", log.LstdFlags), + stats: &QueryStatistics{}, + } +} + +// QueryStatistics keeps statistics related to the QueryExecutor. +type QueryStatistics struct { + ActiveQueries int64 + ExecutedQueries int64 + FinishedQueries int64 + QueryExecutionDuration int64 +} + +// Statistics returns statistics for periodic monitoring. +func (e *QueryExecutor) Statistics(tags map[string]string) []models.Statistic { + return []models.Statistic{{ + Name: "queryExecutor", + Tags: tags, + Values: map[string]interface{}{ + statQueriesActive: atomic.LoadInt64(&e.stats.ActiveQueries), + statQueriesExecuted: atomic.LoadInt64(&e.stats.ExecutedQueries), + statQueriesFinished: atomic.LoadInt64(&e.stats.FinishedQueries), + statQueryExecutionDuration: atomic.LoadInt64(&e.stats.QueryExecutionDuration), + }, + }} +} + +// Close kills all running queries and prevents new queries from being attached. +func (e *QueryExecutor) Close() error { + return e.TaskManager.Close() +} + +// SetLogOutput sets the writer to which all logs are written. It must not be +// called after Open is called. +func (e *QueryExecutor) SetLogOutput(w io.Writer) { + e.Logger = log.New(w, "[query] ", log.LstdFlags) + e.TaskManager.Logger = e.Logger +} + +// ExecuteQuery executes each statement within a query. +func (e *QueryExecutor) ExecuteQuery(query *Query, opt ExecutionOptions, closing chan struct{}) <-chan *Result { + results := make(chan *Result) + go e.executeQuery(query, opt, closing, results) + return results +} + +func (e *QueryExecutor) executeQuery(query *Query, opt ExecutionOptions, closing <-chan struct{}, results chan *Result) { + defer close(results) + defer e.recover(query, results) + + atomic.AddInt64(&e.stats.ActiveQueries, 1) + atomic.AddInt64(&e.stats.ExecutedQueries, 1) + defer func(start time.Time) { + atomic.AddInt64(&e.stats.ActiveQueries, -1) + atomic.AddInt64(&e.stats.FinishedQueries, 1) + atomic.AddInt64(&e.stats.QueryExecutionDuration, time.Since(start).Nanoseconds()) + }(time.Now()) + + qid, task, err := e.TaskManager.AttachQuery(query, opt.Database, closing) + if err != nil { + select { + case results <- &Result{Err: err}: + case <-opt.AbortCh: + } + return + } + defer e.TaskManager.KillQuery(qid) + + // Setup the execution context that will be used when executing statements. 
+ ctx := ExecutionContext{ + QueryID: qid, + Query: task, + Results: results, + Log: e.Logger, + InterruptCh: task.closing, + ExecutionOptions: opt, + } + + var i int +LOOP: + for ; i < len(query.Statements); i++ { + ctx.StatementID = i + stmt := query.Statements[i] + + // If a default database wasn't passed in by the caller, check the statement. + defaultDB := opt.Database + if defaultDB == "" { + if s, ok := stmt.(HasDefaultDatabase); ok { + defaultDB = s.DefaultDatabase() + } + } + + // Do not let queries manually use the system measurements. If we find + // one, return an error. This prevents a person from using the + // measurement incorrectly and causing a panic. + if stmt, ok := stmt.(*SelectStatement); ok { + for _, s := range stmt.Sources { + switch s := s.(type) { + case *Measurement: + if IsSystemName(s.Name) { + command := "the appropriate meta command" + switch s.Name { + case "_fieldKeys": + command = "SHOW FIELD KEYS" + case "_measurements": + command = "SHOW MEASUREMENTS" + case "_series": + command = "SHOW SERIES" + case "_tagKeys": + command = "SHOW TAG KEYS" + case "_tags": + command = "SHOW TAG VALUES" + } + results <- &Result{ + Err: fmt.Errorf("unable to use system source '%s': use %s instead", s.Name, command), + } + break LOOP + } + } + } + } + + // Rewrite statements, if necessary. + // This can occur on meta read statements which convert to SELECT statements. + newStmt, err := RewriteStatement(stmt) + if err != nil { + results <- &Result{Err: err} + break + } + stmt = newStmt + + // Normalize each statement if possible. + if normalizer, ok := e.StatementExecutor.(StatementNormalizer); ok { + if err := normalizer.NormalizeStatement(stmt, defaultDB); err != nil { + if err := ctx.send(&Result{Err: err}); err == ErrQueryAborted { + return + } + break + } + } + + // Log each normalized statement. + if !ctx.Quiet { + e.Logger.Println(stmt.String()) + } + + // Send any other statements to the underlying statement executor. + err = e.StatementExecutor.ExecuteStatement(stmt, ctx) + if err == ErrQueryInterrupted { + // Query was interrupted so retrieve the real interrupt error from + // the query task if there is one. + if qerr := task.Error(); qerr != nil { + err = qerr + } + } + + // Send an error for this result if it failed for some reason. + if err != nil { + if err := ctx.send(&Result{ + StatementID: i, + Err: err, + }); err == ErrQueryAborted { + return + } + // Stop after the first error. + break + } + + // Check if the query was interrupted during an uninterruptible statement. + interrupted := false + if ctx.InterruptCh != nil { + select { + case <-ctx.InterruptCh: + interrupted = true + default: + // Query has not been interrupted. + } + } + + if interrupted { + break + } + } + + // Send error results for any statements which were not executed. + for ; i < len(query.Statements)-1; i++ { + if err := ctx.send(&Result{ + StatementID: i, + Err: ErrNotExecuted, + }); err == ErrQueryAborted { + return + } + } +} + +func (e *QueryExecutor) recover(query *Query, results chan *Result) { + if err := recover(); err != nil { + e.Logger.Printf("%s [panic:%s] %s", query.String(), err, debug.Stack()) + results <- &Result{ + StatementID: -1, + Err: fmt.Errorf("%s [panic:%s]", query.String(), err), + } + } +} + +// QueryMonitorFunc is a function that will be called to check if a query +// is currently healthy. If the query needs to be interrupted for some reason, +// the error should be returned by this function. 
+type QueryMonitorFunc func(<-chan struct{}) error + +// QueryTask is the internal data structure for managing queries. +// For the public use data structure that gets returned, see QueryTask. +type QueryTask struct { + query string + database string + startTime time.Time + closing chan struct{} + monitorCh chan error + err error + mu sync.Mutex +} + +// Monitor starts a new goroutine that will monitor a query. The function +// will be passed in a channel to signal when the query has been finished +// normally. If the function returns with an error and the query is still +// running, the query will be terminated. +func (q *QueryTask) Monitor(fn QueryMonitorFunc) { + go q.monitor(fn) +} + +// Error returns any asynchronous error that may have occured while executing +// the query. +func (q *QueryTask) Error() error { + q.mu.Lock() + defer q.mu.Unlock() + return q.err +} + +func (q *QueryTask) setError(err error) { + q.mu.Lock() + q.err = err + q.mu.Unlock() +} + +func (q *QueryTask) monitor(fn QueryMonitorFunc) { + if err := fn(q.closing); err != nil { + select { + case <-q.closing: + case q.monitorCh <- err: + } + } +} diff -Nru influxdb-0.10.0+dfsg1/influxql/query_executor_test.go influxdb-1.1.1+dfsg1/influxql/query_executor_test.go --- influxdb-0.10.0+dfsg1/influxql/query_executor_test.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/query_executor_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,372 @@ +package influxql_test + +import ( + "errors" + "fmt" + "strings" + "testing" + "time" + + "github.com/influxdata/influxdb/influxql" +) + +var errUnexpected = errors.New("unexpected error") + +type StatementExecutor struct { + ExecuteStatementFn func(stmt influxql.Statement, ctx influxql.ExecutionContext) error +} + +func (e *StatementExecutor) ExecuteStatement(stmt influxql.Statement, ctx influxql.ExecutionContext) error { + return e.ExecuteStatementFn(stmt, ctx) +} + +func NewQueryExecutor() *influxql.QueryExecutor { + return influxql.NewQueryExecutor() +} + +func TestQueryExecutor_AttachQuery(t *testing.T) { + q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`) + if err != nil { + t.Fatal(err) + } + + e := NewQueryExecutor() + e.StatementExecutor = &StatementExecutor{ + ExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error { + if ctx.QueryID != 1 { + t.Errorf("incorrect query id: exp=1 got=%d", ctx.QueryID) + } + return nil + }, + } + + discardOutput(e.ExecuteQuery(q, influxql.ExecutionOptions{}, nil)) +} + +func TestQueryExecutor_KillQuery(t *testing.T) { + q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`) + if err != nil { + t.Fatal(err) + } + + qid := make(chan uint64) + + e := NewQueryExecutor() + e.StatementExecutor = &StatementExecutor{ + ExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error { + switch stmt.(type) { + case *influxql.KillQueryStatement: + return e.TaskManager.ExecuteStatement(stmt, ctx) + } + + qid <- ctx.QueryID + select { + case <-ctx.InterruptCh: + return influxql.ErrQueryInterrupted + case <-time.After(100 * time.Millisecond): + t.Error("killing the query did not close the channel after 100 milliseconds") + return errUnexpected + } + }, + } + + results := e.ExecuteQuery(q, influxql.ExecutionOptions{}, nil) + q, err = influxql.ParseQuery(fmt.Sprintf("KILL QUERY %d", <-qid)) + if err != nil { + t.Fatal(err) + } + discardOutput(e.ExecuteQuery(q, influxql.ExecutionOptions{}, nil)) + + result := <-results + if result.Err != 
influxql.ErrQueryInterrupted { + t.Errorf("unexpected error: %s", result.Err) + } +} + +func TestQueryExecutor_Interrupt(t *testing.T) { + q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`) + if err != nil { + t.Fatal(err) + } + + e := NewQueryExecutor() + e.StatementExecutor = &StatementExecutor{ + ExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error { + select { + case <-ctx.InterruptCh: + return influxql.ErrQueryInterrupted + case <-time.After(100 * time.Millisecond): + t.Error("killing the query did not close the channel after 100 milliseconds") + return errUnexpected + } + }, + } + + closing := make(chan struct{}) + results := e.ExecuteQuery(q, influxql.ExecutionOptions{}, closing) + close(closing) + result := <-results + if result.Err != influxql.ErrQueryInterrupted { + t.Errorf("unexpected error: %s", result.Err) + } +} + +func TestQueryExecutor_Abort(t *testing.T) { + q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`) + if err != nil { + t.Fatal(err) + } + + ch1 := make(chan struct{}) + ch2 := make(chan struct{}) + + e := NewQueryExecutor() + e.StatementExecutor = &StatementExecutor{ + ExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error { + <-ch1 + if err := ctx.Send(&influxql.Result{Err: errUnexpected}); err != influxql.ErrQueryAborted { + t.Errorf("unexpected error: %v", err) + } + close(ch2) + return nil + }, + } + + done := make(chan struct{}) + close(done) + + results := e.ExecuteQuery(q, influxql.ExecutionOptions{AbortCh: done}, nil) + close(ch1) + + <-ch2 + discardOutput(results) +} + +func TestQueryExecutor_ShowQueries(t *testing.T) { + q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`) + if err != nil { + t.Fatal(err) + } + + e := NewQueryExecutor() + e.StatementExecutor = &StatementExecutor{ + ExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error { + switch stmt.(type) { + case *influxql.ShowQueriesStatement: + return e.TaskManager.ExecuteStatement(stmt, ctx) + } + + t.Errorf("unexpected statement: %s", stmt) + return errUnexpected + }, + } + + q, err = influxql.ParseQuery(`SHOW QUERIES`) + if err != nil { + t.Fatal(err) + } + + results := e.ExecuteQuery(q, influxql.ExecutionOptions{}, nil) + result := <-results + if len(result.Series) != 1 { + t.Errorf("expected %d rows, got %d", 1, len(result.Series)) + } + if result.Err != nil { + t.Errorf("unexpected error: %s", result.Err) + } +} + +func TestQueryExecutor_Limit_Timeout(t *testing.T) { + q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`) + if err != nil { + t.Fatal(err) + } + + e := NewQueryExecutor() + e.StatementExecutor = &StatementExecutor{ + ExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error { + select { + case <-ctx.InterruptCh: + return influxql.ErrQueryInterrupted + case <-time.After(time.Second): + t.Errorf("timeout has not killed the query") + return errUnexpected + } + }, + } + e.TaskManager.QueryTimeout = time.Nanosecond + + results := e.ExecuteQuery(q, influxql.ExecutionOptions{}, nil) + result := <-results + if result.Err == nil || !strings.Contains(result.Err.Error(), "query-timeout") { + t.Errorf("unexpected error: %s", result.Err) + } +} + +func TestQueryExecutor_Limit_ConcurrentQueries(t *testing.T) { + q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`) + if err != nil { + t.Fatal(err) + } + + qid := make(chan uint64) + + e := NewQueryExecutor() + e.StatementExecutor = &StatementExecutor{ + ExecuteStatementFn: 
func(stmt influxql.Statement, ctx influxql.ExecutionContext) error { + qid <- ctx.QueryID + <-ctx.InterruptCh + return influxql.ErrQueryInterrupted + }, + } + e.TaskManager.MaxConcurrentQueries = 1 + defer e.Close() + + // Start first query and wait for it to be executing. + go discardOutput(e.ExecuteQuery(q, influxql.ExecutionOptions{}, nil)) + <-qid + + // Start second query and expect for it to fail. + results := e.ExecuteQuery(q, influxql.ExecutionOptions{}, nil) + + select { + case result := <-results: + if len(result.Series) != 0 { + t.Errorf("expected %d rows, got %d", 0, len(result.Series)) + } + if result.Err == nil || !strings.Contains(result.Err.Error(), "max-concurrent-queries") { + t.Errorf("unexpected error: %s", result.Err) + } + case <-qid: + t.Errorf("unexpected statement execution for the second query") + } +} + +func TestQueryExecutor_Close(t *testing.T) { + q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`) + if err != nil { + t.Fatal(err) + } + + ch1 := make(chan struct{}) + ch2 := make(chan struct{}) + + e := NewQueryExecutor() + e.StatementExecutor = &StatementExecutor{ + ExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error { + close(ch1) + <-ctx.InterruptCh + return influxql.ErrQueryInterrupted + }, + } + + results := e.ExecuteQuery(q, influxql.ExecutionOptions{}, nil) + go func(results <-chan *influxql.Result) { + result := <-results + if result.Err != influxql.ErrQueryEngineShutdown { + t.Errorf("unexpected error: %s", result.Err) + } + close(ch2) + }(results) + + // Wait for the statement to start executing. + <-ch1 + + // Close the query executor. + e.Close() + + // Check that the statement gets interrupted and finishes. + select { + case <-ch2: + case <-time.After(100 * time.Millisecond): + t.Fatal("closing the query manager did not kill the query after 100 milliseconds") + } + + results = e.ExecuteQuery(q, influxql.ExecutionOptions{}, nil) + result := <-results + if len(result.Series) != 0 { + t.Errorf("expected %d rows, got %d", 0, len(result.Series)) + } + if result.Err != influxql.ErrQueryEngineShutdown { + t.Errorf("unexpected error: %s", result.Err) + } +} + +func TestQueryExecutor_Panic(t *testing.T) { + q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`) + if err != nil { + t.Fatal(err) + } + + e := NewQueryExecutor() + e.StatementExecutor = &StatementExecutor{ + ExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error { + panic("test error") + }, + } + + results := e.ExecuteQuery(q, influxql.ExecutionOptions{}, nil) + result := <-results + if len(result.Series) != 0 { + t.Errorf("expected %d rows, got %d", 0, len(result.Series)) + } + if result.Err == nil || result.Err.Error() != "SELECT count(value) FROM cpu [panic:test error]" { + t.Errorf("unexpected error: %s", result.Err) + } +} + +func TestQueryExecutor_InvalidSource(t *testing.T) { + e := NewQueryExecutor() + e.StatementExecutor = &StatementExecutor{ + ExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error { + return errors.New("statement executed unexpectedly") + }, + } + + for i, tt := range []struct { + q string + err string + }{ + { + q: `SELECT fieldKey, fieldType FROM _fieldKeys`, + err: `unable to use system source '_fieldKeys': use SHOW FIELD KEYS instead`, + }, + { + q: `SELECT "name" FROM _measurements`, + err: `unable to use system source '_measurements': use SHOW MEASUREMENTS instead`, + }, + { + q: `SELECT "key" FROM _series`, + err: `unable to use system source 
'_series': use SHOW SERIES instead`, + }, + { + q: `SELECT tagKey FROM _tagKeys`, + err: `unable to use system source '_tagKeys': use SHOW TAG KEYS instead`, + }, + { + q: `SELECT "key", value FROM _tags`, + err: `unable to use system source '_tags': use SHOW TAG VALUES instead`, + }, + } { + q, err := influxql.ParseQuery(tt.q) + if err != nil { + t.Errorf("%d. unable to parse: %s", i, tt.q) + continue + } + + results := e.ExecuteQuery(q, influxql.ExecutionOptions{}, nil) + result := <-results + if len(result.Series) != 0 { + t.Errorf("%d. expected %d rows, got %d", 0, i, len(result.Series)) + } + if result.Err == nil || result.Err.Error() != tt.err { + t.Errorf("%d. unexpected error: %s", i, result.Err) + } + } +} + +func discardOutput(results <-chan *influxql.Result) { + for range results { + // Read all results and discard. + } +} diff -Nru influxdb-0.10.0+dfsg1/influxql/README.md influxdb-1.1.1+dfsg1/influxql/README.md --- influxdb-0.10.0+dfsg1/influxql/README.md 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/README.md 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,1048 @@ +# The Influx Query Language Specification + +## Introduction + +This is a reference for the Influx Query Language ("InfluxQL"). + +InfluxQL is a SQL-like query language for interacting with InfluxDB. It has +been lovingly crafted to feel familiar to those coming from other SQL or +SQL-like environments while providing features specific to storing and analyzing +time series data. + + +## Notation + +The syntax is specified using Extended Backus-Naur Form ("EBNF"). EBNF is the +same notation used in the [Go](http://golang.org) programming language +specification, which can be found [here](https://golang.org/ref/spec). Not so +coincidentally, InfluxDB is written in Go. + +``` +Production = production_name "=" [ Expression ] "." . +Expression = Alternative { "|" Alternative } . +Alternative = Term { Term } . +Term = production_name | token [ "…" token ] | Group | Option | Repetition . +Group = "(" Expression ")" . +Option = "[" Expression "]" . +Repetition = "{" Expression "}" . +``` + +Notation operators in order of increasing precedence: + +``` +| alternation +() grouping +[] option (0 or 1 times) +{} repetition (0 to n times) +``` + +## Query representation + +### Characters + +InfluxQL is Unicode text encoded in [UTF-8](http://en.wikipedia.org/wiki/UTF-8). + +``` +newline = /* the Unicode code point U+000A */ . +unicode_char = /* an arbitrary Unicode code point except newline */ . +``` + +## Letters and digits + +Letters are the set of ASCII characters plus the underscore character _ (U+005F) +is considered a letter. + +Only decimal digits are supported. + +``` +letter = ascii_letter | "_" . +ascii_letter = "A" … "Z" | "a" … "z" . +digit = "0" … "9" . +``` + +## Identifiers + +Identifiers are tokens which refer to database names, retention policy names, +user names, measurement names, tag keys, and field keys. + +The rules: + +- double quoted identifiers can contain any unicode character other than a new line +- double quoted identifiers can contain escaped `"` characters (i.e., `\"`) +- double quoted identifiers can contain InfluxQL keywords +- unquoted identifiers must start with an upper or lowercase ASCII character or "_" +- unquoted identifiers may contain only ASCII letters, decimal digits, and "_" + +``` +identifier = unquoted_identifier | quoted_identifier . +unquoted_identifier = ( letter ) { letter | digit } . +quoted_identifier = `"` unicode_char { unicode_char } `"` . 
+``` + +#### Examples: + +``` +cpu +_cpu_stats +"1h" +"anything really" +"1_Crazy-1337.identifier>NAME👍" +``` + +## Keywords + +``` +ALL ALTER ANY AS ASC BEGIN +BY CREATE CONTINUOUS DATABASE DATABASES DEFAULT +DELETE DESC DESTINATIONS DIAGNOSTICS DISTINCT DROP +DURATION END EVERY EXPLAIN FIELD FOR +FROM GRANT GRANTS GROUP GROUPS IN +INF INSERT INTO KEY KEYS KILL +LIMIT SHOW MEASUREMENT MEASUREMENTS NAME OFFSET +ON ORDER PASSWORD POLICY POLICIES PRIVILEGES +QUERIES QUERY READ REPLICATION RESAMPLE RETENTION +REVOKE SELECT SERIES SET SHARD SHARDS +SLIMIT SOFFSET STATS SUBSCRIPTION SUBSCRIPTIONS TAG +TO USER USERS VALUES WHERE WITH +WRITE +``` + +## Literals + +### Integers + +InfluxQL supports decimal integer literals. Hexadecimal and octal literals are +not currently supported. + +``` +int_lit = ( "1" … "9" ) { digit } . +``` + +### Floats + +InfluxQL supports floating-point literals. Exponents are not currently supported. + +``` +float_lit = int_lit "." int_lit . +``` + +### Strings + +String literals must be surrounded by single quotes. Strings may contain `'` +characters as long as they are escaped (i.e., `\'`). + +``` +string_lit = `'` { unicode_char } `'` . +``` + +### Durations + +Duration literals specify a length of time. An integer literal followed +immediately (with no spaces) by a duration unit listed below is interpreted as +a duration literal. + +### Duration units +| Units | Meaning | +|--------|-----------------------------------------| +| u or µ | microseconds (1 millionth of a second) | +| ms | milliseconds (1 thousandth of a second) | +| s | second | +| m | minute | +| h | hour | +| d | day | +| w | week | + +``` +duration_lit = int_lit duration_unit . +duration_unit = "u" | "µ" | "ms" | "s" | "m" | "h" | "d" | "w" . +``` + +### Dates & Times + +The date and time literal format is not specified in EBNF like the rest of this document. It is specified using Go's date / time parsing format, which is a reference date written in the format required by InfluxQL. The reference date time is: + +InfluxQL reference date time: January 2nd, 2006 at 3:04:05 PM + +``` +time_lit = "2006-01-02 15:04:05.999999" | "2006-01-02" . +``` + +### Booleans + +``` +bool_lit = TRUE | FALSE . +``` + +### Regular Expressions + +``` +regex_lit = "/" { unicode_char } "/" . +``` + +**Comparators:** +`=~` matches against +`!~` doesn't match against + +> **Note:** Use regular expressions to match measurements and tags. +You cannot use regular expressions to match databases, retention policies, or fields. + +## Queries + +A query is composed of one or more statements separated by a semicolon. + +``` +query = statement { ";" statement } . + +statement = alter_retention_policy_stmt | + create_continuous_query_stmt | + create_database_stmt | + create_retention_policy_stmt | + create_subscription_stmt | + create_user_stmt | + delete_stmt | + drop_continuous_query_stmt | + drop_database_stmt | + drop_measurement_stmt | + drop_retention_policy_stmt | + drop_series_stmt | + drop_shard_stmt | + drop_subscription_stmt | + drop_user_stmt | + grant_stmt | + kill_query_statement | + show_continuous_queries_stmt | + show_databases_stmt | + show_field_keys_stmt | + show_grants_stmt | + show_measurements_stmt | + show_queries_stmt | + show_retention_policies | + show_series_stmt | + show_shard_groups_stmt | + show_shards_stmt | + show_subscriptions_stmt| + show_tag_keys_stmt | + show_tag_values_stmt | + show_users_stmt | + revoke_stmt | + select_stmt . 
+``` + +## Statements + +### ALTER RETENTION POLICY + +``` +alter_retention_policy_stmt = "ALTER RETENTION POLICY" policy_name on_clause + retention_policy_option + [ retention_policy_option ] + [ retention_policy_option ] + [ retention_policy_option ] . +``` + +> Replication factors do not serve a purpose with single node instances. + +#### Examples: + +```sql +-- Set default retention policy for mydb to 1h.cpu. +ALTER RETENTION POLICY "1h.cpu" ON "mydb" DEFAULT + +-- Change duration and replication factor. +ALTER RETENTION POLICY "policy1" ON "somedb" DURATION 1h REPLICATION 4 +``` + +### CREATE CONTINUOUS QUERY + +``` +create_continuous_query_stmt = "CREATE CONTINUOUS QUERY" query_name on_clause + [ "RESAMPLE" resample_opts ] + "BEGIN" select_stmt "END" . + +query_name = identifier . + +resample_opts = (every_stmt for_stmt | every_stmt | for_stmt) . +every_stmt = "EVERY" duration_lit +for_stmt = "FOR" duration_lit +``` + +#### Examples: + +```sql +-- selects from DEFAULT retention policy and writes into 6_months retention policy +CREATE CONTINUOUS QUERY "10m_event_count" +ON "db_name" +BEGIN + SELECT count("value") + INTO "6_months"."events" + FROM "events" + GROUP BY time(10m) +END; + +-- this selects from the output of one continuous query in one retention policy and outputs to another series in another retention policy +CREATE CONTINUOUS QUERY "1h_event_count" +ON "db_name" +BEGIN + SELECT sum("count") as "count" + INTO "2_years"."events" + FROM "6_months"."events" + GROUP BY time(1h) +END; + +-- this customizes the resample interval so the interval is queried every 10s and intervals are resampled until 2m after their start time +-- when resample is used, at least one of "EVERY" or "FOR" must be used +CREATE CONTINUOUS QUERY "cpu_mean" +ON "db_name" +RESAMPLE EVERY 10s FOR 2m +BEGIN + SELECT mean("value") + INTO "cpu_mean" + FROM "cpu" + GROUP BY time(1m) +END; +``` + +### CREATE DATABASE + +``` +create_database_stmt = "CREATE DATABASE" db_name + [ WITH + [ retention_policy_duration ] + [ retention_policy_replication ] + [ retention_policy_shard_group_duration ] + [ retention_policy_name ] + ] . +``` + +> Replication factors do not serve a purpose with single node instances. + +#### Examples: + +```sql +-- Create a database called foo +CREATE DATABASE "foo" + +-- Create a database called bar with a new DEFAULT retention policy and specify the duration, replication, shard group duration, and name of that retention policy +CREATE DATABASE "bar" WITH DURATION 1d REPLICATION 1 SHARD DURATION 30m NAME "myrp" + +-- Create a database called mydb with a new DEFAULT retention policy and specify the name of that retention policy +CREATE DATABASE "mydb" WITH NAME "myrp" +``` + +### CREATE RETENTION POLICY + +``` +create_retention_policy_stmt = "CREATE RETENTION POLICY" policy_name on_clause + retention_policy_duration + retention_policy_replication + [ retention_policy_shard_group_duration ] + [ "DEFAULT" ] . +``` + +> Replication factors do not serve a purpose with single node instances. + +#### Examples + +```sql +-- Create a retention policy. +CREATE RETENTION POLICY "10m.events" ON "somedb" DURATION 60m REPLICATION 2 + +-- Create a retention policy and set it as the DEFAULT. +CREATE RETENTION POLICY "10m.events" ON "somedb" DURATION 60m REPLICATION 2 DEFAULT + +-- Create a retention policy and specify the shard group duration. 
+CREATE RETENTION POLICY "10m.events" ON "somedb" DURATION 60m REPLICATION 2 SHARD DURATION 30m +``` + +### CREATE SUBSCRIPTION + +Subscriptions tell InfluxDB to send all the data it receives to Kapacitor or other third parties. + +``` +create_subscription_stmt = "CREATE SUBSCRIPTION" subscription_name "ON" db_name "." retention_policy "DESTINATIONS" ("ANY"|"ALL") host { "," host} . +``` + +#### Examples: + +```sql +-- Create a SUBSCRIPTION on database 'mydb' and retention policy 'autogen' that send data to 'example.com:9090' via UDP. +CREATE SUBSCRIPTION "sub0" ON "mydb"."autogen" DESTINATIONS ALL 'udp://example.com:9090' + +-- Create a SUBSCRIPTION on database 'mydb' and retention policy 'autogen' that round robins the data to 'h1.example.com:9090' and 'h2.example.com:9090'. +CREATE SUBSCRIPTION "sub0" ON "mydb"."autogen" DESTINATIONS ANY 'udp://h1.example.com:9090', 'udp://h2.example.com:9090' +``` + +### CREATE USER + +``` +create_user_stmt = "CREATE USER" user_name "WITH PASSWORD" password + [ "WITH ALL PRIVILEGES" ] . +``` + +#### Examples: + +```sql +-- Create a normal database user. +CREATE USER "jdoe" WITH PASSWORD '1337password' + +-- Create an admin user. +-- Note: Unlike the GRANT statement, the "PRIVILEGES" keyword is required here. +CREATE USER "jdoe" WITH PASSWORD '1337password' WITH ALL PRIVILEGES +``` + +> **Note:** The password string must be wrapped in single quotes. + +### DELETE + +``` +delete_stmt = "DELETE" ( from_clause | where_clause | from_clause where_clause ) . +``` + +#### Examples: + +```sql +DELETE FROM "cpu" +DELETE FROM "cpu" WHERE time < '2000-01-01T00:00:00Z' +DELETE WHERE time < '2000-01-01T00:00:00Z' +``` + +### DROP CONTINUOUS QUERY + +``` +drop_continuous_query_stmt = "DROP CONTINUOUS QUERY" query_name on_clause . +``` + +#### Example: + +```sql +DROP CONTINUOUS QUERY "myquery" ON "mydb" +``` + +### DROP DATABASE + +``` +drop_database_stmt = "DROP DATABASE" db_name . +``` + +#### Example: + +```sql +DROP DATABASE "mydb" +``` + +### DROP MEASUREMENT + +``` +drop_measurement_stmt = "DROP MEASUREMENT" measurement . +``` + +#### Examples: + +```sql +-- drop the cpu measurement +DROP MEASUREMENT "cpu" +``` + +### DROP RETENTION POLICY + +``` +drop_retention_policy_stmt = "DROP RETENTION POLICY" policy_name on_clause . +``` + +#### Example: + +```sql +-- drop the retention policy named 1h.cpu from mydb +DROP RETENTION POLICY "1h.cpu" ON "mydb" +``` + +### DROP SERIES + +``` +drop_series_stmt = "DROP SERIES" ( from_clause | where_clause | from_clause where_clause ) . +``` + +#### Example: + +```sql +DROP SERIES FROM "telegraf"."autogen"."cpu" WHERE cpu = 'cpu8' + +``` + +### DROP SHARD + +``` +drop_shard_stmt = "DROP SHARD" ( shard_id ) . +``` + +#### Example: + +``` +DROP SHARD 1 +``` + +### DROP SUBSCRIPTION + +``` +drop_subscription_stmt = "DROP SUBSCRIPTION" subscription_name "ON" db_name "." retention_policy . +``` + +#### Example: + +```sql +DROP SUBSCRIPTION "sub0" ON "mydb"."autogen" +``` + +### DROP USER + +``` +drop_user_stmt = "DROP USER" user_name . +``` + +#### Example: + +```sql +DROP USER "jdoe" +``` + +### GRANT + +> **NOTE:** Users can be granted privileges on databases that do not exist. + +``` +grant_stmt = "GRANT" privilege [ on_clause ] to_clause . +``` + +#### Examples: + +```sql +-- grant admin privileges +GRANT ALL TO "jdoe" + +-- grant read access to a database +GRANT READ ON "mydb" TO "jdoe" +``` + +### KILL QUERY + +``` +kill_query_statement = "KILL QUERY" query_id . 
+``` + +#### Examples: + +``` +--- kill a query with the query_id 36 +KILL QUERY 36 +``` + +> **NOTE:** Identify the `query_id` from the `SHOW QUERIES` output. + +### SHOW CONTINUOUS QUERIES + +``` +show_continuous_queries_stmt = "SHOW CONTINUOUS QUERIES" . +``` + +#### Example: + +```sql +-- show all continuous queries +SHOW CONTINUOUS QUERIES +``` + +### SHOW DATABASES + +``` +show_databases_stmt = "SHOW DATABASES" . +``` + +#### Example: + +```sql +-- show all databases +SHOW DATABASES +``` + +### SHOW FIELD KEYS + +``` +show_field_keys_stmt = "SHOW FIELD KEYS" [ from_clause ] . +``` + +#### Examples: + +```sql +-- show field keys and field value data types from all measurements +SHOW FIELD KEYS + +-- show field keys and field value data types from specified measurement +SHOW FIELD KEYS FROM "cpu" +``` + +### SHOW GRANTS + +``` +show_grants_stmt = "SHOW GRANTS FOR" user_name . +``` + +#### Example: + +```sql +-- show grants for jdoe +SHOW GRANTS FOR "jdoe" +``` + +### SHOW MEASUREMENTS + +``` +show_measurements_stmt = "SHOW MEASUREMENTS" [ with_measurement_clause ] [ where_clause ] [ limit_clause ] [ offset_clause ] . +``` + +#### Examples: + +```sql +-- show all measurements +SHOW MEASUREMENTS + +-- show measurements where region tag = 'uswest' AND host tag = 'serverA' +SHOW MEASUREMENTS WHERE "region" = 'uswest' AND "host" = 'serverA' + +-- show measurements that start with 'h2o' +SHOW MEASUREMENTS WITH MEASUREMENT =~ /h2o.*/ +``` + +### SHOW QUERIES + +``` +show_queries_stmt = "SHOW QUERIES" . +``` + +#### Example: + +```sql +-- show all currently-running queries +SHOW QUERIES +``` + +### SHOW RETENTION POLICIES + +``` +show_retention_policies = "SHOW RETENTION POLICIES" on_clause . +``` + +#### Example: + +```sql +-- show all retention policies on a database +SHOW RETENTION POLICIES ON "mydb" +``` + +### SHOW SERIES + +``` +show_series_stmt = "SHOW SERIES" [ from_clause ] [ where_clause ] [ limit_clause ] [ offset_clause ] . +``` + +#### Example: + +```sql +SHOW SERIES FROM "telegraf"."autogen"."cpu" WHERE cpu = 'cpu8' +``` + +### SHOW SHARD GROUPS + +``` +show_shard_groups_stmt = "SHOW SHARD GROUPS" . +``` + +#### Example: + +```sql +SHOW SHARD GROUPS +``` + +### SHOW SHARDS + +``` +show_shards_stmt = "SHOW SHARDS" . +``` + +#### Example: + +```sql +SHOW SHARDS +``` + +### SHOW SUBSCRIPTIONS + +``` +show_subscriptions_stmt = "SHOW SUBSCRIPTIONS" . +``` + +#### Example: + +```sql +SHOW SUBSCRIPTIONS +``` + +### SHOW TAG KEYS + +``` +show_tag_keys_stmt = "SHOW TAG KEYS" [ from_clause ] [ where_clause ] [ group_by_clause ] + [ limit_clause ] [ offset_clause ] . +``` + +#### Examples: + +```sql +-- show all tag keys +SHOW TAG KEYS + +-- show all tag keys from the cpu measurement +SHOW TAG KEYS FROM "cpu" + +-- show all tag keys from the cpu measurement where the region key = 'uswest' +SHOW TAG KEYS FROM "cpu" WHERE "region" = 'uswest' + +-- show all tag keys where the host key = 'serverA' +SHOW TAG KEYS WHERE "host" = 'serverA' +``` + +### SHOW TAG VALUES + +``` +show_tag_values_stmt = "SHOW TAG VALUES" [ from_clause ] with_tag_clause [ where_clause ] + [ group_by_clause ] [ limit_clause ] [ offset_clause ] . 
+``` + +#### Examples: + +```sql +-- show all tag values across all measurements for the region tag +SHOW TAG VALUES WITH KEY = "region" + +-- show tag values from the cpu measurement for the region tag +SHOW TAG VALUES FROM "cpu" WITH KEY = "region" + +-- show tag values across all measurements for all tag keys that do not include the letter c +SHOW TAG VALUES WITH KEY !~ /.*c.*/ + +-- show tag values from the cpu measurement for region & host tag keys where service = 'redis' +SHOW TAG VALUES FROM "cpu" WITH KEY IN ("region", "host") WHERE "service" = 'redis' +``` + +### SHOW USERS + +``` +show_users_stmt = "SHOW USERS" . +``` + +#### Example: + +```sql +-- show all users +SHOW USERS +``` + +### REVOKE + +``` +revoke_stmt = "REVOKE" privilege [ on_clause ] "FROM" user_name . +``` + +#### Examples: + +```sql +-- revoke admin privileges from jdoe +REVOKE ALL PRIVILEGES FROM "jdoe" + +-- revoke read privileges from jdoe on mydb +REVOKE READ ON "mydb" FROM "jdoe" +``` + +### SELECT + +``` +select_stmt = "SELECT" fields from_clause [ into_clause ] [ where_clause ] + [ group_by_clause ] [ order_by_clause ] [ limit_clause ] + [ offset_clause ] [ slimit_clause ] [ soffset_clause ] . +``` + +#### Examples: + +```sql +-- select mean value from the cpu measurement where region = 'uswest' grouped by 10 minute intervals +SELECT mean("value") FROM "cpu" WHERE "region" = 'uswest' GROUP BY time(10m) fill(0) + +-- select from all measurements beginning with cpu into the same measurement name in the cpu_1h retention policy +SELECT mean("value") INTO "cpu_1h".:MEASUREMENT FROM /cpu.*/ +``` + +## Clauses + +``` +from_clause = "FROM" measurements . + +group_by_clause = "GROUP BY" dimensions fill(fill_option). + +into_clause = "INTO" ( measurement | back_ref ). + +limit_clause = "LIMIT" int_lit . + +offset_clause = "OFFSET" int_lit . + +slimit_clause = "SLIMIT" int_lit . + +soffset_clause = "SOFFSET" int_lit . + +on_clause = "ON" db_name . + +order_by_clause = "ORDER BY" sort_fields . + +to_clause = "TO" user_name . + +where_clause = "WHERE" expr . + +with_measurement_clause = "WITH MEASUREMENT" ( "=" measurement | "=~" regex_lit ) . + +with_tag_clause = "WITH KEY" ( "=" tag_key | "!=" tag_key | "=~" regex_lit | "IN (" tag_keys ")" ) . +``` + +## Expressions + +``` +binary_op = "+" | "-" | "*" | "/" | "AND" | "OR" | "=" | "!=" | "<>" | "<" | + "<=" | ">" | ">=" . + +expr = unary_expr { binary_op unary_expr } . + +unary_expr = "(" expr ")" | var_ref | time_lit | string_lit | int_lit | + float_lit | bool_lit | duration_lit | regex_lit . +``` + +## Other + +``` +alias = "AS" identifier . + +back_ref = ( policy_name ".:MEASUREMENT" ) | + ( db_name "." [ policy_name ] ".:MEASUREMENT" ) . + +db_name = identifier . + +dimension = expr . + +dimensions = dimension { "," dimension } . + +field_key = identifier . + +field = expr [ alias ] . + +fields = field { "," field } . + +fill_option = "null" | "none" | "previous" | "linear" | int_lit | float_lit . + +host = string_lit . + +measurement = measurement_name | + ( policy_name "." measurement_name ) | + ( db_name "." [ policy_name ] "." measurement_name ) . + +measurements = measurement { "," measurement } . + +measurement_name = identifier | regex_lit . + +password = string_lit . + +policy_name = identifier . + +privilege = "ALL" [ "PRIVILEGES" ] | "READ" | "WRITE" . + +query_id = int_lit . + +query_name = identifier . + +retention_policy = identifier . 
+ +retention_policy_option = retention_policy_duration | + retention_policy_replication | + retention_policy_shard_group_duration | + "DEFAULT" . + +retention_policy_duration = "DURATION" duration_lit . + +retention_policy_replication = "REPLICATION" int_lit . + +retention_policy_shard_group_duration = "SHARD DURATION" duration_lit . + +retention_policy_name = "NAME" identifier . + +series_id = int_lit . + +shard_id = int_lit . + +sort_field = field_key [ ASC | DESC ] . + +sort_fields = sort_field { "," sort_field } . + +subscription_name = identifier . + +tag_key = identifier . + +tag_keys = tag_key { "," tag_key } . + +user_name = identifier . + +var_ref = measurement . +``` + +## Query Engine Internals + +Once you understand the language itself, it's important to know how these +language constructs are implemented in the query engine. This gives you an +intuitive sense for how results will be processed and how to create efficient +queries. + +The life cycle of a query looks like this: + +1. InfluxQL query string is tokenized and then parsed into an abstract syntax + tree (AST). This is the code representation of the query itself. + +2. The AST is passed to the `QueryExecutor` which directs queries to the + appropriate handlers. For example, queries related to meta data are executed + by the meta service and `SELECT` statements are executed by the shards + themselves. + +3. The query engine then determines the shards that match the `SELECT` + statement's time range. From these shards, iterators are created for each + field in the statement. + +4. Iterators are passed to the emitter which drains them and joins the resulting + points. The emitter's job is to convert simple time/value points into the + more complex result objects that are returned to the client. + + +### Understanding Iterators + +Iterators are at the heart of the query engine. They provide a simple interface +for looping over a set of points. For example, this is an iterator over Float +points: + +``` +type FloatIterator interface { + Next() *FloatPoint +} +``` + +These iterators are created through the `IteratorCreator` interface: + +``` +type IteratorCreator interface { + CreateIterator(opt *IteratorOptions) (Iterator, error) +} +``` + +The `IteratorOptions` provide arguments about field selection, time ranges, +and dimensions that the iterator creator can use when planning an iterator. +The `IteratorCreator` interface is used at many levels such as the `Shards`, +`Shard`, and `Engine`. This allows optimizations to be performed when applicable +such as returning a precomputed `COUNT()`. + +Iterators aren't just for reading raw data from storage though. Iterators can be +composed so that they provided additional functionality around an input +iterator. For example, a `DistinctIterator` can compute the distinct values for +each time window for an input iterator. Or a `FillIterator` can generate +additional points that are missing from an input iterator. + +This composition also lends itself well to aggregation. For example, a statement +such as this: + +``` +SELECT MEAN(value) FROM cpu GROUP BY time(10m) +``` + +In this case, `MEAN(value)` is a `MeanIterator` wrapping an iterator from the +underlying shards. 
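+
+As an illustration only, here is a minimal, self-contained sketch of this kind
+of composition: an iterator that computes a mean over the points produced by an
+input iterator. The stripped-down `FloatPoint` and the names `sliceIterator`
+and `meanIterator` are invented for this sketch and are not the engine's actual
+implementation, which also tracks names, tags, and time windows:
+
+```
+package main
+
+import "fmt"
+
+// FloatPoint is a simplified stand-in for the engine's FloatPoint type.
+type FloatPoint struct {
+    Time  int64
+    Value float64
+}
+
+// FloatIterator matches the simple iterator shape shown above.
+type FloatIterator interface {
+    Next() *FloatPoint
+}
+
+// sliceIterator plays the role of a shard-level iterator over raw points.
+type sliceIterator struct {
+    points []FloatPoint
+    i      int
+}
+
+func (itr *sliceIterator) Next() *FloatPoint {
+    if itr.i >= len(itr.points) {
+        return nil
+    }
+    p := &itr.points[itr.i]
+    itr.i++
+    return p
+}
+
+// meanIterator wraps an input iterator and emits a single point holding the
+// mean of every value the input produces.
+type meanIterator struct {
+    input FloatIterator
+    done  bool
+}
+
+func (itr *meanIterator) Next() *FloatPoint {
+    if itr.done {
+        return nil
+    }
+    itr.done = true
+
+    var sum float64
+    var n int
+    for p := itr.input.Next(); p != nil; p = itr.input.Next() {
+        sum += p.Value
+        n++
+    }
+    if n == 0 {
+        return nil
+    }
+    return &FloatPoint{Value: sum / float64(n)}
+}
+
+func main() {
+    raw := &sliceIterator{points: []FloatPoint{
+        {Time: 0, Value: 2}, {Time: 10, Value: 4}, {Time: 20, Value: 9},
+    }}
+    mean := &meanIterator{input: raw}
+    for p := mean.Next(); p != nil; p = mean.Next() {
+        fmt.Println("mean =", p.Value) // mean = 5
+    }
+}
+```
+
+The real engine composes iterators in the same spirit, although its
+implementations also split the input into per-window and per-tag groups.
+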
However, we can also add an additional iterator to determine
+the derivative of the mean:
+
+```
+SELECT DERIVATIVE(MEAN(value), 20m) FROM cpu GROUP BY time(10m)
+```
+
+
+### Understanding Auxiliary Fields
+
+Because InfluxQL allows users to use selector functions such as `FIRST()`,
+`LAST()`, `MIN()`, and `MAX()`, the engine must provide a way to return related
+data alongside the selected point.
+
+For example, in this query:
+
+```
+SELECT FIRST(value), host FROM cpu GROUP BY time(1h)
+```
+
+We are selecting the first `value` that occurs every hour but we also want to
+retrieve the `host` associated with that point. Since the `Point` types only
+specify a single typed `Value` for efficiency, we push the `host` into the
+auxiliary fields of the point. These auxiliary fields are attached to the point
+until it is passed to the emitter where the fields get split off to their own
+iterator.
+
+
+### Built-in Iterators
+
+There are many helper iterators that let us build queries:
+
+* Merge Iterator - This iterator combines one or more iterators into a single
+  new iterator of the same type. This iterator guarantees that all points
+  within a window will be output before starting the next window but does not
+  provide ordering guarantees within the window. This allows for fast access
+  for aggregate queries which do not need stronger sorting guarantees.
+
+* Sorted Merge Iterator - This iterator also combines one or more iterators
+  into a new iterator of the same type. However, this iterator guarantees
+  time ordering of every point. This makes it slower than the `MergeIterator`
+  but this ordering guarantee is required for non-aggregate queries which
+  return the raw data points.
+
+* Limit Iterator - This iterator limits the number of points per name/tag
+  group. This is the implementation of the `LIMIT` & `OFFSET` syntax.
+
+* Fill Iterator - This iterator injects extra points if they are missing from
+  the input iterator. It can provide `null` points, points with the previous
+  value, or points with a specific value.
+
+* Buffered Iterator - This iterator provides the ability to "unread" a point
+  back onto a buffer so it can be read again next time. This is used extensively
+  to provide lookahead for windowing.
+
+* Reduce Iterator - This iterator calls a reduction function for each point in
+  a window. When the window is complete then all points for that window are
+  output. This is used for simple aggregate functions such as `COUNT()`.
+
+* Reduce Slice Iterator - This iterator collects all points for a window first
+  and then passes them all to a reduction function at once. The results are
+  returned from the iterator. This is used for aggregate functions such as
+  `DERIVATIVE()`.
+
+* Transform Iterator - This iterator calls a transform function for each point
+  from an input iterator. This is used for executing binary expressions.
+
+* Dedupe Iterator - This iterator only outputs unique points. It is resource
+  intensive so it is only used for small queries such as meta query statements.
+
+
+### Call Iterators
+
+Function calls in InfluxQL are implemented at two levels. Some calls can be
+wrapped at multiple layers to improve efficiency. For example, a `COUNT()` can
+be performed at the shard level and then multiple `CountIterator`s can be
+wrapped with another `CountIterator` to compute the count of all shards. These
+iterators can be created using `NewCallIterator()`; a rough sketch of this
+two-level pattern follows.
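+
+As an illustration only, the following sketch shows the two-level idea for
+`COUNT()` described above: each shard-level iterator produces a partial count,
+and a second iterator combines the partial counts into a total. The names
+(`floatSlice`, `countIterator`, `sumCountsIterator`) and the trimmed-down point
+types are invented for this example and are not the engine's actual
+`CountIterator` implementation:
+
+```
+package main
+
+import "fmt"
+
+// Simplified stand-ins for the engine's point and iterator types.
+type FloatPoint struct{ Value float64 }
+type IntegerPoint struct{ Value int64 }
+
+type FloatIterator interface{ Next() *FloatPoint }
+type IntegerIterator interface{ Next() *IntegerPoint }
+
+// floatSlice plays the role of a shard-level iterator over raw points.
+type floatSlice struct {
+    points []FloatPoint
+    i      int
+}
+
+func (itr *floatSlice) Next() *FloatPoint {
+    if itr.i >= len(itr.points) {
+        return nil
+    }
+    p := &itr.points[itr.i]
+    itr.i++
+    return p
+}
+
+// countIterator counts the points of one input (the shard-level step).
+type countIterator struct {
+    input FloatIterator
+    done  bool
+}
+
+func (itr *countIterator) Next() *IntegerPoint {
+    if itr.done {
+        return nil
+    }
+    itr.done = true
+    var n int64
+    for p := itr.input.Next(); p != nil; p = itr.input.Next() {
+        n++
+    }
+    return &IntegerPoint{Value: n}
+}
+
+// sumCountsIterator adds up the partial counts from several inputs
+// (the second level that combines all shards).
+type sumCountsIterator struct {
+    inputs []IntegerIterator
+    done   bool
+}
+
+func (itr *sumCountsIterator) Next() *IntegerPoint {
+    if itr.done {
+        return nil
+    }
+    itr.done = true
+    var total int64
+    for _, input := range itr.inputs {
+        for p := input.Next(); p != nil; p = input.Next() {
+            total += p.Value
+        }
+    }
+    return &IntegerPoint{Value: total}
+}
+
+func main() {
+    shard1 := &countIterator{input: &floatSlice{points: []FloatPoint{{Value: 1}, {Value: 2}, {Value: 3}}}}
+    shard2 := &countIterator{input: &floatSlice{points: []FloatPoint{{Value: 4}, {Value: 5}}}}
+    total := &sumCountsIterator{inputs: []IntegerIterator{shard1, shard2}}
+    fmt.Println("count =", total.Next().Value) // count = 5
+}
+```
+
+Some iterators are more complex or need to be implemented at a higher level.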
+For example, the `DERIVATIVE()` needs to retrieve all points for a window first +before performing the calculation. This iterator is created by the engine itself +and is never requested to be created by the lower levels. diff -Nru influxdb-0.10.0+dfsg1/influxql/result.go influxdb-1.1.1+dfsg1/influxql/result.go --- influxdb-0.10.0+dfsg1/influxql/result.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/result.go 2016-12-06 21:36:15.000000000 +0000 @@ -3,8 +3,14 @@ import ( "encoding/json" "errors" + "fmt" - "github.com/influxdb/influxdb/models" + "github.com/influxdata/influxdb/models" +) + +const ( + // WarningLevel is the message level for a warning. + WarningLevel = "warning" ) // TagSet is a fundamental concept within the query system. It represents a composite series, @@ -22,13 +28,39 @@ t.Filters = append(t.Filters, filter) } -// Rows represents a list of rows that can be sorted consistently by name/tag. +func (t *TagSet) Len() int { return len(t.SeriesKeys) } +func (t *TagSet) Less(i, j int) bool { return t.SeriesKeys[i] < t.SeriesKeys[j] } +func (t *TagSet) Swap(i, j int) { + t.SeriesKeys[i], t.SeriesKeys[j] = t.SeriesKeys[j], t.SeriesKeys[i] + t.Filters[i], t.Filters[j] = t.Filters[j], t.Filters[i] +} + +// Message represents a user-facing message to be included with the result. +type Message struct { + Level string `json:"level"` + Text string `json:"text"` +} + +// ReadOnlyWarning generates a warning message that tells the user the command +// they are using is being used for writing in a read only context. +// +// This is a temporary method while to be used while transitioning to read only +// operations for issue #6290. +func ReadOnlyWarning(stmt string) *Message { + return &Message{ + Level: WarningLevel, + Text: fmt.Sprintf("deprecated use of '%s' in a read only context, please use a POST request instead", stmt), + } +} + // Result represents a resultset returned from a single statement. +// Rows represents a list of rows that can be sorted consistently by name/tag. type Result struct { // StatementID is just the statement's position in the query. It's used // to combine statement results if they're being buffered in memory. StatementID int `json:"-"` Series models.Rows + Messages []*Message Err error } @@ -36,12 +68,14 @@ func (r *Result) MarshalJSON() ([]byte, error) { // Define a struct that outputs "error" as a string. var o struct { - Series []*models.Row `json:"series,omitempty"` - Err string `json:"error,omitempty"` + Series []*models.Row `json:"series,omitempty"` + Messages []*Message `json:"messages,omitempty"` + Err string `json:"error,omitempty"` } // Copy fields to output struct. 
o.Series = r.Series + o.Messages = r.Messages if r.Err != nil { o.Err = r.Err.Error() } @@ -52,8 +86,9 @@ // UnmarshalJSON decodes the data into the Result struct func (r *Result) UnmarshalJSON(b []byte) error { var o struct { - Series []*models.Row `json:"series,omitempty"` - Err string `json:"error,omitempty"` + Series []*models.Row `json:"series,omitempty"` + Messages []*Message `json:"messages,omitempty"` + Err string `json:"error,omitempty"` } err := json.Unmarshal(b, &o) @@ -61,126 +96,9 @@ return err } r.Series = o.Series + r.Messages = o.Messages if o.Err != "" { r.Err = errors.New(o.Err) } return nil } - -func GetProcessor(expr Expr, startIndex int) (Processor, int) { - switch expr := expr.(type) { - case *VarRef: - return newEchoProcessor(startIndex), startIndex + 1 - case *Call: - return newEchoProcessor(startIndex), startIndex + 1 - case *BinaryExpr: - return getBinaryProcessor(expr, startIndex) - case *ParenExpr: - return GetProcessor(expr.Expr, startIndex) - case *NumberLiteral: - return newLiteralProcessor(expr.Val), startIndex - case *StringLiteral: - return newLiteralProcessor(expr.Val), startIndex - case *BooleanLiteral: - return newLiteralProcessor(expr.Val), startIndex - case *TimeLiteral: - return newLiteralProcessor(expr.Val), startIndex - case *DurationLiteral: - return newLiteralProcessor(expr.Val), startIndex - } - panic("unreachable") -} - -type Processor func(values []interface{}) interface{} - -func newEchoProcessor(index int) Processor { - return func(values []interface{}) interface{} { - if index > len(values)-1 { - return nil - } - return values[index] - } -} - -func newLiteralProcessor(val interface{}) Processor { - return func(values []interface{}) interface{} { - return val - } -} - -func getBinaryProcessor(expr *BinaryExpr, startIndex int) (Processor, int) { - lhs, index := GetProcessor(expr.LHS, startIndex) - rhs, index := GetProcessor(expr.RHS, index) - - return newBinaryExprEvaluator(expr.Op, lhs, rhs), index -} - -func newBinaryExprEvaluator(op Token, lhs, rhs Processor) Processor { - switch op { - case ADD: - return func(values []interface{}) interface{} { - l := lhs(values) - r := rhs(values) - if lf, rf, ok := processorValuesAsFloat64(l, r); ok { - return lf + rf - } - return nil - } - case SUB: - return func(values []interface{}) interface{} { - l := lhs(values) - r := rhs(values) - if lf, rf, ok := processorValuesAsFloat64(l, r); ok { - return lf - rf - } - return nil - } - case MUL: - return func(values []interface{}) interface{} { - l := lhs(values) - r := rhs(values) - if lf, rf, ok := processorValuesAsFloat64(l, r); ok { - return lf * rf - } - return nil - } - case DIV: - return func(values []interface{}) interface{} { - l := lhs(values) - r := rhs(values) - if lf, rf, ok := processorValuesAsFloat64(l, r); ok { - return lf / rf - } - return nil - } - default: - // we shouldn't get here, but give them back nils if it goes this way - return func(values []interface{}) interface{} { - return nil - } - } -} - -func processorValuesAsFloat64(lhs interface{}, rhs interface{}) (float64, float64, bool) { - var lf float64 - var rf float64 - var ok bool - - lf, ok = lhs.(float64) - if !ok { - var li int64 - if li, ok = lhs.(int64); !ok { - return 0, 0, false - } - lf = float64(li) - } - rf, ok = rhs.(float64) - if !ok { - var ri int64 - if ri, ok = rhs.(int64); !ok { - return 0, 0, false - } - rf = float64(ri) - } - return lf, rf, true -} diff -Nru influxdb-0.10.0+dfsg1/influxql/sanitize.go influxdb-1.1.1+dfsg1/influxql/sanitize.go --- 
influxdb-0.10.0+dfsg1/influxql/sanitize.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/sanitize.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,47 @@ +package influxql + +import ( + "bytes" + "regexp" +) + +var ( + sanitizeSetPassword = regexp.MustCompile(`(?i)password\s+for[^=]*=\s+(["']?[^\s"]+["']?)`) + + sanitizeCreatePassword = regexp.MustCompile(`(?i)with\s+password\s+(["']?[^\s"]+["']?)`) +) + +// Sanitize attempts to sanitize passwords out of a raw query. +// It looks for patterns that may be related to the SET PASSWORD and CREATE USER +// statements and will redact the password that should be there. It will attempt +// to redact information from common invalid queries too, but it's not guaranteed +// to succeed on improper queries. +// +// This function works on the raw query and attempts to retain the original input +// as much as possible. +func Sanitize(query string) string { + if matches := sanitizeSetPassword.FindAllStringSubmatchIndex(query, -1); matches != nil { + var buf bytes.Buffer + i := 0 + for _, match := range matches { + buf.WriteString(query[i:match[2]]) + buf.WriteString("[REDACTED]") + i = match[3] + } + buf.WriteString(query[i:]) + query = buf.String() + } + + if matches := sanitizeCreatePassword.FindAllStringSubmatchIndex(query, -1); matches != nil { + var buf bytes.Buffer + i := 0 + for _, match := range matches { + buf.WriteString(query[i:match[2]]) + buf.WriteString("[REDACTED]") + i = match[3] + } + buf.WriteString(query[i:]) + query = buf.String() + } + return query +} diff -Nru influxdb-0.10.0+dfsg1/influxql/sanitize_test.go influxdb-1.1.1+dfsg1/influxql/sanitize_test.go --- influxdb-0.10.0+dfsg1/influxql/sanitize_test.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/sanitize_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,49 @@ +package influxql_test + +import ( + "testing" + + "github.com/influxdata/influxdb/influxql" +) + +func TestSanitize(t *testing.T) { + var tests = []struct { + s string + stmt string + }{ + // Proper statements that should be redacted. + { + s: `create user "admin" with password 'admin'`, + stmt: `create user "admin" with password [REDACTED]`, + }, + { + s: `set password for "admin" = 'admin'`, + stmt: `set password for "admin" = [REDACTED]`, + }, + + // Common invalid statements that should still be redacted. + { + s: `create user "admin" with password "admin"`, + stmt: `create user "admin" with password [REDACTED]`, + }, + { + s: `set password for "admin" = "admin"`, + stmt: `set password for "admin" = [REDACTED]`, + }, + } + + for i, tt := range tests { + stmt := influxql.Sanitize(tt.s) + if tt.stmt != stmt { + t.Errorf("%d. %q\n\nsanitize mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.s, tt.stmt, stmt) + } + } +} + +func BenchmarkSanitize(b *testing.B) { + b.ReportAllocs() + q := `create user "admin" with password 'admin'; set password for "admin" = 'admin'` + for i := 0; i < b.N; i++ { + influxql.Sanitize(q) + } +} diff -Nru influxdb-0.10.0+dfsg1/influxql/scanner.go influxdb-1.1.1+dfsg1/influxql/scanner.go --- influxdb-0.10.0+dfsg1/influxql/scanner.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/scanner.go 2016-12-06 21:36:15.000000000 +0000 @@ -6,7 +6,6 @@ "errors" "fmt" "io" - "strings" ) // Scanner represents a lexical scanner for InfluxQL. 
@@ -33,7 +32,7 @@ return s.scanWhitespace() } else if isLetter(ch0) || ch0 == '_' { s.r.unread() - return s.scanIdent() + return s.scanIdent(true) } else if isDigit(ch0) { return s.scanNumber() } @@ -44,7 +43,7 @@ return EOF, pos, "" case '"': s.r.unread() - return s.scanIdent() + return s.scanIdent(true) case '\'': return s.scanString() case '.': @@ -54,6 +53,12 @@ return s.scanNumber() } return DOT, pos, "" + case '$': + tok, _, lit = s.scanIdent(false) + if tok != IDENT { + return tok, pos, "$" + lit + } + return BOUNDPARAM, pos, "$" + lit case '+', '-': return s.scanNumber() case '*': @@ -96,6 +101,10 @@ case ';': return SEMICOLON, pos, "" case ':': + if ch1, _ := s.r.read(); ch1 == ':' { + return DOUBLECOLON, pos, "" + } + s.r.unread() return COLON, pos, "" } @@ -126,7 +135,7 @@ return WS, pos, buf.String() } -func (s *Scanner) scanIdent() (tok Token, pos Pos, lit string) { +func (s *Scanner) scanIdent(lookup bool) (tok Token, pos Pos, lit string) { // Save the starting position of the identifier. _, pos = s.r.read() s.r.unread() @@ -152,10 +161,11 @@ lit = buf.String() // If the literal matches a keyword then return that keyword. - if tok = Lookup(lit); tok != IDENT { - return tok, pos, "" + if lookup { + if tok = Lookup(lit); tok != IDENT { + return tok, pos, "" + } } - return IDENT, pos, lit } @@ -176,6 +186,7 @@ return STRING, pos, lit } +// ScanRegex consumes a token to find escapes func (s *Scanner) ScanRegex() (tok Token, pos Pos, lit string) { _, pos = s.r.curr() @@ -237,35 +248,48 @@ _, _ = buf.WriteString(s.scanDigits()) // If next code points are a full stop and digit then consume them. + isDecimal := false if ch0, _ := s.r.read(); ch0 == '.' { + isDecimal = true if ch1, _ := s.r.read(); isDigit(ch1) { _, _ = buf.WriteRune(ch0) _, _ = buf.WriteRune(ch1) _, _ = buf.WriteString(s.scanDigits()) } else { s.r.unread() - s.r.unread() } } else { s.r.unread() } - // Attempt to read as a duration if it doesn't have a fractional part. - if !strings.Contains(buf.String(), ".") { - // If the next rune is a duration unit (u,µ,ms,s) then return a duration token - if ch0, _ := s.r.read(); ch0 == 'u' || ch0 == 'µ' || ch0 == 's' || ch0 == 'h' || ch0 == 'd' || ch0 == 'w' { + // Read as a duration or integer if it doesn't have a fractional part. + if !isDecimal { + // If the next rune is a letter then this is a duration token. + if ch0, _ := s.r.read(); isLetter(ch0) || ch0 == 'µ' { _, _ = buf.WriteRune(ch0) - return DURATION_VAL, pos, buf.String() - } else if ch0 == 'm' { - _, _ = buf.WriteRune(ch0) - if ch1, _ := s.r.read(); ch1 == 's' { + for { + ch1, _ := s.r.read() + if !isLetter(ch1) && ch1 != 'µ' { + s.r.unread() + break + } _, _ = buf.WriteRune(ch1) - } else { - s.r.unread() } - return DURATION_VAL, pos, buf.String() + + // Continue reading digits and letters as part of this token. + for { + if ch0, _ := s.r.read(); isLetter(ch0) || ch0 == 'µ' || isDigit(ch0) { + _, _ = buf.WriteRune(ch0) + } else { + s.r.unread() + break + } + } + return DURATIONVAL, pos, buf.String() + } else { + s.r.unread() + return INTEGER, pos, buf.String() } - s.r.unread() } return NUMBER, pos, buf.String() } @@ -444,6 +468,7 @@ // eof is a marker code point to signify that the reader can't read any more. const eof = rune(0) +// ScanDelimited reads a delimited set of runes func ScanDelimited(r io.RuneScanner, start, end rune, escapes map[rune]rune, escapesPassThru bool) ([]byte, error) { // Scan start delimiter. 
if ch, _, err := r.ReadRune(); err != nil { diff -Nru influxdb-0.10.0+dfsg1/influxql/scanner_test.go influxdb-1.1.1+dfsg1/influxql/scanner_test.go --- influxdb-0.10.0+dfsg1/influxql/scanner_test.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/scanner_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -5,7 +5,7 @@ "strings" "testing" - "github.com/influxdb/influxdb/influxql" + "github.com/influxdata/influxdb/influxql" ) // Ensure the scanner can scan tokens correctly. @@ -57,6 +57,8 @@ {s: `.`, tok: influxql.DOT}, {s: `=~`, tok: influxql.EQREGEX}, {s: `!~`, tok: influxql.NEQREGEX}, + {s: `:`, tok: influxql.COLON}, + {s: `::`, tok: influxql.DOUBLECOLON}, // Identifiers {s: `foo`, tok: influxql.IDENT, lit: `foo`}, @@ -68,6 +70,8 @@ {s: `"foo\"bar\""`, tok: influxql.IDENT, lit: `foo"bar"`}, {s: `test"`, tok: influxql.BADSTRING, lit: "", pos: influxql.Pos{Line: 0, Char: 3}}, {s: `"test`, tok: influxql.BADSTRING, lit: `test`}, + {s: `$host`, tok: influxql.BOUNDPARAM, lit: `$host`}, + {s: `$"host param"`, tok: influxql.BOUNDPARAM, lit: `$host param`}, {s: `true`, tok: influxql.TRUE}, {s: `false`, tok: influxql.FALSE}, @@ -81,7 +85,8 @@ {s: `'test\g'`, tok: influxql.BADESCAPE, lit: `\g`, pos: influxql.Pos{Line: 0, Char: 6}}, // Numbers - {s: `100`, tok: influxql.NUMBER, lit: `100`}, + {s: `100`, tok: influxql.INTEGER, lit: `100`}, + {s: `-100`, tok: influxql.INTEGER, lit: `-100`}, {s: `100.23`, tok: influxql.NUMBER, lit: `100.23`}, {s: `+100.23`, tok: influxql.NUMBER, lit: `+100.23`}, {s: `-100.23`, tok: influxql.NUMBER, lit: `-100.23`}, @@ -95,15 +100,15 @@ {s: `10.3s`, tok: influxql.NUMBER, lit: `10.3`}, // Durations - {s: `10u`, tok: influxql.DURATION_VAL, lit: `10u`}, - {s: `10µ`, tok: influxql.DURATION_VAL, lit: `10µ`}, - {s: `10ms`, tok: influxql.DURATION_VAL, lit: `10ms`}, - {s: `-1s`, tok: influxql.DURATION_VAL, lit: `-1s`}, - {s: `10m`, tok: influxql.DURATION_VAL, lit: `10m`}, - {s: `10h`, tok: influxql.DURATION_VAL, lit: `10h`}, - {s: `10d`, tok: influxql.DURATION_VAL, lit: `10d`}, - {s: `10w`, tok: influxql.DURATION_VAL, lit: `10w`}, - {s: `10x`, tok: influxql.NUMBER, lit: `10`}, // non-duration unit + {s: `10u`, tok: influxql.DURATIONVAL, lit: `10u`}, + {s: `10µ`, tok: influxql.DURATIONVAL, lit: `10µ`}, + {s: `10ms`, tok: influxql.DURATIONVAL, lit: `10ms`}, + {s: `-1s`, tok: influxql.DURATIONVAL, lit: `-1s`}, + {s: `10m`, tok: influxql.DURATIONVAL, lit: `10m`}, + {s: `10h`, tok: influxql.DURATIONVAL, lit: `10h`}, + {s: `10d`, tok: influxql.DURATIONVAL, lit: `10d`}, + {s: `10w`, tok: influxql.DURATIONVAL, lit: `10w`}, + {s: `10x`, tok: influxql.DURATIONVAL, lit: `10x`}, // non-duration unit, but scanned as a duration value // Keywords {s: `ALL`, tok: influxql.ALL}, @@ -123,26 +128,23 @@ {s: `DURATION`, tok: influxql.DURATION}, {s: `END`, tok: influxql.END}, {s: `EVERY`, tok: influxql.EVERY}, - {s: `EXISTS`, tok: influxql.EXISTS}, {s: `EXPLAIN`, tok: influxql.EXPLAIN}, {s: `FIELD`, tok: influxql.FIELD}, {s: `FROM`, tok: influxql.FROM}, {s: `GRANT`, tok: influxql.GRANT}, {s: `GROUP`, tok: influxql.GROUP}, {s: `GROUPS`, tok: influxql.GROUPS}, - {s: `IF`, tok: influxql.IF}, - {s: `INNER`, tok: influxql.INNER}, {s: `INSERT`, tok: influxql.INSERT}, {s: `INTO`, tok: influxql.INTO}, {s: `KEY`, tok: influxql.KEY}, {s: `KEYS`, tok: influxql.KEYS}, + {s: `KILL`, tok: influxql.KILL}, {s: `LIMIT`, tok: influxql.LIMIT}, {s: `SHOW`, tok: influxql.SHOW}, {s: `SHARD`, tok: influxql.SHARD}, {s: `SHARDS`, tok: influxql.SHARDS}, {s: `MEASUREMENT`, tok: influxql.MEASUREMENT}, {s: 
`MEASUREMENTS`, tok: influxql.MEASUREMENTS}, - {s: `NOT`, tok: influxql.NOT}, {s: `OFFSET`, tok: influxql.OFFSET}, {s: `ON`, tok: influxql.ON}, {s: `ORDER`, tok: influxql.ORDER}, @@ -159,8 +161,6 @@ {s: `REVOKE`, tok: influxql.REVOKE}, {s: `SELECT`, tok: influxql.SELECT}, {s: `SERIES`, tok: influxql.SERIES}, - {s: `SERVER`, tok: influxql.SERVER}, - {s: `SERVERS`, tok: influxql.SERVERS}, {s: `TAG`, tok: influxql.TAG}, {s: `TO`, tok: influxql.TO}, {s: `USER`, tok: influxql.USER}, diff -Nru influxdb-0.10.0+dfsg1/influxql/select.go influxdb-1.1.1+dfsg1/influxql/select.go --- influxdb-0.10.0+dfsg1/influxql/select.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/select.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,1058 @@ +package influxql + +import ( + "errors" + "fmt" + "sort" + "time" +) + +// SelectOptions are options that customize the select call. +type SelectOptions struct { + // The lower bound for a select call. + MinTime time.Time + + // The upper bound for a select call. + MaxTime time.Time + + // Node to exclusively read from. + // If zero, all nodes are used. + NodeID uint64 + + // An optional channel that, if closed, signals that the select should be + // interrupted. + InterruptCh <-chan struct{} + + // Maximum number of concurrent series. + MaxSeriesN int +} + +// Select executes stmt against ic and returns a list of iterators to stream from. +// +// Statements should have all rewriting performed before calling select(). This +// includes wildcard and source expansion. +func Select(stmt *SelectStatement, ic IteratorCreator, sopt *SelectOptions) ([]Iterator, error) { + // Determine base options for iterators. + opt, err := newIteratorOptionsStmt(stmt, sopt) + if err != nil { + return nil, err + } + + // Retrieve refs for each call and var ref. + info := newSelectInfo(stmt) + if len(info.calls) > 1 && len(info.refs) > 0 { + return nil, errors.New("cannot select fields when selecting multiple aggregates") + } + + // Determine auxiliary fields to be selected. + opt.Aux = make([]VarRef, 0, len(info.refs)) + for ref := range info.refs { + opt.Aux = append(opt.Aux, *ref) + } + sort.Sort(VarRefs(opt.Aux)) + + // If there are multiple auxilary fields and no calls then construct an aux iterator. + if len(info.calls) == 0 && len(info.refs) > 0 { + return buildAuxIterators(stmt.Fields, ic, opt) + } + + // Include auxiliary fields from top() and bottom() + extraFields := 0 + for call := range info.calls { + if call.Name == "top" || call.Name == "bottom" { + for i := 1; i < len(call.Args)-1; i++ { + ref := call.Args[i].(*VarRef) + opt.Aux = append(opt.Aux, *ref) + extraFields++ + } + } + } + + fields := stmt.Fields + if extraFields > 0 { + // Rebuild the list of fields if any extra fields are being implicitly added + fields = make([]*Field, 0, len(stmt.Fields)+extraFields) + for _, f := range stmt.Fields { + fields = append(fields, f) + switch expr := f.Expr.(type) { + case *Call: + if expr.Name == "top" || expr.Name == "bottom" { + for i := 1; i < len(expr.Args)-1; i++ { + fields = append(fields, &Field{Expr: expr.Args[i]}) + } + } + } + } + } + + // Determine if there is one call and it is a selector. + selector := false + if len(info.calls) == 1 { + for call := range info.calls { + switch call.Name { + case "first", "last", "min", "max", "percentile": + selector = true + } + } + } + + return buildFieldIterators(fields, ic, opt, selector) +} + +// buildAuxIterators creates a set of iterators from a single combined auxilary iterator. 
+func buildAuxIterators(fields Fields, ic IteratorCreator, opt IteratorOptions) ([]Iterator, error) { + // Create iterator to read auxilary fields. + input, err := ic.CreateIterator(opt) + if err != nil { + return nil, err + } else if input == nil { + input = &nilFloatIterator{} + } + + // Filter out duplicate rows, if required. + if opt.Dedupe { + // If there is no group by and it is a float iterator, see if we can use a fast dedupe. + if itr, ok := input.(FloatIterator); ok && len(opt.Dimensions) == 0 { + if sz := len(fields); sz > 0 && sz < 3 { + input = newFloatFastDedupeIterator(itr) + } else { + input = NewDedupeIterator(itr) + } + } else { + input = NewDedupeIterator(input) + } + } + + // Apply limit & offset. + if opt.Limit > 0 || opt.Offset > 0 { + input = NewLimitIterator(input, opt) + } + + // Wrap in an auxilary iterator to separate the fields. + aitr := NewAuxIterator(input, opt) + + // Generate iterators for each field. + itrs := make([]Iterator, len(fields)) + if err := func() error { + for i, f := range fields { + expr := Reduce(f.Expr, nil) + switch expr := expr.(type) { + case *VarRef: + itrs[i] = aitr.Iterator(expr.Val, expr.Type) + case *BinaryExpr: + itr, err := buildExprIterator(expr, aitr, opt, false) + if err != nil { + return fmt.Errorf("error constructing iterator for field '%s': %s", f.String(), err) + } + itrs[i] = itr + default: + return fmt.Errorf("invalid expression type: %T", expr) + } + } + return nil + }(); err != nil { + Iterators(Iterators(itrs).filterNonNil()).Close() + aitr.Close() + return nil, err + } + + // Background the primary iterator since there is no reader for it. + aitr.Background() + + return itrs, nil +} + +// buildFieldIterators creates an iterator for each field expression. +func buildFieldIterators(fields Fields, ic IteratorCreator, opt IteratorOptions, selector bool) ([]Iterator, error) { + // Create iterators from fields against the iterator creator. + itrs := make([]Iterator, len(fields)) + + if err := func() error { + hasAuxFields := false + + var input Iterator + for i, f := range fields { + // Build iterators for calls first and save the iterator. + // We do this so we can keep the ordering provided by the user, but + // still build the Call's iterator first. + if ContainsVarRef(f.Expr) { + hasAuxFields = true + continue + } + + expr := Reduce(f.Expr, nil) + itr, err := buildExprIterator(expr, ic, opt, selector) + if err != nil { + return err + } + itrs[i] = itr + input = itr + } + + if input == nil || !hasAuxFields { + return nil + } + + // Build the aux iterators. Previous validation should ensure that only one + // call was present so we build an AuxIterator from that input. + aitr := NewAuxIterator(input, opt) + for i, f := range fields { + if itrs[i] != nil { + itrs[i] = aitr + continue + } + + expr := Reduce(f.Expr, nil) + itr, err := buildExprIterator(expr, aitr, opt, false) + if err != nil { + return err + } + itrs[i] = itr + } + aitr.Start() + return nil + + }(); err != nil { + Iterators(Iterators(itrs).filterNonNil()).Close() + return nil, err + } + + // If there is a limit or offset then apply it. + if opt.Limit > 0 || opt.Offset > 0 { + for i := range itrs { + itrs[i] = NewLimitIterator(itrs[i], opt) + } + } + + return itrs, nil +} + +// buildExprIterator creates an iterator for an expression. 
+func buildExprIterator(expr Expr, ic IteratorCreator, opt IteratorOptions, selector bool) (Iterator, error) { + opt.Expr = expr + + switch expr := expr.(type) { + case *VarRef: + itr, err := ic.CreateIterator(opt) + if err != nil { + return nil, err + } else if itr == nil { + itr = &nilFloatIterator{} + } + return itr, nil + case *Call: + // FIXME(benbjohnson): Validate that only calls with 1 arg are passed to IC. + + switch expr.Name { + case "distinct": + input, err := buildExprIterator(expr.Args[0].(*VarRef), ic, opt, selector) + if err != nil { + return nil, err + } + input, err = NewDistinctIterator(input, opt) + if err != nil { + return nil, err + } + return NewIntervalIterator(input, opt), nil + case "sample": + input, err := buildExprIterator(expr.Args[0], ic, opt, selector) + if err != nil { + return nil, err + } + size := expr.Args[1].(*IntegerLiteral) + + return newSampleIterator(input, opt, int(size.Val)) + case "holt_winters", "holt_winters_with_fit": + input, err := buildExprIterator(expr.Args[0], ic, opt, selector) + if err != nil { + return nil, err + } + h := expr.Args[1].(*IntegerLiteral) + m := expr.Args[2].(*IntegerLiteral) + + includeFitData := "holt_winters_with_fit" == expr.Name + + interval := opt.Interval.Duration + // Redifine interval to be unbounded to capture all aggregate results + opt.StartTime = MinTime + opt.EndTime = MaxTime + opt.Interval = Interval{} + + return newHoltWintersIterator(input, opt, int(h.Val), int(m.Val), includeFitData, interval) + case "derivative", "non_negative_derivative", "difference", "moving_average", "elapsed": + if !opt.Interval.IsZero() { + if opt.Ascending { + opt.StartTime -= int64(opt.Interval.Duration) + } else { + opt.EndTime += int64(opt.Interval.Duration) + } + } + + input, err := buildExprIterator(expr.Args[0], ic, opt, selector) + if err != nil { + return nil, err + } + + switch expr.Name { + case "derivative", "non_negative_derivative": + interval := opt.DerivativeInterval() + isNonNegative := (expr.Name == "non_negative_derivative") + return newDerivativeIterator(input, opt, interval, isNonNegative) + case "elapsed": + interval := opt.ElapsedInterval() + return newElapsedIterator(input, opt, interval) + case "difference": + return newDifferenceIterator(input, opt) + case "moving_average": + n := expr.Args[1].(*IntegerLiteral) + if n.Val > 1 && !opt.Interval.IsZero() { + if opt.Ascending { + opt.StartTime -= int64(opt.Interval.Duration) * (n.Val - 1) + } else { + opt.EndTime += int64(opt.Interval.Duration) * (n.Val - 1) + } + } + return newMovingAverageIterator(input, int(n.Val), opt) + } + panic(fmt.Sprintf("invalid series aggregate function: %s", expr.Name)) + case "cumulative_sum": + input, err := buildExprIterator(expr.Args[0], ic, opt, selector) + if err != nil { + return nil, err + } + return newCumulativeSumIterator(input, opt) + default: + itr, err := func() (Iterator, error) { + switch expr.Name { + case "count": + switch arg := expr.Args[0].(type) { + case *Call: + if arg.Name == "distinct" { + input, err := buildExprIterator(arg, ic, opt, selector) + if err != nil { + return nil, err + } + return newCountIterator(input, opt) + } + } + + itr, err := ic.CreateIterator(opt) + if err != nil { + return nil, err + } else if itr == nil { + itr = &nilFloatIterator{} + } + return itr, nil + case "min", "max", "sum", "first", "last", "mean": + itr, err := ic.CreateIterator(opt) + if err != nil { + return nil, err + } else if itr == nil { + itr = &nilFloatIterator{} + } + return itr, nil + case "median": + input, err 
:= buildExprIterator(expr.Args[0].(*VarRef), ic, opt, false) + if err != nil { + return nil, err + } + return newMedianIterator(input, opt) + case "mode": + input, err := buildExprIterator(expr.Args[0].(*VarRef), ic, opt, false) + if err != nil { + return nil, err + } + return NewModeIterator(input, opt) + case "stddev": + input, err := buildExprIterator(expr.Args[0].(*VarRef), ic, opt, false) + if err != nil { + return nil, err + } + return newStddevIterator(input, opt) + case "spread": + // OPTIMIZE(benbjohnson): convert to map/reduce + input, err := buildExprIterator(expr.Args[0].(*VarRef), ic, opt, false) + if err != nil { + return nil, err + } + return newSpreadIterator(input, opt) + case "top": + var tags []int + if len(expr.Args) < 2 { + return nil, fmt.Errorf("top() requires 2 or more arguments, got %d", len(expr.Args)) + } else if len(expr.Args) > 2 { + // We need to find the indices of where the tag values are stored in Aux + // This section is O(n^2), but for what should be a low value. + for i := 1; i < len(expr.Args)-1; i++ { + ref := expr.Args[i].(*VarRef) + for index, aux := range opt.Aux { + if aux.Val == ref.Val { + tags = append(tags, index) + break + } + } + } + } + + input, err := buildExprIterator(expr.Args[0].(*VarRef), ic, opt, false) + if err != nil { + return nil, err + } + n := expr.Args[len(expr.Args)-1].(*IntegerLiteral) + return newTopIterator(input, opt, n, tags) + case "bottom": + var tags []int + if len(expr.Args) < 2 { + return nil, fmt.Errorf("bottom() requires 2 or more arguments, got %d", len(expr.Args)) + } else if len(expr.Args) > 2 { + // We need to find the indices of where the tag values are stored in Aux + // This section is O(n^2), but for what should be a low value. + for i := 1; i < len(expr.Args)-1; i++ { + ref := expr.Args[i].(*VarRef) + for index, aux := range opt.Aux { + if aux.Val == ref.Val { + tags = append(tags, index) + break + } + } + } + } + + input, err := buildExprIterator(expr.Args[0].(*VarRef), ic, opt, false) + if err != nil { + return nil, err + } + n := expr.Args[len(expr.Args)-1].(*IntegerLiteral) + return newBottomIterator(input, opt, n, tags) + case "percentile": + input, err := buildExprIterator(expr.Args[0].(*VarRef), ic, opt, false) + if err != nil { + return nil, err + } + var percentile float64 + switch arg := expr.Args[1].(type) { + case *NumberLiteral: + percentile = arg.Val + case *IntegerLiteral: + percentile = float64(arg.Val) + } + return newPercentileIterator(input, opt, percentile) + default: + return nil, fmt.Errorf("unsupported call: %s", expr.Name) + } + }() + + if err != nil { + return nil, err + } + + if !selector || !opt.Interval.IsZero() { + if expr.Name != "top" && expr.Name != "bottom" { + itr = NewIntervalIterator(itr, opt) + } + if !opt.Interval.IsZero() && opt.Fill != NoFill { + itr = NewFillIterator(itr, expr, opt) + } + } + if opt.InterruptCh != nil { + itr = NewInterruptIterator(itr, opt.InterruptCh) + } + return itr, nil + } + case *BinaryExpr: + if rhs, ok := expr.RHS.(Literal); ok { + // The right hand side is a literal. It is more common to have the RHS be a literal, + // so we check that one first and have this be the happy path. + if lhs, ok := expr.LHS.(Literal); ok { + // We have two literals that couldn't be combined by Reduce. 
+ return nil, fmt.Errorf("unable to construct an iterator from two literals: LHS: %T, RHS: %T", lhs, rhs) + } + + lhs, err := buildExprIterator(expr.LHS, ic, opt, false) + if err != nil { + return nil, err + } + return buildRHSTransformIterator(lhs, rhs, expr.Op, ic, opt) + } else if lhs, ok := expr.LHS.(Literal); ok { + rhs, err := buildExprIterator(expr.RHS, ic, opt, false) + if err != nil { + return nil, err + } + return buildLHSTransformIterator(lhs, rhs, expr.Op, ic, opt) + } else { + // We have two iterators. Combine them into a single iterator. + lhs, err := buildExprIterator(expr.LHS, ic, opt, false) + if err != nil { + return nil, err + } + rhs, err := buildExprIterator(expr.RHS, ic, opt, false) + if err != nil { + return nil, err + } + return buildTransformIterator(lhs, rhs, expr.Op, ic, opt) + } + case *ParenExpr: + return buildExprIterator(expr.Expr, ic, opt, selector) + default: + return nil, fmt.Errorf("invalid expression type: %T", expr) + } +} + +func buildRHSTransformIterator(lhs Iterator, rhs Literal, op Token, ic IteratorCreator, opt IteratorOptions) (Iterator, error) { + fn := binaryExprFunc(iteratorDataType(lhs), literalDataType(rhs), op) + switch fn := fn.(type) { + case func(float64, float64) float64: + var input FloatIterator + switch lhs := lhs.(type) { + case FloatIterator: + input = lhs + case IntegerIterator: + input = &integerFloatCastIterator{input: lhs} + default: + return nil, fmt.Errorf("type mismatch on LHS, unable to use %T as a FloatIterator", lhs) + } + + var val float64 + switch rhs := rhs.(type) { + case *NumberLiteral: + val = rhs.Val + case *IntegerLiteral: + val = float64(rhs.Val) + default: + return nil, fmt.Errorf("type mismatch on RHS, unable to use %T as a NumberLiteral", rhs) + } + return &floatTransformIterator{ + input: input, + fn: func(p *FloatPoint) *FloatPoint { + if p == nil { + return nil + } else if p.Nil { + return p + } + p.Value = fn(p.Value, val) + return p + }, + }, nil + case func(int64, int64) float64: + input, ok := lhs.(IntegerIterator) + if !ok { + return nil, fmt.Errorf("type mismatch on LHS, unable to use %T as a IntegerIterator", lhs) + } + + var val int64 + switch rhs := rhs.(type) { + case *IntegerLiteral: + val = rhs.Val + default: + return nil, fmt.Errorf("type mismatch on RHS, unable to use %T as a IntegerLiteral", rhs) + } + return &integerFloatTransformIterator{ + input: input, + fn: func(p *IntegerPoint) *FloatPoint { + if p == nil { + return nil + } + + fp := &FloatPoint{ + Name: p.Name, + Tags: p.Tags, + Time: p.Time, + Aux: p.Aux, + } + if p.Nil { + fp.Nil = true + } else { + fp.Value = fn(p.Value, val) + } + return fp + }, + }, nil + case func(float64, float64) bool: + var input FloatIterator + switch lhs := lhs.(type) { + case FloatIterator: + input = lhs + case IntegerIterator: + input = &integerFloatCastIterator{input: lhs} + default: + return nil, fmt.Errorf("type mismatch on LHS, unable to use %T as a FloatIterator", lhs) + } + + var val float64 + switch rhs := rhs.(type) { + case *NumberLiteral: + val = rhs.Val + case *IntegerLiteral: + val = float64(rhs.Val) + default: + return nil, fmt.Errorf("type mismatch on RHS, unable to use %T as a NumberLiteral", rhs) + } + return &floatBoolTransformIterator{ + input: input, + fn: func(p *FloatPoint) *BooleanPoint { + if p == nil { + return nil + } + + bp := &BooleanPoint{ + Name: p.Name, + Tags: p.Tags, + Time: p.Time, + Aux: p.Aux, + } + if p.Nil { + bp.Nil = true + } else { + bp.Value = fn(p.Value, val) + } + return bp + }, + }, nil + case func(int64, int64) 
int64: + var input IntegerIterator + switch lhs := lhs.(type) { + case IntegerIterator: + input = lhs + default: + return nil, fmt.Errorf("type mismatch on LHS, unable to use %T as an IntegerIterator", lhs) + } + + var val int64 + switch rhs := rhs.(type) { + case *IntegerLiteral: + val = rhs.Val + default: + return nil, fmt.Errorf("type mismatch on RHS, unable to use %T as an IntegerLiteral", rhs) + } + return &integerTransformIterator{ + input: input, + fn: func(p *IntegerPoint) *IntegerPoint { + if p == nil { + return nil + } else if p.Nil { + return p + } + p.Value = fn(p.Value, val) + return p + }, + }, nil + case func(int64, int64) bool: + var input IntegerIterator + switch lhs := lhs.(type) { + case IntegerIterator: + input = lhs + default: + return nil, fmt.Errorf("type mismatch on LHS, unable to use %T as an IntegerIterator", lhs) + } + + var val int64 + switch rhs := rhs.(type) { + case *IntegerLiteral: + val = rhs.Val + default: + return nil, fmt.Errorf("type mismatch on RHS, unable to use %T as an IntegerLiteral", rhs) + } + return &integerBoolTransformIterator{ + input: input, + fn: func(p *IntegerPoint) *BooleanPoint { + if p == nil { + return nil + } + + bp := &BooleanPoint{ + Name: p.Name, + Tags: p.Tags, + Time: p.Time, + Aux: p.Aux, + } + if p.Nil { + bp.Nil = true + } else { + bp.Value = fn(p.Value, val) + } + return bp + }, + }, nil + } + return nil, fmt.Errorf("unable to construct rhs transform iterator from %T and %T", lhs, rhs) +} + +func buildLHSTransformIterator(lhs Literal, rhs Iterator, op Token, ic IteratorCreator, opt IteratorOptions) (Iterator, error) { + fn := binaryExprFunc(literalDataType(lhs), iteratorDataType(rhs), op) + switch fn := fn.(type) { + case func(float64, float64) float64: + var input FloatIterator + switch rhs := rhs.(type) { + case FloatIterator: + input = rhs + case IntegerIterator: + input = &integerFloatCastIterator{input: rhs} + default: + return nil, fmt.Errorf("type mismatch on RHS, unable to use %T as a FloatIterator", rhs) + } + + var val float64 + switch lhs := lhs.(type) { + case *NumberLiteral: + val = lhs.Val + case *IntegerLiteral: + val = float64(lhs.Val) + default: + return nil, fmt.Errorf("type mismatch on LHS, unable to use %T as a NumberLiteral", lhs) + } + return &floatTransformIterator{ + input: input, + fn: func(p *FloatPoint) *FloatPoint { + if p == nil { + return nil + } else if p.Nil { + return p + } + p.Value = fn(val, p.Value) + return p + }, + }, nil + case func(int64, int64) float64: + input, ok := rhs.(IntegerIterator) + if !ok { + return nil, fmt.Errorf("type mismatch on RHS, unable to use %T as a IntegerIterator", lhs) + } + + var val int64 + switch lhs := lhs.(type) { + case *IntegerLiteral: + val = lhs.Val + default: + return nil, fmt.Errorf("type mismatch on LHS, unable to use %T as a IntegerLiteral", rhs) + } + return &integerFloatTransformIterator{ + input: input, + fn: func(p *IntegerPoint) *FloatPoint { + if p == nil { + return nil + } + + fp := &FloatPoint{ + Name: p.Name, + Tags: p.Tags, + Time: p.Time, + Aux: p.Aux, + } + if p.Nil { + fp.Nil = true + } else { + fp.Value = fn(val, p.Value) + } + return fp + }, + }, nil + case func(float64, float64) bool: + var input FloatIterator + switch rhs := rhs.(type) { + case FloatIterator: + input = rhs + case IntegerIterator: + input = &integerFloatCastIterator{input: rhs} + default: + return nil, fmt.Errorf("type mismatch on RHS, unable to use %T as a FloatIterator", rhs) + } + + var val float64 + switch lhs := lhs.(type) { + case *NumberLiteral: + val = 
lhs.Val + case *IntegerLiteral: + val = float64(lhs.Val) + default: + return nil, fmt.Errorf("type mismatch on LHS, unable to use %T as a NumberLiteral", lhs) + } + return &floatBoolTransformIterator{ + input: input, + fn: func(p *FloatPoint) *BooleanPoint { + if p == nil { + return nil + } + + bp := &BooleanPoint{ + Name: p.Name, + Tags: p.Tags, + Time: p.Time, + Aux: p.Aux, + } + if p.Nil { + bp.Nil = true + } else { + bp.Value = fn(val, p.Value) + } + return bp + }, + }, nil + case func(int64, int64) int64: + var input IntegerIterator + switch rhs := rhs.(type) { + case IntegerIterator: + input = rhs + default: + return nil, fmt.Errorf("type mismatch on RHS, unable to use %T as an IntegerIterator", rhs) + } + + var val int64 + switch lhs := lhs.(type) { + case *IntegerLiteral: + val = lhs.Val + default: + return nil, fmt.Errorf("type mismatch on LHS, unable to use %T as an IntegerLiteral", lhs) + } + return &integerTransformIterator{ + input: input, + fn: func(p *IntegerPoint) *IntegerPoint { + if p == nil { + return nil + } else if p.Nil { + return p + } + p.Value = fn(val, p.Value) + return p + }, + }, nil + case func(int64, int64) bool: + var input IntegerIterator + switch rhs := rhs.(type) { + case IntegerIterator: + input = rhs + default: + return nil, fmt.Errorf("type mismatch on RHS, unable to use %T as an IntegerIterator", rhs) + } + + var val int64 + switch lhs := lhs.(type) { + case *IntegerLiteral: + val = lhs.Val + default: + return nil, fmt.Errorf("type mismatch on LHS, unable to use %T as an IntegerLiteral", lhs) + } + return &integerBoolTransformIterator{ + input: input, + fn: func(p *IntegerPoint) *BooleanPoint { + if p == nil { + return nil + } + + bp := &BooleanPoint{ + Name: p.Name, + Tags: p.Tags, + Time: p.Time, + Aux: p.Aux, + } + if p.Nil { + bp.Nil = true + } else { + bp.Value = fn(val, p.Value) + } + return bp + }, + }, nil + } + return nil, fmt.Errorf("unable to construct lhs transform iterator from %T and %T", lhs, rhs) +} + +func buildTransformIterator(lhs Iterator, rhs Iterator, op Token, ic IteratorCreator, opt IteratorOptions) (Iterator, error) { + fn := binaryExprFunc(iteratorDataType(lhs), iteratorDataType(rhs), op) + switch fn := fn.(type) { + case func(float64, float64) float64: + var left FloatIterator + switch lhs := lhs.(type) { + case FloatIterator: + left = lhs + case IntegerIterator: + left = &integerFloatCastIterator{input: lhs} + default: + return nil, fmt.Errorf("type mismatch on LHS, unable to use %T as a FloatIterator", lhs) + } + + var right FloatIterator + switch rhs := rhs.(type) { + case FloatIterator: + right = rhs + case IntegerIterator: + right = &integerFloatCastIterator{input: rhs} + default: + return nil, fmt.Errorf("type mismatch on RHS, unable to use %T as a FloatIterator", rhs) + } + return newFloatExprIterator(left, right, opt, fn), nil + case func(int64, int64) float64: + left, ok := lhs.(IntegerIterator) + if !ok { + return nil, fmt.Errorf("type mismatch on LHS, unable to use %T as a IntegerIterator", lhs) + } + right, ok := rhs.(IntegerIterator) + if !ok { + return nil, fmt.Errorf("type mismatch on RHS, unable to use %T as a IntegerIterator", rhs) + } + return newIntegerFloatExprIterator(left, right, opt, fn), nil + case func(int64, int64) int64: + left, ok := lhs.(IntegerIterator) + if !ok { + return nil, fmt.Errorf("type mismatch on LHS, unable to use %T as a IntegerIterator", lhs) + } + right, ok := rhs.(IntegerIterator) + if !ok { + return nil, fmt.Errorf("type mismatch on RHS, unable to use %T as a IntegerIterator", 
rhs) + } + return newIntegerExprIterator(left, right, opt, fn), nil + case func(float64, float64) bool: + var left FloatIterator + switch lhs := lhs.(type) { + case FloatIterator: + left = lhs + case IntegerIterator: + left = &integerFloatCastIterator{input: lhs} + default: + return nil, fmt.Errorf("type mismatch on LHS, unable to use %T as a FloatIterator", lhs) + } + + var right FloatIterator + switch rhs := rhs.(type) { + case FloatIterator: + right = rhs + case IntegerIterator: + right = &integerFloatCastIterator{input: rhs} + default: + return nil, fmt.Errorf("type mismatch on RHS, unable to use %T as a FloatIterator", rhs) + } + return newFloatBooleanExprIterator(left, right, opt, fn), nil + case func(int64, int64) bool: + left, ok := lhs.(IntegerIterator) + if !ok { + return nil, fmt.Errorf("type mismatch on LHS, unable to use %T as a IntegerIterator", lhs) + } + right, ok := rhs.(IntegerIterator) + if !ok { + return nil, fmt.Errorf("type mismatch on LHS, unable to use %T as a IntegerIterator", rhs) + } + return newIntegerBooleanExprIterator(left, right, opt, fn), nil + } + return nil, fmt.Errorf("unable to construct transform iterator from %T and %T", lhs, rhs) +} + +func iteratorDataType(itr Iterator) DataType { + switch itr.(type) { + case FloatIterator: + return Float + case IntegerIterator: + return Integer + case StringIterator: + return String + case BooleanIterator: + return Boolean + default: + return Unknown + } +} + +func literalDataType(lit Literal) DataType { + switch lit.(type) { + case *NumberLiteral: + return Float + case *IntegerLiteral: + return Integer + case *StringLiteral: + return String + case *BooleanLiteral: + return Boolean + default: + return Unknown + } +} + +func binaryExprFunc(typ1 DataType, typ2 DataType, op Token) interface{} { + var fn interface{} + switch typ1 { + case Float: + fn = floatBinaryExprFunc(op) + case Integer: + switch typ2 { + case Float: + fn = floatBinaryExprFunc(op) + default: + fn = integerBinaryExprFunc(op) + } + } + return fn +} + +func floatBinaryExprFunc(op Token) interface{} { + switch op { + case ADD: + return func(lhs, rhs float64) float64 { return lhs + rhs } + case SUB: + return func(lhs, rhs float64) float64 { return lhs - rhs } + case MUL: + return func(lhs, rhs float64) float64 { return lhs * rhs } + case DIV: + return func(lhs, rhs float64) float64 { + if rhs == 0 { + return float64(0) + } + return lhs / rhs + } + case EQ: + return func(lhs, rhs float64) bool { return lhs == rhs } + case NEQ: + return func(lhs, rhs float64) bool { return lhs != rhs } + case LT: + return func(lhs, rhs float64) bool { return lhs < rhs } + case LTE: + return func(lhs, rhs float64) bool { return lhs <= rhs } + case GT: + return func(lhs, rhs float64) bool { return lhs > rhs } + case GTE: + return func(lhs, rhs float64) bool { return lhs >= rhs } + } + return nil +} + +func integerBinaryExprFunc(op Token) interface{} { + switch op { + case ADD: + return func(lhs, rhs int64) int64 { return lhs + rhs } + case SUB: + return func(lhs, rhs int64) int64 { return lhs - rhs } + case MUL: + return func(lhs, rhs int64) int64 { return lhs * rhs } + case DIV: + return func(lhs, rhs int64) float64 { + if rhs == 0 { + return float64(0) + } + return float64(lhs) / float64(rhs) + } + case EQ: + return func(lhs, rhs int64) bool { return lhs == rhs } + case NEQ: + return func(lhs, rhs int64) bool { return lhs != rhs } + case LT: + return func(lhs, rhs int64) bool { return lhs < rhs } + case LTE: + return func(lhs, rhs int64) bool { return lhs <= rhs } + case 
GT: + return func(lhs, rhs int64) bool { return lhs > rhs } + case GTE: + return func(lhs, rhs int64) bool { return lhs >= rhs } + } + return nil +} + +// stringSetSlice returns a sorted slice of keys from a string set. +func stringSetSlice(m map[string]struct{}) []string { + if m == nil { + return nil + } + + a := make([]string, 0, len(m)) + for k := range m { + a = append(a, k) + } + sort.Strings(a) + return a +} diff -Nru influxdb-0.10.0+dfsg1/influxql/select_test.go influxdb-1.1.1+dfsg1/influxql/select_test.go --- influxdb-0.10.0+dfsg1/influxql/select_test.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/select_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,2962 @@ +package influxql_test + +import ( + "fmt" + "reflect" + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/pkg/deep" +) + +// Second represents a helper for type converting durations. +const Second = int64(time.Second) + +// Ensure a SELECT min() query can be executed. +func TestSelect_Min(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + if !reflect.DeepEqual(opt.Expr, MustParseExpr(`min(value)`)) { + t.Fatalf("unexpected expr: %s", spew.Sdump(opt.Expr)) + } + + return influxql.NewCallIterator(&FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + }}, opt) + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT min(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected point: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 19, Aggregated: 2}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 0 * Second, Value: 10, Aggregated: 1}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 10 * Second, Value: 2, Aggregated: 2}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30 * Second, Value: 100, Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT distinct() query can be executed. 
+func TestSelect_Distinct_Float(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 1 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 11 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 12 * Second, Value: 2}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT distinct(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected point: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 20}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 19}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 0 * Second, Value: 10}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 10 * Second, Value: 2}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT distinct() query can be executed. +func TestSelect_Distinct_Integer(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 1 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 11 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 12 * Second, Value: 2}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT distinct(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected point: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 20}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 19}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 0 * Second, Value: 10}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 10 * Second, Value: 2}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT distinct() query can be executed. 
+func TestSelect_Distinct_String(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &StringIterator{Points: []influxql.StringPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: "a"}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 1 * Second, Value: "b"}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: "c"}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: "b"}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: "d"}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 11 * Second, Value: "d"}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 12 * Second, Value: "d"}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT distinct(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected point: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: "a"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: "b"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 0 * Second, Value: "c"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 10 * Second, Value: "d"}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT distinct() query can be executed. +func TestSelect_Distinct_Boolean(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &BooleanIterator{Points: []influxql.BooleanPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: true}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 1 * Second, Value: false}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: false}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: true}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: false}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 11 * Second, Value: false}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 12 * Second, Value: true}, + }}, nil + } + + // Execute selection. 
+ itrs, err := influxql.Select(MustParseSelectStatement(`SELECT distinct(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected point: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: true}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: false}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 0 * Second, Value: false}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 10 * Second, Value: false}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 10 * Second, Value: true}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT mean() query can be executed. +func TestSelect_Mean_Float(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return influxql.NewCallIterator(&FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, + }}, opt) + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected point: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 19.5, Aggregated: 2}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 0 * Second, Value: 10, Aggregated: 1}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 10 * Second, Value: 2.5, Aggregated: 2}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30 * Second, Value: 100, Aggregated: 1}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 50 * Second, Value: 3.2, Aggregated: 5}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT mean() query can be executed. 
+func TestSelect_Mean_Integer(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return influxql.NewCallIterator(&IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, + }}, opt) + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 19.5, Aggregated: 2}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 0 * Second, Value: 10, Aggregated: 1}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 10 * Second, Value: 2.5, Aggregated: 2}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30 * Second, Value: 100, Aggregated: 1}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 50 * Second, Value: 3.2, Aggregated: 5}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT mean() query cannot be executed on strings. +func TestSelect_Mean_String(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return influxql.NewCallIterator(&StringIterator{}, opt) + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil) + if err == nil || err.Error() != "unsupported mean iterator type: *influxql_test.StringIterator" { + t.Errorf("unexpected error: %s", err) + } + + if itrs != nil { + influxql.Iterators(itrs).Close() + } +} + +// Ensure a SELECT mean() query cannot be executed on booleans. +func TestSelect_Mean_Boolean(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return influxql.NewCallIterator(&BooleanIterator{}, opt) + } + + // Execute selection. 
+ itrs, err := influxql.Select(MustParseSelectStatement(`SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil) + if err == nil || err.Error() != "unsupported mean iterator type: *influxql_test.BooleanIterator" { + t.Errorf("unexpected error: %s", err) + } + + if itrs != nil { + influxql.Iterators(itrs).Close() + } +} + +// Ensure a SELECT median() query can be executed. +func TestSelect_Median_Float(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT median(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 19.5}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 0 * Second, Value: 10}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 10 * Second, Value: 2.5}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30 * Second, Value: 100}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 50 * Second, Value: 3}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT median() query can be executed. 
+func TestSelect_Median_Integer(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT median(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 19.5}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 0 * Second, Value: 10}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 10 * Second, Value: 2.5}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30 * Second, Value: 100}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 50 * Second, Value: 3}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT median() query cannot be executed on strings. +func TestSelect_Median_String(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &StringIterator{}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT median(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil) + if err == nil || err.Error() != "unsupported median iterator type: *influxql_test.StringIterator" { + t.Errorf("unexpected error: %s", err) + } + + if itrs != nil { + influxql.Iterators(itrs).Close() + } +} + +// Ensure a SELECT median() query cannot be executed on booleans. +func TestSelect_Median_Boolean(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &BooleanIterator{}, nil + } + + // Execute selection. 
+ itrs, err := influxql.Select(MustParseSelectStatement(`SELECT median(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil) + if err == nil || err.Error() != "unsupported median iterator type: *influxql_test.BooleanIterator" { + t.Errorf("unexpected error: %s", err) + } + + if itrs != nil { + influxql.Iterators(itrs).Close() + } +} + +// Ensure a SELECT mode() query can be executed. +func TestSelect_Mode_Float(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT mode(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 10}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 0 * Second, Value: 10}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 10 * Second, Value: 2}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30 * Second, Value: 100}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 50 * Second, Value: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT mode() query can be executed. 
+func TestSelect_Mode_Integer(t *testing.T) {
+ var ic IteratorCreator
+ ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) {
+ return &IntegerIterator{Points: []influxql.IntegerPoint{
+ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 10},
+ {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10},
+ {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19},
+ {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2},
+ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 2},
+ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100},
+
+ {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1},
+ {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2},
+ {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3},
+ {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4},
+ {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 54 * Second, Value: 5},
+ }}, nil
+ }
+
+ // Execute selection.
+ itrs, err := influxql.Select(MustParseSelectStatement(`SELECT mode(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil)
+ if err != nil {
+ t.Fatal(err)
+ } else if a, err := Iterators(itrs).ReadAll(); err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ } else if !deep.Equal(a, [][]influxql.Point{
+ {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 10}},
+ {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 0 * Second, Value: 10}},
+ {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 10 * Second, Value: 2}},
+ {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30 * Second, Value: 100}},
+ {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 50 * Second, Value: 1}},
+ }) {
+ t.Fatalf("unexpected points: %s", spew.Sdump(a))
+ }
+}
+
+// Ensure a SELECT mode() query can be executed on strings.
+func TestSelect_Mode_String(t *testing.T) {
+ var ic IteratorCreator
+ ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) {
+ return &StringIterator{Points: []influxql.StringPoint{
+ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: "a"},
+ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 1 * Second, Value: "a"},
+ {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: "cxxx"},
+ {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 6 * Second, Value: "zzzz"},
+ {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 7 * Second, Value: "zzzz"},
+ {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 8 * Second, Value: "zxxx"},
+ {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: "b"},
+ {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: "d"},
+ {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 11 * Second, Value: "d"},
+ {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 12 * Second, Value: "d"},
+ }}, nil
+ }
+
+ // Execute selection.
+ itrs, err := influxql.Select(MustParseSelectStatement(`SELECT mode(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil)
+ if err != nil {
+ t.Fatal(err)
+ } else if a, err := Iterators(itrs).ReadAll(); err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ } else if !deep.Equal(a, [][]influxql.Point{
+ {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: "a"}},
+ {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 0 * Second, Value: "zzzz"}},
+ {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 10 * Second, Value: "d"}},
+ }) {
+ t.Fatalf("unexpected points: %s", spew.Sdump(a))
+ }
+}
+
+// Ensure a SELECT mode() query can be executed on booleans.
+func TestSelect_Mode_Boolean(t *testing.T) {
+ var ic IteratorCreator
+ ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) {
+ return &BooleanIterator{Points: []influxql.BooleanPoint{
+ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: true},
+ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 1 * Second, Value: false},
+ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 2 * Second, Value: false},
+ {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: true},
+ {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 6 * Second, Value: false},
+ {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: false},
+ {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: true},
+ {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 11 * Second, Value: false},
+ {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 12 * Second, Value: true},
+ }}, nil
+ }
+
+ // Execute selection.
+ itrs, err := influxql.Select(MustParseSelectStatement(`SELECT mode(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil)
+ if err != nil {
+ t.Fatal(err)
+ } else if a, err := Iterators(itrs).ReadAll(); err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ } else if !deep.Equal(a, [][]influxql.Point{
+ {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: false}},
+ {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 0 * Second, Value: true}},
+ {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 10 * Second, Value: true}},
+ }) {
+ t.Errorf("unexpected points: %s", spew.Sdump(a))
+ }
+}
+
+// Ensure a SELECT top() query can be executed.
+func TestSelect_Top_NoTags_Float(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT top(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s), host fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 20}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 19}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 0 * Second, Value: 10}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30 * Second, Value: 100}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 30 * Second, Value: 5}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 30 * Second, Value: 4}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT top() query can be executed. +func TestSelect_Top_NoTags_Integer(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, + }}, nil + } + + // Execute selection. 
+ itrs, err := influxql.Select(MustParseSelectStatement(`SELECT top(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s), host fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 20}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 19}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 0 * Second, Value: 10}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30 * Second, Value: 100}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 30 * Second, Value: 5}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 30 * Second, Value: 4}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT top() query can be executed with tags. +func TestSelect_Top_Tags_Float(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100, Aux: []interface{}{"A"}}, + + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5, Aux: []interface{}{"B"}}, + }}, nil + } + + // Execute selection. 
+ itrs, err := influxql.Select(MustParseSelectStatement(`SELECT top(value::float, host::tag, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s) fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + { + &influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, + &influxql.StringPoint{Name: "cpu", Time: 0 * Second, Value: "A"}, + }, + { + &influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 10, Aux: []interface{}{"B"}}, + &influxql.StringPoint{Name: "cpu", Time: 0 * Second, Value: "B"}, + }, + { + &influxql.FloatPoint{Name: "cpu", Time: 30 * Second, Value: 100, Aux: []interface{}{"A"}}, + &influxql.StringPoint{Name: "cpu", Time: 30 * Second, Value: "A"}, + }, + { + &influxql.FloatPoint{Name: "cpu", Time: 30 * Second, Value: 5, Aux: []interface{}{"B"}}, + &influxql.StringPoint{Name: "cpu", Time: 30 * Second, Value: "B"}, + }, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT top() query can be executed with tags. +func TestSelect_Top_Tags_Integer(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100, Aux: []interface{}{"A"}}, + + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5, Aux: []interface{}{"B"}}, + }}, nil + } + + // Execute selection. 
+ itrs, err := influxql.Select(MustParseSelectStatement(`SELECT top(value::integer, host::tag, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s) fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + { + &influxql.IntegerPoint{Name: "cpu", Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, + &influxql.StringPoint{Name: "cpu", Time: 0 * Second, Value: "A"}, + }, + { + &influxql.IntegerPoint{Name: "cpu", Time: 0 * Second, Value: 10, Aux: []interface{}{"B"}}, + &influxql.StringPoint{Name: "cpu", Time: 0 * Second, Value: "B"}, + }, + { + &influxql.IntegerPoint{Name: "cpu", Time: 30 * Second, Value: 100, Aux: []interface{}{"A"}}, + &influxql.StringPoint{Name: "cpu", Time: 30 * Second, Value: "A"}, + }, + { + &influxql.IntegerPoint{Name: "cpu", Time: 30 * Second, Value: 5, Aux: []interface{}{"B"}}, + &influxql.StringPoint{Name: "cpu", Time: 30 * Second, Value: "B"}, + }, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT top() query can be executed with tags and group by. +func TestSelect_Top_GroupByTags_Float(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100, Aux: []interface{}{"A"}}, + + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5, Aux: []interface{}{"B"}}, + }}, nil + } + + // Execute selection. 
+ itrs, err := influxql.Select(MustParseSelectStatement(`SELECT top(value::float, host::tag, 1) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY region, time(30s) fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + { + &influxql.FloatPoint{Name: "cpu", Tags: ParseTags("region=east"), Time: 0 * Second, Value: 19, Aux: []interface{}{"A"}}, + &influxql.StringPoint{Name: "cpu", Tags: ParseTags("region=east"), Time: 0 * Second, Value: "A"}, + }, + { + &influxql.FloatPoint{Name: "cpu", Tags: ParseTags("region=west"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, + &influxql.StringPoint{Name: "cpu", Tags: ParseTags("region=west"), Time: 0 * Second, Value: "A"}, + }, + { + &influxql.FloatPoint{Name: "cpu", Tags: ParseTags("region=west"), Time: 30 * Second, Value: 100, Aux: []interface{}{"A"}}, + &influxql.StringPoint{Name: "cpu", Tags: ParseTags("region=west"), Time: 30 * Second, Value: "A"}, + }, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT top() query can be executed with tags and group by. +func TestSelect_Top_GroupByTags_Integer(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100, Aux: []interface{}{"A"}}, + + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5, Aux: []interface{}{"B"}}, + }}, nil + } + + // Execute selection. 
+ itrs, err := influxql.Select(MustParseSelectStatement(`SELECT top(value::integer, host::tag, 1) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY region, time(30s) fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + { + &influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("region=east"), Time: 0 * Second, Value: 19, Aux: []interface{}{"A"}}, + &influxql.StringPoint{Name: "cpu", Tags: ParseTags("region=east"), Time: 0 * Second, Value: "A"}, + }, + { + &influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("region=west"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, + &influxql.StringPoint{Name: "cpu", Tags: ParseTags("region=west"), Time: 0 * Second, Value: "A"}, + }, + { + &influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("region=west"), Time: 30 * Second, Value: 100, Aux: []interface{}{"A"}}, + &influxql.StringPoint{Name: "cpu", Tags: ParseTags("region=west"), Time: 30 * Second, Value: "A"}, + }, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT bottom() query can be executed. +func TestSelect_Bottom_NoTags_Float(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT bottom(value::float, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s), host fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 2}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 3}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 0 * Second, Value: 10}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30 * Second, Value: 100}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 30 * Second, Value: 1}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 30 * Second, Value: 2}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT bottom() query can be executed. 
+func TestSelect_Bottom_NoTags_Integer(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT bottom(value::integer, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s), host fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 2}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 3}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 0 * Second, Value: 10}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30 * Second, Value: 100}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 30 * Second, Value: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 30 * Second, Value: 2}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT bottom() query can be executed with tags. 
+func TestSelect_Bottom_Tags_Float(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100, Aux: []interface{}{"A"}}, + + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5, Aux: []interface{}{"B"}}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT bottom(value::float, host::tag, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s) fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + { + &influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 2, Aux: []interface{}{"A"}}, + &influxql.StringPoint{Name: "cpu", Time: 0 * Second, Value: "A"}, + }, + { + &influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 10, Aux: []interface{}{"B"}}, + &influxql.StringPoint{Name: "cpu", Time: 0 * Second, Value: "B"}, + }, + { + &influxql.FloatPoint{Name: "cpu", Time: 30 * Second, Value: 1, Aux: []interface{}{"B"}}, + &influxql.StringPoint{Name: "cpu", Time: 30 * Second, Value: "B"}, + }, + { + &influxql.FloatPoint{Name: "cpu", Time: 30 * Second, Value: 100, Aux: []interface{}{"A"}}, + &influxql.StringPoint{Name: "cpu", Time: 30 * Second, Value: "A"}, + }, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT bottom() query can be executed with tags. 
+func TestSelect_Bottom_Tags_Integer(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100, Aux: []interface{}{"A"}}, + + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5, Aux: []interface{}{"B"}}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT bottom(value::integer, host::tag, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s) fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + { + &influxql.IntegerPoint{Name: "cpu", Time: 0 * Second, Value: 2, Aux: []interface{}{"A"}}, + &influxql.StringPoint{Name: "cpu", Time: 0 * Second, Value: "A"}, + }, + { + &influxql.IntegerPoint{Name: "cpu", Time: 0 * Second, Value: 10, Aux: []interface{}{"B"}}, + &influxql.StringPoint{Name: "cpu", Time: 0 * Second, Value: "B"}, + }, + { + &influxql.IntegerPoint{Name: "cpu", Time: 30 * Second, Value: 1, Aux: []interface{}{"B"}}, + &influxql.StringPoint{Name: "cpu", Time: 30 * Second, Value: "B"}, + }, + { + &influxql.IntegerPoint{Name: "cpu", Time: 30 * Second, Value: 100, Aux: []interface{}{"A"}}, + &influxql.StringPoint{Name: "cpu", Time: 30 * Second, Value: "A"}, + }, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT bottom() query can be executed with tags and group by. 
+func TestSelect_Bottom_GroupByTags_Float(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100, Aux: []interface{}{"A"}}, + + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5, Aux: []interface{}{"B"}}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT bottom(value::float, host::tag, 1) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY region, time(30s) fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + { + &influxql.FloatPoint{Name: "cpu", Tags: ParseTags("region=east"), Time: 0 * Second, Value: 2, Aux: []interface{}{"A"}}, + &influxql.StringPoint{Name: "cpu", Tags: ParseTags("region=east"), Time: 0 * Second, Value: "A"}, + }, + { + &influxql.FloatPoint{Name: "cpu", Tags: ParseTags("region=west"), Time: 0 * Second, Value: 3, Aux: []interface{}{"A"}}, + &influxql.StringPoint{Name: "cpu", Tags: ParseTags("region=west"), Time: 0 * Second, Value: "A"}, + }, + { + &influxql.FloatPoint{Name: "cpu", Tags: ParseTags("region=west"), Time: 30 * Second, Value: 1, Aux: []interface{}{"B"}}, + &influxql.StringPoint{Name: "cpu", Tags: ParseTags("region=west"), Time: 30 * Second, Value: "B"}, + }, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT bottom() query can be executed with tags and group by. 
+func TestSelect_Bottom_GroupByTags_Integer(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100, Aux: []interface{}{"A"}}, + + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5, Aux: []interface{}{"B"}}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT bottom(value::float, host::tag, 1) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY region, time(30s) fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + { + &influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("region=east"), Time: 0 * Second, Value: 2, Aux: []interface{}{"A"}}, + &influxql.StringPoint{Name: "cpu", Tags: ParseTags("region=east"), Time: 0 * Second, Value: "A"}, + }, + { + &influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("region=west"), Time: 0 * Second, Value: 3, Aux: []interface{}{"A"}}, + &influxql.StringPoint{Name: "cpu", Tags: ParseTags("region=west"), Time: 0 * Second, Value: "A"}, + }, + { + &influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("region=west"), Time: 30 * Second, Value: 1, Aux: []interface{}{"B"}}, + &influxql.StringPoint{Name: "cpu", Tags: ParseTags("region=west"), Time: 30 * Second, Value: "B"}, + }, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT query with a fill(null) statement can be executed. +func TestSelect_Fill_Null_Float(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return influxql.NewCallIterator(&FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 2}, + }}, opt) + } + + // Execute selection. 
+ itrs, err := influxql.Select(MustParseSelectStatement(`SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(null)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Nil: true}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 10 * Second, Value: 2, Aggregated: 1}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20 * Second, Nil: true}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30 * Second, Nil: true}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 40 * Second, Nil: true}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 50 * Second, Nil: true}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT query with a fill() statement can be executed. +func TestSelect_Fill_Number_Float(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return influxql.NewCallIterator(&FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 2}, + }}, opt) + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(1)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 1}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 10 * Second, Value: 2, Aggregated: 1}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20 * Second, Value: 1}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30 * Second, Value: 1}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 40 * Second, Value: 1}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 50 * Second, Value: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT query with a fill(previous) statement can be executed. +func TestSelect_Fill_Previous_Float(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return influxql.NewCallIterator(&FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 2}, + }}, opt) + } + + // Execute selection. 
+ itrs, err := influxql.Select(MustParseSelectStatement(`SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(previous)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Nil: true}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 10 * Second, Value: 2, Aggregated: 1}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20 * Second, Value: 2}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30 * Second, Value: 2}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 40 * Second, Value: 2}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 50 * Second, Value: 2}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT query with a fill(linear) statement can be executed. +func TestSelect_Fill_Linear_Float_One(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return influxql.NewCallIterator(&FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 32 * Second, Value: 4}, + }}, opt) + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(linear)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Nil: true}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 10 * Second, Value: 2, Aggregated: 1}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20 * Second, Value: 3}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30 * Second, Value: 4, Aggregated: 1}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 40 * Second, Nil: true}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 50 * Second, Nil: true}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +func TestSelect_Fill_Linear_Float_Many(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return influxql.NewCallIterator(&FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 62 * Second, Value: 7}, + }}, opt) + } + + // Execute selection. 
+ itrs, err := influxql.Select(MustParseSelectStatement(`SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(linear)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Nil: true}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 10 * Second, Value: 2, Aggregated: 1}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20 * Second, Value: 3}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30 * Second, Value: 4}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 40 * Second, Value: 5}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 50 * Second, Value: 6}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 60 * Second, Value: 7, Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT query with a fill(linear) statement can be executed for integers. +func TestSelect_Fill_Linear_Integer_One(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return influxql.NewCallIterator(&IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 32 * Second, Value: 4}, + }}, opt) + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT max(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(linear)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Nil: true}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 10 * Second, Value: 1, Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20 * Second, Value: 2}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30 * Second, Value: 4, Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 40 * Second, Nil: true}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 50 * Second, Nil: true}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +func TestSelect_Fill_Linear_Integer_Many(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return influxql.NewCallIterator(&IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 72 * Second, Value: 10}, + }}, opt) + } + + // Execute selection. 
+ itrs, err := influxql.Select(MustParseSelectStatement(`SELECT max(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:20Z' GROUP BY host, time(10s) fill(linear)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Nil: true}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 10 * Second, Value: 1, Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20 * Second, Value: 2}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30 * Second, Value: 4}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 40 * Second, Value: 5}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 50 * Second, Value: 7}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 60 * Second, Value: 8}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 70 * Second, Value: 10, Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT stddev() query can be executed. +func TestSelect_Stddev_Float(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT stddev(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 0.7071067811865476}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 0 * Second, Nil: true}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 10 * Second, Value: 0.7071067811865476}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30 * Second, Nil: true}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 50 * Second, Value: 1.5811388300841898}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT stddev() query can be executed. 
+func TestSelect_Stddev_Integer(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT stddev(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 0.7071067811865476}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 0 * Second, Nil: true}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 10 * Second, Value: 0.7071067811865476}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30 * Second, Nil: true}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 50 * Second, Value: 1.5811388300841898}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT spread() query can be executed. +func TestSelect_Spread_Float(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, + }}, nil + } + + // Execute selection. 
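+ // spread is the difference between the maximum and minimum value in each
+ // bucket: host=A's 0s bucket {20, 19} yields 1, host=B's 50s bucket
+ // {1, 2, 3, 4, 5} yields 4, and single-value buckets yield 0.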
+ itrs, err := influxql.Select(MustParseSelectStatement(`SELECT spread(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 1}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 0 * Second, Value: 0}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 10 * Second, Value: 1}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30 * Second, Value: 0}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 50 * Second, Value: 4}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT spread() query can be executed. +func TestSelect_Spread_Integer(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT spread(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 0 * Second, Value: 0}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 10 * Second, Value: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30 * Second, Value: 0}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 50 * Second, Value: 4}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT percentile() query can be executed. 
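+// The expected output is consistent with a nearest-rank selection: for
+// host=B's 50s bucket, which holds the ten values 1 through 10,
+// percentile(value, 90) returns the value at rank 9, i.e. 9.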
+func TestSelect_Percentile_Float(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 9}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 8}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 7}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 54 * Second, Value: 6}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 55 * Second, Value: 5}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 56 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 57 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 58 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 59 * Second, Value: 1}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT percentile(value, 90) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 20}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 0 * Second, Value: 10}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 10 * Second, Value: 3}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30 * Second, Value: 100}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 50 * Second, Value: 9}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT percentile() query can be executed. 
+func TestSelect_Percentile_Integer(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 9}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 8}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 7}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 54 * Second, Value: 6}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 55 * Second, Value: 5}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 56 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 57 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 58 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 59 * Second, Value: 1}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT percentile(value, 90) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 20}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 0 * Second, Value: 10}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 10 * Second, Value: 3}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30 * Second, Value: 100}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 50 * Second, Value: 9}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT sample() query can be executed. +func TestSelect_Sample_Float(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=east,host=B"), Time: 10 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=B"), Time: 15 * Second, Value: 2}, + }}, nil + } + + // Execute selection. 
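+ // sample(value, 2) draws up to two random points per group; here each host
+ // falls into a single bucket containing exactly two points, so the result
+ // is deterministic and simply echoes the input points.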
+ itrs, err := influxql.Select(MustParseSelectStatement(`SELECT sample(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 20}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 5 * Second, Value: 10}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 10 * Second, Value: 19}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 15 * Second, Value: 2}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT sample() query can be executed. +func TestSelect_Sample_Integer(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=east,host=B"), Time: 10 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=B"), Time: 15 * Second, Value: 2}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT sample(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 20}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 5 * Second, Value: 10}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 10 * Second, Value: 19}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 15 * Second, Value: 2}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT sample() query can be executed. +func TestSelect_Sample_Boolean(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &BooleanIterator{Points: []influxql.BooleanPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: true}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 5 * Second, Value: false}, + {Name: "cpu", Tags: ParseTags("region=east,host=B"), Time: 10 * Second, Value: false}, + {Name: "cpu", Tags: ParseTags("region=east,host=B"), Time: 15 * Second, Value: true}, + }}, nil + } + + // Execute selection. 
+ itrs, err := influxql.Select(MustParseSelectStatement(`SELECT sample(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: true}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 5 * Second, Value: false}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 10 * Second, Value: false}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 15 * Second, Value: true}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT sample() query can be executed. +func TestSelect_Sample_String(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &StringIterator{Points: []influxql.StringPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: "a"}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 5 * Second, Value: "b"}, + {Name: "cpu", Tags: ParseTags("region=east,host=B"), Time: 10 * Second, Value: "c"}, + {Name: "cpu", Tags: ParseTags("region=east,host=B"), Time: 15 * Second, Value: "d"}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT sample(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: "a"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 5 * Second, Value: "b"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 10 * Second, Value: "c"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 15 * Second, Value: "d"}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a simple raw SELECT statement can be executed. +func TestSelect_Raw(t *testing.T) { + // Mock two iterators -- one for each value in the query. + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + if !reflect.DeepEqual(opt.Aux, []influxql.VarRef{{Val: "v1", Type: influxql.Float}, {Val: "v2", Type: influxql.Float}}) { + t.Fatalf("unexpected options: %s", spew.Sdump(opt.Expr)) + + } + return &FloatIterator{Points: []influxql.FloatPoint{ + {Time: 0, Aux: []interface{}{float64(1), nil}}, + {Time: 1, Aux: []interface{}{nil, float64(2)}}, + {Time: 5, Aux: []interface{}{float64(3), float64(4)}}, + }}, nil + } + + // Execute selection. 
+ itrs, err := influxql.Select(MustParseSelectStatement(`SELECT v1::float, v2::float FROM cpu`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + { + &influxql.FloatPoint{Time: 0, Value: 1}, + &influxql.FloatPoint{Time: 0, Nil: true}, + }, + { + &influxql.FloatPoint{Time: 1, Nil: true}, + &influxql.FloatPoint{Time: 1, Value: 2}, + }, + { + &influxql.FloatPoint{Time: 5, Value: 3}, + &influxql.FloatPoint{Time: 5, Value: 4}, + }, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure a SELECT binary expr queries can be executed as floats. +func TestSelect_BinaryExpr_Float(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + makeAuxFields := func(value float64) []interface{} { + aux := make([]interface{}, len(opt.Aux)) + for i := range aux { + aux[i] = value + } + return aux + } + return &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20, Aux: makeAuxFields(20)}, + {Name: "cpu", Time: 5 * Second, Value: 10, Aux: makeAuxFields(10)}, + {Name: "cpu", Time: 9 * Second, Value: 19, Aux: makeAuxFields(19)}, + }}, nil + } + ic.FieldDimensionsFn = func(sources influxql.Sources) (map[string]influxql.DataType, map[string]struct{}, error) { + return map[string]influxql.DataType{"value": influxql.Float}, nil, nil + } + + for _, test := range []struct { + Name string + Statement string + Points [][]influxql.Point + }{ + { + Name: "rhs binary add number", + Statement: `SELECT value + 2.0 FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 22}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: 12}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: 21}}, + }, + }, + { + Name: "rhs binary add integer", + Statement: `SELECT value + 2 FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 22}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: 12}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: 21}}, + }, + }, + { + Name: "lhs binary add number", + Statement: `SELECT 2.0 + value FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 22}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: 12}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: 21}}, + }, + }, + { + Name: "lhs binary add integer", + Statement: `SELECT 2 + value FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 22}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: 12}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: 21}}, + }, + }, + { + Name: "two variable binary add", + Statement: `SELECT value + value FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 40}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: 20}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: 38}}, + }, + }, + { + Name: "rhs binary multiply number", + Statement: `SELECT value * 2.0 FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 40}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: 20}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: 38}}, 
+ }, + }, + { + Name: "rhs binary multiply integer", + Statement: `SELECT value * 2 FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 40}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: 20}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: 38}}, + }, + }, + { + Name: "lhs binary multiply number", + Statement: `SELECT 2.0 * value FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 40}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: 20}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: 38}}, + }, + }, + { + Name: "lhs binary multiply integer", + Statement: `SELECT 2 * value FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 40}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: 20}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: 38}}, + }, + }, + { + Name: "two variable binary multiply", + Statement: `SELECT value * value FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 400}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: 100}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: 361}}, + }, + }, + { + Name: "rhs binary subtract number", + Statement: `SELECT value - 2.0 FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 18}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: 8}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: 17}}, + }, + }, + { + Name: "rhs binary subtract integer", + Statement: `SELECT value - 2 FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 18}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: 8}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: 17}}, + }, + }, + { + Name: "lhs binary subtract number", + Statement: `SELECT 2.0 - value FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: -18}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: -8}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: -17}}, + }, + }, + { + Name: "lhs binary subtract integer", + Statement: `SELECT 2 - value FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: -18}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: -8}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: -17}}, + }, + }, + { + Name: "two variable binary subtract", + Statement: `SELECT value - value FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 0}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: 0}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: 0}}, + }, + }, + { + Name: "rhs binary division number", + Statement: `SELECT value / 2.0 FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 10}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: 5}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: float64(19) / 2}}, + }, + }, + { + Name: "rhs binary division integer", + Statement: `SELECT value / 2 FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 10}}, + {&influxql.FloatPoint{Name: "cpu", Time: 
5 * Second, Value: 5}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: float64(19) / 2}}, + }, + }, + { + Name: "lhs binary division number", + Statement: `SELECT 38.0 / value FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 1.9}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: 3.8}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: 2}}, + }, + }, + { + Name: "lhs binary division integer", + Statement: `SELECT 38 / value FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 1.9}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: 3.8}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: 2}}, + }, + }, + { + Name: "two variable binary division", + Statement: `SELECT value / value FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 1}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: 1}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: 1}}, + }, + }, + } { + stmt, err := MustParseSelectStatement(test.Statement).RewriteFields(&ic) + if err != nil { + t.Errorf("%s: rewrite error: %s", test.Name, err) + } + + itrs, err := influxql.Select(stmt, &ic, nil) + if err != nil { + t.Errorf("%s: parse error: %s", test.Name, err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("%s: unexpected error: %s", test.Name, err) + } else if !deep.Equal(a, test.Points) { + t.Errorf("%s: unexpected points: %s", test.Name, spew.Sdump(a)) + } + } +} + +// Ensure a SELECT binary expr queries can be executed as integers. +func TestSelect_BinaryExpr_Integer(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + makeAuxFields := func(value int64) []interface{} { + aux := make([]interface{}, len(opt.Aux)) + for i := range aux { + aux[i] = value + } + return aux + } + return &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20, Aux: makeAuxFields(20)}, + {Name: "cpu", Time: 5 * Second, Value: 10, Aux: makeAuxFields(10)}, + {Name: "cpu", Time: 9 * Second, Value: 19, Aux: makeAuxFields(19)}, + }}, nil + } + ic.FieldDimensionsFn = func(sources influxql.Sources) (map[string]influxql.DataType, map[string]struct{}, error) { + return map[string]influxql.DataType{"value": influxql.Integer}, nil, nil + } + + for _, test := range []struct { + Name string + Statement string + Points [][]influxql.Point + }{ + { + Name: "rhs binary add number", + Statement: `SELECT value + 2.0 FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 22}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: 12}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: 21}}, + }, + }, + { + Name: "rhs binary add integer", + Statement: `SELECT value + 2 FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Time: 0 * Second, Value: 22}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 5 * Second, Value: 12}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 9 * Second, Value: 21}}, + }, + }, + { + Name: "lhs binary add number", + Statement: `SELECT 2.0 + value FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 22}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: 12}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: 
21}}, + }, + }, + { + Name: "lhs binary add integer", + Statement: `SELECT 2 + value FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Time: 0 * Second, Value: 22}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 5 * Second, Value: 12}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 9 * Second, Value: 21}}, + }, + }, + { + Name: "two variable binary add", + Statement: `SELECT value + value FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Time: 0 * Second, Value: 40}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 5 * Second, Value: 20}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 9 * Second, Value: 38}}, + }, + }, + { + Name: "rhs binary multiply number", + Statement: `SELECT value * 2.0 FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 40}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: 20}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: 38}}, + }, + }, + { + Name: "rhs binary multiply integer", + Statement: `SELECT value * 2 FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Time: 0 * Second, Value: 40}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 5 * Second, Value: 20}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 9 * Second, Value: 38}}, + }, + }, + { + Name: "lhs binary multiply number", + Statement: `SELECT 2.0 * value FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 40}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: 20}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: 38}}, + }, + }, + { + Name: "lhs binary multiply integer", + Statement: `SELECT 2 * value FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Time: 0 * Second, Value: 40}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 5 * Second, Value: 20}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 9 * Second, Value: 38}}, + }, + }, + { + Name: "two variable binary multiply", + Statement: `SELECT value * value FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Time: 0 * Second, Value: 400}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 5 * Second, Value: 100}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 9 * Second, Value: 361}}, + }, + }, + { + Name: "rhs binary subtract number", + Statement: `SELECT value - 2.0 FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 18}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: 8}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: 17}}, + }, + }, + { + Name: "rhs binary subtract integer", + Statement: `SELECT value - 2 FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Time: 0 * Second, Value: 18}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 5 * Second, Value: 8}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 9 * Second, Value: 17}}, + }, + }, + { + Name: "lhs binary subtract number", + Statement: `SELECT 2.0 - value FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: -18}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: -8}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: -17}}, + }, + }, + { + Name: "lhs binary subtract integer", + Statement: `SELECT 2 - value FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Time: 0 * Second, Value: -18}}, + 
{&influxql.IntegerPoint{Name: "cpu", Time: 5 * Second, Value: -8}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 9 * Second, Value: -17}}, + }, + }, + { + Name: "two variable binary subtract", + Statement: `SELECT value - value FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Time: 0 * Second, Value: 0}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 5 * Second, Value: 0}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 9 * Second, Value: 0}}, + }, + }, + { + Name: "rhs binary division number", + Statement: `SELECT value / 2.0 FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 10}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: 5}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: 9.5}}, + }, + }, + { + Name: "rhs binary division integer", + Statement: `SELECT value / 2 FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 10}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: 5}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: float64(19) / 2}}, + }, + }, + { + Name: "lhs binary division number", + Statement: `SELECT 38.0 / value FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 1.9}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: 3.8}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: 2.0}}, + }, + }, + { + Name: "lhs binary division integer", + Statement: `SELECT 38 / value FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 1.9}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: 3.8}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: 2}}, + }, + }, + { + Name: "two variable binary division", + Statement: `SELECT value / value FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 1}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: 1}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: 1}}, + }, + }, + } { + stmt, err := MustParseSelectStatement(test.Statement).RewriteFields(&ic) + if err != nil { + t.Errorf("%s: rewrite error: %s", test.Name, err) + } + + itrs, err := influxql.Select(stmt, &ic, nil) + if err != nil { + t.Errorf("%s: parse error: %s", test.Name, err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("%s: unexpected error: %s", test.Name, err) + } else if !deep.Equal(a, test.Points) { + t.Errorf("%s: unexpected points: %s", test.Name, spew.Sdump(a)) + } + } +} + +// Ensure a SELECT binary expr queries can be executed on mixed iterators. 
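+// In the integer cases above, combining an integer field with an integer
+// literal keeps integer results for +, - and *, while division and any float
+// operand promote the result to float; mixing a float field with an integer
+// field below likewise produces float points throughout.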
+func TestSelect_BinaryExpr_Mixed(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20, Aux: []interface{}{float64(20), int64(10)}}, + {Name: "cpu", Time: 5 * Second, Value: 10, Aux: []interface{}{float64(10), int64(15)}}, + {Name: "cpu", Time: 9 * Second, Value: 19, Aux: []interface{}{float64(19), int64(5)}}, + }}, nil + } + ic.FieldDimensionsFn = func(sources influxql.Sources) (map[string]influxql.DataType, map[string]struct{}, error) { + return map[string]influxql.DataType{ + "total": influxql.Float, + "value": influxql.Integer, + }, nil, nil + } + + for _, test := range []struct { + Name string + Statement string + Points [][]influxql.Point + }{ + { + Name: "mixed binary add", + Statement: `SELECT total + value FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 30}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: 25}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: 24}}, + }, + }, + { + Name: "mixed binary subtract", + Statement: `SELECT total - value FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 10}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: -5}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: 14}}, + }, + }, + { + Name: "mixed binary multiply", + Statement: `SELECT total * value FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 200}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: 150}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: 95}}, + }, + }, + { + Name: "mixed binary division", + Statement: `SELECT total / value FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 2}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: float64(10) / float64(15)}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Value: float64(19) / float64(5)}}, + }, + }, + } { + stmt, err := MustParseSelectStatement(test.Statement).RewriteFields(&ic) + if err != nil { + t.Errorf("%s: rewrite error: %s", test.Name, err) + } + + itrs, err := influxql.Select(stmt, &ic, nil) + if err != nil { + t.Errorf("%s: parse error: %s", test.Name, err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("%s: unexpected error: %s", test.Name, err) + } else if !deep.Equal(a, test.Points) { + t.Errorf("%s: unexpected points: %s", test.Name, spew.Sdump(a)) + } + } +} + +// Ensure a SELECT binary expr with nil values can be executed. +// Nil values may be present when a field is missing from one iterator, +// but not the other. 
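+// The expected points show that whenever either operand is nil, the result
+// point is emitted as nil rather than being dropped.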
+func TestSelect_BinaryExpr_NilValues(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20, Aux: []interface{}{float64(20), nil}}, + {Name: "cpu", Time: 5 * Second, Value: 10, Aux: []interface{}{float64(10), float64(15)}}, + {Name: "cpu", Time: 9 * Second, Value: 19, Aux: []interface{}{nil, float64(5)}}, + }}, nil + } + ic.FieldDimensionsFn = func(sources influxql.Sources) (map[string]influxql.DataType, map[string]struct{}, error) { + return map[string]influxql.DataType{ + "total": influxql.Float, + "value": influxql.Float, + }, nil, nil + } + + for _, test := range []struct { + Name string + Statement string + Points [][]influxql.Point + }{ + { + Name: "nil binary add", + Statement: `SELECT total + value FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Nil: true}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: 25}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Nil: true}}, + }, + }, + { + Name: "nil binary subtract", + Statement: `SELECT total - value FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Nil: true}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: -5}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Nil: true}}, + }, + }, + { + Name: "nil binary multiply", + Statement: `SELECT total * value FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Nil: true}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: 150}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Nil: true}}, + }, + }, + { + Name: "nil binary division", + Statement: `SELECT total / value FROM cpu`, + Points: [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Nil: true}}, + {&influxql.FloatPoint{Name: "cpu", Time: 5 * Second, Value: float64(10) / float64(15)}}, + {&influxql.FloatPoint{Name: "cpu", Time: 9 * Second, Nil: true}}, + }, + }, + } { + stmt, err := MustParseSelectStatement(test.Statement).RewriteFields(&ic) + if err != nil { + t.Errorf("%s: rewrite error: %s", test.Name, err) + } + + itrs, err := influxql.Select(stmt, &ic, nil) + if err != nil { + t.Errorf("%s: parse error: %s", test.Name, err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("%s: unexpected error: %s", test.Name, err) + } else if !deep.Equal(a, test.Points) { + t.Errorf("%s: unexpected points: %s", test.Name, spew.Sdump(a)) + } + } +} + +// Ensure a SELECT (...) query can be executed. 
+func TestSelect_ParenExpr(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + if !reflect.DeepEqual(opt.Expr, MustParseExpr(`min(value)`)) { + t.Fatalf("unexpected expr: %s", spew.Sdump(opt.Expr)) + } + + return influxql.NewCallIterator(&FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + }}, opt) + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT (min(value)) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 19, Aggregated: 2}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 0 * Second, Value: 10, Aggregated: 1}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 10 * Second, Value: 2, Aggregated: 2}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30 * Second, Value: 100, Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } + + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 1 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 11 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 12 * Second, Value: 2}, + }}, nil + } + + // Execute selection. 
+ itrs, err = influxql.Select(MustParseSelectStatement(`SELECT (distinct(value)) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 20}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 19}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 0 * Second, Value: 10}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 10 * Second, Value: 2}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +func TestSelect_Derivative_Float(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 12 * Second, Value: 3}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT derivative(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 4 * Second, Value: -2.5}}, + {&influxql.FloatPoint{Name: "cpu", Time: 8 * Second, Value: 2.25}}, + {&influxql.FloatPoint{Name: "cpu", Time: 12 * Second, Value: -4}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +func TestSelect_Derivative_Integer(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 12 * Second, Value: 3}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT derivative(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 4 * Second, Value: -2.5}}, + {&influxql.FloatPoint{Name: "cpu", Time: 8 * Second, Value: 2.25}}, + {&influxql.FloatPoint{Name: "cpu", Time: 12 * Second, Value: -4}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +func TestSelect_Derivative_Desc_Float(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Time: 12 * Second, Value: 3}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 0 * Second, Value: 20}, + }}, nil + } + + // Execute selection. 
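+ // With ORDER BY desc the same pairs are consumed in reverse: each result is
+ // emitted at the earlier timestamp of the pair and carries the negated slope
+ // of the ascending case above, e.g. (19-3)/((12s-8s)/1s) = 4 at 8s.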
+ itrs, err := influxql.Select(MustParseSelectStatement(`SELECT derivative(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z' ORDER BY desc`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Errorf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 8 * Second, Value: 4}}, + {&influxql.FloatPoint{Name: "cpu", Time: 4 * Second, Value: -2.25}}, + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 2.5}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +func TestSelect_Derivative_Desc_Integer(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Time: 12 * Second, Value: 3}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 0 * Second, Value: 20}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT derivative(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z' ORDER BY desc`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Errorf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 8 * Second, Value: 4}}, + {&influxql.FloatPoint{Name: "cpu", Time: 4 * Second, Value: -2.25}}, + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 2.5}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +func TestSelect_Derivative_Duplicate_Float(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 0 * Second, Value: 19}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 4 * Second, Value: 3}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT derivative(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 4 * Second, Value: -2.5}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +func TestSelect_Derivative_Duplicate_Integer(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 0 * Second, Value: 19}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 4 * Second, Value: 3}, + }}, nil + } + + // Execute selection. 
+ itrs, err := influxql.Select(MustParseSelectStatement(`SELECT derivative(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 4 * Second, Value: -2.5}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +func TestSelect_Difference_Float(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 12 * Second, Value: 3}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 4 * Second, Value: -10}}, + {&influxql.FloatPoint{Name: "cpu", Time: 8 * Second, Value: 9}}, + {&influxql.FloatPoint{Name: "cpu", Time: 12 * Second, Value: -16}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +func TestSelect_Difference_Integer(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 12 * Second, Value: 3}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Time: 4 * Second, Value: -10}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 8 * Second, Value: 9}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 12 * Second, Value: -16}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +func TestSelect_Difference_Duplicate_Float(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 0 * Second, Value: 19}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 4 * Second, Value: 3}, + }}, nil + } + + // Execute selection. 
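+ // The single expected point is consistent with keeping only the first value
+ // at each duplicate timestamp (20 at 0s, 10 at 4s), so one difference of -10
+ // is emitted at 4s.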
+ itrs, err := influxql.Select(MustParseSelectStatement(`SELECT difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 4 * Second, Value: -10}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +func TestSelect_Difference_Duplicate_Integer(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 0 * Second, Value: 19}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 4 * Second, Value: 3}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Time: 4 * Second, Value: -10}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +func TestSelect_Elapsed_Float(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 11 * Second, Value: 3}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT elapsed(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Time: 4 * Second, Value: 4}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 8 * Second, Value: 4}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 11 * Second, Value: 3}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +func TestSelect_Elapsed_Integer(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 11 * Second, Value: 3}, + }}, nil + } + + // Execute selection. 
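+ // elapsed(value, 1s) reports the time between consecutive points in units
+ // of one second and yields IntegerPoints regardless of the field type, as
+ // the string and boolean variants below also show: 4, 4 and 3 seconds.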
+ itrs, err := influxql.Select(MustParseSelectStatement(`SELECT elapsed(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Time: 4 * Second, Value: 4}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 8 * Second, Value: 4}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 11 * Second, Value: 3}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +func TestSelect_Elapsed_String(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &StringIterator{Points: []influxql.StringPoint{ + {Name: "cpu", Time: 0 * Second, Value: "a"}, + {Name: "cpu", Time: 4 * Second, Value: "b"}, + {Name: "cpu", Time: 8 * Second, Value: "c"}, + {Name: "cpu", Time: 11 * Second, Value: "d"}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT elapsed(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Time: 4 * Second, Value: 4}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 8 * Second, Value: 4}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 11 * Second, Value: 3}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +func TestSelect_Elapsed_Boolean(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &BooleanIterator{Points: []influxql.BooleanPoint{ + {Name: "cpu", Time: 0 * Second, Value: true}, + {Name: "cpu", Time: 4 * Second, Value: false}, + {Name: "cpu", Time: 8 * Second, Value: false}, + {Name: "cpu", Time: 11 * Second, Value: true}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT elapsed(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Time: 4 * Second, Value: 4}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 8 * Second, Value: 4}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 11 * Second, Value: 3}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +func TestSelect_MovingAverage_Float(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 12 * Second, Value: 3}, + }}, nil + } + + // Execute selection. 
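+ // moving_average(value, 2) averages each point with its predecessor:
+ // (20+10)/2 = 15 at 4s, (10+19)/2 = 14.5 at 8s and (19+3)/2 = 11 at 12s;
+ // the first point has no predecessor and is dropped.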
+ itrs, err := influxql.Select(MustParseSelectStatement(`SELECT moving_average(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 4 * Second, Value: 15, Aggregated: 2}}, + {&influxql.FloatPoint{Name: "cpu", Time: 8 * Second, Value: 14.5, Aggregated: 2}}, + {&influxql.FloatPoint{Name: "cpu", Time: 12 * Second, Value: 11, Aggregated: 2}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +func TestSelect_MovingAverage_Integer(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 12 * Second, Value: 3}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT moving_average(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 4 * Second, Value: 15, Aggregated: 2}}, + {&influxql.FloatPoint{Name: "cpu", Time: 8 * Second, Value: 14.5, Aggregated: 2}}, + {&influxql.FloatPoint{Name: "cpu", Time: 12 * Second, Value: 11, Aggregated: 2}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +func TestSelect_CumulativeSum_Float(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 12 * Second, Value: 3}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT cumulative_sum(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 20}}, + {&influxql.FloatPoint{Name: "cpu", Time: 4 * Second, Value: 30}}, + {&influxql.FloatPoint{Name: "cpu", Time: 8 * Second, Value: 49}}, + {&influxql.FloatPoint{Name: "cpu", Time: 12 * Second, Value: 52}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +func TestSelect_CumulativeSum_Integer(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 12 * Second, Value: 3}, + }}, nil + } + + // Execute selection. 
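The cumulative_sum tests keep a running total and emit it at every input timestamp: 20, 30, 49, 52 in the float case above. A small standalone sketch of that reading follows; it also shows the duplicate-timestamp case exercised further down, where every point still contributes to the total. This is an illustration, not the engine's iterator code.

```go
package main

import "fmt"

// cumulativeSum emulates cumulative_sum(value): each output is the running total
// of every value seen so far, emitted at the timestamp of the corresponding input.
func cumulativeSum(values []float64) []float64 {
	out := make([]float64, len(values))
	sum := 0.0
	for i, v := range values {
		sum += v
		out[i] = sum
	}
	return out
}

func main() {
	// Matches the float test above: 20, 30, 49, 52.
	fmt.Println(cumulativeSum([]float64{20, 10, 19, 3}))
	// With duplicated timestamps (see the Duplicate tests below) every point still
	// contributes: 20, 19, 10, 3 -> 20, 39, 49, 52.
	fmt.Println(cumulativeSum([]float64{20, 19, 10, 3}))
}
```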
+ itrs, err := influxql.Select(MustParseSelectStatement(`SELECT cumulative_sum(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Time: 0 * Second, Value: 20}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 4 * Second, Value: 30}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 8 * Second, Value: 49}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 12 * Second, Value: 52}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +func TestSelect_CumulativeSum_Duplicate_Float(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 0 * Second, Value: 19}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 4 * Second, Value: 3}, + }}, nil + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT cumulative_sum(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 20}}, + {&influxql.FloatPoint{Name: "cpu", Time: 0 * Second, Value: 39}}, + {&influxql.FloatPoint{Name: "cpu", Time: 4 * Second, Value: 49}}, + {&influxql.FloatPoint{Name: "cpu", Time: 4 * Second, Value: 52}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +func TestSelect_CumulativeSum_Duplicate_Integer(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 0 * Second, Value: 19}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 4 * Second, Value: 3}, + }}, nil + } + + // Execute selection. 
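There is a contrast worth noting between these expectations, reading only from the tests rather than the engine internals: cumulative_sum emits one output per input even when timestamps repeat (20, 39, 49, 52 above), while the Difference_Duplicate tests at the top of this hunk expect a single point (time 4s, value -10) from the same duplicated input, i.e. only the first value at each distinct timestamp appears to be differenced. The sketch below encodes that interpretation; the `pt` type and the behaviour itself are assumptions drawn from the test expectations.

```go
package main

import "fmt"

// pt is a simplified point used only for this illustration.
type pt struct {
	time  int64 // seconds
	value float64
}

// differenceFirstPerTimestamp differences consecutive points but, when several
// points share a timestamp, only the first one at that timestamp is considered.
// This mirrors the Difference_Duplicate expectations:
// (0s,20) (0s,19) (4s,10) (4s,3) -> a single point (4s, -10).
func differenceFirstPerTimestamp(points []pt) []pt {
	var out []pt
	havePrev := false
	var prev pt
	for _, p := range points {
		if havePrev && p.time == prev.time {
			continue // skip later duplicates at the same timestamp
		}
		if havePrev {
			out = append(out, pt{time: p.time, value: p.value - prev.value})
		}
		prev, havePrev = p, true
	}
	return out
}

func main() {
	in := []pt{{0, 20}, {0, 19}, {4, 10}, {4, 3}}
	fmt.Println(differenceFirstPerTimestamp(in)) // [{4 -10}]
}
```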
+ itrs, err := influxql.Select(MustParseSelectStatement(`SELECT cumulative_sum(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Time: 0 * Second, Value: 20}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 0 * Second, Value: 39}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 4 * Second, Value: 49}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 4 * Second, Value: 52}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +func TestSelect_HoltWinters_GroupBy_Agg(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return influxql.NewCallIterator(&FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Time: 10 * Second, Value: 4}, + {Name: "cpu", Time: 11 * Second, Value: 6}, + + {Name: "cpu", Time: 12 * Second, Value: 9}, + {Name: "cpu", Time: 13 * Second, Value: 11}, + + {Name: "cpu", Time: 14 * Second, Value: 5}, + {Name: "cpu", Time: 15 * Second, Value: 7}, + + {Name: "cpu", Time: 16 * Second, Value: 10}, + {Name: "cpu", Time: 17 * Second, Value: 12}, + + {Name: "cpu", Time: 18 * Second, Value: 6}, + {Name: "cpu", Time: 19 * Second, Value: 8}, + }}, opt) + } + + // Execute selection. + itrs, err := influxql.Select(MustParseSelectStatement(`SELECT holt_winters(mean(value), 2, 2) FROM cpu WHERE time >= '1970-01-01T00:00:10Z' AND time < '1970-01-01T00:00:20Z' GROUP BY time(2s)`), &ic, nil) + if err != nil { + t.Fatal(err) + } else if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 20 * Second, Value: 11.960623419918432}}, + {&influxql.FloatPoint{Name: "cpu", Time: 22 * Second, Value: 7.953140268154609}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +func TestSelect_UnsupportedCall(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{}, nil + } + + _, err := influxql.Select(MustParseSelectStatement(`SELECT foobar(value) FROM cpu`), &ic, nil) + if err == nil || err.Error() != "unsupported call: foobar" { + t.Errorf("unexpected error: %s", err) + } +} + +func TestSelect_InvalidQueries(t *testing.T) { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{}, nil + } + + tests := []struct { + q string + err string + }{ + { + q: `SELECT foobar(value) FROM cpu`, + err: `unsupported call: foobar`, + }, + { + q: `SELECT 'value' FROM cpu`, + err: `invalid expression type: *influxql.StringLiteral`, + }, + { + q: `SELECT 'value', value FROM cpu`, + err: `invalid expression type: *influxql.StringLiteral`, + }, + } + + for i, tt := range tests { + itrs, err := influxql.Select(MustParseSelectStatement(tt.q), &ic, nil) + if err == nil || err.Error() != tt.err { + t.Errorf("%d. 
expected error '%s', got '%s'", i, tt.err, err) + } + influxql.Iterators(itrs).Close() + } +} + +func BenchmarkSelect_Raw_1K(b *testing.B) { benchmarkSelectRaw(b, 1000) } +func BenchmarkSelect_Raw_100K(b *testing.B) { benchmarkSelectRaw(b, 1000000) } + +func benchmarkSelectRaw(b *testing.B, pointN int) { + benchmarkSelect(b, MustParseSelectStatement(`SELECT fval FROM cpu`), NewRawBenchmarkIteratorCreator(pointN)) +} + +func benchmarkSelect(b *testing.B, stmt *influxql.SelectStatement, ic influxql.IteratorCreator) { + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + itrs, err := influxql.Select(stmt, ic, nil) + if err != nil { + b.Fatal(err) + } + influxql.DrainIterators(itrs) + } +} + +// NewRawBenchmarkIteratorCreator returns a new mock iterator creator with generated fields. +func NewRawBenchmarkIteratorCreator(pointN int) *IteratorCreator { + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + if opt.Expr != nil { + panic("unexpected expression") + } + + p := influxql.FloatPoint{ + Name: "cpu", + Aux: make([]interface{}, len(opt.Aux)), + } + + for i := range opt.Aux { + switch opt.Aux[i].Val { + case "fval": + p.Aux[i] = float64(100) + default: + panic("unknown iterator expr: " + opt.Expr.String()) + } + } + + return &FloatPointGenerator{N: pointN, Fn: func(i int) *influxql.FloatPoint { + p.Time = int64(time.Duration(i) * (10 * time.Second)) + return &p + }}, nil + } + return &ic +} + +func benchmarkSelectDedupe(b *testing.B, seriesN, pointsPerSeries int) { + stmt := MustParseSelectStatement(`SELECT sval::string FROM cpu`) + stmt.Dedupe = true + + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + if opt.Expr != nil { + panic("unexpected expression") + } + + p := influxql.FloatPoint{ + Name: "tags", + Aux: []interface{}{nil}, + } + + return &FloatPointGenerator{N: seriesN * pointsPerSeries, Fn: func(i int) *influxql.FloatPoint { + p.Aux[0] = fmt.Sprintf("server%d", i%seriesN) + return &p + }}, nil + } + + b.ResetTimer() + benchmarkSelect(b, stmt, &ic) +} + +func BenchmarkSelect_Dedupe_1K(b *testing.B) { benchmarkSelectDedupe(b, 1000, 100) } diff -Nru influxdb-0.10.0+dfsg1/influxql/statement_rewriter.go influxdb-1.1.1+dfsg1/influxql/statement_rewriter.go --- influxdb-0.10.0+dfsg1/influxql/statement_rewriter.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/statement_rewriter.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,230 @@ +package influxql + +import "errors" + +// RewriteStatement rewrites stmt into a new statement, if applicable. 
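The new statement_rewriter.go, whose RewriteStatement function is declared just below, turns the SHOW meta queries into ordinary SELECTs against internal measurements such as _fieldKeys, _series and _tagKeys. A hedged usage sketch follows; the expected output string is taken verbatim from the package's own test table further down in this patch, so the only assumptions are the surrounding main-package scaffolding.

```go
package main

import (
	"fmt"
	"log"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	// Parse a SHOW statement and rewrite it into the SELECT the engine actually runs.
	stmt, err := influxql.ParseStatement(`SHOW TAG KEYS FROM cpu WHERE region = 'uswest'`)
	if err != nil {
		log.Fatal(err)
	}

	rewritten, err := influxql.RewriteStatement(stmt)
	if err != nil {
		log.Fatal(err)
	}

	// Per the rewriter tests below, this prints:
	// SELECT tagKey FROM _tagKeys WHERE (_name = 'cpu') AND (region = 'uswest')
	fmt.Println(rewritten.String())
}
```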
+func RewriteStatement(stmt Statement) (Statement, error) { + switch stmt := stmt.(type) { + case *ShowFieldKeysStatement: + return rewriteShowFieldKeysStatement(stmt) + case *ShowMeasurementsStatement: + return rewriteShowMeasurementsStatement(stmt) + case *ShowSeriesStatement: + return rewriteShowSeriesStatement(stmt) + case *ShowTagKeysStatement: + return rewriteShowTagKeysStatement(stmt) + case *ShowTagValuesStatement: + return rewriteShowTagValuesStatement(stmt) + default: + return stmt, nil + } +} + +func rewriteShowFieldKeysStatement(stmt *ShowFieldKeysStatement) (Statement, error) { + return &SelectStatement{ + Fields: Fields([]*Field{ + {Expr: &VarRef{Val: "fieldKey"}}, + {Expr: &VarRef{Val: "fieldType"}}, + }), + Sources: rewriteSources(stmt.Sources, "_fieldKeys", stmt.Database), + Condition: rewriteSourcesCondition(stmt.Sources, nil), + Offset: stmt.Offset, + Limit: stmt.Limit, + SortFields: stmt.SortFields, + OmitTime: true, + Dedupe: true, + }, nil +} + +func rewriteShowMeasurementsStatement(stmt *ShowMeasurementsStatement) (Statement, error) { + // Check for time in WHERE clause (not supported). + if HasTimeExpr(stmt.Condition) { + return nil, errors.New("SHOW MEASUREMENTS doesn't support time in WHERE clause") + } + + condition := stmt.Condition + if stmt.Source != nil { + condition = rewriteSourcesCondition(Sources([]Source{stmt.Source}), stmt.Condition) + } + return &ShowMeasurementsStatement{ + Database: stmt.Database, + Condition: condition, + Limit: stmt.Limit, + Offset: stmt.Offset, + SortFields: stmt.SortFields, + }, nil +} + +func rewriteShowSeriesStatement(stmt *ShowSeriesStatement) (Statement, error) { + // Check for time in WHERE clause (not supported). + if HasTimeExpr(stmt.Condition) { + return nil, errors.New("SHOW SERIES doesn't support time in WHERE clause") + } + + return &SelectStatement{ + Fields: []*Field{ + {Expr: &VarRef{Val: "key"}}, + }, + Sources: rewriteSources(stmt.Sources, "_series", stmt.Database), + Condition: rewriteSourcesCondition(stmt.Sources, stmt.Condition), + Offset: stmt.Offset, + Limit: stmt.Limit, + SortFields: stmt.SortFields, + OmitTime: true, + Dedupe: true, + }, nil +} + +func rewriteShowTagValuesStatement(stmt *ShowTagValuesStatement) (Statement, error) { + // Check for time in WHERE clause (not supported). + if HasTimeExpr(stmt.Condition) { + return nil, errors.New("SHOW TAG VALUES doesn't support time in WHERE clause") + } + + condition := stmt.Condition + var expr Expr + if list, ok := stmt.TagKeyExpr.(*ListLiteral); ok { + for _, tagKey := range list.Vals { + tagExpr := &BinaryExpr{ + Op: EQ, + LHS: &VarRef{Val: "_tagKey"}, + RHS: &StringLiteral{Val: tagKey}, + } + + if expr != nil { + expr = &BinaryExpr{ + Op: OR, + LHS: expr, + RHS: tagExpr, + } + } else { + expr = tagExpr + } + } + } else { + expr = &BinaryExpr{ + Op: stmt.Op, + LHS: &VarRef{Val: "_tagKey"}, + RHS: stmt.TagKeyExpr, + } + } + + // Set condition or "AND" together. + if condition == nil { + condition = expr + } else { + condition = &BinaryExpr{ + Op: AND, + LHS: &ParenExpr{Expr: condition}, + RHS: &ParenExpr{Expr: expr}, + } + } + condition = rewriteSourcesCondition(stmt.Sources, condition) + + return &ShowTagValuesStatement{ + Database: stmt.Database, + Op: stmt.Op, + TagKeyExpr: stmt.TagKeyExpr, + Condition: condition, + SortFields: stmt.SortFields, + Limit: stmt.Limit, + Offset: stmt.Offset, + }, nil +} + +func rewriteShowTagKeysStatement(stmt *ShowTagKeysStatement) (Statement, error) { + // Check for time in WHERE clause (not supported). 
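Each of these rewriters, including the tag-keys one continuing below, rejects time-bounded WHERE clauses via HasTimeExpr before doing any rewriting. The sketch below shows what a caller would see in that case; the error text comes from the code above, while the exact query used is an assumption chosen for illustration.

```go
package main

import (
	"fmt"
	"log"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	// Time ranges are not supported for the SHOW meta queries: the rewriters
	// return an error as soon as HasTimeExpr finds `time` in the WHERE clause.
	stmt, err := influxql.ParseStatement(`SHOW SERIES WHERE time > now() - 1h`)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := influxql.RewriteStatement(stmt); err != nil {
		fmt.Println(err) // SHOW SERIES doesn't support time in WHERE clause
	}
}
```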
+ if HasTimeExpr(stmt.Condition) { + return nil, errors.New("SHOW TAG KEYS doesn't support time in WHERE clause") + } + + return &SelectStatement{ + Fields: []*Field{ + {Expr: &VarRef{Val: "tagKey"}}, + }, + Sources: rewriteSources(stmt.Sources, "_tagKeys", stmt.Database), + Condition: rewriteSourcesCondition(stmt.Sources, stmt.Condition), + Offset: stmt.Offset, + Limit: stmt.Limit, + SortFields: stmt.SortFields, + OmitTime: true, + Dedupe: true, + }, nil +} + +// rewriteSources rewrites sources with previous database and retention policy +func rewriteSources(sources Sources, measurementName, defaultDatabase string) Sources { + newSources := Sources{} + for _, src := range sources { + if src == nil { + continue + } + mm := src.(*Measurement) + database := mm.Database + if database == "" { + database = defaultDatabase + } + newSources = append(newSources, + &Measurement{ + Database: database, + RetentionPolicy: mm.RetentionPolicy, + Name: measurementName, + }) + } + if len(newSources) <= 0 { + return append(newSources, &Measurement{ + Database: defaultDatabase, + Name: measurementName, + }) + } + return newSources +} + +// rewriteSourcesCondition rewrites sources into `name` expressions. +// Merges with cond and returns a new condition. +func rewriteSourcesCondition(sources Sources, cond Expr) Expr { + if len(sources) == 0 { + return cond + } + + // Generate an OR'd set of filters on source name. + var scond Expr + for _, source := range sources { + mm := source.(*Measurement) + + // Generate a filtering expression on the measurement name. + var expr Expr + if mm.Regex != nil { + expr = &BinaryExpr{ + Op: EQREGEX, + LHS: &VarRef{Val: "_name"}, + RHS: &RegexLiteral{Val: mm.Regex.Val}, + } + } else if mm.Name != "" { + expr = &BinaryExpr{ + Op: EQ, + LHS: &VarRef{Val: "_name"}, + RHS: &StringLiteral{Val: mm.Name}, + } + } + + if scond == nil { + scond = expr + } else { + scond = &BinaryExpr{ + Op: OR, + LHS: scond, + RHS: expr, + } + } + } + + if cond != nil { + return &BinaryExpr{ + Op: AND, + LHS: &ParenExpr{Expr: scond}, + RHS: &ParenExpr{Expr: cond}, + } + } + return scond +} diff -Nru influxdb-0.10.0+dfsg1/influxql/statement_rewriter_test.go influxdb-1.1.1+dfsg1/influxql/statement_rewriter_test.go --- influxdb-0.10.0+dfsg1/influxql/statement_rewriter_test.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/statement_rewriter_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,161 @@ +package influxql_test + +import ( + "testing" + + "github.com/influxdata/influxdb/influxql" +) + +func TestRewriteStatement(t *testing.T) { + tests := []struct { + stmt string + s string + }{ + { + stmt: `SHOW FIELD KEYS`, + s: `SELECT fieldKey, fieldType FROM _fieldKeys`, + }, + { + stmt: `SHOW FIELD KEYS ON db0`, + s: `SELECT fieldKey, fieldType FROM db0.._fieldKeys`, + }, + { + stmt: `SHOW FIELD KEYS FROM cpu`, + s: `SELECT fieldKey, fieldType FROM _fieldKeys WHERE _name = 'cpu'`, + }, + { + stmt: `SHOW FIELD KEYS ON db0 FROM cpu`, + s: `SELECT fieldKey, fieldType FROM db0.._fieldKeys WHERE _name = 'cpu'`, + }, + { + stmt: `SHOW FIELD KEYS FROM /c.*/`, + s: `SELECT fieldKey, fieldType FROM _fieldKeys WHERE _name =~ /c.*/`, + }, + { + stmt: `SHOW FIELD KEYS ON db0 FROM /c.*/`, + s: `SELECT fieldKey, fieldType FROM db0.._fieldKeys WHERE _name =~ /c.*/`, + }, + { + stmt: `SHOW FIELD KEYS FROM mydb.myrp2.cpu`, + s: `SELECT fieldKey, fieldType FROM mydb.myrp2._fieldKeys WHERE _name = 'cpu'`, + }, + { + stmt: `SHOW FIELD KEYS ON db0 FROM mydb.myrp2.cpu`, + s: `SELECT fieldKey, fieldType 
FROM mydb.myrp2._fieldKeys WHERE _name = 'cpu'`, + }, + { + stmt: `SHOW FIELD KEYS FROM mydb.myrp2./c.*/`, + s: `SELECT fieldKey, fieldType FROM mydb.myrp2._fieldKeys WHERE _name =~ /c.*/`, + }, + { + stmt: `SHOW FIELD KEYS ON db0 FROM mydb.myrp2./c.*/`, + s: `SELECT fieldKey, fieldType FROM mydb.myrp2._fieldKeys WHERE _name =~ /c.*/`, + }, + { + stmt: `SHOW SERIES`, + s: `SELECT "key" FROM _series`, + }, + { + stmt: `SHOW SERIES ON db0`, + s: `SELECT "key" FROM db0.._series`, + }, + { + stmt: `SHOW SERIES FROM cpu`, + s: `SELECT "key" FROM _series WHERE _name = 'cpu'`, + }, + { + stmt: `SHOW SERIES ON db0 FROM cpu`, + s: `SELECT "key" FROM db0.._series WHERE _name = 'cpu'`, + }, + { + stmt: `SHOW SERIES FROM mydb.myrp1.cpu`, + s: `SELECT "key" FROM mydb.myrp1._series WHERE _name = 'cpu'`, + }, + { + stmt: `SHOW SERIES ON db0 FROM mydb.myrp1.cpu`, + s: `SELECT "key" FROM mydb.myrp1._series WHERE _name = 'cpu'`, + }, + { + stmt: `SHOW SERIES FROM mydb.myrp1./c.*/`, + s: `SELECT "key" FROM mydb.myrp1._series WHERE _name =~ /c.*/`, + }, + { + stmt: `SHOW SERIES ON db0 FROM mydb.myrp1./c.*/`, + s: `SELECT "key" FROM mydb.myrp1._series WHERE _name =~ /c.*/`, + }, + { + stmt: `SHOW TAG KEYS`, + s: `SELECT tagKey FROM _tagKeys`, + }, + { + stmt: `SHOW TAG KEYS ON db0`, + s: `SELECT tagKey FROM db0.._tagKeys`, + }, + { + stmt: `SHOW TAG KEYS FROM cpu`, + s: `SELECT tagKey FROM _tagKeys WHERE _name = 'cpu'`, + }, + { + stmt: `SHOW TAG KEYS ON db0 FROM cpu`, + s: `SELECT tagKey FROM db0.._tagKeys WHERE _name = 'cpu'`, + }, + { + stmt: `SHOW TAG KEYS FROM /c.*/`, + s: `SELECT tagKey FROM _tagKeys WHERE _name =~ /c.*/`, + }, + { + stmt: `SHOW TAG KEYS ON db0 FROM /c.*/`, + s: `SELECT tagKey FROM db0.._tagKeys WHERE _name =~ /c.*/`, + }, + { + stmt: `SHOW TAG KEYS FROM cpu WHERE region = 'uswest'`, + s: `SELECT tagKey FROM _tagKeys WHERE (_name = 'cpu') AND (region = 'uswest')`, + }, + { + stmt: `SHOW TAG KEYS ON db0 FROM cpu WHERE region = 'uswest'`, + s: `SELECT tagKey FROM db0.._tagKeys WHERE (_name = 'cpu') AND (region = 'uswest')`, + }, + { + stmt: `SHOW TAG KEYS FROM mydb.myrp1.cpu`, + s: `SELECT tagKey FROM mydb.myrp1._tagKeys WHERE _name = 'cpu'`, + }, + { + stmt: `SHOW TAG KEYS ON db0 FROM mydb.myrp1.cpu`, + s: `SELECT tagKey FROM mydb.myrp1._tagKeys WHERE _name = 'cpu'`, + }, + { + stmt: `SHOW TAG KEYS FROM mydb.myrp1./c.*/`, + s: `SELECT tagKey FROM mydb.myrp1._tagKeys WHERE _name =~ /c.*/`, + }, + { + stmt: `SHOW TAG KEYS ON db0 FROM mydb.myrp1./c.*/`, + s: `SELECT tagKey FROM mydb.myrp1._tagKeys WHERE _name =~ /c.*/`, + }, + { + stmt: `SHOW TAG KEYS FROM mydb.myrp1.cpu WHERE region = 'uswest'`, + s: `SELECT tagKey FROM mydb.myrp1._tagKeys WHERE (_name = 'cpu') AND (region = 'uswest')`, + }, + { + stmt: `SHOW TAG KEYS ON db0 FROM mydb.myrp1.cpu WHERE region = 'uswest'`, + s: `SELECT tagKey FROM mydb.myrp1._tagKeys WHERE (_name = 'cpu') AND (region = 'uswest')`, + }, + { + stmt: `SELECT value FROM cpu`, + s: `SELECT value FROM cpu`, + }, + } + + for _, test := range tests { + stmt, err := influxql.ParseStatement(test.stmt) + if err != nil { + t.Errorf("error parsing statement: %s", err) + } else { + stmt, err = influxql.RewriteStatement(stmt) + if err != nil { + t.Errorf("error rewriting statement: %s", err) + } else if s := stmt.String(); s != test.s { + t.Errorf("error rendering string. 
expected %s, actual: %s", test.s, s) + } + } + } +} diff -Nru influxdb-0.10.0+dfsg1/influxql/task_manager.go influxdb-1.1.1+dfsg1/influxql/task_manager.go --- influxdb-0.10.0+dfsg1/influxql/task_manager.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/task_manager.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,263 @@ +package influxql + +import ( + "fmt" + "io/ioutil" + "log" + "sync" + "time" + + "github.com/influxdata/influxdb/models" +) + +const ( + // DefaultQueryTimeout is the default timeout for executing a query. + // A value of zero will have no query timeout. + DefaultQueryTimeout = time.Duration(0) +) + +// TaskManager takes care of all aspects related to managing running queries. +type TaskManager struct { + // Query execution timeout. + QueryTimeout time.Duration + + // Log queries if they are slower than this time. + // If zero, slow queries will never be logged. + LogQueriesAfter time.Duration + + // Maximum number of concurrent queries. + MaxConcurrentQueries int + + // Logger to use for all logging. + // Defaults to discarding all log output. + Logger *log.Logger + + // Used for managing and tracking running queries. + queries map[uint64]*QueryTask + nextID uint64 + mu sync.RWMutex + shutdown bool +} + +// NewTaskManager creates a new TaskManager. +func NewTaskManager() *TaskManager { + return &TaskManager{ + QueryTimeout: DefaultQueryTimeout, + Logger: log.New(ioutil.Discard, "[query] ", log.LstdFlags), + queries: make(map[uint64]*QueryTask), + nextID: 1, + } +} + +// ExecuteStatement executes a statement containing one of the task management queries. +func (t *TaskManager) ExecuteStatement(stmt Statement, ctx ExecutionContext) error { + switch stmt := stmt.(type) { + case *ShowQueriesStatement: + rows, err := t.executeShowQueriesStatement(stmt) + if err != nil { + return err + } + + ctx.Results <- &Result{ + StatementID: ctx.StatementID, + Series: rows, + } + case *KillQueryStatement: + var messages []*Message + if ctx.ReadOnly { + messages = append(messages, ReadOnlyWarning(stmt.String())) + } + + if err := t.executeKillQueryStatement(stmt); err != nil { + return err + } + ctx.Results <- &Result{ + StatementID: ctx.StatementID, + Messages: messages, + } + default: + return ErrInvalidQuery + } + return nil +} + +func (t *TaskManager) executeKillQueryStatement(stmt *KillQueryStatement) error { + return t.KillQuery(stmt.QueryID) +} + +func (t *TaskManager) executeShowQueriesStatement(q *ShowQueriesStatement) (models.Rows, error) { + t.mu.RLock() + defer t.mu.RUnlock() + + now := time.Now() + + values := make([][]interface{}, 0, len(t.queries)) + for id, qi := range t.queries { + d := now.Sub(qi.startTime) + + switch { + case d >= time.Second: + d = d - (d % time.Second) + case d >= time.Millisecond: + d = d - (d % time.Millisecond) + case d >= time.Microsecond: + d = d - (d % time.Microsecond) + } + + values = append(values, []interface{}{id, qi.query, qi.database, d.String()}) + } + + return []*models.Row{{ + Columns: []string{"qid", "query", "database", "duration"}, + Values: values, + }}, nil +} + +func (t *TaskManager) query(qid uint64) (*QueryTask, bool) { + t.mu.RLock() + query, ok := t.queries[qid] + t.mu.RUnlock() + return query, ok +} + +// AttachQuery attaches a running query to be managed by the TaskManager. +// Returns the query id of the newly attached query or an error if it was +// unable to assign a query id or attach the query to the TaskManager. 
+// This function also returns a channel that will be closed when this +// query finishes running. +// +// After a query finishes running, the system is free to reuse a query id. +func (t *TaskManager) AttachQuery(q *Query, database string, interrupt <-chan struct{}) (uint64, *QueryTask, error) { + t.mu.Lock() + defer t.mu.Unlock() + + if t.shutdown { + return 0, nil, ErrQueryEngineShutdown + } + + if t.MaxConcurrentQueries > 0 && len(t.queries) >= t.MaxConcurrentQueries { + return 0, nil, ErrMaxConcurrentQueriesLimitExceeded(len(t.queries), t.MaxConcurrentQueries) + } + + qid := t.nextID + query := &QueryTask{ + query: q.String(), + database: database, + startTime: time.Now(), + closing: make(chan struct{}), + monitorCh: make(chan error), + } + t.queries[qid] = query + + go t.waitForQuery(qid, query.closing, interrupt, query.monitorCh) + if t.LogQueriesAfter != 0 { + go query.monitor(func(closing <-chan struct{}) error { + timer := time.NewTimer(t.LogQueriesAfter) + defer timer.Stop() + + select { + case <-timer.C: + t.Logger.Printf("Detected slow query: %s (qid: %d, database: %s, threshold: %s)", + query.query, qid, query.database, t.LogQueriesAfter) + case <-closing: + } + return nil + }) + } + t.nextID++ + return qid, query, nil +} + +// KillQuery stops and removes a query from the TaskManager. +// This method can be used to forcefully terminate a running query. +func (t *TaskManager) KillQuery(qid uint64) error { + t.mu.Lock() + defer t.mu.Unlock() + + query, ok := t.queries[qid] + if !ok { + return fmt.Errorf("no such query id: %d", qid) + } + + close(query.closing) + delete(t.queries, qid) + return nil +} + +// QueryInfo represents the information for a query. +type QueryInfo struct { + ID uint64 `json:"id"` + Query string `json:"query"` + Database string `json:"database"` + Duration time.Duration `json:"duration"` +} + +// Queries returns a list of all running queries with information about them. +func (t *TaskManager) Queries() []QueryInfo { + t.mu.RLock() + defer t.mu.RUnlock() + + now := time.Now() + queries := make([]QueryInfo, 0, len(t.queries)) + for id, qi := range t.queries { + queries = append(queries, QueryInfo{ + ID: id, + Query: qi.query, + Database: qi.database, + Duration: now.Sub(qi.startTime), + }) + } + return queries +} + +func (t *TaskManager) waitForQuery(qid uint64, interrupt <-chan struct{}, closing <-chan struct{}, monitorCh <-chan error) { + var timerCh <-chan time.Time + if t.QueryTimeout != 0 { + timer := time.NewTimer(t.QueryTimeout) + timerCh = timer.C + defer timer.Stop() + } + + select { + case <-closing: + query, ok := t.query(qid) + if !ok { + break + } + query.setError(ErrQueryInterrupted) + case err := <-monitorCh: + if err == nil { + break + } + + query, ok := t.query(qid) + if !ok { + break + } + query.setError(err) + case <-timerCh: + query, ok := t.query(qid) + if !ok { + break + } + query.setError(ErrQueryTimeoutLimitExceeded) + case <-interrupt: + // Query was manually closed so exit the select. + return + } + t.KillQuery(qid) +} + +// Close kills all running queries and prevents new queries from being attached. 
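A hedged sketch of driving the TaskManager shown above directly: attach a parsed query, list it as SHOW QUERIES would, then terminate it as KILL QUERY would; Close, declared just below, shuts the whole manager down. The sketch assumes the package's ParseQuery helper for obtaining a *Query and ignores the returned *QueryTask; it is an orientation example, not how the query executor wires this up internally.

```go
package main

import (
	"fmt"
	"log"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	tm := influxql.NewTaskManager()
	defer tm.Close()

	// Assumption: influxql.ParseQuery is used here purely to obtain a *Query.
	q, err := influxql.ParseQuery(`SELECT mean(value) FROM cpu`)
	if err != nil {
		log.Fatal(err)
	}

	// Register the query; the interrupt channel lets the caller detach it manually.
	interrupt := make(chan struct{})
	qid, _, err := tm.AttachQuery(q, "mydb", interrupt)
	if err != nil {
		log.Fatal(err)
	}

	// Queries() backs the SHOW QUERIES output seen in executeShowQueriesStatement.
	for _, info := range tm.Queries() {
		fmt.Printf("qid=%d query=%q db=%s running for %s\n",
			info.ID, info.Query, info.Database, info.Duration)
	}

	// Forcefully terminate it, as KILL QUERY <qid> would.
	if err := tm.KillQuery(qid); err != nil {
		log.Fatal(err)
	}
}
```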
+func (t *TaskManager) Close() error { + t.mu.Lock() + defer t.mu.Unlock() + + t.shutdown = true + for _, query := range t.queries { + query.setError(ErrQueryEngineShutdown) + close(query.closing) + } + t.queries = nil + return nil +} diff -Nru influxdb-0.10.0+dfsg1/influxql/tmpldata influxdb-1.1.1+dfsg1/influxql/tmpldata --- influxdb-0.10.0+dfsg1/influxql/tmpldata 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/tmpldata 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,30 @@ +[ + { + "Name":"Float", + "name":"float", + "Type":"float64", + "Nil":"0", + "Zero":"float64(0)" + }, + { + "Name":"Integer", + "name":"integer", + "Type":"int64", + "Nil":"0", + "Zero":"int64(0)" + }, + { + "Name":"String", + "name":"string", + "Type":"string", + "Nil":"\"\"", + "Zero":"\"\"" + }, + { + "Name":"Boolean", + "name":"boolean", + "Type":"bool", + "Nil":"false", + "Zero":"false" + } +] diff -Nru influxdb-0.10.0+dfsg1/influxql/token.go influxdb-1.1.1+dfsg1/influxql/token.go --- influxdb-0.10.0+dfsg1/influxql/token.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxql/token.go 2016-12-06 21:36:15.000000000 +0000 @@ -7,28 +7,31 @@ // Token is a lexical token of the InfluxQL language. type Token int +// These are a comprehensive list of InfluxQL language tokens. const ( - // Special tokens + // ILLEGAL Token, EOF, WS are Special InfluxQL tokens. ILLEGAL Token = iota EOF WS - literal_beg - // Literals - IDENT // main - NUMBER // 12345.67 - DURATION_VAL // 13h - STRING // "abc" - BADSTRING // "abc - BADESCAPE // \q - TRUE // true - FALSE // false - REGEX // Regular expressions - BADREGEX // `.* - literal_end + literalBeg + // IDENT and the following are InfluxQL literal tokens. + IDENT // main + BOUNDPARAM // $param + NUMBER // 12345.67 + INTEGER // 12345 + DURATIONVAL // 13h + STRING // "abc" + BADSTRING // "abc + BADESCAPE // \q + TRUE // true + FALSE // false + REGEX // Regular expressions + BADREGEX // `.* + literalEnd - operator_beg - // Operators + operatorBeg + // ADD and the following are InfluxQL Operators ADD // + SUB // - MUL // * @@ -45,17 +48,18 @@ LTE // <= GT // > GTE // >= - operator_end + operatorEnd - LPAREN // ( - RPAREN // ) - COMMA // , - COLON // : - SEMICOLON // ; - DOT // . + LPAREN // ( + RPAREN // ) + COMMA // , + COLON // : + DOUBLECOLON // :: + SEMICOLON // ; + DOT // . 
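Among the token changes above, DOUBLECOLON (`::`) is new; it backs the explicit type casts such as the `sval::string` reference used in the dedupe benchmark earlier in this patch. A short round-trip sketch under that assumption — the exact printed form is expected, not guaranteed, to match the input:

```go
package main

import (
	"fmt"
	"log"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	// The :: token (DOUBLECOLON) lets a field reference carry an explicit type.
	stmt, err := influxql.ParseStatement(`SELECT sval::string FROM cpu`)
	if err != nil {
		log.Fatal(err)
	}
	// Prints the parsed statement back out (expected: SELECT sval::string FROM cpu).
	fmt.Println(stmt.String())
}
```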
- keyword_beg - // Keywords + keywordBeg + // ALL and the following are InfluxQL Keywords ALL ALTER ANY @@ -65,7 +69,6 @@ BY CREATE CONTINUOUS - DATA DATABASE DATABASES DEFAULT @@ -78,30 +81,25 @@ DURATION END EVERY - EXISTS EXPLAIN FIELD FOR - FORCE FROM GRANT GRANTS GROUP GROUPS - IF IN INF - INNER INSERT INTO KEY KEYS + KILL LIMIT - META MEASUREMENT MEASUREMENTS NAME - NOT OFFSET ON ORDER @@ -118,8 +116,6 @@ REVOKE SELECT SERIES - SERVER - SERVERS SET SHOW SHARD @@ -137,7 +133,7 @@ WHERE WITH WRITE - keyword_end + keywordEnd ) var tokens = [...]string{ @@ -145,15 +141,15 @@ EOF: "EOF", WS: "WS", - IDENT: "IDENT", - NUMBER: "NUMBER", - DURATION_VAL: "DURATION_VAL", - STRING: "STRING", - BADSTRING: "BADSTRING", - BADESCAPE: "BADESCAPE", - TRUE: "TRUE", - FALSE: "FALSE", - REGEX: "REGEX", + IDENT: "IDENT", + NUMBER: "NUMBER", + DURATIONVAL: "DURATIONVAL", + STRING: "STRING", + BADSTRING: "BADSTRING", + BADESCAPE: "BADESCAPE", + TRUE: "TRUE", + FALSE: "FALSE", + REGEX: "REGEX", ADD: "+", SUB: "-", @@ -172,12 +168,13 @@ GT: ">", GTE: ">=", - LPAREN: "(", - RPAREN: ")", - COMMA: ",", - COLON: ":", - SEMICOLON: ";", - DOT: ".", + LPAREN: "(", + RPAREN: ")", + COMMA: ",", + COLON: ":", + DOUBLECOLON: "::", + SEMICOLON: ";", + DOT: ".", ALL: "ALL", ALTER: "ALTER", @@ -188,7 +185,6 @@ BY: "BY", CREATE: "CREATE", CONTINUOUS: "CONTINUOUS", - DATA: "DATA", DATABASE: "DATABASE", DATABASES: "DATABASES", DEFAULT: "DEFAULT", @@ -201,30 +197,25 @@ DURATION: "DURATION", END: "END", EVERY: "EVERY", - EXISTS: "EXISTS", EXPLAIN: "EXPLAIN", FIELD: "FIELD", FOR: "FOR", - FORCE: "FORCE", FROM: "FROM", GRANT: "GRANT", GRANTS: "GRANTS", GROUP: "GROUP", GROUPS: "GROUPS", - IF: "IF", IN: "IN", INF: "INF", - INNER: "INNER", INSERT: "INSERT", INTO: "INTO", KEY: "KEY", KEYS: "KEYS", + KILL: "KILL", LIMIT: "LIMIT", MEASUREMENT: "MEASUREMENT", MEASUREMENTS: "MEASUREMENTS", - META: "META", NAME: "NAME", - NOT: "NOT", OFFSET: "OFFSET", ON: "ON", ORDER: "ORDER", @@ -241,8 +232,6 @@ REVOKE: "REVOKE", SELECT: "SELECT", SERIES: "SERIES", - SERVER: "SERVER", - SERVERS: "SERVERS", SET: "SET", SHOW: "SHOW", SHARD: "SHARD", @@ -266,7 +255,7 @@ func init() { keywords = make(map[string]Token) - for tok := keyword_beg + 1; tok < keyword_end; tok++ { + for tok := keywordBeg + 1; tok < keywordEnd; tok++ { keywords[strings.ToLower(tokens[tok])] = tok } for _, tok := range []Token{AND, OR} { @@ -302,7 +291,7 @@ } // isOperator returns true for operator tokens. -func (tok Token) isOperator() bool { return tok > operator_beg && tok < operator_end } +func (tok Token) isOperator() bool { return tok > operatorBeg && tok < operatorEnd } // tokstr returns a literal if provided, otherwise returns the token string. func tokstr(tok Token, lit string) string { diff -Nru influxdb-0.10.0+dfsg1/influxvar.go influxdb-1.1.1+dfsg1/influxvar.go --- influxdb-0.10.0+dfsg1/influxvar.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/influxvar.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,45 +0,0 @@ -package influxdb - -import ( - "expvar" - "sync" -) - -var expvarMu sync.Mutex - -// NewStatistics returns an expvar-based map with the given key. Within that map -// is another map. Within there "name" is the Measurement name, "tags" are the tags, -// and values are placed at the key "values". -func NewStatistics(key, name string, tags map[string]string) *expvar.Map { - expvarMu.Lock() - defer expvarMu.Unlock() - - // Add expvar for this service. 
- var v expvar.Var - if v = expvar.Get(key); v == nil { - v = expvar.NewMap(key) - } - m := v.(*expvar.Map) - - // Set the name - nameVar := &expvar.String{} - nameVar.Set(name) - m.Set("name", nameVar) - - // Set the tags - tagsVar := &expvar.Map{} - tagsVar.Init() - for k, v := range tags { - value := &expvar.String{} - value.Set(v) - tagsVar.Set(k, value) - } - m.Set("tags", tagsVar) - - // Create and set the values entry used for actual stats. - statMap := &expvar.Map{} - statMap.Init() - m.Set("values", statMap) - - return statMap -} diff -Nru influxdb-0.10.0+dfsg1/internal/meta_client.go influxdb-1.1.1+dfsg1/internal/meta_client.go --- influxdb-0.10.0+dfsg1/internal/meta_client.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/internal/meta_client.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,162 @@ +package internal + +import ( + "time" + + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/services/meta" +) + +// MetaClientMock is a mockable implementation of meta.MetaClient. +type MetaClientMock struct { + CloseFn func() error + CreateContinuousQueryFn func(database, name, query string) error + CreateDatabaseFn func(name string) (*meta.DatabaseInfo, error) + CreateDatabaseWithRetentionPolicyFn func(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) + CreateRetentionPolicyFn func(database string, spec *meta.RetentionPolicySpec) (*meta.RetentionPolicyInfo, error) + CreateShardGroupFn func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) + CreateSubscriptionFn func(database, rp, name, mode string, destinations []string) error + CreateUserFn func(name, password string, admin bool) (*meta.UserInfo, error) + + DatabaseFn func(name string) *meta.DatabaseInfo + DatabasesFn func() []meta.DatabaseInfo + + DataFn func() meta.Data + DeleteShardGroupFn func(database string, policy string, id uint64) error + DropContinuousQueryFn func(database, name string) error + DropDatabaseFn func(name string) error + DropRetentionPolicyFn func(database, name string) error + DropSubscriptionFn func(database, rp, name string) error + DropShardFn func(id uint64) error + DropUserFn func(name string) error + + OpenFn func() error + + RetentionPolicyFn func(database, name string) (rpi *meta.RetentionPolicyInfo, err error) + + SetAdminPrivilegeFn func(username string, admin bool) error + SetDataFn func(*meta.Data) error + SetDefaultRetentionPolicyFn func(database, name string) error + SetPrivilegeFn func(username, database string, p influxql.Privilege) error + ShardsByTimeRangeFn func(sources influxql.Sources, tmin, tmax time.Time) (a []meta.ShardInfo, err error) + ShardOwnerFn func(shardID uint64) (database, policy string, sgi *meta.ShardGroupInfo) + UpdateRetentionPolicyFn func(database, name string, rpu *meta.RetentionPolicyUpdate) error + UpdateUserFn func(name, password string) error + UserPrivilegeFn func(username, database string) (*influxql.Privilege, error) + UserPrivilegesFn func(username string) (map[string]influxql.Privilege, error) + UsersFn func() []meta.UserInfo +} + +func (c *MetaClientMock) Close() error { + return c.CloseFn() +} + +func (c *MetaClientMock) CreateContinuousQuery(database, name, query string) error { + return c.CreateContinuousQueryFn(database, name, query) +} + +func (c *MetaClientMock) CreateDatabase(name string) (*meta.DatabaseInfo, error) { + return c.CreateDatabaseFn(name) +} + +func (c *MetaClientMock) CreateDatabaseWithRetentionPolicy(name string, spec *meta.RetentionPolicySpec) 
(*meta.DatabaseInfo, error) { + return c.CreateDatabaseWithRetentionPolicyFn(name, spec) +} + +func (c *MetaClientMock) CreateRetentionPolicy(database string, spec *meta.RetentionPolicySpec) (*meta.RetentionPolicyInfo, error) { + return c.CreateRetentionPolicyFn(database, spec) +} + +func (c *MetaClientMock) CreateShardGroup(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { + return c.CreateShardGroupFn(database, policy, timestamp) +} + +func (c *MetaClientMock) CreateSubscription(database, rp, name, mode string, destinations []string) error { + return c.CreateSubscriptionFn(database, rp, name, mode, destinations) +} + +func (c *MetaClientMock) CreateUser(name, password string, admin bool) (*meta.UserInfo, error) { + return c.CreateUserFn(name, password, admin) +} + +func (c *MetaClientMock) Database(name string) *meta.DatabaseInfo { + return c.DatabaseFn(name) +} + +func (c *MetaClientMock) Databases() []meta.DatabaseInfo { + return c.DatabasesFn() +} + +func (c *MetaClientMock) DeleteShardGroup(database string, policy string, id uint64) error { + return c.DeleteShardGroup(database, policy, id) +} + +func (c *MetaClientMock) DropContinuousQuery(database, name string) error { + return c.DropContinuousQueryFn(database, name) +} + +func (c *MetaClientMock) DropDatabase(name string) error { + return c.DropDatabaseFn(name) +} + +func (c *MetaClientMock) DropRetentionPolicy(database, name string) error { + return c.DropRetentionPolicyFn(database, name) +} + +func (c *MetaClientMock) DropShard(id uint64) error { + return c.DropShardFn(id) +} + +func (c *MetaClientMock) DropSubscription(database, rp, name string) error { + return c.DropSubscriptionFn(database, rp, name) +} + +func (c *MetaClientMock) DropUser(name string) error { + return c.DropUserFn(name) +} + +func (c *MetaClientMock) RetentionPolicy(database, name string) (rpi *meta.RetentionPolicyInfo, err error) { + return c.RetentionPolicyFn(database, name) +} + +func (c *MetaClientMock) SetAdminPrivilege(username string, admin bool) error { + return c.SetAdminPrivilegeFn(username, admin) +} + +func (c *MetaClientMock) SetDefaultRetentionPolicy(database, name string) error { + return c.SetDefaultRetentionPolicyFn(database, name) +} + +func (c *MetaClientMock) SetPrivilege(username, database string, p influxql.Privilege) error { + return c.SetPrivilegeFn(username, database, p) +} + +func (c *MetaClientMock) ShardsByTimeRange(sources influxql.Sources, tmin, tmax time.Time) (a []meta.ShardInfo, err error) { + return c.ShardsByTimeRangeFn(sources, tmin, tmax) +} + +func (c *MetaClientMock) ShardOwner(shardID uint64) (database, policy string, sgi *meta.ShardGroupInfo) { + return c.ShardOwnerFn(shardID) +} + +func (c *MetaClientMock) UpdateRetentionPolicy(database, name string, rpu *meta.RetentionPolicyUpdate) error { + return c.UpdateRetentionPolicyFn(database, name, rpu) +} + +func (c *MetaClientMock) UpdateUser(name, password string) error { + return c.UpdateUserFn(name, password) +} + +func (c *MetaClientMock) UserPrivilege(username, database string) (*influxql.Privilege, error) { + return c.UserPrivilegeFn(username, database) +} + +func (c *MetaClientMock) UserPrivileges(username string) (map[string]influxql.Privilege, error) { + return c.UserPrivilegesFn(username) +} + +func (c *MetaClientMock) Users() []meta.UserInfo { return c.UsersFn() } + +func (c *MetaClientMock) Open() error { return c.OpenFn() } +func (c *MetaClientMock) Data() meta.Data { return c.DataFn() } +func (c *MetaClientMock) SetData(d 
*meta.Data) error { return c.SetDataFn(d) } diff -Nru influxdb-0.10.0+dfsg1/LICENSE influxdb-1.1.1+dfsg1/LICENSE --- influxdb-0.10.0+dfsg1/LICENSE 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/LICENSE 2016-12-06 21:36:15.000000000 +0000 @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2013-2015 Errplane Inc. +Copyright (c) 2013-2016 Errplane Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff -Nru influxdb-0.10.0+dfsg1/LICENSE_OF_DEPENDENCIES.md influxdb-1.1.1+dfsg1/LICENSE_OF_DEPENDENCIES.md --- influxdb-0.10.0+dfsg1/LICENSE_OF_DEPENDENCIES.md 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/LICENSE_OF_DEPENDENCIES.md 2016-12-06 21:36:15.000000000 +0000 @@ -1,19 +1,23 @@ # List -- github.com/gogo/protobuf/proto [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE) -- gopkg.in/fatih/pool.v2 [MIT LICENSE](https://github.com/fatih/pool/blob/v2.0.0/LICENSE) +- bootstrap 3.3.5 [MIT LICENSE](https://github.com/twbs/bootstrap/blob/master/LICENSE) +- collectd.org [ISC LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE) - github.com/BurntSushi/toml [WTFPL LICENSE](https://github.com/BurntSushi/toml/blob/master/COPYING) -- github.com/peterh/liner [MIT LICENSE](https://github.com/peterh/liner/blob/master/COPYING) +- github.com/bmizerany/pat [MIT LICENSE](https://github.com/bmizerany/pat#license) +- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE) - github.com/davecgh/go-spew/spew [ISC LICENSE](https://github.com/davecgh/go-spew/blob/master/LICENSE) -- github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE) -- github.com/rakyll/statik/fs [APACHE LICENSE](https://github.com/rakyll/statik/blob/master/LICENSE) +- github.com/dgrijalva/jwt-go [MIT LICENSE](https://github.com/dgrijalva/jwt-go/blob/master/LICENSE) +- github.com/dgryski/go-bits [MIT LICENSE](https://github.com/dgryski/go-bits/blob/master/LICENSE) +- github.com/dgryski/go-bitstream [MIT LICENSE](https://github.com/dgryski/go-bitstream/blob/master/LICENSE) +- github.com/gogo/protobuf/proto [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE) +- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE) +- github.com/influxdata/usage-client [MIT LICENSE](https://github.com/influxdata/usage-client/blob/master/LICENSE.txt) +- github.com/jwilder/encoding [MIT LICENSE](https://github.com/jwilder/encoding/blob/master/LICENSE) - github.com/kimor79/gollectd [BSD LICENSE](https://github.com/kimor79/gollectd/blob/master/LICENSE) -- github.com/bmizerany/pat [MIT LICENSE](https://github.com/bmizerany/pat#license) -- react 0.13.3 [BSD LICENSE](https://github.com/facebook/react/blob/master/LICENSE) -- bootstrap 3.3.5 [MIT LICENSE](https://github.com/twbs/bootstrap/blob/master/LICENSE) -- jquery 2.1.4 [MIT LICENSE](https://github.com/jquery/jquery/blob/master/LICENSE.txt) +- github.com/paulbellamy/ratecounter [MIT LICENSE](https://github.com/paulbellamy/ratecounter/blob/master/LICENSE) +- github.com/peterh/liner [MIT LICENSE](https://github.com/peterh/liner/blob/master/COPYING) +- github.com/rakyll/statik [APACHE LICENSE](https://github.com/rakyll/statik/blob/master/LICENSE) +- github.com/retailnext/hllpp [BSD LICENSE](https://github.com/retailnext/hllpp/blob/master/LICENSE) - glyphicons [LICENSE](http://glyphicons.com/license/) -- github.com/golang/snappy 
[BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE) -- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE) -- collectd.org [ISC LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE) -- golang.org/x/crypto/* [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE) - +- golang.org/x/crypto [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE) +- jquery 2.1.4 [MIT LICENSE](https://github.com/jquery/jquery/blob/master/LICENSE.txt) +- react 0.13.3 [BSD LICENSE](https://github.com/facebook/react/blob/master/LICENSE) diff -Nru influxdb-0.10.0+dfsg1/Makefile influxdb-1.1.1+dfsg1/Makefile --- influxdb-0.10.0+dfsg1/Makefile 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/Makefile 2016-12-06 21:36:15.000000000 +0000 @@ -34,5 +34,6 @@ go get github.com/opennota/check/... go get github.com/golang/lint/golint go get github.com/kisielk/errcheck + go get github.com/sparrc/gdm -.PHONY: default,metalint,deadcode,cyclo,aligncheck,defercheck,structcheck,lint,errcheck,tools \ No newline at end of file +.PHONY: default,metalint,deadcode,cyclo,aligncheck,defercheck,structcheck,lint,errcheck,tools diff -Nru influxdb-0.10.0+dfsg1/man/footer.txt influxdb-1.1.1+dfsg1/man/footer.txt --- influxdb-0.10.0+dfsg1/man/footer.txt 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/man/footer.txt 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,13 @@ +BUGS +---- +Report bugs to the GitHub issue tracker . + +AUTHORS +------- +InfluxDB is written and maintained by InfluxData . + +COPYRIGHT +--------- +InfluxDB is released under the MIT license. + +This man page is released under Creative Commons Attribution 4.0 International License. diff -Nru influxdb-0.10.0+dfsg1/man/influxd-backup.txt influxdb-1.1.1+dfsg1/man/influxd-backup.txt --- influxdb-0.10.0+dfsg1/man/influxd-backup.txt 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/man/influxd-backup.txt 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,37 @@ +influxd-backup(1) +================= + +NAME +---- +influxd-backup - Downloads a snapshot of a data node and saves it to disk + +SYNOPSIS +-------- +'influxd backup' [options] + +DESCRIPTION +----------- +Downloads a snapshot of a data node and saves it to disk. + +OPTIONS +------- +-host :: + The host to connect to and perform a snapshot of. Defaults to '127.0.0.1:8088'. + +-database :: + The database to backup. Required. + +-retention :: + The retention policy to backup. Optional. + +-shard :: + The shard id to backup. Optional. If specified, '-retention ' is required. + +-since <2015-12-24T08:12:13>:: + Do an incremental backup since the passed in time. The time needs to be in the RFC3339 format. Optional. + +SEE ALSO +-------- +*influxd-restore*(1) + +include::footer.txt[] diff -Nru influxdb-0.10.0+dfsg1/man/influxd-config.txt influxdb-1.1.1+dfsg1/man/influxd-config.txt --- influxdb-0.10.0+dfsg1/man/influxd-config.txt 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/man/influxd-config.txt 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,40 @@ +influxd-config(1) +================= + +NAME +---- +influxd-config - Generate configuration files for InfluxDB + +SYNOPSIS +-------- +[verse] +'influxd' config (-config ) +'influxd config' -config /dev/null + +DESCRIPTION +----------- +'influxd config' will generate a configuration file for InfluxDB. The configuration file will be output to standard output and can be written to a file by redirecting the shell output to another file. 
+ +When a configuration file is specified using '-config ', this configuration file will be read and will overwrite the default values for any values that are present. It can be used to provide a configuration fragment with only the options you want to customize and generate a new configuration file from that file. If '-config ' is not specified, the command will look for a default configuration file using the same method as *influxd-run*(1). + +When using this command to regenerate a configuration file in place, be sure to use a temporary file as the output. This command will not work: + +=== +# DO NOT USE! +$ influxd config -config influxdb.conf > influxdb.conf + +# PROPER METHOD! +$ influxd config -config influxdb.conf > influxdb.conf.tmp && \ + mv influxdb.conf.tmp influxdb.conf +=== + +The shell will truncate the configuration file before 'influxd config' can read it and you will lose all of your custom options. For safety, redirect output to a temporary file instead and use 'mv' to move the file afterwards. + +The second command version will force 'influxd config' to output the default configuration file. Setting the configuration file to */dev/null* will cause the command to output only the defaults and will not read any values from any existing configuration files. + +OPTIONS +------- +-config :: + Customize the default configuration file to load. Disables automatic loading when the path is */dev/null*. + +include::footer.txt[] diff -Nru influxdb-0.10.0+dfsg1/man/influxd-restore.txt influxdb-1.1.1+dfsg1/man/influxd-restore.txt --- influxdb-0.10.0+dfsg1/man/influxd-restore.txt 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/man/influxd-restore.txt 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,37 @@ +influxd-restore(1) +================== + +NAME +---- +influxd-restore - Restores the metastore, databases, retention policies, or specific shards + +SYNOPSIS +-------- +'influxd restore' [options] PATH + +DESCRIPTION +----------- +Uses backups from the PATH to restore the metastore, databases, retention policies, or specific shards. The InfluxDB process must not be running during a restore. + +OPTIONS +------- +-metadir :: + If set, the metastore will be recovered to the given path. Optional. + +-datadir :: + If set, the restore process will recover the specified database, retention policy, or shard to the given directory. Optional. + +-database :: + Will restore the database TSM files. Required if no metadir is given. Optional. + +-retention :: + Will restore the retention policy's TSM files. If given, database is required. Optional. + +-shard :: + Will restore the shard's TSM files. If given, database and retention are required. Optional. + +SEE ALSO +-------- +*influxd-backup*(1) + +include::footer.txt[] diff -Nru influxdb-0.10.0+dfsg1/man/influxd-run.txt influxdb-1.1.1+dfsg1/man/influxd-run.txt --- influxdb-0.10.0+dfsg1/man/influxd-run.txt 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/man/influxd-run.txt 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,32 @@ +influxd-run(1) +============== + +NAME +---- +influxd-run - Configure and start an InfluxDB server + +SYNOPSIS +-------- +[verse] +'influxd' [-config ] [-pidfile ] [-cpuprofile ] [-memprofile ] +'influxd run' [-config ] [-pidfile ] [-cpuprofile ] [-memprofile ] + +DESCRIPTION +----------- +Runs the InfluxDB server. + +OPTIONS +------- +-config :: + Sets the path to the configuration file. 
This defaults to the environment variable *INFLUXDB_CONFIG_PATH*, *~/.influxdb/influxdb.conf*, or */etc/influxdb/influxdb.conf* if a file is present at any of these locations. Disable the automatic loading of a configuration file by using the null device as the path (such as /dev/null on Linux or Mac OS X). + +-pidfile :: + Write process ID to a file. + +-cpuprofile :: + Write CPU profiling information to a file. + +-memprofile :: + Write memory usage information to a file. + +include::footer.txt[] diff -Nru influxdb-0.10.0+dfsg1/man/influxd.txt influxdb-1.1.1+dfsg1/man/influxd.txt --- influxdb-0.10.0+dfsg1/man/influxd.txt 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/man/influxd.txt 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,40 @@ +influxd(1) +========== + +NAME +---- +influxd - InfluxDB server daemon + +SYNOPSIS +-------- +[verse] +'influxd' [command] [options] + +DESCRIPTION +----------- +'influxd' is the server daemon for InfluxDB. + +COMMANDS +-------- +These commands can be invoked using the 'influxd' program. The default is 'run' if the command parameter is skipped. + +backup:: + Downloads a snapshot of a data node and saves it to disk. + +config:: + Displays the default configuration. This can also read an existing configuration file and output the default values for any missing fields. Default values and existing entries in a configuration file can be customized through environment variables. + +restore:: + Uses backups to restore the metastore, databases, retention policies, or specific shards. The InfluxDB process must not be running during a restore. + +run:: + Runs the InfluxDB server. This is the default command if none is specified. + +version:: + Displays the InfluxDB version, build branch, and git commit hash. + +SEE ALSO +-------- +*influxd-backup*(1), *influxd-config*(1), *influxd-restore*(1), *influxd-run*(1), *influxd-version*(1) + +include::footer.txt[] diff -Nru influxdb-0.10.0+dfsg1/man/influxd-version.txt influxdb-1.1.1+dfsg1/man/influxd-version.txt --- influxdb-0.10.0+dfsg1/man/influxd-version.txt 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/man/influxd-version.txt 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,17 @@ +influxd-version(1) +================== + +NAME +---- +influxd-version - Display the version of influxdb + +SYNOPSIS +-------- +[verse] +'influxd version' + +DESCRIPTION +----------- +'influxd version' will output the version of the InfluxDB server. + +include::footer.txt[] diff -Nru influxdb-0.10.0+dfsg1/man/influx_inspect.txt influxdb-1.1.1+dfsg1/man/influx_inspect.txt --- influxdb-0.10.0+dfsg1/man/influx_inspect.txt 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/man/influx_inspect.txt 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,77 @@ +influx_inspect(1) +================= + +NAME +---- +influx_inspect - Displays detailed information about InfluxDB data files + +SYNPOSIS +-------- +[verse] +'influx_inspect dumptsm' [options] +'influx_inspect export' [options] +'influx_inspect report' [options] + +DESCRIPTION +----------- +Displays detailed information about InfluxDB data files through one of the +following commands. + +*dumptsm*:: + Dumps low-level details about tsm1 files. + +*export*:: + Exports TSM files into InfluxDB line protocol format. + +*report*:: + Displays shard level report. + +DUMPTSM OPTIONS +--------------- +-all:: + Dump all data. Caution: This may print a lot of information. + +-blocks:: + Dump raw block data. 
+ +-filter-key :: + Only display index and block data that match this key substring. + +-index:: + Dump raw index data. + +EXPORT OPTIONS +-------------- +-compress:: + Compress the output. + +-db :: + The database to export. Optional. + +-rp :: + The retention policy to export. Optional. Requires the '-db ' option to be specified. + +-data-dir :: + Data storage path. Defaults to '~/.influxdb/data'. + +-wal-dir :: + Wal storage path. Defaults to '~/.influxdb/wal'. + +-start :: + The start time of the export. The timestamp is in RFC3339 format. Optional. + +-end :: + The end time of the export. The timestamp is in RFC3339 format. Optional. + +-out :: + Destination file to write exported data to. Defaults to '~/.influxdb/export'. + +REPORT OPTIONS +-------------- +-detailed:: + Report detailed cardinality estimates. + +-pattern :: + Include only files matching a pattern. + +include:footer.txt[] diff -Nru influxdb-0.10.0+dfsg1/man/influx_stress.txt influxdb-1.1.1+dfsg1/man/influx_stress.txt --- influxdb-0.10.0+dfsg1/man/influx_stress.txt 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/man/influx_stress.txt 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,52 @@ +influx_stress(1) +================ + +NAME +---- +influx_stress - Runs a stress test against one or multiple InfluxDB servers + +SYNOPSIS +-------- +[verse] +'influx_stress' [options] + +DESCRIPTION +----------- +Runs write and query stress tests against one or multiple InfluxDB servers to +create reproducible performance benchmarks against InfluxDB. + +OPTIONS +------- +-addr :: + IP address and port of the database where response times will persist. This + is not for specifying which database to test against. That option is located + inside of the configuration file. The default is 'http://localhost:8086'. + +-database :: + The database where response times will persist. This is not for specifying + which database to test against. See '-db' or the configuration file for that + option. The default is 'stress'. + +-retention-policy :: + The retention policy where response times will persist. This is not for + specifying which retention policy to test against. See the configuration file + for that option. The default is an empty string which will use the default + retention policy. + +-config :: + The stress configuration file. + +-cpuprofile :: + Write the cpu profile to the path. No cpu profile is written unless this is + used. This profiles 'influx_stress', not the InfluxDB server. + +-db :: + The target database within the test system for write and query load. + +-tags :: + A comma separated list of tags. + +-v2:: + Use version 2 of the stress tool. The default is to use version 1. + +include::footer.txt[] diff -Nru influxdb-0.10.0+dfsg1/man/influx_tsm.txt influxdb-1.1.1+dfsg1/man/influx_tsm.txt --- influxdb-0.10.0+dfsg1/man/influx_tsm.txt 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/man/influx_tsm.txt 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,58 @@ +influx_tsm(1) +============= + +NAME +---- +influx_tsm - Convert a database from b1 or bz1 format to tsm1 format + +SYNPOSIS +-------- +[verse] +'influx_tsm' [options] + +DESCRIPTION +----------- +This tool can be used to convert a database from the deprecated b1 or bz1 +formats to tsm1 format. The b1 and bz1 formats were deprecated in 0.10 and +removed in 0.12. + +This tool will backup the directories before conversion (if not disabled). 
The +backed-up files must be removed manually, generally after starting up the node +again to make sure all of the data has been converted correctly. + +To restore a backup after attempting to convert to tsm1, you shut down the +node, remove the converted directory, and copy the backed-up directory to the +original location. + +OPTIONS +------- +-backup :: + The location to back up the current databases. Must not be within the data + directory. + +-dbs :: + Comma-delimited list of databases to convert. The default is to convert all + databases. + +-debug :: + If set, HTTP debugging endpoints will be enabled on the given address. + +-interval :: + How often status updates are printed. Default is '5s'. + +-nobackup:: + Disable database backups. Not recommended. + +-parallel:: + Perform parallel conversions (up to GOMAXPROCS shards at once). + +-profile :: + Write a CPU profile to the path. + +-sz :: + Maximum size of individual TSM files. Defaults to 2147483648. + +-y:: + Don't ask, just convert. + +include::footer.txt[] diff -Nru influxdb-0.10.0+dfsg1/man/influx.txt influxdb-1.1.1+dfsg1/man/influx.txt --- influxdb-0.10.0+dfsg1/man/influx.txt 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/man/influx.txt 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,81 @@ +influx(1) +========= + +NAME +---- +influx - InfluxDB client + +SYNOPSIS +-------- +[verse] +'influx' [options] +'influx' -execute [options] +'influx' -import -path (-compressed) [options] +'influx' -version + +DESCRIPTION +----------- +'influx' is the command line program for interacting with an InfluxDB server. + +In the first form, the program starts a CLI that can be used to write data or query the database. The command line is described in *COMMAND LINE*. + +In the second form, this will execute a single command, usually a query. This is the equivalent of starting the command line, running one command, and then exiting. + +In the third form, this imports a previously exported database into the server. + +The fourth form outputs the version of the command line and then immediately exits. + +OPTIONS +------- +-host :: + Host to connect to. Default is localhost. + +-port :: + Port to use when connecting to the host. Default is 8086. + +-database :: + Database to use when connecting to the server. + +-username :: + Username to connect to the server. + +-password :: + Password to connect to the server. If left blank, this will prompt for a password. + +-ssl:: + Use HTTPS for requests. + +-unsafeSsl:: + Set this with '-ssl' to allow unsafe connections. + +-execute :: + Executes the command and exits. + +-format :: + Sets the format of the server responses. Default is column. + +-precision :: + Specifies the format of the timestamp. Default is ns. + +-consistency :: + Set the write consistency level. Default is one. + +-pretty:: + Turns on pretty-printed output for the JSON format. + +-import:: + Import a previous database export from a file. If specified, '-path ' must also be specified. + +-path :: + Path to the database export file to import. Must be used with '-import'. + +-pps :: + How many points per second the import will allow. By default, it is zero and will not throttle importing. + +-compressed:: + Set if the import file is compressed. Must be used with '-import'. + +-version:: + Outputs the version of the influx client.
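The '-execute' form is a thin wrapper around the HTTP query API exposed on the host and port given by '-host' and '-port'. A minimal sketch, outside this patch, of issuing the same one-shot query from Go; the '/query' endpoint and its 'q' parameter are assumptions about the InfluxDB 1.x HTTP API rather than anything defined in this man page:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
)

func main() {
	// Roughly equivalent to: influx -host localhost -port 8086 -execute 'SHOW DATABASES'
	// The /query endpoint and its "q" parameter are assumed from the InfluxDB 1.x HTTP API.
	params := url.Values{}
	params.Set("q", "SHOW DATABASES")

	resp, err := http.Get("http://localhost:8086/query?" + params.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // JSON response body
}
```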
+ +include::footer.txt[] diff -Nru influxdb-0.10.0+dfsg1/man/Makefile influxdb-1.1.1+dfsg1/man/Makefile --- influxdb-0.10.0+dfsg1/man/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/man/Makefile 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,41 @@ +#!/usr/bin/make -f + +DESTDIR = /usr/local + +MAN1_TXT = +MAN1_TXT += influxd.txt +MAN1_TXT += influxd-backup.txt +MAN1_TXT += influxd-config.txt +MAN1_TXT += influxd-restore.txt +MAN1_TXT += influxd-run.txt +MAN1_TXT += influxd-version.txt +MAN1_TXT += influx.txt +MAN1_TXT += influx_inspect.txt +MAN1_TXT += influx_stress.txt +MAN1_TXT += influx_tsm.txt + +MAN_TXT = $(MAN1_TXT) +MAN_XML = $(patsubst %.txt,%.xml,$(MAN_TXT)) + +DOC_MAN1 = $(patsubst %.txt,%.1,$(MAN1_TXT)) + +build: $(DOC_MAN1) + +install: build + @echo ' INSTALL $(DOC_MAN1)' && \ + mkdir -p $(DESTDIR)/share/man/man1 && \ + install -m 0644 $(DOC_MAN1) $(DESTDIR)/share/man/man1 + +clean: + rm -f $(MAN_XML) $(DOC_MAN1) + +%.xml : %.txt + @echo ' ASCIIDOC $@' && rm -f $@+ && \ + asciidoc -d manpage -b docbook -o $@+ $< && \ + mv $@+ $@ + +%.1 : %.xml + @echo ' XMLTO $@' && \ + xmlto man $< 2> /dev/null + +.PHONY: build install clean diff -Nru influxdb-0.10.0+dfsg1/man/README.md influxdb-1.1.1+dfsg1/man/README.md --- influxdb-0.10.0+dfsg1/man/README.md 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/man/README.md 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,38 @@ +# Building the Man Pages + +The man pages are created with `asciidoc`, `docbook`, and `xmlto`. + +## Debian/Ubuntu + +This is the easiest since Debian and Ubuntu automatically install the +dependencies correctly. + +```bash +$ sudo apt-get install -y build-essential asciidoc xmlto +``` + +You should then be able to run `make` and the man pages will be +produced. + +## Mac OS X + +Mac OS X also has the tools necessary to build the docs, but one of the +dependencies gets installed incorrectly and you need an environment +variable to run it correctly. + +Use Homebrew to install the dependencies. There might be other methods +to get the dependencies, but that's left up to the reader if they want +to use a different package manager. + +If you have Homebrew installed, you should already have the Xcode tools +and that should include `make`. + +```bash +$ brew install asciidoc xmlto +``` + +Then set the following environment variable every time you run `make`.
+ +```bash +export XML_CATALOG_FILES=/usr/local/etc/xml/catalog +``` diff -Nru influxdb-0.10.0+dfsg1/.mention-bot influxdb-1.1.1+dfsg1/.mention-bot --- influxdb-0.10.0+dfsg1/.mention-bot 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/.mention-bot 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,6 @@ +{ + "maxReviewers": 3, + "fileBlacklist": ["CHANGELOG.md"], + "userBlacklist": ["pauldix", "toddboom", "aviau", "mark-rushakoff"], + "requiredOrgs": ["influxdata"] +} diff -Nru influxdb-0.10.0+dfsg1/models/consistency.go influxdb-1.1.1+dfsg1/models/consistency.go --- influxdb-0.10.0+dfsg1/models/consistency.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/models/consistency.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,46 @@ +package models + +import ( + "errors" + "strings" +) + +// ConsistencyLevel represent a required replication criteria before a write can +// be returned as successful +type ConsistencyLevel int + +const ( + // ConsistencyLevelAny allows for hinted hand off, potentially no write happened yet + ConsistencyLevelAny ConsistencyLevel = iota + + // ConsistencyLevelOne requires at least one data node acknowledged a write + ConsistencyLevelOne + + // ConsistencyLevelQuorum requires a quorum of data nodes to acknowledge a write + ConsistencyLevelQuorum + + // ConsistencyLevelAll requires all data nodes to acknowledge a write + ConsistencyLevelAll +) + +var ( + // ErrInvalidConsistencyLevel is returned when parsing the string version + // of a consistency level. + ErrInvalidConsistencyLevel = errors.New("invalid consistency level") +) + +// ParseConsistencyLevel converts a consistency level string to the corresponding ConsistencyLevel const +func ParseConsistencyLevel(level string) (ConsistencyLevel, error) { + switch strings.ToLower(level) { + case "any": + return ConsistencyLevelAny, nil + case "one": + return ConsistencyLevelOne, nil + case "quorum": + return ConsistencyLevelQuorum, nil + case "all": + return ConsistencyLevelAll, nil + default: + return 0, ErrInvalidConsistencyLevel + } +} diff -Nru influxdb-0.10.0+dfsg1/models/inline_fnv.go influxdb-1.1.1+dfsg1/models/inline_fnv.go --- influxdb-0.10.0+dfsg1/models/inline_fnv.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/models/inline_fnv.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,27 @@ +package models // import "github.com/influxdata/influxdb/models" + +// from stdlib hash/fnv/fnv.go +const ( + prime64 = 1099511628211 + offset64 = 14695981039346656037 +) + +// InlineFNV64a is an alloc-free port of the standard library's fnv64a. 
+type InlineFNV64a uint64 + +func NewInlineFNV64a() InlineFNV64a { + return offset64 +} + +func (s *InlineFNV64a) Write(data []byte) (int, error) { + hash := uint64(*s) + for _, c := range data { + hash ^= uint64(c) + hash *= prime64 + } + *s = InlineFNV64a(hash) + return len(data), nil +} +func (s *InlineFNV64a) Sum64() uint64 { + return uint64(*s) +} diff -Nru influxdb-0.10.0+dfsg1/models/inline_fnv_test.go influxdb-1.1.1+dfsg1/models/inline_fnv_test.go --- influxdb-0.10.0+dfsg1/models/inline_fnv_test.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/models/inline_fnv_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,29 @@ +package models_test + +import ( + "hash/fnv" + "testing" + "testing/quick" + + "github.com/influxdata/influxdb/models" +) + +func TestInlineFNV64aEquivalenceFuzz(t *testing.T) { + f := func(data []byte) bool { + stdlibFNV := fnv.New64a() + stdlibFNV.Write(data) + want := stdlibFNV.Sum64() + + inlineFNV := models.NewInlineFNV64a() + inlineFNV.Write(data) + got := inlineFNV.Sum64() + + return want == got + } + cfg := &quick.Config{ + MaxCount: 10000, + } + if err := quick.Check(f, cfg); err != nil { + t.Fatal(err) + } +} diff -Nru influxdb-0.10.0+dfsg1/models/inline_strconv_parse.go influxdb-1.1.1+dfsg1/models/inline_strconv_parse.go --- influxdb-0.10.0+dfsg1/models/inline_strconv_parse.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/models/inline_strconv_parse.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,38 @@ +package models // import "github.com/influxdata/influxdb/models" + +import ( + "reflect" + "strconv" + "unsafe" +) + +// parseIntBytes is a zero-alloc wrapper around strconv.ParseInt. +func parseIntBytes(b []byte, base int, bitSize int) (i int64, err error) { + s := unsafeBytesToString(b) + return strconv.ParseInt(s, base, bitSize) +} + +// parseFloatBytes is a zero-alloc wrapper around strconv.ParseFloat. +func parseFloatBytes(b []byte, bitSize int) (float64, error) { + s := unsafeBytesToString(b) + return strconv.ParseFloat(s, bitSize) +} + +// parseBoolBytes is a zero-alloc wrapper around strconv.ParseBool. +func parseBoolBytes(b []byte) (bool, error) { + return strconv.ParseBool(unsafeBytesToString(b)) +} + +// unsafeBytesToString converts a []byte to a string without a heap allocation. +// +// It is unsafe, and is intended to prepare input to short-lived functions +// that require strings. 
+func unsafeBytesToString(in []byte) string { + src := *(*reflect.SliceHeader)(unsafe.Pointer(&in)) + dst := reflect.StringHeader{ + Data: src.Data, + Len: src.Len, + } + s := *(*string)(unsafe.Pointer(&dst)) + return s +} diff -Nru influxdb-0.10.0+dfsg1/models/inline_strconv_parse_test.go influxdb-1.1.1+dfsg1/models/inline_strconv_parse_test.go --- influxdb-0.10.0+dfsg1/models/inline_strconv_parse_test.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/models/inline_strconv_parse_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,103 @@ +package models + +import ( + "strconv" + "testing" + "testing/quick" +) + +func TestParseIntBytesEquivalenceFuzz(t *testing.T) { + f := func(b []byte, base int, bitSize int) bool { + exp, expErr := strconv.ParseInt(string(b), base, bitSize) + got, gotErr := parseIntBytes(b, base, bitSize) + + return exp == got && checkErrs(expErr, gotErr) + } + + cfg := &quick.Config{ + MaxCount: 10000, + } + + if err := quick.Check(f, cfg); err != nil { + t.Fatal(err) + } +} + +func TestParseIntBytesValid64bitBase10EquivalenceFuzz(t *testing.T) { + buf := []byte{} + f := func(n int64) bool { + buf = strconv.AppendInt(buf[:0], n, 10) + + exp, expErr := strconv.ParseInt(string(buf), 10, 64) + got, gotErr := parseIntBytes(buf, 10, 64) + + return exp == got && checkErrs(expErr, gotErr) + } + + cfg := &quick.Config{ + MaxCount: 10000, + } + + if err := quick.Check(f, cfg); err != nil { + t.Fatal(err) + } +} + +func TestParseFloatBytesEquivalenceFuzz(t *testing.T) { + f := func(b []byte, bitSize int) bool { + exp, expErr := strconv.ParseFloat(string(b), bitSize) + got, gotErr := parseFloatBytes(b, bitSize) + + return exp == got && checkErrs(expErr, gotErr) + } + + cfg := &quick.Config{ + MaxCount: 10000, + } + + if err := quick.Check(f, cfg); err != nil { + t.Fatal(err) + } +} + +func TestParseFloatBytesValid64bitEquivalenceFuzz(t *testing.T) { + buf := []byte{} + f := func(n float64) bool { + buf = strconv.AppendFloat(buf[:0], n, 'f', -1, 64) + + exp, expErr := strconv.ParseFloat(string(buf), 64) + got, gotErr := parseFloatBytes(buf, 64) + + return exp == got && checkErrs(expErr, gotErr) + } + + cfg := &quick.Config{ + MaxCount: 10000, + } + + if err := quick.Check(f, cfg); err != nil { + t.Fatal(err) + } +} + +func TestParseBoolBytesEquivalence(t *testing.T) { + var buf []byte + for _, s := range []string{"1", "t", "T", "TRUE", "true", "True", "0", "f", "F", "FALSE", "false", "False", "fail", "TrUe", "FAlSE", "numbers", ""} { + buf = append(buf[:0], s...) 
+ + exp, expErr := strconv.ParseBool(s) + got, gotErr := parseBoolBytes(buf) + + if got != exp || !checkErrs(expErr, gotErr) { + t.Errorf("Failed to parse boolean value %q correctly: wanted (%t, %v), got (%t, %v)", s, exp, expErr, got, gotErr) + } + } +} + +func checkErrs(a, b error) bool { + if (a == nil) != (b == nil) { + return false + } + + return a == nil || a.Error() == b.Error() +} diff -Nru influxdb-0.10.0+dfsg1/models/points.go influxdb-1.1.1+dfsg1/models/points.go --- influxdb-0.10.0+dfsg1/models/points.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/models/points.go 2016-12-06 21:36:15.000000000 +0000 @@ -1,17 +1,17 @@ -package models +package models // import "github.com/influxdata/influxdb/models" import ( "bytes" "encoding/binary" + "errors" "fmt" - "hash/fnv" "math" "sort" "strconv" "strings" "time" - "github.com/influxdb/influxdb/pkg/escape" + "github.com/influxdata/influxdb/pkg/escape" ) var ( @@ -25,6 +25,15 @@ ' ': []byte(`\ `), '=': []byte(`\=`), } + + ErrPointMustHaveAField = errors.New("point without fields is unsupported") + ErrInvalidNumber = errors.New("invalid number") + ErrInvalidPoint = errors.New("point is invalid") + ErrMaxKeyLengthExceeded = errors.New("max key length exceeded") +) + +const ( + MaxKeyLength = 65535 ) // Point defines the values that will be written to the database @@ -37,7 +46,6 @@ SetTags(tags Tags) Fields() Fields - AddField(name string, value interface{}) Time() time.Time SetTime(t time.Time) @@ -66,6 +74,48 @@ // is a timestamp associated with the point, then it will be rounded to the // given duration RoundedString(d time.Duration) string + + // Split will attempt to return multiple points with the same timestamp whose + // string representations are no longer than size. Points with a single field or + // a point without a timestamp may exceed the requested size. + Split(size int) []Point + + // Round will round the timestamp of the point to the given duration + Round(d time.Duration) + + // StringSize returns the length of the string that would be returned by String() + StringSize() int + + // AppendString appends the result of String() to the provided buffer and returns + // the result, potentially reducing string allocations + AppendString(buf []byte) []byte + + // FieldIterator retuns a FieldIterator that can be used to traverse the + // fields of a point without constructing the in-memory map + FieldIterator() FieldIterator +} + +type FieldType int + +const ( + Integer FieldType = iota + Float + Boolean + String + Empty +) + +type FieldIterator interface { + Next() bool + FieldKey() []byte + Type() FieldType + StringValue() string + IntegerValue() int64 + BooleanValue() bool + FloatValue() float64 + + Delete() + Reset() } // Points represents a sortable list of points by timestamp. @@ -98,6 +148,11 @@ // cached version of parsed name from key cachedName string + + // cached version of parsed tags + cachedTags Tags + + it fieldIterator } const ( @@ -129,27 +184,36 @@ return ParsePoints([]byte(buf)) } -func ParseKey(buf string) (string, Tags, error) { - _, keyBuf, err := scanKey([]byte(buf), 0) - tags := parseTags([]byte(buf)) - return string(keyBuf), tags, err +// ParseKey returns the measurement name and tags from a point. 
+func ParseKey(buf []byte) (string, Tags, error) { + // Ignore the error because scanMeasurement returns "missing fields" which we ignore + // when just parsing a key + state, i, _ := scanMeasurement(buf, 0) + + var tags Tags + if state == tagKeyState { + tags = parseTags(buf) + // scanMeasurement returns the location of the comma if there are tags, strip that off + return string(buf[:i-1]), tags, nil + } + return string(buf[:i]), tags, nil } // ParsePointsWithPrecision is similar to ParsePoints, but allows the // caller to provide a precision for time. func ParsePointsWithPrecision(buf []byte, defaultTime time.Time, precision string) ([]Point, error) { - points := []Point{} + points := make([]Point, 0, bytes.Count(buf, []byte{'\n'})+1) var ( pos int block []byte failed []string ) - for { + for pos < len(buf) { pos, block = scanLine(buf, pos) pos++ if len(block) == 0 { - break + continue } // lines which start with '#' are comments @@ -169,17 +233,13 @@ block = block[:len(block)-1] } - pt, err := parsePoint(block[start:len(block)], defaultTime, precision) + pt, err := parsePoint(block[start:], defaultTime, precision) if err != nil { failed = append(failed, fmt.Sprintf("unable to parse '%s': %v", string(block[start:len(block)]), err)) } else { points = append(points, pt) } - if pos >= len(buf) { - break - } - } if len(failed) > 0 { return points, fmt.Errorf("%s", strings.Join(failed, "\n")) @@ -200,6 +260,10 @@ return nil, fmt.Errorf("missing measurement") } + if len(key) > MaxKeyLength { + return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength) + } + // scan the second block is which is field1=value1[,field2=value2,...] pos, fields, err := scanFields(buf, pos) if err != nil { @@ -213,7 +277,6 @@ // scan the last block which is an optional integer timestamp pos, ts, err := scanTime(buf, pos) - if err != nil { return nil, err } @@ -228,7 +291,7 @@ pt.time = defaultTime pt.SetPrecision(precision) } else { - ts, err := strconv.ParseInt(string(ts), 10, 64) + ts, err := parseIntBytes(ts, 10, 64) if err != nil { return nil, err } @@ -236,6 +299,15 @@ if err != nil { return nil, err } + + // Determine if there are illegal non-whitespace characters after the + // timestamp block. + for pos < len(buf) { + if buf[pos] != ' ' { + return nil, ErrInvalidPoint + } + pos++ + } } return pt, nil } @@ -294,24 +366,24 @@ } } - // Now we know where the key region is within buf, and the locations of tags, we - // need to determine if duplicate tags exist and if the tags are sorted. This iterates - // 1/2 of the list comparing each end with each other, walking towards the center from - // both sides. - for j := 0; j < commas/2; j++ { + // Now we know where the key region is within buf, and the location of tags, we + // need to determine if duplicate tags exist and if the tags are sorted. This iterates + // over the list comparing each tag in the sequence with each other. + for j := 0; j < commas-1; j++ { // get the left and right tags _, left := scanTo(buf[indices[j]:indices[j+1]-1], 0, '=') - _, right := scanTo(buf[indices[commas-j-1]:indices[commas-j]-1], 0, '=') + _, right := scanTo(buf[indices[j+1]:indices[j+2]-1], 0, '=') - // If the tags are equal, then there are duplicate tags, and we should abort - if bytes.Equal(left, right) { - return i, buf[start:i], fmt.Errorf("duplicate tags") - } - - // If left is greater than right, the tags are not sorted. We must continue - // since their could be duplicate tags still. 
- if bytes.Compare(left, right) > 0 { + // If left is greater than right, the tags are not sorted. We do not have to + // continue because the short path no longer works. + // If the tags are equal, then there are duplicate tags, and we should abort. + // If the tags are not sorted, this pass may not find duplicate tags and we + // need to do a more exhaustive search later. + if cmp := bytes.Compare(left, right); cmp > 0 { sorted = false + break + } else if cmp == 0 { + return i, buf[start:i], fmt.Errorf("duplicate tags") } } @@ -337,6 +409,20 @@ pos += copy(b[pos:], v) } + // Check again for duplicate tags now that the tags are sorted. + for j := 0; j < commas-1; j++ { + // get the left and right tags + _, left := scanTo(buf[indices[j]:], 0, '=') + _, right := scanTo(buf[indices[j+1]:], 0, '=') + + // If the tags are equal, then there are duplicate tags, and we should abort. + // If the tags are not sorted, this pass may not find duplicate tags and we + // need to do a more exhaustive search later. + if bytes.Equal(left, right) { + return i, b, fmt.Errorf("duplicate tags") + } + } + return i, b, nil } @@ -357,7 +443,7 @@ // Check first byte of measurement, anything except a comma is fine. // It can't be a space, since whitespace is stripped prior to this // function call. - if buf[i] == ',' { + if i >= len(buf) || buf[i] == ',' { return -1, i, fmt.Errorf("missing measurement") } @@ -504,15 +590,6 @@ return bytes.Compare(a, b) < 0 } -func isFieldEscapeChar(b byte) bool { - for c := range escape.Codes { - if c == b { - return true - } - } - return false -} - // scanFields scans buf, starting at i for the fields section of a point. It returns // the ending position and the byte slice of the fields within buf func scanFields(buf []byte, i int) (int, []byte, error) { @@ -613,32 +690,34 @@ return i, buf[start:i], nil } -// scanTime scans buf, starting at i for the time section of a point. It returns -// the ending position and the byte slice of the fields within buf and error if the -// timestamp is not in the correct numeric format +// scanTime scans buf, starting at i for the time section of a point. It +// returns the ending position and the byte slice of the timestamp within buf +// and and error if the timestamp is not in the correct numeric format. func scanTime(buf []byte, i int) (int, []byte, error) { start := skipWhitespace(buf, i) i = start + for { // reached the end of buf? if i >= len(buf) { break } - // Timestamps should be integers, make sure they are so we don't need to actually - // parse the timestamp until needed - if buf[i] < '0' || buf[i] > '9' { - // Handle negative timestamps - if i == start && buf[i] == '-' { - i++ - continue - } - return i, buf[start:i], fmt.Errorf("bad timestamp") + // Reached end of block or trailing whitespace? + if buf[i] == '\n' || buf[i] == ' ' { + break } - // reached end of block? - if buf[i] == '\n' { - break + // Handle negative timestamps + if i == start && buf[i] == '-' { + i++ + continue + } + + // Timestamps should be integers, make sure they are so we don't need + // to actually parse the timestamp until needed. + if buf[i] < '0' || buf[i] > '9' { + return i, buf[start:i], fmt.Errorf("bad timestamp") } i++ } @@ -661,12 +740,12 @@ i++ // There must be more characters now, as just '-' is illegal. 
if i == len(buf) { - return i, fmt.Errorf("invalid number") + return i, ErrInvalidNumber } } // how many decimal points we've see - decimals := 0 + decimal := false // indicates the number is float in scientific notation scientific := false @@ -687,12 +766,11 @@ } if buf[i] == '.' { - decimals++ - } - - // Can't have more than 1 decimal (e.g. 1.1.1 should fail) - if decimals > 1 { - return i, fmt.Errorf("invalid number") + // Can't have more than 1 decimal (e.g. 1.1.1 should fail) + if decimal { + return i, ErrInvalidNumber + } + decimal = true } // `e` is valid for floats but not as the first char @@ -710,16 +788,32 @@ // NaN is an unsupported value if i+2 < len(buf) && (buf[i] == 'N' || buf[i] == 'n') { - return i, fmt.Errorf("invalid number") + return i, ErrInvalidNumber } if !isNumeric(buf[i]) { - return i, fmt.Errorf("invalid number") + return i, ErrInvalidNumber } i++ } - if isInt && (decimals > 0 || scientific) { - return i, fmt.Errorf("invalid number") + + if isInt && (decimal || scientific) { + return i, ErrInvalidNumber + } + + numericDigits := i - start + if isInt { + numericDigits-- + } + if decimal { + numericDigits-- + } + if buf[start] == '-' { + numericDigits-- + } + + if numericDigits == 0 { + return i, ErrInvalidNumber } // It's more common that numbers will be within min/max range for their type but we need to prevent @@ -729,19 +823,19 @@ if isInt { // Make sure the last char is an 'i' for integers (e.g. 9i10 is not valid) if buf[i-1] != 'i' { - return i, fmt.Errorf("invalid number") + return i, ErrInvalidNumber } // Parse the int to check bounds the number of digits could be larger than the max range // We subtract 1 from the index to remove the `i` from our tests if len(buf[start:i-1]) >= maxInt64Digits || len(buf[start:i-1]) >= minInt64Digits { - if _, err := strconv.ParseInt(string(buf[start:i-1]), 10, 64); err != nil { + if _, err := parseIntBytes(buf[start:i-1], 10, 64); err != nil { return i, fmt.Errorf("unable to parse integer %s: %s", buf[start:i-1], err) } } } else { // Parse the float to check bounds if it's scientific or the number of digits could be larger than the max range if scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits { - if _, err := strconv.ParseFloat(string(buf[start:i]), 10); err != nil { + if _, err := parseFloatBytes(buf[start:i], 10); err != nil { return i, fmt.Errorf("invalid float") } } @@ -888,8 +982,8 @@ break } - // reached end of block? - if buf[i] == stop && buf[i-1] != '\\' { + // Reached unescaped stop value? 
+ if buf[i] == stop && (i == 0 || buf[i-1] != '\\') { break } i++ @@ -943,13 +1037,9 @@ func scanFieldValue(buf []byte, i int) (int, []byte) { start := i quoted := false - for { - if i >= len(buf) { - break - } - - // Only escape char for a field value is a double-quote - if buf[i] == '\\' && i+1 < len(buf) && buf[i+1] == '"' { + for i < len(buf) { + // Only escape char for a field value is a double-quote and backslash + if buf[i] == '\\' && i+1 < len(buf) && (buf[i+1] == '"' || buf[i+1] == '\\') { i += 2 continue } @@ -985,7 +1075,7 @@ func escapeTag(in []byte) []byte { for b, esc := range tagEscapeCodes { - if bytes.Contains(in, []byte{b}) { + if bytes.IndexByte(in, b) != -1 { in = bytes.Replace(in, []byte{b}, esc, -1) } } @@ -993,17 +1083,21 @@ } func unescapeTag(in []byte) []byte { + if bytes.IndexByte(in, '\\') == -1 { + return in + } + for b, esc := range tagEscapeCodes { - if bytes.Contains(in, []byte{b}) { + if bytes.IndexByte(in, b) != -1 { in = bytes.Replace(in, esc, []byte{b}, -1) } } return in } -// escapeStringField returns a copy of in with any double quotes or +// EscapeStringField returns a copy of in with any double quotes or // backslashes with escaped values -func escapeStringField(in string) string { +func EscapeStringField(in string) string { var out []byte i := 0 for { @@ -1034,6 +1128,10 @@ // unescapeStringField returns a copy of in with any escaped double-quotes // or backslashes unescaped func unescapeStringField(in string) string { + if strings.IndexByte(in, '\\') == -1 { + return in + } + var out []byte i := 0 for { @@ -1061,37 +1159,67 @@ // NewPoint returns a new point with the given measurement name, tags, fields and timestamp. If // an unsupported field value (NaN) or out of range time is passed, this function returns an error. 
-func NewPoint(name string, tags Tags, fields Fields, time time.Time) (Point, error) { +func NewPoint(name string, tags Tags, fields Fields, t time.Time) (Point, error) { + key, err := pointKey(name, tags, fields, t) + if err != nil { + return nil, err + } + + return &point{ + key: key, + time: t, + fields: fields.MarshalBinary(), + }, nil +} + +// pointKey checks some basic requirements for valid points, and returns the +// key, along with an possible error +func pointKey(measurement string, tags Tags, fields Fields, t time.Time) ([]byte, error) { if len(fields) == 0 { - return nil, fmt.Errorf("Point without fields is unsupported") + return nil, ErrPointMustHaveAField } - if !time.IsZero() { - if err := CheckTime(time); err != nil { + + if !t.IsZero() { + if err := CheckTime(t); err != nil { return nil, err } } for key, value := range fields { - if fv, ok := value.(float64); ok { + switch value := value.(type) { + case float64: + // Ensure the caller validates and handles invalid field values + if math.IsNaN(value) { + return nil, fmt.Errorf("NaN is an unsupported value for field %s", key) + } + case float32: // Ensure the caller validates and handles invalid field values - if math.IsNaN(fv) { + if math.IsNaN(float64(value)) { return nil, fmt.Errorf("NaN is an unsupported value for field %s", key) } } + if len(key) == 0 { + return nil, fmt.Errorf("all fields must have non-empty names") + } } - return &point{ - key: MakeKey([]byte(name), tags), - time: time, - fields: fields.MarshalBinary(), - }, nil + key := MakeKey([]byte(measurement), tags) + if len(key) > MaxKeyLength { + return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength) + } + + return key, nil } +// NewPointFromBytes returns a new Point from a marshalled Point. 
func NewPointFromBytes(b []byte) (Point, error) { p := &point{} if err := p.UnmarshalBinary(b); err != nil { return nil, err } + if len(p.Fields()) == 0 { + return nil, ErrPointMustHaveAField + } return p, nil } @@ -1147,40 +1275,57 @@ p.time = t } +// Round implements Point.Round +func (p *point) Round(d time.Duration) { + p.time = p.time.Round(d) +} + // Tags returns the tag set for the point func (p *point) Tags() Tags { - return parseTags(p.key) + if p.cachedTags != nil { + return p.cachedTags + } + p.cachedTags = parseTags(p.key) + return p.cachedTags } func parseTags(buf []byte) Tags { - tags := map[string]string{} + if len(buf) == 0 { + return nil + } - if len(buf) != 0 { - pos, name := scanTo(buf, 0, ',') + pos, name := scanTo(buf, 0, ',') - // it's an empyt key, so there are no tags - if len(name) == 0 { - return tags - } + // it's an empty key, so there are no tags + if len(name) == 0 { + return nil + } - i := pos + 1 - var key, value []byte - for { - if i >= len(buf) { - break - } - i, key = scanTo(buf, i, '=') - i, value = scanTagValue(buf, i+1) + tags := make(Tags, 0, bytes.Count(buf, []byte(","))) + hasEscape := bytes.IndexByte(buf, '\\') != -1 - if len(value) == 0 { - continue - } + i := pos + 1 + var key, value []byte + for { + if i >= len(buf) { + break + } + i, key = scanTo(buf, i, '=') + i, value = scanTagValue(buf, i+1) - tags[string(unescapeTag(key))] = string(unescapeTag(value)) + if len(value) == 0 { + continue + } - i++ + if hasEscape { + tags = append(tags, Tag{Key: unescapeTag(key), Value: unescapeTag(value)}) + } else { + tags = append(tags, Tag{Key: key, Value: value}) } + + i++ } + return tags } @@ -1194,12 +1339,15 @@ // SetTags replaces the tags for the point func (p *point) SetTags(tags Tags) { p.key = MakeKey([]byte(p.Name()), tags) + p.cachedTags = tags } // AddTag adds or replaces a tag value for a point func (p *point) AddTag(key, value string) { tags := p.Tags() - tags[key] = value + tags = append(tags, Tag{Key: []byte(key), Value: []byte(value)}) + sort.Sort(tags) + p.cachedTags = tags p.key = MakeKey([]byte(p.Name()), tags) } @@ -1212,14 +1360,6 @@ return p.cachedFields } -// AddField adds or replaces a field value for a point -func (p *point) AddField(name string, value interface{}) { - fields := p.Fields() - fields[name] = value - p.fields = fields.MarshalBinary() - p.cachedFields = nil -} - // SetPrecision will round a time to the specified precision func (p *point) SetPrecision(precision string) { switch precision { @@ -1244,30 +1384,73 @@ return string(p.Key()) + " " + string(p.fields) + " " + strconv.FormatInt(p.UnixNano(), 10) } -func (p *point) MarshalBinary() ([]byte, error) { - b := u32tob(uint32(len(p.Key()))) - b = append(b, p.Key()...) +// AppendString implements Point.AppendString +func (p *point) AppendString(buf []byte) []byte { + buf = append(buf, p.key...) + buf = append(buf, ' ') + buf = append(buf, p.fields...) + + if !p.time.IsZero() { + buf = append(buf, ' ') + buf = strconv.AppendInt(buf, p.UnixNano(), 10) + } + + return buf +} - b = append(b, u32tob(uint32(len(p.fields)))...) - b = append(b, p.fields...) 
+func (p *point) StringSize() int { + size := len(p.key) + len(p.fields) + 1 + if !p.time.IsZero() { + digits := 1 // even "0" has one digit + t := p.UnixNano() + if t < 0 { + // account for negative sign, then negate + digits++ + t = -t + } + for t > 9 { // already accounted for one digit + digits++ + t /= 10 + } + size += digits + 1 // digits and a space + } + + return size +} + +func (p *point) MarshalBinary() ([]byte, error) { tb, err := p.time.MarshalBinary() if err != nil { return nil, err } - b = append(b, tb...) + + b := make([]byte, 8+len(p.key)+len(p.fields)+len(tb)) + i := 0 + + binary.BigEndian.PutUint32(b[i:], uint32(len(p.key))) + i += 4 + + i += copy(b[i:], p.key) + + binary.BigEndian.PutUint32(b[i:i+4], uint32(len(p.fields))) + i += 4 + + i += copy(b[i:], p.fields) + + copy(b[i:], tb) return b, nil } func (p *point) UnmarshalBinary(b []byte) error { var i int - keyLen := int(btou32(b[:4])) + keyLen := int(binary.BigEndian.Uint32(b[:4])) i += int(4) p.key = b[i : i+keyLen] i += keyLen - fieldLen := int(btou32(b[i : i+4])) + fieldLen := int(binary.BigEndian.Uint32(b[i : i+4])) i += int(4) p.fields = b[i : i+fieldLen] @@ -1295,11 +1478,28 @@ } func (p *point) unmarshalBinary() Fields { - return newFieldsFromBinary(p.fields) + iter := p.FieldIterator() + fields := make(Fields, 8) + for iter.Next() { + if len(iter.FieldKey()) == 0 { + continue + } + switch iter.Type() { + case Float: + fields[string(iter.FieldKey())] = iter.FloatValue() + case Integer: + fields[string(iter.FieldKey())] = iter.IntegerValue() + case String: + fields[string(iter.FieldKey())] = iter.StringValue() + case Boolean: + fields[string(iter.FieldKey())] = iter.BooleanValue() + } + } + return fields } func (p *point) HashID() uint64 { - h := fnv.New64a() + h := NewInlineFNV64a() h.Write(p.key) sum := h.Sum64() return sum @@ -1309,50 +1509,176 @@ return p.Time().UnixNano() } -// Tags represents a mapping between a Point's tag names and their -// values. -type Tags map[string]string +func (p *point) Split(size int) []Point { + if p.time.IsZero() || len(p.String()) <= size { + return []Point{p} + } + + // key string, timestamp string, spaces + size -= len(p.key) + len(strconv.FormatInt(p.time.UnixNano(), 10)) + 2 + + var points []Point + var start, cur int + + for cur < len(p.fields) { + end, _ := scanTo(p.fields, cur, '=') + end, _ = scanFieldValue(p.fields, end+1) + + if cur > start && end-start > size { + points = append(points, &point{ + key: p.key, + time: p.time, + fields: p.fields[start : cur-1], + }) + start = cur + } + + cur = end + 1 + } + + points = append(points, &point{ + key: p.key, + time: p.time, + fields: p.fields[start:], + }) + + return points +} + +// Tag represents a single key/value tag pair. +type Tag struct { + Key []byte + Value []byte +} + +// Tags represents a sorted list of tags. +type Tags []Tag + +// NewTags returns a new Tags from a map. +func NewTags(m map[string]string) Tags { + if len(m) == 0 { + return nil + } + a := make(Tags, 0, len(m)) + for k, v := range m { + a = append(a, Tag{Key: []byte(k), Value: []byte(v)}) + } + sort.Sort(a) + return a +} + +func (a Tags) Len() int { return len(a) } +func (a Tags) Less(i, j int) bool { return bytes.Compare(a[i].Key, a[j].Key) == -1 } +func (a Tags) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// Get returns the value for a key. +func (a Tags) Get(key []byte) []byte { + // OPTIMIZE: Use sort.Search if tagset is large. 
+ + for _, t := range a { + if bytes.Equal(t.Key, key) { + return t.Value + } + } + return nil +} + +// GetString returns the string value for a string key. +func (a Tags) GetString(key string) string { + return string(a.Get([]byte(key))) +} + +// Set sets the value for a key. +func (a *Tags) Set(key, value []byte) { + for _, t := range *a { + if bytes.Equal(t.Key, key) { + t.Value = value + return + } + } + *a = append(*a, Tag{Key: key, Value: value}) + sort.Sort(*a) +} + +// SetString sets the string value for a string key. +func (a *Tags) SetString(key, value string) { + a.Set([]byte(key), []byte(value)) +} + +// Delete removes a tag by key. +func (a *Tags) Delete(key []byte) { + for i, t := range *a { + if bytes.Equal(t.Key, key) { + copy((*a)[i:], (*a)[i+1:]) + (*a)[len(*a)-1] = Tag{} + *a = (*a)[:len(*a)-1] + return + } + } +} + +// Map returns a map representation of the tags. +func (a Tags) Map() map[string]string { + m := make(map[string]string, len(a)) + for _, t := range a { + m[string(t.Key)] = string(t.Value) + } + return m +} + +// Merge merges the tags combining the two. If both define a tag with the +// same key, the merged value overwrites the old value. +// A new map is returned. +func (a Tags) Merge(other map[string]string) Tags { + merged := make(map[string]string, len(a)+len(other)) + for _, t := range a { + merged[string(t.Key)] = string(t.Value) + } + for k, v := range other { + merged[k] = v + } + return NewTags(merged) +} // HashKey hashes all of a tag's keys. -func (t Tags) HashKey() []byte { +func (a Tags) HashKey() []byte { // Empty maps marshal to empty bytes. - if len(t) == 0 { + if len(a) == 0 { return nil } - escaped := Tags{} - for k, v := range t { - ek := escapeTag([]byte(k)) - ev := escapeTag([]byte(v)) + escaped := make(Tags, 0, len(a)) + for _, t := range a { + ek := escapeTag(t.Key) + ev := escapeTag(t.Value) if len(ev) > 0 { - escaped[string(ek)] = string(ev) + escaped = append(escaped, Tag{Key: ek, Value: ev}) } } // Extract keys and determine final size. sz := len(escaped) + (len(escaped) * 2) // separators - keys := make([]string, len(escaped)+1) - i := 0 - for k, v := range escaped { - keys[i] = k - i++ - sz += len(k) + len(v) + keys := make([][]byte, len(escaped)+1) + for i, t := range escaped { + keys[i] = t.Key + sz += len(t.Key) + len(t.Value) } - keys = keys[:i] - sort.Strings(keys) + keys = keys[:len(escaped)] + sort.Sort(byteSlices(keys)) + // Generate marshaled bytes. b := make([]byte, sz) buf := b idx := 0 - for _, k := range keys { + for i, k := range keys { buf[idx] = ',' idx++ copy(buf[idx:idx+len(k)], k) idx += len(k) buf[idx] = '=' idx++ - v := escaped[k] + v := escaped[i].Value copy(buf[idx:idx+len(v)], v) idx += len(v) } @@ -1366,173 +1692,225 @@ func parseNumber(val []byte) (interface{}, error) { if val[len(val)-1] == 'i' { val = val[:len(val)-1] - return strconv.ParseInt(string(val), 10, 64) + return parseIntBytes(val, 10, 64) } for i := 0; i < len(val); i++ { // If there is a decimal or an N (NaN), I (Inf), parse as float if val[i] == '.' 
|| val[i] == 'N' || val[i] == 'n' || val[i] == 'I' || val[i] == 'i' || val[i] == 'e' { - return strconv.ParseFloat(string(val), 64) + return parseFloatBytes(val, 64) } if val[i] < '0' && val[i] > '9' { return string(val), nil } } - return strconv.ParseFloat(string(val), 64) + return parseFloatBytes(val, 64) } -func newFieldsFromBinary(buf []byte) Fields { - fields := Fields{} - var ( - i int - name, valueBuf []byte - value interface{} - err error - ) - for { - if i >= len(buf) { - break - } +func (p *point) FieldIterator() FieldIterator { + p.Reset() + return p +} - i, name = scanTo(buf, i, '=') - if len(name) == 0 { - continue - } - name = escape.Unescape(name) +type fieldIterator struct { + start, end int + key, keybuf []byte + valueBuf []byte + fieldType FieldType +} - i, valueBuf = scanFieldValue(buf, i+1) - if len(valueBuf) == 0 { - fields[string(name)] = nil - continue - } +func (p *point) Next() bool { + p.it.start = p.it.end + if p.it.start >= len(p.fields) { + return false + } - // If the first char is a double-quote, then unmarshal as string - if valueBuf[0] == '"' { - value = unescapeStringField(string(valueBuf[1 : len(valueBuf)-1])) - // Check for numeric characters and special NaN or Inf - } else if (valueBuf[0] >= '0' && valueBuf[0] <= '9') || valueBuf[0] == '-' || valueBuf[0] == '+' || valueBuf[0] == '.' || - valueBuf[0] == 'N' || valueBuf[0] == 'n' || // NaN - valueBuf[0] == 'I' || valueBuf[0] == 'i' { // Inf - - value, err = parseNumber(valueBuf) - if err != nil { - panic(fmt.Sprintf("unable to parse number value '%v': %v", string(valueBuf), err)) - } + p.it.end, p.it.key = scanTo(p.fields, p.it.start, '=') + if escape.IsEscaped(p.it.key) { + p.it.keybuf = escape.AppendUnescaped(p.it.keybuf[:0], p.it.key) + p.it.key = p.it.keybuf + } - // Otherwise parse it as bool + p.it.end, p.it.valueBuf = scanFieldValue(p.fields, p.it.end+1) + p.it.end++ + + if len(p.it.valueBuf) == 0 { + p.it.fieldType = Empty + return true + } + + c := p.it.valueBuf[0] + + if c == '"' { + p.it.fieldType = String + return true + } + + if strings.IndexByte(`0123456789-.nNiI`, c) >= 0 { + if p.it.valueBuf[len(p.it.valueBuf)-1] == 'i' { + p.it.fieldType = Integer + p.it.valueBuf = p.it.valueBuf[:len(p.it.valueBuf)-1] } else { - value, err = strconv.ParseBool(string(valueBuf)) - if err != nil { - panic(fmt.Sprintf("unable to parse bool value '%v': %v\n", string(valueBuf), err)) - } + p.it.fieldType = Float } - fields[string(name)] = value - i++ + return true } - return fields + + // to keep the same behavior that currently exists, default to boolean + p.it.fieldType = Boolean + return true +} + +func (p *point) FieldKey() []byte { + return p.it.key +} + +func (p *point) Type() FieldType { + return p.it.fieldType +} + +func (p *point) StringValue() string { + return unescapeStringField(string(p.it.valueBuf[1 : len(p.it.valueBuf)-1])) +} + +func (p *point) IntegerValue() int64 { + n, err := parseIntBytes(p.it.valueBuf, 10, 64) + if err != nil { + panic(fmt.Sprintf("unable to parse integer value %q: %v", p.it.valueBuf, err)) + } + return n +} + +func (p *point) BooleanValue() bool { + b, err := parseBoolBytes(p.it.valueBuf) + if err != nil { + panic(fmt.Sprintf("unable to parse bool value %q: %v", p.it.valueBuf, err)) + } + return b +} + +func (p *point) FloatValue() float64 { + f, err := parseFloatBytes(p.it.valueBuf, 64) + if err != nil { + // panic because that's what the non-iterator code does + panic(fmt.Sprintf("unable to parse floating point value %q: %v", p.it.valueBuf, err)) + } + return f +} + 
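The value accessors above are the complete surface of the new FieldIterator. A minimal sketch, outside this patch, of walking a parsed point's fields through that iterator instead of materializing the Fields map (assuming the github.com/influxdata/influxdb/models import path used throughout this change):

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/models"
)

func main() {
	// Parse one line-protocol point carrying a float, integer, boolean and string field.
	pts, err := models.ParsePointsString(`cpu,host=serverA value=0.64,count=3i,up=true,msg="ok" 1000000000`)
	if err != nil {
		panic(err)
	}

	// Walk the fields without building the map returned by Fields().
	it := pts[0].FieldIterator()
	for it.Next() {
		switch it.Type() {
		case models.Float:
			fmt.Printf("%s = %v (float)\n", it.FieldKey(), it.FloatValue())
		case models.Integer:
			fmt.Printf("%s = %v (integer)\n", it.FieldKey(), it.IntegerValue())
		case models.Boolean:
			fmt.Printf("%s = %v (boolean)\n", it.FieldKey(), it.BooleanValue())
		case models.String:
			fmt.Printf("%s = %q (string)\n", it.FieldKey(), it.StringValue())
		}
	}
}
```

Delete() and Reset() from the same interface can then drop the current field in place or restart the walk.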
+func (p *point) Delete() { + switch { + case p.it.end == p.it.start: + case p.it.end >= len(p.fields): + p.fields = p.fields[:p.it.start] + case p.it.start == 0: + p.fields = p.fields[p.it.end:] + default: + p.fields = append(p.fields[:p.it.start], p.fields[p.it.end:]...) + } + + p.it.end = p.it.start + p.it.key = nil + p.it.valueBuf = nil + p.it.fieldType = Empty +} + +func (p *point) Reset() { + p.it.fieldType = Empty + p.it.key = nil + p.it.valueBuf = nil + p.it.start = 0 + p.it.end = 0 } // MarshalBinary encodes all the fields to their proper type and returns the binary // represenation // NOTE: uint64 is specifically not supported due to potential overflow when we decode // again later to an int64 +// NOTE2: uint is accepted, and may be 64 bits, and is for some reason accepted... func (p Fields) MarshalBinary() []byte { - b := []byte{} - keys := make([]string, len(p)) - i := 0 + var b []byte + keys := make([]string, 0, len(p)) + for k := range p { - keys[i] = k - i++ + keys = append(keys, k) } - sort.Strings(keys) - for _, k := range keys { - v := p[k] - b = append(b, []byte(escape.String(k))...) - b = append(b, '=') - switch t := v.(type) { - case int: - b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) - b = append(b, 'i') - case int8: - b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) - b = append(b, 'i') - case int16: - b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) - b = append(b, 'i') - case int32: - b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) - b = append(b, 'i') - case int64: - b = append(b, []byte(strconv.FormatInt(t, 10))...) - b = append(b, 'i') - case uint: - b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) - b = append(b, 'i') - case uint8: - b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) - b = append(b, 'i') - case uint16: - b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) - b = append(b, 'i') - case uint32: - b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) - b = append(b, 'i') - case float32: - val := []byte(strconv.FormatFloat(float64(t), 'f', -1, 32)) - b = append(b, val...) - case float64: - val := []byte(strconv.FormatFloat(t, 'f', -1, 64)) - b = append(b, val...) - case bool: - b = append(b, []byte(strconv.FormatBool(t))...) - case []byte: - b = append(b, t...) - case string: - b = append(b, '"') - b = append(b, []byte(escapeStringField(t))...) - b = append(b, '"') - case nil: - // skip - default: - // Can't determine the type, so convert to string - b = append(b, '"') - b = append(b, []byte(escapeStringField(fmt.Sprintf("%v", v)))...) - b = append(b, '"') + // Not really necessary, can probably be removed. + sort.Strings(keys) + for i, k := range keys { + if i > 0 { + b = append(b, ',') } - b = append(b, ',') + b = appendField(b, k, p[k]) } - if len(b) > 0 { - return b[0 : len(b)-1] - } - return b -} - -type indexedSlice struct { - indices []int - b []byte -} -func (s *indexedSlice) Less(i, j int) bool { - _, a := scanTo(s.b, s.indices[i], '=') - _, b := scanTo(s.b, s.indices[j], '=') - return bytes.Compare(a, b) < 0 + return b } -func (s *indexedSlice) Swap(i, j int) { - s.indices[i], s.indices[j] = s.indices[j], s.indices[i] -} +func appendField(b []byte, k string, v interface{}) []byte { + b = append(b, []byte(escape.String(k))...) 
+ b = append(b, '=') + + // check popular types first + switch v := v.(type) { + case float64: + b = strconv.AppendFloat(b, v, 'f', -1, 64) + case int64: + b = strconv.AppendInt(b, v, 10) + b = append(b, 'i') + case string: + b = append(b, '"') + b = append(b, []byte(EscapeStringField(v))...) + b = append(b, '"') + case bool: + b = strconv.AppendBool(b, v) + case int32: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case int16: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case int8: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case int: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case uint32: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case uint16: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case uint8: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + // TODO: 'uint' should be considered just as "dangerous" as a uint64, + // perhaps the value should be checked and capped at MaxInt64? We could + // then include uint64 as an accepted value + case uint: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case float32: + b = strconv.AppendFloat(b, float64(v), 'f', -1, 32) + case []byte: + b = append(b, v...) + case nil: + // skip + default: + // Can't determine the type, so convert to string + b = append(b, '"') + b = append(b, []byte(EscapeStringField(fmt.Sprintf("%v", v)))...) + b = append(b, '"') -func (s *indexedSlice) Len() int { - return len(s.indices) -} + } -func u32tob(v uint32) []byte { - b := make([]byte, 4) - binary.BigEndian.PutUint32(b, v) return b } -func btou32(b []byte) uint32 { - return uint32(binary.BigEndian.Uint32(b)) -} +type byteSlices [][]byte + +func (a byteSlices) Len() int { return len(a) } +func (a byteSlices) Less(i, j int) bool { return bytes.Compare(a[i], a[j]) == -1 } +func (a byteSlices) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff -Nru influxdb-0.10.0+dfsg1/models/points_test.go influxdb-1.1.1+dfsg1/models/points_test.go --- influxdb-0.10.0+dfsg1/models/points_test.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/models/points_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -11,11 +11,19 @@ "testing" "time" - "github.com/influxdb/influxdb/models" + "github.com/influxdata/influxdb/models" ) var ( - tags = models.Tags{"foo": "bar", "apple": "orange", "host": "serverA", "region": "uswest"} + tags = models.NewTags(map[string]string{"foo": "bar", "apple": "orange", "host": "serverA", "region": "uswest"}) + fields = models.Fields{ + "int64": int64(math.MaxInt64), + "uint32": uint32(math.MaxUint32), + "string": "String field that has a decent length, probably some log message or something", + "boolean": false, + "float64-tiny": float64(math.SmallestNonzeroFloat64), + "float64-large": float64(math.MaxFloat64), + } maxFloat64 = strconv.FormatFloat(math.MaxFloat64, 'f', 1, 64) minFloat64 = strconv.FormatFloat(-math.MaxFloat64, 'f', 1, 64) ) @@ -35,6 +43,72 @@ } } +func TestPoint_StringSize(t *testing.T) { + testPoint_cube(t, func(p models.Point) { + l := p.StringSize() + s := p.String() + + if l != len(s) { + t.Errorf("Incorrect length for %q. 
got %v, exp %v", s, l, len(s)) + } + }) + +} + +func TestPoint_AppendString(t *testing.T) { + testPoint_cube(t, func(p models.Point) { + got := p.AppendString(nil) + exp := []byte(p.String()) + + if !reflect.DeepEqual(exp, got) { + t.Errorf("AppendString() didn't match String(): got %v, exp %v", got, exp) + } + }) +} + +func testPoint_cube(t *testing.T, f func(p models.Point)) { + // heard of a table-driven test? let's make a cube-driven test... + tagList := []models.Tags{nil, {models.Tag{Key: []byte("foo"), Value: []byte("bar")}}, tags} + fieldList := []models.Fields{{"a": 42.0}, {"a": 42, "b": "things"}, fields} + timeList := []time.Time{time.Time{}, time.Unix(0, 0), time.Unix(-34526, 0), time.Unix(231845, 0), time.Now()} + + for _, tagSet := range tagList { + for _, fieldSet := range fieldList { + for _, pointTime := range timeList { + p, err := models.NewPoint("test", tagSet, fieldSet, pointTime) + if err != nil { + t.Errorf("unexpected error creating point: %v", err) + continue + } + + f(p) + } + } + } +} + +var p models.Point + +func BenchmarkNewPoint(b *testing.B) { + ts := time.Now() + for i := 0; i < b.N; i++ { + p, _ = models.NewPoint("measurement", tags, fields, ts) + } +} + +func BenchmarkParsePointNoTags5000(b *testing.B) { + var batch [5000]string + for i := 0; i < len(batch); i++ { + batch[i] = `cpu value=1i 1000000000` + } + lines := strings.Join(batch[:], "\n") + b.ResetTimer() + for i := 0; i < b.N; i++ { + models.ParsePoints([]byte(lines)) + b.SetBytes(int64(len(lines))) + } +} + func BenchmarkParsePointNoTags(b *testing.B) { line := `cpu value=1i 1000000000` for i := 0; i < b.N; i++ { @@ -112,6 +186,13 @@ } } +func BenchmarkParseKey(b *testing.B) { + line := `cpu,region=us-west,host=serverA,env=prod,target=servers,zone=1c,tag1=value1,tag2=value2,tag3=value3,tag4=value4,tag5=value5` + for i := 0; i < b.N; i++ { + models.ParseKey([]byte(line)) + } +} + // TestPoint wraps a models.Point but also makes available the raw // arguments to the Point. // @@ -156,9 +237,9 @@ t.Errorf(`ParsePoints("%s") tags mismatch. got %v, exp %v`, line, pts[0].Tags(), exp) } - for tag, value := range pts[0].Tags() { - if value != point.RawTags[tag] { - t.Errorf(`ParsePoints("%s") tags mismatch. got %v, exp %v`, line, value, point.RawTags[tag]) + for _, tag := range pts[0].Tags() { + if !bytes.Equal(tag.Value, point.RawTags.Get(tag.Key)) { + t.Errorf(`ParsePoints("%s") tags mismatch. got %s, exp %s`, line, tag.Value, point.RawTags.Get(tag.Key)) } } @@ -362,17 +443,22 @@ } func TestParsePointBadNumber(t *testing.T) { - _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1a`) - if err == nil { - t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=1a`) - } - _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=1ii`) - if err == nil { - t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=1ii`) - } - _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=1.0i`) - if err == nil { - t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=1.0i`) + for _, tt := range []string{ + "cpu v=- ", + "cpu v=-i ", + "cpu v=-. ", + "cpu v=. 
", + "cpu v=1.0i ", + "cpu v=1ii ", + "cpu v=1a ", + "cpu v=-e-e-e ", + "cpu v=42+3 ", + "cpu v= ", + } { + _, err := models.ParsePointsString(tt) + if err == nil { + t.Errorf("Point %q should be invalid", tt) + } } } @@ -581,12 +667,53 @@ } } +func TestParsePointWhitespace(t *testing.T) { + examples := []string{ + `cpu value=1.0 1257894000000000000`, + `cpu value=1.0 1257894000000000000`, + `cpu value=1.0 1257894000000000000`, + `cpu value=1.0 1257894000000000000 `, + `cpu value=1.0 1257894000000000000 +`, + `cpu value=1.0 1257894000000000000 +`, + } + + expPoint := NewTestPoint("cpu", models.Tags{}, models.Fields{"value": 1.0}, time.Unix(0, 1257894000000000000)) + for i, example := range examples { + pts, err := models.ParsePoints([]byte(example)) + if err != nil { + t.Fatalf(`[Example %d] ParsePoints("%s") error. got %v, exp nil`, i, example, err) + } + + if got, exp := len(pts), 1; got != exp { + t.Fatalf("[Example %d] got %d points, expected %d", i, got, exp) + } + + if got, exp := pts[0].Name(), expPoint.Name(); got != exp { + t.Fatalf("[Example %d] got %v measurement, expected %v", i, got, exp) + } + + if got, exp := len(pts[0].Fields()), len(expPoint.Fields()); got != exp { + t.Fatalf("[Example %d] got %d fields, expected %d", i, got, exp) + } + + if got, exp := pts[0].Fields()["value"], expPoint.Fields()["value"]; got != exp { + t.Fatalf(`[Example %d] got %v for field "value", expected %v`, i, got, exp) + } + + if got, exp := pts[0].Time().UnixNano(), expPoint.Time().UnixNano(); got != exp { + t.Fatalf(`[Example %d] got %d time, expected %d`, i, got, exp) + } + } +} + func TestParsePointUnescape(t *testing.T) { // commas in measurement name test(t, `foo\,bar value=1i`, NewTestPoint( "foo,bar", // comma in the name - models.Tags{}, + models.NewTags(map[string]string{}), models.Fields{ "value": int64(1), }, @@ -596,9 +723,9 @@ test(t, `cpu\,main,regions=east value=1.0`, NewTestPoint( "cpu,main", // comma in the name - models.Tags{ + models.NewTags(map[string]string{ "regions": "east", - }, + }), models.Fields{ "value": 1.0, }, @@ -608,9 +735,9 @@ test(t, `cpu\ load,region=east value=1.0`, NewTestPoint( "cpu load", // space in the name - models.Tags{ + models.NewTags(map[string]string{ "region": "east", - }, + }), models.Fields{ "value": 1.0, }, @@ -620,9 +747,9 @@ test(t, `cpu\=load,region=east value=1.0`, NewTestPoint( `cpu\=load`, // backslash is literal - models.Tags{ + models.NewTags(map[string]string{ "region": "east", - }, + }), models.Fields{ "value": 1.0, }, @@ -632,9 +759,9 @@ test(t, `cpu=load,region=east value=1.0`, NewTestPoint( `cpu=load`, // literal equals is fine in measurement name - models.Tags{ + models.NewTags(map[string]string{ "region": "east", - }, + }), models.Fields{ "value": 1.0, }, @@ -643,9 +770,9 @@ // commas in tag names test(t, `cpu,region\,zone=east value=1.0`, NewTestPoint("cpu", - models.Tags{ + models.NewTags(map[string]string{ "region,zone": "east", // comma in the tag key - }, + }), models.Fields{ "value": 1.0, }, @@ -654,9 +781,9 @@ // spaces in tag name test(t, `cpu,region\ zone=east value=1.0`, NewTestPoint("cpu", - models.Tags{ + models.NewTags(map[string]string{ "region zone": "east", // space in the tag name - }, + }), models.Fields{ "value": 1.0, }, @@ -665,9 +792,9 @@ // backslash with escaped equals in tag name test(t, `cpu,reg\\=ion=east value=1.0`, NewTestPoint("cpu", - models.Tags{ + models.NewTags(map[string]string{ `reg\=ion`: "east", - }, + }), models.Fields{ "value": 1.0, }, @@ -676,9 +803,9 @@ // space is tag name test(t, `cpu,\ 
=east value=1.0`, NewTestPoint("cpu", - models.Tags{ + models.NewTags(map[string]string{ " ": "east", // tag name is single space - }, + }), models.Fields{ "value": 1.0, }, @@ -687,9 +814,9 @@ // commas in tag values test(t, `cpu,regions=east\,west value=1.0`, NewTestPoint("cpu", - models.Tags{ + models.NewTags(map[string]string{ "regions": "east,west", // comma in the tag value - }, + }), models.Fields{ "value": 1.0, }, @@ -699,9 +826,9 @@ test(t, `cpu,regions=\\ east value=1.0`, NewTestPoint( "cpu", - models.Tags{ + models.NewTags(map[string]string{ "regions": `\ east`, - }, + }), models.Fields{ "value": 1.0, }, @@ -711,9 +838,9 @@ test(t, `cpu,regions=eas\\ t value=1.0`, NewTestPoint( "cpu", - models.Tags{ + models.NewTags(map[string]string{ "regions": `eas\ t`, - }, + }), models.Fields{ "value": 1.0, }, @@ -723,9 +850,9 @@ test(t, `cpu,regions=east\\ value=1.0`, NewTestPoint( "cpu", - models.Tags{ + models.NewTags(map[string]string{ "regions": `east\ `, - }, + }), models.Fields{ "value": 1.0, }, @@ -734,9 +861,9 @@ // spaces in tag values test(t, `cpu,regions=east\ west value=1.0`, NewTestPoint("cpu", - models.Tags{ + models.NewTags(map[string]string{ "regions": "east west", // comma in the tag value - }, + }), models.Fields{ "value": 1.0, }, @@ -745,9 +872,9 @@ // commas in field keys test(t, `cpu,regions=east value\,ms=1.0`, NewTestPoint("cpu", - models.Tags{ + models.NewTags(map[string]string{ "regions": "east", - }, + }), models.Fields{ "value,ms": 1.0, // comma in the field keys }, @@ -756,9 +883,9 @@ // spaces in field keys test(t, `cpu,regions=east value\ ms=1.0`, NewTestPoint("cpu", - models.Tags{ + models.NewTags(map[string]string{ "regions": "east", - }, + }), models.Fields{ "value ms": 1.0, // comma in the field keys }, @@ -767,10 +894,10 @@ // tag with no value test(t, `cpu,regions=east value="1"`, NewTestPoint("cpu", - models.Tags{ + models.NewTags(map[string]string{ "regions": "east", "foobar": "", - }, + }), models.Fields{ "value": "1", }, @@ -779,9 +906,9 @@ // commas in field values test(t, `cpu,regions=east value="1,0"`, NewTestPoint("cpu", - models.Tags{ + models.NewTags(map[string]string{ "regions": "east", - }, + }), models.Fields{ "value": "1,0", // comma in the field value }, @@ -791,9 +918,9 @@ test(t, `cpu,regions=eas\t value=1.0`, NewTestPoint( "cpu", - models.Tags{ + models.NewTags(map[string]string{ "regions": "eas\\t", - }, + }), models.Fields{ "value": 1.0, }, @@ -803,9 +930,9 @@ test(t, `cpu,regions=\\,\,\=east value=1.0`, NewTestPoint( "cpu", - models.Tags{ + models.NewTags(map[string]string{ "regions": `\,,=east`, - }, + }), models.Fields{ "value": 1.0, }, @@ -815,7 +942,7 @@ test(t, `cpu \a=1i`, NewTestPoint( "cpu", - models.Tags{}, + models.NewTags(map[string]string{}), models.Fields{ "\\a": int64(1), // Left as parsed since it's not a known escape sequence. 
}, @@ -825,9 +952,9 @@ test(t, `cpu=load,equals\=foo=tag\=value value=1i`, NewTestPoint( "cpu=load", // Not escaped - models.Tags{ + models.NewTags(map[string]string{ "equals=foo": "tag=value", // Tag and value unescaped - }, + }), models.Fields{ "value": int64(1), }, @@ -839,24 +966,42 @@ test(t, "cpu,host=serverA,region=us-east value=1.0 1000000000", NewTestPoint("cpu", - models.Tags{"host": "serverA", "region": "us-east"}, + models.NewTags(map[string]string{"host": "serverA", "region": "us-east"}), models.Fields{"value": 1.0}, time.Unix(1, 0))) } -func TestParsPointWithDuplicateTags(t *testing.T) { - _, err := models.ParsePoints([]byte(`cpu,host=serverA,host=serverB value=1i 1000000000`)) - if err == nil { - t.Fatalf(`ParsePoint() expected error. got nil`) +func TestParsePointWithDuplicateTags(t *testing.T) { + for i, tt := range []struct { + line string + err string + }{ + { + line: `cpu,host=serverA,host=serverB value=1i 1000000000`, + err: `unable to parse 'cpu,host=serverA,host=serverB value=1i 1000000000': duplicate tags`, + }, + { + line: `cpu,b=2,b=1,c=3 value=1i 1000000000`, + err: `unable to parse 'cpu,b=2,b=1,c=3 value=1i 1000000000': duplicate tags`, + }, + { + line: `cpu,b=2,c=3,b=1 value=1i 1000000000`, + err: `unable to parse 'cpu,b=2,c=3,b=1 value=1i 1000000000': duplicate tags`, + }, + } { + _, err := models.ParsePointsString(tt.line) + if err == nil || tt.err != err.Error() { + t.Errorf("%d. ParsePoint() expected error '%s'. got '%s'", i, tt.err, err) + } } } func TestParsePointWithStringField(t *testing.T) { test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo",str2="bar" 1000000000`, NewTestPoint("cpu", - models.Tags{ + models.NewTags(map[string]string{ "host": "serverA", "region": "us-east", - }, + }), models.Fields{ "value": 1.0, "str": "foo", @@ -867,10 +1012,10 @@ test(t, `cpu,host=serverA,region=us-east str="foo \" bar" 1000000000`, NewTestPoint("cpu", - models.Tags{ + models.NewTags(map[string]string{ "host": "serverA", "region": "us-east", - }, + }), models.Fields{ "str": `foo " bar`, }, @@ -883,10 +1028,10 @@ test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo bar" 1000000000`, NewTestPoint( "cpu", - models.Tags{ + models.NewTags(map[string]string{ "host": "serverA", "region": "us-east", - }, + }), models.Fields{ "value": 1.0, "str": "foo bar", // spaces in string value @@ -899,10 +1044,10 @@ test(t, "cpu,host=serverA,region=us-east value=1.0,str=\"foo\nbar\" 1000000000", NewTestPoint( "cpu", - models.Tags{ + models.NewTags(map[string]string{ "host": "serverA", "region": "us-east", - }, + }), models.Fields{ "value": 1.0, "str": "foo\nbar", // newline in string value @@ -916,10 +1061,10 @@ test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo\,bar" 1000000000`, NewTestPoint( "cpu", - models.Tags{ + models.NewTags(map[string]string{ "host": "serverA", "region": "us-east", - }, + }), models.Fields{ "value": 1.0, "str": `foo\,bar`, // commas in string value @@ -931,16 +1076,31 @@ test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo,bar" 1000000000`, NewTestPoint( "cpu", - models.Tags{ + models.NewTags(map[string]string{ "host": "serverA", "region": "us-east", - }, + }), models.Fields{ "value": 1.0, "str": "foo,bar", // commas in string value }, time.Unix(1, 0)), ) + + // string w/ trailing escape chars + test(t, `cpu,host=serverA,region=us-east str="foo\\",str2="bar" 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ + "host": "serverA", + "region": "us-east", + }), + models.Fields{ + "str": "foo\\", // trailing 
escape char + "str2": "bar", + }, + time.Unix(1, 0)), + ) } func TestParsePointQuotedMeasurement(t *testing.T) { @@ -948,10 +1108,10 @@ test(t, `"cpu",host=serverA,region=us-east value=1.0 1000000000`, NewTestPoint( `"cpu"`, - models.Tags{ + models.NewTags(map[string]string{ "host": "serverA", "region": "us-east", - }, + }), models.Fields{ "value": 1.0, }, @@ -963,10 +1123,10 @@ test(t, `cpu,"host"="serverA",region=us-east value=1.0 1000000000`, NewTestPoint( "cpu", - models.Tags{ + models.NewTags(map[string]string{ `"host"`: `"serverA"`, "region": "us-east", - }, + }), models.Fields{ "value": 1.0, }, @@ -985,7 +1145,7 @@ } // Expected " in the tag value - exp := models.MustNewPoint("baz", models.Tags{"mytag": `"a`}, + exp := models.MustNewPoint("baz", models.NewTags(map[string]string{"mytag": `"a`}), models.Fields{"x": float64(1)}, time.Unix(0, 1441103862125)) if pts[0].String() != exp.String() { @@ -993,7 +1153,7 @@ } // Expected two points to ensure we did not overscan the line - exp = models.MustNewPoint("baz", models.Tags{"mytag": `a`}, + exp = models.MustNewPoint("baz", models.NewTags(map[string]string{"mytag": `a`}), models.Fields{"z": float64(1)}, time.Unix(0, 1441103862126)) if pts[1].String() != exp.String() { @@ -1007,10 +1167,10 @@ test(t, `cpu,host=serverA,region=us-east value="{Hello\"{,}\" World}" 1000000000`, NewTestPoint( "cpu", - models.Tags{ + models.NewTags(map[string]string{ "host": "serverA", "region": "us-east", - }, + }), models.Fields{ "value": `{Hello"{,}" World}`, }, @@ -1021,10 +1181,10 @@ test(t, `cpu,host=serverA,region=us-east value="{Hello\"{\,}\" World}" 1000000000`, NewTestPoint( "cpu", - models.Tags{ + models.NewTags(map[string]string{ "host": "serverA", "region": "us-east", - }, + }), models.Fields{ "value": `{Hello"{\,}" World}`, }, @@ -1036,10 +1196,10 @@ test(t, `cpu,host=serverA,region=us-east str="foo=bar",value=1.0 1000000000`, NewTestPoint( "cpu", - models.Tags{ + models.NewTags(map[string]string{ "host": "serverA", "region": "us-east", - }, + }), models.Fields{ "value": 1.0, "str": "foo=bar", // spaces in string value @@ -1052,7 +1212,7 @@ test(t, `cpu value="test\\\"" 1000000000`, NewTestPoint( "cpu", - models.Tags{}, + models.NewTags(map[string]string{}), models.Fields{ "value": `test\"`, }, @@ -1062,7 +1222,7 @@ test(t, `cpu value="test\\" 1000000000`, NewTestPoint( "cpu", - models.Tags{}, + models.NewTags(map[string]string{}), models.Fields{ "value": `test\`, }, @@ -1072,7 +1232,7 @@ test(t, `cpu value="test\\\"" 1000000000`, NewTestPoint( "cpu", - models.Tags{}, + models.NewTags(map[string]string{}), models.Fields{ "value": `test\"`, }, @@ -1082,7 +1242,7 @@ test(t, `cpu value="test\"" 1000000000`, NewTestPoint( "cpu", - models.Tags{}, + models.NewTags(map[string]string{}), models.Fields{ "value": `test"`, }, @@ -1094,10 +1254,10 @@ test(t, `cpu,host=serverA,region=us-east true=true,t=t,T=T,TRUE=TRUE,True=True,false=false,f=f,F=F,FALSE=FALSE,False=False 1000000000`, NewTestPoint( "cpu", - models.Tags{ + models.NewTags(map[string]string{ "host": "serverA", "region": "us-east", - }, + }), models.Fields{ "t": true, "T": true, @@ -1118,10 +1278,10 @@ test(t, `cpu,host=serverA,region=us-east value="wè" 1000000000`, NewTestPoint( "cpu", - models.Tags{ + models.NewTags(map[string]string{ "host": "serverA", "region": "us-east", - }, + }), models.Fields{ "value": "wè", }, @@ -1133,7 +1293,7 @@ test(t, `cpu value=1 -1`, NewTestPoint( "cpu", - models.Tags{}, + models.NewTags(map[string]string{}), models.Fields{ "value": 1.0, }, @@ -1142,53 +1302,45 @@ 
} func TestParsePointMaxTimestamp(t *testing.T) { - test(t, `cpu value=1 9223372036854775807`, + test(t, fmt.Sprintf(`cpu value=1 %d`, models.MaxNanoTime), NewTestPoint( "cpu", - models.Tags{}, + models.NewTags(map[string]string{}), models.Fields{ "value": 1.0, }, - time.Unix(0, int64(1<<63-1))), + time.Unix(0, models.MaxNanoTime)), ) } func TestParsePointMinTimestamp(t *testing.T) { - test(t, `cpu value=1 -9223372036854775807`, + test(t, `cpu value=1 -9223372036854775806`, NewTestPoint( "cpu", - models.Tags{}, + models.NewTags(map[string]string{}), models.Fields{ "value": 1.0, }, - time.Unix(0, -int64(1<<63-1))), + time.Unix(0, models.MinNanoTime)), ) } func TestParsePointInvalidTimestamp(t *testing.T) { - _, err := models.ParsePointsString("cpu value=1 9223372036854775808") - if err == nil { - t.Fatalf("ParsePoints failed: %v", err) - } - _, err = models.ParsePointsString("cpu value=1 -92233720368547758078") - if err == nil { - t.Fatalf("ParsePoints failed: %v", err) - } - _, err = models.ParsePointsString("cpu value=1 -") - if err == nil { - t.Fatalf("ParsePoints failed: %v", err) - } - _, err = models.ParsePointsString("cpu value=1 -/") - if err == nil { - t.Fatalf("ParsePoints failed: %v", err) - } - _, err = models.ParsePointsString("cpu value=1 -1?") - if err == nil { - t.Fatalf("ParsePoints failed: %v", err) + examples := []string{ + "cpu value=1 9223372036854775808", + "cpu value=1 -92233720368547758078", + "cpu value=1 -", + "cpu value=1 -/", + "cpu value=1 -1?", + "cpu value=1 1-", + "cpu value=1 9223372036854775807 12", } - _, err = models.ParsePointsString("cpu value=1 1-") - if err == nil { - t.Fatalf("ParsePoints failed: %v", err) + + for i, example := range examples { + _, err := models.ParsePointsString(example) + if err == nil { + t.Fatalf("[Example %d] ParsePoints failed: %v", i, err) + } } } @@ -1196,7 +1348,7 @@ test(t, `cpu value=1 1000000000`, NewTestPoint( "cpu", - models.Tags{}, + models.NewTags(map[string]string{}), models.Fields{ "value": 1.0, }, @@ -1207,7 +1359,7 @@ test(t, `cpu value=-0.64 1000000000`, NewTestPoint( "cpu", - models.Tags{}, + models.NewTags(map[string]string{}), models.Fields{ "value": -0.64, }, @@ -1219,7 +1371,7 @@ test(t, `cpu value=1. 
1000000000`, NewTestPoint( "cpu", - models.Tags{}, + models.NewTags(map[string]string{}), models.Fields{ "value": 1.0, }, @@ -1231,7 +1383,7 @@ test(t, `cpu value=6.632243e+06 1000000000`, NewTestPoint( "cpu", - models.Tags{}, + models.NewTags(map[string]string{}), models.Fields{ "value": float64(6632243), }, @@ -1243,7 +1395,7 @@ test(t, `cpu value=6632243i 1000000000`, NewTestPoint( "cpu", - models.Tags{}, + models.NewTags(map[string]string{}), models.Fields{ "value": int64(6632243), // if incorrectly encoded as a float, it would show up as 6.632243e+06 }, @@ -1340,7 +1492,7 @@ t.Errorf("ParsePoint() to string mismatch:\n got %v\n exp %v", got, line) } - pt = models.MustNewPoint("cpu", models.Tags{"host": "serverA", "region": "us-east"}, + pt = models.MustNewPoint("cpu", models.NewTags(map[string]string{"host": "serverA", "region": "us-east"}), models.Fields{"int": 10, "float": float64(11.0), "float2": float64(12.123), "bool": false, "str": "string val"}, time.Unix(1, 0)) @@ -1537,26 +1689,26 @@ func TestNewPointEscaped(t *testing.T) { // commas - pt := models.MustNewPoint("cpu,main", models.Tags{"tag,bar": "value"}, models.Fields{"name,bar": 1.0}, time.Unix(0, 0)) + pt := models.MustNewPoint("cpu,main", models.NewTags(map[string]string{"tag,bar": "value"}), models.Fields{"name,bar": 1.0}, time.Unix(0, 0)) if exp := `cpu\,main,tag\,bar=value name\,bar=1 0`; pt.String() != exp { t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) } // spaces - pt = models.MustNewPoint("cpu main", models.Tags{"tag bar": "value"}, models.Fields{"name bar": 1.0}, time.Unix(0, 0)) + pt = models.MustNewPoint("cpu main", models.NewTags(map[string]string{"tag bar": "value"}), models.Fields{"name bar": 1.0}, time.Unix(0, 0)) if exp := `cpu\ main,tag\ bar=value name\ bar=1 0`; pt.String() != exp { t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) } // equals - pt = models.MustNewPoint("cpu=main", models.Tags{"tag=bar": "value=foo"}, models.Fields{"name=bar": 1.0}, time.Unix(0, 0)) + pt = models.MustNewPoint("cpu=main", models.NewTags(map[string]string{"tag=bar": "value=foo"}), models.Fields{"name=bar": 1.0}, time.Unix(0, 0)) if exp := `cpu=main,tag\=bar=value\=foo name\=bar=1 0`; pt.String() != exp { t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) } } func TestNewPointWithoutField(t *testing.T) { - _, err := models.NewPoint("cpu", models.Tags{"tag": "bar"}, models.Fields{}, time.Unix(0, 0)) + _, err := models.NewPoint("cpu", models.NewTags(map[string]string{"tag": "bar"}), models.Fields{}, time.Unix(0, 0)) if err == nil { t.Fatalf(`NewPoint() expected error. 
got nil`) } @@ -1582,19 +1734,19 @@ } func TestMakeKeyEscaped(t *testing.T) { - if exp, got := `cpu\ load`, models.MakeKey([]byte(`cpu\ load`), models.Tags{}); string(got) != exp { + if exp, got := `cpu\ load`, models.MakeKey([]byte(`cpu\ load`), models.NewTags(map[string]string{})); string(got) != exp { t.Errorf("MakeKey() mismatch.\ngot %v\nexp %v", got, exp) } - if exp, got := `cpu\ load`, models.MakeKey([]byte(`cpu load`), models.Tags{}); string(got) != exp { + if exp, got := `cpu\ load`, models.MakeKey([]byte(`cpu load`), models.NewTags(map[string]string{})); string(got) != exp { t.Errorf("MakeKey() mismatch.\ngot %v\nexp %v", got, exp) } - if exp, got := `cpu\,load`, models.MakeKey([]byte(`cpu\,load`), models.Tags{}); string(got) != exp { + if exp, got := `cpu\,load`, models.MakeKey([]byte(`cpu\,load`), models.NewTags(map[string]string{})); string(got) != exp { t.Errorf("MakeKey() mismatch.\ngot %v\nexp %v", got, exp) } - if exp, got := `cpu\,load`, models.MakeKey([]byte(`cpu,load`), models.Tags{}); string(got) != exp { + if exp, got := `cpu\,load`, models.MakeKey([]byte(`cpu,load`), models.NewTags(map[string]string{})); string(got) != exp { t.Errorf("MakeKey() mismatch.\ngot %v\nexp %v", got, exp) } @@ -1772,3 +1924,247 @@ t.Fatalf("expected 2 points, got %d", len(points)) } } + +func TestParsePointsBlankLine(t *testing.T) { + buf := `cpu value=1i 1000000000 + +cpu value=2i 2000000000` + points, err := models.ParsePointsString(buf) + if err != nil { + t.Fatalf("failed to write points: %s", err.Error()) + } + + if len(points) != 2 { + t.Fatalf("expected 2 points, got %d", len(points)) + } +} + +func TestNewPointsWithBytesWithCorruptData(t *testing.T) { + corrupted := []byte{0, 0, 0, 3, 102, 111, 111, 0, 0, 0, 4, 61, 34, 65, 34, 1, 0, 0, 0, 14, 206, 86, 119, 24, 32, 72, 233, 168, 2, 148} + p, err := models.NewPointFromBytes(corrupted) + if p != nil || err == nil { + t.Fatalf("NewPointFromBytes: got: (%v, %v), expected: (nil, error)", p, err) + } +} + +func TestNewPointsRejectsEmptyFieldNames(t *testing.T) { + if _, err := models.NewPoint("foo", nil, models.Fields{"": 1}, time.Now()); err == nil { + t.Fatalf("new point with empty field name. got: nil, expected: error") + } +} + +func TestNewPointsRejectsMaxKey(t *testing.T) { + var key string + for i := 0; i < 65536; i++ { + key += "a" + } + + if _, err := models.NewPoint(key, nil, models.Fields{"value": 1}, time.Now()); err == nil { + t.Fatalf("new point with max key. got: nil, expected: error") + } + + if _, err := models.ParsePointsString(fmt.Sprintf("%v value=1", key)); err == nil { + t.Fatalf("parse point with max key. 
got: nil, expected: error") + } +} + +func TestParseKeyEmpty(t *testing.T) { + if _, _, err := models.ParseKey(nil); err != nil { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestPoint_FieldIterator_Simple(t *testing.T) { + + p, err := models.ParsePoints([]byte(`m v=42i,f=42 36`)) + if err != nil { + t.Fatal(err) + } + + if len(p) != 1 { + t.Fatalf("wrong number of points, got %d, exp %d", len(p), 1) + } + + fi := p[0].FieldIterator() + + if !fi.Next() { + t.Fatal("field iterator terminated before first field") + } + + if fi.Type() != models.Integer { + t.Fatalf("'42i' should be an Integer, got %v", fi.Type()) + } + + if fi.IntegerValue() != 42 { + t.Fatalf("'42i' should be 42, got %d", fi.IntegerValue()) + } + + if !fi.Next() { + t.Fatalf("field iterator terminated before second field") + } + + if fi.Type() != models.Float { + t.Fatalf("'42' should be a Float, got %v", fi.Type()) + } + + if fi.FloatValue() != 42.0 { + t.Fatalf("'42' should be %f, got %f", 42.0, fi.FloatValue()) + } + + if fi.Next() { + t.Fatal("field iterator didn't terminate") + } +} + +func toFields(fi models.FieldIterator) models.Fields { + m := make(models.Fields) + for fi.Next() { + var v interface{} + switch fi.Type() { + case models.Float: + v = fi.FloatValue() + case models.Integer: + v = fi.IntegerValue() + case models.String: + v = fi.StringValue() + case models.Boolean: + v = fi.BooleanValue() + case models.Empty: + v = nil + default: + panic("unknown type") + } + m[string(fi.FieldKey())] = v + } + return m +} + +func TestPoint_FieldIterator_FieldMap(t *testing.T) { + + points, err := models.ParsePointsString(` +m v=42 +m v=42i +m v="string" +m v=true +m v="string\"with\"escapes" +m v=42i,f=42,g=42.314 +m a=2i,b=3i,c=true,d="stuff",e=-0.23,f=123.456 +`) + + if err != nil { + t.Fatal("failed to parse test points:", err) + } + + for _, p := range points { + exp := p.Fields() + got := toFields(p.FieldIterator()) + + if !reflect.DeepEqual(got, exp) { + t.Errorf("FieldIterator failed for %#q: got %#v, exp %#v", p.String(), got, exp) + } + } +} + +func TestPoint_FieldIterator_Delete_Begin(t *testing.T) { + points, err := models.ParsePointsString(`m a=1,b=2,c=3`) + if err != nil || len(points) != 1 { + t.Fatal("failed parsing point") + } + + fi := points[0].FieldIterator() + fi.Next() // a + fi.Delete() + + fi.Reset() + + got := toFields(fi) + exp := models.Fields{"b": float64(2), "c": float64(3)} + + if !reflect.DeepEqual(got, exp) { + t.Fatalf("Delete failed, got %#v, exp %#v", got, exp) + } +} + +func TestPoint_FieldIterator_Delete_Middle(t *testing.T) { + points, err := models.ParsePointsString(`m a=1,b=2,c=3`) + if err != nil || len(points) != 1 { + t.Fatal("failed parsing point") + } + + fi := points[0].FieldIterator() + fi.Next() // a + fi.Next() // b + fi.Delete() + + fi.Reset() + + got := toFields(fi) + exp := models.Fields{"a": float64(1), "c": float64(3)} + + if !reflect.DeepEqual(got, exp) { + t.Fatalf("Delete failed, got %#v, exp %#v", got, exp) + } +} + +func TestPoint_FieldIterator_Delete_End(t *testing.T) { + points, err := models.ParsePointsString(`m a=1,b=2,c=3`) + if err != nil || len(points) != 1 { + t.Fatal("failed parsing point") + } + + fi := points[0].FieldIterator() + fi.Next() // a + fi.Next() // b + fi.Next() // c + fi.Delete() + + fi.Reset() + + got := toFields(fi) + exp := models.Fields{"a": float64(1), "b": float64(2)} + + if !reflect.DeepEqual(got, exp) { + t.Fatalf("Delete failed, got %#v, exp %#v", got, exp) + } +} + +func TestPoint_FieldIterator_Delete_Nothing(t *testing.T) 
{ + points, err := models.ParsePointsString(`m a=1,b=2,c=3`) + if err != nil || len(points) != 1 { + t.Fatal("failed parsing point") + } + + fi := points[0].FieldIterator() + fi.Delete() + + fi.Reset() + + got := toFields(fi) + exp := models.Fields{"a": float64(1), "b": float64(2), "c": float64(3)} + + if !reflect.DeepEqual(got, exp) { + t.Fatalf("Delete failed, got %#v, exp %#v", got, exp) + } +} + +func TestPoint_FieldIterator_Delete_Twice(t *testing.T) { + points, err := models.ParsePointsString(`m a=1,b=2,c=3`) + if err != nil || len(points) != 1 { + t.Fatal("failed parsing point") + } + + fi := points[0].FieldIterator() + fi.Next() // a + fi.Next() // b + fi.Delete() + fi.Delete() // no-op + + fi.Reset() + + got := toFields(fi) + exp := models.Fields{"a": float64(1), "c": float64(3)} + + if !reflect.DeepEqual(got, exp) { + t.Fatalf("Delete failed, got %#v, exp %#v", got, exp) + } +} diff -Nru influxdb-0.10.0+dfsg1/models/rows.go influxdb-1.1.1+dfsg1/models/rows.go --- influxdb-0.10.0+dfsg1/models/rows.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/models/rows.go 2016-12-06 21:36:15.000000000 +0000 @@ -1,7 +1,6 @@ package models import ( - "hash/fnv" "sort" ) @@ -11,7 +10,6 @@ Tags map[string]string `json:"tags,omitempty"` Columns []string `json:"columns,omitempty"` Values [][]interface{} `json:"values,omitempty"` - Err error `json:"err,omitempty"` } // SameSeries returns true if r contains values for the same series as o. @@ -21,7 +19,7 @@ // tagsHash returns a hash of tag key/value pairs. func (r *Row) tagsHash() uint64 { - h := fnv.New64a() + h := NewInlineFNV64a() keys := r.tagsKeys() for _, k := range keys { h.Write([]byte(k)) diff -Nru influxdb-0.10.0+dfsg1/models/statistic.go influxdb-1.1.1+dfsg1/models/statistic.go --- influxdb-0.10.0+dfsg1/models/statistic.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/models/statistic.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,40 @@ +package models + +type Statistic struct { + Name string `json:"name"` + Tags map[string]string `json:"tags"` + Values map[string]interface{} `json:"values"` +} + +func NewStatistic(name string) Statistic { + return Statistic{ + Name: name, + Tags: make(map[string]string), + Values: make(map[string]interface{}), + } +} + +// StatisticTags is a map that can be merged with others without causing +// mutations to either map. +type StatisticTags map[string]string + +// Merge creates a new map containing the merged contents of tags and t. +// If both tags and the receiver map contain the same key, the value in tags +// is used in the resulting map. +// +// Merge always returns a usable map. +func (t StatisticTags) Merge(tags map[string]string) map[string]string { + // Add everything in tags to the result. + out := make(map[string]string, len(tags)) + for k, v := range tags { + out[k] = v + } + + // Only add values from t that don't appear in tags. 
+ for k, v := range t { + if _, ok := tags[k]; !ok { + out[k] = v + } + } + return out +} diff -Nru influxdb-0.10.0+dfsg1/models/statistic_test.go influxdb-1.1.1+dfsg1/models/statistic_test.go --- influxdb-0.10.0+dfsg1/models/statistic_test.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/models/statistic_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,55 @@ +package models_test + +import ( + "reflect" + "testing" + + "github.com/influxdata/influxdb/models" +) + +func TestTags_Merge(t *testing.T) { + examples := []struct { + Base map[string]string + Arg map[string]string + Result map[string]string + }{ + { + Base: nil, + Arg: nil, + Result: map[string]string{}, + }, + { + Base: nil, + Arg: map[string]string{"foo": "foo"}, + Result: map[string]string{"foo": "foo"}, + }, + { + Base: map[string]string{"foo": "foo"}, + Arg: nil, + Result: map[string]string{"foo": "foo"}, + }, + { + Base: map[string]string{"foo": "foo"}, + Arg: map[string]string{"bar": "bar"}, + Result: map[string]string{"foo": "foo", "bar": "bar"}, + }, + { + Base: map[string]string{"foo": "foo", "bar": "bar"}, + Arg: map[string]string{"zoo": "zoo"}, + Result: map[string]string{"foo": "foo", "bar": "bar", "zoo": "zoo"}, + }, + { + Base: map[string]string{"foo": "foo", "bar": "bar"}, + Arg: map[string]string{"bar": "newbar"}, + Result: map[string]string{"foo": "foo", "bar": "newbar"}, + }, + } + + for i, example := range examples { + i++ + result := models.StatisticTags(example.Base).Merge(example.Arg) + if got, exp := result, example.Result; !reflect.DeepEqual(got, exp) { + t.Errorf("[Example %d] got %#v, expected %#v", i, got, exp) + } + } +} diff -Nru influxdb-0.10.0+dfsg1/models/time.go influxdb-1.1.1+dfsg1/models/time.go --- influxdb-0.10.0+dfsg1/models/time.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/models/time.go 2016-12-06 21:36:15.000000000 +0000 @@ -9,31 +9,53 @@ "time" ) +const ( + // MinNanoTime is the minumum time that can be represented. + // + // 1677-09-21 00:12:43.145224194 +0000 UTC + // + // The two lowest minimum integers are used as sentinel values. The + // minimum value needs to be used as a value lower than any other value for + // comparisons and another separate value is needed to act as a sentinel + // default value that is unusable by the user, but usable internally. + // Because these two values need to be used for a special purpose, we do + // not allow users to write points at these two times. + MinNanoTime = int64(math.MinInt64) + 2 + + // MaxNanoTime is the maximum time that can be represented. + // + // 2262-04-11 23:47:16.854775806 +0000 UTC + // + // The highest time represented by a nanosecond needs to be used for an + // exclusive range in the shard group, so the maximum time needs to be one + // less than the possible maximum number of nanoseconds representable by an + // int64 so that we don't lose a point at that one time. + MaxNanoTime = int64(math.MaxInt64) - 1 +) + var ( - // Maximum time that can be represented via int64 nanoseconds since the epoch. - MaxNanoTime = time.Unix(0, math.MaxInt64).UTC() - // Minumum time that can be represented via int64 nanoseconds since the epoch. - MinNanoTime = time.Unix(0, math.MinInt64).UTC() + minNanoTime = time.Unix(0, MinNanoTime).UTC() + maxNanoTime = time.Unix(0, MaxNanoTime).UTC() - // The time is out of the representable range using int64 nanoseconds since the epoch. 
- ErrTimeOutOfRange = fmt.Errorf("time outside range %s - %s", MinNanoTime, MaxNanoTime) + // ErrTimeOutOfRange gets returned when time is out of the representable range using int64 nanoseconds since the epoch. + ErrTimeOutOfRange = fmt.Errorf("time outside range %d - %d", MinNanoTime, MaxNanoTime) ) -// Safely calculate the time given. Will return error if the time is outside the +// SafeCalcTime safely calculates the time given. Will return error if the time is outside the // supported range. func SafeCalcTime(timestamp int64, precision string) (time.Time, error) { mult := GetPrecisionMultiplier(precision) if t, ok := safeSignedMult(timestamp, mult); ok { - return time.Unix(0, t).UTC(), nil - } else { - return time.Time{}, ErrTimeOutOfRange + tme := time.Unix(0, t).UTC() + return tme, CheckTime(tme) } + return time.Time{}, ErrTimeOutOfRange } -// Check that a time is within the safe range. +// CheckTime checks that a time is within the safe range. func CheckTime(t time.Time) error { - if t.Before(MinNanoTime) || t.After(MaxNanoTime) { + if t.Before(minNanoTime) || t.After(maxNanoTime) { return ErrTimeOutOfRange } return nil @@ -44,7 +66,7 @@ if a == 0 || b == 0 || a == 1 || b == 1 { return a * b, true } - if a == math.MinInt64 || b == math.MaxInt64 { + if a == MinNanoTime || b == MaxNanoTime { return 0, false } c := a * b diff -Nru influxdb-0.10.0+dfsg1/monitor/build_info.go influxdb-1.1.1+dfsg1/monitor/build_info.go --- influxdb-0.10.0+dfsg1/monitor/build_info.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/monitor/build_info.go 2016-12-06 21:36:15.000000000 +0000 @@ -1,5 +1,7 @@ package monitor +import "github.com/influxdata/influxdb/monitor/diagnostics" + // system captures build diagnostics type build struct { Version string @@ -8,7 +10,7 @@ Time string } -func (b *build) Diagnostics() (*Diagnostic, error) { +func (b *build) Diagnostics() (*diagnostics.Diagnostics, error) { diagnostics := map[string]interface{}{ "Version": b.Version, "Commit": b.Commit, @@ -16,5 +18,5 @@ "Build Time": b.Time, } - return DiagnosticFromMap(diagnostics), nil + return DiagnosticsFromMap(diagnostics), nil } diff -Nru influxdb-0.10.0+dfsg1/monitor/config.go influxdb-1.1.1+dfsg1/monitor/config.go --- influxdb-0.10.0+dfsg1/monitor/config.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/monitor/config.go 2016-12-06 21:36:15.000000000 +0000 @@ -1,9 +1,10 @@ package monitor import ( + "errors" "time" - "github.com/influxdb/influxdb/toml" + "github.com/influxdata/influxdb/toml" ) const ( @@ -33,3 +34,11 @@ StoreInterval: toml.Duration(DefaultStoreInterval), } } + +// Validate validates that the configuration is acceptable. 
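// Illustrative check (editorial sketch, not part of the original file):
//
//	c := NewConfig()
//	c.StoreInterval = 0
//	err := c.Validate() // non-nil: the store interval must be positive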
+func (c Config) Validate() error { + if c.StoreInterval <= 0 { + return errors.New("monitor store interval must be positive") + } + return nil +} diff -Nru influxdb-0.10.0+dfsg1/monitor/config_test.go influxdb-1.1.1+dfsg1/monitor/config_test.go --- influxdb-0.10.0+dfsg1/monitor/config_test.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/monitor/config_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -5,7 +5,7 @@ "time" "github.com/BurntSushi/toml" - "github.com/influxdb/influxdb/monitor" + "github.com/influxdata/influxdb/monitor" ) func TestConfig_Parse(t *testing.T) { diff -Nru influxdb-0.10.0+dfsg1/monitor/diagnostics/diagnostics.go influxdb-1.1.1+dfsg1/monitor/diagnostics/diagnostics.go --- influxdb-0.10.0+dfsg1/monitor/diagnostics/diagnostics.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/monitor/diagnostics/diagnostics.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,41 @@ +package diagnostics // import "github.com/influxdata/influxdb/monitor/diagnostics" + +// Client is the interface modules implement if they register diagnostics with monitor. +type Client interface { + Diagnostics() (*Diagnostics, error) +} + +// The ClientFunc type is an adapter to allow the use of +// ordinary functions as Diagnostics clients. +type ClientFunc func() (*Diagnostics, error) + +// Diagnostics calls f(). +func (f ClientFunc) Diagnostics() (*Diagnostics, error) { + return f() +} + +// Diagnostics represents a table of diagnostic information. The first value +// is the name of the columns, the second is a slice of interface slices containing +// the values for each column, by row. This information is never written to an InfluxDB +// system and is display-only. An example showing, say, connections follows: +// +// source_ip source_port dest_ip dest_port +// 182.1.0.2 2890 127.0.0.1 38901 +// 174.33.1.2 2924 127.0.0.1 38902 +type Diagnostics struct { + Columns []string + Rows [][]interface{} +} + +// NewDiagnostic initialises a new Diagnostics with the specified columns. +func NewDiagnostics(columns []string) *Diagnostics { + return &Diagnostics{ + Columns: columns, + Rows: make([][]interface{}, 0), + } +} + +// AddRow appends the provided row to the Diagnostics' rows. 
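// Illustrative usage (editorial sketch), building the connections table shown
// in the Diagnostics type comment above:
//
//	d := NewDiagnostics([]string{"source_ip", "source_port", "dest_ip", "dest_port"})
//	d.AddRow([]interface{}{"182.1.0.2", 2890, "127.0.0.1", 38901})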
+func (d *Diagnostics) AddRow(r []interface{}) { + d.Rows = append(d.Rows, r) +} diff -Nru influxdb-0.10.0+dfsg1/monitor/go_runtime.go influxdb-1.1.1+dfsg1/monitor/go_runtime.go --- influxdb-0.10.0+dfsg1/monitor/go_runtime.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/monitor/go_runtime.go 2016-12-06 21:36:15.000000000 +0000 @@ -2,12 +2,14 @@ import ( "runtime" + + "github.com/influxdata/influxdb/monitor/diagnostics" ) // goRuntime captures Go runtime diagnostics type goRuntime struct{} -func (g *goRuntime) Diagnostics() (*Diagnostic, error) { +func (g *goRuntime) Diagnostics() (*diagnostics.Diagnostics, error) { diagnostics := map[string]interface{}{ "GOARCH": runtime.GOARCH, "GOOS": runtime.GOOS, @@ -15,5 +17,5 @@ "version": runtime.Version(), } - return DiagnosticFromMap(diagnostics), nil + return DiagnosticsFromMap(diagnostics), nil } diff -Nru influxdb-0.10.0+dfsg1/monitor/network.go influxdb-1.1.1+dfsg1/monitor/network.go --- influxdb-0.10.0+dfsg1/monitor/network.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/monitor/network.go 2016-12-06 21:36:15.000000000 +0000 @@ -2,12 +2,14 @@ import ( "os" + + "github.com/influxdata/influxdb/monitor/diagnostics" ) // network captures network diagnostics type network struct{} -func (n *network) Diagnostics() (*Diagnostic, error) { +func (n *network) Diagnostics() (*diagnostics.Diagnostics, error) { h, err := os.Hostname() if err != nil { return nil, err @@ -17,5 +19,5 @@ "hostname": h, } - return DiagnosticFromMap(diagnostics), nil + return DiagnosticsFromMap(diagnostics), nil } diff -Nru influxdb-0.10.0+dfsg1/monitor/README.md influxdb-1.1.1+dfsg1/monitor/README.md --- influxdb-0.10.0+dfsg1/monitor/README.md 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/monitor/README.md 2016-12-06 21:36:15.000000000 +0000 @@ -39,35 +39,8 @@ ## Registering statistics and diagnostics -To export statistical information with the `monitor` system, code simply calls `influxdb.NewStatistics()` and receives an `expvar.Map` instance in response. This object can then be used to store statistics. - -For example, if you have a component called `Service`, you can statistics like so: - -``` -import ( - "expvar" - "github.com/influxdb/influxdb" -) -. -. -. -. -type Service struct { - ....some other fields.... - statMap *expvar.Map /// Add a map of type *expvar.Map. Check GoDocs for how to use this. -} - - -func NewService() *Service { - s = &NewService{} - s.statMap = NewStatistics(key, name, tags) -} -``` -When calling `NewStatistics` `key` should be unique for the Service instance (if a network service, the protocol and binding port are good to include in the key). `name` will be the name of the Measurement used to store these statistics. Finally, when these statistics are written to the `monitor` database, all points will be tagged with `tags`. A value of nil for `tags` is legal. +To export statistical information with the `monitor` system, a service should implement the `monitor.Reporter` interface. Services added to the Server will be automatically added to the list of statistics returned. Any service that is not added to the `Services` slice will need to modify the `Server`'s `Statistics(map[string]string)` method to aggregate the call to the service's `Statistics(map[string]string)` method so they are combined into a single response. The `Statistics(map[string]string)` method should return a statistics slice with the passed in tags included. 
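As a rough illustration (this sketch is not taken from the InfluxDB source; the `writeReq` counter, the `proto` tag and the `example_service` measurement name are invented for the example), a service's `Statistics` method could look like:

```
import (
	"sync/atomic"

	"github.com/influxdata/influxdb/models"
)

type Service struct {
	stats struct {
		WriteReq int64 // updated elsewhere with sync/atomic; keep 64-bit aligned for i386
	}
}

// Statistics implements monitor.Reporter, returning the service's counters
// tagged with its own tags merged with the passed-in tags.
func (s *Service) Statistics(tags map[string]string) []models.Statistic {
	return []models.Statistic{{
		Name: "example_service",
		Tags: models.StatisticTags{"proto": "tcp"}.Merge(tags),
		Values: map[string]interface{}{
			"writeReq": atomic.LoadInt64(&s.stats.WriteReq),
		},
	}}
}
```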
The statistics should be kept inside of an internal structure and should be accessed in a thread-safe way. It is common to create a struct for holding the statistics and using `sync/atomic` instead of locking. If using `sync/atomic`, be sure to align the values in the struct so it works properly on `i386`. To register diagnostic information, `monitor.RegisterDiagnosticsClient` is called, passing a `influxdb.monitor.DiagsClient` object to `monitor`. Implementing the `influxdb.monitor.DiagsClient` interface requires that your component have function returning diagnostic information in specific form, so that it can be displayed by the `monitor` system. -## expvar -Statistical information is gathered by each package using [expvar](https://golang.org/pkg/expvar). Each package registers a map using its package name. - -Due to the nature of `expvar`, statistical information is reset to its initial state when a server is restarted. +Statistical information is reset to its initial state when a server is restarted. diff -Nru influxdb-0.10.0+dfsg1/monitor/reporter.go influxdb-1.1.1+dfsg1/monitor/reporter.go --- influxdb-0.10.0+dfsg1/monitor/reporter.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/monitor/reporter.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,7 @@ +package monitor + +import "github.com/influxdata/influxdb/models" + +type Reporter interface { + Statistics(tags map[string]string) []models.Statistic +} diff -Nru influxdb-0.10.0+dfsg1/monitor/service.go influxdb-1.1.1+dfsg1/monitor/service.go --- influxdb-0.10.0+dfsg1/monitor/service.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/monitor/service.go 2016-12-06 21:36:15.000000000 +0000 @@ -1,8 +1,10 @@ -package monitor +package monitor // import "github.com/influxdata/influxdb/monitor" import ( + "errors" "expvar" "fmt" + "io" "log" "os" "runtime" @@ -11,60 +13,18 @@ "sync" "time" - "github.com/influxdb/influxdb" - "github.com/influxdb/influxdb/cluster" - "github.com/influxdb/influxdb/models" - "github.com/influxdb/influxdb/services/meta" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/monitor/diagnostics" + "github.com/influxdata/influxdb/services/meta" ) -const leaderWaitTimeout = 30 * time.Second - // Policy constants. const ( MonitorRetentionPolicy = "monitor" MonitorRetentionPolicyDuration = 7 * 24 * time.Hour + MonitorRetentionPolicyReplicaN = 1 ) -// DiagsClient is the interface modules implement if they register diags with monitor. -type DiagsClient interface { - Diagnostics() (*Diagnostic, error) -} - -// The DiagsClientFunc type is an adapter to allow the use of -// ordinary functions as Diagnostis clients. -type DiagsClientFunc func() (*Diagnostic, error) - -// Diagnostics calls f(). -func (f DiagsClientFunc) Diagnostics() (*Diagnostic, error) { - return f() -} - -// Diagnostic represents a table of diagnostic information. The first value -// is the name of the columns, the second is a slice of interface slices containing -// the values for each column, by row. This information is never written to an InfluxDB -// system and is display-only. An example showing, say, connections follows: -// -// source_ip source_port dest_ip dest_port -// 182.1.0.2 2890 127.0.0.1 38901 -// 174.33.1.2 2924 127.0.0.1 38902 -type Diagnostic struct { - Columns []string - Rows [][]interface{} -} - -// NewDiagnostic initialises a new Diagnostic with the specified columns. 
-func NewDiagnostic(columns []string) *Diagnostic { - return &Diagnostic{ - Columns: columns, - Rows: make([][]interface{}, 0), - } -} - -// AddRow appends the provided row to the Diagnostic's rows. -func (d *Diagnostic) AddRow(r []interface{}) { - d.Rows = append(d.Rows, r) -} - // Monitor represents an instance of the monitor system. type Monitor struct { // Build information for diagnostics. @@ -73,53 +33,67 @@ Branch string BuildTime string - wg sync.WaitGroup - done chan struct{} - mu sync.Mutex + wg sync.WaitGroup - diagRegistrations map[string]DiagsClient + mu sync.RWMutex + globalTags map[string]string + diagRegistrations map[string]diagnostics.Client + reporter Reporter + done chan struct{} + storeCreated bool + storeEnabled bool + storeAddress string - storeCreated bool - storeEnabled bool storeDatabase string storeRetentionPolicy string storeRetentionDuration time.Duration storeReplicationFactor int - storeAddress string storeInterval time.Duration MetaClient interface { - ClusterID() uint64 - CreateDatabase(name string) (*meta.DatabaseInfo, error) - CreateRetentionPolicy(database string, rpi *meta.RetentionPolicyInfo) (*meta.RetentionPolicyInfo, error) - SetDefaultRetentionPolicy(database, name string) error - DropRetentionPolicy(database, name string) error + CreateDatabaseWithRetentionPolicy(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) + Database(name string) *meta.DatabaseInfo } - NodeID uint64 - - PointsWriter interface { - WritePoints(p *cluster.WritePointsRequest) error - } + // Writer for pushing stats back into the database. + PointsWriter PointsWriter Logger *log.Logger } +// PointsWriter is a simplified interface for writing the points the monitor gathers +type PointsWriter interface { + WritePoints(database, retentionPolicy string, points models.Points) error +} + // New returns a new instance of the monitor system. -func New(c Config) *Monitor { +func New(r Reporter, c Config) *Monitor { return &Monitor{ - done: make(chan struct{}), - diagRegistrations: make(map[string]DiagsClient), - storeEnabled: c.StoreEnabled, - storeDatabase: c.StoreDatabase, - storeInterval: time.Duration(c.StoreInterval), - Logger: log.New(os.Stderr, "[monitor] ", log.LstdFlags), + globalTags: make(map[string]string), + diagRegistrations: make(map[string]diagnostics.Client), + reporter: r, + storeEnabled: c.StoreEnabled, + storeDatabase: c.StoreDatabase, + storeInterval: time.Duration(c.StoreInterval), + storeRetentionPolicy: MonitorRetentionPolicy, + Logger: log.New(os.Stderr, "[monitor] ", log.LstdFlags), } } +func (m *Monitor) open() bool { + m.mu.Lock() + defer m.mu.Unlock() + return m.done != nil +} + // Open opens the monitoring system, using the given clusterID, node ID, and hostname // for identification purpose. func (m *Monitor) Open() error { + if m.open() { + m.Logger.Println("Monitor is already open") + return nil + } + m.Logger.Printf("Starting monitor system") // Self-register various stats and diagnostics. @@ -133,9 +107,12 @@ m.RegisterDiagnosticsClient("network", &network{}) m.RegisterDiagnosticsClient("system", &system{}) + m.mu.Lock() + m.done = make(chan struct{}) + m.mu.Unlock() + // If enabled, record stats in a InfluxDB system. if m.storeEnabled { - // Start periodic writes to system. m.wg.Add(1) go m.storeStatistics() @@ -145,20 +122,69 @@ } // Close closes the monitor system. 
-func (m *Monitor) Close() { +func (m *Monitor) Close() error { + if !m.open() { + m.Logger.Println("Monitor is already closed.") + return nil + } + m.Logger.Println("shutting down monitor system") + m.mu.Lock() close(m.done) + m.mu.Unlock() + m.wg.Wait() + + m.mu.Lock() m.done = nil + m.mu.Unlock() + + m.DeregisterDiagnosticsClient("build") + m.DeregisterDiagnosticsClient("runtime") + m.DeregisterDiagnosticsClient("network") + m.DeregisterDiagnosticsClient("system") + return nil +} + +// SetGlobalTag can be used to set tags that will appear on all points +// written by the Monitor. +func (m *Monitor) SetGlobalTag(key string, value interface{}) { + m.mu.Lock() + m.globalTags[key] = fmt.Sprintf("%v", value) + m.mu.Unlock() +} + +// RemoteWriterConfig represents the configuration of a remote writer +type RemoteWriterConfig struct { + RemoteAddr string + NodeID string + Username string + Password string + ClusterID uint64 +} + +// SetPointsWriter can be used to set a writer for the monitoring points. +func (m *Monitor) SetPointsWriter(pw PointsWriter) error { + if !m.storeEnabled { + // not enabled, nothing to do + return nil + } + m.mu.Lock() + m.PointsWriter = pw + m.mu.Unlock() + + // Subsequent calls to an already open Monitor are just a no-op. + return m.Open() } -// SetLogger sets the internal logger to the logger passed in. -func (m *Monitor) SetLogger(l *log.Logger) { - m.Logger = l +// SetLogOutput sets the writer to which all logs are written. It must not be +// called after Open is called. +func (m *Monitor) SetLogOutput(w io.Writer) { + m.Logger = log.New(w, "[monitor] ", log.LstdFlags) } // RegisterDiagnosticsClient registers a diagnostics client with the given name and tags. -func (m *Monitor) RegisterDiagnosticsClient(name string, client DiagsClient) { +func (m *Monitor) RegisterDiagnosticsClient(name string, client diagnostics.Client) { m.mu.Lock() defer m.mu.Unlock() m.diagRegistrations[name] = client @@ -184,8 +210,7 @@ } statistic := &Statistic{ - Tags: make(map[string]string), - Values: make(map[string]interface{}), + Statistic: models.NewStatistic(""), } // Add any supplied tags. @@ -250,9 +275,7 @@ // Add Go memstats. statistic := &Statistic{ - Name: "runtime", - Tags: make(map[string]string), - Values: make(map[string]interface{}), + Statistic: models.NewStatistic("runtime"), } // Add any supplied tags to Go memstats @@ -281,17 +304,28 @@ } statistics = append(statistics, statistic) + statistics = m.gatherStatistics(statistics, tags) return statistics, nil } +func (m *Monitor) gatherStatistics(statistics []*Statistic, tags map[string]string) []*Statistic { + m.mu.RLock() + defer m.mu.RUnlock() + + for _, s := range m.reporter.Statistics(tags) { + statistics = append(statistics, &Statistic{Statistic: s}) + } + return statistics +} + // Diagnostics fetches diagnostic information for each registered // diagnostic client. It skips any clients that return an error when // retrieving their diagnostics. 
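// An illustrative registration (editorial sketch; the "example" name and the
// returned map are invented):
//
//	m.RegisterDiagnosticsClient("example", diagnostics.ClientFunc(func() (*diagnostics.Diagnostics, error) {
//		return DiagnosticsFromMap(map[string]interface{}{"status": "ok"}), nil
//	}))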
-func (m *Monitor) Diagnostics() (map[string]*Diagnostic, error) { +func (m *Monitor) Diagnostics() (map[string]*diagnostics.Diagnostics, error) { m.mu.Lock() defer m.mu.Unlock() - diags := make(map[string]*Diagnostic, len(m.diagRegistrations)) + diags := make(map[string]*diagnostics.Diagnostics, len(m.diagRegistrations)) for k, v := range m.diagRegistrations { d, err := v.Diagnostics() if err != nil { @@ -308,110 +342,107 @@ return } - if _, err := m.MetaClient.CreateDatabase(m.storeDatabase); err != nil { - m.Logger.Printf("failed to create database '%s', failed to create storage: %s", - m.storeDatabase, err.Error()) - return - } - - rpi := meta.NewRetentionPolicyInfo(MonitorRetentionPolicy) - rpi.Duration = MonitorRetentionPolicyDuration - rpi.ReplicaN = 1 - if _, err := m.MetaClient.CreateRetentionPolicy(m.storeDatabase, rpi); err != nil { - m.Logger.Printf("failed to create retention policy '%s', failed to create internal storage: %s", - rpi.Name, err.Error()) - return - } - - if err := m.MetaClient.SetDefaultRetentionPolicy(m.storeDatabase, rpi.Name); err != nil { - m.Logger.Printf("failed to set default retention policy on '%s', failed to create internal storage: %s", - m.storeDatabase, err.Error()) - return - } + if di := m.MetaClient.Database(m.storeDatabase); di == nil { + duration := MonitorRetentionPolicyDuration + replicaN := MonitorRetentionPolicyReplicaN + spec := meta.RetentionPolicySpec{ + Name: MonitorRetentionPolicy, + Duration: &duration, + ReplicaN: &replicaN, + } - err := m.MetaClient.DropRetentionPolicy(m.storeDatabase, "default") - if err != nil && err.Error() != influxdb.ErrRetentionPolicyNotFound("default").Error() { - m.Logger.Printf("failed to delete retention policy 'default', failed to created internal storage: %s", err.Error()) - return + if _, err := m.MetaClient.CreateDatabaseWithRetentionPolicy(m.storeDatabase, &spec); err != nil { + m.Logger.Printf("failed to create database '%s', failed to create storage: %s", + m.storeDatabase, err.Error()) + return + } } // Mark storage creation complete. m.storeCreated = true } +// waitUntilInterval waits until we are on an even interval for the duration. +func (m *Monitor) waitUntilInterval(d time.Duration) error { + now := time.Now() + until := now.Truncate(d).Add(d) + timer := time.NewTimer(until.Sub(now)) + defer timer.Stop() + + select { + case <-timer.C: + return nil + case <-m.done: + return errors.New("interrupted") + } +} + // storeStatistics writes the statistics to an InfluxDB system. func (m *Monitor) storeStatistics() { defer m.wg.Done() m.Logger.Printf("Storing statistics in database '%s' retention policy '%s', at interval %s", m.storeDatabase, m.storeRetentionPolicy, m.storeInterval) - // Get cluster-level metadata. Nothing different is going to happen if errors occur. - clusterID := m.MetaClient.ClusterID() hostname, _ := os.Hostname() - clusterTags := map[string]string{ - "clusterID": fmt.Sprintf("%d", clusterID), - "nodeID": fmt.Sprintf("%d", m.NodeID), - "hostname": hostname, + m.SetGlobalTag("hostname", hostname) + + // Wait until an even interval to start recording monitor statistics. + // If we are interrupted before the interval for some reason, exit early. 
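	// (Editorial note: for example, with a 10s store interval a monitor opened
	// at hh:mm:07 waits until hh:mm:10 before recording its first sample, so
	// subsequent samples fall on even interval boundaries.)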
+ if err := m.waitUntilInterval(m.storeInterval); err != nil { + return } tick := time.NewTicker(m.storeInterval) defer tick.Stop() + for { select { - case <-tick.C: - m.createInternalStorage() + case now := <-tick.C: + now = now.Truncate(m.storeInterval) + func() { + m.mu.Lock() + defer m.mu.Unlock() + m.createInternalStorage() + }() - stats, err := m.Statistics(clusterTags) + stats, err := m.Statistics(m.globalTags) if err != nil { m.Logger.Printf("failed to retrieve registered statistics: %s", err) - continue + return } points := make(models.Points, 0, len(stats)) for _, s := range stats { - pt, err := models.NewPoint(s.Name, s.Tags, s.Values, time.Now().Truncate(time.Second)) + pt, err := models.NewPoint(s.Name, models.NewTags(s.Tags), s.Values, now) if err != nil { m.Logger.Printf("Dropping point %v: %v", s.Name, err) - continue + return } points = append(points, pt) } - err = m.PointsWriter.WritePoints(&cluster.WritePointsRequest{ - Database: m.storeDatabase, - RetentionPolicy: m.storeRetentionPolicy, - ConsistencyLevel: cluster.ConsistencyLevelOne, - Points: points, - }) - if err != nil { - m.Logger.Printf("failed to store statistics: %s", err) - } + func() { + m.mu.RLock() + defer m.mu.RUnlock() + + if err := m.PointsWriter.WritePoints(m.storeDatabase, m.storeRetentionPolicy, points); err != nil { + m.Logger.Printf("failed to store statistics: %s", err) + } + }() case <-m.done: m.Logger.Printf("terminating storage of statistics") return } - } } // Statistic represents the information returned by a single monitor client. type Statistic struct { - Name string `json:"name"` - Tags map[string]string `json:"tags"` - Values map[string]interface{} `json:"values"` -} - -// newStatistic returns a new statistic object. -func newStatistic(name string, tags map[string]string, values map[string]interface{}) *Statistic { - return &Statistic{ - Name: name, - Tags: tags, - Values: values, - } + models.Statistic } // valueNames returns a sorted list of the value names, if any. -func (s *Statistic) valueNames() []string { +func (s *Statistic) ValueNames() []string { a := make([]string, 0, len(s.Values)) for k := range s.Values { a = append(a, k) @@ -420,8 +451,16 @@ return a } -// DiagnosticFromMap returns a Diagnostic from a map. -func DiagnosticFromMap(m map[string]interface{}) *Diagnostic { +type Statistics []*Statistic + +func (a Statistics) Len() int { return len(a) } +func (a Statistics) Less(i, j int) bool { + return a[i].Name < a[j].Name +} +func (a Statistics) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// DiagnosticsFromMap returns a Diagnostics from a map. +func DiagnosticsFromMap(m map[string]interface{}) *diagnostics.Diagnostics { // Display columns in deterministic order. sortedKeys := make([]string, 0, len(m)) for k := range m { @@ -429,7 +468,7 @@ } sort.Strings(sortedKeys) - d := NewDiagnostic(sortedKeys) + d := diagnostics.NewDiagnostics(sortedKeys) row := make([]interface{}, len(sortedKeys)) for i, k := range sortedKeys { row[i] = m[k] diff -Nru influxdb-0.10.0+dfsg1/monitor/service_test.go influxdb-1.1.1+dfsg1/monitor/service_test.go --- influxdb-0.10.0+dfsg1/monitor/service_test.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/monitor/service_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,69 +0,0 @@ -package monitor - -import ( - "strings" - "testing" - - "github.com/influxdb/influxdb" - "github.com/influxdb/influxdb/influxql" - "github.com/influxdb/influxdb/services/meta" -) - -// Test that a registered stats client results in the correct SHOW STATS output. 
-func Test_RegisterStats(t *testing.T) { - monitor := openMonitor(t) - executor := &StatementExecutor{Monitor: monitor} - - // Register stats without tags. - statMap := influxdb.NewStatistics("foo", "foo", nil) - statMap.Add("bar", 1) - statMap.AddFloat("qux", 2.4) - json := executeShowStatsJSON(t, executor) - if !strings.Contains(json, `"columns":["bar","qux"],"values":[[1,2.4]]`) || !strings.Contains(json, `"name":"foo"`) { - t.Fatalf("SHOW STATS response incorrect, got: %s\n", json) - } - - // Register a client with tags. - statMap = influxdb.NewStatistics("bar", "baz", map[string]string{"proto": "tcp"}) - statMap.Add("bar", 1) - statMap.AddFloat("qux", 2.4) - json = executeShowStatsJSON(t, executor) - if !strings.Contains(json, `"columns":["bar","qux"],"values":[[1,2.4]]`) || - !strings.Contains(json, `"name":"baz"`) || - !strings.Contains(json, `"proto":"tcp"`) { - t.Fatalf("SHOW STATS response incorrect, got: %s\n", json) - - } -} - -type mockMetaClient struct{} - -func (m *mockMetaClient) ClusterID() uint64 { return 1 } -func (m *mockMetaClient) IsLeader() bool { return true } -func (m *mockMetaClient) SetDefaultRetentionPolicy(database, name string) error { return nil } -func (m *mockMetaClient) DropRetentionPolicy(database, name string) error { return nil } -func (m *mockMetaClient) CreateDatabase(name string) (*meta.DatabaseInfo, error) { - return nil, nil -} -func (m *mockMetaClient) CreateRetentionPolicy(database string, rpi *meta.RetentionPolicyInfo) (*meta.RetentionPolicyInfo, error) { - return nil, nil -} - -func openMonitor(t *testing.T) *Monitor { - monitor := New(NewConfig()) - monitor.MetaClient = &mockMetaClient{} - err := monitor.Open() - if err != nil { - t.Fatalf("failed to open monitor: %s", err.Error()) - } - return monitor -} - -func executeShowStatsJSON(t *testing.T, s *StatementExecutor) string { - r := s.ExecuteStatement(&influxql.ShowStatsStatement{}) - b, err := r.MarshalJSON() - if err != nil { - t.Fatalf("failed to decode SHOW STATS response: %s", err.Error()) - } - return string(b) -} diff -Nru influxdb-0.10.0+dfsg1/monitor/statement_executor.go influxdb-1.1.1+dfsg1/monitor/statement_executor.go --- influxdb-0.10.0+dfsg1/monitor/statement_executor.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/monitor/statement_executor.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,81 +0,0 @@ -package monitor - -import ( - "fmt" - "sort" - - "github.com/influxdb/influxdb/influxql" - "github.com/influxdb/influxdb/models" -) - -// StatementExecutor translates InfluxQL queries to Monitor methods. -type StatementExecutor struct { - Monitor interface { - Statistics(map[string]string) ([]*Statistic, error) - Diagnostics() (map[string]*Diagnostic, error) - } -} - -// ExecuteStatement executes monitor-related query statements. 
-func (s *StatementExecutor) ExecuteStatement(stmt influxql.Statement) *influxql.Result { - switch stmt := stmt.(type) { - case *influxql.ShowStatsStatement: - return s.executeShowStatistics(stmt.Module) - case *influxql.ShowDiagnosticsStatement: - return s.executeShowDiagnostics(stmt.Module) - default: - panic(fmt.Sprintf("unsupported statement type: %T", stmt)) - } -} - -func (s *StatementExecutor) executeShowStatistics(module string) *influxql.Result { - stats, err := s.Monitor.Statistics(nil) - if err != nil { - return &influxql.Result{Err: err} - } - - var rows []*models.Row - for _, stat := range stats { - if module != "" && stat.Name != module { - continue - } - row := &models.Row{Name: stat.Name, Tags: stat.Tags} - - values := make([]interface{}, 0, len(stat.Values)) - for _, k := range stat.valueNames() { - row.Columns = append(row.Columns, k) - values = append(values, stat.Values[k]) - } - row.Values = [][]interface{}{values} - rows = append(rows, row) - } - return &influxql.Result{Series: rows} -} - -func (s *StatementExecutor) executeShowDiagnostics(module string) *influxql.Result { - diags, err := s.Monitor.Diagnostics() - if err != nil { - return &influxql.Result{Err: err} - } - rows := make([]*models.Row, 0, len(diags)) - - // Get a sorted list of diagnostics keys. - sortedKeys := make([]string, 0, len(diags)) - for k := range diags { - sortedKeys = append(sortedKeys, k) - } - sort.Strings(sortedKeys) - - for _, k := range sortedKeys { - if module != "" && k != module { - continue - } - - row := &models.Row{Name: k} - - row.Columns = diags[k].Columns - row.Values = diags[k].Rows - rows = append(rows, row) - } - return &influxql.Result{Series: rows} -} diff -Nru influxdb-0.10.0+dfsg1/monitor/system.go influxdb-1.1.1+dfsg1/monitor/system.go --- influxdb-0.10.0+dfsg1/monitor/system.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/monitor/system.go 2016-12-06 21:36:15.000000000 +0000 @@ -3,6 +3,8 @@ import ( "os" "time" + + "github.com/influxdata/influxdb/monitor/diagnostics" ) var startTime time.Time @@ -14,7 +16,7 @@ // system captures system-level diagnostics type system struct{} -func (s *system) Diagnostics() (*Diagnostic, error) { +func (s *system) Diagnostics() (*diagnostics.Diagnostics, error) { diagnostics := map[string]interface{}{ "PID": os.Getpid(), "currentTime": time.Now().UTC(), @@ -22,5 +24,5 @@ "uptime": time.Since(startTime).String(), } - return DiagnosticFromMap(diagnostics), nil + return DiagnosticsFromMap(diagnostics), nil } diff -Nru influxdb-0.10.0+dfsg1/node.go influxdb-1.1.1+dfsg1/node.go --- influxdb-0.10.0+dfsg1/node.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/node.go 2016-12-06 21:36:15.000000000 +0000 @@ -16,15 +16,14 @@ ) type Node struct { - path string - ID uint64 - MetaServers []string + path string + ID uint64 } // LoadNode will load the node information from disk if present -func LoadNode(path string, addrs []string) (*Node, error) { +func LoadNode(path string) (*Node, error) { // Always check to see if we are upgrading first - if err := upgradeNodeFile(path, addrs); err != nil { + if err := upgradeNodeFile(path); err != nil { return nil, err } @@ -46,10 +45,9 @@ } // NewNode will return a new node -func NewNode(path string, addrs []string) *Node { +func NewNode(path string) *Node { return &Node{ - path: path, - MetaServers: addrs, + path: path, } } @@ -62,35 +60,20 @@ if err != nil { return err } - defer f.Close() - if err := json.NewEncoder(f).Encode(n); err != nil { + if err = json.NewEncoder(f).Encode(n); 
err != nil { + f.Close() return err } - return os.Rename(tmpFile, file) -} - -// AddMetaServers adds the addrs to the set of MetaServers known to this node. -// If an addr already exists, it will not be re-added. -func (n *Node) AddMetaServers(addrs []string) { - unique := map[string]struct{}{} - for _, addr := range n.MetaServers { - unique[addr] = struct{}{} - } - - for _, addr := range addrs { - unique[addr] = struct{}{} + if err = f.Close(); nil != err { + return err } - metaServers := []string{} - for addr := range unique { - metaServers = append(metaServers, addr) - } - n.MetaServers = metaServers + return os.Rename(tmpFile, file) } -func upgradeNodeFile(path string, addrs []string) error { +func upgradeNodeFile(path string) error { oldFile := filepath.Join(path, oldNodeFile) b, err := ioutil.ReadFile(oldFile) if err != nil { @@ -118,8 +101,7 @@ } n := &Node{ - path: path, - MetaServers: addrs, + path: path, } if n.ID, err = strconv.ParseUint(string(b), 10, 64); err != nil { return err diff -Nru influxdb-0.10.0+dfsg1/pkg/deep/equal.go influxdb-1.1.1+dfsg1/pkg/deep/equal.go --- influxdb-0.10.0+dfsg1/pkg/deep/equal.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/pkg/deep/equal.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,184 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// License. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package deep // import "github.com/influxdata/influxdb/pkg/deep" + +import ( + "fmt" + "math" + "reflect" +) + +// Equal is a copy of reflect.DeepEqual except that it treats NaN == NaN as true. +func Equal(a1, a2 interface{}) bool { + if a1 == nil || a2 == nil { + return a1 == a2 + } + v1 := reflect.ValueOf(a1) + v2 := reflect.ValueOf(a2) + if v1.Type() != v2.Type() { + return false + } + return deepValueEqual(v1, v2, make(map[visit]bool), 0) +} + +// Tests for deep equality using reflected types. The map argument tracks +// comparisons that have already been seen, which allows short circuiting on +// recursive types. 
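// Editorial illustration of the difference from the standard library:
//
//	reflect.DeepEqual([]float64{math.NaN()}, []float64{math.NaN()}) // false
//	deep.Equal([]float64{math.NaN()}, []float64{math.NaN()})        // true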
+func deepValueEqual(v1, v2 reflect.Value, visited map[visit]bool, depth int) bool { + if !v1.IsValid() || !v2.IsValid() { + return v1.IsValid() == v2.IsValid() + } + if v1.Type() != v2.Type() { + return false + } + + // if depth > 10 { panic("deepValueEqual") } // for debugging + hard := func(k reflect.Kind) bool { + switch k { + case reflect.Array, reflect.Map, reflect.Slice, reflect.Struct: + return true + } + return false + } + + if v1.CanAddr() && v2.CanAddr() && hard(v1.Kind()) { + addr1 := v1.UnsafeAddr() + addr2 := v2.UnsafeAddr() + if addr1 > addr2 { + // Canonicalize order to reduce number of entries in visited. + addr1, addr2 = addr2, addr1 + } + + // Short circuit if references are identical ... + if addr1 == addr2 { + return true + } + + // ... or already seen + typ := v1.Type() + v := visit{addr1, addr2, typ} + if visited[v] { + return true + } + + // Remember for later. + visited[v] = true + } + + switch v1.Kind() { + case reflect.Array: + for i := 0; i < v1.Len(); i++ { + if !deepValueEqual(v1.Index(i), v2.Index(i), visited, depth+1) { + return false + } + } + return true + case reflect.Slice: + if v1.IsNil() != v2.IsNil() { + return false + } + if v1.Len() != v2.Len() { + return false + } + if v1.Pointer() == v2.Pointer() { + return true + } + for i := 0; i < v1.Len(); i++ { + if !deepValueEqual(v1.Index(i), v2.Index(i), visited, depth+1) { + return false + } + } + return true + case reflect.Interface: + if v1.IsNil() || v2.IsNil() { + return v1.IsNil() == v2.IsNil() + } + return deepValueEqual(v1.Elem(), v2.Elem(), visited, depth+1) + case reflect.Ptr: + return deepValueEqual(v1.Elem(), v2.Elem(), visited, depth+1) + case reflect.Struct: + for i, n := 0, v1.NumField(); i < n; i++ { + if !deepValueEqual(v1.Field(i), v2.Field(i), visited, depth+1) { + return false + } + } + return true + case reflect.Map: + if v1.IsNil() != v2.IsNil() { + return false + } + if v1.Len() != v2.Len() { + return false + } + if v1.Pointer() == v2.Pointer() { + return true + } + for _, k := range v1.MapKeys() { + if !deepValueEqual(v1.MapIndex(k), v2.MapIndex(k), visited, depth+1) { + return false + } + } + return true + case reflect.Func: + if v1.IsNil() && v2.IsNil() { + return true + } + // Can't do better than this: + return false + case reflect.String: + return v1.String() == v2.String() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + case reflect.Float32, reflect.Float64: + // Special handling for floats so that NaN == NaN is true. + f1, f2 := v1.Float(), v2.Float() + if math.IsNaN(f1) && math.IsNaN(f2) { + return true + } + return f1 == f2 + case reflect.Bool: + return v1.Bool() == v2.Bool() + default: + panic(fmt.Sprintf("cannot compare type: %s", v1.Kind().String())) + } +} + +// During deepValueEqual, must keep track of checks that are +// in progress. The comparison algorithm assumes that all +// checks in progress are true when it reencounters them. +// Visited comparisons are stored in a map indexed by visit. 
+type visit struct { + a1 uintptr + a2 uintptr + typ reflect.Type +} diff -Nru influxdb-0.10.0+dfsg1/pkg/escape/bytes.go influxdb-1.1.1+dfsg1/pkg/escape/bytes.go --- influxdb-0.10.0+dfsg1/pkg/escape/bytes.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/pkg/escape/bytes.go 2016-12-06 21:36:15.000000000 +0000 @@ -1,6 +1,9 @@ -package escape +package escape // import "github.com/influxdata/influxdb/pkg/escape" -import "bytes" +import ( + "bytes" + "strings" +) func Bytes(in []byte) []byte { for b, esc := range Codes { @@ -9,7 +12,54 @@ return in } +const escapeChars = `," =` + +func IsEscaped(b []byte) bool { + for len(b) > 0 { + i := bytes.IndexByte(b, '\\') + if i < 0 { + return false + } + + if i+1 < len(b) && strings.IndexByte(escapeChars, b[i+1]) >= 0 { + return true + } + b = b[i+1:] + } + return false +} + +func AppendUnescaped(dst, src []byte) []byte { + var pos int + for len(src) > 0 { + next := bytes.IndexByte(src[pos:], '\\') + if next < 0 || pos+next+1 >= len(src) { + return append(dst, src...) + } + + if pos+next+1 < len(src) && strings.IndexByte(escapeChars, src[pos+next+1]) >= 0 { + if pos+next > 0 { + dst = append(dst, src[:pos+next]...) + } + src = src[pos+next+1:] + pos = 0 + } else { + pos += next + 1 + } + } + + return dst +} + func Unescape(in []byte) []byte { + if len(in) == 0 { + return nil + } + + if bytes.IndexByte(in, '\\') == -1 { + return in + } + i := 0 inLen := len(in) var out []byte diff -Nru influxdb-0.10.0+dfsg1/pkg/escape/bytes_test.go influxdb-1.1.1+dfsg1/pkg/escape/bytes_test.go --- influxdb-0.10.0+dfsg1/pkg/escape/bytes_test.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/pkg/escape/bytes_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -1,7 +1,9 @@ package escape import ( + "bytes" "reflect" + "strings" "testing" ) @@ -43,3 +45,24 @@ } } } + +func TestAppendUnescaped(t *testing.T) { + cases := strings.Split(strings.TrimSpace(` +normal +inv\alid +goo\"d +sp\ ace +\,\"\ \= +f\\\ x +`), "\n") + + for _, c := range cases { + exp := Unescape([]byte(c)) + got := AppendUnescaped(nil, []byte(c)) + + if !bytes.Equal(got, exp) { + t.Errorf("AppendUnescaped failed for %#q: got %#q, exp %#q", c, got, exp) + } + } + +} diff -Nru influxdb-0.10.0+dfsg1/pkg/escape/strings.go influxdb-1.1.1+dfsg1/pkg/escape/strings.go --- influxdb-0.10.0+dfsg1/pkg/escape/strings.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/pkg/escape/strings.go 2016-12-06 21:36:15.000000000 +0000 @@ -20,6 +20,10 @@ } func UnescapeString(in string) string { + if strings.IndexByte(in, '\\') == -1 { + return in + } + for b, esc := range codesStr { in = strings.Replace(in, esc, b, -1) } diff -Nru influxdb-0.10.0+dfsg1/pkg/limiter/fixed.go influxdb-1.1.1+dfsg1/pkg/limiter/fixed.go --- influxdb-0.10.0+dfsg1/pkg/limiter/fixed.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/pkg/limiter/fixed.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,18 @@ +package limiter + +// Fixed is a simple channel based concurrency limiter. It uses a fixed +// size channel to limit callers from proceeding until there is a value avalable +// in the channel. If all are in-use, the caller blocks until one is freed. 
+type Fixed chan struct{} + +func NewFixed(limit int) Fixed { + return make(Fixed, limit) +} + +func (t Fixed) Take() { + t <- struct{}{} +} + +func (t Fixed) Release() { + <-t +} diff -Nru influxdb-0.10.0+dfsg1/pkg/pool/bytes.go influxdb-1.1.1+dfsg1/pkg/pool/bytes.go --- influxdb-0.10.0+dfsg1/pkg/pool/bytes.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/pkg/pool/bytes.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,42 @@ +package pool + +// Bytes is a pool of byte slices that can be re-used. Slices in +// this pool will not be garbage collected when not in use. +type Bytes struct { + pool chan []byte +} + +// NewBytes returns a Bytes pool with capacity for max byte slices +// to be pool. +func NewBytes(max int) *Bytes { + return &Bytes{ + pool: make(chan []byte, max), + } +} + +// Get returns a byte slice size with at least sz capacity. Items +// returned may not be in the zero state and should be reset by the +// caller. +func (p *Bytes) Get(sz int) []byte { + var c []byte + select { + case c = <-p.pool: + default: + return make([]byte, sz) + } + + if cap(c) < sz { + return make([]byte, sz) + } + + return c[:sz] +} + +// Put returns a slice back to the pool. If the pool is full, the byte +// slice is discarded. +func (p *Bytes) Put(c []byte) { + select { + case p.pool <- c: + default: + } +} diff -Nru influxdb-0.10.0+dfsg1/pkg/pool/generic.go influxdb-1.1.1+dfsg1/pkg/pool/generic.go --- influxdb-0.10.0+dfsg1/pkg/pool/generic.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/pkg/pool/generic.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,40 @@ +package pool + +// Generic is a pool of types that can be re-used. Items in +// this pool will not be garbage collected when not in use. +type Generic struct { + pool chan interface{} + fn func(sz int) interface{} +} + +// NewGeneric returns a Generic pool with capacity for max items +// to be pool. +func NewGeneric(max int, fn func(sz int) interface{}) *Generic { + return &Generic{ + pool: make(chan interface{}, max), + fn: fn, + } +} + +// Get returns a item from the pool or a new instance if the pool +// is empty. Items returned may not be in the zero state and should +// be reset by the caller. +func (p *Generic) Get(sz int) interface{} { + var c interface{} + select { + case c = <-p.pool: + default: + c = p.fn(sz) + } + + return c +} + +// Put returns an item back to the pool. If the pool is full, the item +// is discarded. 
+func (p *Generic) Put(c interface{}) { + select { + case p.pool <- c: + default: + } +} diff -Nru influxdb-0.10.0+dfsg1/pkg/slices/strings.go influxdb-1.1.1+dfsg1/pkg/slices/strings.go --- influxdb-0.10.0+dfsg1/pkg/slices/strings.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/pkg/slices/strings.go 2016-12-06 21:36:15.000000000 +0000 @@ -1,4 +1,4 @@ -package slices +package slices // import "github.com/influxdata/influxdb/pkg/slices" import "strings" diff -Nru influxdb-0.10.0+dfsg1/README.md influxdb-1.1.1+dfsg1/README.md --- influxdb-0.10.0+dfsg1/README.md 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/README.md 2016-12-06 21:36:15.000000000 +0000 @@ -1,41 +1,32 @@ -# InfluxDB [![Circle CI](https://circleci.com/gh/influxdata/influxdb/tree/master.svg?style=svg)](https://circleci.com/gh/influxdata/influxdb/tree/master) +# InfluxDB [![Circle CI](https://circleci.com/gh/influxdata/influxdb/tree/master.svg?style=svg)](https://circleci.com/gh/influxdata/influxdb/tree/master) [![Go Report Card](https://goreportcard.com/badge/github.com/influxdata/influxdb)](https://goreportcard.com/report/github.com/influxdata/influxdb) [![Docker pulls](https://img.shields.io/docker/pulls/library/influxdb.svg)](https://hub.docker.com/_/influxdb/) -## An Open-Source, Distributed, Time Series Database +## An Open-Source Time Series Database -> InfluxDB v0.9.0 is now out. Going forward, the 0.9.x series of releases will not make breaking API changes or breaking changes to the underlying data storage. However, 0.9.x clustering should be considered an alpha release. - -InfluxDB is an open source **distributed time series database** with +InfluxDB is an open source **time series database** with **no external dependencies**. It's useful for recording metrics, events, and performing analytics. ## Features -* Built-in [HTTP API](https://docs.influxdata.com/influxdb/v0.9/guides/writing_data/) so you don't have to write any server side code to get up and running. +* Built-in [HTTP API](https://docs.influxdata.com/influxdb/latest/guides/writing_data/) so you don't have to write any server side code to get up and running. * Data can be tagged, allowing very flexible querying. * SQL-like query language. -* Clustering is supported out of the box, so that you can scale horizontally to handle your data. * Simple to install and manage, and fast to get data in and out. * It aims to answer queries in real-time. That means every data point is indexed as it comes in and is immediately available in queries that should return in < 100ms. -## Getting Started -*The following directions apply only to the 0.9.x series or building from the source on master.* - -### Building +## Installation -You don't need to build the project to use it - you can use any of our -[pre-built packages](https://influxdata.com/downloads/) to install InfluxDB. That's -the recommended way to get it running. However, if you want to contribute to the core of InfluxDB, you'll need to build. -For those adventurous enough, you can -[follow along on our docs](http://github.com/influxdata/influxdb/blob/master/CONTRIBUTING.md). +We recommend installing InfluxDB using one of the [pre-built packages](https://influxdata.com/downloads/#influxdb). Then start InfluxDB using: -### Starting InfluxDB * `service influxdb start` if you have installed InfluxDB using an official Debian or RPM package. * `systemctl start influxdb` if you have installed InfluxDB using an official Debian or RPM package, and are running a distro with `systemd`. 
For example, Ubuntu 15 or later. * `$GOPATH/bin/influxd` if you have built InfluxDB from source. -### Creating your first database +## Getting Started + +### Create your first database ``` curl -G 'http://localhost:8086/query' --data-urlencode "q=CREATE DATABASE mydb" @@ -65,12 +56,16 @@ --data-urlencode "q=SELECT mean(load) FROM cpu WHERE region='uswest'" ``` -## Helpful Links +## Documentation + +* Read more about the [design goals and motivations of the project](https://docs.influxdata.com/influxdb/latest/). +* Follow the [getting started guide](https://docs.influxdata.com/influxdb/latest/introduction/getting_started/) to learn the basics in just a few minutes. +* Learn more about [InfluxDB's key concepts](https://docs.influxdata.com/influxdb/latest/guides/writing_data/). + +## Contributing -* Understand the [design goals and motivations of the project](https://docs.influxdata.com/influxdb/v0.9/introduction/overview/). -* Follow the [getting started guide](https://docs.influxdata.com/influxdb/v0.9/introduction/getting_started/) to find out how to install InfluxDB, start writing more data, and issue more queries - in just a few minutes. -* See the [HTTP API documentation to start writing a library for your favorite language](https://docs.influxdata.com/influxdb/v0.9/guides/writing_data/). +If you're feeling adventurous and want to contribute to InfluxDB, see our [contributing doc](https://github.com/influxdata/influxdb/blob/master/CONTRIBUTING.md) for info on how to make feature requests, build from source, and run tests. ## Looking for Support? -InfluxDB has technical support subscriptions to help your project succeed. We offer Developer Support for organizations in active development and Production Support for companies requiring the best response times and SLAs on technical fixes. Visit our [support page](https://influxdata.com/services/) to learn which subscription is right for you, or contact sales@influxdb.com for a quote. +InfluxDB offers a number of services to help your project succeed. We offer Developer Support for organizations in active development, Managed Hosting to make it easy to move into production, and Enterprise Support for companies requiring the best response times, SLAs, and technical fixes. Visit our [support page](https://influxdata.com/services/) or contact [sales@influxdb.com](mailto:sales@influxdb.com) to learn how we can best help you succeed. 
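The curl examples above map directly onto any HTTP client. The following is a minimal, hedged sketch in Go (not part of this package) that assumes a local InfluxDB listening on the default port 8086; it creates a database through `/query`, mirroring the curl example above, and writes a single point in line protocol through `/write`. The measurement, tags, and field are illustrative only.

```
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"strings"
)

func main() {
	base := "http://localhost:8086" // assumption: local InfluxDB on the default port

	// Create the database, mirroring the curl example above.
	resp, err := http.Get(base + "/query?q=" + url.QueryEscape("CREATE DATABASE mydb"))
	if err != nil {
		panic(err)
	}
	resp.Body.Close()

	// Write one point in line protocol: measurement,tag=value field=value.
	point := "cpu,host=server01,region=uswest load=42"
	resp, err = http.Post(base+"/write?db=mydb", "text/plain", strings.NewReader(point))
	if err != nil {
		panic(err)
	}
	body, _ := ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	fmt.Println(resp.Status, string(body))
}
```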
diff -Nru influxdb-0.10.0+dfsg1/scripts/influxdb.service influxdb-1.1.1+dfsg1/scripts/influxdb.service --- influxdb-0.10.0+dfsg1/scripts/influxdb.service 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/scripts/influxdb.service 2016-12-06 21:36:15.000000000 +0000 @@ -2,17 +2,15 @@ [Unit] Description=InfluxDB is an open-source, distributed, time series database -Documentation=https://influxdb.com/docs/ -After=network.target +Documentation=https://docs.influxdata.com/influxdb/ +After=network-online.target [Service] User=influxdb Group=influxdb LimitNOFILE=65536 -Environment='STDOUT=/dev/null' -Environment='STDERR=/var/log/influxdb/influxd.log' EnvironmentFile=-/etc/default/influxdb -ExecStart=/bin/sh -c "/usr/bin/influxd -config /etc/influxdb/influxdb.conf ${INFLUXD_OPTS} >>${STDOUT} 2>>${STDERR}" +ExecStart=/usr/bin/influxd -config /etc/influxdb/influxdb.conf ${INFLUXD_OPTS} KillMode=control-group Restart=on-failure diff -Nru influxdb-0.10.0+dfsg1/scripts/init.sh influxdb-1.1.1+dfsg1/scripts/init.sh --- influxdb-0.10.0+dfsg1/scripts/init.sh 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/scripts/init.sh 2016-12-06 21:36:15.000000000 +0000 @@ -1,25 +1,17 @@ -#! /usr/bin/env bash - +#!/bin/bash ### BEGIN INIT INFO # Provides: influxd # Required-Start: $all # Required-Stop: $remote_fs $syslog # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 -# Short-Description: Start influxd at boot time +# Short-Description: Start the InfluxDB process ### END INIT INFO # If you modify this, please make sure to also edit influxdb.service -# this init script supports three different variations: -# 1. New lsb that define start-stop-daemon -# 2. Old lsb that don't have start-stop-daemon but define, log, pidofproc and killproc -# 3. Centos installations without lsb-core installed -# -# In the third case we have to define our own functions which are very dumb -# and expect the args to be positioned correctly. # Command-line options that can be set in /etc/default/influxdb. These will override -# any config file values. Example: "-join http://1.2.3.4:8086" +# any config file values. DEFAULT=/etc/default/influxdb # Daemon options @@ -32,10 +24,19 @@ USER=influxdb GROUP=influxdb -# Daemon name, where is the actual executable -# If the daemon is not there, then exit. +# Check for sudo or root privileges before continuing +if [ "$UID" != "0" ]; then + echo "You must be root to run this script" + exit 1 +fi + +# Daemon name, where is the actual executable If the daemon is not +# there, then exit. DAEMON=/usr/bin/influxd -[ -x $DAEMON ] || exit 5 +if [ ! -x $DAEMON ]; then + echo "Executable $DAEMON does not exist!" + exit 5 +fi # Configuration file CONFIG=/etc/influxdb/influxdb.conf @@ -72,139 +73,151 @@ mkdir -p $(dirname $STDERR) fi -# Overwrite init script variables with /etc/default/influxdb values +# Override init script variables with DEFAULT values if [ -r $DEFAULT ]; then source $DEFAULT fi -function pidofproc() { - if [ $# -ne 3 ]; then - echo "Expected three arguments, e.g. $0 -p pidfile daemon-name" - fi +function log_failure_msg() { + echo "$@" "[ FAILED ]" +} - PID=`pgrep -f $3` - local PIDFILE=`cat $2` +function log_success_msg() { + echo "$@" "[ OK ]" +} - if [ "x$PIDFILE" == "x" ]; then - return 1 +function start() { + # Check if config file exist + if [ ! 
-r $CONFIG ]; then + log_failure_msg "config file $CONFIG doesn't exist (or you don't have permission to view)" + exit 4 fi - if [ "x$PID" != "x" -a "$PIDFILE" == "$PID" ]; then - return 0 + # Check that the PID file exists, and check the actual status of process + if [ -f $PIDFILE ]; then + PID="$(cat $PIDFILE)" + if kill -0 "$PID" &>/dev/null; then + # Process is already up + log_success_msg "$NAME process is already running" + return 0 + fi + else + su -s /bin/sh -c "touch $PIDFILE" $USER &>/dev/null + if [ $? -ne 0 ]; then + log_failure_msg "$PIDFILE not writable, check permissions" + exit 5 + fi fi - return 1 -} - -function killproc() { - if [ $# -ne 3 ]; then - echo "Expected three arguments, e.g. $0 -p pidfile signal" + # Bump the file limits, before launching the daemon. These will + # carry over to launched processes. + ulimit -n $OPEN_FILE_LIMIT + if [ $? -ne 0 ]; then + log_failure_msg "Unable to set ulimit to $OPEN_FILE_LIMIT" + exit 1 fi - PID=`cat $2` + # Launch process + echo "Starting $NAME..." + if which start-stop-daemon &>/dev/null; then + start-stop-daemon \ + --chuid $GROUP:$USER \ + --start \ + --quiet \ + --pidfile $PIDFILE \ + --exec $DAEMON \ + -- \ + -pidfile $PIDFILE \ + -config $CONFIG \ + $INFLUXD_OPTS >>$STDOUT 2>>$STDERR & + else + local CMD="$DAEMON -pidfile $PIDFILE -config $CONFIG $INFLUXD_OPTS >>$STDOUT 2>>$STDERR &" + su -s /bin/sh -c "$CMD" $USER + fi - /bin/kill -s $3 $PID - while true; do - pidof `basename $DAEMON` >/dev/null - if [ $? -ne 0 ]; then + # Sleep to verify process is still up + sleep 1 + if [ -f $PIDFILE ]; then + # PIDFILE exists + if kill -0 $(cat $PIDFILE) &>/dev/null; then + # PID up, service running + log_success_msg "$NAME process was started" return 0 fi + fi + log_failure_msg "$NAME process was unable to start" + exit 1 +} - sleep 1 - n=$(expr $n + 1) - if [ $n -eq 30 ]; then - /bin/kill -s SIGKILL $PID - return 0 +function stop() { + # Stop the daemon. + if [ -f $PIDFILE ]; then + local PID="$(cat $PIDFILE)" + if kill -0 $PID &>/dev/null; then + echo "Stopping $NAME..." + # Process still up, send SIGTERM and remove PIDFILE + kill -s SIGTERM $PID &>/dev/null && rm -f "$PIDFILE" &>/dev/null + while true; do + # Enter loop to ensure process is stopped + kill -0 $PID &>/dev/null + if [ "$?" != "0" ]; then + # Process stopped, break from loop + log_success_msg "$NAME process was stopped" + return 0 + fi + + # Process still up after signal, sleep and wait + sleep 1 + n=$(expr $n + 1) + if [ $n -eq 30 ]; then + # After 30 seconds, send SIGKILL + echo "Timeout exceeded, sending SIGKILL..." + kill -s SIGKILL $PID &>/dev/null + elif [ $? -eq 40 ]; then + # After 40 seconds, error out + log_failure_msg "could not stop $NAME process" + exit 1 + fi + done fi - done + fi + log_success_msg "$NAME process already stopped" } -function log_failure_msg() { - echo "$@" "[ FAILED ]" +function restart() { + # Restart the daemon. + stop + start } -function log_success_msg() { - echo "$@" "[ OK ]" +function status() { + # Check the status of the process. + if [ -f $PIDFILE ]; then + PID="$(cat $PIDFILE)" + if kill -0 $PID &>/dev/null; then + log_success_msg "$NAME process is running" + exit 0 + fi + fi + log_failure_msg "$NAME process is not running" + exit 1 } case $1 in start) - # Check if config file exist - if [ ! 
-r $CONFIG ]; then - log_failure_msg "config file doesn't exist (or you don't have permission to view)" - exit 4 - fi - - # Checked the PID file exists and check the actual status of process - if [ -e $PIDFILE ]; then - PID="$(pgrep -f $PIDFILE)" - if test ! -z $PID && kill -0 "$PID" &>/dev/null; then - # If the status is SUCCESS then don't need to start again. - log_failure_msg "$NAME process is running" - exit 0 # Exit - fi - # if PID file does not exist, check if writable - else - su -s /bin/sh -c "touch $PIDFILE" $USER > /dev/null 2>&1 - if [ $? -ne 0 ]; then - log_failure_msg "$PIDFILE not writable, check permissions" - exit 5 - fi - fi - - # Bump the file limits, before launching the daemon. These will carry over to - # launched processes. - ulimit -n $OPEN_FILE_LIMIT - if [ $? -ne 0 ]; then - log_failure_msg "set open file limit to $OPEN_FILE_LIMIT" - exit 1 - fi - - log_success_msg "Starting the process" "$NAME" - if which start-stop-daemon > /dev/null 2>&1; then - start-stop-daemon --chuid $GROUP:$USER --start --quiet --pidfile $PIDFILE --exec $DAEMON -- -pidfile $PIDFILE -config $CONFIG $INFLUXD_OPTS >>$STDOUT 2>>$STDERR & - else - su -s /bin/sh -c "nohup $DAEMON -pidfile $PIDFILE -config $CONFIG $INFLUXD_OPTS >>$STDOUT 2>>$STDERR &" $USER - fi - log_success_msg "$NAME process was started" + start ;; stop) - # Stop the daemon. - if [ -e $PIDFILE ]; then - PID="$(pgrep -f $PIDFILE)" - if test ! -z $PID && kill -0 "$PID" &>/dev/null; then - if killproc -p $PIDFILE SIGTERM && /bin/rm -rf $PIDFILE; then - log_success_msg "$NAME process was stopped" - else - log_failure_msg "$NAME failed to stop service" - fi - fi - else - log_failure_msg "$NAME process is not running" - fi + stop ;; restart) - # Restart the daemon. - $0 stop && sleep 2 && $0 start + restart ;; status) - # Check the status of the process. - if [ -e $PIDFILE ]; then - PID="$(pgrep -f $PIDFILE)" - if test ! -z $PID && test -d "/proc/$PID" &>/dev/null; then - log_success_msg "$NAME Process is running" - exit 0 - else - log_failure_msg "$NAME Process is not running" - exit 1 - fi - else - log_failure_msg "$NAME Process is not running" - exit 3 - fi + status ;; version) diff -Nru influxdb-0.10.0+dfsg1/services/admin/admin.go influxdb-1.1.1+dfsg1/services/admin/admin.go --- influxdb-0.10.0+dfsg1/services/admin/admin.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/services/admin/admin.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,4 @@ +package admin // import "github.com/influxdata/influxdb/services/admin" + +//go:generate statik -src=assets +//go:generate go fmt statik/statik.go diff -Nru influxdb-0.10.0+dfsg1/services/admin/assets/css/admin.css influxdb-1.1.1+dfsg1/services/admin/assets/css/admin.css --- influxdb-0.10.0+dfsg1/services/admin/assets/css/admin.css 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/services/admin/assets/css/admin.css 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,87 @@ +body { + padding-top: 70px; + /* Required padding for .navbar-fixed-top. Remove if using .navbar-static-top. Change if height of navigation changes. */ +} + +html, +body { + height: 100%; + /* The html and body elements cannot have any padding or margin. 
*/ +} + +code { + display: block; +} + +#settings { + display: none; +} + +#settings form > div { + margin-right: 20px; +} + +#settings form input#port { + width: 80px; +} + +#settings form label { + padding-right: 5px; +} + +div#content { + margin-bottom: -10px; +} + +div#table h2 { + color: #999; + margin-top: -8px; + font-size: 16px +} + +textarea#content-data { + font-family: "Courier New"; + height: 200px; +} + +div#query-alerts { + margin-top: 30px; +} + +div#modal-error, div#modal-success, div#query-error, div#query-success { + display: none; +} + +/* Wrapper for page content to push down footer */ +#wrap { + min-height: 100%; + height: auto !important; + height: 100%; + /* Negative indent footer by it's height */ + margin: 0 auto -60px; +} + +/* Set the fixed height of the footer here */ +#push, +#footer { + height: 60px; +} +#footer { + background-color: #f5f5f5; + border-top: 1px solid #dfdfdf; +} + +#footer p { + margin: 20px 0; +} + +/* Lastly, apply responsive CSS fixes as necessary */ +@media (max-width: 767px) { + #footer { + margin-left: -20px; + margin-right: -20px; + padding-left: 20px; + padding-right: 20px; + } +} + diff -Nru influxdb-0.10.0+dfsg1/services/admin/assets/css/dropdowns-enhancement.css influxdb-1.1.1+dfsg1/services/admin/assets/css/dropdowns-enhancement.css --- influxdb-0.10.0+dfsg1/services/admin/assets/css/dropdowns-enhancement.css 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/services/admin/assets/css/dropdowns-enhancement.css 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,294 @@ +.dropdown-menu > li > label { + display: block; + padding: 3px 20px; + clear: both; + font-weight: normal; + line-height: 1.42857143; + color: #333333; + white-space: nowrap; +} +.dropdown-menu > li > label:hover, +.dropdown-menu > li > label:focus { + text-decoration: none; + color: #262626; + background-color: #f5f5f5; +} +.dropdown-menu > li > input:checked ~ label, +.dropdown-menu > li > input:checked ~ label:hover, +.dropdown-menu > li > input:checked ~ label:focus, +.dropdown-menu > .active > label, +.dropdown-menu > .active > label:hover, +.dropdown-menu > .active > label:focus { + color: #ffffff; + text-decoration: none; + outline: 0; + background-color: #428bca; +} +.dropdown-menu > li > input[disabled] ~ label, +.dropdown-menu > li > input[disabled] ~ label:hover, +.dropdown-menu > li > input[disabled] ~ label:focus, +.dropdown-menu > .disabled > label, +.dropdown-menu > .disabled > label:hover, +.dropdown-menu > .disabled > label:focus { + color: #999999; +} +.dropdown-menu > li > input[disabled] ~ label:hover, +.dropdown-menu > li > input[disabled] ~ label:focus, +.dropdown-menu > .disabled > label:hover, +.dropdown-menu > .disabled > label:focus { + text-decoration: none; + background-color: transparent; + background-image: none; + filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); + cursor: not-allowed; +} +.dropdown-menu > li > label { + margin-bottom: 0; + cursor: pointer; +} +.dropdown-menu > li > input[type="radio"], +.dropdown-menu > li > input[type="checkbox"] { + display: none; + position: absolute; + top: -9999em; + left: -9999em; +} +.dropdown-menu > li > label:focus, +.dropdown-menu > li > input:focus ~ label { + outline: thin dotted; + outline: 5px auto -webkit-focus-ring-color; + outline-offset: -2px; +} +.dropdown-menu.pull-right { + right: 0; + left: auto; +} +.dropdown-menu.pull-top { + bottom: 100%; + top: auto; + margin: 0 0 2px; + -webkit-box-shadow: 0 -6px 12px rgba(0, 0, 0, 0.175); + box-shadow: 0 -6px 12px 
rgba(0, 0, 0, 0.175); +} +.dropdown-menu.pull-center { + right: 50%; + left: auto; +} +.dropdown-menu.pull-middle { + right: 100%; + margin: 0 2px 0 0; + box-shadow: -5px 0 10px rgba(0, 0, 0, 0.2); + left: auto; +} +.dropdown-menu.pull-middle.pull-right { + right: auto; + left: 100%; + margin: 0 0 0 2px; + box-shadow: 5px 0 10px rgba(0, 0, 0, 0.2); +} +.dropdown-menu.pull-middle.pull-center { + right: 50%; + margin: 0; + box-shadow: 0 0 10px rgba(0, 0, 0, 0.2); +} +.dropdown-menu.bullet { + margin-top: 8px; +} +.dropdown-menu.bullet:before { + width: 0; + height: 0; + content: ''; + display: inline-block; + position: absolute; + border-color: transparent; + border-style: solid; + -webkit-transform: rotate(360deg); + border-width: 0 7px 7px; + border-bottom-color: #cccccc; + border-bottom-color: rgba(0, 0, 0, 0.15); + top: -7px; + left: 9px; +} +.dropdown-menu.bullet:after { + width: 0; + height: 0; + content: ''; + display: inline-block; + position: absolute; + border-color: transparent; + border-style: solid; + -webkit-transform: rotate(360deg); + border-width: 0 6px 6px; + border-bottom-color: #ffffff; + top: -6px; + left: 10px; +} +.dropdown-menu.bullet.pull-right:before { + left: auto; + right: 9px; +} +.dropdown-menu.bullet.pull-right:after { + left: auto; + right: 10px; +} +.dropdown-menu.bullet.pull-top { + margin-top: 0; + margin-bottom: 8px; +} +.dropdown-menu.bullet.pull-top:before { + top: auto; + bottom: -7px; + border-bottom-width: 0; + border-top-width: 7px; + border-top-color: #cccccc; + border-top-color: rgba(0, 0, 0, 0.15); +} +.dropdown-menu.bullet.pull-top:after { + top: auto; + bottom: -6px; + border-bottom: none; + border-top-width: 6px; + border-top-color: #ffffff; +} +.dropdown-menu.bullet.pull-center:before { + left: auto; + right: 50%; + margin-right: -7px; +} +.dropdown-menu.bullet.pull-center:after { + left: auto; + right: 50%; + margin-right: -6px; +} +.dropdown-menu.bullet.pull-middle { + margin-right: 8px; +} +.dropdown-menu.bullet.pull-middle:before { + top: 50%; + left: 100%; + right: auto; + margin-top: -7px; + border-right-width: 0; + border-bottom-color: transparent; + border-top-width: 7px; + border-left-color: #cccccc; + border-left-color: rgba(0, 0, 0, 0.15); +} +.dropdown-menu.bullet.pull-middle:after { + top: 50%; + left: 100%; + right: auto; + margin-top: -6px; + border-right-width: 0; + border-bottom-color: transparent; + border-top-width: 6px; + border-left-color: #ffffff; +} +.dropdown-menu.bullet.pull-middle.pull-right { + margin-right: 0; + margin-left: 8px; +} +.dropdown-menu.bullet.pull-middle.pull-right:before { + left: -7px; + border-left-width: 0; + border-right-width: 7px; + border-right-color: #cccccc; + border-right-color: rgba(0, 0, 0, 0.15); +} +.dropdown-menu.bullet.pull-middle.pull-right:after { + left: -6px; + border-left-width: 0; + border-right-width: 6px; + border-right-color: #ffffff; +} +.dropdown-menu.bullet.pull-middle.pull-center { + margin-left: 0; + margin-right: 0; +} +.dropdown-menu.bullet.pull-middle.pull-center:before { + border: none; + display: none; +} +.dropdown-menu.bullet.pull-middle.pull-center:after { + border: none; + display: none; +} +.dropdown-submenu { + position: relative; +} +.dropdown-submenu > .dropdown-menu { + top: 0; + left: 100%; + margin-top: -6px; + margin-left: -1px; + border-top-left-radius: 0; +} +.dropdown-submenu > a:before { + display: block; + float: right; + width: 0; + height: 0; + content: ""; + margin-top: 6px; + margin-right: -8px; + border-width: 4px 0 4px 4px; + border-style: 
solid; + border-left-style: dashed; + border-top-color: transparent; + border-bottom-color: transparent; +} +@media (max-width: 767px) { + .navbar-nav .dropdown-submenu > a:before { + margin-top: 8px; + border-color: inherit; + border-style: solid; + border-width: 4px 4px 0; + border-left-color: transparent; + border-right-color: transparent; + } + .navbar-nav .dropdown-submenu > a { + padding-left: 40px; + } + .navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > a, + .navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > label { + padding-left: 35px; + } + .navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > .dropdown-menu > li > a, + .navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > .dropdown-menu > li > label { + padding-left: 45px; + } + .navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > a, + .navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > label { + padding-left: 55px; + } + .navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > a, + .navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > label { + padding-left: 65px; + } + .navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > a, + .navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > label { + padding-left: 75px; + } +} +.navbar-default .navbar-nav .open > .dropdown-menu > .dropdown-submenu.open > a, +.navbar-default .navbar-nav .open > .dropdown-menu > .dropdown-submenu.open > a:hover, +.navbar-default .navbar-nav .open > .dropdown-menu > .dropdown-submenu.open > a:focus { + background-color: #e7e7e7; + color: #555555; +} +@media (max-width: 767px) { + .navbar-default .navbar-nav .open > .dropdown-menu > .dropdown-submenu.open > a:before { + border-top-color: #555555; + } +} +.navbar-inverse .navbar-nav .open > .dropdown-menu > .dropdown-submenu.open > a, +.navbar-inverse .navbar-nav .open > .dropdown-menu > .dropdown-submenu.open > a:hover, +.navbar-inverse .navbar-nav .open > .dropdown-menu > .dropdown-submenu.open > a:focus { + background-color: #080808; + color: #ffffff; +} +@media (max-width: 767px) { + .navbar-inverse .navbar-nav .open > .dropdown-menu > .dropdown-submenu.open > a:before { + border-top-color: #ffffff; + } +} Binary files /tmp/tmpRXt8AH/FRKffTwTBs/influxdb-0.10.0+dfsg1/services/admin/assets/fonts/glyphicons-halflings-regular.eot and /tmp/tmpRXt8AH/CPOFe2JAqF/influxdb-1.1.1+dfsg1/services/admin/assets/fonts/glyphicons-halflings-regular.eot differ diff -Nru influxdb-0.10.0+dfsg1/services/admin/assets/fonts/glyphicons-halflings-regular.svg influxdb-1.1.1+dfsg1/services/admin/assets/fonts/glyphicons-halflings-regular.svg --- influxdb-0.10.0+dfsg1/services/admin/assets/fonts/glyphicons-halflings-regular.svg 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/services/admin/assets/fonts/glyphicons-halflings-regular.svg 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,288 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
[The 288 added lines are the SVG glyph outlines for glyphicons-halflings-regular.svg; the XML markup is not recoverable in this copy.]
\ No newline at end of file
Binary files /tmp/tmpRXt8AH/FRKffTwTBs/influxdb-0.10.0+dfsg1/services/admin/assets/fonts/glyphicons-halflings-regular.ttf and /tmp/tmpRXt8AH/CPOFe2JAqF/influxdb-1.1.1+dfsg1/services/admin/assets/fonts/glyphicons-halflings-regular.ttf differ
Binary files /tmp/tmpRXt8AH/FRKffTwTBs/influxdb-0.10.0+dfsg1/services/admin/assets/fonts/glyphicons-halflings-regular.woff and /tmp/tmpRXt8AH/CPOFe2JAqF/influxdb-1.1.1+dfsg1/services/admin/assets/fonts/glyphicons-halflings-regular.woff differ
Binary files /tmp/tmpRXt8AH/FRKffTwTBs/influxdb-0.10.0+dfsg1/services/admin/assets/fonts/glyphicons-halflings-regular.woff2 and /tmp/tmpRXt8AH/CPOFe2JAqF/influxdb-1.1.1+dfsg1/services/admin/assets/fonts/glyphicons-halflings-regular.woff2 differ
Binary files /tmp/tmpRXt8AH/FRKffTwTBs/influxdb-0.10.0+dfsg1/services/admin/assets/img/favicon.ico and /tmp/tmpRXt8AH/CPOFe2JAqF/influxdb-1.1.1+dfsg1/services/admin/assets/img/favicon.ico differ
Binary files /tmp/tmpRXt8AH/FRKffTwTBs/influxdb-0.10.0+dfsg1/services/admin/assets/img/influxdb-light400.png and /tmp/tmpRXt8AH/CPOFe2JAqF/influxdb-1.1.1+dfsg1/services/admin/assets/img/influxdb-light400.png differ
diff -Nru influxdb-0.10.0+dfsg1/services/admin/assets/index.html influxdb-1.1.1+dfsg1/services/admin/assets/index.html
--- influxdb-0.10.0+dfsg1/services/admin/assets/index.html 1970-01-01 00:00:00.000000000 +0000
+++ influxdb-1.1.1+dfsg1/services/admin/assets/index.html 2016-12-06 21:36:15.000000000 +0000
@@ -0,0 +1,227 @@
[The 227 added lines are the HTML for the built-in admin UI page ("InfluxDB - Admin Interface"): navbar, Connection Settings form, query bar, results table, and Write Data modal; the markup is not recoverable in this copy.]
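The admin page's JavaScript (admin.js, next hunk) consumes a fixed JSON shape from `/query`: a `results` array, where each element carries an optional `error` and a `series` array of `{name, columns, values}` objects. As a point of reference, here is a hedged Go sketch (not part of this package) that decodes that shape, assuming a local server and an illustrative `SHOW DATABASES` query.

```
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

// queryResponse mirrors the JSON the admin UI walks: results -> series.
type queryResponse struct {
	Results []struct {
		Series []struct {
			Name    string          `json:"name"`
			Columns []string        `json:"columns"`
			Values  [][]interface{} `json:"values"`
		} `json:"series"`
		Err string `json:"error"`
	} `json:"results"`
}

func main() {
	u := "http://localhost:8086/query?q=" + url.QueryEscape("SHOW DATABASES")
	resp, err := http.Get(u)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var qr queryResponse
	if err := json.NewDecoder(resp.Body).Decode(&qr); err != nil {
		panic(err)
	}
	for _, r := range qr.Results {
		if r.Err != "" {
			fmt.Println("error:", r.Err)
			continue
		}
		for _, s := range r.Series {
			fmt.Println(s.Name, s.Columns, s.Values)
		}
	}
}
```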
+ + + + + + + + + + + + + + + + + diff -Nru influxdb-0.10.0+dfsg1/services/admin/assets/js/admin.js influxdb-1.1.1+dfsg1/services/admin/assets/js/admin.js --- influxdb-0.10.0+dfsg1/services/admin/assets/js/admin.js 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/services/admin/assets/js/admin.js 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,529 @@ +// allow the user to store recent queries for quick retrieval +var recentQueries = []; +var queryPointer = null; + +// keep track of the databases that exist on the server +var availableDatabases = []; +var currentlySelectedDatabase = null; + +// connection settings for the server, with sensible defaults +var connectionSettings = { + hostname: (window.location.hostname ? window.location.hostname: "localhost"), + port: "8086", + username: "", + password: "", + ssl: ('https:' == window.location.protocol ? true : false) +} + +var connectionString = function() { + var protocol = (connectionSettings.ssl ? "https" : "http"); + var host = connectionSettings.hostname + ":" + connectionSettings.port; + + if (connectionSettings.username !== "") { + $.ajaxSetup({ + headers: { + 'Authorization': "Basic " + btoa(connectionSettings.username + ":" + connectionSettings.password) + } + }); + } + + return protocol + "://" + host; +} + +var getSeriesFromJSON = function(data) { + var results = []; + data.results.forEach(function(result) { + if (result.series) { + result.series.forEach(function(s) { + results.push(s); + }); + } + }); + return results.length > 0 ? results : null; +} + +// gets settings from the browser's localStorage and sets defaults if they aren't found +var loadSettings = function() { + var cs = localStorage.getItem("connectionSettings"); + + if (cs != null) { connectionSettings = JSON.parse(cs); } + + document.getElementById('hostname').value = connectionSettings.hostname; + document.getElementById('port').value = connectionSettings.port; + document.getElementById('username').value = connectionSettings.username; + document.getElementById('password').value = connectionSettings.password; + document.getElementById('ssl').checked = connectionSettings.ssl; + + getClientVersion(); + getDatabases(); +} + +var updateSettings = function() { + var hostname = document.getElementById('hostname').value; + var port = document.getElementById('port').value; + var username = document.getElementById('username').value; + var password = document.getElementById('password').value; + var ssl = document.getElementById('ssl').checked; + + if (hostname == "") { hostname = "localhost"; } + + if (port == "") { port = "8086"; } + + connectionSettings.hostname = hostname; + connectionSettings.port = port; + connectionSettings.username = username; + connectionSettings.password = password; + connectionSettings.ssl = ssl; + + localStorage.setItem("connectionSettings", JSON.stringify(connectionSettings)); + + getDatabases(); +} + +var showSettings = function() { + $("#settings").show(); + $("input#query").prop('disabled', true); +} + +var hideSettings = function() { + $("#settings").hide(); + $("input#query").prop('disabled', false); +} + +// hide errors within the Write Data modal +var hideModalError = function() { + $("div#modal-error").empty().hide(); +} + +// show errors within the Write Data modal +var showModalError = function(message) { + hideModalSuccess(); + + $("div#modal-error").html("

" + message + "

").show(); +} + +// hide success messages within the Write Data modal +var hideModalSuccess = function() { + $("div#modal-success").empty().hide(); +} + +// show success messages within the Write Data modal +var showModalSuccess = function(message) { + hideModalError(); + + $("div#modal-success").html("

" + message + "

").show(); +} + +// hide errors from queries +var hideQueryError = function() { + $("div#query-error").empty().hide(); +} + +// show errors from queries +var showQueryError = function(message) { + hideQuerySuccess(); + + $("div#query-error").html("

" + message + "

").show(); +} + +// hide success messages from queries +var hideQuerySuccess = function() { + $("div#query-success").empty().hide(); +} + +// show success messages from queries +var showQuerySuccess = function(message) { + hideQueryError(); + + $("div#query-success").html("

" + message + "

").show(); +} + +// hide warning from database lookup +var hideDatabaseWarning = function() { + $("div#database-warning").empty().hide(); +} + +// show warning from database lookup +var showDatabaseWarning = function(message) { + $("div#database-warning").html("

" + message + "

").show(); +} + +// clear out the results table +var clearResults = function() { + $("div#table").empty(); +} + +// handle submissions of the query bar +var handleSubmit = function(e) { + var queryElement = document.getElementById('query'); + var q = queryElement.value; + + clearResults(); + hideQueryError(); + hideQuerySuccess(); + + if (q == "") { return }; + + var query = $.get(connectionString() + "/query", {q: q, db: currentlySelectedDatabase}, function() { + }); + + recentQueries.push(q); + queryPointer = recentQueries.length - 1; + + query.fail(handleRequestError); + + query.done(function (data) { + var firstRow = data.results[0]; + if (firstRow.error) { + showQueryError("Server returned error: " + firstRow.error); + return + } + + var series = getSeriesFromJSON(data); + + if (series == null) { + showQuerySuccess("Success! (no results to display)"); + getDatabases(); + return + } + + hideDatabaseWarning(); + React.render( + React.createElement(DataTable, {series: series}), + document.getElementById('table') + ); + }); + + if (e != null) { e.preventDefault(); } + return false; +}; + +var handleRequestError = function(e) { + var errorText = e.status + " " + e.statusText; + showDatabaseWarning("Unable to fetch list of databases."); + + if ("responseText" in e) { + try { errorText = "Server returned error: " + JSON.parse(e.responseText).error; } catch(e) {} + } + + if (e.status == 400) { + hideSettings(); + } else if (e.status == 401) { + if (errorText.indexOf("error authorizing query") > -1) { + hideSettings(); + $("input#query").val("CREATE USER WITH PASSWORD '' WITH ALL PRIVILEGES").focus(); + } else { + showSettings(); + $("input#username").focus(); + } + } else { + showSettings(); + $("input#hostname").focus(); + showDatabaseWarning("Hint: the InfluxDB API runs on port 8086 by default"); + errorText = e.status + " " + e.statusText + " - Could not connect to " + connectionString(); + } + showQueryError(errorText); +}; + +var handleKeypress = function(e) { + var queryElement = document.getElementById('query'); + + // Enable/Disable the generate permalink button + if(queryElement.value == "" && !$("#generate-query-url").hasClass("disabled")) { + $("#generate-query-url").addClass("disabled"); + } else { + $("#generate-query-url").removeClass("disabled"); + } + + // key press == enter + if (e.keyCode == 13) { + e.preventDefault(); + handleSubmit(); + return false; + } + + // if we don't have any recent queries, ignore the arrow keys + if (recentQueries.length == 0 ) { return } + + // key press == up arrow + if (e.keyCode == 38) { + clearResults() + hideQuerySuccess() + hideQueryError() + + // TODO: stash the current query, if there is one? + if (queryPointer == recentQueries.length - 1) { + // this is buggy. 
+ //recentQueries.push(queryElement.value); + //queryPointer = recentQueries.length - 1; + } + + if (queryPointer != null && queryPointer > 0) { + queryPointer -= 1; + queryElement.value = recentQueries[queryPointer]; + } + } + + // key press == down arrow + if (e.keyCode == 40) { + if (queryPointer != null && queryPointer < recentQueries.length - 1) { + queryPointer += 1; + queryElement.value = recentQueries[queryPointer]; + } + } +}; + +var QueryError = React.createClass({ + render: function() { + return React.createElement("div", {className: "alert alert-danger"}, this.props.message) + } +}); + +var stringifyTags = function(tags) { + var tagStrings = []; + + for(var index in tags) { + tagStrings.push(index + ":" + tags[index]); + } + + return tagStrings.join(", "); +} + +var DataTable = React.createClass({ + render: function() { + var tables = this.props.series.map(function(series) { + return React.createElement("div", null, + React.createElement("h1", null, series.name), + React.createElement("h2", null, stringifyTags(series.tags)), + React.createElement("table", {className: "table"}, + React.createElement(TableHeader, {data: series.columns}), + React.createElement(TableBody, {data: series}) + ) + ); + }); + + return React.createElement("div", null, tables); + } +}); + +var TableHeader = React.createClass({ + render: function() { + var headers = this.props.data.map(function(column) { + return React.createElement("th", null, column); + }); + + return React.createElement("tr", null, headers); + } +}); + +var TableBody = React.createClass({ + render: function() { + if (this.props.data.values) { + var tableRows = this.props.data.values.map(function (row) { + return React.createElement(TableRow, {data: row}); + }); + } + + return React.createElement("tbody", null, tableRows); + } +}); + +var TableRow = React.createClass({ + render: function() { + var tableData = this.props.data.map(function (data, index) { + if (index == 0) { + return React.createElement("td", {className: "timestamp"}, null, data); + } else { + return React.createElement("td", null, pretty(data)); + } + }); + + return React.createElement("tr", null, tableData); + } +}); + +var pretty = function(val) { + if (typeof val == 'string') { + return "\"" + val + "\""; + } else if (typeof val == 'boolean' ){ + return val.toString(); + } else { + return val; + } +} + +var truncateVersion = function (version) { + var parts = version.split(".") + if (parts.length > 2) { + parts = parts.slice(0, 2) + } + return parts.join(".") +} + +var getClientVersion = function () { + var query = $.get(window.location.origin + window.location.pathname); + + query.fail(handleRequestError); + + query.done(function (data, status, xhr) { + var version = xhr.getResponseHeader('X-InfluxDB-Version'); + if (version.indexOf("unknown") == -1) { + console.log('got client version v'+version); + version = 'v' + truncateVersion(version); + $('#influxdb-doc-link').attr('href', 'https://docs.influxdata.com/influxdb/'+version+'/introduction/getting_started/'); + } + $('.influxdb-client-version').html(version); + }); +} + +var chooseDatabase = function (databaseName) { + currentlySelectedDatabase = databaseName; + document.getElementById("content-current-database").innerHTML = currentlySelectedDatabase; +} + +var getDatabases = function () { + var q = "SHOW DATABASES"; + var query = $.get(connectionString() + "/query", {q: q, db: currentlySelectedDatabase}); + + query.fail(handleRequestError); + + query.done(function (data, status, xhr) { + // Set version of the 
InfluxDB server + var version = xhr.getResponseHeader('X-InfluxDB-Version'); + if (version.indexOf("unknown") == -1) { + version = "v" + version; + } + $('.influxdb-version').html(version); + + hideSettings(); + hideDatabaseWarning(); + + var firstRow = data.results[0]; + if (firstRow.error) { + showDatabaseWarning(firstRow.error); + return; + } + + var series = getSeriesFromJSON(data); + var values = series[0].values; + + if ((values == null) || (values.length == 0)) { + availableDatabases = []; + updateDatabaseList(); + + showDatabaseWarning("No databases found.") + } else { + availableDatabases = values.map(function(value) { + return value[0]; + }).sort(); + + if (currentlySelectedDatabase == null) { + chooseDatabase(availableDatabases[0]); + } else if (availableDatabases.indexOf(currentlySelectedDatabase) == -1) { + chooseDatabase(availableDatabases[0]); + } + updateDatabaseList(); + } + }); +} + +var updateDatabaseList = function() { + var databaseList = $("ul#content-database-list"); + + databaseList.empty(); + availableDatabases.forEach(function(database) { + var li = $("
  • " + database + "
  • "); + databaseList.append(li); + }); + + if (availableDatabases.length == 0) { + document.getElementById("content-current-database").innerHTML = "…"; + } +} + +var generateQueryURL = function() { + var q = document.getElementById('query').value; + + var query = connectionString() + "/query?"; + var queryParams = {q: q, db: currentlySelectedDatabase}; + query += $.param(queryParams); + + var textarea = $("#query-url"); + textarea.val(query); +} + +// when the page is ready, start everything up +$(document).ready(function () { + loadSettings(); + + // bind to the settings cog in the navbar + $("#action-settings").click(function (e) { + $("#settings").toggle(); + }); + + // bind to the save button in the settings form + $("#form-settings").submit(function (e) { + updateSettings(); + }); + + // bind to the items in the query template dropdown + $("ul#action-template label").click(function (e) { + var el = $(e.target); + $("input#query").val(el.data("query")).focus(); + }); + + $("ul#content-database-list").on("click", function(e) { + if (e.target.tagName != "A") { return; } + + chooseDatabase(e.target.innerHTML); + e.preventDefault(); + }) + + // load the Write Data modal + $("button#action-send").click(function (e) { + var data = $("textarea#content-data").val(); + + var startTime = new Date().getTime(); + var write = $.post(connectionString() + "/write?db=" + currentlySelectedDatabase, data, function() { + }); + + write.fail(function (e) { + if (e.status == 400) { + showModalError("Failed to write: " + e.responseText) + } + else { + showModalError("Failed to contact server: " + e.statusText) + } + }); + + write.done(function (data) { + var endTime = new Date().getTime(); + var elapsed = endTime - startTime; + showModalSuccess("Write succeeded. (" + elapsed + "ms)"); + }); + + }); + + // Enable auto select of the text in modal + $('#queryURLModal').on('shown.bs.modal', function () { + var textarea = $("#query-url"); + textarea.focus(); + textarea.select(); + }); + + //bind to the generate permalink button + $("#generate-query-url").click(function (e) { + generateQueryURL(); + }); + + // handle submit actions on the query bar + var form = document.getElementById('query-form'); + form.addEventListener("submit", handleSubmit); + + // handle keypresses on the query bar so we can get arrow keys and enter + var query = document.getElementById('query'); + query.addEventListener("keydown", handleKeypress); + + // make sure we start out with the query bar in focus + document.getElementById('query').focus(); +}) diff -Nru influxdb-0.10.0+dfsg1/services/admin/assets/README.md influxdb-1.1.1+dfsg1/services/admin/assets/README.md --- influxdb-0.10.0+dfsg1/services/admin/assets/README.md 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/services/admin/assets/README.md 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,4 @@ +Please note that these files are embedded into the `influxd` binary using the +[statik](https://github.com/rakyll/statik) tool. `go generate` needs to be run +whenever there are changes made to files in this directory. See the admin +interface readme for more information. 
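The embedding flow described above ends with a generated `statik/statik.go` that the admin service (service.go, later in this patch) serves over HTTP. Here is a minimal, hedged sketch of that pattern, not the package's own code: the blank import runs the generated init() and registers the compressed assets, `fs.New()` exposes them as an `http.FileSystem`, and a plain `http.FileServer` serves them. The `:8083` listen address is only an assumption for illustration.

```
package main

import (
	"log"
	"net/http"

	"github.com/rakyll/statik/fs"

	// The blank import runs the generated init(), registering the embedded assets.
	_ "github.com/influxdata/influxdb/services/admin/statik"
)

func main() {
	// fs.New returns an http.FileSystem backed by the registered statik data.
	statikFS, err := fs.New()
	if err != nil {
		log.Fatal(err)
	}

	// Serve the embedded admin assets as if they were files on disk.
	log.Fatal(http.ListenAndServe(":8083", http.FileServer(statikFS)))
}
```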
diff -Nru influxdb-0.10.0+dfsg1/services/admin/config.go influxdb-1.1.1+dfsg1/services/admin/config.go --- influxdb-0.10.0+dfsg1/services/admin/config.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/services/admin/config.go 2016-12-06 21:36:15.000000000 +0000 @@ -11,6 +11,7 @@ BindAddress string `toml:"bind-address"` HTTPSEnabled bool `toml:"https-enabled"` HTTPSCertificate string `toml:"https-certificate"` + Version string `toml:"-"` } // NewConfig returns an instance of Config with defaults. diff -Nru influxdb-0.10.0+dfsg1/services/admin/config_test.go influxdb-1.1.1+dfsg1/services/admin/config_test.go --- influxdb-0.10.0+dfsg1/services/admin/config_test.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/services/admin/config_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -4,7 +4,7 @@ "testing" "github.com/BurntSushi/toml" - "github.com/influxdb/influxdb/services/admin" + "github.com/influxdata/influxdb/services/admin" ) func TestConfig_Parse(t *testing.T) { diff -Nru influxdb-0.10.0+dfsg1/services/admin/README.md influxdb-1.1.1+dfsg1/services/admin/README.md --- influxdb-0.10.0+dfsg1/services/admin/README.md 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/services/admin/README.md 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,23 @@ +# InfluxDB Admin Interface + +This is the built-in admin interface that ships with InfluxDB. The service is intended to have little overhead and minimal preprocessing steps. + +## How it works + +Static assets, located in the `assets` directory, are embedded in the `influxd` binary and served from memory using a simple fileserver. + +The admin UI itself uses [React](https://github.com/facebook/react) for the user interface to interact directly with the InfluxDB API, usually running on port `8086`. + +## Building + +The only step required to bundle the admin UI with InfluxDB is to create a compressed file system using `statik` as follows: + +``` +go get github.com/rakyll/statik # make sure $GOPATH/bin is listed in your PATH +cd $GOPATH/src/github.com/influxdata/influxdb +go generate github.com/influxdata/influxdb/services/admin +``` + +The `go generate ./...` command will run `statik` to generate the `statik/statik.go` file. The generated `go` file will embed the admin interface assets into the InfluxDB binary. + +This step should be run before submitting any pull requests which include modifications to admin interface assets. diff -Nru influxdb-0.10.0+dfsg1/services/admin/service.go influxdb-1.1.1+dfsg1/services/admin/service.go --- influxdb-0.10.0+dfsg1/services/admin/service.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/services/admin/service.go 2016-12-06 21:36:15.000000000 +0000 @@ -1,8 +1,9 @@ -package admin +package admin // import "github.com/influxdata/influxdb/services/admin" import ( "crypto/tls" "fmt" + "io" "log" "net" "net/http" @@ -10,7 +11,7 @@ "strings" // Register static assets via statik. - _ "github.com/influxdb/influxdb/statik" + _ "github.com/influxdata/influxdb/services/admin/statik" "github.com/rakyll/statik/fs" ) @@ -21,6 +22,7 @@ https bool cert string err chan error + version string logger *log.Logger } @@ -28,17 +30,19 @@ // NewService returns a new instance of Service. 
func NewService(c Config) *Service { return &Service{ - addr: c.BindAddress, - https: c.HTTPSEnabled, - cert: c.HTTPSCertificate, - err: make(chan error), - logger: log.New(os.Stderr, "[admin] ", log.LstdFlags), + addr: c.BindAddress, + https: c.HTTPSEnabled, + cert: c.HTTPSCertificate, + err: make(chan error), + version: c.Version, + logger: log.New(os.Stderr, "[admin] ", log.LstdFlags), } } // Open starts the service func (s *Service) Open() error { s.logger.Printf("Starting admin service") + s.logger.Println("DEPRECATED: This plugin is deprecated as of 1.1.0 and will be removed in a future release") // Open listener. if s.https { @@ -79,9 +83,10 @@ return nil } -// SetLogger sets the internal logger to the logger passed in. -func (s *Service) SetLogger(l *log.Logger) { - s.logger = l +// SetLogOutput sets the writer to which all logs are written. It must not be +// called after Open is called. +func (s *Service) SetLogOutput(w io.Writer) { + s.logger = log.New(w, "[admin] ", log.LstdFlags) } // Err returns a channel for fatal errors that occur on the listener. @@ -97,6 +102,13 @@ // serve serves the handler from the listener. func (s *Service) serve() { + addVersionHeaderThenServe := func(h http.Handler) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("X-InfluxDB-Version", s.version) + h.ServeHTTP(w, r) + } + } + // Instantiate file system from embedded admin. statikFS, err := fs.New() if err != nil { @@ -104,7 +116,7 @@ } // Run file system handler on listener. - err = http.Serve(s.listener, http.FileServer(statikFS)) + err = http.Serve(s.listener, addVersionHeaderThenServe(http.FileServer(statikFS))) if err != nil && !strings.Contains(err.Error(), "closed") { s.err <- fmt.Errorf("listener error: addr=%s, err=%s", s.Addr(), err) } diff -Nru influxdb-0.10.0+dfsg1/services/admin/service_test.go influxdb-1.1.1+dfsg1/services/admin/service_test.go --- influxdb-0.10.0+dfsg1/services/admin/service_test.go 2016-02-04 16:51:02.000000000 +0000 +++ influxdb-1.1.1+dfsg1/services/admin/service_test.go 2016-12-06 21:36:15.000000000 +0000 @@ -5,7 +5,7 @@ "net/http" "testing" - "github.com/influxdb/influxdb/services/admin" + "github.com/influxdata/influxdb/services/admin" ) // Ensure service can serve the root index page of the admin. diff -Nru influxdb-0.10.0+dfsg1/services/admin/statik/README.md influxdb-1.1.1+dfsg1/services/admin/statik/README.md --- influxdb-0.10.0+dfsg1/services/admin/statik/README.md 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/services/admin/statik/README.md 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,3 @@ +Please note that this file is automatically generated by the +[statik](https://github.com/rakyll/statik) tool, and should not be +updated directly. See the Admin UI readme for more information. diff -Nru influxdb-0.10.0+dfsg1/services/admin/statik/statik.go influxdb-1.1.1+dfsg1/services/admin/statik/statik.go --- influxdb-0.10.0+dfsg1/services/admin/statik/statik.go 1970-01-01 00:00:00.000000000 +0000 +++ influxdb-1.1.1+dfsg1/services/admin/statik/statik.go 2016-12-06 21:36:15.000000000 +0000 @@ -0,0 +1,10 @@ +package statik + +import ( + "github.com/rakyll/statik/fs" +) + +func init() { + data := "PK\x03\x04\x14\x00\x08\x00\x00\x00L\x84JI\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00README.mdPlease note that these files are embedded into the `influxd` binary using the\n[statik](https://github.com/rakyll/statik) tool. 
`go generate` needs to be run\nwhenever there are changes made to files in this directory. See the admin\ninterface readme for more information.\nPK\x07\x08\xeb\xa78$\x0e\x01\x00\x00\x0e\x01\x00\x00PK\x03\x04\x14\x00\x08\x00\x00\x00L\x84JI\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00css/admin.cssbody {\n padding-top: 70px;\n /* Required padding for .navbar-fixed-top. Remove if using .navbar-static-top. Change if height of navigation changes. */\n}\n\nhtml,\nbody {\n height: 100%;\n /* The html and body elements cannot have any padding or margin. */\n}\n\ncode {\n display: block;\n}\n\n#settings {\n display: none;\n}\n\n#settings form > div {\n margin-right: 20px;\n}\n\n#settings form input#port {\n width: 80px;\n}\n\n#settings form label {\n padding-right: 5px;\n}\n\ndiv#content {\n margin-bottom: -10px;\n}\n\ndiv#table h2 {\n color: #999;\n margin-top: -8px;\n font-size: 16px\n}\n\ntextarea#content-data {\n font-family: \"Courier New\";\n height: 200px;\n}\n\ndiv#query-alerts {\n margin-top: 30px;\n}\n\ndiv#modal-error, div#modal-success, div#query-error, div#query-success {\n display: none; \n}\n\n/* Wrapper for page content to push down footer */\n#wrap {\n min-height: 100%;\n height: auto !important;\n height: 100%;\n /* Negative indent footer by it's height */\n margin: 0 auto -60px;\n}\n\n/* Set the fixed height of the footer here */\n#push,\n#footer {\n height: 60px;\n}\n#footer {\n background-color: #f5f5f5;\n border-top: 1px solid #dfdfdf;\n}\n\n#footer p {\n margin: 20px 0;\n}\n\n/* Lastly, apply responsive CSS fixes as necessary */\n@media (max-width: 767px) {\n #footer {\n margin-left: -20px;\n margin-right: -20px;\n padding-left: 20px;\n padding-right: 20px;\n }\n}\n\nPK\x07\x08\x9c\x84U>\x8e\x05\x00\x00\x8e\x05\x00\x00PK\x03\x04\x14\x00\x08\x00\x00\x00L\x84JI\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00css/bootstrap.css/*!\n * Bootstrap v3.3.4 (http://getbootstrap.com)\n * Copyright 2011-2015 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n */\n\n/*! 
normalize.css v3.0.2 | MIT License | git.io/normalize */\nhtml {\n font-family: sans-serif;\n -webkit-text-size-adjust: 100%;\n -ms-text-size-adjust: 100%;\n}\nbody {\n margin: 0;\n}\narticle,\naside,\ndetails,\nfigcaption,\nfigure,\nfooter,\nheader,\nhgroup,\nmain,\nmenu,\nnav,\nsection,\nsummary {\n display: block;\n}\naudio,\ncanvas,\nprogress,\nvideo {\n display: inline-block;\n vertical-align: baseline;\n}\naudio:not([controls]) {\n display: none;\n height: 0;\n}\n[hidden],\ntemplate {\n display: none;\n}\na {\n background-color: transparent;\n}\na:active,\na:hover {\n outline: 0;\n}\nabbr[title] {\n border-bottom: 1px dotted;\n}\nb,\nstrong {\n font-weight: bold;\n}\ndfn {\n font-style: italic;\n}\nh1 {\n margin: .67em 0;\n font-size: 2em;\n}\nmark {\n color: #000;\n background: #ff0;\n}\nsmall {\n font-size: 80%;\n}\nsub,\nsup {\n position: relative;\n font-size: 75%;\n line-height: 0;\n vertical-align: baseline;\n}\nsup {\n top: -.5em;\n}\nsub {\n bottom: -.25em;\n}\nimg {\n border: 0;\n}\nsvg:not(:root) {\n overflow: hidden;\n}\nfigure {\n margin: 1em 40px;\n}\nhr {\n height: 0;\n -webkit-box-sizing: content-box;\n -moz-box-sizing: content-box;\n box-sizing: content-box;\n}\npre {\n overflow: auto;\n}\ncode,\nkbd,\npre,\nsamp {\n font-family: monospace, monospace;\n font-size: 1em;\n}\nbutton,\ninput,\noptgroup,\nselect,\ntextarea {\n margin: 0;\n font: inherit;\n color: inherit;\n}\nbutton {\n overflow: visible;\n}\nbutton,\nselect {\n text-transform: none;\n}\nbutton,\nhtml input[type=\"button\"],\ninput[type=\"reset\"],\ninput[type=\"submit\"] {\n -webkit-appearance: button;\n cursor: pointer;\n}\nbutton[disabled],\nhtml input[disabled] {\n cursor: default;\n}\nbutton::-moz-focus-inner,\ninput::-moz-focus-inner {\n padding: 0;\n border: 0;\n}\ninput {\n line-height: normal;\n}\ninput[type=\"checkbox\"],\ninput[type=\"radio\"] {\n -webkit-box-sizing: border-box;\n -moz-box-sizing: border-box;\n box-sizing: border-box;\n padding: 0;\n}\ninput[type=\"number\"]::-webkit-inner-spin-button,\ninput[type=\"number\"]::-webkit-outer-spin-button {\n height: auto;\n}\ninput[type=\"search\"] {\n -webkit-box-sizing: content-box;\n -moz-box-sizing: content-box;\n box-sizing: content-box;\n -webkit-appearance: textfield;\n}\ninput[type=\"search\"]::-webkit-search-cancel-button,\ninput[type=\"search\"]::-webkit-search-decoration {\n -webkit-appearance: none;\n}\nfieldset {\n padding: .35em .625em .75em;\n margin: 0 2px;\n border: 1px solid #c0c0c0;\n}\nlegend {\n padding: 0;\n border: 0;\n}\ntextarea {\n overflow: auto;\n}\noptgroup {\n font-weight: bold;\n}\ntable {\n border-spacing: 0;\n border-collapse: collapse;\n}\ntd,\nth {\n padding: 0;\n}\n/*! 
Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */\n@media print {\n *,\n *:before,\n *:after {\n color: #000 !important;\n text-shadow: none !important;\n background: transparent !important;\n -webkit-box-shadow: none !important;\n box-shadow: none !important;\n }\n a,\n a:visited {\n text-decoration: underline;\n }\n a[href]:after {\n content: \" (\" attr(href) \")\";\n }\n abbr[title]:after {\n content: \" (\" attr(title) \")\";\n }\n a[href^=\"#\"]:after,\n a[href^=\"javascript:\"]:after {\n content: \"\";\n }\n pre,\n blockquote {\n border: 1px solid #999;\n\n page-break-inside: avoid;\n }\n thead {\n display: table-header-group;\n }\n tr,\n img {\n page-break-inside: avoid;\n }\n img {\n max-width: 100% !important;\n }\n p,\n h2,\n h3 {\n orphans: 3;\n widows: 3;\n }\n h2,\n h3 {\n page-break-after: avoid;\n }\n select {\n background: #fff !important;\n }\n .navbar {\n display: none;\n }\n .btn > .caret,\n .dropup > .btn > .caret {\n border-top-color: #000 !important;\n }\n .label {\n border: 1px solid #000;\n }\n .table {\n border-collapse: collapse !important;\n }\n .table td,\n .table th {\n background-color: #fff !important;\n }\n .table-bordered th,\n .table-bordered td {\n border: 1px solid #ddd !important;\n }\n}\n@font-face {\n font-family: 'Glyphicons Halflings';\n\n src: url('../fonts/glyphicons-halflings-regular.eot');\n src: url('../fonts/glyphicons-halflings-regular.eot?#iefix') format('embedded-opentype'), url('../fonts/glyphicons-halflings-regular.woff2') format('woff2'), url('../fonts/glyphicons-halflings-regular.woff') format('woff'), url('../fonts/glyphicons-halflings-regular.ttf') format('truetype'), url('../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular') format('svg');\n}\n.glyphicon {\n position: relative;\n top: 1px;\n display: inline-block;\n font-family: 'Glyphicons Halflings';\n font-style: normal;\n font-weight: normal;\n line-height: 1;\n\n -webkit-font-smoothing: antialiased;\n -moz-osx-font-smoothing: grayscale;\n}\n.glyphicon-asterisk:before {\n content: \"\\2a\";\n}\n.glyphicon-plus:before {\n content: \"\\2b\";\n}\n.glyphicon-euro:before,\n.glyphicon-eur:before {\n content: \"\\20ac\";\n}\n.glyphicon-minus:before {\n content: \"\\2212\";\n}\n.glyphicon-cloud:before {\n content: \"\\2601\";\n}\n.glyphicon-envelope:before {\n content: \"\\2709\";\n}\n.glyphicon-pencil:before {\n content: \"\\270f\";\n}\n.glyphicon-glass:before {\n content: \"\\e001\";\n}\n.glyphicon-music:before {\n content: \"\\e002\";\n}\n.glyphicon-search:before {\n content: \"\\e003\";\n}\n.glyphicon-heart:before {\n content: \"\\e005\";\n}\n.glyphicon-star:before {\n content: \"\\e006\";\n}\n.glyphicon-star-empty:before {\n content: \"\\e007\";\n}\n.glyphicon-user:before {\n content: \"\\e008\";\n}\n.glyphicon-film:before {\n content: \"\\e009\";\n}\n.glyphicon-th-large:before {\n content: \"\\e010\";\n}\n.glyphicon-th:before {\n content: \"\\e011\";\n}\n.glyphicon-th-list:before {\n content: \"\\e012\";\n}\n.glyphicon-ok:before {\n content: \"\\e013\";\n}\n.glyphicon-remove:before {\n content: \"\\e014\";\n}\n.glyphicon-zoom-in:before {\n content: \"\\e015\";\n}\n.glyphicon-zoom-out:before {\n content: \"\\e016\";\n}\n.glyphicon-off:before {\n content: \"\\e017\";\n}\n.glyphicon-signal:before {\n content: \"\\e018\";\n}\n.glyphicon-cog:before {\n content: \"\\e019\";\n}\n.glyphicon-trash:before {\n content: \"\\e020\";\n}\n.glyphicon-home:before {\n content: \"\\e021\";\n}\n.glyphicon-file:before {\n content: 
\"\\e022\";\n}\n.glyphicon-time:before {\n content: \"\\e023\";\n}\n.glyphicon-road:before {\n content: \"\\e024\";\n}\n.glyphicon-download-alt:before {\n content: \"\\e025\";\n}\n.glyphicon-download:before {\n content: \"\\e026\";\n}\n.glyphicon-upload:before {\n content: \"\\e027\";\n}\n.glyphicon-inbox:before {\n content: \"\\e028\";\n}\n.glyphicon-play-circle:before {\n content: \"\\e029\";\n}\n.glyphicon-repeat:before {\n content: \"\\e030\";\n}\n.glyphicon-refresh:before {\n content: \"\\e031\";\n}\n.glyphicon-list-alt:before {\n content: \"\\e032\";\n}\n.glyphicon-lock:before {\n content: \"\\e033\";\n}\n.glyphicon-flag:before {\n content: \"\\e034\";\n}\n.glyphicon-headphones:before {\n content: \"\\e035\";\n}\n.glyphicon-volume-off:before {\n content: \"\\e036\";\n}\n.glyphicon-volume-down:before {\n content: \"\\e037\";\n}\n.glyphicon-volume-up:before {\n content: \"\\e038\";\n}\n.glyphicon-qrcode:before {\n content: \"\\e039\";\n}\n.glyphicon-barcode:before {\n content: \"\\e040\";\n}\n.glyphicon-tag:before {\n content: \"\\e041\";\n}\n.glyphicon-tags:before {\n content: \"\\e042\";\n}\n.glyphicon-book:before {\n content: \"\\e043\";\n}\n.glyphicon-bookmark:before {\n content: \"\\e044\";\n}\n.glyphicon-print:before {\n content: \"\\e045\";\n}\n.glyphicon-camera:before {\n content: \"\\e046\";\n}\n.glyphicon-font:before {\n content: \"\\e047\";\n}\n.glyphicon-bold:before {\n content: \"\\e048\";\n}\n.glyphicon-italic:before {\n content: \"\\e049\";\n}\n.glyphicon-text-height:before {\n content: \"\\e050\";\n}\n.glyphicon-text-width:before {\n content: \"\\e051\";\n}\n.glyphicon-align-left:before {\n content: \"\\e052\";\n}\n.glyphicon-align-center:before {\n content: \"\\e053\";\n}\n.glyphicon-align-right:before {\n content: \"\\e054\";\n}\n.glyphicon-align-justify:before {\n content: \"\\e055\";\n}\n.glyphicon-list:before {\n content: \"\\e056\";\n}\n.glyphicon-indent-left:before {\n content: \"\\e057\";\n}\n.glyphicon-indent-right:before {\n content: \"\\e058\";\n}\n.glyphicon-facetime-video:before {\n content: \"\\e059\";\n}\n.glyphicon-picture:before {\n content: \"\\e060\";\n}\n.glyphicon-map-marker:before {\n content: \"\\e062\";\n}\n.glyphicon-adjust:before {\n content: \"\\e063\";\n}\n.glyphicon-tint:before {\n content: \"\\e064\";\n}\n.glyphicon-edit:before {\n content: \"\\e065\";\n}\n.glyphicon-share:before {\n content: \"\\e066\";\n}\n.glyphicon-check:before {\n content: \"\\e067\";\n}\n.glyphicon-move:before {\n content: \"\\e068\";\n}\n.glyphicon-step-backward:before {\n content: \"\\e069\";\n}\n.glyphicon-fast-backward:before {\n content: \"\\e070\";\n}\n.glyphicon-backward:before {\n content: \"\\e071\";\n}\n.glyphicon-play:before {\n content: \"\\e072\";\n}\n.glyphicon-pause:before {\n content: \"\\e073\";\n}\n.glyphicon-stop:before {\n content: \"\\e074\";\n}\n.glyphicon-forward:before {\n content: \"\\e075\";\n}\n.glyphicon-fast-forward:before {\n content: \"\\e076\";\n}\n.glyphicon-step-forward:before {\n content: \"\\e077\";\n}\n.glyphicon-eject:before {\n content: \"\\e078\";\n}\n.glyphicon-chevron-left:before {\n content: \"\\e079\";\n}\n.glyphicon-chevron-right:before {\n content: \"\\e080\";\n}\n.glyphicon-plus-sign:before {\n content: \"\\e081\";\n}\n.glyphicon-minus-sign:before {\n content: \"\\e082\";\n}\n.glyphicon-remove-sign:before {\n content: \"\\e083\";\n}\n.glyphicon-ok-sign:before {\n content: \"\\e084\";\n}\n.glyphicon-question-sign:before {\n content: \"\\e085\";\n}\n.glyphicon-info-sign:before {\n content: 
\"\\e086\";\n}\n.glyphicon-screenshot:before {\n content: \"\\e087\";\n}\n.glyphicon-remove-circle:before {\n content: \"\\e088\";\n}\n.glyphicon-ok-circle:before {\n content: \"\\e089\";\n}\n.glyphicon-ban-circle:before {\n content: \"\\e090\";\n}\n.glyphicon-arrow-left:before {\n content: \"\\e091\";\n}\n.glyphicon-arrow-right:before {\n content: \"\\e092\";\n}\n.glyphicon-arrow-up:before {\n content: \"\\e093\";\n}\n.glyphicon-arrow-down:before {\n content: \"\\e094\";\n}\n.glyphicon-share-alt:before {\n content: \"\\e095\";\n}\n.glyphicon-resize-full:before {\n content: \"\\e096\";\n}\n.glyphicon-resize-small:before {\n content: \"\\e097\";\n}\n.glyphicon-exclamation-sign:before {\n content: \"\\e101\";\n}\n.glyphicon-gift:before {\n content: \"\\e102\";\n}\n.glyphicon-leaf:before {\n content: \"\\e103\";\n}\n.glyphicon-fire:before {\n content: \"\\e104\";\n}\n.glyphicon-eye-open:before {\n content: \"\\e105\";\n}\n.glyphicon-eye-close:before {\n content: \"\\e106\";\n}\n.glyphicon-warning-sign:before {\n content: \"\\e107\";\n}\n.glyphicon-plane:before {\n content: \"\\e108\";\n}\n.glyphicon-calendar:before {\n content: \"\\e109\";\n}\n.glyphicon-random:before {\n content: \"\\e110\";\n}\n.glyphicon-comment:before {\n content: \"\\e111\";\n}\n.glyphicon-magnet:before {\n content: \"\\e112\";\n}\n.glyphicon-chevron-up:before {\n content: \"\\e113\";\n}\n.glyphicon-chevron-down:before {\n content: \"\\e114\";\n}\n.glyphicon-retweet:before {\n content: \"\\e115\";\n}\n.glyphicon-shopping-cart:before {\n content: \"\\e116\";\n}\n.glyphicon-folder-close:before {\n content: \"\\e117\";\n}\n.glyphicon-folder-open:before {\n content: \"\\e118\";\n}\n.glyphicon-resize-vertical:before {\n content: \"\\e119\";\n}\n.glyphicon-resize-horizontal:before {\n content: \"\\e120\";\n}\n.glyphicon-hdd:before {\n content: \"\\e121\";\n}\n.glyphicon-bullhorn:before {\n content: \"\\e122\";\n}\n.glyphicon-bell:before {\n content: \"\\e123\";\n}\n.glyphicon-certificate:before {\n content: \"\\e124\";\n}\n.glyphicon-thumbs-up:before {\n content: \"\\e125\";\n}\n.glyphicon-thumbs-down:before {\n content: \"\\e126\";\n}\n.glyphicon-hand-right:before {\n content: \"\\e127\";\n}\n.glyphicon-hand-left:before {\n content: \"\\e128\";\n}\n.glyphicon-hand-up:before {\n content: \"\\e129\";\n}\n.glyphicon-hand-down:before {\n content: \"\\e130\";\n}\n.glyphicon-circle-arrow-right:before {\n content: \"\\e131\";\n}\n.glyphicon-circle-arrow-left:before {\n content: \"\\e132\";\n}\n.glyphicon-circle-arrow-up:before {\n content: \"\\e133\";\n}\n.glyphicon-circle-arrow-down:before {\n content: \"\\e134\";\n}\n.glyphicon-globe:before {\n content: \"\\e135\";\n}\n.glyphicon-wrench:before {\n content: \"\\e136\";\n}\n.glyphicon-tasks:before {\n content: \"\\e137\";\n}\n.glyphicon-filter:before {\n content: \"\\e138\";\n}\n.glyphicon-briefcase:before {\n content: \"\\e139\";\n}\n.glyphicon-fullscreen:before {\n content: \"\\e140\";\n}\n.glyphicon-dashboard:before {\n content: \"\\e141\";\n}\n.glyphicon-paperclip:before {\n content: \"\\e142\";\n}\n.glyphicon-heart-empty:before {\n content: \"\\e143\";\n}\n.glyphicon-link:before {\n content: \"\\e144\";\n}\n.glyphicon-phone:before {\n content: \"\\e145\";\n}\n.glyphicon-pushpin:before {\n content: \"\\e146\";\n}\n.glyphicon-usd:before {\n content: \"\\e148\";\n}\n.glyphicon-gbp:before {\n content: \"\\e149\";\n}\n.glyphicon-sort:before {\n content: \"\\e150\";\n}\n.glyphicon-sort-by-alphabet:before {\n content: \"\\e151\";\n}\n.glyphicon-sort-by-alphabet-alt:before {\n content: 
\"\\e152\";\n}\n.glyphicon-sort-by-order:before {\n content: \"\\e153\";\n}\n.glyphicon-sort-by-order-alt:before {\n content: \"\\e154\";\n}\n.glyphicon-sort-by-attributes:before {\n content: \"\\e155\";\n}\n.glyphicon-sort-by-attributes-alt:before {\n content: \"\\e156\";\n}\n.glyphicon-unchecked:before {\n content: \"\\e157\";\n}\n.glyphicon-expand:before {\n content: \"\\e158\";\n}\n.glyphicon-collapse-down:before {\n content: \"\\e159\";\n}\n.glyphicon-collapse-up:before {\n content: \"\\e160\";\n}\n.glyphicon-log-in:before {\n content: \"\\e161\";\n}\n.glyphicon-flash:before {\n content: \"\\e162\";\n}\n.glyphicon-log-out:before {\n content: \"\\e163\";\n}\n.glyphicon-new-window:before {\n content: \"\\e164\";\n}\n.glyphicon-record:before {\n content: \"\\e165\";\n}\n.glyphicon-save:before {\n content: \"\\e166\";\n}\n.glyphicon-open:before {\n content: \"\\e167\";\n}\n.glyphicon-saved:before {\n content: \"\\e168\";\n}\n.glyphicon-import:before {\n content: \"\\e169\";\n}\n.glyphicon-export:before {\n content: \"\\e170\";\n}\n.glyphicon-send:before {\n content: \"\\e171\";\n}\n.glyphicon-floppy-disk:before {\n content: \"\\e172\";\n}\n.glyphicon-floppy-saved:before {\n content: \"\\e173\";\n}\n.glyphicon-floppy-remove:before {\n content: \"\\e174\";\n}\n.glyphicon-floppy-save:before {\n content: \"\\e175\";\n}\n.glyphicon-floppy-open:before {\n content: \"\\e176\";\n}\n.glyphicon-credit-card:before {\n content: \"\\e177\";\n}\n.glyphicon-transfer:before {\n content: \"\\e178\";\n}\n.glyphicon-cutlery:before {\n content: \"\\e179\";\n}\n.glyphicon-header:before {\n content: \"\\e180\";\n}\n.glyphicon-compressed:before {\n content: \"\\e181\";\n}\n.glyphicon-earphone:before {\n content: \"\\e182\";\n}\n.glyphicon-phone-alt:before {\n content: \"\\e183\";\n}\n.glyphicon-tower:before {\n content: \"\\e184\";\n}\n.glyphicon-stats:before {\n content: \"\\e185\";\n}\n.glyphicon-sd-video:before {\n content: \"\\e186\";\n}\n.glyphicon-hd-video:before {\n content: \"\\e187\";\n}\n.glyphicon-subtitles:before {\n content: \"\\e188\";\n}\n.glyphicon-sound-stereo:before {\n content: \"\\e189\";\n}\n.glyphicon-sound-dolby:before {\n content: \"\\e190\";\n}\n.glyphicon-sound-5-1:before {\n content: \"\\e191\";\n}\n.glyphicon-sound-6-1:before {\n content: \"\\e192\";\n}\n.glyphicon-sound-7-1:before {\n content: \"\\e193\";\n}\n.glyphicon-copyright-mark:before {\n content: \"\\e194\";\n}\n.glyphicon-registration-mark:before {\n content: \"\\e195\";\n}\n.glyphicon-cloud-download:before {\n content: \"\\e197\";\n}\n.glyphicon-cloud-upload:before {\n content: \"\\e198\";\n}\n.glyphicon-tree-conifer:before {\n content: \"\\e199\";\n}\n.glyphicon-tree-deciduous:before {\n content: \"\\e200\";\n}\n.glyphicon-cd:before {\n content: \"\\e201\";\n}\n.glyphicon-save-file:before {\n content: \"\\e202\";\n}\n.glyphicon-open-file:before {\n content: \"\\e203\";\n}\n.glyphicon-level-up:before {\n content: \"\\e204\";\n}\n.glyphicon-copy:before {\n content: \"\\e205\";\n}\n.glyphicon-paste:before {\n content: \"\\e206\";\n}\n.glyphicon-alert:before {\n content: \"\\e209\";\n}\n.glyphicon-equalizer:before {\n content: \"\\e210\";\n}\n.glyphicon-king:before {\n content: \"\\e211\";\n}\n.glyphicon-queen:before {\n content: \"\\e212\";\n}\n.glyphicon-pawn:before {\n content: \"\\e213\";\n}\n.glyphicon-bishop:before {\n content: \"\\e214\";\n}\n.glyphicon-knight:before {\n content: \"\\e215\";\n}\n.glyphicon-baby-formula:before {\n content: \"\\e216\";\n}\n.glyphicon-tent:before {\n content: 
\"\\26fa\";\n}\n.glyphicon-blackboard:before {\n content: \"\\e218\";\n}\n.glyphicon-bed:before {\n content: \"\\e219\";\n}\n.glyphicon-apple:before {\n content: \"\\f8ff\";\n}\n.glyphicon-erase:before {\n content: \"\\e221\";\n}\n.glyphicon-hourglass:before {\n content: \"\\231b\";\n}\n.glyphicon-lamp:before {\n content: \"\\e223\";\n}\n.glyphicon-duplicate:before {\n content: \"\\e224\";\n}\n.glyphicon-piggy-bank:before {\n content: \"\\e225\";\n}\n.glyphicon-scissors:before {\n content: \"\\e226\";\n}\n.glyphicon-bitcoin:before {\n content: \"\\e227\";\n}\n.glyphicon-btc:before {\n content: \"\\e227\";\n}\n.glyphicon-xbt:before {\n content: \"\\e227\";\n}\n.glyphicon-yen:before {\n content: \"\\00a5\";\n}\n.glyphicon-jpy:before {\n content: \"\\00a5\";\n}\n.glyphicon-ruble:before {\n content: \"\\20bd\";\n}\n.glyphicon-rub:before {\n content: \"\\20bd\";\n}\n.glyphicon-scale:before {\n content: \"\\e230\";\n}\n.glyphicon-ice-lolly:before {\n content: \"\\e231\";\n}\n.glyphicon-ice-lolly-tasted:before {\n content: \"\\e232\";\n}\n.glyphicon-education:before {\n content: \"\\e233\";\n}\n.glyphicon-option-horizontal:before {\n content: \"\\e234\";\n}\n.glyphicon-option-vertical:before {\n content: \"\\e235\";\n}\n.glyphicon-menu-hamburger:before {\n content: \"\\e236\";\n}\n.glyphicon-modal-window:before {\n content: \"\\e237\";\n}\n.glyphicon-oil:before {\n content: \"\\e238\";\n}\n.glyphicon-grain:before {\n content: \"\\e239\";\n}\n.glyphicon-sunglasses:before {\n content: \"\\e240\";\n}\n.glyphicon-text-size:before {\n content: \"\\e241\";\n}\n.glyphicon-text-color:before {\n content: \"\\e242\";\n}\n.glyphicon-text-background:before {\n content: \"\\e243\";\n}\n.glyphicon-object-align-top:before {\n content: \"\\e244\";\n}\n.glyphicon-object-align-bottom:before {\n content: \"\\e245\";\n}\n.glyphicon-object-align-horizontal:before {\n content: \"\\e246\";\n}\n.glyphicon-object-align-left:before {\n content: \"\\e247\";\n}\n.glyphicon-object-align-vertical:before {\n content: \"\\e248\";\n}\n.glyphicon-object-align-right:before {\n content: \"\\e249\";\n}\n.glyphicon-triangle-right:before {\n content: \"\\e250\";\n}\n.glyphicon-triangle-left:before {\n content: \"\\e251\";\n}\n.glyphicon-triangle-bottom:before {\n content: \"\\e252\";\n}\n.glyphicon-triangle-top:before {\n content: \"\\e253\";\n}\n.glyphicon-console:before {\n content: \"\\e254\";\n}\n.glyphicon-superscript:before {\n content: \"\\e255\";\n}\n.glyphicon-subscript:before {\n content: \"\\e256\";\n}\n.glyphicon-menu-left:before {\n content: \"\\e257\";\n}\n.glyphicon-menu-right:before {\n content: \"\\e258\";\n}\n.glyphicon-menu-down:before {\n content: \"\\e259\";\n}\n.glyphicon-menu-up:before {\n content: \"\\e260\";\n}\n* {\n -webkit-box-sizing: border-box;\n -moz-box-sizing: border-box;\n box-sizing: border-box;\n}\n*:before,\n*:after {\n -webkit-box-sizing: border-box;\n -moz-box-sizing: border-box;\n box-sizing: border-box;\n}\nhtml {\n font-size: 10px;\n\n -webkit-tap-highlight-color: rgba(0, 0, 0, 0);\n}\nbody {\n font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n font-size: 14px;\n line-height: 1.42857143;\n color: #333;\n background-color: #fff;\n}\ninput,\nbutton,\nselect,\ntextarea {\n font-family: inherit;\n font-size: inherit;\n line-height: inherit;\n}\na {\n color: #337ab7;\n text-decoration: none;\n}\na:hover,\na:focus {\n color: #23527c;\n text-decoration: underline;\n}\na:focus {\n outline: thin dotted;\n outline: 5px auto -webkit-focus-ring-color;\n outline-offset: -2px;\n}\nfigure {\n 
margin: 0;\n}\nimg {\n vertical-align: middle;\n}\n.img-responsive,\n.thumbnail > img,\n.thumbnail a > img,\n.carousel-inner > .item > img,\n.carousel-inner > .item > a > img {\n display: block;\n max-width: 100%;\n height: auto;\n}\n.img-rounded {\n border-radius: 6px;\n}\n.img-thumbnail {\n display: inline-block;\n max-width: 100%;\n height: auto;\n padding: 4px;\n line-height: 1.42857143;\n background-color: #fff;\n border: 1px solid #ddd;\n border-radius: 4px;\n -webkit-transition: all .2s ease-in-out;\n -o-transition: all .2s ease-in-out;\n transition: all .2s ease-in-out;\n}\n.img-circle {\n border-radius: 50%;\n}\nhr {\n margin-top: 20px;\n margin-bottom: 20px;\n border: 0;\n border-top: 1px solid #eee;\n}\n.sr-only {\n position: absolute;\n width: 1px;\n height: 1px;\n padding: 0;\n margin: -1px;\n overflow: hidden;\n clip: rect(0, 0, 0, 0);\n border: 0;\n}\n.sr-only-focusable:active,\n.sr-only-focusable:focus {\n position: static;\n width: auto;\n height: auto;\n margin: 0;\n overflow: visible;\n clip: auto;\n}\n[role=\"button\"] {\n cursor: pointer;\n}\nh1,\nh2,\nh3,\nh4,\nh5,\nh6,\n.h1,\n.h2,\n.h3,\n.h4,\n.h5,\n.h6 {\n font-family: inherit;\n font-weight: 500;\n line-height: 1.1;\n color: inherit;\n}\nh1 small,\nh2 small,\nh3 small,\nh4 small,\nh5 small,\nh6 small,\n.h1 small,\n.h2 small,\n.h3 small,\n.h4 small,\n.h5 small,\n.h6 small,\nh1 .small,\nh2 .small,\nh3 .small,\nh4 .small,\nh5 .small,\nh6 .small,\n.h1 .small,\n.h2 .small,\n.h3 .small,\n.h4 .small,\n.h5 .small,\n.h6 .small {\n font-weight: normal;\n line-height: 1;\n color: #777;\n}\nh1,\n.h1,\nh2,\n.h2,\nh3,\n.h3 {\n margin-top: 20px;\n margin-bottom: 10px;\n}\nh1 small,\n.h1 small,\nh2 small,\n.h2 small,\nh3 small,\n.h3 small,\nh1 .small,\n.h1 .small,\nh2 .small,\n.h2 .small,\nh3 .small,\n.h3 .small {\n font-size: 65%;\n}\nh4,\n.h4,\nh5,\n.h5,\nh6,\n.h6 {\n margin-top: 10px;\n margin-bottom: 10px;\n}\nh4 small,\n.h4 small,\nh5 small,\n.h5 small,\nh6 small,\n.h6 small,\nh4 .small,\n.h4 .small,\nh5 .small,\n.h5 .small,\nh6 .small,\n.h6 .small {\n font-size: 75%;\n}\nh1,\n.h1 {\n font-size: 36px;\n}\nh2,\n.h2 {\n font-size: 30px;\n}\nh3,\n.h3 {\n font-size: 24px;\n}\nh4,\n.h4 {\n font-size: 18px;\n}\nh5,\n.h5 {\n font-size: 14px;\n}\nh6,\n.h6 {\n font-size: 12px;\n}\np {\n margin: 0 0 10px;\n}\n.lead {\n margin-bottom: 20px;\n font-size: 16px;\n font-weight: 300;\n line-height: 1.4;\n}\n@media (min-width: 768px) {\n .lead {\n font-size: 21px;\n }\n}\nsmall,\n.small {\n font-size: 85%;\n}\nmark,\n.mark {\n padding: .2em;\n background-color: #fcf8e3;\n}\n.text-left {\n text-align: left;\n}\n.text-right {\n text-align: right;\n}\n.text-center {\n text-align: center;\n}\n.text-justify {\n text-align: justify;\n}\n.text-nowrap {\n white-space: nowrap;\n}\n.text-lowercase {\n text-transform: lowercase;\n}\n.text-uppercase {\n text-transform: uppercase;\n}\n.text-capitalize {\n text-transform: capitalize;\n}\n.text-muted {\n color: #777;\n}\n.text-primary {\n color: #337ab7;\n}\na.text-primary:hover {\n color: #286090;\n}\n.text-success {\n color: #3c763d;\n}\na.text-success:hover {\n color: #2b542c;\n}\n.text-info {\n color: #31708f;\n}\na.text-info:hover {\n color: #245269;\n}\n.text-warning {\n color: #8a6d3b;\n}\na.text-warning:hover {\n color: #66512c;\n}\n.text-danger {\n color: #a94442;\n}\na.text-danger:hover {\n color: #843534;\n}\n.bg-primary {\n color: #fff;\n background-color: #337ab7;\n}\na.bg-primary:hover {\n background-color: #286090;\n}\n.bg-success {\n background-color: #dff0d8;\n}\na.bg-success:hover {\n 
background-color: #c1e2b3;\n}\n.bg-info {\n background-color: #d9edf7;\n}\na.bg-info:hover {\n background-color: #afd9ee;\n}\n.bg-warning {\n background-color: #fcf8e3;\n}\na.bg-warning:hover {\n background-color: #f7ecb5;\n}\n.bg-danger {\n background-color: #f2dede;\n}\na.bg-danger:hover {\n background-color: #e4b9b9;\n}\n.page-header {\n padding-bottom: 9px;\n margin: 40px 0 20px;\n border-bottom: 1px solid #eee;\n}\nul,\nol {\n margin-top: 0;\n margin-bottom: 10px;\n}\nul ul,\nol ul,\nul ol,\nol ol {\n margin-bottom: 0;\n}\n.list-unstyled {\n padding-left: 0;\n list-style: none;\n}\n.list-inline {\n padding-left: 0;\n margin-left: -5px;\n list-style: none;\n}\n.list-inline > li {\n display: inline-block;\n padding-right: 5px;\n padding-left: 5px;\n}\ndl {\n margin-top: 0;\n margin-bottom: 20px;\n}\ndt,\ndd {\n line-height: 1.42857143;\n}\ndt {\n font-weight: bold;\n}\ndd {\n margin-left: 0;\n}\n@media (min-width: 768px) {\n .dl-horizontal dt {\n float: left;\n width: 160px;\n overflow: hidden;\n clear: left;\n text-align: right;\n text-overflow: ellipsis;\n white-space: nowrap;\n }\n .dl-horizontal dd {\n margin-left: 180px;\n }\n}\nabbr[title],\nabbr[data-original-title] {\n cursor: help;\n border-bottom: 1px dotted #777;\n}\n.initialism {\n font-size: 90%;\n text-transform: uppercase;\n}\nblockquote {\n padding: 10px 20px;\n margin: 0 0 20px;\n font-size: 17.5px;\n border-left: 5px solid #eee;\n}\nblockquote p:last-child,\nblockquote ul:last-child,\nblockquote ol:last-child {\n margin-bottom: 0;\n}\nblockquote footer,\nblockquote small,\nblockquote .small {\n display: block;\n font-size: 80%;\n line-height: 1.42857143;\n color: #777;\n}\nblockquote footer:before,\nblockquote small:before,\nblockquote .small:before {\n content: '\\2014 \\00A0';\n}\n.blockquote-reverse,\nblockquote.pull-right {\n padding-right: 15px;\n padding-left: 0;\n text-align: right;\n border-right: 5px solid #eee;\n border-left: 0;\n}\n.blockquote-reverse footer:before,\nblockquote.pull-right footer:before,\n.blockquote-reverse small:before,\nblockquote.pull-right small:before,\n.blockquote-reverse .small:before,\nblockquote.pull-right .small:before {\n content: '';\n}\n.blockquote-reverse footer:after,\nblockquote.pull-right footer:after,\n.blockquote-reverse small:after,\nblockquote.pull-right small:after,\n.blockquote-reverse .small:after,\nblockquote.pull-right .small:after {\n content: '\\00A0 \\2014';\n}\naddress {\n margin-bottom: 20px;\n font-style: normal;\n line-height: 1.42857143;\n}\ncode,\nkbd,\npre,\nsamp {\n font-family: Menlo, Monaco, Consolas, \"Courier New\", monospace;\n}\ncode {\n padding: 2px 4px;\n font-size: 90%;\n color: #c7254e;\n background-color: #f9f2f4;\n border-radius: 4px;\n}\nkbd {\n padding: 2px 4px;\n font-size: 90%;\n color: #fff;\n background-color: #333;\n border-radius: 3px;\n -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .25);\n box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .25);\n}\nkbd kbd {\n padding: 0;\n font-size: 100%;\n font-weight: bold;\n -webkit-box-shadow: none;\n box-shadow: none;\n}\npre {\n display: block;\n padding: 9.5px;\n margin: 0 0 10px;\n font-size: 13px;\n line-height: 1.42857143;\n color: #333;\n word-break: break-all;\n word-wrap: break-word;\n background-color: #f5f5f5;\n border: 1px solid #ccc;\n border-radius: 4px;\n}\npre code {\n padding: 0;\n font-size: inherit;\n color: inherit;\n white-space: pre-wrap;\n background-color: transparent;\n border-radius: 0;\n}\n.pre-scrollable {\n max-height: 340px;\n overflow-y: scroll;\n}\n.container {\n 
padding-right: 15px;\n padding-left: 15px;\n margin-right: auto;\n margin-left: auto;\n}\n@media (min-width: 768px) {\n .container {\n width: 750px;\n }\n}\n@media (min-width: 992px) {\n .container {\n width: 970px;\n }\n}\n@media (min-width: 1200px) {\n .container {\n width: 1170px;\n }\n}\n.container-fluid {\n padding-right: 15px;\n padding-left: 15px;\n margin-right: auto;\n margin-left: auto;\n}\n.row {\n margin-right: -15px;\n margin-left: -15px;\n}\n.col-xs-1, .col-sm-1, .col-md-1, .col-lg-1, .col-xs-2, .col-sm-2, .col-md-2, .col-lg-2, .col-xs-3, .col-sm-3, .col-md-3, .col-lg-3, .col-xs-4, .col-sm-4, .col-md-4, .col-lg-4, .col-xs-5, .col-sm-5, .col-md-5, .col-lg-5, .col-xs-6, .col-sm-6, .col-md-6, .col-lg-6, .col-xs-7, .col-sm-7, .col-md-7, .col-lg-7, .col-xs-8, .col-sm-8, .col-md-8, .col-lg-8, .col-xs-9, .col-sm-9, .col-md-9, .col-lg-9, .col-xs-10, .col-sm-10, .col-md-10, .col-lg-10, .col-xs-11, .col-sm-11, .col-md-11, .col-lg-11, .col-xs-12, .col-sm-12, .col-md-12, .col-lg-12 {\n position: relative;\n min-height: 1px;\n padding-right: 15px;\n padding-left: 15px;\n}\n.col-xs-1, .col-xs-2, .col-xs-3, .col-xs-4, .col-xs-5, .col-xs-6, .col-xs-7, .col-xs-8, .col-xs-9, .col-xs-10, .col-xs-11, .col-xs-12 {\n float: left;\n}\n.col-xs-12 {\n width: 100%;\n}\n.col-xs-11 {\n width: 91.66666667%;\n}\n.col-xs-10 {\n width: 83.33333333%;\n}\n.col-xs-9 {\n width: 75%;\n}\n.col-xs-8 {\n width: 66.66666667%;\n}\n.col-xs-7 {\n width: 58.33333333%;\n}\n.col-xs-6 {\n width: 50%;\n}\n.col-xs-5 {\n width: 41.66666667%;\n}\n.col-xs-4 {\n width: 33.33333333%;\n}\n.col-xs-3 {\n width: 25%;\n}\n.col-xs-2 {\n width: 16.66666667%;\n}\n.col-xs-1 {\n width: 8.33333333%;\n}\n.col-xs-pull-12 {\n right: 100%;\n}\n.col-xs-pull-11 {\n right: 91.66666667%;\n}\n.col-xs-pull-10 {\n right: 83.33333333%;\n}\n.col-xs-pull-9 {\n right: 75%;\n}\n.col-xs-pull-8 {\n right: 66.66666667%;\n}\n.col-xs-pull-7 {\n right: 58.33333333%;\n}\n.col-xs-pull-6 {\n right: 50%;\n}\n.col-xs-pull-5 {\n right: 41.66666667%;\n}\n.col-xs-pull-4 {\n right: 33.33333333%;\n}\n.col-xs-pull-3 {\n right: 25%;\n}\n.col-xs-pull-2 {\n right: 16.66666667%;\n}\n.col-xs-pull-1 {\n right: 8.33333333%;\n}\n.col-xs-pull-0 {\n right: auto;\n}\n.col-xs-push-12 {\n left: 100%;\n}\n.col-xs-push-11 {\n left: 91.66666667%;\n}\n.col-xs-push-10 {\n left: 83.33333333%;\n}\n.col-xs-push-9 {\n left: 75%;\n}\n.col-xs-push-8 {\n left: 66.66666667%;\n}\n.col-xs-push-7 {\n left: 58.33333333%;\n}\n.col-xs-push-6 {\n left: 50%;\n}\n.col-xs-push-5 {\n left: 41.66666667%;\n}\n.col-xs-push-4 {\n left: 33.33333333%;\n}\n.col-xs-push-3 {\n left: 25%;\n}\n.col-xs-push-2 {\n left: 16.66666667%;\n}\n.col-xs-push-1 {\n left: 8.33333333%;\n}\n.col-xs-push-0 {\n left: auto;\n}\n.col-xs-offset-12 {\n margin-left: 100%;\n}\n.col-xs-offset-11 {\n margin-left: 91.66666667%;\n}\n.col-xs-offset-10 {\n margin-left: 83.33333333%;\n}\n.col-xs-offset-9 {\n margin-left: 75%;\n}\n.col-xs-offset-8 {\n margin-left: 66.66666667%;\n}\n.col-xs-offset-7 {\n margin-left: 58.33333333%;\n}\n.col-xs-offset-6 {\n margin-left: 50%;\n}\n.col-xs-offset-5 {\n margin-left: 41.66666667%;\n}\n.col-xs-offset-4 {\n margin-left: 33.33333333%;\n}\n.col-xs-offset-3 {\n margin-left: 25%;\n}\n.col-xs-offset-2 {\n margin-left: 16.66666667%;\n}\n.col-xs-offset-1 {\n margin-left: 8.33333333%;\n}\n.col-xs-offset-0 {\n margin-left: 0;\n}\n@media (min-width: 768px) {\n .col-sm-1, .col-sm-2, .col-sm-3, .col-sm-4, .col-sm-5, .col-sm-6, .col-sm-7, .col-sm-8, .col-sm-9, .col-sm-10, .col-sm-11, .col-sm-12 {\n float: left;\n }\n 
.col-sm-12 {\n width: 100%;\n }\n .col-sm-11 {\n width: 91.66666667%;\n }\n .col-sm-10 {\n width: 83.33333333%;\n }\n .col-sm-9 {\n width: 75%;\n }\n .col-sm-8 {\n width: 66.66666667%;\n }\n .col-sm-7 {\n width: 58.33333333%;\n }\n .col-sm-6 {\n width: 50%;\n }\n .col-sm-5 {\n width: 41.66666667%;\n }\n .col-sm-4 {\n width: 33.33333333%;\n }\n .col-sm-3 {\n width: 25%;\n }\n .col-sm-2 {\n width: 16.66666667%;\n }\n .col-sm-1 {\n width: 8.33333333%;\n }\n .col-sm-pull-12 {\n right: 100%;\n }\n .col-sm-pull-11 {\n right: 91.66666667%;\n }\n .col-sm-pull-10 {\n right: 83.33333333%;\n }\n .col-sm-pull-9 {\n right: 75%;\n }\n .col-sm-pull-8 {\n right: 66.66666667%;\n }\n .col-sm-pull-7 {\n right: 58.33333333%;\n }\n .col-sm-pull-6 {\n right: 50%;\n }\n .col-sm-pull-5 {\n right: 41.66666667%;\n }\n .col-sm-pull-4 {\n right: 33.33333333%;\n }\n .col-sm-pull-3 {\n right: 25%;\n }\n .col-sm-pull-2 {\n right: 16.66666667%;\n }\n .col-sm-pull-1 {\n right: 8.33333333%;\n }\n .col-sm-pull-0 {\n right: auto;\n }\n .col-sm-push-12 {\n left: 100%;\n }\n .col-sm-push-11 {\n left: 91.66666667%;\n }\n .col-sm-push-10 {\n left: 83.33333333%;\n }\n .col-sm-push-9 {\n left: 75%;\n }\n .col-sm-push-8 {\n left: 66.66666667%;\n }\n .col-sm-push-7 {\n left: 58.33333333%;\n }\n .col-sm-push-6 {\n left: 50%;\n }\n .col-sm-push-5 {\n left: 41.66666667%;\n }\n .col-sm-push-4 {\n left: 33.33333333%;\n }\n .col-sm-push-3 {\n left: 25%;\n }\n .col-sm-push-2 {\n left: 16.66666667%;\n }\n .col-sm-push-1 {\n left: 8.33333333%;\n }\n .col-sm-push-0 {\n left: auto;\n }\n .col-sm-offset-12 {\n margin-left: 100%;\n }\n .col-sm-offset-11 {\n margin-left: 91.66666667%;\n }\n .col-sm-offset-10 {\n margin-left: 83.33333333%;\n }\n .col-sm-offset-9 {\n margin-left: 75%;\n }\n .col-sm-offset-8 {\n margin-left: 66.66666667%;\n }\n .col-sm-offset-7 {\n margin-left: 58.33333333%;\n }\n .col-sm-offset-6 {\n margin-left: 50%;\n }\n .col-sm-offset-5 {\n margin-left: 41.66666667%;\n }\n .col-sm-offset-4 {\n margin-left: 33.33333333%;\n }\n .col-sm-offset-3 {\n margin-left: 25%;\n }\n .col-sm-offset-2 {\n margin-left: 16.66666667%;\n }\n .col-sm-offset-1 {\n margin-left: 8.33333333%;\n }\n .col-sm-offset-0 {\n margin-left: 0;\n }\n}\n@media (min-width: 992px) {\n .col-md-1, .col-md-2, .col-md-3, .col-md-4, .col-md-5, .col-md-6, .col-md-7, .col-md-8, .col-md-9, .col-md-10, .col-md-11, .col-md-12 {\n float: left;\n }\n .col-md-12 {\n width: 100%;\n }\n .col-md-11 {\n width: 91.66666667%;\n }\n .col-md-10 {\n width: 83.33333333%;\n }\n .col-md-9 {\n width: 75%;\n }\n .col-md-8 {\n width: 66.66666667%;\n }\n .col-md-7 {\n width: 58.33333333%;\n }\n .col-md-6 {\n width: 50%;\n }\n .col-md-5 {\n width: 41.66666667%;\n }\n .col-md-4 {\n width: 33.33333333%;\n }\n .col-md-3 {\n width: 25%;\n }\n .col-md-2 {\n width: 16.66666667%;\n }\n .col-md-1 {\n width: 8.33333333%;\n }\n .col-md-pull-12 {\n right: 100%;\n }\n .col-md-pull-11 {\n right: 91.66666667%;\n }\n .col-md-pull-10 {\n right: 83.33333333%;\n }\n .col-md-pull-9 {\n right: 75%;\n }\n .col-md-pull-8 {\n right: 66.66666667%;\n }\n .col-md-pull-7 {\n right: 58.33333333%;\n }\n .col-md-pull-6 {\n right: 50%;\n }\n .col-md-pull-5 {\n right: 41.66666667%;\n }\n .col-md-pull-4 {\n right: 33.33333333%;\n }\n .col-md-pull-3 {\n right: 25%;\n }\n .col-md-pull-2 {\n right: 16.66666667%;\n }\n .col-md-pull-1 {\n right: 8.33333333%;\n }\n .col-md-pull-0 {\n right: auto;\n }\n .col-md-push-12 {\n left: 100%;\n }\n .col-md-push-11 {\n left: 91.66666667%;\n }\n .col-md-push-10 {\n left: 83.33333333%;\n }\n 
.col-md-push-9 {\n left: 75%;\n }\n .col-md-push-8 {\n left: 66.66666667%;\n }\n .col-md-push-7 {\n left: 58.33333333%;\n }\n .col-md-push-6 {\n left: 50%;\n }\n .col-md-push-5 {\n left: 41.66666667%;\n }\n .col-md-push-4 {\n left: 33.33333333%;\n }\n .col-md-push-3 {\n left: 25%;\n }\n .col-md-push-2 {\n left: 16.66666667%;\n }\n .col-md-push-1 {\n left: 8.33333333%;\n }\n .col-md-push-0 {\n left: auto;\n }\n .col-md-offset-12 {\n margin-left: 100%;\n }\n .col-md-offset-11 {\n margin-left: 91.66666667%;\n }\n .col-md-offset-10 {\n margin-left: 83.33333333%;\n }\n .col-md-offset-9 {\n margin-left: 75%;\n }\n .col-md-offset-8 {\n margin-left: 66.66666667%;\n }\n .col-md-offset-7 {\n margin-left: 58.33333333%;\n }\n .col-md-offset-6 {\n margin-left: 50%;\n }\n .col-md-offset-5 {\n margin-left: 41.66666667%;\n }\n .col-md-offset-4 {\n margin-left: 33.33333333%;\n }\n .col-md-offset-3 {\n margin-left: 25%;\n }\n .col-md-offset-2 {\n margin-left: 16.66666667%;\n }\n .col-md-offset-1 {\n margin-left: 8.33333333%;\n }\n .col-md-offset-0 {\n margin-left: 0;\n }\n}\n@media (min-width: 1200px) {\n .col-lg-1, .col-lg-2, .col-lg-3, .col-lg-4, .col-lg-5, .col-lg-6, .col-lg-7, .col-lg-8, .col-lg-9, .col-lg-10, .col-lg-11, .col-lg-12 {\n float: left;\n }\n .col-lg-12 {\n width: 100%;\n }\n .col-lg-11 {\n width: 91.66666667%;\n }\n .col-lg-10 {\n width: 83.33333333%;\n }\n .col-lg-9 {\n width: 75%;\n }\n .col-lg-8 {\n width: 66.66666667%;\n }\n .col-lg-7 {\n width: 58.33333333%;\n }\n .col-lg-6 {\n width: 50%;\n }\n .col-lg-5 {\n width: 41.66666667%;\n }\n .col-lg-4 {\n width: 33.33333333%;\n }\n .col-lg-3 {\n width: 25%;\n }\n .col-lg-2 {\n width: 16.66666667%;\n }\n .col-lg-1 {\n width: 8.33333333%;\n }\n .col-lg-pull-12 {\n right: 100%;\n }\n .col-lg-pull-11 {\n right: 91.66666667%;\n }\n .col-lg-pull-10 {\n right: 83.33333333%;\n }\n .col-lg-pull-9 {\n right: 75%;\n }\n .col-lg-pull-8 {\n right: 66.66666667%;\n }\n .col-lg-pull-7 {\n right: 58.33333333%;\n }\n .col-lg-pull-6 {\n right: 50%;\n }\n .col-lg-pull-5 {\n right: 41.66666667%;\n }\n .col-lg-pull-4 {\n right: 33.33333333%;\n }\n .col-lg-pull-3 {\n right: 25%;\n }\n .col-lg-pull-2 {\n right: 16.66666667%;\n }\n .col-lg-pull-1 {\n right: 8.33333333%;\n }\n .col-lg-pull-0 {\n right: auto;\n }\n .col-lg-push-12 {\n left: 100%;\n }\n .col-lg-push-11 {\n left: 91.66666667%;\n }\n .col-lg-push-10 {\n left: 83.33333333%;\n }\n .col-lg-push-9 {\n left: 75%;\n }\n .col-lg-push-8 {\n left: 66.66666667%;\n }\n .col-lg-push-7 {\n left: 58.33333333%;\n }\n .col-lg-push-6 {\n left: 50%;\n }\n .col-lg-push-5 {\n left: 41.66666667%;\n }\n .col-lg-push-4 {\n left: 33.33333333%;\n }\n .col-lg-push-3 {\n left: 25%;\n }\n .col-lg-push-2 {\n left: 16.66666667%;\n }\n .col-lg-push-1 {\n left: 8.33333333%;\n }\n .col-lg-push-0 {\n left: auto;\n }\n .col-lg-offset-12 {\n margin-left: 100%;\n }\n .col-lg-offset-11 {\n margin-left: 91.66666667%;\n }\n .col-lg-offset-10 {\n margin-left: 83.33333333%;\n }\n .col-lg-offset-9 {\n margin-left: 75%;\n }\n .col-lg-offset-8 {\n margin-left: 66.66666667%;\n }\n .col-lg-offset-7 {\n margin-left: 58.33333333%;\n }\n .col-lg-offset-6 {\n margin-left: 50%;\n }\n .col-lg-offset-5 {\n margin-left: 41.66666667%;\n }\n .col-lg-offset-4 {\n margin-left: 33.33333333%;\n }\n .col-lg-offset-3 {\n margin-left: 25%;\n }\n .col-lg-offset-2 {\n margin-left: 16.66666667%;\n }\n .col-lg-offset-1 {\n margin-left: 8.33333333%;\n }\n .col-lg-offset-0 {\n margin-left: 0;\n }\n}\ntable {\n background-color: transparent;\n}\ncaption {\n padding-top: 
8px;\n padding-bottom: 8px;\n color: #777;\n text-align: left;\n}\nth {\n text-align: left;\n}\n.table {\n width: 100%;\n max-width: 100%;\n margin-bottom: 20px;\n}\n.table > thead > tr > th,\n.table > tbody > tr > th,\n.table > tfoot > tr > th,\n.table > thead > tr > td,\n.table > tbody > tr > td,\n.table > tfoot > tr > td {\n padding: 8px;\n line-height: 1.42857143;\n vertical-align: top;\n border-top: 1px solid #ddd;\n}\n.table > thead > tr > th {\n vertical-align: bottom;\n border-bottom: 2px solid #ddd;\n}\n.table > caption + thead > tr:first-child > th,\n.table > colgroup + thead > tr:first-child > th,\n.table > thead:first-child > tr:first-child > th,\n.table > caption + thead > tr:first-child > td,\n.table > colgroup + thead > tr:first-child > td,\n.table > thead:first-child > tr:first-child > td {\n border-top: 0;\n}\n.table > tbody + tbody {\n border-top: 2px solid #ddd;\n}\n.table .table {\n background-color: #fff;\n}\n.table-condensed > thead > tr > th,\n.table-condensed > tbody > tr > th,\n.table-condensed > tfoot > tr > th,\n.table-condensed > thead > tr > td,\n.table-condensed > tbody > tr > td,\n.table-condensed > tfoot > tr > td {\n padding: 5px;\n}\n.table-bordered {\n border: 1px solid #ddd;\n}\n.table-bordered > thead > tr > th,\n.table-bordered > tbody > tr > th,\n.table-bordered > tfoot > tr > th,\n.table-bordered > thead > tr > td,\n.table-bordered > tbody > tr > td,\n.table-bordered > tfoot > tr > td {\n border: 1px solid #ddd;\n}\n.table-bordered > thead > tr > th,\n.table-bordered > thead > tr > td {\n border-bottom-width: 2px;\n}\n.table-striped > tbody > tr:nth-of-type(odd) {\n background-color: #f9f9f9;\n}\n.table-hover > tbody > tr:hover {\n background-color: #f5f5f5;\n}\ntable col[class*=\"col-\"] {\n position: static;\n display: table-column;\n float: none;\n}\ntable td[class*=\"col-\"],\ntable th[class*=\"col-\"] {\n position: static;\n display: table-cell;\n float: none;\n}\n.table > thead > tr > td.active,\n.table > tbody > tr > td.active,\n.table > tfoot > tr > td.active,\n.table > thead > tr > th.active,\n.table > tbody > tr > th.active,\n.table > tfoot > tr > th.active,\n.table > thead > tr.active > td,\n.table > tbody > tr.active > td,\n.table > tfoot > tr.active > td,\n.table > thead > tr.active > th,\n.table > tbody > tr.active > th,\n.table > tfoot > tr.active > th {\n background-color: #f5f5f5;\n}\n.table-hover > tbody > tr > td.active:hover,\n.table-hover > tbody > tr > th.active:hover,\n.table-hover > tbody > tr.active:hover > td,\n.table-hover > tbody > tr:hover > .active,\n.table-hover > tbody > tr.active:hover > th {\n background-color: #e8e8e8;\n}\n.table > thead > tr > td.success,\n.table > tbody > tr > td.success,\n.table > tfoot > tr > td.success,\n.table > thead > tr > th.success,\n.table > tbody > tr > th.success,\n.table > tfoot > tr > th.success,\n.table > thead > tr.success > td,\n.table > tbody > tr.success > td,\n.table > tfoot > tr.success > td,\n.table > thead > tr.success > th,\n.table > tbody > tr.success > th,\n.table > tfoot > tr.success > th {\n background-color: #dff0d8;\n}\n.table-hover > tbody > tr > td.success:hover,\n.table-hover > tbody > tr > th.success:hover,\n.table-hover > tbody > tr.success:hover > td,\n.table-hover > tbody > tr:hover > .success,\n.table-hover > tbody > tr.success:hover > th {\n background-color: #d0e9c6;\n}\n.table > thead > tr > td.info,\n.table > tbody > tr > td.info,\n.table > tfoot > tr > td.info,\n.table > thead > tr > th.info,\n.table > tbody > tr > th.info,\n.table > tfoot > tr > 
th.info,\n.table > thead > tr.info > td,\n.table > tbody > tr.info > td,\n.table > tfoot > tr.info > td,\n.table > thead > tr.info > th,\n.table > tbody > tr.info > th,\n.table > tfoot > tr.info > th {\n background-color: #d9edf7;\n}\n.table-hover > tbody > tr > td.info:hover,\n.table-hover > tbody > tr > th.info:hover,\n.table-hover > tbody > tr.info:hover > td,\n.table-hover > tbody > tr:hover > .info,\n.table-hover > tbody > tr.info:hover > th {\n background-color: #c4e3f3;\n}\n.table > thead > tr > td.warning,\n.table > tbody > tr > td.warning,\n.table > tfoot > tr > td.warning,\n.table > thead > tr > th.warning,\n.table > tbody > tr > th.warning,\n.table > tfoot > tr > th.warning,\n.table > thead > tr.warning > td,\n.table > tbody > tr.warning > td,\n.table > tfoot > tr.warning > td,\n.table > thead > tr.warning > th,\n.table > tbody > tr.warning > th,\n.table > tfoot > tr.warning > th {\n background-color: #fcf8e3;\n}\n.table-hover > tbody > tr > td.warning:hover,\n.table-hover > tbody > tr > th.warning:hover,\n.table-hover > tbody > tr.warning:hover > td,\n.table-hover > tbody > tr:hover > .warning,\n.table-hover > tbody > tr.warning:hover > th {\n background-color: #faf2cc;\n}\n.table > thead > tr > td.danger,\n.table > tbody > tr > td.danger,\n.table > tfoot > tr > td.danger,\n.table > thead > tr > th.danger,\n.table > tbody > tr > th.danger,\n.table > tfoot > tr > th.danger,\n.table > thead > tr.danger > td,\n.table > tbody > tr.danger > td,\n.table > tfoot > tr.danger > td,\n.table > thead > tr.danger > th,\n.table > tbody > tr.danger > th,\n.table > tfoot > tr.danger > th {\n background-color: #f2dede;\n}\n.table-hover > tbody > tr > td.danger:hover,\n.table-hover > tbody > tr > th.danger:hover,\n.table-hover > tbody > tr.danger:hover > td,\n.table-hover > tbody > tr:hover > .danger,\n.table-hover > tbody > tr.danger:hover > th {\n background-color: #ebcccc;\n}\n.table-responsive {\n min-height: .01%;\n overflow-x: auto;\n}\n@media screen and (max-width: 767px) {\n .table-responsive {\n width: 100%;\n margin-bottom: 15px;\n overflow-y: hidden;\n -ms-overflow-style: -ms-autohiding-scrollbar;\n border: 1px solid #ddd;\n }\n .table-responsive > .table {\n margin-bottom: 0;\n }\n .table-responsive > .table > thead > tr > th,\n .table-responsive > .table > tbody > tr > th,\n .table-responsive > .table > tfoot > tr > th,\n .table-responsive > .table > thead > tr > td,\n .table-responsive > .table > tbody > tr > td,\n .table-responsive > .table > tfoot > tr > td {\n white-space: nowrap;\n }\n .table-responsive > .table-bordered {\n border: 0;\n }\n .table-responsive > .table-bordered > thead > tr > th:first-child,\n .table-responsive > .table-bordered > tbody > tr > th:first-child,\n .table-responsive > .table-bordered > tfoot > tr > th:first-child,\n .table-responsive > .table-bordered > thead > tr > td:first-child,\n .table-responsive > .table-bordered > tbody > tr > td:first-child,\n .table-responsive > .table-bordered > tfoot > tr > td:first-child {\n border-left: 0;\n }\n .table-responsive > .table-bordered > thead > tr > th:last-child,\n .table-responsive > .table-bordered > tbody > tr > th:last-child,\n .table-responsive > .table-bordered > tfoot > tr > th:last-child,\n .table-responsive > .table-bordered > thead > tr > td:last-child,\n .table-responsive > .table-bordered > tbody > tr > td:last-child,\n .table-responsive > .table-bordered > tfoot > tr > td:last-child {\n border-right: 0;\n }\n .table-responsive > .table-bordered > tbody > tr:last-child > th,\n .table-responsive 
> .table-bordered > tfoot > tr:last-child > th,\n .table-responsive > .table-bordered > tbody > tr:last-child > td,\n .table-responsive > .table-bordered > tfoot > tr:last-child > td {\n border-bottom: 0;\n }\n}\nfieldset {\n min-width: 0;\n padding: 0;\n margin: 0;\n border: 0;\n}\nlegend {\n display: block;\n width: 100%;\n padding: 0;\n margin-bottom: 20px;\n font-size: 21px;\n line-height: inherit;\n color: #333;\n border: 0;\n border-bottom: 1px solid #e5e5e5;\n}\nlabel {\n display: inline-block;\n max-width: 100%;\n margin-bottom: 5px;\n font-weight: bold;\n}\ninput[type=\"search\"] {\n -webkit-box-sizing: border-box;\n -moz-box-sizing: border-box;\n box-sizing: border-box;\n}\ninput[type=\"radio\"],\ninput[type=\"checkbox\"] {\n margin: 4px 0 0;\n margin-top: 1px \\9;\n line-height: normal;\n}\ninput[type=\"file\"] {\n display: block;\n}\ninput[type=\"range\"] {\n display: block;\n width: 100%;\n}\nselect[multiple],\nselect[size] {\n height: auto;\n}\ninput[type=\"file\"]:focus,\ninput[type=\"radio\"]:focus,\ninput[type=\"checkbox\"]:focus {\n outline: thin dotted;\n outline: 5px auto -webkit-focus-ring-color;\n outline-offset: -2px;\n}\noutput {\n display: block;\n padding-top: 7px;\n font-size: 14px;\n line-height: 1.42857143;\n color: #555;\n}\n.form-control {\n display: block;\n width: 100%;\n height: 34px;\n padding: 6px 12px;\n font-size: 14px;\n line-height: 1.42857143;\n color: #555;\n background-color: #fff;\n background-image: none;\n border: 1px solid #ccc;\n border-radius: 4px;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n -webkit-transition: border-color ease-in-out .15s, -webkit-box-shadow ease-in-out .15s;\n -o-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n}\n.form-control:focus {\n border-color: #66afe9;\n outline: 0;\n -webkit-box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, .6);\n box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, .6);\n}\n.form-control::-moz-placeholder {\n color: #999;\n opacity: 1;\n}\n.form-control:-ms-input-placeholder {\n color: #999;\n}\n.form-control::-webkit-input-placeholder {\n color: #999;\n}\n.form-control[disabled],\n.form-control[readonly],\nfieldset[disabled] .form-control {\n background-color: #eee;\n opacity: 1;\n}\n.form-control[disabled],\nfieldset[disabled] .form-control {\n cursor: not-allowed;\n}\ntextarea.form-control {\n height: auto;\n}\ninput[type=\"search\"] {\n -webkit-appearance: none;\n}\n@media screen and (-webkit-min-device-pixel-ratio: 0) {\n input[type=\"date\"],\n input[type=\"time\"],\n input[type=\"datetime-local\"],\n input[type=\"month\"] {\n line-height: 34px;\n }\n input[type=\"date\"].input-sm,\n input[type=\"time\"].input-sm,\n input[type=\"datetime-local\"].input-sm,\n input[type=\"month\"].input-sm,\n .input-group-sm input[type=\"date\"],\n .input-group-sm input[type=\"time\"],\n .input-group-sm input[type=\"datetime-local\"],\n .input-group-sm input[type=\"month\"] {\n line-height: 30px;\n }\n input[type=\"date\"].input-lg,\n input[type=\"time\"].input-lg,\n input[type=\"datetime-local\"].input-lg,\n input[type=\"month\"].input-lg,\n .input-group-lg input[type=\"date\"],\n .input-group-lg input[type=\"time\"],\n .input-group-lg input[type=\"datetime-local\"],\n .input-group-lg input[type=\"month\"] {\n line-height: 46px;\n }\n}\n.form-group {\n margin-bottom: 15px;\n}\n.radio,\n.checkbox {\n 
position: relative;\n display: block;\n margin-top: 10px;\n margin-bottom: 10px;\n}\n.radio label,\n.checkbox label {\n min-height: 20px;\n padding-left: 20px;\n margin-bottom: 0;\n font-weight: normal;\n cursor: pointer;\n}\n.radio input[type=\"radio\"],\n.radio-inline input[type=\"radio\"],\n.checkbox input[type=\"checkbox\"],\n.checkbox-inline input[type=\"checkbox\"] {\n position: absolute;\n margin-top: 4px \\9;\n margin-left: -20px;\n}\n.radio + .radio,\n.checkbox + .checkbox {\n margin-top: -5px;\n}\n.radio-inline,\n.checkbox-inline {\n position: relative;\n display: inline-block;\n padding-left: 20px;\n margin-bottom: 0;\n font-weight: normal;\n vertical-align: middle;\n cursor: pointer;\n}\n.radio-inline + .radio-inline,\n.checkbox-inline + .checkbox-inline {\n margin-top: 0;\n margin-left: 10px;\n}\ninput[type=\"radio\"][disabled],\ninput[type=\"checkbox\"][disabled],\ninput[type=\"radio\"].disabled,\ninput[type=\"checkbox\"].disabled,\nfieldset[disabled] input[type=\"radio\"],\nfieldset[disabled] input[type=\"checkbox\"] {\n cursor: not-allowed;\n}\n.radio-inline.disabled,\n.checkbox-inline.disabled,\nfieldset[disabled] .radio-inline,\nfieldset[disabled] .checkbox-inline {\n cursor: not-allowed;\n}\n.radio.disabled label,\n.checkbox.disabled label,\nfieldset[disabled] .radio label,\nfieldset[disabled] .checkbox label {\n cursor: not-allowed;\n}\n.form-control-static {\n min-height: 34px;\n padding-top: 7px;\n padding-bottom: 7px;\n margin-bottom: 0;\n}\n.form-control-static.input-lg,\n.form-control-static.input-sm {\n padding-right: 0;\n padding-left: 0;\n}\n.input-sm {\n height: 30px;\n padding: 5px 10px;\n font-size: 12px;\n line-height: 1.5;\n border-radius: 3px;\n}\nselect.input-sm {\n height: 30px;\n line-height: 30px;\n}\ntextarea.input-sm,\nselect[multiple].input-sm {\n height: auto;\n}\n.form-group-sm .form-control {\n height: 30px;\n padding: 5px 10px;\n font-size: 12px;\n line-height: 1.5;\n border-radius: 3px;\n}\nselect.form-group-sm .form-control {\n height: 30px;\n line-height: 30px;\n}\ntextarea.form-group-sm .form-control,\nselect[multiple].form-group-sm .form-control {\n height: auto;\n}\n.form-group-sm .form-control-static {\n height: 30px;\n min-height: 32px;\n padding: 5px 10px;\n font-size: 12px;\n line-height: 1.5;\n}\n.input-lg {\n height: 46px;\n padding: 10px 16px;\n font-size: 18px;\n line-height: 1.3333333;\n border-radius: 6px;\n}\nselect.input-lg {\n height: 46px;\n line-height: 46px;\n}\ntextarea.input-lg,\nselect[multiple].input-lg {\n height: auto;\n}\n.form-group-lg .form-control {\n height: 46px;\n padding: 10px 16px;\n font-size: 18px;\n line-height: 1.3333333;\n border-radius: 6px;\n}\nselect.form-group-lg .form-control {\n height: 46px;\n line-height: 46px;\n}\ntextarea.form-group-lg .form-control,\nselect[multiple].form-group-lg .form-control {\n height: auto;\n}\n.form-group-lg .form-control-static {\n height: 46px;\n min-height: 38px;\n padding: 10px 16px;\n font-size: 18px;\n line-height: 1.3333333;\n}\n.has-feedback {\n position: relative;\n}\n.has-feedback .form-control {\n padding-right: 42.5px;\n}\n.form-control-feedback {\n position: absolute;\n top: 0;\n right: 0;\n z-index: 2;\n display: block;\n width: 34px;\n height: 34px;\n line-height: 34px;\n text-align: center;\n pointer-events: none;\n}\n.input-lg + .form-control-feedback {\n width: 46px;\n height: 46px;\n line-height: 46px;\n}\n.input-sm + .form-control-feedback {\n width: 30px;\n height: 30px;\n line-height: 30px;\n}\n.has-success .help-block,\n.has-success 
.control-label,\n.has-success .radio,\n.has-success .checkbox,\n.has-success .radio-inline,\n.has-success .checkbox-inline,\n.has-success.radio label,\n.has-success.checkbox label,\n.has-success.radio-inline label,\n.has-success.checkbox-inline label {\n color: #3c763d;\n}\n.has-success .form-control {\n border-color: #3c763d;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n}\n.has-success .form-control:focus {\n border-color: #2b542c;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #67b168;\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #67b168;\n}\n.has-success .input-group-addon {\n color: #3c763d;\n background-color: #dff0d8;\n border-color: #3c763d;\n}\n.has-success .form-control-feedback {\n color: #3c763d;\n}\n.has-warning .help-block,\n.has-warning .control-label,\n.has-warning .radio,\n.has-warning .checkbox,\n.has-warning .radio-inline,\n.has-warning .checkbox-inline,\n.has-warning.radio label,\n.has-warning.checkbox label,\n.has-warning.radio-inline label,\n.has-warning.checkbox-inline label {\n color: #8a6d3b;\n}\n.has-warning .form-control {\n border-color: #8a6d3b;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n}\n.has-warning .form-control:focus {\n border-color: #66512c;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #c0a16b;\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #c0a16b;\n}\n.has-warning .input-group-addon {\n color: #8a6d3b;\n background-color: #fcf8e3;\n border-color: #8a6d3b;\n}\n.has-warning .form-control-feedback {\n color: #8a6d3b;\n}\n.has-error .help-block,\n.has-error .control-label,\n.has-error .radio,\n.has-error .checkbox,\n.has-error .radio-inline,\n.has-error .checkbox-inline,\n.has-error.radio label,\n.has-error.checkbox label,\n.has-error.radio-inline label,\n.has-error.checkbox-inline label {\n color: #a94442;\n}\n.has-error .form-control {\n border-color: #a94442;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n}\n.has-error .form-control:focus {\n border-color: #843534;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #ce8483;\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #ce8483;\n}\n.has-error .input-group-addon {\n color: #a94442;\n background-color: #f2dede;\n border-color: #a94442;\n}\n.has-error .form-control-feedback {\n color: #a94442;\n}\n.has-feedback label ~ .form-control-feedback {\n top: 25px;\n}\n.has-feedback label.sr-only ~ .form-control-feedback {\n top: 0;\n}\n.help-block {\n display: block;\n margin-top: 5px;\n margin-bottom: 10px;\n color: #737373;\n}\n@media (min-width: 768px) {\n .form-inline .form-group {\n display: inline-block;\n margin-bottom: 0;\n vertical-align: middle;\n }\n .form-inline .form-control {\n display: inline-block;\n width: auto;\n vertical-align: middle;\n }\n .form-inline .form-control-static {\n display: inline-block;\n }\n .form-inline .input-group {\n display: inline-table;\n vertical-align: middle;\n }\n .form-inline .input-group .input-group-addon,\n .form-inline .input-group .input-group-btn,\n .form-inline .input-group .form-control {\n width: auto;\n }\n .form-inline .input-group > .form-control {\n width: 100%;\n }\n .form-inline .control-label {\n margin-bottom: 0;\n vertical-align: middle;\n }\n .form-inline .radio,\n .form-inline .checkbox {\n display: inline-block;\n margin-top: 0;\n margin-bottom: 0;\n 
vertical-align: middle;\n }\n .form-inline .radio label,\n .form-inline .checkbox label {\n padding-left: 0;\n }\n .form-inline .radio input[type=\"radio\"],\n .form-inline .checkbox input[type=\"checkbox\"] {\n position: relative;\n margin-left: 0;\n }\n .form-inline .has-feedback .form-control-feedback {\n top: 0;\n }\n}\n.form-horizontal .radio,\n.form-horizontal .checkbox,\n.form-horizontal .radio-inline,\n.form-horizontal .checkbox-inline {\n padding-top: 7px;\n margin-top: 0;\n margin-bottom: 0;\n}\n.form-horizontal .radio,\n.form-horizontal .checkbox {\n min-height: 27px;\n}\n.form-horizontal .form-group {\n margin-right: -15px;\n margin-left: -15px;\n}\n@media (min-width: 768px) {\n .form-horizontal .control-label {\n padding-top: 7px;\n margin-bottom: 0;\n text-align: right;\n }\n}\n.form-horizontal .has-feedback .form-control-feedback {\n right: 15px;\n}\n@media (min-width: 768px) {\n .form-horizontal .form-group-lg .control-label {\n padding-top: 14.333333px;\n }\n}\n@media (min-width: 768px) {\n .form-horizontal .form-group-sm .control-label {\n padding-top: 6px;\n }\n}\n.btn {\n display: inline-block;\n padding: 6px 12px;\n margin-bottom: 0;\n font-size: 14px;\n font-weight: normal;\n line-height: 1.42857143;\n text-align: center;\n white-space: nowrap;\n vertical-align: middle;\n -ms-touch-action: manipulation;\n touch-action: manipulation;\n cursor: pointer;\n -webkit-user-select: none;\n -moz-user-select: none;\n -ms-user-select: none;\n user-select: none;\n background-image: none;\n border: 1px solid transparent;\n border-radius: 4px;\n}\n.btn:focus,\n.btn:active:focus,\n.btn.active:focus,\n.btn.focus,\n.btn:active.focus,\n.btn.active.focus {\n outline: thin dotted;\n outline: 5px auto -webkit-focus-ring-color;\n outline-offset: -2px;\n}\n.btn:hover,\n.btn:focus,\n.btn.focus {\n color: #333;\n text-decoration: none;\n}\n.btn:active,\n.btn.active {\n background-image: none;\n outline: 0;\n -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);\n box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);\n}\n.btn.disabled,\n.btn[disabled],\nfieldset[disabled] .btn {\n pointer-events: none;\n cursor: not-allowed;\n filter: alpha(opacity=65);\n -webkit-box-shadow: none;\n box-shadow: none;\n opacity: .65;\n}\n.btn-default {\n color: #333;\n background-color: #fff;\n border-color: #ccc;\n}\n.btn-default:hover,\n.btn-default:focus,\n.btn-default.focus,\n.btn-default:active,\n.btn-default.active,\n.open > .dropdown-toggle.btn-default {\n color: #333;\n background-color: #e6e6e6;\n border-color: #adadad;\n}\n.btn-default:active,\n.btn-default.active,\n.open > .dropdown-toggle.btn-default {\n background-image: none;\n}\n.btn-default.disabled,\n.btn-default[disabled],\nfieldset[disabled] .btn-default,\n.btn-default.disabled:hover,\n.btn-default[disabled]:hover,\nfieldset[disabled] .btn-default:hover,\n.btn-default.disabled:focus,\n.btn-default[disabled]:focus,\nfieldset[disabled] .btn-default:focus,\n.btn-default.disabled.focus,\n.btn-default[disabled].focus,\nfieldset[disabled] .btn-default.focus,\n.btn-default.disabled:active,\n.btn-default[disabled]:active,\nfieldset[disabled] .btn-default:active,\n.btn-default.disabled.active,\n.btn-default[disabled].active,\nfieldset[disabled] .btn-default.active {\n background-color: #fff;\n border-color: #ccc;\n}\n.btn-default .badge {\n color: #fff;\n background-color: #333;\n}\n.btn-primary {\n color: #fff;\n background-color: #337ab7;\n border-color: 
#2e6da4;\n}\n.btn-primary:hover,\n.btn-primary:focus,\n.btn-primary.focus,\n.btn-primary:active,\n.btn-primary.active,\n.open > .dropdown-toggle.btn-primary {\n color: #fff;\n background-color: #286090;\n border-color: #204d74;\n}\n.btn-primary:active,\n.btn-primary.active,\n.open > .dropdown-toggle.btn-primary {\n background-image: none;\n}\n.btn-primary.disabled,\n.btn-primary[disabled],\nfieldset[disabled] .btn-primary,\n.btn-primary.disabled:hover,\n.btn-primary[disabled]:hover,\nfieldset[disabled] .btn-primary:hover,\n.btn-primary.disabled:focus,\n.btn-primary[disabled]:focus,\nfieldset[disabled] .btn-primary:focus,\n.btn-primary.disabled.focus,\n.btn-primary[disabled].focus,\nfieldset[disabled] .btn-primary.focus,\n.btn-primary.disabled:active,\n.btn-primary[disabled]:active,\nfieldset[disabled] .btn-primary:active,\n.btn-primary.disabled.active,\n.btn-primary[disabled].active,\nfieldset[disabled] .btn-primary.active {\n background-color: #337ab7;\n border-color: #2e6da4;\n}\n.btn-primary .badge {\n color: #337ab7;\n background-color: #fff;\n}\n.btn-success {\n color: #fff;\n background-color: #5cb85c;\n border-color: #4cae4c;\n}\n.btn-success:hover,\n.btn-success:focus,\n.btn-success.focus,\n.btn-success:active,\n.btn-success.active,\n.open > .dropdown-toggle.btn-success {\n color: #fff;\n background-color: #449d44;\n border-color: #398439;\n}\n.btn-success:active,\n.btn-success.active,\n.open > .dropdown-toggle.btn-success {\n background-image: none;\n}\n.btn-success.disabled,\n.btn-success[disabled],\nfieldset[disabled] .btn-success,\n.btn-success.disabled:hover,\n.btn-success[disabled]:hover,\nfieldset[disabled] .btn-success:hover,\n.btn-success.disabled:focus,\n.btn-success[disabled]:focus,\nfieldset[disabled] .btn-success:focus,\n.btn-success.disabled.focus,\n.btn-success[disabled].focus,\nfieldset[disabled] .btn-success.focus,\n.btn-success.disabled:active,\n.btn-success[disabled]:active,\nfieldset[disabled] .btn-success:active,\n.btn-success.disabled.active,\n.btn-success[disabled].active,\nfieldset[disabled] .btn-success.active {\n background-color: #5cb85c;\n border-color: #4cae4c;\n}\n.btn-success .badge {\n color: #5cb85c;\n background-color: #fff;\n}\n.btn-info {\n color: #fff;\n background-color: #5bc0de;\n border-color: #46b8da;\n}\n.btn-info:hover,\n.btn-info:focus,\n.btn-info.focus,\n.btn-info:active,\n.btn-info.active,\n.open > .dropdown-toggle.btn-info {\n color: #fff;\n background-color: #31b0d5;\n border-color: #269abc;\n}\n.btn-info:active,\n.btn-info.active,\n.open > .dropdown-toggle.btn-info {\n background-image: none;\n}\n.btn-info.disabled,\n.btn-info[disabled],\nfieldset[disabled] .btn-info,\n.btn-info.disabled:hover,\n.btn-info[disabled]:hover,\nfieldset[disabled] .btn-info:hover,\n.btn-info.disabled:focus,\n.btn-info[disabled]:focus,\nfieldset[disabled] .btn-info:focus,\n.btn-info.disabled.focus,\n.btn-info[disabled].focus,\nfieldset[disabled] .btn-info.focus,\n.btn-info.disabled:active,\n.btn-info[disabled]:active,\nfieldset[disabled] .btn-info:active,\n.btn-info.disabled.active,\n.btn-info[disabled].active,\nfieldset[disabled] .btn-info.active {\n background-color: #5bc0de;\n border-color: #46b8da;\n}\n.btn-info .badge {\n color: #5bc0de;\n background-color: #fff;\n}\n.btn-warning {\n color: #fff;\n background-color: #f0ad4e;\n border-color: #eea236;\n}\n.btn-warning:hover,\n.btn-warning:focus,\n.btn-warning.focus,\n.btn-warning:active,\n.btn-warning.active,\n.open > .dropdown-toggle.btn-warning {\n color: #fff;\n background-color: #ec971f;\n 
border-color: #d58512;\n}\n.btn-warning:active,\n.btn-warning.active,\n.open > .dropdown-toggle.btn-warning {\n background-image: none;\n}\n.btn-warning.disabled,\n.btn-warning[disabled],\nfieldset[disabled] .btn-warning,\n.btn-warning.disabled:hover,\n.btn-warning[disabled]:hover,\nfieldset[disabled] .btn-warning:hover,\n.btn-warning.disabled:focus,\n.btn-warning[disabled]:focus,\nfieldset[disabled] .btn-warning:focus,\n.btn-warning.disabled.focus,\n.btn-warning[disabled].focus,\nfieldset[disabled] .btn-warning.focus,\n.btn-warning.disabled:active,\n.btn-warning[disabled]:active,\nfieldset[disabled] .btn-warning:active,\n.btn-warning.disabled.active,\n.btn-warning[disabled].active,\nfieldset[disabled] .btn-warning.active {\n background-color: #f0ad4e;\n border-color: #eea236;\n}\n.btn-warning .badge {\n color: #f0ad4e;\n background-color: #fff;\n}\n.btn-danger {\n color: #fff;\n background-color: #d9534f;\n border-color: #d43f3a;\n}\n.btn-danger:hover,\n.btn-danger:focus,\n.btn-danger.focus,\n.btn-danger:active,\n.btn-danger.active,\n.open > .dropdown-toggle.btn-danger {\n color: #fff;\n background-color: #c9302c;\n border-color: #ac2925;\n}\n.btn-danger:active,\n.btn-danger.active,\n.open > .dropdown-toggle.btn-danger {\n background-image: none;\n}\n.btn-danger.disabled,\n.btn-danger[disabled],\nfieldset[disabled] .btn-danger,\n.btn-danger.disabled:hover,\n.btn-danger[disabled]:hover,\nfieldset[disabled] .btn-danger:hover,\n.btn-danger.disabled:focus,\n.btn-danger[disabled]:focus,\nfieldset[disabled] .btn-danger:focus,\n.btn-danger.disabled.focus,\n.btn-danger[disabled].focus,\nfieldset[disabled] .btn-danger.focus,\n.btn-danger.disabled:active,\n.btn-danger[disabled]:active,\nfieldset[disabled] .btn-danger:active,\n.btn-danger.disabled.active,\n.btn-danger[disabled].active,\nfieldset[disabled] .btn-danger.active {\n background-color: #d9534f;\n border-color: #d43f3a;\n}\n.btn-danger .badge {\n color: #d9534f;\n background-color: #fff;\n}\n.btn-link {\n font-weight: normal;\n color: #337ab7;\n border-radius: 0;\n}\n.btn-link,\n.btn-link:active,\n.btn-link.active,\n.btn-link[disabled],\nfieldset[disabled] .btn-link {\n background-color: transparent;\n -webkit-box-shadow: none;\n box-shadow: none;\n}\n.btn-link,\n.btn-link:hover,\n.btn-link:focus,\n.btn-link:active {\n border-color: transparent;\n}\n.btn-link:hover,\n.btn-link:focus {\n color: #23527c;\n text-decoration: underline;\n background-color: transparent;\n}\n.btn-link[disabled]:hover,\nfieldset[disabled] .btn-link:hover,\n.btn-link[disabled]:focus,\nfieldset[disabled] .btn-link:focus {\n color: #777;\n text-decoration: none;\n}\n.btn-lg,\n.btn-group-lg > .btn {\n padding: 10px 16px;\n font-size: 18px;\n line-height: 1.3333333;\n border-radius: 6px;\n}\n.btn-sm,\n.btn-group-sm > .btn {\n padding: 5px 10px;\n font-size: 12px;\n line-height: 1.5;\n border-radius: 3px;\n}\n.btn-xs,\n.btn-group-xs > .btn {\n padding: 1px 5px;\n font-size: 12px;\n line-height: 1.5;\n border-radius: 3px;\n}\n.btn-block {\n display: block;\n width: 100%;\n}\n.btn-block + .btn-block {\n margin-top: 5px;\n}\ninput[type=\"submit\"].btn-block,\ninput[type=\"reset\"].btn-block,\ninput[type=\"button\"].btn-block {\n width: 100%;\n}\n.fade {\n opacity: 0;\n -webkit-transition: opacity .15s linear;\n -o-transition: opacity .15s linear;\n transition: opacity .15s linear;\n}\n.fade.in {\n opacity: 1;\n}\n.collapse {\n display: none;\n}\n.collapse.in {\n display: block;\n}\ntr.collapse.in {\n display: table-row;\n}\ntbody.collapse.in {\n display: 
table-row-group;\n}\n.collapsing {\n position: relative;\n height: 0;\n overflow: hidden;\n -webkit-transition-timing-function: ease;\n -o-transition-timing-function: ease;\n transition-timing-function: ease;\n -webkit-transition-duration: .35s;\n -o-transition-duration: .35s;\n transition-duration: .35s;\n -webkit-transition-property: height, visibility;\n -o-transition-property: height, visibility;\n transition-property: height, visibility;\n}\n.caret {\n display: inline-block;\n width: 0;\n height: 0;\n margin-left: 2px;\n vertical-align: middle;\n border-top: 4px dashed;\n border-right: 4px solid transparent;\n border-left: 4px solid transparent;\n}\n.dropup,\n.dropdown {\n position: relative;\n}\n.dropdown-toggle:focus {\n outline: 0;\n}\n.dropdown-menu {\n position: absolute;\n top: 100%;\n left: 0;\n z-index: 1000;\n display: none;\n float: left;\n min-width: 160px;\n padding: 5px 0;\n margin: 2px 0 0;\n font-size: 14px;\n text-align: left;\n list-style: none;\n background-color: #fff;\n -webkit-background-clip: padding-box;\n background-clip: padding-box;\n border: 1px solid #ccc;\n border: 1px solid rgba(0, 0, 0, .15);\n border-radius: 4px;\n -webkit-box-shadow: 0 6px 12px rgba(0, 0, 0, .175);\n box-shadow: 0 6px 12px rgba(0, 0, 0, .175);\n}\n.dropdown-menu.pull-right {\n right: 0;\n left: auto;\n}\n.dropdown-menu .divider {\n height: 1px;\n margin: 9px 0;\n overflow: hidden;\n background-color: #e5e5e5;\n}\n.dropdown-menu > li > a {\n display: block;\n padding: 3px 20px;\n clear: both;\n font-weight: normal;\n line-height: 1.42857143;\n color: #333;\n white-space: nowrap;\n}\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus {\n color: #262626;\n text-decoration: none;\n background-color: #f5f5f5;\n}\n.dropdown-menu > .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu > .active > a:focus {\n color: #fff;\n text-decoration: none;\n background-color: #337ab7;\n outline: 0;\n}\n.dropdown-menu > .disabled > a,\n.dropdown-menu > .disabled > a:hover,\n.dropdown-menu > .disabled > a:focus {\n color: #777;\n}\n.dropdown-menu > .disabled > a:hover,\n.dropdown-menu > .disabled > a:focus {\n text-decoration: none;\n cursor: not-allowed;\n background-color: transparent;\n background-image: none;\n filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n}\n.open > .dropdown-menu {\n display: block;\n}\n.open > a {\n outline: 0;\n}\n.dropdown-menu-right {\n right: 0;\n left: auto;\n}\n.dropdown-menu-left {\n right: auto;\n left: 0;\n}\n.dropdown-header {\n display: block;\n padding: 3px 20px;\n font-size: 12px;\n line-height: 1.42857143;\n color: #777;\n white-space: nowrap;\n}\n.dropdown-backdrop {\n position: fixed;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n z-index: 990;\n}\n.pull-right > .dropdown-menu {\n right: 0;\n left: auto;\n}\n.dropup .caret,\n.navbar-fixed-bottom .dropdown .caret {\n content: \"\";\n border-top: 0;\n border-bottom: 4px solid;\n}\n.dropup .dropdown-menu,\n.navbar-fixed-bottom .dropdown .dropdown-menu {\n top: auto;\n bottom: 100%;\n margin-bottom: 2px;\n}\n@media (min-width: 768px) {\n .navbar-right .dropdown-menu {\n right: 0;\n left: auto;\n }\n .navbar-right .dropdown-menu-left {\n right: auto;\n left: 0;\n }\n}\n.btn-group,\n.btn-group-vertical {\n position: relative;\n display: inline-block;\n vertical-align: middle;\n}\n.btn-group > .btn,\n.btn-group-vertical > .btn {\n position: relative;\n float: left;\n}\n.btn-group > .btn:hover,\n.btn-group-vertical > .btn:hover,\n.btn-group > .btn:focus,\n.btn-group-vertical > 
.btn:focus,\n.btn-group > .btn:active,\n.btn-group-vertical > .btn:active,\n.btn-group > .btn.active,\n.btn-group-vertical > .btn.active {\n z-index: 2;\n}\n.btn-group .btn + .btn,\n.btn-group .btn + .btn-group,\n.btn-group .btn-group + .btn,\n.btn-group .btn-group + .btn-group {\n margin-left: -1px;\n}\n.btn-toolbar {\n margin-left: -5px;\n}\n.btn-toolbar .btn-group,\n.btn-toolbar .input-group {\n float: left;\n}\n.btn-toolbar > .btn,\n.btn-toolbar > .btn-group,\n.btn-toolbar > .input-group {\n margin-left: 5px;\n}\n.btn-group > .btn:not(:first-child):not(:last-child):not(.dropdown-toggle) {\n border-radius: 0;\n}\n.btn-group > .btn:first-child {\n margin-left: 0;\n}\n.btn-group > .btn:first-child:not(:last-child):not(.dropdown-toggle) {\n border-top-right-radius: 0;\n border-bottom-right-radius: 0;\n}\n.btn-group > .btn:last-child:not(:first-child),\n.btn-group > .dropdown-toggle:not(:first-child) {\n border-top-left-radius: 0;\n border-bottom-left-radius: 0;\n}\n.btn-group > .btn-group {\n float: left;\n}\n.btn-group > .btn-group:not(:first-child):not(:last-child) > .btn {\n border-radius: 0;\n}\n.btn-group > .btn-group:first-child:not(:last-child) > .btn:last-child,\n.btn-group > .btn-group:first-child:not(:last-child) > .dropdown-toggle {\n border-top-right-radius: 0;\n border-bottom-right-radius: 0;\n}\n.btn-group > .btn-group:last-child:not(:first-child) > .btn:first-child {\n border-top-left-radius: 0;\n border-bottom-left-radius: 0;\n}\n.btn-group .dropdown-toggle:active,\n.btn-group.open .dropdown-toggle {\n outline: 0;\n}\n.btn-group > .btn + .dropdown-toggle {\n padding-right: 8px;\n padding-left: 8px;\n}\n.btn-group > .btn-lg + .dropdown-toggle {\n padding-right: 12px;\n padding-left: 12px;\n}\n.btn-group.open .dropdown-toggle {\n -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);\n box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);\n}\n.btn-group.open .dropdown-toggle.btn-link {\n -webkit-box-shadow: none;\n box-shadow: none;\n}\n.btn .caret {\n margin-left: 0;\n}\n.btn-lg .caret {\n border-width: 5px 5px 0;\n border-bottom-width: 0;\n}\n.dropup .btn-lg .caret {\n border-width: 0 5px 5px;\n}\n.btn-group-vertical > .btn,\n.btn-group-vertical > .btn-group,\n.btn-group-vertical > .btn-group > .btn {\n display: block;\n float: none;\n width: 100%;\n max-width: 100%;\n}\n.btn-group-vertical > .btn-group > .btn {\n float: none;\n}\n.btn-group-vertical > .btn + .btn,\n.btn-group-vertical > .btn + .btn-group,\n.btn-group-vertical > .btn-group + .btn,\n.btn-group-vertical > .btn-group + .btn-group {\n margin-top: -1px;\n margin-left: 0;\n}\n.btn-group-vertical > .btn:not(:first-child):not(:last-child) {\n border-radius: 0;\n}\n.btn-group-vertical > .btn:first-child:not(:last-child) {\n border-top-right-radius: 4px;\n border-bottom-right-radius: 0;\n border-bottom-left-radius: 0;\n}\n.btn-group-vertical > .btn:last-child:not(:first-child) {\n border-top-left-radius: 0;\n border-top-right-radius: 0;\n border-bottom-left-radius: 4px;\n}\n.btn-group-vertical > .btn-group:not(:first-child):not(:last-child) > .btn {\n border-radius: 0;\n}\n.btn-group-vertical > .btn-group:first-child:not(:last-child) > .btn:last-child,\n.btn-group-vertical > .btn-group:first-child:not(:last-child) > .dropdown-toggle {\n border-bottom-right-radius: 0;\n border-bottom-left-radius: 0;\n}\n.btn-group-vertical > .btn-group:last-child:not(:first-child) > .btn:first-child {\n border-top-left-radius: 0;\n border-top-right-radius: 0;\n}\n.btn-group-justified {\n display: table;\n width: 100%;\n table-layout: 
fixed;\n border-collapse: separate;\n}\n.btn-group-justified > .btn,\n.btn-group-justified > .btn-group {\n display: table-cell;\n float: none;\n width: 1%;\n}\n.btn-group-justified > .btn-group .btn {\n width: 100%;\n}\n.btn-group-justified > .btn-group .dropdown-menu {\n left: auto;\n}\n[data-toggle=\"buttons\"] > .btn input[type=\"radio\"],\n[data-toggle=\"buttons\"] > .btn-group > .btn input[type=\"radio\"],\n[data-toggle=\"buttons\"] > .btn input[type=\"checkbox\"],\n[data-toggle=\"buttons\"] > .btn-group > .btn input[type=\"checkbox\"] {\n position: absolute;\n clip: rect(0, 0, 0, 0);\n pointer-events: none;\n}\n.input-group {\n position: relative;\n display: table;\n border-collapse: separate;\n}\n.input-group[class*=\"col-\"] {\n float: none;\n padding-right: 0;\n padding-left: 0;\n}\n.input-group .form-control {\n position: relative;\n z-index: 2;\n float: left;\n width: 100%;\n margin-bottom: 0;\n}\n.input-group-lg > .form-control,\n.input-group-lg > .input-group-addon,\n.input-group-lg > .input-group-btn > .btn {\n height: 46px;\n padding: 10px 16px;\n font-size: 18px;\n line-height: 1.3333333;\n border-radius: 6px;\n}\nselect.input-group-lg > .form-control,\nselect.input-group-lg > .input-group-addon,\nselect.input-group-lg > .input-group-btn > .btn {\n height: 46px;\n line-height: 46px;\n}\ntextarea.input-group-lg > .form-control,\ntextarea.input-group-lg > .input-group-addon,\ntextarea.input-group-lg > .input-group-btn > .btn,\nselect[multiple].input-group-lg > .form-control,\nselect[multiple].input-group-lg > .input-group-addon,\nselect[multiple].input-group-lg > .input-group-btn > .btn {\n height: auto;\n}\n.input-group-sm > .form-control,\n.input-group-sm > .input-group-addon,\n.input-group-sm > .input-group-btn > .btn {\n height: 30px;\n padding: 5px 10px;\n font-size: 12px;\n line-height: 1.5;\n border-radius: 3px;\n}\nselect.input-group-sm > .form-control,\nselect.input-group-sm > .input-group-addon,\nselect.input-group-sm > .input-group-btn > .btn {\n height: 30px;\n line-height: 30px;\n}\ntextarea.input-group-sm > .form-control,\ntextarea.input-group-sm > .input-group-addon,\ntextarea.input-group-sm > .input-group-btn > .btn,\nselect[multiple].input-group-sm > .form-control,\nselect[multiple].input-group-sm > .input-group-addon,\nselect[multiple].input-group-sm > .input-group-btn > .btn {\n height: auto;\n}\n.input-group-addon,\n.input-group-btn,\n.input-group .form-control {\n display: table-cell;\n}\n.input-group-addon:not(:first-child):not(:last-child),\n.input-group-btn:not(:first-child):not(:last-child),\n.input-group .form-control:not(:first-child):not(:last-child) {\n border-radius: 0;\n}\n.input-group-addon,\n.input-group-btn {\n width: 1%;\n white-space: nowrap;\n vertical-align: middle;\n}\n.input-group-addon {\n padding: 6px 12px;\n font-size: 14px;\n font-weight: normal;\n line-height: 1;\n color: #555;\n text-align: center;\n background-color: #eee;\n border: 1px solid #ccc;\n border-radius: 4px;\n}\n.input-group-addon.input-sm {\n padding: 5px 10px;\n font-size: 12px;\n border-radius: 3px;\n}\n.input-group-addon.input-lg {\n padding: 10px 16px;\n font-size: 18px;\n border-radius: 6px;\n}\n.input-group-addon input[type=\"radio\"],\n.input-group-addon input[type=\"checkbox\"] {\n margin-top: 0;\n}\n.input-group .form-control:first-child,\n.input-group-addon:first-child,\n.input-group-btn:first-child > .btn,\n.input-group-btn:first-child > .btn-group > .btn,\n.input-group-btn:first-child > .dropdown-toggle,\n.input-group-btn:last-child > 
.btn:not(:last-child):not(.dropdown-toggle),\n.input-group-btn:last-child > .btn-group:not(:last-child) > .btn {\n border-top-right-radius: 0;\n border-bottom-right-radius: 0;\n}\n.input-group-addon:first-child {\n border-right: 0;\n}\n.input-group .form-control:last-child,\n.input-group-addon:last-child,\n.input-group-btn:last-child > .btn,\n.input-group-btn:last-child > .btn-group > .btn,\n.input-group-btn:last-child > .dropdown-toggle,\n.input-group-btn:first-child > .btn:not(:first-child),\n.input-group-btn:first-child > .btn-group:not(:first-child) > .btn {\n border-top-left-radius: 0;\n border-bottom-left-radius: 0;\n}\n.input-group-addon:last-child {\n border-left: 0;\n}\n.input-group-btn {\n position: relative;\n font-size: 0;\n white-space: nowrap;\n}\n.input-group-btn > .btn {\n position: relative;\n}\n.input-group-btn > .btn + .btn {\n margin-left: -1px;\n}\n.input-group-btn > .btn:hover,\n.input-group-btn > .btn:focus,\n.input-group-btn > .btn:active {\n z-index: 2;\n}\n.input-group-btn:first-child > .btn,\n.input-group-btn:first-child > .btn-group {\n margin-right: -1px;\n}\n.input-group-btn:last-child > .btn,\n.input-group-btn:last-child > .btn-group {\n margin-left: -1px;\n}\n.nav {\n padding-left: 0;\n margin-bottom: 0;\n list-style: none;\n}\n.nav > li {\n position: relative;\n display: block;\n}\n.nav > li > a {\n position: relative;\n display: block;\n padding: 10px 15px;\n}\n.nav > li > a:hover,\n.nav > li > a:focus {\n text-decoration: none;\n background-color: #eee;\n}\n.nav > li.disabled > a {\n color: #777;\n}\n.nav > li.disabled > a:hover,\n.nav > li.disabled > a:focus {\n color: #777;\n text-decoration: none;\n cursor: not-allowed;\n background-color: transparent;\n}\n.nav .open > a,\n.nav .open > a:hover,\n.nav .open > a:focus {\n background-color: #eee;\n border-color: #337ab7;\n}\n.nav .nav-divider {\n height: 1px;\n margin: 9px 0;\n overflow: hidden;\n background-color: #e5e5e5;\n}\n.nav > li > a > img {\n max-width: none;\n}\n.nav-tabs {\n border-bottom: 1px solid #ddd;\n}\n.nav-tabs > li {\n float: left;\n margin-bottom: -1px;\n}\n.nav-tabs > li > a {\n margin-right: 2px;\n line-height: 1.42857143;\n border: 1px solid transparent;\n border-radius: 4px 4px 0 0;\n}\n.nav-tabs > li > a:hover {\n border-color: #eee #eee #ddd;\n}\n.nav-tabs > li.active > a,\n.nav-tabs > li.active > a:hover,\n.nav-tabs > li.active > a:focus {\n color: #555;\n cursor: default;\n background-color: #fff;\n border: 1px solid #ddd;\n border-bottom-color: transparent;\n}\n.nav-tabs.nav-justified {\n width: 100%;\n border-bottom: 0;\n}\n.nav-tabs.nav-justified > li {\n float: none;\n}\n.nav-tabs.nav-justified > li > a {\n margin-bottom: 5px;\n text-align: center;\n}\n.nav-tabs.nav-justified > .dropdown .dropdown-menu {\n top: auto;\n left: auto;\n}\n@media (min-width: 768px) {\n .nav-tabs.nav-justified > li {\n display: table-cell;\n width: 1%;\n }\n .nav-tabs.nav-justified > li > a {\n margin-bottom: 0;\n }\n}\n.nav-tabs.nav-justified > li > a {\n margin-right: 0;\n border-radius: 4px;\n}\n.nav-tabs.nav-justified > .active > a,\n.nav-tabs.nav-justified > .active > a:hover,\n.nav-tabs.nav-justified > .active > a:focus {\n border: 1px solid #ddd;\n}\n@media (min-width: 768px) {\n .nav-tabs.nav-justified > li > a {\n border-bottom: 1px solid #ddd;\n border-radius: 4px 4px 0 0;\n }\n .nav-tabs.nav-justified > .active > a,\n .nav-tabs.nav-justified > .active > a:hover,\n .nav-tabs.nav-justified > .active > a:focus {\n border-bottom-color: #fff;\n }\n}\n.nav-pills > li {\n float: 
left;\n}\n.nav-pills > li > a {\n border-radius: 4px;\n}\n.nav-pills > li + li {\n margin-left: 2px;\n}\n.nav-pills > li.active > a,\n.nav-pills > li.active > a:hover,\n.nav-pills > li.active > a:focus {\n color: #fff;\n background-color: #337ab7;\n}\n.nav-stacked > li {\n float: none;\n}\n.nav-stacked > li + li {\n margin-top: 2px;\n margin-left: 0;\n}\n.nav-justified {\n width: 100%;\n}\n.nav-justified > li {\n float: none;\n}\n.nav-justified > li > a {\n margin-bottom: 5px;\n text-align: center;\n}\n.nav-justified > .dropdown .dropdown-menu {\n top: auto;\n left: auto;\n}\n@media (min-width: 768px) {\n .nav-justified > li {\n display: table-cell;\n width: 1%;\n }\n .nav-justified > li > a {\n margin-bottom: 0;\n }\n}\n.nav-tabs-justified {\n border-bottom: 0;\n}\n.nav-tabs-justified > li > a {\n margin-right: 0;\n border-radius: 4px;\n}\n.nav-tabs-justified > .active > a,\n.nav-tabs-justified > .active > a:hover,\n.nav-tabs-justified > .active > a:focus {\n border: 1px solid #ddd;\n}\n@media (min-width: 768px) {\n .nav-tabs-justified > li > a {\n border-bottom: 1px solid #ddd;\n border-radius: 4px 4px 0 0;\n }\n .nav-tabs-justified > .active > a,\n .nav-tabs-justified > .active > a:hover,\n .nav-tabs-justified > .active > a:focus {\n border-bottom-color: #fff;\n }\n}\n.tab-content > .tab-pane {\n display: none;\n}\n.tab-content > .active {\n display: block;\n}\n.nav-tabs .dropdown-menu {\n margin-top: -1px;\n border-top-left-radius: 0;\n border-top-right-radius: 0;\n}\n.navbar {\n position: relative;\n min-height: 50px;\n margin-bottom: 20px;\n border: 1px solid transparent;\n}\n@media (min-width: 768px) {\n .navbar {\n border-radius: 4px;\n }\n}\n@media (min-width: 768px) {\n .navbar-header {\n float: left;\n }\n}\n.navbar-collapse {\n padding-right: 15px;\n padding-left: 15px;\n overflow-x: visible;\n -webkit-overflow-scrolling: touch;\n border-top: 1px solid transparent;\n -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1);\n box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1);\n}\n.navbar-collapse.in {\n overflow-y: auto;\n}\n@media (min-width: 768px) {\n .navbar-collapse {\n width: auto;\n border-top: 0;\n -webkit-box-shadow: none;\n box-shadow: none;\n }\n .navbar-collapse.collapse {\n display: block !important;\n height: auto !important;\n padding-bottom: 0;\n overflow: visible !important;\n }\n .navbar-collapse.in {\n overflow-y: visible;\n }\n .navbar-fixed-top .navbar-collapse,\n .navbar-static-top .navbar-collapse,\n .navbar-fixed-bottom .navbar-collapse {\n padding-right: 0;\n padding-left: 0;\n }\n}\n.navbar-fixed-top .navbar-collapse,\n.navbar-fixed-bottom .navbar-collapse {\n max-height: 340px;\n}\n@media (max-device-width: 480px) and (orientation: landscape) {\n .navbar-fixed-top .navbar-collapse,\n .navbar-fixed-bottom .navbar-collapse {\n max-height: 200px;\n }\n}\n.container > .navbar-header,\n.container-fluid > .navbar-header,\n.container > .navbar-collapse,\n.container-fluid > .navbar-collapse {\n margin-right: -15px;\n margin-left: -15px;\n}\n@media (min-width: 768px) {\n .container > .navbar-header,\n .container-fluid > .navbar-header,\n .container > .navbar-collapse,\n .container-fluid > .navbar-collapse {\n margin-right: 0;\n margin-left: 0;\n }\n}\n.navbar-static-top {\n z-index: 1000;\n border-width: 0 0 1px;\n}\n@media (min-width: 768px) {\n .navbar-static-top {\n border-radius: 0;\n }\n}\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n position: fixed;\n right: 0;\n left: 0;\n z-index: 1030;\n}\n@media (min-width: 768px) {\n .navbar-fixed-top,\n 
.navbar-fixed-bottom {\n border-radius: 0;\n }\n}\n.navbar-fixed-top {\n top: 0;\n border-width: 0 0 1px;\n}\n.navbar-fixed-bottom {\n bottom: 0;\n margin-bottom: 0;\n border-width: 1px 0 0;\n}\n.navbar-brand {\n float: left;\n height: 50px;\n padding: 15px 15px;\n font-size: 18px;\n line-height: 20px;\n}\n.navbar-brand:hover,\n.navbar-brand:focus {\n text-decoration: none;\n}\n.navbar-brand > img {\n display: block;\n}\n@media (min-width: 768px) {\n .navbar > .container .navbar-brand,\n .navbar > .container-fluid .navbar-brand {\n margin-left: -15px;\n }\n}\n.navbar-toggle {\n position: relative;\n float: right;\n padding: 9px 10px;\n margin-top: 8px;\n margin-right: 15px;\n margin-bottom: 8px;\n background-color: transparent;\n background-image: none;\n border: 1px solid transparent;\n border-radius: 4px;\n}\n.navbar-toggle:focus {\n outline: 0;\n}\n.navbar-toggle .icon-bar {\n display: block;\n width: 22px;\n height: 2px;\n border-radius: 1px;\n}\n.navbar-toggle .icon-bar + .icon-bar {\n margin-top: 4px;\n}\n@media (min-width: 768px) {\n .navbar-toggle {\n display: none;\n }\n}\n.navbar-nav {\n margin: 7.5px -15px;\n}\n.navbar-nav > li > a {\n padding-top: 10px;\n padding-bottom: 10px;\n line-height: 20px;\n}\n@media (max-width: 767px) {\n .navbar-nav .open .dropdown-menu {\n position: static;\n float: none;\n width: auto;\n margin-top: 0;\n background-color: transparent;\n border: 0;\n -webkit-box-shadow: none;\n box-shadow: none;\n }\n .navbar-nav .open .dropdown-menu > li > a,\n .navbar-nav .open .dropdown-menu .dropdown-header {\n padding: 5px 15px 5px 25px;\n }\n .navbar-nav .open .dropdown-menu > li > a {\n line-height: 20px;\n }\n .navbar-nav .open .dropdown-menu > li > a:hover,\n .navbar-nav .open .dropdown-menu > li > a:focus {\n background-image: none;\n }\n}\n@media (min-width: 768px) {\n .navbar-nav {\n float: left;\n margin: 0;\n }\n .navbar-nav > li {\n float: left;\n }\n .navbar-nav > li > a {\n padding-top: 15px;\n padding-bottom: 15px;\n }\n}\n.navbar-form {\n padding: 10px 15px;\n margin-top: 8px;\n margin-right: -15px;\n margin-bottom: 8px;\n margin-left: -15px;\n border-top: 1px solid transparent;\n border-bottom: 1px solid transparent;\n -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1), 0 1px 0 rgba(255, 255, 255, .1);\n box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1), 0 1px 0 rgba(255, 255, 255, .1);\n}\n@media (min-width: 768px) {\n .navbar-form .form-group {\n display: inline-block;\n margin-bottom: 0;\n vertical-align: middle;\n }\n .navbar-form .form-control {\n display: inline-block;\n width: auto;\n vertical-align: middle;\n }\n .navbar-form .form-control-static {\n display: inline-block;\n }\n .navbar-form .input-group {\n display: inline-table;\n vertical-align: middle;\n }\n .navbar-form .input-group .input-group-addon,\n .navbar-form .input-group .input-group-btn,\n .navbar-form .input-group .form-control {\n width: auto;\n }\n .navbar-form .input-group > .form-control {\n width: 100%;\n }\n .navbar-form .control-label {\n margin-bottom: 0;\n vertical-align: middle;\n }\n .navbar-form .radio,\n .navbar-form .checkbox {\n display: inline-block;\n margin-top: 0;\n margin-bottom: 0;\n vertical-align: middle;\n }\n .navbar-form .radio label,\n .navbar-form .checkbox label {\n padding-left: 0;\n }\n .navbar-form .radio input[type=\"radio\"],\n .navbar-form .checkbox input[type=\"checkbox\"] {\n position: relative;\n margin-left: 0;\n }\n .navbar-form .has-feedback .form-control-feedback {\n top: 0;\n }\n}\n@media (max-width: 767px) {\n .navbar-form 
.form-group {\n margin-bottom: 5px;\n }\n .navbar-form .form-group:last-child {\n margin-bottom: 0;\n }\n}\n@media (min-width: 768px) {\n .navbar-form {\n width: auto;\n padding-top: 0;\n padding-bottom: 0;\n margin-right: 0;\n margin-left: 0;\n border: 0;\n -webkit-box-shadow: none;\n box-shadow: none;\n }\n}\n.navbar-nav > li > .dropdown-menu {\n margin-top: 0;\n border-top-left-radius: 0;\n border-top-right-radius: 0;\n}\n.navbar-fixed-bottom .navbar-nav > li > .dropdown-menu {\n margin-bottom: 0;\n border-top-left-radius: 4px;\n border-top-right-radius: 4px;\n border-bottom-right-radius: 0;\n border-bottom-left-radius: 0;\n}\n.navbar-btn {\n margin-top: 8px;\n margin-bottom: 8px;\n}\n.navbar-btn.btn-sm {\n margin-top: 10px;\n margin-bottom: 10px;\n}\n.navbar-btn.btn-xs {\n margin-top: 14px;\n margin-bottom: 14px;\n}\n.navbar-text {\n margin-top: 15px;\n margin-bottom: 15px;\n}\n@media (min-width: 768px) {\n .navbar-text {\n float: left;\n margin-right: 15px;\n margin-left: 15px;\n }\n}\n@media (min-width: 768px) {\n .navbar-left {\n float: left !important;\n }\n .navbar-right {\n float: right !important;\n margin-right: -15px;\n }\n .navbar-right ~ .navbar-right {\n margin-right: 0;\n }\n}\n.navbar-default {\n background-color: #f8f8f8;\n border-color: #e7e7e7;\n}\n.navbar-default .navbar-brand {\n color: #777;\n}\n.navbar-default .navbar-brand:hover,\n.navbar-default .navbar-brand:focus {\n color: #5e5e5e;\n background-color: transparent;\n}\n.navbar-default .navbar-text {\n color: #777;\n}\n.navbar-default .navbar-nav > li > a {\n color: #777;\n}\n.navbar-default .navbar-nav > li > a:hover,\n.navbar-default .navbar-nav > li > a:focus {\n color: #333;\n background-color: transparent;\n}\n.navbar-default .navbar-nav > .active > a,\n.navbar-default .navbar-nav > .active > a:hover,\n.navbar-default .navbar-nav > .active > a:focus {\n color: #555;\n background-color: #e7e7e7;\n}\n.navbar-default .navbar-nav > .disabled > a,\n.navbar-default .navbar-nav > .disabled > a:hover,\n.navbar-default .navbar-nav > .disabled > a:focus {\n color: #ccc;\n background-color: transparent;\n}\n.navbar-default .navbar-toggle {\n border-color: #ddd;\n}\n.navbar-default .navbar-toggle:hover,\n.navbar-default .navbar-toggle:focus {\n background-color: #ddd;\n}\n.navbar-default .navbar-toggle .icon-bar {\n background-color: #888;\n}\n.navbar-default .navbar-collapse,\n.navbar-default .navbar-form {\n border-color: #e7e7e7;\n}\n.navbar-default .navbar-nav > .open > a,\n.navbar-default .navbar-nav > .open > a:hover,\n.navbar-default .navbar-nav > .open > a:focus {\n color: #555;\n background-color: #e7e7e7;\n}\n@media (max-width: 767px) {\n .navbar-default .navbar-nav .open .dropdown-menu > li > a {\n color: #777;\n }\n .navbar-default .navbar-nav .open .dropdown-menu > li > a:hover,\n .navbar-default .navbar-nav .open .dropdown-menu > li > a:focus {\n color: #333;\n background-color: transparent;\n }\n .navbar-default .navbar-nav .open .dropdown-menu > .active > a,\n .navbar-default .navbar-nav .open .dropdown-menu > .active > a:hover,\n .navbar-default .navbar-nav .open .dropdown-menu > .active > a:focus {\n color: #555;\n background-color: #e7e7e7;\n }\n .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a,\n .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:hover,\n .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:focus {\n color: #ccc;\n background-color: transparent;\n }\n}\n.navbar-default .navbar-link {\n color: #777;\n}\n.navbar-default .navbar-link:hover {\n 
color: #333;\n}\n.navbar-default .btn-link {\n color: #777;\n}\n.navbar-default .btn-link:hover,\n.navbar-default .btn-link:focus {\n color: #333;\n}\n.navbar-default .btn-link[disabled]:hover,\nfieldset[disabled] .navbar-default .btn-link:hover,\n.navbar-default .btn-link[disabled]:focus,\nfieldset[disabled] .navbar-default .btn-link:focus {\n color: #ccc;\n}\n.navbar-inverse {\n background-color: #222;\n border-color: #080808;\n}\n.navbar-inverse .navbar-brand {\n color: #9d9d9d;\n}\n.navbar-inverse .navbar-brand:hover,\n.navbar-inverse .navbar-brand:focus {\n color: #fff;\n background-color: transparent;\n}\n.navbar-inverse .navbar-text {\n color: #9d9d9d;\n}\n.navbar-inverse .navbar-nav > li > a {\n color: #9d9d9d;\n}\n.navbar-inverse .navbar-nav > li > a:hover,\n.navbar-inverse .navbar-nav > li > a:focus {\n color: #fff;\n background-color: transparent;\n}\n.navbar-inverse .navbar-nav > .active > a,\n.navbar-inverse .navbar-nav > .active > a:hover,\n.navbar-inverse .navbar-nav > .active > a:focus {\n color: #fff;\n background-color: #080808;\n}\n.navbar-inverse .navbar-nav > .disabled > a,\n.navbar-inverse .navbar-nav > .disabled > a:hover,\n.navbar-inverse .navbar-nav > .disabled > a:focus {\n color: #444;\n background-color: transparent;\n}\n.navbar-inverse .navbar-toggle {\n border-color: #333;\n}\n.navbar-inverse .navbar-toggle:hover,\n.navbar-inverse .navbar-toggle:focus {\n background-color: #333;\n}\n.navbar-inverse .navbar-toggle .icon-bar {\n background-color: #fff;\n}\n.navbar-inverse .navbar-collapse,\n.navbar-inverse .navbar-form {\n border-color: #101010;\n}\n.navbar-inverse .navbar-nav > .open > a,\n.navbar-inverse .navbar-nav > .open > a:hover,\n.navbar-inverse .navbar-nav > .open > a:focus {\n color: #fff;\n background-color: #080808;\n}\n@media (max-width: 767px) {\n .navbar-inverse .navbar-nav .open .dropdown-menu > .dropdown-header {\n border-color: #080808;\n }\n .navbar-inverse .navbar-nav .open .dropdown-menu .divider {\n background-color: #080808;\n }\n .navbar-inverse .navbar-nav .open .dropdown-menu > li > a {\n color: #9d9d9d;\n }\n .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:hover,\n .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:focus {\n color: #fff;\n background-color: transparent;\n }\n .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a,\n .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:hover,\n .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:focus {\n color: #fff;\n background-color: #080808;\n }\n .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a,\n .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:hover,\n .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:focus {\n color: #444;\n background-color: transparent;\n }\n}\n.navbar-inverse .navbar-link {\n color: #9d9d9d;\n}\n.navbar-inverse .navbar-link:hover {\n color: #fff;\n}\n.navbar-inverse .btn-link {\n color: #9d9d9d;\n}\n.navbar-inverse .btn-link:hover,\n.navbar-inverse .btn-link:focus {\n color: #fff;\n}\n.navbar-inverse .btn-link[disabled]:hover,\nfieldset[disabled] .navbar-inverse .btn-link:hover,\n.navbar-inverse .btn-link[disabled]:focus,\nfieldset[disabled] .navbar-inverse .btn-link:focus {\n color: #444;\n}\n.breadcrumb {\n padding: 8px 15px;\n margin-bottom: 20px;\n list-style: none;\n background-color: #f5f5f5;\n border-radius: 4px;\n}\n.breadcrumb > li {\n display: inline-block;\n}\n.breadcrumb > li + li:before {\n padding: 0 5px;\n color: #ccc;\n content: 
\"/\\00a0\";\n}\n.breadcrumb > .active {\n color: #777;\n}\n.pagination {\n display: inline-block;\n padding-left: 0;\n margin: 20px 0;\n border-radius: 4px;\n}\n.pagination > li {\n display: inline;\n}\n.pagination > li > a,\n.pagination > li > span {\n position: relative;\n float: left;\n padding: 6px 12px;\n margin-left: -1px;\n line-height: 1.42857143;\n color: #337ab7;\n text-decoration: none;\n background-color: #fff;\n border: 1px solid #ddd;\n}\n.pagination > li:first-child > a,\n.pagination > li:first-child > span {\n margin-left: 0;\n border-top-left-radius: 4px;\n border-bottom-left-radius: 4px;\n}\n.pagination > li:last-child > a,\n.pagination > li:last-child > span {\n border-top-right-radius: 4px;\n border-bottom-right-radius: 4px;\n}\n.pagination > li > a:hover,\n.pagination > li > span:hover,\n.pagination > li > a:focus,\n.pagination > li > span:focus {\n color: #23527c;\n background-color: #eee;\n border-color: #ddd;\n}\n.pagination > .active > a,\n.pagination > .active > span,\n.pagination > .active > a:hover,\n.pagination > .active > span:hover,\n.pagination > .active > a:focus,\n.pagination > .active > span:focus {\n z-index: 2;\n color: #fff;\n cursor: default;\n background-color: #337ab7;\n border-color: #337ab7;\n}\n.pagination > .disabled > span,\n.pagination > .disabled > span:hover,\n.pagination > .disabled > span:focus,\n.pagination > .disabled > a,\n.pagination > .disabled > a:hover,\n.pagination > .disabled > a:focus {\n color: #777;\n cursor: not-allowed;\n background-color: #fff;\n border-color: #ddd;\n}\n.pagination-lg > li > a,\n.pagination-lg > li > span {\n padding: 10px 16px;\n font-size: 18px;\n}\n.pagination-lg > li:first-child > a,\n.pagination-lg > li:first-child > span {\n border-top-left-radius: 6px;\n border-bottom-left-radius: 6px;\n}\n.pagination-lg > li:last-child > a,\n.pagination-lg > li:last-child > span {\n border-top-right-radius: 6px;\n border-bottom-right-radius: 6px;\n}\n.pagination-sm > li > a,\n.pagination-sm > li > span {\n padding: 5px 10px;\n font-size: 12px;\n}\n.pagination-sm > li:first-child > a,\n.pagination-sm > li:first-child > span {\n border-top-left-radius: 3px;\n border-bottom-left-radius: 3px;\n}\n.pagination-sm > li:last-child > a,\n.pagination-sm > li:last-child > span {\n border-top-right-radius: 3px;\n border-bottom-right-radius: 3px;\n}\n.pager {\n padding-left: 0;\n margin: 20px 0;\n text-align: center;\n list-style: none;\n}\n.pager li {\n display: inline;\n}\n.pager li > a,\n.pager li > span {\n display: inline-block;\n padding: 5px 14px;\n background-color: #fff;\n border: 1px solid #ddd;\n border-radius: 15px;\n}\n.pager li > a:hover,\n.pager li > a:focus {\n text-decoration: none;\n background-color: #eee;\n}\n.pager .next > a,\n.pager .next > span {\n float: right;\n}\n.pager .previous > a,\n.pager .previous > span {\n float: left;\n}\n.pager .disabled > a,\n.pager .disabled > a:hover,\n.pager .disabled > a:focus,\n.pager .disabled > span {\n color: #777;\n cursor: not-allowed;\n background-color: #fff;\n}\n.label {\n display: inline;\n padding: .2em .6em .3em;\n font-size: 75%;\n font-weight: bold;\n line-height: 1;\n color: #fff;\n text-align: center;\n white-space: nowrap;\n vertical-align: baseline;\n border-radius: .25em;\n}\na.label:hover,\na.label:focus {\n color: #fff;\n text-decoration: none;\n cursor: pointer;\n}\n.label:empty {\n display: none;\n}\n.btn .label {\n position: relative;\n top: -1px;\n}\n.label-default {\n background-color: 
#777;\n}\n.label-default[href]:hover,\n.label-default[href]:focus {\n background-color: #5e5e5e;\n}\n.label-primary {\n background-color: #337ab7;\n}\n.label-primary[href]:hover,\n.label-primary[href]:focus {\n background-color: #286090;\n}\n.label-success {\n background-color: #5cb85c;\n}\n.label-success[href]:hover,\n.label-success[href]:focus {\n background-color: #449d44;\n}\n.label-info {\n background-color: #5bc0de;\n}\n.label-info[href]:hover,\n.label-info[href]:focus {\n background-color: #31b0d5;\n}\n.label-warning {\n background-color: #f0ad4e;\n}\n.label-warning[href]:hover,\n.label-warning[href]:focus {\n background-color: #ec971f;\n}\n.label-danger {\n background-color: #d9534f;\n}\n.label-danger[href]:hover,\n.label-danger[href]:focus {\n background-color: #c9302c;\n}\n.badge {\n display: inline-block;\n min-width: 10px;\n padding: 3px 7px;\n font-size: 12px;\n font-weight: bold;\n line-height: 1;\n color: #fff;\n text-align: center;\n white-space: nowrap;\n vertical-align: baseline;\n background-color: #777;\n border-radius: 10px;\n}\n.badge:empty {\n display: none;\n}\n.btn .badge {\n position: relative;\n top: -1px;\n}\n.btn-xs .badge,\n.btn-group-xs > .btn .badge {\n top: 0;\n padding: 1px 5px;\n}\na.badge:hover,\na.badge:focus {\n color: #fff;\n text-decoration: none;\n cursor: pointer;\n}\n.list-group-item.active > .badge,\n.nav-pills > .active > a > .badge {\n color: #337ab7;\n background-color: #fff;\n}\n.list-group-item > .badge {\n float: right;\n}\n.list-group-item > .badge + .badge {\n margin-right: 5px;\n}\n.nav-pills > li > a > .badge {\n margin-left: 3px;\n}\n.jumbotron {\n padding: 30px 15px;\n margin-bottom: 30px;\n color: inherit;\n background-color: #eee;\n}\n.jumbotron h1,\n.jumbotron .h1 {\n color: inherit;\n}\n.jumbotron p {\n margin-bottom: 15px;\n font-size: 21px;\n font-weight: 200;\n}\n.jumbotron > hr {\n border-top-color: #d5d5d5;\n}\n.container .jumbotron,\n.container-fluid .jumbotron {\n border-radius: 6px;\n}\n.jumbotron .container {\n max-width: 100%;\n}\n@media screen and (min-width: 768px) {\n .jumbotron {\n padding: 48px 0;\n }\n .container .jumbotron,\n .container-fluid .jumbotron {\n padding-right: 60px;\n padding-left: 60px;\n }\n .jumbotron h1,\n .jumbotron .h1 {\n font-size: 63px;\n }\n}\n.thumbnail {\n display: block;\n padding: 4px;\n margin-bottom: 20px;\n line-height: 1.42857143;\n background-color: #fff;\n border: 1px solid #ddd;\n border-radius: 4px;\n -webkit-transition: border .2s ease-in-out;\n -o-transition: border .2s ease-in-out;\n transition: border .2s ease-in-out;\n}\n.thumbnail > img,\n.thumbnail a > img {\n margin-right: auto;\n margin-left: auto;\n}\na.thumbnail:hover,\na.thumbnail:focus,\na.thumbnail.active {\n border-color: #337ab7;\n}\n.thumbnail .caption {\n padding: 9px;\n color: #333;\n}\n.alert {\n padding: 15px;\n margin-bottom: 20px;\n border: 1px solid transparent;\n border-radius: 4px;\n}\n.alert h4 {\n margin-top: 0;\n color: inherit;\n}\n.alert .alert-link {\n font-weight: bold;\n}\n.alert > p,\n.alert > ul {\n margin-bottom: 0;\n}\n.alert > p + p {\n margin-top: 5px;\n}\n.alert-dismissable,\n.alert-dismissible {\n padding-right: 35px;\n}\n.alert-dismissable .close,\n.alert-dismissible .close {\n position: relative;\n top: -2px;\n right: -21px;\n color: inherit;\n}\n.alert-success {\n color: #3c763d;\n background-color: #dff0d8;\n border-color: #d6e9c6;\n}\n.alert-success hr {\n border-top-color: #c9e2b3;\n}\n.alert-success .alert-link {\n color: #2b542c;\n}\n.alert-info {\n color: #31708f;\n 
background-color: #d9edf7;\n border-color: #bce8f1;\n}\n.alert-info hr {\n border-top-color: #a6e1ec;\n}\n.alert-info .alert-link {\n color: #245269;\n}\n.alert-warning {\n color: #8a6d3b;\n background-color: #fcf8e3;\n border-color: #faebcc;\n}\n.alert-warning hr {\n border-top-color: #f7e1b5;\n}\n.alert-warning .alert-link {\n color: #66512c;\n}\n.alert-danger {\n color: #a94442;\n background-color: #f2dede;\n border-color: #ebccd1;\n}\n.alert-danger hr {\n border-top-color: #e4b9c0;\n}\n.alert-danger .alert-link {\n color: #843534;\n}\n@-webkit-keyframes progress-bar-stripes {\n from {\n background-position: 40px 0;\n }\n to {\n background-position: 0 0;\n }\n}\n@-o-keyframes progress-bar-stripes {\n from {\n background-position: 40px 0;\n }\n to {\n background-position: 0 0;\n }\n}\n@keyframes progress-bar-stripes {\n from {\n background-position: 40px 0;\n }\n to {\n background-position: 0 0;\n }\n}\n.progress {\n height: 20px;\n margin-bottom: 20px;\n overflow: hidden;\n background-color: #f5f5f5;\n border-radius: 4px;\n -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, .1);\n box-shadow: inset 0 1px 2px rgba(0, 0, 0, .1);\n}\n.progress-bar {\n float: left;\n width: 0;\n height: 100%;\n font-size: 12px;\n line-height: 20px;\n color: #fff;\n text-align: center;\n background-color: #337ab7;\n -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .15);\n box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .15);\n -webkit-transition: width .6s ease;\n -o-transition: width .6s ease;\n transition: width .6s ease;\n}\n.progress-striped .progress-bar,\n.progress-bar-striped {\n background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n -webkit-background-size: 40px 40px;\n background-size: 40px 40px;\n}\n.progress.active .progress-bar,\n.progress-bar.active {\n -webkit-animation: progress-bar-stripes 2s linear infinite;\n -o-animation: progress-bar-stripes 2s linear infinite;\n animation: progress-bar-stripes 2s linear infinite;\n}\n.progress-bar-success {\n background-color: #5cb85c;\n}\n.progress-striped .progress-bar-success {\n background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n}\n.progress-bar-info {\n background-color: #5bc0de;\n}\n.progress-striped .progress-bar-info {\n background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n background-image: -o-linear-gradient(45deg, 
rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n}\n.progress-bar-warning {\n background-color: #f0ad4e;\n}\n.progress-striped .progress-bar-warning {\n background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n}\n.progress-bar-danger {\n background-color: #d9534f;\n}\n.progress-striped .progress-bar-danger {\n background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n}\n.media {\n margin-top: 15px;\n}\n.media:first-child {\n margin-top: 0;\n}\n.media,\n.media-body {\n overflow: hidden;\n zoom: 1;\n}\n.media-body {\n width: 10000px;\n}\n.media-object {\n display: block;\n}\n.media-right,\n.media > .pull-right {\n padding-left: 10px;\n}\n.media-left,\n.media > .pull-left {\n padding-right: 10px;\n}\n.media-left,\n.media-right,\n.media-body {\n display: table-cell;\n vertical-align: top;\n}\n.media-middle {\n vertical-align: middle;\n}\n.media-bottom {\n vertical-align: bottom;\n}\n.media-heading {\n margin-top: 0;\n margin-bottom: 5px;\n}\n.media-list {\n padding-left: 0;\n list-style: none;\n}\n.list-group {\n padding-left: 0;\n margin-bottom: 20px;\n}\n.list-group-item {\n position: relative;\n display: block;\n padding: 10px 15px;\n margin-bottom: -1px;\n background-color: #fff;\n border: 1px solid #ddd;\n}\n.list-group-item:first-child {\n border-top-left-radius: 4px;\n border-top-right-radius: 4px;\n}\n.list-group-item:last-child {\n margin-bottom: 0;\n border-bottom-right-radius: 4px;\n border-bottom-left-radius: 4px;\n}\na.list-group-item {\n color: #555;\n}\na.list-group-item .list-group-item-heading {\n color: #333;\n}\na.list-group-item:hover,\na.list-group-item:focus {\n color: #555;\n text-decoration: none;\n background-color: #f5f5f5;\n}\n.list-group-item.disabled,\n.list-group-item.disabled:hover,\n.list-group-item.disabled:focus {\n color: #777;\n cursor: not-allowed;\n background-color: #eee;\n}\n.list-group-item.disabled .list-group-item-heading,\n.list-group-item.disabled:hover .list-group-item-heading,\n.list-group-item.disabled:focus .list-group-item-heading {\n color: inherit;\n}\n.list-group-item.disabled .list-group-item-text,\n.list-group-item.disabled:hover 
.list-group-item-text,\n.list-group-item.disabled:focus .list-group-item-text {\n color: #777;\n}\n.list-group-item.active,\n.list-group-item.active:hover,\n.list-group-item.active:focus {\n z-index: 2;\n color: #fff;\n background-color: #337ab7;\n border-color: #337ab7;\n}\n.list-group-item.active .list-group-item-heading,\n.list-group-item.active:hover .list-group-item-heading,\n.list-group-item.active:focus .list-group-item-heading,\n.list-group-item.active .list-group-item-heading > small,\n.list-group-item.active:hover .list-group-item-heading > small,\n.list-group-item.active:focus .list-group-item-heading > small,\n.list-group-item.active .list-group-item-heading > .small,\n.list-group-item.active:hover .list-group-item-heading > .small,\n.list-group-item.active:focus .list-group-item-heading > .small {\n color: inherit;\n}\n.list-group-item.active .list-group-item-text,\n.list-group-item.active:hover .list-group-item-text,\n.list-group-item.active:focus .list-group-item-text {\n color: #c7ddef;\n}\n.list-group-item-success {\n color: #3c763d;\n background-color: #dff0d8;\n}\na.list-group-item-success {\n color: #3c763d;\n}\na.list-group-item-success .list-group-item-heading {\n color: inherit;\n}\na.list-group-item-success:hover,\na.list-group-item-success:focus {\n color: #3c763d;\n background-color: #d0e9c6;\n}\na.list-group-item-success.active,\na.list-group-item-success.active:hover,\na.list-group-item-success.active:focus {\n color: #fff;\n background-color: #3c763d;\n border-color: #3c763d;\n}\n.list-group-item-info {\n color: #31708f;\n background-color: #d9edf7;\n}\na.list-group-item-info {\n color: #31708f;\n}\na.list-group-item-info .list-group-item-heading {\n color: inherit;\n}\na.list-group-item-info:hover,\na.list-group-item-info:focus {\n color: #31708f;\n background-color: #c4e3f3;\n}\na.list-group-item-info.active,\na.list-group-item-info.active:hover,\na.list-group-item-info.active:focus {\n color: #fff;\n background-color: #31708f;\n border-color: #31708f;\n}\n.list-group-item-warning {\n color: #8a6d3b;\n background-color: #fcf8e3;\n}\na.list-group-item-warning {\n color: #8a6d3b;\n}\na.list-group-item-warning .list-group-item-heading {\n color: inherit;\n}\na.list-group-item-warning:hover,\na.list-group-item-warning:focus {\n color: #8a6d3b;\n background-color: #faf2cc;\n}\na.list-group-item-warning.active,\na.list-group-item-warning.active:hover,\na.list-group-item-warning.active:focus {\n color: #fff;\n background-color: #8a6d3b;\n border-color: #8a6d3b;\n}\n.list-group-item-danger {\n color: #a94442;\n background-color: #f2dede;\n}\na.list-group-item-danger {\n color: #a94442;\n}\na.list-group-item-danger .list-group-item-heading {\n color: inherit;\n}\na.list-group-item-danger:hover,\na.list-group-item-danger:focus {\n color: #a94442;\n background-color: #ebcccc;\n}\na.list-group-item-danger.active,\na.list-group-item-danger.active:hover,\na.list-group-item-danger.active:focus {\n color: #fff;\n background-color: #a94442;\n border-color: #a94442;\n}\n.list-group-item-heading {\n margin-top: 0;\n margin-bottom: 5px;\n}\n.list-group-item-text {\n margin-bottom: 0;\n line-height: 1.3;\n}\n.panel {\n margin-bottom: 20px;\n background-color: #fff;\n border: 1px solid transparent;\n border-radius: 4px;\n -webkit-box-shadow: 0 1px 1px rgba(0, 0, 0, .05);\n box-shadow: 0 1px 1px rgba(0, 0, 0, .05);\n}\n.panel-body {\n padding: 15px;\n}\n.panel-heading {\n padding: 10px 15px;\n border-bottom: 1px solid transparent;\n border-top-left-radius: 3px;\n 
border-top-right-radius: 3px;\n}\n.panel-heading > .dropdown .dropdown-toggle {\n color: inherit;\n}\n.panel-title {\n margin-top: 0;\n margin-bottom: 0;\n font-size: 16px;\n color: inherit;\n}\n.panel-title > a,\n.panel-title > small,\n.panel-title > .small,\n.panel-title > small > a,\n.panel-title > .small > a {\n color: inherit;\n}\n.panel-footer {\n padding: 10px 15px;\n background-color: #f5f5f5;\n border-top: 1px solid #ddd;\n border-bottom-right-radius: 3px;\n border-bottom-left-radius: 3px;\n}\n.panel > .list-group,\n.panel > .panel-collapse > .list-group {\n margin-bottom: 0;\n}\n.panel > .list-group .list-group-item,\n.panel > .panel-collapse > .list-group .list-group-item {\n border-width: 1px 0;\n border-radius: 0;\n}\n.panel > .list-group:first-child .list-group-item:first-child,\n.panel > .panel-collapse > .list-group:first-child .list-group-item:first-child {\n border-top: 0;\n border-top-left-radius: 3px;\n border-top-right-radius: 3px;\n}\n.panel > .list-group:last-child .list-group-item:last-child,\n.panel > .panel-collapse > .list-group:last-child .list-group-item:last-child {\n border-bottom: 0;\n border-bottom-right-radius: 3px;\n border-bottom-left-radius: 3px;\n}\n.panel-heading + .list-group .list-group-item:first-child {\n border-top-width: 0;\n}\n.list-group + .panel-footer {\n border-top-width: 0;\n}\n.panel > .table,\n.panel > .table-responsive > .table,\n.panel > .panel-collapse > .table {\n margin-bottom: 0;\n}\n.panel > .table caption,\n.panel > .table-responsive > .table caption,\n.panel > .panel-collapse > .table caption {\n padding-right: 15px;\n padding-left: 15px;\n}\n.panel > .table:first-child,\n.panel > .table-responsive:first-child > .table:first-child {\n border-top-left-radius: 3px;\n border-top-right-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child {\n border-top-left-radius: 3px;\n border-top-right-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child td:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child td:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:first-child,\n.panel > .table:first-child > thead:first-child > tr:first-child th:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child th:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:first-child {\n border-top-left-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child td:last-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:last-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child td:last-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:last-child,\n.panel > .table:first-child > thead:first-child > tr:first-child th:last-child,\n.panel > .table-responsive:first-child > .table:first-child > 
thead:first-child > tr:first-child th:last-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child th:last-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:last-child {\n border-top-right-radius: 3px;\n}\n.panel > .table:last-child,\n.panel > .table-responsive:last-child > .table:last-child {\n border-bottom-right-radius: 3px;\n border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child {\n border-bottom-right-radius: 3px;\n border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child td:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:first-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child td:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:first-child,\n.panel > .table:last-child > tbody:last-child > tr:last-child th:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:first-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child th:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:first-child {\n border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child td:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child td:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:last-child,\n.panel > .table:last-child > tbody:last-child > tr:last-child th:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child th:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:last-child {\n border-bottom-right-radius: 3px;\n}\n.panel > .panel-body + .table,\n.panel > .panel-body + .table-responsive,\n.panel > .table + .panel-body,\n.panel > .table-responsive + .panel-body {\n border-top: 1px solid #ddd;\n}\n.panel > .table > tbody:first-child > tr:first-child th,\n.panel > .table > tbody:first-child > tr:first-child td {\n border-top: 0;\n}\n.panel > .table-bordered,\n.panel > .table-responsive > .table-bordered {\n border: 0;\n}\n.panel > .table-bordered > thead > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > thead > tr > th:first-child,\n.panel > .table-bordered > tbody > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > th:first-child,\n.panel > .table-bordered > tfoot > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > th:first-child,\n.panel > .table-bordered > thead > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > thead > tr > td:first-child,\n.panel > .table-bordered > tbody > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > td:first-child,\n.panel > .table-bordered > tfoot > tr > 
td:first-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > td:first-child {\n border-left: 0;\n}\n.panel > .table-bordered > thead > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > thead > tr > th:last-child,\n.panel > .table-bordered > tbody > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > th:last-child,\n.panel > .table-bordered > tfoot > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > th:last-child,\n.panel > .table-bordered > thead > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > thead > tr > td:last-child,\n.panel > .table-bordered > tbody > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > td:last-child,\n.panel > .table-bordered > tfoot > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > td:last-child {\n border-right: 0;\n}\n.panel > .table-bordered > thead > tr:first-child > td,\n.panel > .table-responsive > .table-bordered > thead > tr:first-child > td,\n.panel > .table-bordered > tbody > tr:first-child > td,\n.panel > .table-responsive > .table-bordered > tbody > tr:first-child > td,\n.panel > .table-bordered > thead > tr:first-child > th,\n.panel > .table-responsive > .table-bordered > thead > tr:first-child > th,\n.panel > .table-bordered > tbody > tr:first-child > th,\n.panel > .table-responsive > .table-bordered > tbody > tr:first-child > th {\n border-bottom: 0;\n}\n.panel > .table-bordered > tbody > tr:last-child > td,\n.panel > .table-responsive > .table-bordered > tbody > tr:last-child > td,\n.panel > .table-bordered > tfoot > tr:last-child > td,\n.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > td,\n.panel > .table-bordered > tbody > tr:last-child > th,\n.panel > .table-responsive > .table-bordered > tbody > tr:last-child > th,\n.panel > .table-bordered > tfoot > tr:last-child > th,\n.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > th {\n border-bottom: 0;\n}\n.panel > .table-responsive {\n margin-bottom: 0;\n border: 0;\n}\n.panel-group {\n margin-bottom: 20px;\n}\n.panel-group .panel {\n margin-bottom: 0;\n border-radius: 4px;\n}\n.panel-group .panel + .panel {\n margin-top: 5px;\n}\n.panel-group .panel-heading {\n border-bottom: 0;\n}\n.panel-group .panel-heading + .panel-collapse > .panel-body,\n.panel-group .panel-heading + .panel-collapse > .list-group {\n border-top: 1px solid #ddd;\n}\n.panel-group .panel-footer {\n border-top: 0;\n}\n.panel-group .panel-footer + .panel-collapse .panel-body {\n border-bottom: 1px solid #ddd;\n}\n.panel-default {\n border-color: #ddd;\n}\n.panel-default > .panel-heading {\n color: #333;\n background-color: #f5f5f5;\n border-color: #ddd;\n}\n.panel-default > .panel-heading + .panel-collapse > .panel-body {\n border-top-color: #ddd;\n}\n.panel-default > .panel-heading .badge {\n color: #f5f5f5;\n background-color: #333;\n}\n.panel-default > .panel-footer + .panel-collapse > .panel-body {\n border-bottom-color: #ddd;\n}\n.panel-primary {\n border-color: #337ab7;\n}\n.panel-primary > .panel-heading {\n color: #fff;\n background-color: #337ab7;\n border-color: #337ab7;\n}\n.panel-primary > .panel-heading + .panel-collapse > .panel-body {\n border-top-color: #337ab7;\n}\n.panel-primary > .panel-heading .badge {\n color: #337ab7;\n background-color: #fff;\n}\n.panel-primary > .panel-footer + .panel-collapse > .panel-body {\n border-bottom-color: #337ab7;\n}\n.panel-success {\n border-color: 
#d6e9c6;\n}\n.panel-success > .panel-heading {\n color: #3c763d;\n background-color: #dff0d8;\n border-color: #d6e9c6;\n}\n.panel-success > .panel-heading + .panel-collapse > .panel-body {\n border-top-color: #d6e9c6;\n}\n.panel-success > .panel-heading .badge {\n color: #dff0d8;\n background-color: #3c763d;\n}\n.panel-success > .panel-footer + .panel-collapse > .panel-body {\n border-bottom-color: #d6e9c6;\n}\n.panel-info {\n border-color: #bce8f1;\n}\n.panel-info > .panel-heading {\n color: #31708f;\n background-color: #d9edf7;\n border-color: #bce8f1;\n}\n.panel-info > .panel-heading + .panel-collapse > .panel-body {\n border-top-color: #bce8f1;\n}\n.panel-info > .panel-heading .badge {\n color: #d9edf7;\n background-color: #31708f;\n}\n.panel-info > .panel-footer + .panel-collapse > .panel-body {\n border-bottom-color: #bce8f1;\n}\n.panel-warning {\n border-color: #faebcc;\n}\n.panel-warning > .panel-heading {\n color: #8a6d3b;\n background-color: #fcf8e3;\n border-color: #faebcc;\n}\n.panel-warning > .panel-heading + .panel-collapse > .panel-body {\n border-top-color: #faebcc;\n}\n.panel-warning > .panel-heading .badge {\n color: #fcf8e3;\n background-color: #8a6d3b;\n}\n.panel-warning > .panel-footer + .panel-collapse > .panel-body {\n border-bottom-color: #faebcc;\n}\n.panel-danger {\n border-color: #ebccd1;\n}\n.panel-danger > .panel-heading {\n color: #a94442;\n background-color: #f2dede;\n border-color: #ebccd1;\n}\n.panel-danger > .panel-heading + .panel-collapse > .panel-body {\n border-top-color: #ebccd1;\n}\n.panel-danger > .panel-heading .badge {\n color: #f2dede;\n background-color: #a94442;\n}\n.panel-danger > .panel-footer + .panel-collapse > .panel-body {\n border-bottom-color: #ebccd1;\n}\n.embed-responsive {\n position: relative;\n display: block;\n height: 0;\n padding: 0;\n overflow: hidden;\n}\n.embed-responsive .embed-responsive-item,\n.embed-responsive iframe,\n.embed-responsive embed,\n.embed-responsive object,\n.embed-responsive video {\n position: absolute;\n top: 0;\n bottom: 0;\n left: 0;\n width: 100%;\n height: 100%;\n border: 0;\n}\n.embed-responsive-16by9 {\n padding-bottom: 56.25%;\n}\n.embed-responsive-4by3 {\n padding-bottom: 75%;\n}\n.well {\n min-height: 20px;\n padding: 19px;\n margin-bottom: 20px;\n background-color: #f5f5f5;\n border: 1px solid #e3e3e3;\n border-radius: 4px;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .05);\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, .05);\n}\n.well blockquote {\n border-color: #ddd;\n border-color: rgba(0, 0, 0, .15);\n}\n.well-lg {\n padding: 24px;\n border-radius: 6px;\n}\n.well-sm {\n padding: 9px;\n border-radius: 3px;\n}\n.close {\n float: right;\n font-size: 21px;\n font-weight: bold;\n line-height: 1;\n color: #000;\n text-shadow: 0 1px 0 #fff;\n filter: alpha(opacity=20);\n opacity: .2;\n}\n.close:hover,\n.close:focus {\n color: #000;\n text-decoration: none;\n cursor: pointer;\n filter: alpha(opacity=50);\n opacity: .5;\n}\nbutton.close {\n -webkit-appearance: none;\n padding: 0;\n cursor: pointer;\n background: transparent;\n border: 0;\n}\n.modal-open {\n overflow: hidden;\n}\n.modal {\n position: fixed;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n z-index: 1050;\n display: none;\n overflow: hidden;\n -webkit-overflow-scrolling: touch;\n outline: 0;\n}\n.modal.fade .modal-dialog {\n -webkit-transition: -webkit-transform .3s ease-out;\n -o-transition: -o-transform .3s ease-out;\n transition: transform .3s ease-out;\n -webkit-transform: translate(0, -25%);\n -ms-transform: translate(0, 
-25%);\n -o-transform: translate(0, -25%);\n transform: translate(0, -25%);\n}\n.modal.in .modal-dialog {\n -webkit-transform: translate(0, 0);\n -ms-transform: translate(0, 0);\n -o-transform: translate(0, 0);\n transform: translate(0, 0);\n}\n.modal-open .modal {\n overflow-x: hidden;\n overflow-y: auto;\n}\n.modal-dialog {\n position: relative;\n width: auto;\n margin: 10px;\n}\n.modal-content {\n position: relative;\n background-color: #fff;\n -webkit-background-clip: padding-box;\n background-clip: padding-box;\n border: 1px solid #999;\n border: 1px solid rgba(0, 0, 0, .2);\n border-radius: 6px;\n outline: 0;\n -webkit-box-shadow: 0 3px 9px rgba(0, 0, 0, .5);\n box-shadow: 0 3px 9px rgba(0, 0, 0, .5);\n}\n.modal-backdrop {\n position: fixed;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n z-index: 1040;\n background-color: #000;\n}\n.modal-backdrop.fade {\n filter: alpha(opacity=0);\n opacity: 0;\n}\n.modal-backdrop.in {\n filter: alpha(opacity=50);\n opacity: .5;\n}\n.modal-header {\n min-height: 16.42857143px;\n padding: 15px;\n border-bottom: 1px solid #e5e5e5;\n}\n.modal-header .close {\n margin-top: -2px;\n}\n.modal-title {\n margin: 0;\n line-height: 1.42857143;\n}\n.modal-body {\n position: relative;\n padding: 15px;\n}\n.modal-footer {\n padding: 15px;\n text-align: right;\n border-top: 1px solid #e5e5e5;\n}\n.modal-footer .btn + .btn {\n margin-bottom: 0;\n margin-left: 5px;\n}\n.modal-footer .btn-group .btn + .btn {\n margin-left: -1px;\n}\n.modal-footer .btn-block + .btn-block {\n margin-left: 0;\n}\n.modal-scrollbar-measure {\n position: absolute;\n top: -9999px;\n width: 50px;\n height: 50px;\n overflow: scroll;\n}\n@media (min-width: 768px) {\n .modal-dialog {\n width: 600px;\n margin: 30px auto;\n }\n .modal-content {\n -webkit-box-shadow: 0 5px 15px rgba(0, 0, 0, .5);\n box-shadow: 0 5px 15px rgba(0, 0, 0, .5);\n }\n .modal-sm {\n width: 300px;\n }\n}\n@media (min-width: 992px) {\n .modal-lg {\n width: 900px;\n }\n}\n.tooltip {\n position: absolute;\n z-index: 1070;\n display: block;\n font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n font-size: 12px;\n font-weight: normal;\n line-height: 1.4;\n filter: alpha(opacity=0);\n opacity: 0;\n}\n.tooltip.in {\n filter: alpha(opacity=90);\n opacity: .9;\n}\n.tooltip.top {\n padding: 5px 0;\n margin-top: -3px;\n}\n.tooltip.right {\n padding: 0 5px;\n margin-left: 3px;\n}\n.tooltip.bottom {\n padding: 5px 0;\n margin-top: 3px;\n}\n.tooltip.left {\n padding: 0 5px;\n margin-left: -3px;\n}\n.tooltip-inner {\n max-width: 200px;\n padding: 3px 8px;\n color: #fff;\n text-align: center;\n text-decoration: none;\n background-color: #000;\n border-radius: 4px;\n}\n.tooltip-arrow {\n position: absolute;\n width: 0;\n height: 0;\n border-color: transparent;\n border-style: solid;\n}\n.tooltip.top .tooltip-arrow {\n bottom: 0;\n left: 50%;\n margin-left: -5px;\n border-width: 5px 5px 0;\n border-top-color: #000;\n}\n.tooltip.top-left .tooltip-arrow {\n right: 5px;\n bottom: 0;\n margin-bottom: -5px;\n border-width: 5px 5px 0;\n border-top-color: #000;\n}\n.tooltip.top-right .tooltip-arrow {\n bottom: 0;\n left: 5px;\n margin-bottom: -5px;\n border-width: 5px 5px 0;\n border-top-color: #000;\n}\n.tooltip.right .tooltip-arrow {\n top: 50%;\n left: 0;\n margin-top: -5px;\n border-width: 5px 5px 5px 0;\n border-right-color: #000;\n}\n.tooltip.left .tooltip-arrow {\n top: 50%;\n right: 0;\n margin-top: -5px;\n border-width: 5px 0 5px 5px;\n border-left-color: #000;\n}\n.tooltip.bottom .tooltip-arrow {\n top: 0;\n left: 50%;\n 
margin-left: -5px;\n border-width: 0 5px 5px;\n border-bottom-color: #000;\n}\n.tooltip.bottom-left .tooltip-arrow {\n top: 0;\n right: 5px;\n margin-top: -5px;\n border-width: 0 5px 5px;\n border-bottom-color: #000;\n}\n.tooltip.bottom-right .tooltip-arrow {\n top: 0;\n left: 5px;\n margin-top: -5px;\n border-width: 0 5px 5px;\n border-bottom-color: #000;\n}\n.popover {\n position: absolute;\n top: 0;\n left: 0;\n z-index: 1060;\n display: none;\n max-width: 276px;\n padding: 1px;\n font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n font-size: 14px;\n font-weight: normal;\n line-height: 1.42857143;\n text-align: left;\n white-space: normal;\n background-color: #fff;\n -webkit-background-clip: padding-box;\n background-clip: padding-box;\n border: 1px solid #ccc;\n border: 1px solid rgba(0, 0, 0, .2);\n border-radius: 6px;\n -webkit-box-shadow: 0 5px 10px rgba(0, 0, 0, .2);\n box-shadow: 0 5px 10px rgba(0, 0, 0, .2);\n}\n.popover.top {\n margin-top: -10px;\n}\n.popover.right {\n margin-left: 10px;\n}\n.popover.bottom {\n margin-top: 10px;\n}\n.popover.left {\n margin-left: -10px;\n}\n.popover-title {\n padding: 8px 14px;\n margin: 0;\n font-size: 14px;\n background-color: #f7f7f7;\n border-bottom: 1px solid #ebebeb;\n border-radius: 5px 5px 0 0;\n}\n.popover-content {\n padding: 9px 14px;\n}\n.popover > .arrow,\n.popover > .arrow:after {\n position: absolute;\n display: block;\n width: 0;\n height: 0;\n border-color: transparent;\n border-style: solid;\n}\n.popover > .arrow {\n border-width: 11px;\n}\n.popover > .arrow:after {\n content: \"\";\n border-width: 10px;\n}\n.popover.top > .arrow {\n bottom: -11px;\n left: 50%;\n margin-left: -11px;\n border-top-color: #999;\n border-top-color: rgba(0, 0, 0, .25);\n border-bottom-width: 0;\n}\n.popover.top > .arrow:after {\n bottom: 1px;\n margin-left: -10px;\n content: \" \";\n border-top-color: #fff;\n border-bottom-width: 0;\n}\n.popover.right > .arrow {\n top: 50%;\n left: -11px;\n margin-top: -11px;\n border-right-color: #999;\n border-right-color: rgba(0, 0, 0, .25);\n border-left-width: 0;\n}\n.popover.right > .arrow:after {\n bottom: -10px;\n left: 1px;\n content: \" \";\n border-right-color: #fff;\n border-left-width: 0;\n}\n.popover.bottom > .arrow {\n top: -11px;\n left: 50%;\n margin-left: -11px;\n border-top-width: 0;\n border-bottom-color: #999;\n border-bottom-color: rgba(0, 0, 0, .25);\n}\n.popover.bottom > .arrow:after {\n top: 1px;\n margin-left: -10px;\n content: \" \";\n border-top-width: 0;\n border-bottom-color: #fff;\n}\n.popover.left > .arrow {\n top: 50%;\n right: -11px;\n margin-top: -11px;\n border-right-width: 0;\n border-left-color: #999;\n border-left-color: rgba(0, 0, 0, .25);\n}\n.popover.left > .arrow:after {\n right: 1px;\n bottom: -10px;\n content: \" \";\n border-right-width: 0;\n border-left-color: #fff;\n}\n.carousel {\n position: relative;\n}\n.carousel-inner {\n position: relative;\n width: 100%;\n overflow: hidden;\n}\n.carousel-inner > .item {\n position: relative;\n display: none;\n -webkit-transition: .6s ease-in-out left;\n -o-transition: .6s ease-in-out left;\n transition: .6s ease-in-out left;\n}\n.carousel-inner > .item > img,\n.carousel-inner > .item > a > img {\n line-height: 1;\n}\n@media all and (transform-3d), (-webkit-transform-3d) {\n .carousel-inner > .item {\n -webkit-transition: -webkit-transform .6s ease-in-out;\n -o-transition: -o-transform .6s ease-in-out;\n transition: transform .6s ease-in-out;\n\n -webkit-backface-visibility: hidden;\n backface-visibility: hidden;\n 
-webkit-perspective: 1000;\n perspective: 1000;\n }\n .carousel-inner > .item.next,\n .carousel-inner > .item.active.right {\n left: 0;\n -webkit-transform: translate3d(100%, 0, 0);\n transform: translate3d(100%, 0, 0);\n }\n .carousel-inner > .item.prev,\n .carousel-inner > .item.active.left {\n left: 0;\n -webkit-transform: translate3d(-100%, 0, 0);\n transform: translate3d(-100%, 0, 0);\n }\n .carousel-inner > .item.next.left,\n .carousel-inner > .item.prev.right,\n .carousel-inner > .item.active {\n left: 0;\n -webkit-transform: translate3d(0, 0, 0);\n transform: translate3d(0, 0, 0);\n }\n}\n.carousel-inner > .active,\n.carousel-inner > .next,\n.carousel-inner > .prev {\n display: block;\n}\n.carousel-inner > .active {\n left: 0;\n}\n.carousel-inner > .next,\n.carousel-inner > .prev {\n position: absolute;\n top: 0;\n width: 100%;\n}\n.carousel-inner > .next {\n left: 100%;\n}\n.carousel-inner > .prev {\n left: -100%;\n}\n.carousel-inner > .next.left,\n.carousel-inner > .prev.right {\n left: 0;\n}\n.carousel-inner > .active.left {\n left: -100%;\n}\n.carousel-inner > .active.right {\n left: 100%;\n}\n.carousel-control {\n position: absolute;\n top: 0;\n bottom: 0;\n left: 0;\n width: 15%;\n font-size: 20px;\n color: #fff;\n text-align: center;\n text-shadow: 0 1px 2px rgba(0, 0, 0, .6);\n filter: alpha(opacity=50);\n opacity: .5;\n}\n.carousel-control.left {\n background-image: -webkit-linear-gradient(left, rgba(0, 0, 0, .5) 0%, rgba(0, 0, 0, .0001) 100%);\n background-image: -o-linear-gradient(left, rgba(0, 0, 0, .5) 0%, rgba(0, 0, 0, .0001) 100%);\n background-image: -webkit-gradient(linear, left top, right top, from(rgba(0, 0, 0, .5)), to(rgba(0, 0, 0, .0001)));\n background-image: linear-gradient(to right, rgba(0, 0, 0, .5) 0%, rgba(0, 0, 0, .0001) 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1);\n background-repeat: repeat-x;\n}\n.carousel-control.right {\n right: 0;\n left: auto;\n background-image: -webkit-linear-gradient(left, rgba(0, 0, 0, .0001) 0%, rgba(0, 0, 0, .5) 100%);\n background-image: -o-linear-gradient(left, rgba(0, 0, 0, .0001) 0%, rgba(0, 0, 0, .5) 100%);\n background-image: -webkit-gradient(linear, left top, right top, from(rgba(0, 0, 0, .0001)), to(rgba(0, 0, 0, .5)));\n background-image: linear-gradient(to right, rgba(0, 0, 0, .0001) 0%, rgba(0, 0, 0, .5) 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1);\n background-repeat: repeat-x;\n}\n.carousel-control:hover,\n.carousel-control:focus {\n color: #fff;\n text-decoration: none;\n filter: alpha(opacity=90);\n outline: 0;\n opacity: .9;\n}\n.carousel-control .icon-prev,\n.carousel-control .icon-next,\n.carousel-control .glyphicon-chevron-left,\n.carousel-control .glyphicon-chevron-right {\n position: absolute;\n top: 50%;\n z-index: 5;\n display: inline-block;\n}\n.carousel-control .icon-prev,\n.carousel-control .glyphicon-chevron-left {\n left: 50%;\n margin-left: -10px;\n}\n.carousel-control .icon-next,\n.carousel-control .glyphicon-chevron-right {\n right: 50%;\n margin-right: -10px;\n}\n.carousel-control .icon-prev,\n.carousel-control .icon-next {\n width: 20px;\n height: 20px;\n margin-top: -10px;\n font-family: serif;\n line-height: 1;\n}\n.carousel-control .icon-prev:before {\n content: '\\2039';\n}\n.carousel-control .icon-next:before {\n content: '\\203a';\n}\n.carousel-indicators {\n position: absolute;\n bottom: 10px;\n left: 50%;\n z-index: 
15;\n width: 60%;\n padding-left: 0;\n margin-left: -30%;\n text-align: center;\n list-style: none;\n}\n.carousel-indicators li {\n display: inline-block;\n width: 10px;\n height: 10px;\n margin: 1px;\n text-indent: -999px;\n cursor: pointer;\n background-color: #000 \\9;\n background-color: rgba(0, 0, 0, 0);\n border: 1px solid #fff;\n border-radius: 10px;\n}\n.carousel-indicators .active {\n width: 12px;\n height: 12px;\n margin: 0;\n background-color: #fff;\n}\n.carousel-caption {\n position: absolute;\n right: 15%;\n bottom: 20px;\n left: 15%;\n z-index: 10;\n padding-top: 20px;\n padding-bottom: 20px;\n color: #fff;\n text-align: center;\n text-shadow: 0 1px 2px rgba(0, 0, 0, .6);\n}\n.carousel-caption .btn {\n text-shadow: none;\n}\n@media screen and (min-width: 768px) {\n .carousel-control .glyphicon-chevron-left,\n .carousel-control .glyphicon-chevron-right,\n .carousel-control .icon-prev,\n .carousel-control .icon-next {\n width: 30px;\n height: 30px;\n margin-top: -15px;\n font-size: 30px;\n }\n .carousel-control .glyphicon-chevron-left,\n .carousel-control .icon-prev {\n margin-left: -15px;\n }\n .carousel-control .glyphicon-chevron-right,\n .carousel-control .icon-next {\n margin-right: -15px;\n }\n .carousel-caption {\n right: 20%;\n left: 20%;\n padding-bottom: 30px;\n }\n .carousel-indicators {\n bottom: 20px;\n }\n}\n.clearfix:before,\n.clearfix:after,\n.dl-horizontal dd:before,\n.dl-horizontal dd:after,\n.container:before,\n.container:after,\n.container-fluid:before,\n.container-fluid:after,\n.row:before,\n.row:after,\n.form-horizontal .form-group:before,\n.form-horizontal .form-group:after,\n.btn-toolbar:before,\n.btn-toolbar:after,\n.btn-group-vertical > .btn-group:before,\n.btn-group-vertical > .btn-group:after,\n.nav:before,\n.nav:after,\n.navbar:before,\n.navbar:after,\n.navbar-header:before,\n.navbar-header:after,\n.navbar-collapse:before,\n.navbar-collapse:after,\n.pager:before,\n.pager:after,\n.panel-body:before,\n.panel-body:after,\n.modal-footer:before,\n.modal-footer:after {\n display: table;\n content: \" \";\n}\n.clearfix:after,\n.dl-horizontal dd:after,\n.container:after,\n.container-fluid:after,\n.row:after,\n.form-horizontal .form-group:after,\n.btn-toolbar:after,\n.btn-group-vertical > .btn-group:after,\n.nav:after,\n.navbar:after,\n.navbar-header:after,\n.navbar-collapse:after,\n.pager:after,\n.panel-body:after,\n.modal-footer:after {\n clear: both;\n}\n.center-block {\n display: block;\n margin-right: auto;\n margin-left: auto;\n}\n.pull-right {\n float: right !important;\n}\n.pull-left {\n float: left !important;\n}\n.hide {\n display: none !important;\n}\n.show {\n display: block !important;\n}\n.invisible {\n visibility: hidden;\n}\n.text-hide {\n font: 0/0 a;\n color: transparent;\n text-shadow: none;\n background-color: transparent;\n border: 0;\n}\n.hidden {\n display: none !important;\n}\n.affix {\n position: fixed;\n}\n@-ms-viewport {\n width: device-width;\n}\n.visible-xs,\n.visible-sm,\n.visible-md,\n.visible-lg {\n display: none !important;\n}\n.visible-xs-block,\n.visible-xs-inline,\n.visible-xs-inline-block,\n.visible-sm-block,\n.visible-sm-inline,\n.visible-sm-inline-block,\n.visible-md-block,\n.visible-md-inline,\n.visible-md-inline-block,\n.visible-lg-block,\n.visible-lg-inline,\n.visible-lg-inline-block {\n display: none !important;\n}\n@media (max-width: 767px) {\n .visible-xs {\n display: block !important;\n }\n table.visible-xs {\n display: table;\n }\n tr.visible-xs {\n display: table-row !important;\n }\n th.visible-xs,\n 
td.visible-xs {\n display: table-cell !important;\n }\n}\n@media (max-width: 767px) {\n .visible-xs-block {\n display: block !important;\n }\n}\n@media (max-width: 767px) {\n .visible-xs-inline {\n display: inline !important;\n }\n}\n@media (max-width: 767px) {\n .visible-xs-inline-block {\n display: inline-block !important;\n }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n .visible-sm {\n display: block !important;\n }\n table.visible-sm {\n display: table;\n }\n tr.visible-sm {\n display: table-row !important;\n }\n th.visible-sm,\n td.visible-sm {\n display: table-cell !important;\n }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n .visible-sm-block {\n display: block !important;\n }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n .visible-sm-inline {\n display: inline !important;\n }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n .visible-sm-inline-block {\n display: inline-block !important;\n }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n .visible-md {\n display: block !important;\n }\n table.visible-md {\n display: table;\n }\n tr.visible-md {\n display: table-row !important;\n }\n th.visible-md,\n td.visible-md {\n display: table-cell !important;\n }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n .visible-md-block {\n display: block !important;\n }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n .visible-md-inline {\n display: inline !important;\n }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n .visible-md-inline-block {\n display: inline-block !important;\n }\n}\n@media (min-width: 1200px) {\n .visible-lg {\n display: block !important;\n }\n table.visible-lg {\n display: table;\n }\n tr.visible-lg {\n display: table-row !important;\n }\n th.visible-lg,\n td.visible-lg {\n display: table-cell !important;\n }\n}\n@media (min-width: 1200px) {\n .visible-lg-block {\n display: block !important;\n }\n}\n@media (min-width: 1200px) {\n .visible-lg-inline {\n display: inline !important;\n }\n}\n@media (min-width: 1200px) {\n .visible-lg-inline-block {\n display: inline-block !important;\n }\n}\n@media (max-width: 767px) {\n .hidden-xs {\n display: none !important;\n }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n .hidden-sm {\n display: none !important;\n }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n .hidden-md {\n display: none !important;\n }\n}\n@media (min-width: 1200px) {\n .hidden-lg {\n display: none !important;\n }\n}\n.visible-print {\n display: none !important;\n}\n@media print {\n .visible-print {\n display: block !important;\n }\n table.visible-print {\n display: table;\n }\n tr.visible-print {\n display: table-row !important;\n }\n th.visible-print,\n td.visible-print {\n display: table-cell !important;\n }\n}\n.visible-print-block {\n display: none !important;\n}\n@media print {\n .visible-print-block {\n display: block !important;\n }\n}\n.visible-print-inline {\n display: none !important;\n}\n@media print {\n .visible-print-inline {\n display: inline !important;\n }\n}\n.visible-print-inline-block {\n display: none !important;\n}\n@media print {\n .visible-print-inline-block {\n display: inline-block !important;\n }\n}\n@media print {\n .hidden-print {\n display: none !important;\n }\n}\n/*# sourceMappingURL=bootstrap.css.map */\nPK\x07\x08\xbeO[\x0b6)\x02\x006)\x02\x00PK\x03\x04\x14\x00\x08\x00\x00\x00L\x84JI\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1d\x00\x00\x00css/dropdowns-enhancement.css.dropdown-menu > li > label {\n display: block;\n padding: 3px 
20px;\n clear: both;\n font-weight: normal;\n line-height: 1.42857143;\n color: #333333;\n white-space: nowrap;\n}\n.dropdown-menu > li > label:hover,\n.dropdown-menu > li > label:focus {\n text-decoration: none;\n color: #262626;\n background-color: #f5f5f5;\n}\n.dropdown-menu > li > input:checked ~ label,\n.dropdown-menu > li > input:checked ~ label:hover,\n.dropdown-menu > li > input:checked ~ label:focus,\n.dropdown-menu > .active > label,\n.dropdown-menu > .active > label:hover,\n.dropdown-menu > .active > label:focus {\n color: #ffffff;\n text-decoration: none;\n outline: 0;\n background-color: #428bca;\n}\n.dropdown-menu > li > input[disabled] ~ label,\n.dropdown-menu > li > input[disabled] ~ label:hover,\n.dropdown-menu > li > input[disabled] ~ label:focus,\n.dropdown-menu > .disabled > label,\n.dropdown-menu > .disabled > label:hover,\n.dropdown-menu > .disabled > label:focus {\n color: #999999;\n}\n.dropdown-menu > li > input[disabled] ~ label:hover,\n.dropdown-menu > li > input[disabled] ~ label:focus,\n.dropdown-menu > .disabled > label:hover,\n.dropdown-menu > .disabled > label:focus {\n text-decoration: none;\n background-color: transparent;\n background-image: none;\n filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n cursor: not-allowed;\n}\n.dropdown-menu > li > label {\n margin-bottom: 0;\n cursor: pointer;\n}\n.dropdown-menu > li > input[type=\"radio\"],\n.dropdown-menu > li > input[type=\"checkbox\"] {\n display: none;\n position: absolute;\n top: -9999em;\n left: -9999em;\n}\n.dropdown-menu > li > label:focus,\n.dropdown-menu > li > input:focus ~ label {\n outline: thin dotted;\n outline: 5px auto -webkit-focus-ring-color;\n outline-offset: -2px;\n}\n.dropdown-menu.pull-right {\n right: 0;\n left: auto;\n}\n.dropdown-menu.pull-top {\n bottom: 100%;\n top: auto;\n margin: 0 0 2px;\n -webkit-box-shadow: 0 -6px 12px rgba(0, 0, 0, 0.175);\n box-shadow: 0 -6px 12px rgba(0, 0, 0, 0.175);\n}\n.dropdown-menu.pull-center {\n right: 50%;\n left: auto;\n}\n.dropdown-menu.pull-middle {\n right: 100%;\n margin: 0 2px 0 0;\n box-shadow: -5px 0 10px rgba(0, 0, 0, 0.2);\n left: auto;\n}\n.dropdown-menu.pull-middle.pull-right {\n right: auto;\n left: 100%;\n margin: 0 0 0 2px;\n box-shadow: 5px 0 10px rgba(0, 0, 0, 0.2);\n}\n.dropdown-menu.pull-middle.pull-center {\n right: 50%;\n margin: 0;\n box-shadow: 0 0 10px rgba(0, 0, 0, 0.2);\n}\n.dropdown-menu.bullet {\n margin-top: 8px;\n}\n.dropdown-menu.bullet:before {\n width: 0;\n height: 0;\n content: '';\n display: inline-block;\n position: absolute;\n border-color: transparent;\n border-style: solid;\n -webkit-transform: rotate(360deg);\n border-width: 0 7px 7px;\n border-bottom-color: #cccccc;\n border-bottom-color: rgba(0, 0, 0, 0.15);\n top: -7px;\n left: 9px;\n}\n.dropdown-menu.bullet:after {\n width: 0;\n height: 0;\n content: '';\n display: inline-block;\n position: absolute;\n border-color: transparent;\n border-style: solid;\n -webkit-transform: rotate(360deg);\n border-width: 0 6px 6px;\n border-bottom-color: #ffffff;\n top: -6px;\n left: 10px;\n}\n.dropdown-menu.bullet.pull-right:before {\n left: auto;\n right: 9px;\n}\n.dropdown-menu.bullet.pull-right:after {\n left: auto;\n right: 10px;\n}\n.dropdown-menu.bullet.pull-top {\n margin-top: 0;\n margin-bottom: 8px;\n}\n.dropdown-menu.bullet.pull-top:before {\n top: auto;\n bottom: -7px;\n border-bottom-width: 0;\n border-top-width: 7px;\n border-top-color: #cccccc;\n border-top-color: rgba(0, 0, 0, 0.15);\n}\n.dropdown-menu.bullet.pull-top:after {\n top: 
auto;\n bottom: -6px;\n border-bottom: none;\n border-top-width: 6px;\n border-top-color: #ffffff;\n}\n.dropdown-menu.bullet.pull-center:before {\n left: auto;\n right: 50%;\n margin-right: -7px;\n}\n.dropdown-menu.bullet.pull-center:after {\n left: auto;\n right: 50%;\n margin-right: -6px;\n}\n.dropdown-menu.bullet.pull-middle {\n margin-right: 8px;\n}\n.dropdown-menu.bullet.pull-middle:before {\n top: 50%;\n left: 100%;\n right: auto;\n margin-top: -7px;\n border-right-width: 0;\n border-bottom-color: transparent;\n border-top-width: 7px;\n border-left-color: #cccccc;\n border-left-color: rgba(0, 0, 0, 0.15);\n}\n.dropdown-menu.bullet.pull-middle:after {\n top: 50%;\n left: 100%;\n right: auto;\n margin-top: -6px;\n border-right-width: 0;\n border-bottom-color: transparent;\n border-top-width: 6px;\n border-left-color: #ffffff;\n}\n.dropdown-menu.bullet.pull-middle.pull-right {\n margin-right: 0;\n margin-left: 8px;\n}\n.dropdown-menu.bullet.pull-middle.pull-right:before {\n left: -7px;\n border-left-width: 0;\n border-right-width: 7px;\n border-right-color: #cccccc;\n border-right-color: rgba(0, 0, 0, 0.15);\n}\n.dropdown-menu.bullet.pull-middle.pull-right:after {\n left: -6px;\n border-left-width: 0;\n border-right-width: 6px;\n border-right-color: #ffffff;\n}\n.dropdown-menu.bullet.pull-middle.pull-center {\n margin-left: 0;\n margin-right: 0;\n}\n.dropdown-menu.bullet.pull-middle.pull-center:before {\n border: none;\n display: none;\n}\n.dropdown-menu.bullet.pull-middle.pull-center:after {\n border: none;\n display: none;\n}\n.dropdown-submenu {\n position: relative;\n}\n.dropdown-submenu > .dropdown-menu {\n top: 0;\n left: 100%;\n margin-top: -6px;\n margin-left: -1px;\n border-top-left-radius: 0;\n}\n.dropdown-submenu > a:before {\n display: block;\n float: right;\n width: 0;\n height: 0;\n content: \"\";\n margin-top: 6px;\n margin-right: -8px;\n border-width: 4px 0 4px 4px;\n border-style: solid;\n border-left-style: dashed;\n border-top-color: transparent;\n border-bottom-color: transparent;\n}\n@media (max-width: 767px) {\n .navbar-nav .dropdown-submenu > a:before {\n margin-top: 8px;\n border-color: inherit;\n border-style: solid;\n border-width: 4px 4px 0;\n border-left-color: transparent;\n border-right-color: transparent;\n }\n .navbar-nav .dropdown-submenu > a {\n padding-left: 40px;\n }\n .navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > a,\n .navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > label {\n padding-left: 35px;\n }\n .navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > .dropdown-menu > li > a,\n .navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > .dropdown-menu > li > label {\n padding-left: 45px;\n }\n .navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > a,\n .navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > label {\n padding-left: 55px;\n }\n .navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > a,\n .navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > label {\n padding-left: 65px;\n }\n .navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > 
.dropdown-menu > li > .dropdown-menu > li > a,\n .navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > label {\n padding-left: 75px;\n }\n}\n.navbar-default .navbar-nav .open > .dropdown-menu > .dropdown-submenu.open > a,\n.navbar-default .navbar-nav .open > .dropdown-menu > .dropdown-submenu.open > a:hover,\n.navbar-default .navbar-nav .open > .dropdown-menu > .dropdown-submenu.open > a:focus {\n background-color: #e7e7e7;\n color: #555555;\n}\n@media (max-width: 767px) {\n .navbar-default .navbar-nav .open > .dropdown-menu > .dropdown-submenu.open > a:before {\n border-top-color: #555555;\n }\n}\n.navbar-inverse .navbar-nav .open > .dropdown-menu > .dropdown-submenu.open > a,\n.navbar-inverse .navbar-nav .open > .dropdown-menu > .dropdown-submenu.open > a:hover,\n.navbar-inverse .navbar-nav .open > .dropdown-menu > .dropdown-submenu.open > a:focus {\n background-color: #080808;\n color: #ffffff;\n}\n@media (max-width: 767px) {\n .navbar-inverse .navbar-nav .open > .dropdown-menu > .dropdown-submenu.open > a:before {\n border-top-color: #ffffff;\n }\n}\nPK\x07\x08\x9a\x90:SN \x00\x00N \x00\x00PK\x03\x04\x14\x00\x08\x00\x00\x00L\x84JI\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00&\x00\x00\x00fonts/glyphicons-halflings-regular.eot\x9fN\x00\x00AM\x00\x00\x02\x00\x02\x00\x04\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x01\x00\x90\x01\x00\x00\x04\x00LP\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00'\x12\x7f,\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00(\x00G\x00L\x00Y\x00P\x00H\x00I\x00C\x00O\x00N\x00S\x00 \x00H\x00a\x00l\x00f\x00l\x00i\x00n\x00g\x00s\x00\x00\x00\x0e\x00R\x00e\x00g\x00u\x00l\x00a\x00r\x00\x00\x00x\x00V\x00e\x00r\x00s\x00i\x00o\x00n\x00 \x001\x00.\x000\x000\x009\x00;\x00P\x00S\x00 \x000\x000\x001\x00.\x000\x000\x009\x00;\x00h\x00o\x00t\x00c\x00o\x00n\x00v\x00 \x001\x00.\x000\x00.\x007\x000\x00;\x00m\x00a\x00k\x00e\x00o\x00t\x00f\x00.\x00l\x00i\x00b\x002\x00.\x005\x00.\x005\x008\x003\x002\x009\x00\x00\x008\x00G\x00L\x00Y\x00P\x00H\x00I\x00C\x00O\x00N\x00S\x00 \x00H\x00a\x00l\x00f\x00l\x00i\x00n\x00g\x00s\x00 \x00R\x00e\x00g\x00u\x00l\x00a\x00r\x00\x00\x00\x00\x00BSGP\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\xa9\xdc\x00M\x13\x00M\x19\x00F\xee\x14\xcd\xe9\x8c\xcf\xd2\x11\xd9\xa3(u\x1b\xca\x8c<\x010D\xe3B/X\x0d\xefN\xef\x0b\x88\x00CC\xea^\xc7\x05\x0crmR2sk\xc9\xcbPJ\"5+\x96gl\xe9W*i\xd5W\x96/E\xee\x91\x9c\xd3\x054#\xac\xd4\xa3U\xa6~\xb2f\x10\x89\x91\x1bUD\xdc\xc4\xb9\xf7\x88\xab\xb1\xe0J\xb71\xe1/!\xfe\xfe/\x9e\xba\xcas\xaa\x027\x19\x92\x93k\x8a\x95\x94\x07\x06(\xba\x88\xa1h\x1fN\xf8\xe98o\x90\xedd$yq\x8e\xb91\xb3\xe2\xd69\x16\x83@\x11-\x89\x82HG\x92\x01\xf4\x18\xb5S\"\xf8Fj\xf4\x04\xd8\xa06C3\x94\xa4&\x9e\x87\xc1\x1e\xf8\xaaW51\xc1\xd3\x1b\xdc\xd7B\x9f\xafa\xcb\xeaQaR\x86U/\xf5\xb6{*\xbf\x82\xcb\xef\x00\x82=\x96@d\xf4\xf8h$\xa1\x1e1\xc9T\xdb\x97nc+c\x92\xdeA\xa1\xa7\xbc \x17\x95Z\xc9\x80\xa4@Q\xd1c\xada\x1a\x87\xd5\xdel\xf7\x902>\xcaK\xb0\xc8\x04\x01m\xf3' 
[... binary data of fonts/glyphicons-halflings-regular.eot elided ...]
\xa6@\xb0\xe4mq\xe1\xfbv\xa6\xc9\xb7(\x04D\xc1\\+\xd4l\xe5\xe9\x14\xfb\x140*\xa5V\xa5\xdf\x87\xb0Vm\xd5\x1f\x8a\xa7\xe6h\xc6\x8f\xe6\xd8/S`|\xb3^\\<-\x99\x84\xa9\xc36\xeb\xb82\xa9N3\x9d\x82\"\x07\x0dT\x12o\x9f\xf3\x06\x04\x8clr\xe4\xc5e\xa0\x82\x12!\xf5\x04\xc5H2\x0b\x8b\x15p\x83A \xd6\x9b\xa8\xa0\x18\x87\x02\xde\xc3\xcf\x02{\x9b\xc8\xbc\xfe/\xa3\x1e\xf2\x9f\xd1\x00\xe7\x9cudU2*2\xf2\x10\"c\xab\xcc\"p\x85${\xa9\x80y\x81,\x0b\xe9\xa5\x8b\xf6\x11\x11\x1c&\\\xe0m\xbe&\xba`\x1c\xd0|x \xa6p\x85\x88C\xaa\xd2w#\xc2\xc9\xfbW\x909D\xadI\x07i\xf1\xd1\x96C\x88\x8f\x11\x9b\x02Ks\xef\x96\xe7\x87\x9d\x1dS\xb6\x07\x93\xe33\xef\x0c,\xb6\x0e\x91\x15\x96\xfeM\x9b\x92;\x1dj\xa8\xfeB\xeb\xa74\x9a\x9bP\x9b2\x02\xd9\xd9i\xb5\xee\xeff\xb6\xae\x00\xc9\xbf\xed\x17\xc1bA\xad]a\xa2id\xed\xc2\xf0\x10\x8a\xad\xa8\x0f\x86\"\xc4\xf2\xd7\x13i!\x07aQh\xd4CNO\xbd\xb1\x9e\x8a\xef\x82\x1a\xf5Y\xed\n\x93x\x06\x12F$\xc4\xf8g\x979\x16\x14\x9e\xa5\x10\x91Z`W\xab\x1f\xb0\x85VB\xe2g\xb8\xb1\xa0\xec\xda#j\\\xcb\x82\x97\xa8\x80e\xf9G\xf1\xfb\x1c\x0b[\x13\xb3.\x0f\x1d\xe0\xbe]\x8b\xaa0\x13\x14\xba~X{2\x9bD\xa9\x84?\x0b\x8a\xf8\"\xf3\x8a3\xc4B\x07\xe1j\x14,\xc0\x10K~\x0f\xc6\x06\x00b#\x840\x02\xac\xc9\x92L\x98\x01kc\xcd(6 \xb8\x0c\x02\n\xb1a\xfc\x04E\x1a7\xce\xbb\x81/\xd5\x8e\xaf%\x00\xad \xfc\xb1\x03\x9c\x0f\xb7\xef \xe0\xc9\xc4\xa1R\xeb^J\x14\xfb\x1b\xeb\xc5\x12\x04C\xcf\x8fZ+71X\xdb\x1a\x13\xb4\xd0UO,\xc1\x84\xf6\xe1\x14}\x0f#\x90-\x94\x90e\xd9\xa4\xa04\xf63\xc5\x82\xf0\xedt\xc3\x048\xa7\x99Z\x047\x08\x86\xcei\xe0\xac\xca<:i\xd1?Ft\x8bFk\x96\x7fC\x0cW'\x98\xea\x01f0\x01i\x02\x1b<\xe2Xdj\x97\xf9\x10\x8c\x940\xdb\x12W#i\x13\x8b\x0e\x8b\x9a\x96e\x08C\xcf\nzI7\x88\xce\x0eB\xb0\x03s\x18\xb7\xac\x98.K\x83\x0e \x12 *\x05\xeb\x8d\xddV\x91\xb0\x9d\xab\x12d\xff\x87\x9fD\x81lj\xf6@\xec\xab\xef%\n\xa9\xdc\x88\x9d\xe5\x9d\x0d\x15\xceZ\x88\xc4s\x18\xdd\xef\xae\x90sh\xcc\xb8%\x87^\xdf\n\xfd\xe8\xf8\xcd\xf7@\x188\x86\xf2\xa7\xa4\xf8\xce?\x0b\xa0N\xba8g\x17\xd4G\xf8gr\xa8X\xb0\x14\x17\x80S\x83\xbb\xa0\xfc\x06\xe4\x1b\x95\x0dAp\x0c\x91\x07\xb3\xba4\x87z*\xf6\xb94\xe1\x81\xdd\xe0\xa7,\xc3\xad\xb9\xc4t4G\xd9n\xd4\xe8\xf7\x91\xbc\x15\xf5dS\x0f\xe5\x13>f\xee\x94Q\x96C\xf2\x10\xcf\x1eWUZ{S\x08\xd9;N\x8f\x08x\xe5\xbd\xca}\xb0\xc7H&\xba\xbc*\x0b\xad\x009\xd7\xb8\xafq\x13\x8b\x9aU1 \xca\xf3\x0ca\xa0`(\x11\x7fM-a\xc8G}\xd5n\xb6\xcc\xbd\xe8\xa2\xda0 \x96\xbcp\xca\xd4\xc7mcn\xf1\x8e\x0d\xf2\xf0\x82\xc9\x98\xc0_\xfa\\\xb1\xfc\x17l\xbc\xde\xdb\xe0\xfe\xbb}\xce\x18\x1a \xc8\x00\x0b\xde9\xf7F\xe1\x1cv\x9eH\x7f\xc3\xbek\x83JZ\xfcNO \xe5\xb4m\x08Z\x81\xe1\xc1Q\x9e\xed\xd2\xa4 aS\xfb\xebf\xfa\x16\x81\n)\x11QC+2\x16\nd\x92\x98\xa1[\xaf\xfd\xbb \xcc\xef\x03\xc1\xf5H\"t*\xde\x0d\xc1c*b\xcf\xc6\xda\xa2\x01\xf7\xafq\xb0\xa8,\x05\xb5\x99\xf3\xe3\x13#\x01S\x98\x9f#\xa2\xe4u\x9b\x14\x0f'\xd2\xac\xf5:4\xa9\x1das\xa5\x03\xa9\xbeCDM\x90F\xa7\x13|\xc9\xb8m\xa9_\xc31L]\xf6\xe1Y\x98\x17\xca\\\xc0\xa7\xfd*\xa4X\x9c\x8a>t\xfa\x96\xaf\xb8\xcc\x16g\x8b\xa7\xd8D\x1c\x89\x9f\xa3\x8a\xf8\xe8\x0f\x04d@&[\xb0\x19)8\xc3\x18\xce;<\x9c{\xf3\xca8<\x11\x96\xc8+VG\\\xb0H\x98\xae\xa6\x1d^\x8d\x04\xafa\x97\x8ba\x90e\xdd-4\x94\xfas\xdaJ\xd4A \x1b\\\xe0\x0c\x90hM[\x82\\`\x14\xf0\x12\x93\xbc#\xbfpD5Z97g;\xb2\xf7BW\xe2m\xce\xe4\x89qTXX\x91%0\xb9\x00v\xba\x9e\xe3\x86\xf9&\xf9\xb7]E\x0b\xb9\x17\xdb4]\xfbF\x10\x9fIJ\xe4\xa4\xf9\x96\x9c\x84&\xddS\x8e_\x12\xa6\xee\x834\xc8R\x890\xa0\x02\xaf\xa5D\x05\xfc+\xcbme\xb9\x04\xd4\x05\xa8Y \x83g\xf7\xd0O\xf8\xf1+M{\x940\x06\x0f3\xcfv\x00'\xcd\x85f\x85\xc1\xc5t\xaf\xe1\x00\xe8\xa0:;\xf4\xd8\xb1\xca 
N\xf4\x07\xa6n\xe0\\\xc7\x94^\xdc,\x0e)1\xdel\xe3\xe1\x92aB\xef\x03ZZ\xda\x01\x84[\x95\xe0 \x04\xb8\x9d \xfbZS\xd2\xd2\xe4\xbcUYh\xdc\xdf\x86\xcf\xcaw\x7f\x80\xf5\x8b\x08\x9aS\xb8\\\xae/\xa4*?zQ\xd0\x8b\xff`\x90X4\xef\x1cg\xb9r\xed\xdb[\xa7\x08\x8aCW\xe6\xdbG\x16\xfb\x14.\xa7Y\x84\xec0Q|\xfbR\xd4\x83\x82E\x8d[w\xbe\xa6\xee\x84y\x83)\xb8\x10\xe1\xef\x91\x1a,\xd1\x88\xef$\xcbNK@c\x1d/b\n-#Z\xafI\x0d\x1b\xb9G$\xc6\x97\xaf\x99\x8btm\xe7\xcaH#\xea\xf0\x7f)X\xa3wP\x11ZAD\x7f|\xa2S\x0do\xfdf\x90\x8a\xf5T\xa4\x80\xddH\xda\xeb)\xb8\xce\xe4\xd3\xe7\xc7>\xaaM1\xa0b\x0d7\x1e\x03\xe1\x0e\xb0\x85\xc9\x86S\x8bu\xd0\xc3q\xd7\n\xf6\xb7\xf6jK4[s\xf0\x08\x84\x87\x10 \x1e\x95\x97\xd7x\x02L\x0c \x9b\xd6\x13\xbc\xc7\xa2\xa9\xeb]5\x0b\xfa!M!\x1fA\xbed\xc6\xa7N\xa0\xcb><\xab:\xc7\xbbZ(\xb08\x90\x86\xf8\x9d)e\x85\x0b\x84\xdf\x0d \xbb\xa5\x99\x86/\x99W\x15\xd8\xc8\xc0|\x0d\xfd\xb0b\xaa\x19\xd8\xe9\x88\xfa<\xe9\xf7\x9c\x83\xaeT?%\xc3 \x03\xb2:@\xb1\xe4\xd4,-\xe0\xf8\x80ecMP\xf08u\xa4m\xb0V\xd0g\x8d\x8c9\x1cH\xe9\xf66\xae\x1c\x12\xcb\x0f\xe7\x8b}\x0f\xbe=\x03\xb35\x00\x83\x81\x97Ab\xd2\xc4\x8e\xe6\xfd\xcf\xec\xb0\x81\xac\xce\x99\xc0V:\x92\x9d\x85_\xa0\x1cle\x1d\xc9\xb9\xdf\n\x11\xcf\xca\xee\x96\x95\x16v\x17\x0b\xfd`\xcd\x110\x8e\xe4!$`G\xc1\xe9A\"I;$\xdf^?\xfa\x81\x02\xae\x8a\x13\xed\x89Ke \x05O\xa2\x15 \xcd\xf7\xb3N\x0f(\xd5\xbd\xe7\xf6\x93Yy\xca5B\xe7\x1a\xa1w\xb8\xd0V\xb9%\x88ju;)\x16lF\xb5oa\xe5\xec\xcb\x15\x9b7\x8f\xf3x\xe9\xff\x92\xda\xb8\xd84-\x89\xc9%\xeb\x9d \x05\x86\xf0$\xcf\xd6\xb9/zsk\xc7\x98(sh>\xbb\xc1DD\xa9\xc5\x83\xc9t\xa5T\xc4\x117\xf6rur\x1a\xc0\x16\x9c\xb80\xc9\x1f\xd2\xa2\xa0`\x02\xdc\xb4h5\x0d\x025\x90\x8c\xa6\xc9\xe4\xb6\x00\x93S\xe1}\xb8\xd1\xdd\xc8\x15\x06\xff\xd24\x1bhrva\x12\xbc\x03\xe9l\xe9c!ZjB]\x81\xb9\x8e\xa6\xa9\x1a\xce\x7fx\xe2D\xaf\x18\xb6\xffb\x96Tx\x8fzYS\x82\x1f\xdf6_\xf6)\x83\xcao\xb0\xd4p\x08>\x98#\x8f@P\xa2S\xd3*\xfdb\xdcS\\q\x0b\xc6\x8bx\xf1YfQ><\"\xf3\x7f\xb7\xb2\xe3\xa2\x0dY\x1c6\x91\xc5\xf2I\x1fE\x0e\x00r_7\xf1\x88\xd2\xb0\x0cV\xd4H\xc3!\x00\xb3\xc5I\xe7r\x8cEL\xe7\x8e6\x19\x9e!N\xbb\x00\x12\xf6q\"'\x92\x1cd\xa0\x93\x01a\xfeqMv\xba\xc5\x0f\x8bA\x1f\x82%\xed\xf5\xba\xba\xbe \xf1v\xb3\xed\x15\xbd\x8e\x0bn\x1f\xf0.;\x1f\x9a\x1f\xebA/\xcf\x07\xe7\xb0\xf42\xca\xb2\x89\x9ca8D$\x17\xff\x03GWv\x85#\xcc\x8f\xfb\x0d9\xaek\xc5\xc5'\x06\x01\xfc\x89\xcbo\xd8\x9f\x9co\x80@\xe2\xfd\xcc (]gk\xed+}/\x03 (nq\x16\x87\xba\xecK(f\xa2\xcd\xd6\x0b\xdd\xc6\x9f\xd0\xb8\x9ep\x1d\xfc\xf8\xcc2\xd4\x1f\xc83Y\xb0\xe3\xdd\xe9w\x00\xb2pD\xfedG\xb4q2$\xcc\xc9}\x91K\x13\xd3\xafA\xad\"\xf6E&N\x82tg\x19'N\x07e\xfds\xd5\xf3\x1a!\xd0\xae\xf04q\xec\x9c\x17o\x14}\xec\xbf\x9d\xa5S\xb5\x05\xb5\xeb\x1e,o\xd5jr/s\x0c\x9cT\x0e\xfe\x04MT\x97&\xf6\xd0\x1b\xf0Qf\\12\xa1h'&ctN\xa6\xfa'T\xc5x7\xbc\x81]2\xfb ;G\xcd\x0e\x02 \xca\x85\xeb\xa2\xe3\x17|T\xaa\x01++:%/ \x86\xe8\xa6\xfb\x8a\xb3\xff1T\xc2\x0b\x01\x91\xc5\xfa\xcf\x1a\x93\x0c\x1a\xf3\xcb\x80\x9f<\xd4\xf1\xcc4\xc2\x0e\xd3\xd4\xf9\x8a\x0e\xc0\xcd\x94\xd7\xff\x93\xcb\x97 \xc9,0\x0f~\xe1\xf2\x9d!\xa1W\x8bO\xa9\x1d\xe0'\x89 \xe1\xcd:s\xf1u\x01\xc8\xc6\xca\xd2\xa6\xd9\x86\xf9\x8f\x03(\xb4^\xef\xae\x8e\x18\x0c\xb5\xa5\x9c\xc2)\x98\xf87\xe8\xd8\x00f\xc1\x80\xd1ml\xa5\xf2\xd2\xb9\xee1\xc5\xabt\x19\xdc\x1d\xd2Z\x83\x04\xe8\x08h\xc0\x0d\xcaL0\x0c\xa7\xa3\xb7\x96\x1a6\xd2X\"J\xd2\x82\xed\n\x8c\x884\xa79\xd8\xd1 \x9d\xd6\xa9B\xe9\x1d}\x83\xde\xd4\xad`\xe8`\x91\xa5\xae\xf0\x84\xd3\x92\x7f\xe7 #\xa6J\xde\xefn\xe9\xe4\xf4\xd1_\x91F\xad 
H|\x9a\xa1$O\xc8K\xce\xfa=\xa1\xc5\x93\x1fi\x191\xf7\xa1\xa67\x8c\x94o-\x7fH\xcbq\x1d\xa1\xaa\xfbp[\xc9\xab%%:\x10\x04\x88\xe4\x80\xc9\x88\x15\x10i3\x1d\xdb\xa0\xfa\x84G C\x97\x00LL\x894\x8aS\xd0:\x9edB\xf2j|\x89\x88pY\xd3\xf6S\xfeD\x90P\x1d>\xb6p\xd3v\x00\xde\x04\xb25KLe\xe8{t0\xae\xf2\x91y\xc7END$\xe0*\x9d;z\x1c\xbb5\x9a\x8aN\x92\x02\xe1BI\xf3\xd9gn\x8c\x80\x7f.N\xc9|\xd7\xb6\xe0\xd1n\xd0\x05\x8d\x94R\xc8aS\xa4Z\xd7\xc2JcH\xb2\x10 m\xcd\xd1X\xf8\x06\xdc\xca\xdf\x07e\x90\x12k;_\x0c6\xc8,y\x1e\x01\xca\xc2b\x94\x8d0#\xa6Z\x84\xb8A\x14\x0d\x17\x06e|w\x82\xd4\xccG\x0dU\xbd1l\xb8\xcbLD\xd87\xc3\x84V\xa3q\x92\xddt[\xadxu\xddE\x94QUL\x88\x01\xef\xf0PB\x9dlZSh\x96\x92.\xe1\xe91Q0U\xec\xd9\xb18R\x84\x06i\x8d\xfap;\xa6\xf1{\xf3\xf4H#\x96G\x03ON\x02!?\xeb\xa3\x05\xe8t>\xa9Q \x08|p\xcak\xa4\x90\xdb\xf3\xa8q!\xe7gT,\xf6\xd5j\xc7\xd02\x04\xc3\xc8\x16s\xc7\x8d4\xed\x88\x8at\x1b\x94j\xe4\x0e\xb7n\xc6\x9b/\x08I\xc9O\x98E\x02!\xcb\x8bnF\xf5\x9b4\x12\xa8\x86\xb7\x88M\x0f&\x1e\xd41\x84\x1b\x92\x97\xbe\x85x\xd3$\xa7ew+v\x99S\xf0\xcb\n\x0e\x0cbm\x05]e%8\x0c\xb2\xe4\x90\x06P\xcc\xc2\x11\n!\xfa\xef\xb3\x11\x90\x8cs\xc2\x11\xf3_0\x066\xa3\x1b\xf2)\xc2\x8fQ\xb42JB\x86\xea\xd8\xfd\x01\x0c\x84[t9\xae\x96\x83'\x94\xa7\xb3\xd4\x9c,\xa7\xa2\xcc\xf4[\xbdf\x00\xc3\x86\xd7\x92\xb6]\x98\x1e\xc2B\x8aB\x1f\xc3@\x9a\xa6\xeer&B\x84s|\x95Q\x9a\x0d\xb0\xa7\x99\xd7g\xa8\xedOC\x88\x9e1\x87\xdcJ D\xe7<\xcc\xcf\xe2U\xff\x87\xb2\xce\xbc\xd3\x11(o\xa9!\xb3\x81h\xa6\x90\xdcK\xbdH\xfc\xea 0q\x9b\x88\x92\xa7A\xd1V\x88\x07\xbc'p\xb4f\xccy\"Q\nO\x04\x85\xdb\x0c2\xc7\x06Z\xbb\xbe\x9fq\xc2\xe0\xbd\x02#d\"\x9b@bQ\xbb\x06,\xae\x93\xc2\x0bw)\xeeP\xcd\\\x1db`x\x1f\x8a\xdf\x00O\xfe)\xde\xa2\x0cd\xbc\x13MC\x80$[Ho\xa4\x13\x90\x18W\xde\xa6\xd1va4{\xe4\xc7\xb1`52\xed\x0f\xfd\xba\x87\x06\xb3\x1e5;\x82\x19\x85X\xb0\xffao\x00K\x86;\x986\x93%\xc1R(\xc0\x82\x05\x8c\xd3\xc6\xd1\x85x9\x8a8\x0c\xc02r\xe3D\x1ec\xf7\x10\xa5@\xd9\x88\x8c\xe6\xbe\xc9\x10\xa4\xeeF\xd7<\x86d\x0f\x16\x07(\xc8AN#F\x81I\xb7\x9e\x9bz\x1cm\x1dE\x04\xfe\x89\xbbF=\xa9\xb1\x85\xc6\x9a\xad\xe5\x95S\x82\x80f\n4\xca8\xa7<'\xb4\x84\xedj\x90\xea\x8e\xf4-\xaa\x98\xda'\x08\xc7\x98<\xd2Tb\xf12\xddv\x80E\xc0t\xb8\xbfq\xa1\xd23qODd_\xed\xd0{`/\x9c\x0fhh\xea\x82\xf6\xcc`\xc2\x929_\xfc1hAY|\x19/\xf9\xeb\x0b\xab\xde\xb7U\xea\x08-\xcd\x95\xba\xd0\xc4A\xde\x0f\x94\xd5o(\xf1\xcb\xea\"\x93$r\xd8\x86T\xcc\xd7PR;\x15\xa7\x05.\xb8-w>&LJ\xf8i\x10\x1aC`\x05A\xa3^\x1b\xb1\x97\xd3#\x89\x80\xc4X\x178\x97t\x97\xf6\xe2H?\x80\x14\x08d\xc1\xbfa\xc3\xc4\x96TST\xda\x03a\x1e\xa8H\x9f0@\xd2\xee\x8a\x17\xf0\xf3U)\x88\xa3\xe6\xef^\x1be}Jb7\x17%\xd7\xdc\x94%\x0e:\x9b\xd3\xc6\xbf@\x00\x97\xafM\x8e\x07+\xf1\xbb\x17y\x94sq\x8e\x00\xaa\xa1\xebL\xcc\xd2\xfd\xf8\xbf\xe1\x0b\xca\x12Y\xaa\x19\x1a0\x0f\x140\xc3\x94\xf7G\xfcD\xa1 \x16>\xc4\xa9\xeaAW\x0b\x11\x88\xb6\xf02\x8dI\xdb:\xc4\xd9F \x9e\xc8\x08\x02\xc7\x03\x9a3\xe3\x9f2\xca\xa0\xedq\x17\x80\xf7\xc0\x94:6S\x95\x97\x8e\xd1]K\xce\xcf\"\xa0\xa5\x08\xaeg[\xa6\x9d \xe5\xcf\x91H\x93\x1b\x1d\x1e\x98\xe2\x03B\xac5\xc8V\x06\x1cEq\xdbLJ\x8c\x95X{C\xbc\x88\xb9\xa7B\xbd\xc5\x03\xd9\xd2!\x08\xa5P\xabI\xe1\x90\x1dq9\xbb\xf8Llx\x96\xae\xca\xaa7\xd2\x1b>\xd6\xa4\x96\xdb]@\xd5!@9H\x03\x94!\xaa\xed\xe4\xc8p\xc0\x1e\xc9\x99\x15\xd5$ \xe2?\xde\xd5)\x9b\xab\x81\xdc\x8e\xa8\x05l\xb0\x06/\"\xb1\x94\xc0\xcc\x81\x96\xaf+\x93@`}}:\\\xf7\x95\xaf\xd0 8\x95zQgS\xa3\xbf\x18+\xf2\x92\xa4\xbf\xc1\x92C\x84\xa3}\x80\x1bR:\x9f\xf5H\xfeUF\x02\\\xa1X\xfe\x92g\xf6\xc0\x18/\xe2\x08\xeb\x80\x04AZ%\x11c\x1f1\x10\xd5wlET\x00\x05\x96\x0fwX\x0c\xa0\x08ZNh\xa5\xa0\x85\x08\xc4yf2D\xc6 
\x80\xc3\xb8\x18\x89&v\xaeL\x93q\xee\x1e4\xc67\x95\xf1\xa7\xfb\xcaz\xfa\xf0\\\x90iJ\x11y\xc0\xe8J-k\xafN\xc43\xbd\xa0\xeb \xa3-\xbc\x05s\x91\xd1J\x175\x16\x89\x0f\x97\x1e)\xd9V0\x99N\x170\xddd\xda\\\xd3\x9bd0d-\xa9\xe3E\xda[mf\xa3\\\xa3Um\xc1x\x11\xb2\xd2\xd2C\xabR<(`\xaa\xd1\x95\xe6\x1d\x83p4^!\x06\x9dh\xd4Q\xe8\x0b\x00`\xa2\xf9\x81!l\x08\x93 ~\xc6\x99\xec:J\x87\xc9\xa0\xf1l\xfcW\xb1\xfe\x809\xcb\xb8\xccZXB\x14=\xeb\xc8l)`j\x9e\x1d\xaaeVJ\xb3\xe0U\x80\xb3\x02\x86G!\xaes\xd8\xe71\xd4?\xc6\xbc3\x84\xa8\xc3\x8a.\xb3\x1e}b\x1bIa\xd9\xea6\xe0\x03\xca\x95\x06\x16\x0d\x1e\x9ct?\xe8\xc0\x80\xdeS\x07xZJ'\xc3p\ni\xeb,\xa6\x12.\x88\x8f\xf1\xac\x12\x90\xd8R\x07\x07\x012T`5\x00\x98-R\nBxr\xe6WH\x03\xf6\x0cJP\xb0e#Bb\x89|\x93\xaf\x94-\xb1\xfe\x90\xa1\x90\xec\x8b[\xb2\x84\xa0\xc6\x1c\xe4P\xc2\xe2\xfd\x85\x1a\xa4\xa8Eh\x8b\xb1\xb3\xc2\x8b(5S\x9c\xa2\x1d\x95f\xd5r\xe4\x1c\xc3\x19/]\x7f\xcb\x1c\xb0\xd1I\xc6\x8a\x0d\xcc\x15\xd6d\x94\xdeE#\xfa\xa2O\xaeS\xfa3\x979\xd3\xbb]\xb8\x16\xba\xb3\x80e\x1e\x82\xbf\xdb\xae\xd5\xc9\xb9.9\x17_\x8cb\xeae\xa7\xe6\xbeM\x8c\x14\xb4\x819b\x1e\x13\x19#e\x8d\xa9(\x92\xa6\x11-\x88 0\x9d\xa7\xd2\xd7Ra\xb1\xe0\xc6\x1d\x849\xf9\xba\x96\x88\x06\"\x02\xfe\x89\xb1\xfe\xfd\xfdU,\xe1\xc2%\xfa~\xbeX\xe8\xdc\x80\x1f\xf6\x97\x94\xebz\x80\x15\xdb\xbd{'6[@\x84t[W%\xfd\xd1*\x0b.d'vR {\x94\xd2\xf0h\x1e\x8a\x04\xa6!\xdeAed\x92C\xaaE}\xbbx=E[\x0e\x01|\xefB$7J\xa1* B-\xe1\x00\x0c,=\x11k7\x94[_\xb6\xea-\xd0I\xf4\x96\x0c\xa2\x15\xab\x80\x92\x87J5e\xd6\xcc\xb6\xc4\x01\xb4\x13{\x0d\xc8\xed( \xb4\x86;\x10\x8d\x05\x14WMw\xa7`\xab\xb0\x02\x80\xcb~p\xdcA\xa0\xb0z\x15\x0e 8\x87\x02\xeef\xe6\x13))\xe2\x8c\xe2\xc2(\xde\xfc@ \xa9\x08\xc4\xaa\x07\xd9\x85\x85\xd9\x03<\xe1\xee\x85\xe4.a%N \xf2\xecn\x14\xe9@bz\xad\xc3\x19\x87\xc8\xd1\x1a\xc0\xb5\xbf>\x1a\x1a\xc0\x90\xeb\x18\x03\xf4%\x85\x87\x80\x05T\x07*?lgb\xbfd\xf6\x10\xc8<\x82\xc4\xb5\xe3\xfa\xc0w9Na\xac\xc5\x13\xbc8;<^*%\x9d\x9by\xd2:t\x11D\xa5\xd2\x95Z<\x1c@\x1b\x05\xfc\x890\xaa\xa8\xab\xe4\x82q4\xb1\xe4\xd0\xedl\\\x0d\x96\x1f\x15\x861\x86\x0e\xee\xc9\x1b\x9f\xd3\x00`/\x9e$\x12IJ \xd2\x93sN)\xbc;\x07:A;\x92)$\xd7\x95\n\xb0Ww\xa2\x12y%Kr\xdeIv\x0f\\b\xb6V\x07\x99\x06\xa3\\\x1cn\xadd{\xc0\xde\xf4\xc86\x15t\x10\xbb\x90v\x9d\xed\xd7/~\xa2\x00\xfc*O\xd6\xed\n\x117U\x16>\xa38\xfb\x17r\x82AC<\xba\x13j\xe9\x8aE\xe2\xa2-j\x0f\x1c\xe7\xd8\xe7\x89\xb7\xa8\xfcxs\xee)\x81\x8d\xccD\xa2\x9b\x961\xbc\xc3\x8c/\xcf\xcaq\x93p**\xcc\xb8\x18\xc0$\x02\xd9\x91\x8d\x13,\x0b\x16\xdb\xe1\x0d\x83\xb3B\x1a\xf5\xc8\xbcp\xc4\xfa\x0ek 
\x1bM\x17hp\x88K\xea7\x17\xc6\x15U\xe8\xa4\xc3]\xf0\xf8\xe1\x03h&\x18\x84-\x19\x13\x9e$\x03\x88\xe9\x8e\xbb\x81\xaf\x94\x93Y\xea\x90\x1b\xa3;\xe0q\xcb\xe96w\x95z\xdd\xf7\xd6W\xfb\xee\xcb\x84\xd6\xadA\xa6h\xb2\xecD\x7f\x9c\x91\x03^R\xf6\x03\xde\x1e\xc9\x01\"\xad\xc6s\x0f5\x00f\xfd\xfc\xeew\x0d\x88\xbf\xe4+\x10\xe7Q&\x12\x1f\x11\x92\x0e/9\xc8\x82\x96\x9c\xb8wN\x1eb\xc7\xeb\xe9\xfc\x90\x90\x19\xad\x0f\xe3\xfc\xb0\xd8z{\xe5\xd8\x1c\x95\xd3\xfeY\x17\xc5>\n]NE\xda\xc1\xb1c,\xdf\x9e#\x0cBF\xfd:0\xcd\xd8/-E\x18\x13\xc8\xbe\xc6\xc2\x8c\xc7\xd7\x83\xeb\x11F\\\xea\x8c\xf4\xe4I\xa7{t\xec\xe4A\xbbZ\x89C\x99O\x0e\x07R\xc3\x1buk\xa5\x0ci\xfa\x94\xf4)\x85ytkd\x9dN\xb8&\x16\x9bv\xa7\x15A\xb1\x99\x88\x0cP\x02{\xcd\xd6\xee\xf4\xcbP'\x9a\xf3\x92>\x02\xc8\xea\xe0x\x02\xe0\xc6\x86`.\xdc\x04\xe4%,;:\xd4\xbf\xd9:\x0f\x8e\xa9\x1a\xab\xad\xbf\xedaF\xf1\xa7o\x1aTQ\xab\x10}v#\xf4\xec\x9a\xd7\xa3\x02\x91\xf6\xdaQk\xe8\xc6'\xdds\xc4\xd4\xd3\xd6\x1f\x08\xd8\xf7\x12~\x85\x0f\xb5\xde\xcdz5h\x1fM\xc4Q\xca\x92\x06\xe1Y>C\x19\x85\xe8\x07\xca\x8d\x99\x00\x84\xe8\xbei\xb7\xcc\x02\x1d\x18U\xb1\xa0\xd3NF#\x11J\x180u\x8c\x05\xceC\xf0\xe4\xf08k\x93\x08!\nf\xab\xe9\xec\xa7v\x0c\xb9{\x7f\x1aE\xf1/\x1f\xcf\xeb\xe6IKIE\xcb>\x0c\xbap\xb7yd\x86\x13\xcce \n\xca\xbe\x12\x94=z\xf4\x86:@7\xd6J\xe0\xf7\xc4\x0e\xb3\xcc|\xc6\xc65g\x07\x0f8\xc0\xeex\x0c\xc53\xe7O\xb1\x8c\x1d\xaa\xfe\x80\xc4\x10\xdc\xdc\x07\n\xfe3\x1f\x80H1\x8b\xf3\xb1\xd8\x84F.\xa0y\xb4fz\xb4\xecW\x06IM\x1b\x0b\xf1\xd9\x19\x83\x18\x11\xc6j[\x81\x17.w\xe6%\x08\x84i?\x01\xd2\x86U\x12\xc2\xe8\xa9f|}@+\x04[8\x95k7Cx\xa4\x98S\x90\x85\xedEO\x02\x12\xde\xafp\xa0$\xe4\x17\x97\xfc\xe1\xe7\xbeQ\xe6\xbb+\x99\x19\xca\x03:\xb8<\xe1]\x81\xb6\x01\xb8K\xe23\x8f\x8bT-y\xb2\xc2\xcd\x16\x08[N\xd1\xd6z\xb4\xb5\x84;y\xb3\x8a\xa4-HZ\xfe\xaaY^\x15\xa1\xd4.\xa5M\x00*\x08\xd4'h8\xd2\xedA\x85\x04.\xb0N\xef2\x16r\x89\x9cLB\x9c\x0b7:Or\x8f\x92\xa9}\x91\x12C\x89S\xcb\x9aS\x199\xe4\x01Jq#\xec\xa3\x12WI}*8\xcbD!\x1b\x88\xb8# \x11\x14g#Y\x8f>\x138\x18`\x95\n\xec\x15\xd0\x92\xc2\xf1\x03\x0c?a\x85\x812\x12H,\x1f^\xf1\xe3\xc4'\x83\x8f\xef?\xb8\xff\x03\x90\x0e^\xb8\x88\xe6\x03\xa7n\x83h\xe3O\xc6\x92\xad\x07\x1a\xbfi<\xd1\x19\xaa\xe6\xddY\x0fa2\xc9+\x1e\x99\xfc\xcc6a\xb0F\xb1\xe2a<\xcc!\x84\xdb0\xac\x892\xbd]\x81c:\xefe\xbcK\xa4\xac\x1cX\x98X\x98[Ug\xe9O\x12\xafu\x175i\xd4yPcV\xd9T\xba\x9e5\x1fR\x01I\xfa\x9fA6\xf2O\xd4\xb8i\x0d\xa4\xfdC\x04\x8e\\\x87\xf1\x1e\x85\x97\xe3Q\x0fZ\x13\x8dM\x84D\xc4\xc6\x83\xd3\xda\x01\x90\xcfB!X\x96\xc4:\xf4\xd0\xe3\x08\x1c\\!\xc7\x14\x18^\xc1\x85\"\x14{\xa1E \x14Va\x07x$P\x1d \x17\x18\\$\x02\x0c\xb3DBBT\xde\xd3Ft\xe8\x9f~\x99\xc3{O\x87\x1e\x04\x00\xbc\x0dw\x00\xef\xf8\x9e5a#\xf8\x07`\x90\xab=\x04\x16\x00\x11g\x81\x12\x80\x11\xd0\x81\x13\xb0\x01Y\xfd\x192>\x1d\x87\xb1MG\xaf-G\xadk\x8f\xc3\xa8\xaa\xdb1T\xbeb\xa6\xfc\x85L\x18\n\xb9`*\xd9\x80\xabV\x02\xacX\n\xad\xde*\xa5x\xaae\xc2\xa7\xd6\x8a\x9dZ*c`\xaaV\xc1S\xd5b\xa5\xba\xc5JU\x8a\x92\xaa\x15\x1d\xd0*6\xa0TK@\xa8zqP\xc4\xe2\xb6\xc9\xc5h\x93\x8a\xc7g\x15\x86\xcc*\xdf\x94U\xa7(\xaa\xf6QU4\x82\xa79\x05L\x92\n\xadc\x15M\xc6*\x90\x8cT\xbb\x18\xa9R!R,B\xa3\xc8\x85E\xb0\x8a\x88\xa0\x15\x0f\xbe*C|Tz\xf8\xf4p\xf0\xf4F\xe8\xf4@\xe8\xf44\xe8\xf4*\xe0\xf4\x10\xe0\xf7\xf5\xb1\xee\x05\xb1\xed\x85\xb0\x96\x86\xd8\xac\xe1\xb1X\xf3b\xb0\x06\x80\x1cL\x80.\x99\x00T2\x00y`\x00\xae\xc0\x01Up\x02b\xe0\x03\xe5\xc0\x05\xeb\x00\n\x15\x00\x10\xaa\x00\x1bT\x00,\xa0\x00%@\x05\x1a`\x07\xe4\xc0\x0b\xe8\x80\x15\x90\x00#\xa0\x00?@\x00t\x80\x00\xa4\xf8\x19GL\x88\xeb\xc5\x9e\x00S\xc0)\xf6\x01\x9e\xc3\xbf\xa0z\x93\xfft\x07\xcf\xb2Fy\x15\xd7\x8e\x00 
\x1a\x0014\x00Lh\x00\x8c\xd0\x00\xe1\x80\x01\x83\x00\x01f\x00\x7f\x99\x00\xb0\xc8\x05e\xc0(.\x01)p\x08K\x80@\\\x01\xe2\xe0\x0c\x97\x00X\xb8\x02e@\x05\x15\x00\x08T\x0fb\xa0v\x95\x03h\x98\x1aD\xc0\xc5&\x05\xf90-I\x81bD\x02\x89\x10 d@\x1f\x11\x00ZD\x011\x10\x03\xa4@\x0c\x91\x00\x18D\xe0\x04y\xc0\xb8\xf3\x80\xd1\xa7\x00CN|\x0d9\xdc4\xe6\xd8\xd3\x9a#Nc\x0bl\xc2\x16\x05\xc0\xb0;\x05\x81\xb8,\x0d\x81`c\x8b\x03\x04X\x16\xe2\xc0\xb3\x16\x05@\x90(\x84\x812$\x030\x90\x0c\"@- \x00\x98$\x01\xe8\x90\x06B@\x16\x89\x00<$\x00\xd0\x90\x01\xc1\xc0\xf8\x0e\x078p7C\x81\xa6\x1c\x0c\x80\xe0b\x05\x02\xe8(\x16\x81@\xa5\n\x05\x18P\x05A@\x13\x85\x00F\x0c\x00\xb80\x02\x00\xc0\xf5\x06\x00t\x89\xfc\x01\x9c\xde\x04\x1d\x8b\xe4G\x16\xad\xe9\xd4OR\x97\x89\xe9\xc4\xb2\xe2I\xef\x1eT\xf1yS\x8d\xcdMW52\\T\xc6oR\xe5\xa5KV\x950\xc8\x8e\xac\x04\xe0\xbb\x1a\x82\x8c(\n-\xa0$\xb2\x80\x92\x9a\x01\x80\x94\x0d\xa4\x02!6\xa6\x84\xa2w\x88\xeaH\xa2\xa9\x0e\x86\xa3\xf9\xfa\x8fG\x12\xf9\xad\xa0\x11O r~\x07\x07\xe0e~\x06/\xe0]\xfe\x05\xb7\xe0V~\x05/\xe0P~\x057\xa0Sz\x04\xef\xa0K\xfa\x04\x97\xa0Fv\x04\x17`;\xf6\x03\xaf`9v\x05\xd1#\nJ\xa4\x1a\xdc\x17\xa7B\xcdN\x82,\x90\xe4\xd7\xce\x1f\xc5\xd3\xad\xb2'\xb0\x00\x00\x01\x03\x01`\x00\x00\x15\xa1'\xe2\x8f\x10\x82`\x04\\LT\xfe\xf0\xd9\x02A\x18p\x12Bs\x81)r\x85!\xd5\n\xe2(\n\x03\xd2\x04i\x82`PK\x07\x08X\xc7\xb1\x9c\x9fN\x00\x00\x9fN\x00\x00PK\x03\x04\x14\x00\x08\x00\x00\x00L\x84JI\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00&\x00\x00\x00fonts/glyphicons-halflings-regular.svg\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n PK\x07\x08|\xee\xc6\xc9\xc2\xa8\x01\x00\xc2\xa8\x01\x00PK\x03\x04\x14\x00\x08\x00\x00\x00L\x84JI\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00&\x00\x00\x00fonts/glyphicons-halflings-regular.ttf\x00\x01\x00\x00\x00\x0f\x00\x80\x00\x03\x00pFFTMm*\x97\xdc\x00\x00\x00\xfc\x00\x00\x00\x1cGDEF\x01D\x00\x04\x00\x00\x01\x18\x00\x00\x00 OS/2g\xb9k\x89\x00\x00\x018\x00\x00\x00`cmap\xda\xad\xe3\x81\x00\x00\x01\x98\x00\x00\x06rcvt \x00(\x02\xf8\x00\x00\x08\x0c\x00\x00\x00\x04gasp\xff\xff\x00\x03\x00\x00\x08\x10\x00\x00\x00\x08glyf}]\xc2o\x00\x00\x08\x18\x00\x00\x94\xa4head\x05M/\xd8\x00\x00\x9c\xbc\x00\x00\x006hhea\nD\x04\x11\x00\x00\x9c\xf4\x00\x00\x00$hmtx\xd2\xc7 `\x00\x00\x9d\x18\x00\x00\x03tlocao\xfb\x95\xce\x00\x00\xa0\x8c\x00\x00\x020maxp\x01j\x00\xd8\x00\x00\xa2\xbc\x00\x00\x00 name\xb3,\xa0\x9b\x00\x00\xa2\xdc\x00\x00\x03\xa2post\xba\xa3\xe55\x00\x00\xa6\x80\x00\x00\n\xd1webf\xc3\x18TP\x00\x00\xb1T\x00\x00\x00\x06\x00\x00\x00\x01\x00\x00\x00\x00\xcc=\xa2\xcf\x00\x00\x00\x00\xd0v\x81u\x00\x00\x00\x00\xd0vs\x97\x00\x01\x00\x00\x00\x0e\x00\x00\x00\x18\x00\x00\x00\x00\x00\x02\x00\x01\x00\x01\x01\x16\x00\x01\x00\x04\x00\x00\x00\x02\x00\x00\x00\x03\x04\x8b\x01\x90\x00\x05\x00\x04\x03\x0c\x02\xd0\x00\x00\x00Z\x03\x0c\x02\xd0\x00\x00\x01\xa4\x002\x02\xb8\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00UKWN\x00@\x00 
\xff\xff\x03\xc0\xff\x10\x00\x00\x05\x14\x00{\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 \x00\x01\x00\x00\x00\x05\x00\x00\x00\x03\x00\x00\x00,\x00\x00\x00\n\x00\x00\x01\xdc\x00\x01\x00\x00\x00\x00\x04h\x00\x03\x00\x01\x00\x00\x00,\x00\x03\x00\n\x00\x00\x01\xdc\x00\x04\x01\xb0\x00\x00\x00h\x00@\x00\x05\x00(\x00 \x00+\x00\xa0\x00\xa5 \n / _ \xac \xbd\"\x12#\x1b%\xfc&\x01&\xfa' '\x0f\xe0\x03\xe0 \xe0\x19\xe0)\xe09\xe0I\xe0Y\xe0`\xe0i\xe0y\xe0\x89\xe0\x97\xe1 \xe1\x19\xe1)\xe19\xe1F\xe1I\xe1Y\xe1i\xe1y\xe1\x89\xe1\x95\xe1\x99\xe2\x06\xe2 \xe2\x16\xe2\x19\xe2!\xe2'\xe29\xe2I\xe2Y\xe2`\xf8\xff\xff\xff\x00\x00\x00 \x00*\x00\xa0\x00\xa5 \x00 / _ \xac \xbd\"\x12#\x1b%\xfc&\x01&\xfa' '\x0f\xe0\x01\xe0\x05\xe0\x10\xe0 \xe00\xe0@\xe0P\xe0`\xe0b\xe0p\xe0\x80\xe0\x90\xe1\x01\xe1\x10\xe1 \xe10\xe1@\xe1H\xe1P\xe1`\xe1p\xe1\x80\xe1\x90\xe1\x97\xe2\x00\xe2 \xe2\x10\xe2\x18\xe2!\xe2#\xe20\xe2@\xe2P\xe2`\xf8\xff\xff\xff\xff\xe3\xff\xda\xfff\xffb\xe0\x08\xdf\xe4\xdf\xb5\xdfi\xdfY\xde\x05\xdc\xfd\xda\x1d\xda\x19\xd9!\xd9\x13\xd9\x0e \x1d \x1c \x16 \x10 \n \x04\x1f\xfe\x1f\xf8\x1f\xf7\x1f\xf1\x1f\xeb\x1f\xe5\x1f|\x1fv\x1fp\x1fj\x1fd\x1fc\x1f]\x1fW\x1fQ\x1fK\x1fE\x1fD\x1e\xde\x1e\xdc\x1e\xd6\x1e\xd5\x1e\xce\x1e\xcd\x1e\xc5\x1e\xbf\x1e\xb9\x1e\xb3\x08\x15\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x02\x8c\x00\x00\x00\x00\x00\x00\x005\x00\x00\x00 \x00\x00\x00 \x00\x00\x00\x03\x00\x00\x00*\x00\x00\x00+\x00\x00\x00\x04\x00\x00\x00\xa0\x00\x00\x00\xa0\x00\x00\x00\x06\x00\x00\x00\xa5\x00\x00\x00\xa5\x00\x00\x00\x07\x00\x00 \x00\x00\x00 \n\x00\x00\x00\x08\x00\x00 /\x00\x00 /\x00\x00\x00\x13\x00\x00 _\x00\x00 _\x00\x00\x00\x14\x00\x00 \xac\x00\x00 \xac\x00\x00\x00\x15\x00\x00 \xbd\x00\x00 \xbd\x00\x00\x00\x16\x00\x00\"\x12\x00\x00\"\x12\x00\x00\x00\x17\x00\x00#\x1b\x00\x00#\x1b\x00\x00\x00\x18\x00\x00%\xfc\x00\x00%\xfc\x00\x00\x00\x19\x00\x00&\x01\x00\x00&\x01\x00\x00\x00\x1a\x00\x00&\xfa\x00\x00&\xfa\x00\x00\x00\x1b\x00\x00' \x00\x00' \x00\x00\x00\x1c\x00\x00'\x0f\x00\x00'\x0f\x00\x00\x00\x1d\x00\x00\xe0\x01\x00\x00\xe0\x03\x00\x00\x00\x1e\x00\x00\xe0\x05\x00\x00\xe0 \x00\x00\x00!\x00\x00\xe0\x10\x00\x00\xe0\x19\x00\x00\x00&\x00\x00\xe0 \x00\x00\xe0)\x00\x00\x000\x00\x00\xe00\x00\x00\xe09\x00\x00\x00:\x00\x00\xe0@\x00\x00\xe0I\x00\x00\x00D\x00\x00\xe0P\x00\x00\xe0Y\x00\x00\x00N\x00\x00\xe0`\x00\x00\xe0`\x00\x00\x00X\x00\x00\xe0b\x00\x00\xe0i\x00\x00\x00Y\x00\x00\xe0p\x00\x00\xe0y\x00\x00\x00a\x00\x00\xe0\x80\x00\x00\xe0\x89\x00\x00\x00k\x00\x00\xe0\x90\x00\x00\xe0\x97\x00\x00\x00u\x00\x00\xe1\x01\x00\x00\xe1 \x00\x00\x00}\x00\x00\xe1\x10\x00\x00\xe1\x19\x00\x00\x00\x86\x00\x00\xe1 \x00\x00\xe1)\x00\x00\x00\x90\x00\x00\xe10\x00\x00\xe19\x00\x00\x00\x9a\x00\x00\xe1@\x00\x00\xe1F\x00\x00\x00\xa4\x00\x00\xe1H\x00\x00\xe1I\x00\x00\x00\xab\x00\x00\xe1P\x00\x00\xe1Y\x00\x00\x00\xad\x00\x00\xe1`\x00\x00\xe1i\x00\x00\x00\xb7\x00\x00\xe1p\x00\x00\xe1y\x00\x00\x00\xc1\x00\x00\xe1\x80\x00\x00\xe1\x89\x00\x00\x00\xcb\x00\x00\xe1\x90\x00\x00\xe1\x95\x00\x00\x00\xd5\x00\x00\xe1\x97\x00\x00\xe1\x99\x00\x00\x00\xdb\x00\x00\xe2\x00\x00\x00\xe2\x06\x00\x00\x00\xde\x00\x00\xe2 \x00\x00\xe2 
\x00\x00\x00\xe5\x00\x00\xe2\x10\x00\x00\xe2\x16\x00\x00\x00\xe6\x00\x00\xe2\x18\x00\x00\xe2\x19\x00\x00\x00\xed\x00\x00\xe2!\x00\x00\xe2!\x00\x00\x00\xef\x00\x00\xe2#\x00\x00\xe2'\x00\x00\x00\xf0\x00\x00\xe20\x00\x00\xe29\x00\x00\x00\xf5\x00\x00\xe2@\x00\x00\xe2I\x00\x00\x00\xff\x00\x00\xe2P\x00\x00\xe2Y\x00\x00\x01 \x00\x00\xe2`\x00\x00\xe2`\x00\x00\x01\x13\x00\x00\xf8\xff\x00\x00\xf8\xff\x00\x00\x01\x14\x00\x01\xf5\x11\x00\x01\xf5\x11\x00\x00\x01\x15\x00\x01\xf6\xaa\x00\x01\xf6\xaa\x00\x00\x01\x16\x00\x06\x02\n\x00\x00\x00\x00\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x14\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00(\x02\xf8\x00\x00\x00\x01\xff\xff\x00\x02\x00\x02\x00(\x00\x00\x01h\x03 \x00\x03\x00\x07\x00.\xb1\x01\x00/<\xb2\x07\x04\x00\xed2\xb1\x06\x05\xdc<\xb2\x03\x02\x00\xed2\x00\xb1\x03\x00/<\xb2\x05\x04\x00\xed2\xb2\x07\x06\x01\xfc<\xb2\x01\x02\x00\xed23\x11!\x11%3\x11#(\x01@\xfe\xe8\xf0\xf0\x03 
\xfc\xe0(\x02\xd0\x00\x01\x00d\x00d\x04L\x04L\x00[\x00\x00\x012\x16\x1f\x01\x1e\x01\x1d\x017>\x01\x1f\x01\x16\x06\x0f\x0132\x16\x17\x16\x15\x14\x06\x0f\x01\x0e\x01+\x01\x17\x1e\x01\x0f\x01\x06&/\x01\x15\x14\x06\x07\x06#\"&/\x01.\x01=\x01\x07\x0e\x01/\x01&6?\x01#\"&'&546?\x01>\x01;\x01'.\x01?\x016\x16\x1f\x0154676\x02X\x0f&\x0b\x0b\n\x0f\x9e\x07\x16\x08j\x07\x02\x07\x9e\xe0\n\x11\x02\x06\x03\x02\x01\x02\x11\n\xe0\x9e\x07\x02\x07j\x08\x16\x07\x9e\x0f\n)\"\x0f&\x0b\x0b\n\x0f\x9e\x07\x16\x08j\x07\x02\x07\x9e\xe0\n\x11\x02\x06\x03\x02\x01\x02\x11\n\xe0\x9e\x07\x02\x07j\x08\x16\x07\x9e\x0f\n)\x04L\x03\x02\x01\x02\x11\n\xe0\x9e\x07\x02\x07j\x08\x16\x07\x9e\x0f\n)\"\x0f&\x0b\x0b\n\x0f\x9e\x07\x16\x08j\x07\x02\x07\x9e\xe0\n\x11\x02\x06\x03\x02\x01\x02\x11\n\xe0\x9e\x07\x02\x07j\x08\x16\x07\x9e\x0f\n)\"\x0f&\x0b\x0b\n\x0f\x9e\x07\x16\x08j\x07\x02\x07\x9e\xe0\n\x11\x02\x06\x00\x00\x00\x00\x01\x00\x00\x00\x00\x04L\x04L\x00#\x00\x00\x0132\x16\x15\x11!2\x16\x1d\x01\x14\x06#!\x11\x14\x06+\x01\"&5\x11!\"&=\x01463!\x1146\x01\xc2\xc8\x15\x1d\x01^\x15\x1d\x1d\x15\xfe\xa2\x1d\x15\xc8\x15\x1d\xfe\xa2\x15\x1d\x1d\x15\x01^\x1d\x04L\x1d\x15\xfe\xa2\x1d\x15\xc8\x15\x1d\xfe\xa2\x15\x1d\x1d\x15\x01^\x1d\x15\xc8\x15\x1d\x01^\x15\x1d\x00\x00\x00\x00\x01\x00p\x00\x00\x04@\x04L\x00E\x00\x00\x0132\x16\x07\x01\x06\x07!2\x16\x0f\x01\x0e\x01+\x01\x15!2\x16\x0f\x01\x0e\x01+\x01\x15\x14\x06+\x01\"&=\x01!\"&?\x01>\x01;\x015!\"&?\x01>\x01;\x01&'\x01&6;\x012\x1f\x01\x162?\x016\x039\xfa\n\x05\x08\xfe\x94\x06\x05\x01\x0c\n\x06\x06x\x06\x18\n}\x01\x13\n\x06\x06x\x06\x18\n}\x0f\x0b\x94\x0b\x0f\xfe\xed\n\x06\x06x\x06\x18\n}\xfe\xed\n\x06\x06x\x06\x18\nv\x05\x06\xfe\x94\x08\x05\n\xfa\x19\x12\xa4\x08\x14\x08\xa4\x12\x04L\n\x08\xfe\x94\x06\x0c\x0c\x08\xa0\x08\x0cd\x0c\x08\xa0\x08\x0c\xae\x0b\x0f\x0f\x0b\xae\x0c\x08\xa0\x08\x0cd\x0c\x08\xa0\x08\x0c\x0c\x06\x01l\x08\n\x12\xa4\x08\x08\xa4\x12\x00\x00\x01\x00d\x00\x05\x04\x8c\x04\xae\x00;\x00\x00\x012\x17\x16\x17#4.\x03#\"\x0e\x03\x07!\x07!\x06\x15!\x07!\x1e\x0432>\x0353\x06\x07\x06#\"'.\x01'#7367#73>\x0176\x02\xe8\xf2p<\x06\xb5#4@9\x17\x13+820\x0f\x01{d\xfe\xd4\x06\x01\x96d\xfe\xd4 09B4\x15\x169@4#\xae\x1ebk\xa7\xcev$B\x0c\xd9dp\x01\x05\xdad\x86\x14>\x1fu\x04\xae\xbdhi-K0!\x0f\x0b\x1e.O2d22dJtB+\x0f\x0f\"0J+\xabku\x9e0\xaawd/5dW\x85%\x8d\x00\x00\x02\x00{\x00\x00\x04L\x04\xb0\x00>\x00G\x00\x00\x01!2\x1e\x05\x15\x1c\x01\x15\x14\x0e\x05+\x01\x07!2\x16\x0f\x01\x0e\x01+\x01\x15\x14\x06+\x01\"&=\x01!\"&?\x01>\x01;\x015!\"&?\x01>\x01;\x01\x1146\x17\x1132654&#\x01\xac\x01^CjB0\x16\x0c\x01\x01\x0c\x160BjC\xb2\x02\x01 \n\x06\x06x\x06\x18\n\x8a\x0b\n\x95\n\x0f\xfe\xf5\n\x06\x06x\x06\x18\nu\xfe\xf5\n\x06\x06x\x06\x18\nu\x0f\xb6\xcb@--@\x04\xb0\x1a$?2O*$\x0b\x0b\x0b$*P2@%\x1ad\x0c\x08\xa0\x08\x0c\xaf\x0b\x0e\x0f\n\xaf\x0c\x08\xa0\x08\x0cd\x0c\x08\xa0\x08\x0c\x01\xdb\n\x0f\xc8\xfe\xd4BVT@\x00\x00\x01\x00\xc8\x01\x90\x04L\x02\xbc\x00\x0f\x00\x00\x13!2\x16\x1d\x01\x14\x06#!\"&=\x0146\xfa\x03 \x15\x1d\x1d\x15\xfc\xe0\x15\x1d\x1d\x02\xbc\x1d\x15\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x00\x00\x00\x02\x00\xc8\x00\x00\x03\xe8\x04\xb0\x00%\x00A\x00\x00\x01\x15\x14\x06+\x01\x15\x14\x06\x07\x1e\x01\x1d\x0132\x16\x1d\x01!546;\x015467.\x01=\x01#\"&=\x01\x17\x15\x14\x16\x17\x1e\x01\x14\x06\x07\x0e\x01\x1d\x01!54&'.\x01467>\x01=\x01\x03\xe8\x1d\x152cQQc2\x15\x1d\xfc\xe0\x1d\x152cQQc2\x15\x1d\xc8A7\x1c \x1c7A\x01\x90A7\x1c \x1c7A\x04\xb0\x96\x15\x1dd[\x95##\x95[\x96\x1d\x15\x96\x96\x15\x1d\x96[\x95##\x95[d\x1d\x15\x96\xc8d\x01\x16\x1f\x0176\x03!'\x03\x02\xf6 \n\x88\x01\xd3\x1e\x14\x1e\xfbP\x1e\x14\x1e\x01\xd4\x87\n $\nop 
z\x01y\xb6\xc3\x04\xb3\x13#\x10\xbb\xfd\x16%\x15**\x15%\x02\xea\xb7\x10$\x14 \x10\x94\x96\x10\xfc\x1ep\x02\x16\x00\x00\x00\x00\x04\x00\x00\x00d\x04\xb0\x04L\x00\x0b\x00\x17\x00#\x007\x00\x00\x13!2\x16\x07\x01\x06\"'\x01&6\x17\x01\x16\x14\x07\x01\x06&5\x1146 \x016\x16\x15\x11\x14\x06'\x01&4\x07\x01\x16\x06#!\"&7\x0162\x1f\x01\x162?\x0162\x19\x04~\n\x05\x08\xfd\xcc\x08\x14\x08\xfd\xcc\x08\x05\x03\x01\x08\x08\x08\xfe\xf8\x08\n\n\x03\x8c\x01\x08\x08\n\n\x08\xfe\xf8\x08\\\x01l\x08\x05\n\xfb\x82\n\x05\x08\x01l\x08\x14\x08\xa4\x08\x14\x08\xa4\x08\x14\x04L\n\x08\xfd\xc9\x08\x08\x027\x08\n\xda\xfe\xf8\x08\x14\x08\xfe\xf8\x08\x05\n\x02&\n\x05\xfe\xf0\x01\x08\x08\x05\n\xfd\xda\n\x05\x08\x01\x08\x08\x14\x80\xfe\x94\x08\n\n\x08\x01l\x08\x08\xa4\x08\x08\xa4\x08\x00\x00\x00\x03\xff\xf0\xff\xf0\x04\xba\x04\xba\x00 \x00\x0d\x00\x10\x00\x00\x002\x1f\x01\x16\x14\x0f\x01'7\x13\x01' \x01\x05\x13\x03\xe0&\x0e\x99\x0d\x0dc\xd6_\"\xfd\x99\xd6\x02f\xfe\x1f\xfe\xb3n\x04\xba\x0d\x99\x0e&\x0e\\\xd6`\xfet\xfd\x9a\xd6\x02f\xfcjp\x01O\x00\x00\x00\x01\x00\x00\x00\x00\x04\xb0\x04\xb0\x00\x0f\x00\x00\x01\x1132\x16\x1d\x01!546;\x01\x11\x01!\x02\xbc\xfa\x15\x1d\xfc\xe0\x1d\x15\xfa\xfe\x0c\x04\xb0\x02\x8a\xfd\xda\x1d\x1522\x15\x1d\x02&\x02&\x00\x00\x00\x01\x00\x0e\x00\x08\x04L\x04\x9c\x00\x1f\x00\x00\x01%6\x16\x15\x11\x14\x06\x07\x06.\x01676\x17\x11\x05\x11\x14\x06\x07\x06.\x01676\x17\x1146\x01p\x02\x85'0SFO\x88$WOHB\xfd\xa8XAO\x88$WOHB\x1d\x03\xf9\xa3\x0f\x1e\"\xfc\xc17Q\x17\x19)mr\x19\x18\x10\x02 \x9b\xfd\xa2*`\x15\x1a)nq\x1a\x18\x11\x02\x7f&*\x00\x00\x00\x02\x00 \xff\xf8\x04\xbb\x04\xa7\x00\x1d\x00)\x00\x00\x002\x1e\x02\x15\x07\x17\x14\x07\x01\x16\x06\x0f\x01\x06\"'\x01\x06#'\x07\".\x024>\x01\x04\"\x0e\x01\x14\x1e\x012>\x014&\x01\x90\xc8\xb6\x83N\x01\x01N\x01\x13\x17\x01\x16;\x1a)\x13\xfe\xedw\x8e\x05\x02d\xb6\x83NN\x83\x01r\xb0\x94VV\x94\xb0\x94VV\x04\xa7N\x83\xb6d\x02\x05\x8dy\xfe\xee\x1a%\x18:\x14\x14\x01\x12M\x01\x01N\x83\xb6\xc8\xb6\x83[V\x94\xb0\x94VV\x94\xb0\x94\x00\x01\x00d\x00X\x04\xaf\x04D\x00\x19\x00\x00\x01>\x02\x1e\x02\x15\x14\x0e\x03\x07.\x0454>\x02\x1e\x01\x02\x890{xuX6Cy\x84\xa8>>\xa7\x85xC8Zvxy\x03\xb5DH\x05-Sv@9y\x80\x7f\xb2UU\xb2\x7f\x80y9@vS-\x05H\x00\x00\x00\x01\xff\xd3\x00^\x04{\x04\x94\x00\x18\x00\x00\x01\x1362\x17\x13!2\x16\x07\x05\x13\x16\x06'%\x05\x06&7\x13%&63\x01\x97\x83\x07\x15\x07\x81\x01\xa5\x15\x06\x11\xfe\xaa\x82\x07\x0f\x11\xfe\xa9\xfe\xaa\x11\x0f\x07\x82\xfe\xa5\x11\x06\x14\x03 \x01a\x13\x13\xfe\x9f\x11\x0c\xf9\xfeo\x14\x0b\x0c\xf6\xf7\x0c\x0b\x14\x01\x90\xfb\x0c\x11\x00\x02\xff\xd3\x00^\x04{\x04\x94\x00\x18\x00\"\x00\x00\x01\x1362\x17\x13!2\x16\x07\x05\x13\x16\x06'%\x05\x06&7\x13%&63\x05#\x17\x077\x17'7#'\x01\x97\x83\x07\x15\x07\x81\x01\xa5\x15\x06\x11\xfe\xaa\x82\x07\x0f\x11\xfe\xa9\xfe\xaa\x11\x0f\x07\x82\xfe\xa5\x11\x06\x14\x01\xf3\xf0\xc5J\xc1\xc3J\xc0\xeaN\x03 \x01a\x13\x13\xfe\x9f\x11\x0c\xf9\xfeo\x14\x0b\x0c\xf6\xf7\x0c\x0b\x14\x01\x90\xfb\x0c\x11d\x8e\xe2\x8b\x8c\xe5\x8c\xd3\x00\x00\x01\x00\x00\x00\x00\x04\xb0\x04\xb0\x00&\x00\x00\x012\x16\x1d\x01\x14\x06#\x15\x14\x16\x17\x05\x1e\x01\x1d\x01\x14\x06#!\"&=\x01467%>\x01=\x01\"&=\x0146\x02X|\xb0>&\x0c \x01f \x0c\x0f\n\xfb\x82\n\x0f\x0c \x01f 
\x0c&>\xb0\x04\xb0\xb0|\xfa.hK\n\x17\x06\xe6\x05\x17\n]\n\x0f\x0f\n]\n\x17\x05\xe6\x06\x17\nKh.\xfa|\xb0\x00\x00\x00\x0d\x00\x00\x00\x00\x04\xb0\x04L\x00\x0f\x00\x13\x00#\x00'\x00+\x00/\x003\x007\x00G\x00K\x00O\x00S\x00W\x00\x00\x13!2\x16\x15\x11\x14\x06#!\"&5\x1146\x17\x1535)\x01\"\x06\x15\x11\x14\x163!265\x114&3\x1535\x05\x1535!\x1535\x05\x1535!\x1535\x07!\"\x06\x15\x11\x14\x163!265\x114&\x05\x1535!\x1535\x05\x1535!\x1535\x19\x04~\n\x0f\x0f\n\xfb\x82\n\x0f\x0fUd\x02\xa3\xfd\xda\n\x0f\x0f\n\x02&\n\x0f\x0fsd\xfc\x18d\x03 d\xfc\x18d\x03 d\xe1\xfd\xda\n\x0f\x0f\n\x02&\n\x0f\x0f\xfc\xefd\x03 d\xfc\x18d\x03 d\x04L\x0f\n\xfb\xe6\n\x0f\x0f\n\x04\x1a\n\x0fddd\x0f\n\xfe\xa2\n\x0f\x0f\n\x01^\n\x0fdd\xc8dddd\xc8ddddd\x0f\n\xfe\xa2\n\x0f\x0f\n\x01^\n\x0fddddd\xc8dddd\x00\x00\x04\x00\x00\x00\x00\x04L\x04L\x00\x0f\x00\x1f\x00/\x00?\x00\x00\x13!2\x16\x15\x11\x14\x06#!\"&5\x1146)\x012\x16\x15\x11\x14\x06#!\"&5\x1146\x01!2\x16\x15\x11\x14\x06#!\"&5\x1146)\x012\x16\x15\x11\x14\x06#!\"&5\x11462\x01\x90\x15\x1d\x1d\x15\xfep\x15\x1d\x1d\x02m\x01\x90\x15\x1d\x1d\x15\xfep\x15\x1d\x1d\xfd\xbd\x01\x90\x15\x1d\x1d\x15\xfep\x15\x1d\x1d\x02m\x01\x90\x15\x1d\x1d\x15\xfep\x15\x1d\x1d\x04L\x1d\x15\xfep\x15\x1d\x1d\x15\x01\x90\x15\x1d\x1d\x15\xfep\x15\x1d\x1d\x15\x01\x90\x15\x1d\xfd\xa8\x1d\x15\xfep\x15\x1d\x1d\x15\x01\x90\x15\x1d\x1d\x15\xfep\x15\x1d\x1d\x15\x01\x90\x15\x1d\x00\x00 \x00\x00\x00\x00\x04L\x04L\x00\x0f\x00\x1f\x00/\x00?\x00O\x00_\x00o\x00\x7f\x00\x8f\x00\x00\x1332\x16\x1d\x01\x14\x06+\x01\"&=\x0146!32\x16\x1d\x01\x14\x06+\x01\"&=\x0146!32\x16\x1d\x01\x14\x06+\x01\"&=\x0146\x0132\x16\x1d\x01\x14\x06+\x01\"&=\x0146!32\x16\x1d\x01\x14\x06+\x01\"&=\x0146!32\x16\x1d\x01\x14\x06+\x01\"&=\x0146\x0132\x16\x1d\x01\x14\x06+\x01\"&=\x0146!32\x16\x1d\x01\x14\x06+\x01\"&=\x0146!32\x16\x1d\x01\x14\x06+\x01\"&=\x01462\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x1d\x01\xa5\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x1d\x01\xa5\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x1d\xfc\xf5\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x1d\x01\xa5\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x1d\x01\xa5\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x1d\xfc\xf5\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x1d\x01\xa5\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x1d\x01\xa5\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x1d\x04L\x1d\x15\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\xfep\x1d\x15\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\xfep\x1d\x15\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x00\x06\x00\x00\x00\x00\x04\xb0\x04L\x00\x0f\x00\x1f\x00/\x00?\x00O\x00_\x00\x00\x1332\x16\x1d\x01\x14\x06+\x01\"&=\x0146)\x012\x16\x1d\x01\x14\x06#!\"&=\x0146\x0132\x16\x1d\x01\x14\x06+\x01\"&=\x0146)\x012\x16\x1d\x01\x14\x06#!\"&=\x0146\x0132\x16\x1d\x01\x14\x06+\x01\"&=\x0146)\x012\x16\x1d\x01\x14\x06#!\"&=\x01462\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x1d\x01\xa5\x02\xbc\x15\x1d\x1d\x15\xfdD\x15\x1d\x1d\xfe\x85\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x1d\x01\xa5\x02\xbc\x15\x1d\x1d\x15\xfdD\x15\x1d\x1d\xfe\x85\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x1d\x01\xa5\x02\xbc\x15\x1d\x1d\x15\xfdD\x15\x1d\x1d\x04L\x1d\x15\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\xfep\x1d\x15\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\xfep\x1d\x15\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x1d\x15\xc8\x15\x1d\x00\x00\x00\x00\x01\x00&\x00,\x04\xe8\x04 \x00\x17\x00\x00 
\x0162\x1f\x01\x16\x14\x07\x01\x06\"'\x01&4?\x0162\x1f\x01\x162\x01\xd1\x02;\x08\x14\x07\xb1\x08\x08\xfc\xf1\x07\x15\x07\xfe\x80\x08\x08\xb1\x07\x14\x08\xab\x07\x16\x01\xdd\x02;\x08\x08\xb1\x07\x14\x08\xfc\xf0\x08\x08\x01\x80\x08\x14\x07\xb1\x08\x08\xab\x07\x00\x01\x00n\x00n\x04B\x04B\x00#\x00\x00\x01\x17\x16\x14\x07 \x01\x16\x14\x0f\x01\x06\"' \x01\x06\"/\x01&47 \x01&4?\x0162\x17 \x0162\x03\x88\xb2\x08\x08\xfe\xf5\x01\x0b\x08\x08\xb2\x08\x15\x07\xfe\xf4\xfe\xf4\x07\x15\x08\xb2\x08\x08\x01\x0b\xfe\xf5\x08\x08\xb2\x08\x15\x07\x01\x0c\x01\x0c\x07\x15\x04;\xb3\x08\x15\x07\xfe\xf4\xfe\xf4\x07\x15\x08\xb2\x08\x08\x01\x0b\xfe\xf5\x08\x08\xb2\x08\x15\x07\x01\x0c\x01\x0c\x07\x15\x08\xb2\x08\x08\xfe\xf5\x01\x0c\x07\x00\x03\x00\x17\xff\xeb\x04\xc5\x04\x99\x00\x19\x00%\x00I\x00\x00\x002\x1e\x02\x15\x14\x07\x01\x16\x14\x0f\x01\x06\"'\x01\x06#\".\x024>\x01\x04\"\x0e\x01\x14\x1e\x012>\x014&\x0532\x16\x1d\x0132\x16\x1d\x01\x14\x06+\x01\x15\x14\x06+\x01\"&=\x01#\"&=\x0146;\x01546\x01\x99\xc4\xb3\x82MN\x01,\x08\x08m\x07\x15\x08\xfe\xd4w\x8eb\xb4\x81MM\x81\x01o\xb3\x98XX\x98\xb3\x99XX\xfe\xbc\x96\n\x0fK\n\x0f\x0f\nK\x0f\n\x96\n\x0fK\n\x0f\x0f\nK\x0f\x04\x99M\x82\xb3b\x8dy\xfe\xd5\x08\x15\x08l\x08\x08\x01+MM\x81\xb4\xc4\xb3\x82MX\x99\xb3\x98XX\x98\xb3\x99#\x0f\nK\x0f\n\x96\n\x0fK\n\x0f\x0f\nK\x0f\n\x96\n\x0fK\n\x0f\x00\x00\x03\x00\x17\xff\xeb\x04\xc5\x04\x99\x00\x19\x00%\x005\x00\x00\x002\x1e\x02\x15\x14\x07\x01\x16\x14\x0f\x01\x06\"'\x01\x06#\".\x024>\x01\x04\"\x0e\x01\x14\x1e\x012>\x014&\x05!2\x16\x1d\x01\x14\x06#!\"&=\x0146\x01\x99\xc4\xb3\x82MN\x01,\x08\x08m\x07\x15\x08\xfe\xd4w\x8eb\xb4\x81MM\x81\x01o\xb3\x98XX\x98\xb3\x99XX\xfeX\x01^\n\x0f\x0f\n\xfe\xa2\n\x0f\x0f\x04\x99M\x82\xb3b\x8dy\xfe\xd5\x08\x15\x08l\x08\x08\x01+MM\x81\xb4\xc4\xb3\x82MX\x99\xb3\x98XX\x98\xb3\x99\x87\x0f\n\x96\n\x0f\x0f\n\x96\n\x0f\x00\x00\x00\x00\x02\x00\x17\x00\x17\x04\x99\x04\xb0\x00\x0f\x00-\x00\x00\x0132\x16\x15\x11\x14\x06+\x01\"&5\x1146\x055\x16\x12\x15\x14\x0e\x02\".\x0254\x127\x15\x0e\x01\x15\x14\x1e\x012>\x0154&\x02&d\x15\x1d\x1d\x15d\x15\x1d\x1d\x01\x0f\xa7\xd2[\x9b\xd6\xea\xd6\x9b[\xd2\xa7g|r\xc5\xe8\xc5r|\x04\xb0\x1d\x15\xfep\x15\x1d\x1d\x15\x01\x90\x15\x1d\xe1\xa6>\xfe\xd9\xb8u\xd6\x9b[[\x9b\xd6u\xb8\x01'>\xa67\xc8xt\xc5rr\xc5tx\xc8\x00\x04\x00d\x00\x00\x04\xb0\x04\xb0\x00\x0f\x00\x1f\x00/\x00?\x00\x00\x0132\x16\x15\x11\x14\x06+\x01\"&5\x1146\x0132\x16\x15\x11\x14\x06+\x01\"&5\x1146\x0132\x16\x15\x11\x14\x06+\x01\"&5\x1146\x0532\x16\x1d\x01\x14\x06+\x01\"&=\x0146\x04\x01\x96\n\x0f\x0f\n\x96\n\x0f\x0f\xfe\xde\x96\n\x0f\x0f\n\x96\n\x0f\x0f\xfe\xde\x96\n\x0f\x0f\n\x96\n\x0f\x0f\xfe\xde\x96\n\x0f\x0f\n\x96\n\x0f\x0f\x04\xb0\x0f\n\xfb\x82\n\x0f\x0f\n\x04~\n\x0f\xfep\x0f\n\xfd\x12\n\x0f\x0f\n\x02\xee\n\x0f\xfe\xd4\x0f\n\xfe>\n\x0f\x0f\n\x01\xc2\n\x0f\xc8\x0f\n\xfa\n\x0f\x0f\n\xfa\n\x0f\x00\x00\x00\x00\x02\x00\x1a\x00\x1b\x04\x96\x04\x96\x00G\x00O\x00\x00\x012\x1f\x02\x16\x1f\x017\x16\x17\x07\x17\x16\x1f\x02\x16\x15\x14\x0f\x02\x06\x0f\x01\x17\x06\x07'\x07\x06\x0f\x02\x06#\"/\x02&/\x01\x07&'7'&/\x02&54?\x026?\x01'67\x1776?\x026\x12\"\x06\x14\x16264\x02X!)&\x051-\x05\x86=+P\x03\x19\x0e\x01\x98\x05\x05\x98\x01\x0f\x18\x03P08\x86\x05,2\x05&+\x1f!)&\x051-\x05\x86<,P\x03\x19\x0d\x02\x97\x06\x06\x97\x02\x0d\x19\x03P/:\x85\x05-1\x05&+x\xb2~~\xb2~\x04\x96\x05\x98\x01\x0e\x19\x02P09\x86\x05,1\x06&+\x1e\"(&\x061,\x05\x86=,Q\x03\x19\x0e\x02\x97\x05\x05\x97\x02\x0e\x19\x03Q09\x86\x05-0\x06&* 
!(&\x060-\x05\x86=,P\x02\x19\x0e\x01\x98\x05\xfe\x99~\xb1~~\xb1\x00\x07\x00d\x00\x00\x04\xb0\x05\x14\x00\x13\x00\x17\x00!\x00%\x00)\x00-\x001\x00\x00\x01!2\x16\x1d\x01!2\x16\x1d\x01!5463!546\x17\x15!5\x01\x11\x14\x06#!\"&5\x11\x17\x113\x113\x113\x113\x113\x113\x113\x11\x01\xf4\x01,);\x01\x13\n\x0f\xfb\xb4\x0f\n\x01\x13;)\x01,\x01,;)\xfdD);dddddddd\x05\x14;)d\x0f\nKK\n\x0fd);ddd\xfe\xd4\xfc\xe0);;)\x03 d\xfdD\x02\xbc\xfdD\x02\xbc\xfdD\x02\xbc\xfdD\x02\xbc\x00\x01\x00\x0c\x00\x00\x05\x08\x04\xd1\x00\x1f\x00\x00\x13\x0162\x17\x01\x16\x06+\x01\x11\x14\x06+\x01\"&5\x11!\x11\x14\x06+\x01\"&5\x11#\"&\x12\x02l\x08\x15\x07\x02`\x08\x05\n\xaf\x0f\n\xfa\n\x0f\xfe\xd4\x0f\n\xfa\n\x0f\xaf\n\x05\x02j\x02`\x07\x07\xfd\xa0\x08\n\xfd\xc1\n\x0f\x0f\n\x01w\xfe\x89\n\x0f\x0f\n\x02?\n\x00\x02\x00d\x00\x00\x03\xe8\x04\xb0\x00\x11\x00\x17\x00\x00\x01\x11\x14\x163!\x11\x14\x06#!\"&5\x11463\x01#\"&=\x01\x02X;)\x01,\x1d\x15\xfc\xe0\x15\x1d\x1d\x15\x03R\xfa\x15\x1d\x04\xb0\xfep);\xfdv\x15\x1d\x1d\x15\x04L\x15\x1d\xfep\x1d\x15\xfa\x00\x03\x00\x17\x00\x17\x04\x99\x04\x99\x00\x0f\x00\x1b\x000\x00\x00\x002\x1e\x02\x14\x0e\x02\".\x024>\x01\x04\"\x0e\x01\x14\x1e\x012>\x014&\x0532\x16\x15\x1132\x16\x1d\x01\x14\x06+\x01\"&5\x1146\x01\xe3\xea\xd6\x9b[[\x9b\xd6\xea\xd6\x9b[[\x9b\x01\xbf\xe8\xc5rr\xc5\xe8\xc5rr\xfe|2\n\x0f\xaf\n\x0f\x0f\n\xfa\n\x0f\x0f\x04\x99[\x9b\xd6\xea\xd6\x9b[[\x9b\xd6\xea\xd6\x9b;r\xc5\xe8\xc5rr\xc5\xe8\xc5\x0d\x0f\n\xfe\xed\x0f\n2\n\x0f\x0f\n\x01^\n\x0f\x00\x00\x00\x00\x02\xff\x9c\x00\x00\x05\x14\x04\xb0\x00\x0b\x00\x0f\x00\x00)\x01\x03#\x03!\x013\x033\x033\x01\x033\x03\x05\x14\xfd\xe6)\xf2)\xfd\xe6\x01\xaf\xd1\x15\xa2\x14\xd0\xfe\x9e\x1b\xe0\x1b\x01\x90\xfep\x04\xb0\xfe\xd4\x01,\xfep\xfe\xd4\x01,\x00\x00\x00\x00\x02\x00d\x00\x00\x04\xb0\x04\xb0\x00\x15\x00/\x00\x00\x0132\x16\x15\x1132\x16\x07\x01\x06\"'\x01&6;\x01\x1146\x0132\x16\x15\x11\x14\x06#!\"&5\x1146;\x012\x16\x1d\x01!546\x02&\xc8\x15\x1d\xbf\x14\x0b\x0d\xfe\xb9\x0d&\x0d\xfe\xb9\x0d\x0b\x14\xbf\x1d\x02T2\n\x0f\x0f\n\xfb\xe6\n\x0f\x0f\n2\n\x0f\x03\x84\x0f\x04\xb0\x1d\x15\xfe>\x17\x10\xfep\x10\x10\x01\x90\x10\x17\x01\xc2\x15\x1d\xfc\xe0\x0f\n\xfe\xa2\n\x0f\x0f\n\x01^\n\x0f\x0f\n\xaf\xaf\n\x0f\x00\x03\x00\x17\x00\x17\x04\x99\x04\x99\x00\x0f\x00\x1b\x001\x00\x00\x002\x1e\x02\x14\x0e\x02\".\x024>\x01\x04\"\x0e\x01\x14\x1e\x012>\x014&\x0532\x16\x15\x1132\x16\x07\x03\x06\"'\x03&6;\x01\x1146\x01\xe3\xea\xd6\x9b[[\x9b\xd6\xea\xd6\x9b[[\x9b\x01\xbf\xe8\xc5rr\xc5\xe8\xc5rr\xfe|\x96\n\x0f\x89\x15\n\x0d\xdf\x0d&\x0d\xdf\x0d\n\x15\x89\x0f\x04\x99[\x9b\xd6\xea\xd6\x9b[[\x9b\xd6\xea\xd6\x9b;r\xc5\xe8\xc5rr\xc5\xe8\xc5\x0d\x0f\n\xfe\xed\x17\x10\xfe\xed\x10\x10\x01\x13\x10\x17\x01\x13\n\x0f\x00\x00\x00\x03\x00\x17\x00\x17\x04\x99\x04\x99\x00\x0f\x00\x1b\x001\x00\x00\x002\x1e\x02\x14\x0e\x02\".\x024>\x01\x04\"\x0e\x01\x14\x1e\x012>\x014&%\x13\x16\x06+\x01\x11\x14\x06+\x01\"&5\x11#\"&7\x1362\x01\xe3\xea\xd6\x9b[[\x9b\xd6\xea\xd6\x9b[[\x9b\x01\xbf\xe8\xc5rr\xc5\xe8\xc5rr\xfe\xe7\xdf\x0d\n\x15\x89\x0f\n\x96\n\x0f\x89\x15\n\x0d\xdf\x0d&\x04\x99[\x9b\xd6\xea\xd6\x9b[[\x9b\xd6\xea\xd6\x9b;r\xc5\xe8\xc5rr\xc5\xe8\xc5\x01\xfe\xed\x10\x17\xfe\xed\n\x0f\x0f\n\x01\x13\x17\x10\x01\x13\x10\x00\x00\x00\x00\x02\x00\x00\x00\x00\x04\xb0\x04\xb0\x00\x19\x009\x00\x00\x13!2\x16\x17\x13\x16\x15\x11\x14\x06\x07\x06#!\"&'&5\x1347\x13>\x01\x05!\"\x06\x07\x03\x06\x16;\x012\x16\x1f\x01\x1e\x01;\x0126?\x01>\x01;\x0126'\x03.\x01\xe1\x02\xee\n\x13\x03\xba\x07\x08\x05\x0c\x19\xfb\xb4\x0c\x1e\x02\x06\x01\x07\xb9\x03\x13\x02\x97\xfd\xd4\n\x12\x02W\x02\x0c\n\x96\n\x13\x02&\x02\x13\n\xfa\n\x13\x02&
[… escaped binary byte data (embedded binary asset content, not human-readable) omitted …]
\x013\x013\x01!\x15#\"\x06\x1d\x01!54&#\x02\xbc\x01\x90\xfe\xf2\xaa\xfe\xf2\xaa\xfe\xd4\xfe\xd4\xaa\xfe\xf2\xaa\xfe\xf2\x01\x902\x15\x1d\x01\x90\x1d\x15dd\x01,\x01,\x01M\xfe\xb3\xfe\xd4\xfe\xd4d\x1d\x1522\x15\x1d\x00\x00\x00\x00\x01\x00y\x00\x00\x047\x04\x9b\x00/\x00\x00\x012\x16\x17\x1e\x01\x15\x14\x06\x07\x16\x15\x14\x06#\"'\x152\x16\x1d\x01!54635\x06#\"&547.\x0154632\x174&546\x02X^\x93\x1aY{;2 iJ7-\x15\x1d\xfe\xd4\x1d\x15-7Ji\x04/9iJ\x05\x12\x02\xa3\x04\x9bqY\x06\x83Z=g\x1f\x1d\x1aJi\x1e\xfb\x1d\x1522\x15\x1d\xfb\x1eiJ\x12\x14\x15X5Ji\x02\x02\x10\x05t\xa3\x00\x00\x00\x06\x00'\x00\x14\x04\x89\x04\x9c\x00\x11\x00*\x00B\x00J\x00b\x00{\x00\x00\x01\x16\x12\x02\x07\x0e\x01\"&'&\x02\x127>\x012\x16\x05\"\x07\x0e\x01\x07\x06\x16\x1f\x01\x163276767>\x01/\x01&'&\x17\"\x07\x0e\x01\x07\x06\x16\x1f\x01\x16327>\x017>\x01/\x01&'&\x16&\"\x06\x14\x16267\"\x07\x0e\x01\x07\x0e\x01\x1f\x01\x16\x17\x16327>\x0176&/\x01&\x17\"\x07\x06\x07\x06\x07\x0e\x01\x1f\x01\x16\x17\x16327>\x0176&/\x01&\x03\xf2oOOoS\xd9\xdc\xd9SoOOoS\xd9\xdc\xd9\xfe=\x04\x04y\xb1\"\x04\x0d\x0c$\x03\x04\x17\x06\x1bGF`\x0b\x0d\x03 \x03\x0b\x07\x1c\x04\x05Pu\x18\x04\x0c\x0d\"\x04\x04\x16\x06\x12Q9\x0c\x0c\x03 \x03\x0b\x07\xf9c\x8ccc\x8ccV\x16\x06\x12Q:\x0b\x0c\x03 \x03\x0b\x07\x08\x04\x05Pu\x18\x04\x0d\x0c\"\x04\x8d\x17\x06\x1bGF`\x0b\x0d\x03 \x03\x0b\x07\x08\x04\x04y\xb1\"\x04\x0d\x0c$\x03\x03\xf2o\xfe\xd5\xfe\xd5oSWWSo\x01+\x01+oSWW\x1c\x01\"\xb1y\x0c\x16\x03 \x01\x16`FG\x1b\x03\x15\x0c#\x0d\x06\x04\x91\x02\x18uP\x0d\x16\x03 \x01\x15:Q\x12\x03\x15\x0b#\x0c\x07\x04\xfacc\x8ccc\x15\x15:Q\x11\x04\x15\x0b#\x0c\x07\x04\x02\x18uP\x0d\x16\x03 \x01$\x16`FG\x1b\x03\x15\x0c#\x0d\x06\x04\x01\"\xb1y\x0c\x16\x03 \x01\x00\x00\x00\x05\x00d\x00\x00\x03\xe8\x04\xb0\x00\x0c\x00\x0f\x00\x16\x00\x1c\x00\"\x00\x00\x01!\x11#5!\x15!\x11463!\x01#5\x033\x07'353\x03!\"&5\x11\x05\x15\x14\x06+\x01\x02\xbc\x01,\xb4\xfe\xac\xfe\x84\x0f\n\x02?\x01,\xc8d\xa2\xd4\xd4\xa2d\xb4\xfeu\n\x0f\x03\x84\x0f\n\xc3\x03 \xfe\x84\xc8\xc8\x02\xf3\n\x0f\xfe\xd4\xc8\xfc\xe0\xd4\xd4\xc8\xfe\x0c\x0f\n\x01\x8b\xc8\xc3\n\x0f\x00\x00\x00\x00\x05\x00d\x00\x00\x03\xe8\x04\xb0\x00 \x00\x0c\x00\x13\x00\x1a\x00!\x00\x00\x01!\x11 \x01\x11463!\x01#5\x13#\x15#5#7\x03!\"&=\x01)\x01\x15\x14\x06+\x015\x02\xbc\x01,\xfe\xa2\xfd\xda\x0f\n\x02?\x01,\xc8>\xa2d\xa2\xd4\xaa\xfe\x9d\n\x0f\x01|\x02\x08\x0f\n\x9b\x03 \xfd\xf8\x01^\xfd\xda\x04G\n\x0f\xfe\xd4\xc8\xfc|\xc8\xc8\xd4\xfed\x0f\n77\n\x0fP\x00\x00\x00\x00\x03\x00\x00\x00\x00\x04\xb0\x03\xf4\x00\x08\x00\x19\x00\x1f\x00\x00\x01#\x153\x17!\x11#7\x0532\x1e\x02\x15!\x15!\x03\x1134>\x02\x013\x03!\x01!\x04\x8a\xa2dd\xfe\xd4\xa2\xd4\xfd\x12\xc8\x1b\x1a!\x0e\x01,\xfd\xa8\xc8d\x0e!\x1a\x02s\xf0\xf0\xfc\xe0\x01,\x01\xf4\x03 
\xc8d\x01,\xd4\xd4\x04\x11+$d\xfe\xa2\x01\xc2$+\x11\x04\xfep\xfep\x01\xf4\x00\x00\x00\x03\x00\x00\x00\x00\x04L\x04L\x00\x19\x002\x009\x00\x00\x0132\x16\x1d\x0132\x16\x1d\x01\x14\x06#!\"&=\x0146;\x01546\x0552\x16\x15\x11\x14\x06#!\"'7\x01\x11463\x15\x14\x163!26\x01\x075#535\x02\x8ad\x15\x1d2\x15\x1d\x1d\x15\xfe\xd4\x15\x1d\x1d\x152\x1d\x01s);;)\xfd\xa8\x01\x13\xf6\xfe\xba;)X>\x01,>X\xfd\xb4\xd4\xc8\xc8\x04L\x1d\x152\x1d\x15d\x15\x1d\x1d\x15d\x15\x1d2\x15\x1d\xfa\x96;)\xfc\xe0);\x04\xf6\x01F\x01D);\x96>XX\xfd\xe6\xd4\xa2d\xa2\x00\x03\x00d\x00\x00\x04\xbc\x04L\x00\x19\x006\x00=\x00\x00\x0132\x16\x1d\x0132\x16\x1d\x01\x14\x06#!\"&=\x0146;\x01546\x0552\x16\x15\x11#\x113\x14\x0e\x02#!\"&5\x11463\x15\x14\x163!26\x01\x075#535\x01\xc2d\x15\x1d2\x15\x1d\x1d\x15\xfe\xd4\x15\x1d\x1d\x152\x1d\x01s);\xc8\xc8\x0e!\x1a\x1b\xfd\xa8);;)X>\x01,>X\x01\x9c\xd4\xc8\xc8\x04L\x1d\x152\x1d\x15d\x15\x1d\x1d\x15d\x15\x1d2\x15\x1d\xfa\x96;)\xfe\x0c\xfe\xd4$+\x11\x04;)\x03 );\x96>XX\xfd\xe6\xd4\xa2d\xa2\x00\x00\x00\x03\xff\xa2\x00\x00\x05\x16\x04\xd4\x00\x0b\x00\x1b\x00\x1f\x00\x00 \x01\x16\x06#!\"&7\x0162\x13#\"\x06\x17\x13\x1e\x01;\x01267\x136&\x03\x1535\x02\x92\x02}\x17 ,\xfb\x04, \x17\x02}\x16@D\xd0\x14\x18\x04:\x04#\x146\x14#\x04:\x04\x18\xe0\xc8\x04\xad\xfb\xb0&77&\x04P'\xfeL\x1d\x14\xfe\xd2\x14\x1d\x1d\x14\x01.\x14\x1d\xfe\x0cdd\x00\x00\x00\x00 \x00\x00\x00\x00\x04L\x04L\x00\x0f\x00\x1f\x00/\x00?\x00O\x00_\x00o\x00\x7f\x00\x8f\x00\x00\x0132\x16\x1d\x01\x14\x06+\x01\"&=\x0146\x1332\x16\x1d\x01\x14\x06+\x01\"&=\x0146!32\x16\x1d\x01\x14\x06+\x01\"&=\x0146\x0132\x16\x1d\x01\x14\x06+\x01\"&=\x0146!32\x16\x1d\x01\x14\x06+\x01\"&=\x0146!32\x16\x1d\x01\x14\x06+\x01\"&=\x0146\x0132\x16\x1d\x01\x14\x06+\x01\"&=\x0146!32\x16\x1d\x01\x14\x06+\x01\"&=\x0146!32\x16\x1d\x01\x14\x06+\x01\"&=\x0146\x01\xa9\xfa\n\x0f\x0f\n\xfa\n\x0f\x0f\n\xfa\n\x0f\x0f\n\xfa\n\x0f\x0f\x01\x9a\xfa\n\x0f\x0f\n\xfa\n\x0f\x0f\xfc\xea\xfa\n\x0f\x0f\n\xfa\n\x0f\x0f\x01\x9a\xfa\n\x0f\x0f\n\xfa\n\x0f\x0f\x01\x9a\xfa\n\x0f\x0f\n\xfa\n\x0f\x0f\xfc\xea\xfa\n\x0f\x0f\n\xfa\n\x0f\x0f\x01\x9a\xfa\n\x0f\x0f\n\xfa\n\x0f\x0f\x01\x9a\xfa\n\x0f\x0f\n\xfa\n\x0f\x0f\x04L\x0f\n\x96\n\x0f\x0f\n\x96\n\x0f\xfe\xd4\x0f\n\x96\n\x0f\x0f\n\x96\n\x0f\x0f\n\x96\n\x0f\x0f\n\x96\n\x0f\xfe\xd4\x0f\n\x96\n\x0f\x0f\n\x96\n\x0f\x0f\n\x96\n\x0f\x0f\n\x96\n\x0f\x0f\n\x96\n\x0f\x0f\n\x96\n\x0f\xfe\xd4\x0f\n\x96\n\x0f\x0f\n\x96\n\x0f\x0f\n\x96\n\x0f\x0f\n\x96\n\x0f\x0f\n\x96\n\x0f\x0f\n\x96\n\x0f\x00\x00\x00\x03\x00\x00\x00\x00\x04\xb0\x05\x14\x00\x19\x00)\x003\x00\x00\x013\x15#\x15!2\x16\x0f\x01\x06\x07!&/\x01&63!5#5353\x01!2\x16\x14\x06+\x01\x17!7#\"&46\x03!2\x16\x1d\x01!546\x02\xbcdd\x01^>1\x1cB)(\xfc\xfc()B\x1c1>\x01^dd\xc8\xfe>\x02\xbc\x15\x1d\x1d\x15\x0c\x89\xfcJ\x8a\x0d\x15\x1d\x1d\xb3\x04L\x15\x1d\xfbP\x1d\x04\xb0\xc8dO7\x84S33S\x847Od\xc8d\xfc|\x1d*\x1ddd\x1d*\x1d\xfe\xd4\x1d\x1522\x15\x1d\x00\x00\x04\x00\x00\x00\x00\x04\xb0\x05\x14\x00\x05\x00\x19\x00+\x005\x00\x00\x002\x16\x15#4\x07!\x16\x15\x14\x07!2\x16\x0f\x01!'&63!&54\x03!2\x16\x14\x06+\x01\x15\x05!%5#\"&46\x03!2\x16\x1d\x01!546\x020P9\xc2<\x01:\x12\x03\x01H)\x07\"\xaf\xfdZ\xb2\"\n)\x01H\x03\xaf\x02\xbc\x15\x1d\x1d\x15\x96\x01\x13\xfcJ\x01\x13\x96\x15\x1d\x1d\xb3\x04L\x15\x1d\xfbP\x1d\x05\x14;))\x8d%&\x08\x11!\x16\x91\x91\x16!\x11\x08&\xfe\x95\x1d*\x1d\xc8\xc8\xc8\xc8\x1d*\x1d\xfd\xa8\x1d\x1522\x15\x1d\x00\x04\x00\x00\x00\x00\x04\xb0\x04\x9d\x00\x07\x00\x14\x00$\x00.\x00\x00\x002\x16\x14\x06\"&4\x1332\x16\x15\x14\x17!65463\x01!2\x16\x14\x06+\x01\x17!7#\"&46\x03!2\x16\x1d\x01!546\x02\x0d\x96jj\x96j\xb7.\"+'\xfe\xbc'+#\xfe\
xcd\x02\xbc\x15\x1d\x1d\x15\x0d\x8a\xfcJ\x89\x0c\x15\x1d\x1d\xb3\x04L\x15\x1d\xfbP\x1d\x04\x9dj\x96jj\x96\xfe\xeb9:LkkL:9\xfer\x1d*\x1ddd\x1d*\x1d\xfe\xd4\x1d\x1522\x15\x1d\x00\x04\x00\x00\x00\x00\x04\xb0\x05\x14\x00\x0f\x00\x1c\x00,\x006\x00\x00\x012\x1e\x01\x15\x14\x06\"&547\x177'6\x1332\x16\x15\x14\x17!65463\x01!2\x16\x14\x06+\x01\x17!7#\"&46\x03!2\x16\x1d\x01!546\x02X/[3o\x9co\"\x90o\xa3\"\x1f.\"+'\xfe\xbc'+#\xfe\xcd\x02\xbc\x15\x1d\x1d\x15\x0d\x8a\xfcJ\x89\x0c\x15\x1d\x1d\xb3\x04L\x15\x1d\xfbP\x1d\x05\x14k\x8b6NooN>Q\x8fo\xa3\x1a\xfe\n9:LkkL:9\xfer\x1d*\x1ddd\x1d*\x1d\xfe\xd4\x1d\x1522\x15\x1d\x00\x00\x00\x03\x00\x00\x00\x00\x04\xb0\x05\x12\x00\x12\x00\"\x00,\x00\x00\x01\x05\x15!\x14\x1e\x03\x17!.\x0154>\x027\x01!2\x16\x14\x06+\x01\x17!7#\"&46\x03!2\x16\x1d\x01!546\x02X\x01,\xfe\xd4%??M\x13\xfd\xee<=Bm\x8fJ\xfe\xa2\x02\xbc\x15\x1d\x1d\x15\x0d\x8a\xfcJ\x89\x0c\x15\x1d\x1d\xb3\x04L\x15\x1d\xfbP\x1d\x04\xb0\xa1\x8b9fQ?H\x19S\xbdTT\xa1vK\x04\xfc~\x1d*\x1ddd\x1d*\x1d\xfe\xd4\x1d\x1522\x15\x1d\x00\x02\x00\xc8\x00\x00\x03\xe8\x05\x14\x00\x0f\x00)\x00\x00\x002\x16\x1d\x01\x1e\x01\x1d\x01!546754\x03!2\x16\x17#\x153\x15#\x153\x15#\x153\x14\x06#!\"&5\x1146\x02/R;.6\xfep6.d\x01\x906\\\x1a\xac\xc8\xc8\xc8\xc8\xc8uS\xfepSuu\x05\x14;)N\x1a\\6226\\\x1aN)\xfeG6.dddddSuuS\x01\x90Su\x00\x00\x03\x00d\xff\xff\x04L\x04L\x00\x0f\x00/\x003\x00\x00\x13!2\x16\x15\x11\x14\x06#!\"&5\x1146\x03!2\x16\x1d\x01\x14\x06#!\x17\x16\x14\x06\"/\x01!\x07\x06\"&4?\x01!\"&=\x0146\x05\x07!'\x96\x03\x84\x15\x1d\x1d\x15\xfc|\x15\x1d\x1d\x04\x03\xb6\n\x0f\x0f\n\xfe\xe5\xe0\x0d\x1a%\x0dX\xfd\xf4W\x0d&\x1a\x0d\xe0\xfe\xdf\n\x0f\x0f\x01\xaad\x01Dd\x04L\x1d\x15\xfe\x0c\x15\x1d\x1d\x15\x01\xf4\x15\x1d\xfdD\x0f\n2\n\x0f\xe0\x0d%\x1b\x0dXX\x0d\x1b%\x0d\xe0\x0f\n2\n\x0fddd\x00\x00\x00\x00\x04\x00\x00\x00\x00\x04\xb0\x04L\x00\x19\x00#\x00-\x007\x00\x00\x13!2\x16\x1d\x01#4&+\x01\"\x06\x15#4&+\x01\"\x06\x15#546\x03!2\x16\x15\x11!\x1146\x13\x15\x14\x06+\x01\"&=\x01!\x15\x14\x06+\x01\"&=\x01\xc8\x03 Sud;)\xfa);d;)\xfa);du\x11\x03\xe8);\xfbP;\xf1\x1d\x15d\x15\x1d\x03\xe8\x1d\x15d\x15\x1d\x04LuS\xc8);;));;)\xc8Su\xfe\x0c;)\xfe\xd4\x01,);\xfe\x0c2\x15\x1d\x1d\x1522\x15\x1d\x1d\x152\x00\x03\x00\x01\x00\x00\x04\xa9\x04\xac\x00\x0d\x00\x11\x00\x1b\x00\x00 \x01\x16\x14\x0f\x01!\x01&47\x0162 \x03\x03!2\x16\x1d\x01!546\x01\xe0\x02\xaa\x1f\x1f\x83\xfe\x1f\xfd\xfb \x01'\x1fY\xfe\xac\x01V\x01/\xfe\xab\xa2\x03 \x15\x1d\xfc|\x1d\x04\x8d\xfdU\x1fY\x1f\x83\x02\x06\x1fY\x1f\x01(\x1f\xfen\xfe\xaa\x010\x01U\xfc\x1b\x1d\x1522\x15\x1d\x00\x00\x00\x00\x02\x00\x8f\x00\x00\x04!\x04\xb0\x00\x17\x00/\x00\x00\x01\x03.\x01#!\"\x06\x07\x03\x06\x163!\x15\x14\x1626=\x01326\x03!546;\x01546;\x01\x113\x1132\x16\x1d\x0132\x16\x15\x04!\xbd\x08'\x15\xfep\x15'\x08\xbd\x08\x13\x15\x02q\x1d*\x1d}\x15\x13\xa8\xfd\xad\x1d\x152\x1d\x150\xc8/\x15\x1d2\x15\x1d\x02\x87\x01\xfa\x13\x1c\x1c\x13\xfe\x06\x13\x1c\x96\x15\x1d\x1d\x15\x96\x1c\xfd\x8c2\x15\x1d2\x15\x1d\x01,\xfe\xd4\x1d\x152\x1d\x15\x00\x00\x04\x00\x00\x00\x00\x04\xb0\x04\xb0\x00\x0d\x00\x10\x00\x1f\x00\"\x00\x00\x01!\x11\x14\x06#!\x11\x015463!\x01#5\x01!\x11\x14\x06#!\"&5\x11463!\x01#5\x03\x84\x01,\x0f\n\xfe\x89\xfe\xd4\x0f\n\x01w\x01,\xc8\xfd\xa8\x01,\x0f\n\xfdv\n\x0f\x0f\n\x01w\x01,\xc8\x03 
\xfd\xc1\n\x0f\x02O\x01,T\n\x0f\xfe\xd4\xc8\xfe\x0c\xfd\xc1\n\x0f\x0f\n\x03\xb6\n\x0f\xfe\xd4\xc8\x00\x02\xff\x9c\x00d\x05\x14\x04G\x00F\x00V\x00\x00\x0132\x1e\x02\x17\x16\x17\x16676'&7>\x01\x16\x17\x16\x07\x0e\x01+\x01\x0e\x01\x0f\x01\x0e\x01+\x01\"&?\x01\x06+\x01\"'\x07\x0e\x01+\x01\"&?\x01&/\x01.\x01=\x0146;\x0167'&6;\x016\x05#\"\x06\x1d\x01\x14\x16;\x0126=\x014&\x01\xe4\xc3K\x8ejI\x0c\x12\x11\x1fC\n\n\x19\x13\n\x05\x0f\x17\x0c)\x02\x03V=>\x078\x1c'\x03\"\x15d\x15\x18\x03\x0c1*\xc3\x18)\x0b\x03\"\x15d\x15\x18\x03\x1aT,\x9f\x13\x1c\x1d\x15|\x0b-o\x10\x07\x15\xcbt\x01E\xfa\n\x0f\x0f\n\xfa\n\x0f\x0f\x04GAk\x8aI\x0e\x06\n! \"%\x1b\x10\x08\x06\x08\x0c,=?W7|&\xea\x14\x1d\x1d\x14F\x0e\x08@\x14\x1d\x1d\x14\x9dJe5\x07&\x152\x15\x1dWO_\x0e\x13e_\x0f\n2\n\x0f\x0f\n2\n\x0f\x00\x06\xff\x9c\xff\xe6\x05\x14\x04~\x00 \x00$\x004\x00<\x00R\x00b\x00\x00\x01%6\x16\x1f\x01\x16\x06\x0f\x01%32\x16\x1f\x01!2\x16\x1d\x01\x14\x06\x07\x05\x06&'&#!\"&=\x0146\x17#\"\x06\x1d\x01\x14;\x012654&'&\x04\"\x06\x14\x16264\x017>\x01\x1e\x01\x17\x1e\x01?\x01\x17\x16\x06\x0f\x01\x06&/\x01&6%\x07\x06\x1f\x01\x1e\x01?\x016'.\x01'.\x01\x02\x81\x01\xa7\x13.\x0e \x0e\x03\x10\x8b\xfc+jCH\x1ef\x037\x15\x1d\x1c\x13\xfd\x1f\x17\" *:\xfe\xd4>XX\xb9P\x13\x12*\x86\x10\x0b \x0b\x12\x01\x80@--@-\xfe\x0f\x98\x12 \x1e\x13\x10\x1c-\x1a?0\x0d!3P/|)\x82( \x01)f\x1f!%\x0d\x1d\x11=\x13\x05\x02\x14\x07\x0b\x10\x03\x84\xf7\x08\x0b\x10&\x0f*\x0dx\xc8\"6\xd4\x1d\x152\x15&\x07\x84\x04\x14\x0fCX>\xc8>X\xac\x1c\x1583\x10\x10\x0bD\x11\x1c\xc9-@--@\xfe\xdb\x82\x13\n\x11\x12\x13# \x03\x05\xb3=I+E( /\x97/}\x1cX\x1b&+\x0f \x0b5\x10!\x14H \x0c\x04\x00\x00\x00\x00\x03\x00d\x00\x00\x049\x04\xb0\x00Q\x00`\x00o\x00\x00\x0132\x16\x1d\x01\x1e\x01\x17\x16\x0e\x02\x0f\x012\x1e\x05\x15\x14\x0e\x05#\x15\x14\x06+\x01\"&=\x01#\x15\x14\x06+\x01\"&=\x01#\"&=\x0146;\x01\x11#\"&=\x0146;\x01546;\x012\x16\x1d\x013546\x03\x15!2>\x02574.\x03#\x01\x15!2>\x02574.\x03#\x02q\x96\n\x0fOh\x01\x01 ..\x10\x11\x06\x1240:*\x1d\x17\"6-@#\x1a\x0f\n\x96\n\x0fd\x0f\n\x96\n\x0f\xaf\n\x0f\x0f\nKK\n\x0f\x0f\n\xaf\x0f\n\x96\n\x0fd\x0f\xd7\x01\x07\x1c)\x13\x0b\x01\x01\x0b\x13)\x1c\xfe\xf9\x01k\x1c)\x13\x0b\x01\x01\x0b\x13)\x1c\x04\xb0\x0f\nm!mJ.M-\x1f\x06\x06\x03\x0f\x14(2N-;]<*\x15\x0b\x02K\n\x0f\x0f\nKK\n\x0f\x0f\nK\x0f\n\x96\n\x0f\x02X\x0f\n\x96\n\x0fK\n\x0f\x0f\nKK\n\x0f\xfe\xd4\xc8\x15\x1d\x1d\x0b\n\x04\x0e\"\x1a\x16\xfep\xc8\x15\x1d\x1d\x0b\n\x04\x0e\"\x1a\x16\x00\x00\x03\x00\x04\x00\x02\x04\xb0\x04\xae\x00\x17\x00)\x00,\x00\x00\x13!2\x16\x15\x11\x14\x06\x0f\x01\x0e\x01#!\"&'.\x025\x1146\x04\"\x0e\x04\x0f\x01\x17!7.\x05\x03#\x13\xd4\x03\x0cVz$\x12\x12\x1d\x81R\xfd\xc4R\x82\x1c\x08\x18(z\x02 \x8c}VG+\x1d\x06\x06\x9c\x020\x9c\x02\x08 )IU!\x9d\xc3\x04\xaezV\xfe`3\xb7BBWwvX\x1cZ\xc53\x01\xa0Vz\x99\x17&--%\x0c\x0c\xf3\xf3\x05\x0f,(1#\xfe\xc2\x01\x05\x00\x02\x00\xc8\x00\x00\x03\x84\x05\x14\x00\x0f\x00\x19\x00\x00\x0132\x16\x15\x11\x14\x06#!\"&5\x1146\x01\x15\x14\x06+\x01\"&=\x01\x01\xdb\x96g\xacT)\xfe>)T\xac\x01H6\x15\x96\x156\x05\x14\xacg\xfe\x0c)TT)\x01\xf4g\xac\xfc\x18\xe1\x1566\x15\xe1\x00\x00\x02\x00\xc8\x00\x00\x03\x84\x05\x14\x00\x0f\x00\x19\x00\x00\x013\x14\x163\x11\x14\x06#!\"&5\x1146\x01\x15\x14\x06+\x01\"&=\x01\x01\xdb`\xb3\x96T)\xfe>)T\xac\x01H6\x15\x96\x156\x05\x14\x96\xb3\xfeB)TT)\x01\xf4g\xac\xfc\x18\xe1\x1566\x15\xe1\x00\x00\x02\x00\x00\x00\x14\x05\x0e\x04\x1a\x00\x14\x00\x1a\x00\x00 \x01%\x07\x15\x17\x15'\x075754&>\x02?\x01' 
\x01\x05%5\x05%\x05\x0e\xfd\x82\xfe\x86Nd\x96\x96d\x01\x01\x01\x05\x04/\x93\x02\x82\x01\\\xfe\xa2\xfe\xa2\x01^\x01^\x02\xff\xfe\xe5\xaa<\xe0\x96\xc7\x94\x95\xc8\x96\xfa\x04\x0d\x06\n\x06\x03(A\x01\x1b\xfdb\xa6\xa6\x93\xa5\xa5\x00\x00\x03\x00d\x01\xf4\x04\xb0\x03 \x00\x07\x00\x0f\x00\x17\x00\x00\x122\x16\x14\x06\"&4$2\x16\x14\x06\"&4$2\x16\x14\x06\"&4\xbc|XX|X\x01\xe8|XX|X\x01\xe8|XX|X\x03 X|XX|XX|XX|XX|XX|\x00\x00\x00\x00\x03\x01\x90\x00\x00\x02\xbc\x04L\x00\x07\x00\x0f\x00\x17\x00\x00\x002\x16\x14\x06\"&4\x122\x16\x14\x06\"&4\x122\x16\x14\x06\"&4\x01\xe8|XX|XX|XX|XX|XX|X\x04LX|XX|\xfe\xc8X|XX|\xfe\xc8X|XX|\x00\x00\x00\x03\x00d\x00d\x04L\x04L\x00\x0f\x00\x1f\x00/\x00\x00\x13!2\x16\x1d\x01\x14\x06#!\"&=\x0146\x13!2\x16\x1d\x01\x14\x06#!\"&=\x0146\x13!2\x16\x1d\x01\x14\x06#!\"&=\x0146}\x03\xb6\n\x0f\x0f\n\xfcJ\n\x0f\x0f\n\x03\xb6\n\x0f\x0f\n\xfcJ\n\x0f\x0f\n\x03\xb6\n\x0f\x0f\n\xfcJ\n\x0f\x0f\x04L\x0f\n\x96\n\x0f\x0f\n\x96\n\x0f\xfep\x0f\n\x96\n\x0f\x0f\n\x96\n\x0f\xfep\x0f\n\x96\n\x0f\x0f\n\x96\n\x0f\x00\x00\x00\x04\x00\x00\x00\x00\x04\xb0\x04\xb0\x00\x0f\x00\x1f\x00/\x003\x00\x00\x01!2\x16\x15\x11\x14\x06#!\"&5\x1146\x05!\"\x06\x15\x11\x14\x163!265\x114&\x05!2\x16\x15\x11\x14\x06#!\"&5\x1146\x17\x15!5\x01^\x01\xf4\xa2\xbc\xbb\xa3\xfe\x0c\xa5\xb9\xb9\x02\xcb\xfd\xa8);;)\x02X);;\xfd\xb1\x01\xf4\x15\x1d\x1d\x15\xfe\x0c\x15\x1d\x1dG\x01\x90\x04\xb0\xbb\xa3\xfe\x0c\xa5\xb9\xb9\xa5\x01\xf4\xa5\xb9\xc8;)\xfd\xa8);;)\x02X);d\x1d\x15\xfe\xd4\x15\x1d\x1d\x15\x01,\x15\x1dd\xc8\xc8\x00\x00\x00\x00\x01\x00d\x00d\x04\xb0\x04L\x00;\x00\x00\x13!2\x16\x14\x06+\x01\x1532\x16\x14\x06+\x01\x1532\x16\x14\x06+\x01\x1532\x16\x14\x06#!\"&46;\x015#\"&46;\x015#\"&46;\x015#\"&46\x96\x03\xe8\x15\x1d\x1d\x1522\x15\x1d\x1d\x1522\x15\x1d\x1d\x1522\x15\x1d\x1d\x15\xfc\x18\x15\x1d\x1d\x1522\x15\x1d\x1d\x1522\x15\x1d\x1d\x1522\x15\x1d\x1d\x04L\x1d*\x1d\xc8\x1d*\x1d\xc8\x1d*\x1d\xc8\x1d*\x1d\x1d*\x1d\xc8\x1d*\x1d\xc8\x1d*\x1d\xc8\x1d*\x1d\x00\x00\x00\x06\x01,\x00\x05\x03\xe8\x04\xa3\x00\x07\x00\x0d\x00\x13\x00\x19\x00\x1f\x00*\x00\x00\x01\x1e\x01\x06\x07.\x016\x012\x16\x15\"&%\x14\x06#46\x012\x16\x15\"&%\x14\x06#46\x03\x15\x14\x06\"&=\x01\x1632\x02\x8aW??WW??\xfe\xf9|\xb0|\xb0\x02\xbc\xb0|\xb0\xfd\xc0|\xb0|\xb0\x02\xbc\xb0|\xb0\xb0\x1d*\x1d(\x03\x11\x04\xa3C\xb2\xb2BB\xb2\xb2\xfe\xc0\xb0|\xb0||\xb0|\xb0\xfe\xd4\xb0|\xb0||\xb0|\xb0\xfe\xd3\x90\x15\x1d\x1d\x15\x8e\x04\x00\x00\x01\xff\xb5\x00\xc8\x04\x94\x03\x81\x00B\x00\x00\x0176\x17\x01\x1e\x01\x07\x0e\x01+\x012\x15\x14\x0e\x04+\x01\"\x1147&\"\x07\x16\x15\x10+\x01\".\x03543#\"&'&67\x016\x1f\x01\x1e\x01\x0e\x01/\x01\x07!'\x07\x06.\x016\x02\xe9E\x19\x14\x01*\x0b\x08\x06\x05\x1a\x0f\x08\x01\x04\x0d\x1b'?)\x92\xb8\n\x14T\x15\x0b\xb8\x8e0I'\x19\x07\x02\x07\x0f\x19\x06\x06\x08\x0b\x01*\x14\x1aL\x14\x15\n#\x143\xb6\x03{\xb6,\x14#\n\x16\x03n\x13\x06\x12\xfe\xd9\x0b\x1f\x0f\x0e\x13\x0d\x1d6F82\x1c\x01 \x18\x17\x04\x04\x18\x17\xfe\xe0*\x035\x11#\"\x0e\x05\x15#\x11!\x01#4.\x03+\x01\x11\x14\x16\x173\x15#525\x11#\"\x0e\x03\x15#5!\x04\xb02\x08\x0b\x19\x13&\x18\x19\xc82\x19\x19\xfep\x04\x0e\"\x1a\x16\xc8\x19\x18&\x13\x19\x0b\x082\x03\xe8\xfdD\x19\x08\n\x18\x10\x11d\x19\x0d\x0c\xc82d\x11\x10\x18\n\x08\x19\x01\xf4\x03\x84\x15 \x15\x0e\x08\x03\x01\xfc\xae\x16\x19\x01\x02dd\x01\x05 \x15\x0e\x03R\x01\x03\x08\x0e\x15 \x15\x01,\xfd\x12\x0f\x13\n\x05\x01\xfeW\x0b\x0d\x0122\x19\x01\xa9\x01\x05\n\x13\x0f\x96\x00\x00\x00\x00\x03\x00\x00\x00\x00\x04L\x04\xae\x00\x1d\x00 
\x000\x00\x00\x015\"'.\x01/\x01\x01#\x01\x06\x07\x0e\x01\x0f\x01\x15!5\"&?\x01!\x17\x16\x06#\x15\x01\x1b\x01\x01\x15\x14\x06#!\"&=\x01463!2\x16\x03\xe8\x19\x1e\x0e\x16\x05\x04\xfe\xdfE\xfe\xd4\x12\x15 \x1f\x0b\x0b\x01\x111;\x12E\x01%=\x0d!'\xfe\xec\x86y\x01\xb1\x1d\x15\xfc\x18\x15\x1d\x1d\x15\x03\xe8\x15\x1d\x01,2 \x0e\"\n\x0b\x02\xeb\xfd\x0e#\x15 \x13\x05\x0522+.\xb0\xa6\"A2\x01\x87\x01V\xfe\xaa\xfd\xe3d\x15\x1d\x1d\x15d\x15\x1d\x1d\x00\x03\x00\x00\x00\x00\x04\xb0\x04\xb0\x00\x0f\x00G\x00J\x00\x00\x13!2\x16\x15\x11\x14\x06#!\"&5\x1146\x05#\"\x07\x01\x06\x07\x06\x07\x06\x1d\x01\x14\x163!26=\x014&#\"'&?\x01!\x17\x16\x07\x06#\"\x06\x1d\x01\x14\x163!26=\x014&'\"'&'#\x01&\x13#\x132\x04L\x15\x1d\x1d\x15\xfb\xb4\x15\x1d\x1d\x02FF\x0d\x05\xfe\xd5\x11\x12\x12&\x0c\x0b \x01\x11\x08\x0c\x0c\x087\x10\n\n?\x01\n9\x0b\x11\x0c\x18\x08\x0c\x0c\x08\x019\x08\x0c\x0b\x08\x11\x19\x19\x0f\x01\xfe\xe0\x05\x0e\xc5g\x04\xb0\x1d\x15\xfb\xb4\x15\x1d\x1d\x15\x04L\x15\x1dR\x0c\xfd\x0f \x13\x12\x10\x05\x0d2\x08\x0c\x0c\x082\x08\x0c\x17\x0e\x19\xa3\x99\x1f\x18\x11\x0c\x082\x08\x0c\x0c\x082\x07\x0c\x01\x19\x1b$\x02\xec\x0c\xfe\x05\x01\x08\x00\x00\x04\x00\x00\x00\x00\x04\xb0\x04\xb0\x00\x03\x00\x13\x00#\x00'\x00\x00\x01!5!\x05!2\x16\x15\x11\x14\x06#!\"&5\x1146)\x012\x16\x15\x11\x14\x06#!\"&5\x1146\x17\x11!\x11\x04\xb0\xfbP\x04\xb0\xfb\x82\x01\x90\x15\x1d\x1d\x15\xfep\x15\x1d\x1d\x02m\x01\x90\x15\x1d\x1d\x15\xfep\x15\x1d\x1dG\x01,\x04Ld\xc8\x1d\x15\xfc|\x15\x1d\x1d\x15\x03\x84\x15\x1d\x1d\x15\xfep\x15\x1d\x1d\x15\x01\x90\x15\x1dd\xfe\xd4\x01,\x00\x04\x00\x00\x00\x00\x04\xb0\x04\xb0\x00\x0f\x00\x1f\x00#\x00'\x00\x00\x13!2\x16\x15\x11\x14\x06#!\"&5\x1146\x01!2\x16\x15\x11\x14\x06#!\"&5\x1146\x17\x11!\x11\x13!5!2\x01\x90\x15\x1d\x1d\x15\xfep\x15\x1d\x1d\x02m\x01\x90\x15\x1d\x1d\x15\xfep\x15\x1d\x1dG\x01,\xc8\xfbP\x04\xb0\x04\xb0\x1d\x15\xfc|\x15\x1d\x1d\x15\x03\x84\x15\x1d\xfe\x0c\x1d\x15\xfep\x15\x1d\x1d\x15\x01\x90\x15\x1dd\xfe\xd4\x01,\xfd\xa8d\x00\x00\x00\x00\x02\x00\x00\x00d\x04\xb0\x03\xe8\x00'\x00+\x00\x00\x13!2\x16\x15\x1135463!2\x16\x1d\x013\x15#\x15\x14\x06#!\"&=\x01#\x11\x14\x06#!\"&5\x1146\x01\x11!\x112\x01\x90\x15\x1dd\x1d\x15\x01\x90\x15\x1ddd\x1d\x15\xfep\x15\x1dd\x1d\x15\xfep\x15\x1d\x1d\x02\x9f\x01,\x03\xe8\x1d\x15\xfe\xa2\x96\x15\x1d\x1d\x15\x96d\x96\x15\x1d\x1d\x15\x96\xfe\xa2\x15\x1d\x1d\x15\x03 
\x15\x1d\xfe\xd4\xfe\xd4\x01,\x00\x00\x04\x00\x00\x00\x00\x04\xb0\x04\xb0\x00\x03\x00\x13\x00\x17\x00'\x00\x003#\x113\x17!2\x16\x15\x11\x14\x06#!\"&5\x1146\x17\x11!\x11\x01!2\x16\x15\x11\x14\x06#!\"&5\x1146ddd\x96\x01\x90\x15\x1d\x1d\x15\xfep\x15\x1d\x1dG\x01,\xfe\xa2\x03\x84\x15\x1d\x1d\x15\xfc|\x15\x1d\x1d\x04\xb0d\x1d\x15\xfep\x15\x1d\x1d\x15\x01\x90\x15\x1dd\xfe\xd4\x01,\xfe\x0c\x1d\x15\xfep\x15\x1d\x1d\x15\x01\x90\x15\x1d\x00\x00\x00\x00\x02\x00d\x00\x00\x04L\x04\xb0\x00'\x00+\x00\x00\x0132\x16\x15\x11\x14\x06+\x01\x15!2\x16\x15\x11\x14\x06#!\"&5\x11463!5#\"&5\x1146;\x0153\x07\x11!\x11\x02X\x96\x15\x1d\x1d\x15\x96\x01\xc2\x15\x1d\x1d\x15\xfc|\x15\x1d\x1d\x15\x01^\x96\x15\x1d\x1d\x15\x96d\xc8\x01,\x04L\x1d\x15\xfep\x15\x1dd\x1d\x15\xfep\x15\x1d\x1d\x15\x01\x90\x15\x1dd\x1d\x15\x01\x90\x15\x1dd\xc8\xfe\xd4\x01,\x00\x00\x00\x04\x00\x00\x00\x00\x04\xb0\x04\xb0\x00\x03\x00\x13\x00\x17\x00'\x00\x00!#\x113\x05!2\x16\x15\x11\x14\x06#!\"&5\x1146\x17\x11!\x11\x01!2\x16\x15\x11\x14\x06#!\"&5\x1146\x04\xb0dd\xfdv\x01\x90\x15\x1d\x1d\x15\xfep\x15\x1d\x1dG\x01,\xfc\xae\x03\x84\x15\x1d\x1d\x15\xfc|\x15\x1d\x1d\x04\xb0d\x1d\x15\xfep\x15\x1d\x1d\x15\x01\x90\x15\x1dd\xfe\xd4\x01,\xfe\x0c\x1d\x15\xfep\x15\x1d\x1d\x15\x01\x90\x15\x1d\x00\x00\x01\x01,\x000\x03o\x04\x80\x00\x0f\x00\x00 \x01\x06#\"&5\x114632\x17\x01\x16\x14\x03a\xfe\x12\x17\x12\x0e\x10\x10\x0e\x12\x17\x01\xee\x0e\x025\xfe\x12\x17\x1b\x19\x03\xe8\x19\x1b\x17\xfe\x12\x0e*\x00\x00\x00\x00\x01\x01A\x002\x03\x84\x04~\x00\x0b\x00\x00 \x016\x16\x15\x11\x14\x06'\x01&4\x01O\x01\xee\x1d**\x1d\xfe\x12\x0e\x02{\x01\xee\x1d\x11)\xfc\x18)\x11\x1d\x01\xee\x0e*\x00\x00\x00\x00\x01\x002\x01A\x04~\x03\x84\x00\x0b\x00\x00\x13!2\x16\x07\x01\x06\"'\x01&6d\x03\xe8)\x11\x1d\xfe\x12\x0e*\x0e\xfe\x12\x1d\x11\x03\x84*\x1d\xfe\x12\x0e\x0e\x01\xee\x1d*\x00\x00\x00\x00\x01\x002\x01,\x04~\x03o\x00\x0b\x00\x00 \x01\x16\x06#!\"&7\x0162\x02{\x01\xee\x1d\x11)\xfc\x18)\x11\x1d\x01\xee\x0e*\x03a\xfe\x12\x1d**\x1d\x01\xee\x0e\x00\x00\x00\x00\x02\x00\x08\x00\x00\x04\xb0\x04(\x00\x06\x00\n\x00\x00\x01\x15\x015-\x015\x01!5!\x02\xbc\xfdL\x01\x9d\xfec\x04\xa8\xfc\xe0\x03 \x02\xe5\xb6\xfe\xbd\xdd\xc1\xc1\xdd\xfb\xd8\xc8\x00\x00\x00\x00\x02\x00\x00\x00d\x04\xb0\x04\xb0\x00\x0b\x001\x00\x00\x01#\x153\x15!\x1135#5!\x0134>\x05;\x01\x11\x14\x06\x0f\x01\x15!5\".\x035\x1132\x1e\x05\x153\x11!\x04\xb0\xc8\xc8\xfe\xd4\xc8\xc8\x01,\xfbP2\x08\x0b\x19\x13&\x18\x19d2\x19\x19\x01\x90\x04\x0e\"\x1a\x16d\x19\x18&\x13\x19\x0b\x082\xfc\xe0\x03\x84dd\x01,dd\xfe\x0c\x15 \x15\x0e\x08\x03\x01\xfd\xda\x16\x19\x01\x02dd\x01\x05 \x15\x0e\x02&\x01\x03\x08\x0e\x15 \x15\x01,\x00\x00\x02\x00\x00\x00\x00\x04L\x03\xe8\x00%\x001\x00\x00\x01#4.\x05+\x01\x11\x14\x16\x1f\x01\x15!52>\x035\x11#\"\x0e\x05\x15#\x11!\x01#\x153\x15!\x1135#5!\x03 2\x08\x0b\x19\x13&\x18\x19d2\x19\x19\xfep\x04\x0e\"\x1a\x16d\x19\x18&\x13\x19\x0b\x082\x03 \x01,\xc8\xc8\xfe\xd4\xc8\xc8\x01,\x02\xbc\x15 \x15\x0e\x08\x03\x01\xfd\xda\x16\x19\x02\x01dd\x01\x05 \x15\x0e\x02&\x01\x03\x08\x0e\x15 \x15\x01,\xfc\xe0dd\x01,dd\x00\x00\x01\x00\xc8\x00f\x03r\x04J\x00\x12\x00\x00\x0132\x16\x07 \x01\x16\x06+\x01\"'\x01&47\x016\x02\xbd\xa0\x10\n\x0c\xfe0\x01\xd0\x0c\n\x10\xa0\x0d\n\xfe)\x07\x07\x01\xd7\n\x04J\x16\x0c\xfe0\xfe0\x0c\x16 \x01\xd7\x08\x14\x08\x01\xd7 \x00\x00\x01\x01>\x00f\x03\xe8\x04J\x00\x12\x00\x00\x0132\x17\x01\x16\x14\x07\x01\x06+\x01\"&7 \x01&6\x01S\xa0\x0d\n\x01\xd7\x07\x07\xfe)\n\x0d\xa0\x10\n\x0c\x01\xd0\xfe0\x0c\n\x04J \xfe)\x08\x14\x08\xfe) 
\x16\x0c\x01\xd0\x01\xd0\x0c\x16\x00\x00\x01\x00f\x00\xc8\x04J\x03r\x00\x12\x00\x00\x00\x16\x1d\x01\x14\x07\x01\x06\"'\x01&=\x0146\x17 \x01\x044\x16 \xfe)\x08\x14\x08\xfe) \x16\x0c\x01\xd0\x01\xd0\x03w\n\x10\xa0\x0d\n\xfe)\x07\x07\x01\xd7\n\x0d\xa0\x10\n\x0c\xfe0\x01\xd0\x00\x00\x00\x01\x00f\x01>\x04J\x03\xe8\x00\x12\x00\x00 \x01\x16\x1d\x01\x14\x06' \x01\x06&=\x0147\x0162\x02j\x01\xd7 \x16\x0c\xfe0\xfe0\x0c\x16 \x01\xd7\x08\x14\x03\xe1\xfe)\n\x0d\xa0\x10\n\x0c\x01\xd0\xfe0\x0c\n\x10\xa0\x0d\n\x01\xd7\x07\x00\x00\x00\x02\x00\xd9\xff\xf9\x04=\x04\xb0\x00\x05\x00:\x00\x00\x01\x14\x06#46\x0532\x16\x1f\x0167>\x02\x1e\x04\x06\x07\x0e\x06\x07\x06\"&#\"\x06\"'.\x03/\x01.\x01>\x04\x1e\x01\x17'&6\x03\xe8\xb0|\xb0\xfeVd\x15&\x07O\x05\x0b\"(P3G*+\x0f\x05\x11\x01\x04\x12\x17*3M,\x0d:I\x0b\x0eG7\x109_7&\x07\x07\x0f\x06\x0f%*>7F1\x1f\x93\x0c\x0d\x04\xb0|\xb0|\xb0\xc8\x1c\x13\xc2\x01\x02\x06\x07\x07\x05\x0f\x1f5KmC\x07\x19KG\\JB\x11\x05\x05\x07\x07\x19ktl$#?hI7 \x13\x07\x03\x06\x05\xc0\x12\x18\x00\x00\x00\x00\x02\x00\xc8\x00\x15\x03\x84\x04\xb0\x00\x16\x00\x1a\x00\x00\x13!2\x16\x15\x11\x14\x06+\x01\x11\x07\x06&5\x11#\"&5\x1146\x17\x15!5\xfa\x02X\x15\x1d\x1d\x15\x96\xff\x13\x1a\x96\x15\x1d\x1d\xab\x01,\x04\xb0\x1d\x15\xfep\x15\x1d\xfe\x0c\xb2 \x10\x15\x02\x8a\x1d\x15\x01\x90\x15\x1dddd\x00\x00\x00\x02\x00\xc8\x00\x19\x04L\x04\xb0\x00\x0e\x00\x12\x00\x00\x13!2\x16\x15\x11\x05\x11%!\x11#\x1146\x01\x1575\xfa\x02\xee'=\xfdD\x02X\xfdDd\x1f\x01\x0dd\x04\xb0Q,\xfc[u\x03\xb6}\xfc\x18\x04\x01\x174\xfd]d\x14d\x00\x01\x00\x00\x00\x01\x02Mo\xc3\x04__\x0f<\xf5\x00\x1f\x04\xb0\x00\x00\x00\x00\xd0vs\x97\x00\x00\x00\x00\xd0vs\x97\xffQ\xff\x9c\x05\xdc\x05\x14\x00\x00\x00\x08\x00\x02\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x05\x14\xff\x85\x00\x00\x05\x14\xffQ\xfe\xd4\x05\xdc\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa3\x01\xb8\x00(\x00\x00\x00\x00\x01\x90\x00\x00\x04\xb0\x00\x00\x04\xb0\x00d\x04\xb0\x00\x00\x04\xb0\x00\x00\x04\xb0\x00p\x02\x8a\x00\x00\x05\x14\x00\x00\x02\x8a\x00\x00\x05\x14\x00\x00\x01\xb1\x00\x00\x01E\x00\x00\x00\xd8\x00\x00\x00\xd8\x00\x00\x00\xa2\x00\x00\x01\x04\x00\x00\x00H\x00\x00\x01\x04\x00\x00\x01E\x00\x00\x04\xb0\x00d\x04\xb0\x00{\x04\xb0\x00\xc8\x04\xb0\x00\xc8\x01\xf4\x00\x00\x04\xb0\xff\xf2\x04\xb0\x00\x00\x04\xb0\x00\x00\x04\xb0\xff\xf0\x04\xb0\x00\x00\x04\xb0\x00\x0e\x04\xb0\x00 
\x04\xb0\x00d\x04\xb0\xff\xd3\x04\xb0\xff\xd3\x04\xb0\x00\x00\x04\xb0\x00\x00\x04\xb0\x00\x00\x04\xb0\x00\x00\x04\xb0\x00\x00\x04\xb0\x00&\x04\xb0\x00n\x04\xb0\x00\x17\x04\xb0\x00\x17\x04\xb0\x00\x17\x04\xb0\x00d\x04\xb0\x00\x1a\x04\xb0\x00d\x04\xb0\x00\x0c\x04\xb0\x00d\x04\xb0\x00\x17\x04\xb0\xff\x9c\x04\xb0\x00d\x04\xb0\x00\x17\x04\xb0\x00\x17\x04\xb0\x00\x00\x04\xb0\x00\x17\x04\xb0\x00\x17\x04\xb0\x00\x17\x04\xb0\x00d\x04\xb0\x00\x00\x04\xb0\x00d\x04\xb0\x00\x00\x04\xb0\x00\x00\x04\xb0\x00\x00\x04\xb0\x00\x00\x04\xb0\x00\x00\x04\xb0\x00\x00\x04\xb0\x00\x00\x04\xb0\x00\x00\x04\xb0\x00d\x04\xb0\x00\xc8\x04\xb0\x00\x00\x04\xb0\x00\x00\x04\xb0\x005\x04\xb0\x00d\x04\xb0\x00\xc8\x04\xb0\xff\xb5\x04\xb0\x00!\x04\xb0\x00\x00\x04\xb0\x00\x00\x04\xb0\x00\x00\x04\xb0\x00\x00\x04\xb0\x00\x00\x04\xb0\xff\x9c\x04\xb0\x00\x00\x04\xb0\x00\x00\x04\xb0\x00\x00\x04\xb0\x00\xdb\x04\xb0\x00\x17\x04\xb0\x00u\x04\xb0\x00\x00\x04\xb0\x00\x00\x04\xb0\x00\x00\x04\xb0\x00\n\x04\xb0\x00\xc8\x04\xb0\x00\x00\x04\xb0\x00\x9d\x04\xb0\x00\xc8\x04\xb0\x00\xc8\x04\xb0\x00\xc8\x04\xb0\x00\x00\x04\xb0\xff\xfe\x04\xb0\x01,\x04\xb0\x00d\x04\xb0\x00\x88\x04\xb0\x01;\x04\xb0\x00\x17\x04\xb0\x00\x17\x04\xb0\x00\x17\x04\xb0\x00\x17\x04\xb0\x00\x17\x04\xb0\x00\x17\x04\xb0\x00\x00\x04\xb0\x00\x17\x04\xb0\x00\x17\x04\xb0\x00\x17\x04\xb0\x00\x17\x04\xb0\x00\x00\x04\xb0\x00\xb7\x04\xb0\x00\xb7\x04\xb0\x00\x00\x04\xb0\x00\x00\x04\xb0\x00I\x04\xb0\x00\x17\x04\xb0\x00\x00\x04\xb0\x00\x00\x04\xb0\x00]\x04\xb0\xff\xdc\x04\xb0\xff\xdc\x04\xb0\xff\x9f\x04\xb0\x00d\x04\xb0\x00\x00\x04\xb0\x00\x00\x04\xb0\x00\x00\x04\xb0\x00d\x04\xb0\xff\xff\x04\xb0\x00\x00\x04\xb0\xffQ\x04\xb0\x00\x06\x04\xb0\x00\x00\x04\xb0\x00\x00\x04\xb0\x01E\x04\xb0\x00\x01\x04\xb0\x00\x00\x04\xb0\xff\x9c\x04\xb0\x00J\x04\xb0\x00\x14\x04\xb0\x00\x00\x04\xb0\x00\x00\x04\xb0\x00\x00\x04\xb0\xff\x9c\x04\xb0\x00a\x04\xb0\xff\xfd\x04\xb0\x00\x16\x04\xb0\x00\x16\x04\xb0\x00\x16\x04\xb0\x00\x16\x04\xb0\x00\x18\x04\xb0\x00\x00\x04\xc4\x00\x00\x04\xb0\x00d\x00\x00\x00\x00\x00\x00\xff\xd8\x00d\x009\x00\xc8\x00\x00\x01'\x00d\x00\x19\x00\x19\x00\x19\x00\x19\x00\x19\x00\x19\x00\x19\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd9\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00d\x00d\x00\x00\x00\x10\x00\x00\x00\x00\x00d\xff\x9c\xff\x9c\xff\x9c\xff\x9c\xff\x9c\xff\x9c\xff\x9c\xff\x9c\x00 \x00 \xff\xf2\xff\xf2\x00d\x00y\x00'\x00d\x00d\x00\x00\x00\x00\x00d\xff\xa2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc8\x00d\x00\x00\x00\x01\x00\x8f\x00\x00\xff\x9c\xff\x9c\x00d\x00\x04\x00\xc8\x00\xc8\x00\x00\x00d\x01\x90\x00d\x00\x00\x00d\x01,\xff\xb5\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\x00\x00\x01,\x01A\x002\x002\x00\x08\x00\x00\x00\x00\x00\xc8\x01>\x00f\x00f\x00\xd9\x00\xc8\x00\xc8\x00\x00\x00*\x00*\x00*\x00*\x00\xb2\x00\xe8\x00\xe8\x01N\x01N\x01N\x01N\x01N\x01N\x01N\x01N\x01N\x01N\x01N\x01N\x01N\x01N\x01\xa4\x02\x06\x02\"\x02~\x02\x86\x02\xac\x02\xe4\x03F\x03n\x03\x8c\x03\xc4\x04\x08\x042\x04b\x04\xa2\x04\xdc\x05\\\x05\xba\x06r\x06\xf4\x07 \x07b\x07\xca\x08\x1e\x08b\x08\xbe 6 \x84 \xb6 
\xde\n(\nL\n\x94\n\xe2\x0b0\x0b\x8a\x0b\xca\x0c\x08\x0cX\x0d*\x0d^\x0d\xb0\x0e\x0e\x0eh\x0e\xb4\x0f(\x0f\xa6\x0f\xe6\x10\x12\x10T\x10\x90\x10\xaa\x11*\x11v\x11\xb6\x12\n\x128\x12|\x12\xc0\x13\x1a\x13t\x13\xd0\x14*\x14\xd4\x15<\x15\xa8\x15\xcc\x16\x04\x166\x16`\x16\xb0\x16\xfe\x17R\x17\xa6\x18\x02\x18.\x18j\x18\x96\x18\xb0\x18\xe0\x18\xfe\x19(\x19h\x19\x94\x19\xc4\x19\xda\x19\xee\x1a6\x1ah\x1a\xb8\x1a\xf6\x1b^\x1b\xb4\x1c2\x1c\x94\x1c\xe2\x1d\x1c\x1dD\x1dl\x1d\x94\x1d\xbc\x1d\xe6\x1e.\x1ev\x1e\xc0\x1fb\x1f\xd2 F \xbe!2!v!\xb8\"@\"\x96\"\xb8#\x0e#\"#8#z#\xc2#\xe0$\x02$0$^$\x96$\xe2%4%`%\xbc&\x14&~&\xe6'P'\xbc'\xf8(4(p(\xac)\xa0)\xcc*&*J*\x84+\n+z,\x08,h,\xba,\xec-\x1c-\x88-\xf4.(.f.\xa2.\xd8/\x0e/F/~/\xb2/\xf80>0\x840\xd21\x121`1\xae1\xe82$2^2\x9a2\xde3\"3>3h3\xb64\x184`4\xa84\xd25,5\x9e5\xe86>6|6\xdc7\x1a7N7\x927\xd48\x108B8\x868\xc89\n9J9\x889\xcc:\x1c:l:\x9a:\xde;\xa0;\xdc<\x18:>\x8c>\xd4?(?n?\xaa?\xfa@H@\x80@\xc6A\x02A~B\x18B\xa8B\xeeC\x18CBCvC\xa0C\xcaD\x10D`D\xaeD\xf6EZE\xb6F\x06FtF\xb4F\xf6G6GvG\xb6G\xf6H\x16H2HNHjH\x86H\xccI\x12I8I^I\x84I\xaaJ\x02J.JR\x00\x01\x00\x00\x01\x17\x00\xa7\x00\x11\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x01\x00\x00\x00@\x00.\x00\x00\x00\x00\x00\x00\x00\x10\x00\xc6\x00\x01\x00\x00\x00\x00\x00\x13\x00\x12\x00\x00\x00\x03\x00\x01\x04 \x00\x00\x00j\x00\x12\x00\x03\x00\x01\x04 \x00\x01\x00(\x00|\x00\x03\x00\x01\x04 \x00\x02\x00\x0e\x00\xa4\x00\x03\x00\x01\x04 \x00\x03\x00L\x00\xb2\x00\x03\x00\x01\x04 \x00\x04\x008\x00\xfe\x00\x03\x00\x01\x04 \x00\x05\x00x\x016\x00\x03\x00\x01\x04 \x00\x06\x006\x01\xae\x00\x03\x00\x01\x04 \x00\x08\x00\x16\x01\xe4\x00\x03\x00\x01\x04 \x00 \x00\x16\x01\xfa\x00\x03\x00\x01\x04 \x00\x0b\x00$\x02\x10\x00\x03\x00\x01\x04 \x00\x0c\x00$\x024\x00\x03\x00\x01\x04 \x00\x13\x00$\x02X\x00\x03\x00\x01\x04 \x00\xc8\x00\x16\x02|\x00\x03\x00\x01\x04 \x00\xc9\x000\x02\x92\x00\x03\x00\x01\x04 \xd9\x03\x00\x1a\x02\xc2www.glyphicons.com\x00C\x00o\x00p\x00y\x00r\x00i\x00g\x00h\x00t\x00 \x00\xa9\x00 \x002\x000\x001\x004\x00 \x00b\x00y\x00 \x00J\x00a\x00n\x00 \x00K\x00o\x00v\x00a\x00r\x00i\x00k\x00.\x00 \x00A\x00l\x00l\x00 \x00r\x00i\x00g\x00h\x00t\x00s\x00 \x00r\x00e\x00s\x00e\x00r\x00v\x00e\x00d\x00.\x00G\x00L\x00Y\x00P\x00H\x00I\x00C\x00O\x00N\x00S\x00 \x00H\x00a\x00l\x00f\x00l\x00i\x00n\x00g\x00s\x00R\x00e\x00g\x00u\x00l\x00a\x00r\x001\x00.\x000\x000\x009\x00;\x00U\x00K\x00W\x00N\x00;\x00G\x00L\x00Y\x00P\x00H\x00I\x00C\x00O\x00N\x00S\x00H\x00a\x00l\x00f\x00l\x00i\x00n\x00g\x00s\x00-\x00R\x00e\x00g\x00u\x00l\x00a\x00r\x00G\x00L\x00Y\x00P\x00H\x00I\x00C\x00O\x00N\x00S\x00 \x00H\x00a\x00l\x00f\x00l\x00i\x00n\x00g\x00s\x00 \x00R\x00e\x00g\x00u\x00l\x00a\x00r\x00V\x00e\x00r\x00s\x00i\x00o\x00n\x00 \x001\x00.\x000\x000\x009\x00;\x00P\x00S\x00 \x000\x000\x001\x00.\x000\x000\x009\x00;\x00h\x00o\x00t\x00c\x00o\x00n\x00v\x00 \x001\x00.\x000\x00.\x007\x000\x00;\x00m\x00a\x00k\x00e\x00o\x00t\x00f\x00.\x00l\x00i\x00b\x002\x00.\x005\x00.\x005\x008\x003\x002\x009\x00G\x00L\x00Y\x00P\x00H\x00I\x00C\x00O\x00N\x00S\x00H\x00a\x00l\x00f\x00l\x00i\x00n\x00g\x00s\x00-\x00R\x00e\x00g\x00u\x00l\x00a\x00r\x00J\x00a\x00n\x00 \x00K\x00o\x00v\x00a\x00r\x00i\x00k\x00J\x00a\x00n\x00 \x00K\x00o\x00v\x00a\x00r\x00i\x00k\x00w\x00w\x00w\x00.\x00g\x00l\x00y\x00p\x00h\x00i\x00c\x00o\x00n\x00s\x00.\x00c\x00o\x00m\x00w\x00w\x00w\x00.\x00g\x00l\x00y\x00p\x00h\x00i\x00c\x00o\x00n\x00s\x00.\x00c\x00o\x00m\x00w\x00w\x00w\x00.\x00g\x00l\x00y\x00p\x00h\x00i\x00c\x00o\x00n\x00s\x00.\x00c\x00o\x00m\x00W\x00e\x00b\x00f\x00o\x00n\x00t\x00 \x001\x00.\x000\x00W\x00e\x00d\x00 \x00O\x00c\x00t\x00 
\x002\x009\x00 \x000\x006\x00:\x003\x006\x00:\x000\x007\x00 \x002\x000\x001\x004\x00F\x00o\x00n\x00t\x00 \x00S\x00q\x00u\x00i\x00r\x00r\x00e\x00l\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\xff\xb5\x002\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x17\x00\x00\x01\x02\x01\x03\x00\x03\x00\x0d\x00\x0e\x01\x04\x00\x96\x01\x05\x01\x06\x01\x07\x01\x08\x01 \x01\n\x01\x0b\x01\x0c\x01\x0d\x01\x0e\x01\x0f\x01\x10\x01\x11\x01\x12\x01\x13\x00\xef\x01\x14\x01\x15\x01\x16\x01\x17\x01\x18\x01\x19\x01\x1a\x01\x1b\x01\x1c\x01\x1d\x01\x1e\x01\x1f\x01 \x01!\x01\"\x01#\x01$\x01%\x01&\x01'\x01(\x01)\x01*\x01+\x01,\x01-\x01.\x01/\x010\x011\x012\x013\x014\x015\x016\x017\x018\x019\x01:\x01;\x01<\x01=\x01>\x01?\x01@\x01A\x01B\x01C\x01D\x01E\x01F\x01G\x01H\x01I\x01J\x01K\x01L\x01M\x01N\x01O\x01P\x01Q\x01R\x01S\x01T\x01U\x01V\x01W\x01X\x01Y\x01Z\x01[\x01\\\x01]\x01^\x01_\x01`\x01a\x01b\x01c\x01d\x01e\x01f\x01g\x01h\x01i\x01j\x01k\x01l\x01m\x01n\x01o\x01p\x01q\x01r\x01s\x01t\x01u\x01v\x01w\x01x\x01y\x01z\x01{\x01|\x01}\x01~\x01\x7f\x01\x80\x01\x81\x01\x82\x01\x83\x01\x84\x01\x85\x01\x86\x01\x87\x01\x88\x01\x89\x01\x8a\x01\x8b\x01\x8c\x01\x8d\x01\x8e\x01\x8f\x01\x90\x01\x91\x01\x92\x01\x93\x01\x94\x01\x95\x01\x96\x01\x97\x01\x98\x01\x99\x01\x9a\x01\x9b\x01\x9c\x01\x9d\x01\x9e\x01\x9f\x01\xa0\x01\xa1\x01\xa2\x01\xa3\x01\xa4\x01\xa5\x01\xa6\x01\xa7\x01\xa8\x01\xa9\x01\xaa\x01\xab\x01\xac\x01\xad\x01\xae\x01\xaf\x01\xb0\x01\xb1\x01\xb2\x01\xb3\x01\xb4\x01\xb5\x01\xb6\x01\xb7\x01\xb8\x01\xb9\x01\xba\x01\xbb\x01\xbc\x01\xbd\x01\xbe\x01\xbf\x01\xc0\x01\xc1\x01\xc2\x01\xc3\x01\xc4\x01\xc5\x01\xc6\x01\xc7\x01\xc8\x01\xc9\x01\xca\x01\xcb\x01\xcc\x01\xcd\x01\xce\x01\xcf\x01\xd0\x01\xd1\x01\xd2\x01\xd3\x01\xd4\x01\xd5\x01\xd6\x01\xd7\x01\xd8\x01\xd9\x01\xda\x01\xdb\x01\xdc\x01\xdd\x01\xde\x01\xdf\x01\xe0\x01\xe1\x01\xe2\x01\xe3\x01\xe4\x01\xe5\x01\xe6\x01\xe7\x01\xe8\x01\xe9\x01\xea\x01\xeb\x01\xec\x01\xed\x01\xee\x01\xef\x01\xf0\x01\xf1\x01\xf2\x01\xf3\x01\xf4\x01\xf5\x01\xf6\x01\xf7\x01\xf8\x01\xf9\x01\xfa\x01\xfb\x01\xfc\x01\xfd\x01\xfe\x01\xff\x02\x00\x02\x01\x02\x02\x02\x03\x02\x04\x02\x05\x02\x06\x02\x07\x02\x08\x02 
\x02\n\x02\x0b\x02\x0c\x02\x0d\x02\x0e\x02\x0f\x02\x10\x02\x11\x02\x12\x06glyph1\x06glyph2\x07uni00A0\x07uni2000\x07uni2001\x07uni2002\x07uni2003\x07uni2004\x07uni2005\x07uni2006\x07uni2007\x07uni2008\x07uni2009\x07uni200A\x07uni202F\x07uni205F\x04Euro\x07uni20BD\x07uni231B\x07uni25FC\x07uni2601\x07uni26FA\x07uni2709\x07uni270F\x07uniE001\x07uniE002\x07uniE003\x07uniE005\x07uniE006\x07uniE007\x07uniE008\x07uniE009\x07uniE010\x07uniE011\x07uniE012\x07uniE013\x07uniE014\x07uniE015\x07uniE016\x07uniE017\x07uniE018\x07uniE019\x07uniE020\x07uniE021\x07uniE022\x07uniE023\x07uniE024\x07uniE025\x07uniE026\x07uniE027\x07uniE028\x07uniE029\x07uniE030\x07uniE031\x07uniE032\x07uniE033\x07uniE034\x07uniE035\x07uniE036\x07uniE037\x07uniE038\x07uniE039\x07uniE040\x07uniE041\x07uniE042\x07uniE043\x07uniE044\x07uniE045\x07uniE046\x07uniE047\x07uniE048\x07uniE049\x07uniE050\x07uniE051\x07uniE052\x07uniE053\x07uniE054\x07uniE055\x07uniE056\x07uniE057\x07uniE058\x07uniE059\x07uniE060\x07uniE062\x07uniE063\x07uniE064\x07uniE065\x07uniE066\x07uniE067\x07uniE068\x07uniE069\x07uniE070\x07uniE071\x07uniE072\x07uniE073\x07uniE074\x07uniE075\x07uniE076\x07uniE077\x07uniE078\x07uniE079\x07uniE080\x07uniE081\x07uniE082\x07uniE083\x07uniE084\x07uniE085\x07uniE086\x07uniE087\x07uniE088\x07uniE089\x07uniE090\x07uniE091\x07uniE092\x07uniE093\x07uniE094\x07uniE095\x07uniE096\x07uniE097\x07uniE101\x07uniE102\x07uniE103\x07uniE104\x07uniE105\x07uniE106\x07uniE107\x07uniE108\x07uniE109\x07uniE110\x07uniE111\x07uniE112\x07uniE113\x07uniE114\x07uniE115\x07uniE116\x07uniE117\x07uniE118\x07uniE119\x07uniE120\x07uniE121\x07uniE122\x07uniE123\x07uniE124\x07uniE125\x07uniE126\x07uniE127\x07uniE128\x07uniE129\x07uniE130\x07uniE131\x07uniE132\x07uniE133\x07uniE134\x07uniE135\x07uniE136\x07uniE137\x07uniE138\x07uniE139\x07uniE140\x07uniE141\x07uniE142\x07uniE143\x07uniE144\x07uniE145\x07uniE146\x07uniE148\x07uniE149\x07uniE150\x07uniE151\x07uniE152\x07uniE153\x07uniE154\x07uniE155\x07uniE156\x07uniE157\x07uniE158\x07uniE159\x07uniE160\x07uniE161\x07uniE162\x07uniE163\x07uniE164\x07uniE165\x07uniE166\x07uniE167\x07uniE168\x07uniE169\x07uniE170\x07uniE171\x07uniE172\x07uniE173\x07uniE174\x07uniE175\x07uniE176\x07uniE177\x07uniE178\x07uniE179\x07uniE180\x07uniE181\x07uniE182\x07uniE183\x07uniE184\x07uniE185\x07uniE186\x07uniE187\x07uniE188\x07uniE189\x07uniE190\x07uniE191\x07uniE192\x07uniE193\x07uniE194\x07uniE195\x07uniE197\x07uniE198\x07uniE199\x07uniE200\x07uniE201\x07uniE202\x07uniE203\x07uniE204\x07uniE205\x07uniE206\x07uniE209\x07uniE210\x07uniE211\x07uniE212\x07uniE213\x07uniE214\x07uniE215\x07uniE216\x07uniE218\x07uniE219\x07uniE221\x07uniE223\x07uniE224\x07uniE225\x07uniE226\x07uniE227\x07uniE230\x07uniE231\x07uniE232\x07uniE233\x07uniE234\x07uniE235\x07uniE236\x07uniE237\x07uniE238\x07uniE239\x07uniE240\x07uniE241\x07uniE242\x07uniE243\x07uniE244\x07uniE245\x07uniE246\x07uniE247\x07uniE248\x07uniE249\x07uniE250\x07uniE251\x07uniE252\x07uniE253\x07uniE254\x07uniE255\x07uniE256\x07uniE257\x07uniE258\x07uniE259\x07uniE260\x07uniF8FF\x06u1F511\x06u1F6AA\x00\x00\x00\x00\x01TP\xc3\x17\x00\x00PK\x07\x08\x9a\x17<\x9c\\\xb1\x00\x00\\\xb1\x00\x00PK\x03\x04\x14\x00\x08\x00\x00\x00L\x84JI\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\x00\x00\x00fonts/glyphicons-halflings-regular.woffwOFF\x00\x01\x00\x00\x00\x00[\x80\x00\x0f\x00\x00\x00\x00\xb1\\\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00FFTM\x00\x00\x01X\x00\x00\x00\x1c\x00\x00\x00\x1cm*\x97\xdcGDEF\x00\x00\x01t\x00\x00\x00\x1f\x00\x00
\x00 \x01D\x00\x04OS/2\x00\x00\x01\x94\x00\x00\x00E\x00\x00\x00`g\xb9k\x89cmap\x00\x00\x01\xdc\x00\x00\x02\xc0\x00\x00\x06r\xda\xad\xe3\x81cvt \x00\x00\x04\x9c\x00\x00\x00\x04\x00\x00\x00\x04\x00(\x02\xf8gasp\x00\x00\x04\xa0\x00\x00\x00\x08\x00\x00\x00\x08\xff\xff\x00\x03glyf\x00\x00\x04\xa8\x00\x00M\x17\x00\x00\x94\xa4}]\xc2ohead\x00\x00Q\xc0\x00\x00\x004\x00\x00\x006\x05M/\xd8hhea\x00\x00Q\xf4\x00\x00\x00\x1c\x00\x00\x00$\nD\x04\x11hmtx\x00\x00R\x10\x00\x00\x01O\x00\x00\x03t\xd2\xc7 `loca\x00\x00S`\x00\x00\x02'\x00\x00\x020o\xfb\x95\xcemaxp\x00\x00U\x88\x00\x00\x00 \x00\x00\x00 \x01j\x00\xd8name\x00\x00U\xa8\x00\x00\x01\x9e\x00\x00\x03\xa2\xb3,\xa0\x9bpost\x00\x00WH\x00\x00\x04-\x00\x00\n\xd1\xba\xa3\xe55webf\x00\x00[x\x00\x00\x00\x06\x00\x00\x00\x06\xc3\x18TP\x00\x00\x00\x01\x00\x00\x00\x00\xcc=\xa2\xcf\x00\x00\x00\x00\xd0v\x81u\x00\x00\x00\x00\xd0vs\x97x\xdac`d``\xe0\x03b \x06\x10`b`d`d\x14\x03\x92,`\x1e\x03\x00\x05H\x00J\x00x\xdac`f\xe9f\x9c\xc0\xc0\xca\xc0\xc2\xcc\xc3t\x81\x81\x81!\nB3.a0b\xda\x01\xe4\x03\xa5\x10\x80 \x89\x1d\xea\x1d\xee\xc7\xe0\xc0\xa0\xf0\xff?\xf3\x81\xff\x02@u\"\x0c\xd5@aF$%\n\x0c\x8c\x001\x96\x0b\x0c\x00\x00\x00x\xda\xed\x94?hSA\x1c\xc7\x7f\x97\xa4iS\xc4\xc6\xfe\x89\x8dm\xdf\xbd44\xb1\xad\xd0,\x0eq\xcaPK\x83 q\xd2\x0c\xd2XE\x1a]\xb2(\x04\x142 \x01\x87.\xa5\xd4\xa9\x83]\xb4\x82 \"\x0eE\x1c\x9cD\xb7\n\xad\xa5\xb9\xdfi]D\xd4\xa1ZJ\xf5\xf9\xbd\\\x06\x11\xb5\xba8\xf9\xe0\x93\xcf\xef\xbdw\xef\xee\x9bw\xbf\x84\x88\x9a\x88\xc8\x0f\x0e\x81V\"\xb1F\x02\x15\x05\xa6pU\xd4\xaf\xfb\x1b\xd7\x03\xe2.\xce\xa7(\x83g\x0e\x92K\xc34O\x0bn\xab;\xe2N\xb8\x8b\xeeR\x7f{\xbcg`'!\x12\xdb\xc9P\xb2M\xf9UHE\xd5\xa0J\xab\xac\xca\xab\x82*\xaa\xb2\xaa\xaaY\x0eq\x94\x079\xcdc\x9c\xe5<\x17\xb9\xccU\x9e\xe19\x1d\xd4!\x1d\xd1Q\x1d\xd3I\x9d\xd6Y\x9d\xd7\x85-\xcf\xf3\xb0\xa2KC\xf5\x95\xe8\x17+ \xd5\xa4\xc2\xcaU)\x95Q9\xac4\xa9J\xaa\xa2\xa6Yp\x98]Nq\x86\xc79\xc7\x05.q\x85\xa7yV\x13V\n\xebn\xac\x14\xd7)\x9d\xd19\xbb\x92\xf7\xca[\xf5\xce{\x93\xaa\xa5\xf6\xbav\xbfV\xac\xe5\xd7\x9b\xd6\xbe\xac\xf6\xadFWb+\x1d+{\xdd>\xb7\xd7\x8d\xb8a|\xe3\x80\xfc*\xb7\xe4g\xf9Q\xbe\x95\x1b\xf2\x8a\xbc,K\xf2\x82<'\xcf\xca\xd3\xf2\xa4\x03W@\x15\xf5Ex\x1a\xcc\xa2\xbeD\xc4\xc8\xc3&\xc3U\x18\x19\xd8d\xb8\x06#\x03\x9b\x0c\x18\xcb\xc8\xc0&\xc3\x0d\x18\x19x\x0c\xf5Mx\x1c\x98<\xb7a\xe4a\x93\xe7\x0e\x8c,l2<\x80\x91\x81M\x86\xc702\xb0\xc9\xf0\x146\xf3\xce\xa0^\x86\x91\x85\xe7P\xbf$\xd2\x906{\xb8\x0e\x87,\xb4\x01#\x9b\xc6\x9e\xd0\x1b\x18{\xa0M\xcewp\xccB\xef\xe18H\xa2\xfe\x00#\xb36\x997ad\xd6&'~\x89\x1a95r\n3w\xc1\"\xd0\x0f[\x9eEt\x92\xd8\xdcW\x87\x04:\xfd\xd3\xad:$\"\x14\xf4\x99>2\xcd\xffc\xff\x0b\xf25*\xdf.\xfdl\x9f\xf4\xff\xe4N\xa0\xfe/\xf6\xff\xf8\xfeh\xfe\x8b\xb1\xc1]Gt\xfd\xe9T\xe8\x81\xdf\x1e\xdf\x00\x8f\xd1\x0f\x18\x00(\x02\xf8\x00\x00\x00\x01\xff\xff\x00\x02x\xda\xc5\xbd |\x1b\xd5\xb50>w\x16\x8d\xd6\x19m#Y\xb6e[\x92%Y\xde-YR\x1c'r\xf6\x84\xc4Y\xc8j\xb6\xb0D%\x10 \x81,@\xd8B\xd8K\x1bZjH\xd9\xda\xa4@\x1a\x12b\x02\xa5\xa5\xa4-\x85R\xf4\xca\xeb+\xa5n\xfbhK\x9b~\xaf\xe5\xcb\xeb\xa3\xbc\x96\xa6\xa5$\xd6\xe4;\xe7\xceh\xb5\x13\xe8\xeb\xff\xfd\xfe^f\xee\xdc\xb9s\xef\xb9\xe7n\xe7\x9c{\xce\xb9\x0c\xcb\xb40\x0c\xb9\x8ckb8Fd:\x9f%L\xd7\x94\xc3\"\xcf\xfc1\xfe\xacA\xf8\xd5\x94\xc3\x1c\x0bA\xe6Y\x0e\xa3\x05\x8c>,\x1a\xc8\xd8\x94\xc3\x04\xe3\x13\x8e\x80#\x9cp\x84Z\xc84\xf5\xd8\x9f\xfe\xc45\x8d\x1dma\xdfd\x08\x93e\xb2\xfc ?\xc8\x9c\x07y\xc7=\x8d\xa4\x81\xf8I:C\x1a\x89\xc7 \x93D\xdc\xe3\xf5(n\x08I\xa4\x8dx\x1b\x88L\x0c\x91.\x021\xa2!\x14\x84P'\x99JD\x89t\x91Hj\x80@L4\x92\xec\x85P\x86\xf4\x93h'\x19 
)\xc8\x11b\xd2)vH\x8eX,f\xf91\xd1c\\'\xb2\xe2cG\xcd\x0e\xd6\xc0\xb1\x84u\x98\x8f>\x06\x11\xeb\x8c\x1e\xf11\xd9\x1c\x0b~\xc2t\xfc\xe0?\xf1\xf1\x84\xe9\x18\xf8!x\xa1\xf8\x08\xc1\x03T_q\x04\xe2\x1e?q\x1bB\x01\x87\xdb\xd0F\x82\x91\xa4#\x10\x8cL%\xbd\xa9D\xc0\xd1\x9b\"\xaf\xe4\x14?Y\xad\xf8\xfd\x8a\xba\xc7\xaf\xc0\x83\xba\x07\x1f\xc8j??8>N\xd1Skem\x84\xb2\xa6AY\xb3\xb5\xb2Db\x10\xa14\x0d\xd9J)\xa4\x95;\x95@\xb9\x1aj\x93\xc5P$\n\x98\xef'qh\xae8`\x9b\xeb;a\x16\x8c\xea\x03\x06\x81X\xcd\x06\xc36C\x8d\xf9F\xe2*\x84d\xcb\x03\x16Y\xfdc\xe1\xb1\x18\xda\"\x18\xd4\x07\x8c\x82\xf9\x84\xcf\xf9\x15\xa3\xdb\xf8\x15'?h\xc6L\xacV\xe3\x97\x8c\xd6,\xbd>c\x91e\xcb3e\x11V\x03\xb9\xd2h\x86\x0f =C\xfb\x91\xc0\x7f\x9a\x7f\x86\xe9\xc7~\xe4\xf5xC\xbd\x9d\\((qb@\x0c\x18\x14\xb84\xf0\x89x\x86K&h\xd7\x81\xce\x11\x0d\xa5\x13\xa94\\2\x04\xba\xc7\xb1\x0f6N1|-\xd4;\xad\xcf\xebj\x9b\x14\xef\x96\xc9\x0dYu\xd4@\x86\xe1j\xea\xee\x9b\xd1\xabx\xfa\xa6\xf5\x86\x9ei\xb8\xe4\x8a\xa7\xfemK\xf3\x0c\xeb\xdb\xd9\x8dD\xf8E\xf6\x0ew\xa6q3\xff\xcc\xb7.\xbb\xbccAw@\xb64t.\x8eg\xe3\xf1\xec\xfckg\xb4\xc9r\xb0{~\xdb\xd3Wl~\xac{\xff\xd6lW2\xbb\xea\xf6\xf0}\x0c\xc327\xd06\x1ea2\xcc\\\x806\x10o\x10\x94z@\xb3$\xb4\x11\xf1\x13\xa1\x1d\x1a\xde\xebH\xc4S\xc9\xdeH\x88\x1c \xabg\xae\x9b\xd1\xed\xb1\x12b\xf5t\xcfX7\xf30K\x9at\xd4\xdec1\x7f\xc1,\xab\x7f\xd5\x1f7\x97B\xf2\x0boL\xeb\xe8\x98\xc6\x8f\xd46\x0f\xc4\x17\xb76[,\x96\xe6\xd6%\xf1i\xe1Z\x0d\xbf\x87,\x92l>T\x86p\xf2K\xb3\x9cSGg\xac\\>\x0d\xf1\x9d#\xbb\xf8A\xf6\x08#3\x8c\xab\xd8E\xb5\x8ey\x82k\xc2\xee6v\x14\xae\xec\x11\xda\xe7\xe8\x85\xc1\xba\xe7\x18\x86;\x06u\x0f3\xd3!\x1bZI\x1c\xce8\xec\x13\x98M\x00\x86k?\x1d\xb38\xb6C\x98\xa3Wq{`\xecC*\x89\xd0\x04\x11h>H\x90\x81\x04\xdc1\xbf\x12_s\xf6\xd9k\xe2\x8a\x7f\xech)\x9c\x9b\x9e\xaeoj\xaaOO'\xbb\n!~dX\xf1g\xcf\xfbB(\xf4\x85\xf3\x86\xfd\xca0<\x0dkOYx\xcae\xa7\xac\xa9\xb1\xc6\xa7\xc4\xad5k\xa6\x0c\x0f\x97=d\x0b\xe3\xb2\xf0\xd3\x03\xcf\xa7> \xed\xfc+\xfct\xc6C\xe7-o\n\xc7\xaa\x86\x02\x84\x1d/\xe4\xc3\xf5_ko\xef\xdc\xb6\x7f\xff\xb6\xbcs\xf1\xd5+f\xb0\xff\xceO\xdfz\xb1tp\xdbu\x077-\x99}\x85d\xf5\xed\xfe9\xc5 \x03s\xe0\x08\x7f\x88\xa9e\x1a \x8c\xb3\\9.H4\x92!0\x8f\xa5S\\ \xca\xb1\x7fk2\x99\xef\"?ip7\x9c\\\x02\x172z\xa7\xd9\xd4\xd4l\xde\xb0\xd1t=\xb9\xee\x85W\xf9\xe7\\!\xfbKyOXimU\xc2\xec\x1f\xbenov\x9b\xec\x0f\x0c\xdb\xc7\x1a6\xb2\x1e:\xbd\xe8\xe5\x0d2\x16\xc6\x0b\xf3LZkA\x18\xfdA\x1c\xcd^\xe2qC\x18\xe6\x99\xde\x94 
&P\x05\xe6\x1d\x88\xef\x15aF\xc6\xea\xa5I\xaa0\xdc\xe3>\xfe&\x18\xef\xf9\x7f\x85Q\x0b\x17\x81#F\xa3Q\xfd\xbb\xd1l\xe6>\x0dA\xb3\x19\x1f\xce\x87q*\x98O\xee\x80\x84\x10\x82\xe1\xad\xfd\xe3\x18\xcf\xff\x8b\xd1\xc8\xa6\x8d\xe6_@27\xa6\x15\xccl\xc4,\xa8\x7f\x82\x8f\x05s\xfe\x17\xf8\x89\xd1\xe8\xbe\x05f\x07\xc8\x0b\xbe6\xe2p7\x02\xfc\xdc\xa9?\x9d\xfa\x13\xffM\xfe\x9b\x8c\x89\xb11v\x88A\x98\xdc2\x89\xa6]$j\"\x82\x8b;\x1a\x91v\xdblk~va0\xbf\xfbg\xec\xa5j\xa3\xfa\xdcz\xfe\x9b\xb6\xddRD:\xffg\x17\xa9\xd7\xe6\xbf\x08\xb1c\xeb6\x92\xc5\xc5yw\x04\xfa%\xf4g\xe2(\xc3\xbe\x83\x04\xd8#'\xb4\xceuB\xb5\xf2#\xec=\xf9_@?\x83>\xc6F\xd8\x08\xfdVb\x8c0\x9e\x1fa\x1a\xe1!\xacaL4t\x12Xv\xbc\x0e\xa1\xfc\x01:\xc5F\xf6\xf6h\xf7\xb29\x8b\xefj^\xb5x\xde\x8c\xfc\xbe\xa1\xe9z\xd0\xcf}\xb4Wn\x08\x8e}7}\xb6\xd7\x17\xbbj\x93\xaf\xc6\xce\x9a\x1e\xce\xefi\xbdH\xa9\x8d\xad\xbf\xba\xb6\xc6\xc1\xde\x1ci\xa5\xfd\xc5t\xea\xef\xfcK\xfcS\x8c\x9f\x89a\xcd\x1bXE\xf4\xbaE\xbabb\x1b\x1aBQ1\xd8\xc9\xf6f\x08\x1f\x94\x08t\x91x\x86\xf4F\xc8\xae\xdc\x0b\xb7-\"d\x11qA\x13\xf7\xd7\xc6\\\xea\x1f\xb7~F`\xb3\x10\xbb\xe86\xb2i\xe4\x81\x95+\x1f\xc0\x0b\xff\xd4\xa2\xdb^\xc8\xb2\xc2}\xd7\xa9\xef\xd7\x86k&\xbb\xdd\xc4\xb9\x10\xbe\xbb\xed\x05\xc8\xe0<-\x0d\\\xe8\x9c;\xc4\x1f\xe2g1>\xa8w\x86\x050\xdc0\xdfv\xf2\xc9^x \xec\xdd\xdd7l\xdb<\x94\x9ay\xdd\xce}\x99\xccS\xb7o\x9b9\xe9\xdc-\xdb\xae\xe3\xbe6k\x9e\xd0\xb1l\xcb\xb4\xbe\xebn\xb9\xf9\xf0\x8a\x15\x87o\xbe\xe5\xba\xbei[\x96u\x08\xf3\x10\x97\xa7~\xc2\xac\xe6o\xe0\x1f`j\xe0\xc1\x95\x8a{i\xd7\x14\\\x1eC4,\x18\"iW8\x92J\x90\x07o\x13\x15\xf1V\xf2\x84bp\xa8\xfbw\x88\xb2C\xfd\xaa\xba\xdf!\x8b;\xd4'\x1c\x067\xd7D.v\xb9\xd4\xc7\x1d\xd6\x8f\xd4\x0dn\x8b\xf5o\x1fZ-n\xb2\xeb\xa4\xd5\xc1\xb0e\xf9\x07\xcfP\x82\x10\xf2\x8aio4\x1d\x8a~LY\xe4/\x7fzm\xfew_\x9d\xff\xf2\x1f\x16\x9d\xbe\xd8\xecg\xfe\xcf\xbd\x9f\xfe\xfd\xa7\x7fR\xde\xcf\"t\xde\xa0\xd3&No\x02N\x80\xa5)4\x1d\xc6\xc9M\x0b\xb3C\xdbG2\x11\xab\x89\\j\xb2\xca8\x90d-\x1c\xc9@>#\xdbOt^\xb6\xc0\xec5\xbc+x\xcd\x17\x98e\x19.^\xe1]\x83\xd7\xbc\xe0\xb2\xce\x13\xdbG\xa08\x9b^\xe6 \xf4m\x17\x8c\xf7(\xd3\xc6t1 \x18\xf7s\x99\x05\xccbf\x19\xb3J\x9b\x01\xb0\xdf\xd2 \x0c\x97\x17%\x91\x8c\x91\xa0\x01\xe2<\x89\x00\xac4\x8e\xdeH\x02\xe2\x04\xf8\x0f\x94\xdd\xc5@e\x9a\xea\xf78C\xc8\x1a\xd4\xf2\x8a,\xbb\x17\x065<\xc2(\x97\xe5k\xb2c5Y\xaeI\xbf\xfe\xb6\xec\xcd\xd8\x7f\x97\xe2\xf9A\xf8\xfa]|\xc5\xd7\x9a\xe5l6+\x9b\xd5=\xf8HV\xe3c\x0eb\xb4K\xd5\x8bB\xb46\xdfi4\x95\x0c#\xb4\x8b\x19\x18_\xdb\x18\xa9|&\x1f\xf3>NvQ\x8ak#\xaepW\x95=\xe4\xbfu\xba7\x94\x1a\xdbH\xc9\xb0R$\x0d\xe7\xf7\x9d\xee\x0d\x03\xb3[5\xec\x8b\x99\x0b\x99\x0d\xcc\xcd\xccg\xa1\x16 \xad\x03\xb5\xe9\x1d%\xf01\xcf\xe4\x7f9}\xbc\xb0\xd0\xfb\xc9\x13\x13\x05\xc7\xfe\xfa\xcf&@$\x16\x1f&\xb8\xa8\x1b\xff\xb9\xf7\x8c\xa1l\xac\x94\xf0=\x0e\xd31RI\xf1\x90\x7f\xf2}9\xe6\xd8#\x18\xce\xcf\x82\xabz\xfb?\x16?1z&\xae\xf4\xc4\xb1\x1a_\x10a\xda\xf9c|\x13P\x07\x8cI[\xed\xdd:u\xd0;\xa0\xaf\xfe\xe4\xc7l\xbf\xd1->k4\x8e\xfd\x19\xe6G\xf5\x16\xa3\xf1Y\xd1m|Z\xf4\x90w\xe0\x0d}\x18\xfb\x93\xd1Hn\xd1R=-B\xbe\xeb\x99\xf5\xfc\x0c~\x06\xe5m\xbc\x90\xa7\x89.\xd9\x90\xb1 .\xc0\xc3\xf5\xa6Mz\x11^,\x97\xbb\xeb0\x90\x13\x7f%\x16\xa3\xf1\xb0\x11\xca8\xae\x1e\x17\x15\x08\x1a\x89E\xfd\xab\x16G\xac\xc4**|\xffsg|o\xd4\xf2\xb1\x8a\xc0\xb9zO\xfd\x17\xff\x1a\xbf\x1b\xd6\xac0s\x96\xbez\xe2\xda\xe9.\xac\x9d\xe3WN\x81\xb6^\xb1 \x8b\x84yHk<\x8d\x02v3t\x01{\x058-\xd9|\x02\xc3'\n\xf8\xabe\xea\xf8a~\x18\xd6\xec\xc5H94\xb2\xc8\x11x\xbc\xa2\xd7\x03A\xc5-\xb3@\xfcy\x0dbT4@0\x14\xecb#]D\x8cD\xd3\xd1\x08\x04\x93\xbd\x03lj\x80DSio:\x05Ag\xd0\xe0\xf6\xc4S\xbd\xecP 
[binary data omitted: escaped byte content of an embedded zip archive, including the entries fonts/glyphicons-halflings-regular.woff and fonts/glyphicons-halflings-regular.woff2 (Bootstrap glyphicons font assets); not representable as text in this diff]
Z\x94\xa6\xbc\x02\xa8`s\xdei(\xcdR\xf4\xa9Q\x85\xbc\xb2\xcc|/\xfa`\xf9\nil\xcf^\xb0L#\xa0\xa7\xf7\x1d\xf2f\xa4-\xf1\xd7;-C;\x13_\xdc\xde\xf9*\xf9{@EMCoo\xc3\x82_\xa4\x9c\xc6\xc3\xf57\x17\xbeT\xf6\xe3rqz\xdcF\xb5%\x17\xd7\xaf|\x99\x9aUE\xc6\xabUs^\x14\xdd\x9cv{\xa0\x7f\xeb\x04\xa4\xa6fQ<\xc4\x90\xa1\x89VP\xfc\xef\xc3\xcaT\x0ef\xcd\xa6\xee?\xa5\x90\xf5m\xd8p\xd9P*\x00\xc8&\x01\xa6\xf7\xd0Q\x87G\x89\xf9{c\x9c\x03J\xef\xf1EP\x18e2)\x8exP\xbd0\x05A\x06\xd1\xdf\xcd\x01\xfbM\xc9\xaaZH\xb6j\x95\"\x11\xd7\xbb\"\xd9A\x05\xac\xd0C+zq\x89m\x04V\x11z\xe1\x96\x9e\x05\xf3U%\xd8C\xb5\x00:\x16@1\x0e\xe6\xe3\xf0W\xb9\x8a\x0c\xee[\x11y)\xceJ@\xf7o\xb1b%\x0c\xf7j\x94\x15A>)N\x0f\xd2\xc7\x80\xd4i\x7f\xe7\xbc$\x92A\xf3\x88\xc0\xect`>\xcc?f0g\x00\xffH36p\xe86\xe1\xccD|\xfb\x04M\x9b\xe4\x0c\xe1\xf6\x104N\x8d\xa1\xb0\n\x9c\x92 4J\xbdJ\xda\x83\x1e\xaf\n\xb5j\x07\xa4\x9e\x98\xc6\x87\x01\xb4\xf3\\\x0d\xedp\xa03\xd3\x19\xf8\x1c\x8e\xa38\x8d\xc0\xa6\xaa\xd1\xe3\x96\xd0\xaf\xef\x94\x13\xbb\xad6p\xae\x11\xf9V?:\xac$\x82sD\xf9\xc0N\xfa\x12\xb5\x0e\xc6\xb9\x912\xef\x92n\x92,\xb6\x84H\xddO\\\x82[\xb8\xf8\xd5\xb8\x9b\xf6K\xd9-)\x9d\xabW~\xb9i\xe9m\x99?\x18\xff\xae\x83T\xd0:\xb0\xcc\xf0\xde\xbaU\xd6eY\x8c\xd3\xe3\x8a-#dJe)\xaf\xb1\xda\xcfZ\xaa\xd55\x94?\xf8$\xbb\xe6\x0e\xa1\\d\xa9W\x19<\xb7\xb9\xb9,\xc9\x86\x7f\x86\x9a;\xaf\xd8\xb7\xb8\xc35\xcd\xcd\xf2S\xb8\xd5\xb8\xaf\xbc\x1a\x97T\xabT\x96\xf1\x9a\xce\x08\xcc\x84f(\x81PY\x0e\xb0v=Q\x0d~DX\x7f*\x07\xbc\xdf\x0f\xdd8\xf8\xe8\xbe\xa9s- \xc2\x08\x05\xcb\xa8\xae\xce\x80\x0555\x02\x0c\xad\x0dX\xa2R\xa5l QC\xcf\x08\x18\xe1\x19\xa4\xce\x0c\xe1\x93\xf8\x03\xd1\xc0l|\xf2\xfd5\xce{\xfb\xd3\xa6T\\t\xea\xbc\x95+\xe9\xef\xa3e\xbbn\xc5\xdb\xb8\x8b\x92\xc2Ps\xd3\xe8\x1fl\xfb3\x15\x99\x9d\xf9UO\xa9[\xfc\xdb\xee\xc7Z\xbb\xc9S3\xe5\xc0\x9d\xee\x9f\x9d*\xe8\xec,\x98\xaa\x86\xdf:\xc3\x9bZ\x1f\xfe\xbe\xc6\xd4L\x89\x9b\xac\x86\xd5\x05S\x9d\x9d\xf4\x15'\xcc\xb5\xe6\xe3*\x07\xf5\xf2\x8c*@\xa8\xf8\x19\x9d\x1f\xc4\xb1~xgno2\xb1\xb9\x19\xe1\x16\x88\xe2-\n\x85\x8c \xb3W\xb3\x14\x8a\xab\xbdV\x1c\x00;\xe4pZ\xe5\x049\x8d?\x13~\x84\x18\xab$\xab6\x9f<\x94\xb5Qr\x82bQ8&\xf3\x14se\x1b\x95\xc9Eb\xaf\xdaQ\x14,\xb7\xfe^|B\xb2\xb5\xef\xe7\xa2\x98\xefVd\xacV-\xb6(\xdc]\xe3 .\xef\xe8\xcb\x8e8\x1f\x12/qhV\xa1\x1bnR\xae\xf3\xaeQ\xc8D\x89*\xf9U(*1h\xe71\x14\xc3`\xd8\x9dQL\x12{\x07\x85\x81Uj\x0b\x0b`\x17\xe0\xd2\"\x9ao\x1e3\x00\xdc\xbb\xe6\x99V\xa8\x02l\x05\x85\xb5:\x1c 
\xd1\xed\xea\xc2\xd8\xc0\x0dja\x04Fa\xe0\x10\x9eE\xad\xb6\xcc\x9eZ\x88\x8bg1\xb1\xb7z\xfc\xb0\xcc2\xd6\xa0\xd5:\x04\xcdAu\xd9ZIf6\x96\x83\x012\xd5tw+\x08\x8a\x89\x89\x17f\x19\x81\xa7D\x8f\x8b\x1e\xef\xc9\xe8\x11\xaa\xa9\x11CL-}\x14g\xbb\xb5Z\x18\xb30>\xd2\x84\xf3xJ\xa2\xfd\xff\xc5>\\\xee\x90\xc1\xebQ\x89\xb8A\xda_C\xb6i\x82h\xdf\xd2b\x0fl]\x0d\x05\x1a\x876\xde\xd5\xde\x1d\xdc4\x1e*\x0e\x9aA\xcb\xaf\x16\xc9\xb0\xfcqX\x89\xa4\x047\x84\x1b\xbb\x14Y\xf2X.\x80-\xbe\x8a\xda\xd5\xb8\x95a\xc9\x87\xc3V\xe2h\x17\xac\x96iKg\x95\xd4\xcfqN\xb3R\xc4\x86N(r\x11'\xc1]\xbc\xe0%\xd9\x98\x84\x8f\x85\xad\xf4\x8d\x88@3\xd4\xcd\x80\xce\xc2\x81j\xa7\x16Z\xac\xf7J\x86.\x1f;\xfc\xe3nm\xdd\xfa\xaf\xd5\xce,S\xfb\xfb\xd8\x040x\xaf\xb3\xc3\x1f\x01\xf5\xf8\xd8\xcd\xbb\xa3OF\x0733\xad\xd2\xa7\xb6\x85\xab<$'\xdb\x0e\xbb\xb8G\x97E+\xb4\xda}\xa5\xa5\xce\xf3\xfa\xf3\xa7\x85'1\xf0f3\x9b\xc6\xfd\xd2y\xd05\xdd/&\xa4Z\x9c\\\x1aRB\xf47dm\xed\xf4]\xe6\xbc8\xa7\xa7\xc2\\\xcd\xfd\x01\x843\xdf\x82\x84\xc8\xaa\x98@\xf7\xeco\xbf\xfbT\xe9\xbf3eu^\x12\x1d\xb7W@\x8a\xaa\x15\xfc\x94\x96\x8e\xa6e7l\x96\x1e\x07!B\xe3,\xc0s\x82\xe4\xe61\xdc\xcd\xdf$\x9a\xe4\x1b\xf2\xaf\xdbZ\xa7\xe3&\xd9\x14\x92\xe7?\x03\xaf\xe2dC\xe9\xe1 \x9d(Y\xd0\xa6Sm>\x82J\"&pt\xde\xdc\x88\xaaP\xe3\x87\x84\x03B\x1b\x01F\xac\x14\xf8\xb4\x9d\xda\x04\xb1\xe0\xd9\xf84\x8cG\xe15\x9c t^\x1d\xc4\x86$\xaf\xde\xf2j-a\xe3\xa0\x8dg\x1a^\xc1\x0e\xca\x90C\xa4\x12\x96\x85\x83A\x02s\xd6T=k\xa1TS,|\x0e\x80r\xe0\xe5\x0f\x9a9I\xb7\x0f\xbdB\xcf\x98\x07\xd0\xac\x86\xf6'\x01\xc2\xecvG\x00A\xb6\xce\x11@\x90\xcdt\x04\x10\xc0\xcchQ\xd5Nj\x86&\x18\x80\x05\xe5T=\xce\x03xt;2]\xaf\x12P\xa1|T-\x07 L\xc3\x9e\x83\x92\xc9\xe7\xbf\xf1e1\x14\xe3\xdd\xbd\x04W\xc3Z\xc5\x9a*MrH5?\x84\xf1\x82=\xe0\x00\xfd\xd1o\xb0\x03\xac\"\xeb\xc69\xd1K5\xce\xf8=\xab'k\xc9-*\x07\x95\x84\xe8A\xa6\x12\x0f\x16E| \xfe \xde q\xd2\x94\x13\x91_?\\\xa37%\x1e\xf6\x8e|M6\xb0f\xaa+\x93\x93+\x92S*}\xe7W\x00_\xdc]3\xaa\xa8\xfa\xb6fm\xdc\xae\xd8\xd2\xcb\xb3\xd2\xdcm w!\x96\x97\xee\xf7\xe5.\xdbR#\x89\xe9\xac\xaa;\xc6\xed\x1f\xfa\xa6\xb1q\xa0q\xf371\x8a\x01\xd6\xe4$\xc2\x95\xdd\x99\xd5\xaf_\x19\xc0\xf3iK\xe0&\xecJ\xa9\xce\xacM\xae\x8a\xac\xf6\xde\x1a\xc7em\x12\xe5V\xc45P\x7f\xcf0>\x14\xbe\xbe Q\x9d\x15\xe7\xb55\xaa\x86W\x95\xa9H\xadIh\xd7\x1a\xe5\xf9&\xf94\xd2\x8dIl\xd3E7}\xe2s\xc8\xe9\xc4m[c\xc8\xbe\xec\x01\xa2\x84|\x8dd^ \xa2\xec\xf7%Uv\xe9\x011\x0f\xa6D\x07\x93\xb2>\x93.\xe8T\xac\xd2\x1b7*\xe9=t\x83Z\xb8_\xa7\xe3\x9f\xbe1\xd0\xa5:=0pZ\xfb\x9a\x066\xd2\x8b\x8aN\x04\x84t\x17\x14\x00(\xe5\x17u\xed\x0e\x1e\xc6\x9d\xad; \xc7B\xd2]\xb3\x91$\x80k\xa1\xda\x8c\x80\xc2.\xf3{\xaaF\xc4*\x03\x12/U\x1dZ\x92\x1f\xc6N\x08\xfc\x12\xe7\xa0\xa6|oq\x8a\xca\x15K\x97G;^\xd6\xe4\xbe\x9e9N\xa7\xfbe\x1f\xff\xedxK\x8b\x0c\x92\xa1\xa8\\\x87wh\xfe\xf8\xf1~\xa1\xf2\xf2Z\x02pH\xd4b\x91\x12\x8b\xcd\xe4\x89\xb8\x1e\x0c\xbb\x88\xdb[k\xb68\xe2\xc9\xcd\xcck\xf7\xe2.bX\x0f.Q\xbeXp\x02\xb8x\x0eYa^\xb5\x18\x02\xd0\"\x98\xd1#\xeb\x9f\x18\x99\xe3B\xb3\x10wnb\xa4\xbf\x11\xf3\xe5\x90u\xd3\xf0\xa0\xe9m5\x9eF\xb2\x0f\xbd~>\xc7\xd08\x1f\x05\x80\xad\xc1b\xfd\xfa\x8e\xe4N:\xddp\x1b4\x0b\xb5\x01[gv^\nB\xbd\xd3F\xd0Uz\xfb)?\x9c\xbc60\x8f\xd0F\xc9\xc9\x078\x98\x82\x00\xc2/\x042\xa0\x8cC\x088\xa8\x8c\xae>\x18\x9dN8G\xee\x8f\xcd%\x10l\xf3%\xe8\x1f\xdd5\xbaFH\x98{4\x846h\x1c\x04\xa7\x8e\xef\xb84\x8e%\xc6\x01#\x0d7\xb8\xe8\x9b\x1d\xf1\x8e\xcd\xfa\xcbx\x0c\xf3o\xba\xdcN t\xaa\\\x95'\xdf\xc8\xa8\x0f\x0d\xa8 
\xfa\xe6E\xa3\x8d\x8e\xe6\xbd0#\x83\x12\xefj\xb8N\xc3\xa3V\xef\xd3\xb9d\xe0?WlcW\x90\x81\xd7\xf0\x8b\x0b\xc4\xed\n\xc5\xbe\xbd\xd6\xb5\xb5u\xfb-\x93\x06\xbb}\x0f2\x842\xa8\xb6\xa5EN\x90\xbf}#\x87\xe4\xb5\xb52H^a3\xf0\xf5\xbb\x19\xa5r\xbb\xc1qs\xa7\xa4\xb0\x88\x84-\x8fS3&\xc8\x04\x14\xef\xf4\x84f\xb4\xed\x87\xa3\x0e\xcc\xeb\xcefwl.\x9a=W\xf88\xe5\x84,\xf5\xe0\xd2cH\x03\xe1\xaej\x9ecT\xea\xb1W\xae\x0e\x91\xd7\xa0\x05s\x819\x0e\xf20\x9e\xee\x94Z\xe0D\xceM\x11\x93\xfa\xbeC2\x92ZM\x92\x8d\xdd\xfb\x90dj\xb5\x8ct\x9f\x16\"8\xc0:g\xed{.\xc6\xb1\xb0\xd01Fb6\x831\xc78\"\x06y\xd4\xa6>\x98\xf5\xbf\x0c\x92\xadW\xc09\xa3\xeb \xd3V\x9a\x1a\x8a\x8a\xf6\x0b\xaf`\xd7\x12j\xbf\xfd\xae\x16\x9a\x8f\xbfT\xf2\x94\x94\x91\x91\x80\xb2\xb5r,n\x83\xa9i\xc0\xad\xc1\x0d\x0d\xa5\x16d\xb3\x85 \xff\xf4\xe9qN\xa7\xaa\xdd\x0c.g\x04+ \xefS\x07\xc2\x1a\xeb\xbc\x0dQ\xfa\xf9\xb7\x81\xbe\xeb\x0f \x0d\x81\xe1K\x02aB\x08\x01\xbe\x8c\xdb\xe0?_\xed\x8b\xdbQ\x8dE \x87\x93\xb5r\xb8\x86\xffj\xa4\xee\x82h>\xf4\x95E\xa6\x90\xd3\x9b;\x13\x8fC\x9d\xd7\xad7\x7f\x85\x8b\x1c\xb7^q\xaf\n\xc6`U\xafe\xfa#-\x9f\x98\x18\xb7;oJ\x98\xc4\x8b\xe3\xd2\xe6\x8e\xd4\x9d>)\x0b\xc7\xfd\x82;Jg\xed\xa3\xcf\x7f\xd7\xad9R;Og\xed\xa2\x1f\xc5\xfdiI7\xfa}\x97\xe28K\xa1\x92\x9c\x1f\xdb\x81\x04q\xa6j\x8f\xb9\xd1e\xd8\x93\xa3+\xd9\x97'n\xf1\xcf\xb7k3\x8b\xc1\xad\xbb\x07\xfceF\xcf\x81\x03\xfb\x05\x9e\xec\x0e\x850\xb1\x9a\xf2\xaf\xdfV#\xa9\xed\xc6p\xa5MAzb^P\x8e\xf7V\xcfu\xa4\xdb~\xde1u\xba\x97\xd2\x93\x95w\x1en\xc8 ^\x9b.II\x97\xa1\x8a_\x1c\xc6\xcc\xdavdW\xae\xc8\xf6\x8f\xf3\xce[Q,\xcd\xde\xe8+L\x0f\x04\xbeb\x82\x16\xed\xbe\xc9\x02\xa5\xe5\x84\xc6\xc4\x87q\xbc\xfe\x0d9\xabV\x1c}\xef \x94\xce\x8fV\xd1w4qU\xe43&j\xdb\xc4\xb1HYb\xbc \xfc\xb9\x88\x0b\x15\xbft\x15t\x8dT\x9c\xf5\x887\xcf\x81\xf2\x92\xab\xd9a\x16rBwP9?)\xdbu\x95\xe9\x8bT/\xd9a\xa3\x95\x8eA19\x96\x11\xb1k\xaaM\n\\\xd3\xe4P\xdd\xdfs\x9b<\xd8Ta\x9e\x8d\xd0\xe3@\x82\x85\x0e\xf5q\xb1\xd8+\x1c\xfb\xa3=\xd9[5\xc4\xcd\x94\xb7\xb6\xd6\xd7?\xb29\xa3W\xc0\xc9+^\xfdo\xaf^E\xe0\xe38s\x81)\xe5\x8ff\xe7\x0b\x972a\xf4\xe6\x01\x16\xadQ\x9f\x05x\x01\x1a\x7f\xa4\x17\xb7i\x87\xe9& NE>\"^Na\xe4a\x9f;f\x8a\xd9\xcc9]NE& t\x18^\xb0\xc0CL\x1az'\xe2e\x858ZR\xf1\x07s&6\xde\x02\xb27_\x00\x1f\xfc\xc3\xa3cyJ\x9e\x911\x1a\x0d\xc4\xfe@TZ\xb0?S\x00D2\xfb\n\x1a\x87|\xd4P\xf5\xd4\xcbO\xd3\x8c\xe9\\d\xaa\x1aR\xef\xde\xf0\xfb7zH\xd5\xf8\x83\xee\xb19i\xc8\x03\x00\x8bQ#\xb5\xbf\xb6\x82zr\xb3\xf3c.\xf24\xf6\x86G\xfdR\x9d\x134\xce\xc8qx\xa6\xf0\xbe<2~X\x92h\xb5\xf7n\x08\xe1\x19\xe3\x19\xe0\xa9\xb3\xa8\x06\xc52\x88auB\xadNC\xd1\x00+\x9b\xa2k\x97X\xf30\x0e\xd1\x0d\x18\x13aj5n\x03>\xde\x89\xa8\xf5\xb2\x12e3\xf6\x07\x11v\xde\xa7\x17\xd3\xf4\xe9<\xa5>\x81\xb0_\xb2\x9f\x90\xa0\x07\xdb\x0cuH:\xa0\x04\x90XR\x9d\xff%~9\xe1\x04!4\xf6\xfco\xd1\x0f\xd1\xbc\xa6\xe8\x1c\x02\x003\x85\xba\xe2\x948?\xb6\x82 \xce\x1f1\x01d\x1a#\xef\xd4\xd1\xfc\x1c\x96\x9aA&\x8b\x84\x84{A!i6\x0b\x8b\x8c\xed\xdc/Xa\x9d\x9f\xa3\x1a\xe1\x05\xe3\x87\xa4=\x12W\x16\x89;|\xef\x04\xf0\xe4\x1a)\x81 \xd0g\x16\xbe~\xa3\x0d?*\xbe\xe6\x82\xbd\xc3 }\xbf\xe3\xda\xa7\x88K\x11t\xcc>5|\xadE\x8e\xb5\xd0\xee\xd1\xdc\xd5.\xa7\xfd\xf4A\x07\xfb\x9a\x0bQ\xf16\xfc\xfa\x0d\x02\x07\xb2\x80\xbe(6\x1a\n\n6\xd1\x94\xc67\x00\x96\xda\xf7<9\xf9\xf9_\xf0\x95C\xc1\x0cf1\x0f\x19\xfe\xeb\xd0\x8e\xe9i8\x15\x06\xbe\xae\x86\xe5\xbb,\x0dV\xbb4$\xc0\x9fut\xc9\xf8\x9c\xf8\xa3\xc6i\xc1,.`v6r \xe2\xa3\xf2\x8eP\x0d\xaf\x1a\xbdgFB\x90\xc9\x8e\xc7\nt\x1b\xf2\xc3\xe7\xda\x0dC\x013\xbd;\x98\x0c\x1f\x04\x18,\xbco\xc3\x82\x93\xe6\xe3\x9cx| 
\n/K\xf3Mp\xa91S_\xbe\x91X.f\xf7V\x8d\xaa\x86#\xbcU>\xc8\x92\xc8\xf5\x88#B\xf6\xf1]\xb5\x0dA\x15\x1d\x8f\x91IVo\xc0\xcc\xd0\x86\xcf\xb5\xc0\x91\x14\x15\xbf\xfcGTV1nr+\xa3\xceOX\xc2S\x95%\x0b\x8b\x9b\x04\xc2\xb3\xb6\x99f\xa7OZ[\xdb_\xfd9\x9c\xfb\x11\x91P\xad\xdf\xb0\xf7 \x0f{Gl\x11n\x91\x05%\xdf#\xda\xdbh\xc0dw\xbfH\xa0\xf9=\x86\x7f \xb8y\xa9e/\xed\xaaW\xbc\x19\xb3\xb4\xb6>\x7f\xd2,\xd3\xf6\xac\xb0IP,*MV\x95\xf0\x84~\xc2\xbaK&\xe3e\x15\xa2\xc4\x8b\xbb\xfbM\x06\xec\xbd\xa3=\xf2)\x16\x8bqF\xbf\xa0S\xb6\xdf\x08\x1b\x05\xb4\"\xc9G\xd1\xebT\x9aF\x99\x8d*\xb9LX,h\x8a[\x97\xb4\xba\x93\xa7w\x8ew\xb4\xede\xf1WQE\xd9x\xda\xeb\xba\x0c?\xe1\xe8\x93{\x10^\xda\x86E\x8ex\xefh\xbb\x9di\xfd\xbb\x05\xa2\xd7\x82\x8f\x12\x84J\x94\xb3\xc3\xc0H\x8f\x07\xa5|\xf3^\xd9\xcd\x93\x8a\x85\xc9e*^\x89\xd0\xaf.\xe3u\xdaxE\x99\xe8\xeb\xcbb#\xee;\x9b\x92\xf4\xd4\x9d<]z]\\\xed\x83\xd7\x02\x81\xa8\x7fw\xdaN\xbbho\xbcchq\xb8E\x98\xa3=\x1e\xba\xe7\xe2\x134Q1\x1f\xb87\x0e\xfd\xdd\xe7W\xfc\xbd\xcc\x93l\xc3\x956\x8f\xe1\xa7\xbf\x14\xad\x12HE_\xcc\xa3\xa0\xf9qy\xff\x89\x1e\xe1\x0bY\x06R\x8e\xa4\xdb\xab\x1a\x18\xe49~l4s\xe6Vy\xf9\xad\x99`\x8a\xd7U\xdf\x9b,\xf9\x9f\xfe\xf1\xd7\xc5#_\xcau\xb4\xf9+De\xea\xea\xed\x99\xe0\xe9M\xd9\xfc\xa2~\x03h\xb3q\xaa\xeb\xb2\x87\x92\xb7#Y\xfa\xca\xe6\xf1z\xfc$;\xdb\x0c5\xcd\xaf9$\xb5\xcb z\xb2\x1d>\xff\n\xbe*j\xf5O\x9f\x8d\xf8\x8c\xf1\xdb$\x9c\xd0$O/\xaf\xc0\x0f\x15\xc3xR\xed\xbd\x83\xc2t\xfdf-}*\x9eo\xef\xc9\xa6\xf8\xd9\xcc|3\x86M;\x15x\xde\xa8\xafU\x94\x95\xb5l/.\x14\xf1~X\xc7\x8e\xafY\xe1\xbc4\x99x3&\xe6\xeb\xd7x\xae\";\xa4$\x8dKI\xf6\x925\xdbd\xda\xad\x0c\xbd\xca\xe1\xfc\xfa\x8b~w[\xca\xd4M\x19\x1f9O\xd8\xe3%4\xd2\xe1Q\xa8\x1c}\xe9\x19S^\x9e\xect\x96\x9d\xe8@\xd1\xcb\xe6\x0ew[\x9bY;-\xdd\xf6\xff\x8d\xc0\xbas;\xa2b\xbe\xcdwH-*\x05\x0c\xef\x96\x96\xc3im\xbd\xb6I\xca\x08-\xb6\x1f\x7f1e/\x95~\xf8\xa8TNN\xae.\xf2p\xb2\xdf\xf0)H$\xfb\xeb\xabW\xef\xdf~\x8f\xce\xf0\x03\x90\xb5\xc9\xc6\xa6\xed\xbcO\n(\xa0\xe99\xe8\x86,\xd9\x0d]gM6r\x1d\xea+\x84#\xbb\x07%\x83\xe0/s\xfcw\xafA\x18\x9c$\x11\xf5\x8cq\xc64\xcb\x11\x07O>\nd9}\xf7\x9d\x13+\xf0\xd1$\x8cs\xf4\x9a\xb3\xfd?\x030\xa3\x99\x14a,>\x9dy\x8b\xb9\xda\x88s<\xf0\xfe=\x86,\xf0\x8bc_*\\\xe2\x83D\xed\xbc\xdc}\xc82\x19M\xed\xb0\xcd\xccT8\x17/\x12\xed4\xe6g\xe6'\xda\xa6\x9e\x9d\xe28'\xfb}\"\x8bC\xe2\x03*\x84\\9\xbd#Y\x8f\x1f>z$\x8e\xe6\xf6\xd47c[s\x93|\"$}\xbb ym\xcc\x16\xef\xd5\xc0\xabzQx 5\xb7%\xe5\xa0o\xf9\xe5\x93$j\x86k\xd0\xcep\x12)\xf1x\xbd\xc4\xfb-:\xba\x86\xd0\x98|?\x9d\x98o\xe3\xf8f\xa7\x87gFr\xc0\x9e\xdf2\x15\x1a\x8aS\x91\xc7Z\xf7\x8c\x02\x11q}q\x88\x8a\xb5 \x85o\x80\x1d\x1f,wy\xc5O\xa1g\xca\x13\x90CF1\xd6l\x98\xe7\x9f'\x06\xedL5T3\xf5\xf23\xfb\xd1\xf0y\xaa\xa6M\x899\xda2\"s\x94\x98\xf25\x14u\x1bD\x95\x8b6\xc0\xd4-J\xec\x89U\x08\x86b\x05s\xba\xcf\x18\n\xabO)\x83\xcc\x16w\xb8R\x0d-2\xe6/5f\xdc<\xaaBQ\xcc4\x16k\x8c\x98\xea\x90\xad\x9dG\xb9 \x13\x1f)%\xdf\xbc\x0e\xe3\x7fr\x92\xcf\x9bf@\x07\x1f=\x9a\xe2BF\x99\x8b\xefCB\x00\xb1\xa9\x89\x89\x83\x87&'\x87F}\x19@\xa9&\xd3\xc8\xff\x19\x04\x84y\xd7ub\xe2\xd0\xc1\xc9\xc9C?'\xab\x1e\xea\xe5\xf1\xe7\x18S\x01\xe649+\x97\xc3\x93\xe4\xb1C\xf8\xae\xb3\xedI\xc3\xae\xf0\xa4\xd9\x1f\x8c+\xd7\xe7\x98\xebf\x05/R\xab\x06U\xea\x0d\xcc\x12\xeeC\x0b\xb0\x01Fu:\x14C\x11*\xa3}\x0c\x86T:\x1f\xba\xb6}{\xc2\xdc\xdd\xbd\x01\xe2\xb2\xb7\xd4u\xaa\xce\xd7\x17\xf8\x95\xace[!\xae\x96>\xfa?\x0b\x8b\xc4\x0f\xe5\xfd\xda\xb8\xb8\"\xc7M\n8gz\x83\xf40\\Hk\x83\xd4\x06Z\xb3:\xc4h\xe5\x02\xad\x1b~\x99\x03@\x13\xaa+\x1d\xf4#\xabN\x9d\xea\x0c\xd6fj\xbe\xe7\x18y\xb5\xee\xe5\x8bio\xc0!\x84B\xf7 
\xfe\xf1\xf4\xb7\x93\xb1R'\xa95>\xda\x13\x03`\xda\xf2[!\x1a\xc2\xc4T\x18\x88`m\x1fC\x01\x9a\x9d\x0bI\x85\xd1\x9d\xc3}\xb8n\n\x8f>W\xdf\x10\xf4\x01!M}\x18U\xcbav\xf5\xb6\x014\x90\x803)!\x10\x04\xa7\x01\x0ek\x1bc\xc8\x82\x1a\xb2\xf3m\xfe?\x92\x8e \xdd\xe5d\xef\x18w\x95\xe3v\x17\xab!\xd7\x94;X\xcf\xa1\xdb\xa8}\xbd\x1d8\xedvt\x95\xeb\xd0\"\xd3\xbc#\x0ck\xc2v\x17X\x8aJ\x17\x99\xb1[\x93l\x9d\xb6[Z\xdd\x99M\xc3\x80\xa7\xf1\xc3\x16XC\x073l\xea\x96[\x0d\xc3Ta\xbcVj\x8b\xa1\x85\xda\xca\xbb\xa5\xe5\x18\x01\x8c\xd1\xac\"\xc5\x93\x17\xd1\xf2\x0bt:\x15\x82(\x9e\xea\xe0\xa6\xc8\xc1<\xbe\x08\x06cZ\xf3ve\x97\xfdQ\x9b\xc7\xeeT\x85\x9f\xa0qH\xe1\x91i{\xa0\xd2\xe9\x8a\x80Q\xe5\x9f\x93'\xc1\x93\xc3\x96\xbb\x93i\xa0\xf6P\xd8\xf6\xef\xbf\xad\xe8\xfc\xc8\x03\xaf\xe1\xedmK\xcaA\xe7I\x8a\xf5\xa4\x19\x8f\x90BF\x83\n\xa3\x15\x05=\x89\x89\xb5\x07\xe2\xd6\x07T\xe1\x85\xbd\xb6\xe0(\xe2\x0f\x9c&TS\x10\x9f?/\xef\xd8\x81A:\xd6\xb7\x1e\xac\xbb\xd0\x9eV\xa7(\xf8\xf7@w\xef\x0fF\x07a^\xe3\xa6]\x85\x8c\xb5\xe4o]*\xd3\xf199\x16\x16\xacR\xbfi\x0e\xe1\xf1_\xd4\xf2\x98\xc0\x99\xfe\xce\xcb2vM\x93\x80\xd8`P\xa7\x93\xf4f\x03\xa6\xb4\x06{Q\x06Y\xcf\x16\xab\xedH\x7f#V7v\xc57\xcc\xd2\xb0\xa0q>@\xe0\xf3\xab\x18~u\xc9\x98\xd7\x86Ax\xb0\xc3/\x83\x02\xabx\xd9\x08\xb0B\xfe\xe03\xa3\xc4\xa0\xd9\xf4\x89\x9dt\x83\xca\xc1y\x1d\x86b0\x8c\x9e\x16nG`\x04\xc1\x0c\x18\xf2\x04E\xda\x04D\xec\xd9\x8d\x1a\xe4A\x92\xd4:\x0c\xc6P\xd8\x10\x05wI\x1f\xd37\xcf\xec\x10nW\xd3\xf72ED}.(h\x95\xdc\"\x82\xf3\xe3U]\xa29I\xedh_\xeeV\xaf@\x87\x9bGZ\x100C\n\xdcp\x15\x13b\x0d\xf3\x1b:\xe3\x99L\x0c3\x8d\xa1tN*\xaaN\x0c\xbd2\xbf\xd3!\xbc3\x8c\x8d\x0dCa\x18\xb3\x97yn.\x95\xca\xdd\xc9\x8b\x7fW\xe2`\xcc\xb3\xdc}\x18\xb1QB\x1b\xccC\x8f\xaa\xc3i \xc1\xa18*\x08\x02\xc3\xef{57\x89\xb9\xec\x05O#aT\xa2\xcb\x1dB\xbd\x9f\x82\xbdU\xedo\xfei\xa70\xe7\x0d\xf3_\xcb\xf0\xd1\xf9^\nChrU}~r\xc8L\x19 1\xf2z\xff>..\xf6=\x1c%\x16G\xc1\x9a\x9bG\x8c\xa3\x0b\xeb\xfco \x99\x90\x87\x0c\x18\x8cE\xe9u\x91P\xb3\x05Ps\xd8\x98\xde\xb8\xe8\xf38\xad\xf0\x03\xfc\x05\xfeP\xa8\x9fu&;\xe6\xca*\x8c\xeb\xf9|i&\xa4\xffPb\xdb\xc8\x9b\xb0\xb2\x98h\xd2;\xb4[\x97\x80|\x18y*c\x83V\x9ah\x86\xd2\xbc\xd2(\xff\x94~\xce\x16_A\x95qU2\xb7\xbd\xff\xf4GIQ\xc73`\xae^\xcav\xfe=\xee@\xac\x0f\xdcK'\xa4\xb5\xd0\x87\xe8\xcbZ#\x084sJ\x02=\x0f\xba\xa4:sY\x05\xa9\xe8 \x14s\xda\xa5b\xc2yj\x0c\xaf\xeb\xb7\x1bS\x1f_E\xdc\x83\"\x8e\xaa\x8b\x8a@\x9e~\x82\x9f\xe7\xb9\x08>\xad86\xba\xf1#\xcey\xb1\xda\xe4\xe5[\xef\xe8c\xf2S\xef\x84\xa5\xd9\xc5\xac\x96\x90\x94\xf2\xf2#\xadSJ\x94GZ\xfb\xaey\x19vv\x12\xdd\xdfS\xf9\xd1\x8f\x1a\xe6\x89\x9dp\xb8waT\x9a\x9e\xcf\xf5\xc5/\x13,\x1f\x14\n9'Jkv%%.\x86~o\xf2[\xcc\xf3\x9f 
\xe8\xa1\xa7\xfc\x1c\x9c\x13\xc2R\xb1Bj\xa2\x9fS\xe8\xc8\x80*$'\xf8\xe8\x85\x81\x08\xa9p\xc3\xa7S\xa5u\x00\x0b\xe0+\xe7\x079\x18\\\xac\xed_f+\xe5\xf2\xfc\xf98\xf5\x10u\\,\xb6\xca\x14\x8f\xd3t\xe6\xe5\xf0p\x9e\xd1\x8d\xd0\x1ek\xd8\xae\x13J0h\xf3(]\x18N\x11\x84Q\xc8v\xf3\xb3W\x1f\xac\x81\xc7\xcc7\xf3\x0c\x958\xab\x836:\xdc\xd6\xdd\xa3\xcf\xe2\x0b\xb7\xf1Wc\x93\xafY_i>\xfa\xac\x8c\xdd\"\x91\xdfR\xa1\x10\xe7\xf5\x92\xe2\x81(\xd3e\x18]\xde6\xf8\xb9\xaa\x04RA%U\x976&\xb4F]\x94\xbd7@\xcc\xb3k3X\x0dh\x81?\x8c\xc1K\xef\xe0\x13\xae\x04\xde\x00\x1eQ\xa42\x99B\x08k\xbe[?\x0f.\xff\x1f\x80\xc3.K\xa1\xc8KAb\x8e\xcc\x7f6\xfd\xde\xc4\x14\x1f5\xbf\x7f\xb7k\xf2\x92e\x8e+]\xb2F\x16\x8e\xb0e\xf6WH\x04\xa9U\xf2\xab\xfb\x070O\xdd\xd7\xa7\xbe5\x85\xb4\xf8\xff\x0d\xa4\xfa\xee\xe4e3H\x87\xaeco\xc7>l\x16]0\xb62\xb1\x88c\xfd\xb9\x87\xddH\xca9\x9a{Z\x0d{sO\xf5\x04\x96!\xa5A,\xfe7\x86?\xc5\xb73\x00w\xe4\xbf\x8eA\x1c\n\xe0Fj\xda\xff\xb88\xb8B\x16\xed&8U$\x06\x11G\x85\xa1\xc2\x00\xb5\xd9\xdf\xe9$\x1b\xb5Y5\x98\x82\x18\x86\x0f\x0eF\xe2\x11L\x855n\xc7\xec\xb2\xd8\xf21\x96\x94>\x02\x0cq\xf3\xba2\xa3\xa3.\xab6\x93\x03e\x97\xe9\n\xee\x0d\xec\xbc\xda\xf5\x97\x9c+\x98\x96@/\xac\xa2\xf0\x99\xb5k\xedb{\xe0\xf7(\x8f\xc57\x07\xd0i=\x1b\xa5\xc9{l\xcd\x8d\xac\xdd\x82\xad\xe2\xce\xe6\xbf\xa6\xef\x19\xa08\xab\x03\x851g\x9d(\x03\xbb\xfc\x13\xe8%\xce\x02\xd3h/\xebEf\xb6M\xb9\xd2\x8d\xddt\x9f\x005\x0f\x1f\x14\xbd\x9f\x1a\xcc\xbcvg\xbdo\x03\xda \xea~\x01\xe0\xa9\x9bWKi\xe7\x88\xb6U\x07\xab\xdf\xd8\xa3\xdd\x96\xadw\xacRS\x82E\xecF\xed\xdfT\x1f\xac\xb2%\xaa\x0d`=\xed\x17\x18\x8b\xf8|*=1\x82\xf2*\x8e\xf9\x16\x1f\xbb\xfd\xb7S\xa7X\xf6\xac\x80\x1e\xb7\xd8^\xea\xf2\xd3w)l\x1f\x96\xc3\xd6fQ\x9dH\xe3\xfa\x8c(\x14YS\xfe\xf4\x84S\xcb\x8cK\x8f\xb7\xe61\xde\xd8\xe2\xedW]\xb5f\x99\x9f\xb7\xb07\xd7\x9a\xb3^&\x0f\xfdp\xf4@T'.\xec\xc0%3\x8d\xb3\x95\x84\xc3\x0d\x9a\x9d\xaf\xb4\xdf\xde\xf2\xbb\x11\xf95\xcfzaTf6\xd1\xeb\xa9A5\xdc\x19L\x95\xf3X\xcc\xa1\xe6\xb7\xc1|\xf1\x08L\xb8\x7f-\x9f\xe5\x96\xce\xb7\x07\xea\xc4\x1aT\x03\x16\xd7g{A)\xaa\xee\xad\xaeF\xd1\x15\x95.\"\x14h\xfc\x18\x93\xf9j\xa2\xa0A;\x10.\xf0\xe2~\xc2\xfe\xa0\x85o\xde%\xb0\xe8\x0c\xfdG#\x1a\xd1}&]\x02\x1a\xde\xd7\xbec\xdc`\x0fC\xbbhH9xnN\x86\xb7Y \xce\x12\x17l\xf0\xb2c\xde\x14\x03\xa4\\+v\\E\x9f\xa5\xab\xc6\xa71\xa6D9K\xb7X\xe1)2b\x01.\xbf\xf3\xad\x93N\xf4\x07\xda\x17\xfdW\xa2\xa7\x8e\xdcQ\xd7\xa9$\xa5/\x05\xa3\x94\x10|6t\xc3\xb0\x99\xc332\x14\xd4\x9b\xa3\xb47\xa1\xa1\x17\xa6\x00\x962\xb8\xd1\xdb\xd0\xb8yu\xfc0e\xd7\xf1\x80)\x92N\xd8uh'd\x9c\xb6\xc3\xee\x84\xf5\x81\x1f~xY\x89\xc9\x0b\x1e\x00\x90\x91\xb4>\xc4\xda#\x0cb\x88\"k3\xd3\x0e\xf0\xc2\x0c\x03\xbc\x83\xae\xcc:\x13\xa09\xbf\xba\xa2v\x13\x0f\x88\xfa\x06$\xd0\x9fC\xdd:\x14\xca)H\xb9\xcb> 
\xd5\xa6\x9az\xc7\xdb\x1c;e\x93d\\j\x17m\x1af\x9f\xe4\x18O\xcfa%\xf29\x9a\x91\x88\x04cK\x9bx\xd3\xd0\x0f\xdb\xa5\xc5!k\xa9%H\x81\xdeDn\x93\x10\x7f\xfc{Y\xdc\"\x93\x17{n_\xd6}\n\xdf)9\x83=\x0d_/\xfb\x1e\x87Z\xce(\xe9\xf9>\x16l\xfa\xb6\xad\xf7Y\x9e\x06\x9f\xd6V\xf7\x8b\x1b\x06gQ#\xa7\xdf\xad:Q\xc4\x01\x95\x17\xc8bw\x07\x83\xc7\xf7\xd5$\xbe\x1bzw\xb1\xf0\xd9\xae\x8b#\xbf\xbb\xe3\x7fU\x14\x98?|\xc5\xf0\xefG\x08\xd4\xce\x05\x84h\xc1\x1e\x7fz\xfc{\xf9o\xba$w\xf1\xd7\xcf\x9c\x9d\xb4\xba\x04\x1c)|Vh\x89\xda?\xbb\x8c\x0d\x14ZV\x8f7\xbe%\x9f\xfcG\xfbo/\xa3\xd7\x86\x87\xf4\xe9E\xcf\"\xb9K\xd3\xb2\x85\xa0\xec\x15\xb5\x08l\x06\xa5p76\xce\x00\x1f-z\x0d!\xc1l\x804\x17n>\x94\xbc$\\\xe1\xd7zV?sz\xfbqej\xecQ\xe7\xeb\xe9]m\x8b\x9b\x04\xe6^\xe6=^\xb5\x0d\xe4\xa7\xed\xad\xa5!\xa0\xb2\xbal\x85\xf4HB4sL\x0bi9}\xde\x1d2\xa2^\xd7\xfdK\xd05\x04\xc5OB\xfa)\xe8\xedO\x0d\xad\xe7v^~\xaa\x12\xea\xb5\xfd\xef\xdd\x80x\xa6\x93rm\\\x1eK\xda&G^\xd0\x075\x8fC\xe7\xd0\x10L\x1b\xbc}&F\xba\xf2\x94\xc0\xcb\xe2\xa8B]K\x86\x14\xa1n3\x86\x86|\xcdsGjy\x1a\xf0\x16k\xfeO\xaf\xb5\x16\xdab\xe5s\xdc\xbd\xe6aW?R6\xf0\x12\xa1\xb7\xb2\xbeJ\x90\xc2\xc7fh\xe3\xda2 \x0c\xc4\xcalBS\x1c\xa7\\=\xa2j\xd5\xd5V\x97\x1e\xca*\x94\x1f\xf4Y\xa6\x93\x14^\xa2\xa2\xc1\x99\x04\x84\xcb\xba^\x1aE)\x07\xd5\xe8*\x94\x03\\\xbd\x9f\x04\xa0\n\x03\xc2\x14\x82r\xd4r(a\xa2\x1d@\xf8\x05\x16\x08\x846n\xd4\x8c\x14\xa3?\xa5}\xdadL\xb0\xa9\x17\x1e\xa6\x81\x16\x9ag\xa2\x0eI\x1dvq\xd8Nc\xd0\xc7a\x8d\x91\xc6\xaek\x8c\xcd\xf7mL\x8c\xf6\x7fc\xc1A!\xa4\xb1hd\xb8\xa3\xb1V\xeb\xf0\xb5\xb9\xce\xf2wc\x03=\xa2\xa6\xe6\x86\x96\xe1\xe6\x16\x96\xd5s_\xd2:\x8a\xff\xd2\x97\x95\xa2sL\x1f\xcb\xed\xa3g>\x91\x9c\x971\xdc*4-%\xf0\x13&\xcb\xf50\x11\x08Ub\x16\x83)E\xdc\xac\x8d\x86*\x19b\xb8\xd4\xd6\x0651\x97\xc4\xef\x84\xe7 \xe4+\xe8+;\x98\x8e<\x85\xab\x86\x92`!q\xb0f\xcd\x1a\xceM\xf9*\xc6,[/GK+\x13\x07{\x17\x04\xc3\x12\x97\xae\x8d,>C\xe2L\x13\x81\x8c\x1b\x01\xf3\x9dR%%c\x8c\xb3\xc2\xc6\xc3~\x91\x92'EG\x8f\x86A\x89\xaeG\xba\xc2=\x87h\xc2\x12\xe4\x9f\x94\x8e\xb08\x1c:ID\xecN\x02)\xc5W\xcc\xbb\xe1AF)ucw'qh\xcdX\x1f\xc3\xa8\xb2L@a\x84\xbe~\x8c6\xccPc2L\xe3\"\xa5A\x08\x852b\xec\xc8U \x99\xc0&\x8c\x13\x80\x8b\xff\x0c\xaf9\xf6A#\xd3QLO\x07\x04\xac:\x81E\x14\x809k\xa7\xa9\x92\x91\xc6\x1af\xdeKF\x95b93t\xbaL$c\x89\xcb\xacp\xffLz\xff\xa0\xbf5\xd4d\x12\xdap\x08\xa2\xfe\xf0\xf9\xdb\xb0>$`\x9c.\xf7\xef\xab\x1d~X\xf3\x07=\xb6\x17\x00\xa1\xe3?\x84\xa0\x18N\x90\x14\xcd\xb0\x1c/\x10\x8a\xc4\x12\xa9L\xaeP\xaa\xd4\x1a\xadNo0\x9a\xcc\x16\xab\xcd\xeep\xba\xdc\x1e\xaf\xcf\x0f \x82b8AR4\xc3r\xbc J\xb2\xa2j\xbf}\xf8\x9f\xd0\x0d\xd3\xb2\x1d\xd7\xf3\x830\x8a\x934\xcb\x8b\xb2\xaa\x9b\xb6\xeb\x87q\x9a\x97u\xdb\x8f\xf3\xba\x9f\xf7\xfbA\x08FP\x0c'H\x8afX\x8e\x17DIVTM7L\xcbv\\\xcf\x0f\xc2(N\xd2,/\xca\xaan\xda\xae\x1f\xc6i^\xd6m?\xce\xeb~\xde\xef\xf7\x87\x11\x14\xc3 \x92\xa2\x19\x96\xe3\x05Q\x92\x15U\xd3\x0d\xd3\xb2\x1d\xd7\xf3\x830\x8a\x934\xcb\x8b\xb2\xaa\x9b\xb6\xeb\x87q\x9a\x97u\xdb\x8f\xf3\xba\x9f\xf7\xf7\xff\x00b$\x9c\xabtV&g\xae\xcf\x96\xed\x7f\xc1r>\xbf<\xd9y\xf3\xe5?\x8f\x92\x19\x93\x98\x8d\xfbf\xfe{\x9d\xe7\xb4\xb7\x9d\xa3\x00\x89\xb5\x11\x9b\x9e%\xee\xb5\xef\xc7\xcc~\xdfZ\xfb\xfea\xc5zW\xbe\xbf\xbc\xf7\xdc\xf5\xdd2\x9f\x1d\xb5sv\x90\xef\xc0\xcc\xf2\x1d\x90\x99\xd9eW\x1d\xab\x00\x89\xb5\x91\xe0\x02\x00\x00\x00\x00@DDDD$\"\"\"\"bffff\xd6}\x03\x90X\x1b 
\x0e\xd3O\x01\x840\xc6\x18cDDDDD\xac\xb5\xd6Z\x9b6W\xf208B\xd6\xe7\x10I\x1b\xa5\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x90\x13\x00\x00\x00\x83.\x01H\xac\x8d\x04W\x08\x00\x00\x00\x00\x00\x00\n\xa2\xdf\x88\x13\xc7\xd09\xa0\x00\x89u\x84*\xa5\x94R*J^}\x80\x1e\x14\xc4:M\x94\xb4$I\x92$I\xd2\x0eF\x82\x8b\x99\x99\x99\x99y\xd1\x9f\x9e\xfb\xde\xf3\xc0_W\xcd\xc6\xfd\xc1q\xb9\x8f\x97\x84 \xf6\x97j\xfb}%L\x05\x11R\x0f \x17\x966\xe3\x83\x1f\xcc\xfd\xf9\xdd\x03\x9c\xa9\xb5\x8f[\x9dR\xc0\xa5\xd4w:\xa9\xa6^\xd7\x81\xacZ\xf8\xd6X\x0d\x17+;\x9c\x08,\xf8>p\xcd\x86\xcdL\xe9=\xda\xd6\x05Wj;\xe0{s\x1d\xf8HtA#;\x0fR\x8c\xf6\x910\xa1\x87\x90\xf1Gs\x1a \x12\x89\x17\x98\x98SK\xc25\x93\x7f^27\xc6tMi\xeb\xe1\x98\xa6^n\xbd\xba\xa8\x07\xf3\x7ff\xacH\x1b\xb5FY\xe4X\xa7,\xec\xf4\x15S\xd1\x08\xfa%\xf7\xdaq2V\x9a\xeb\xe8\x87\x83\xd7\xeb\x80\x9f\xdb\x08!9\x8d0\xf3\x94!qd\x00\x98\xabw\xe2-\xe6\x8bUm\xa0l\xe8\x84O\x93\xadp\xd4\xdc\x809e\xff\xf6\xf3\x12\x7f9\xbb+\xaa\xb4\xd5\xe9uD\xe5@`K\x1e\x05\xef\xf9\x1e\xb5p\xea\xbbK\xb5\x9d\xdc\xd9\xf26X|!\x0b\xe2\xca[a\xf2\x894\xc1\xc8\x00x\xaa\xb1\xaf\xc7d\xe8\xd8\xa6n\x88.k\x85\x1d\xa6\x1a\x0ck\x8ds\x8c\x84~\x97\x84)gxah\xdf=\x9fU\xb1N]\xe4$\xc7\xa8\x8d\xb8c\x14\x112\x9f\x05\x88\xa8P\x1f>}\x80\x08\xd5\xaf\x0f\x9f%U\xf8\xff\xee\xac)M\x9c\xdf\xe4\x0c6\xd6\xb80\xfc\xae\xb7\xe2\xae\xe7\x13\x01\xfd\xca/s@\xc2l^\x1c\x9b \xba\x96>.\xaa\xf4\x06\x17 c\x0c\x84g\xf4\x0b\x903I\x895\x0eWy\xef\x00\xb7'\xab\x96Cr%\x10\x11\xfd\xaf\x8f\x12\xcc\xb7\x8b\xbb\xfa!\xad\xb9\x97\x9b}:\xbd\x02o\xe1\xe5\xae\x12\xafp\xb5c\xaf\xa9\xc69Y\xa2\xed'\x02\xddRr\x14\xc1\x89\xd8qOW\x01\"z\xe5\xea\xab\xd6\xae\x05Q\xc6{D\xa2}\x8b\x88U\xd3\x82\x94E\xce\x03\xa8\x05\xd2|;P\x18\xa1eIy-\xe3\xa5l\x8b\xbe\xf5&lO\xaf\x02~\x9e\x0d\x84\xf8\xfd)\xd9zkE\xb2\xf5\x0e\xdb|\x13.\xd5tp>2\xad\xf9\xd9k_\xc4.Z\xae\xb0\xf6b\xcd\x0fyd\x97\xa7\x9b\xb9\x8e*\xe5\xd6\xa8\x8aP\x8e\x9b \x1e\x95q9\x02\x9c\x1c\xa1\xeb\xd2\xa1\xa3\xc8\xd2V\xf8\x82.\xf5\x10m\xf6\x99\x0cXz1\x1b\xaa\xfa\x06\x81i\xee\x05\x7f\x11m\x1f\xd9\x91T\xfb\x07w\xa9\xcc\x8d\xcdRL\x91\xebBH\x18\x96\x1c/u\xe6+\x91\x06\x17\xdd\xd4\x0b\x07\xb1\xb4\x90\xd1;\x1e\xb0_\x15Sq\xc3\xe5v&\xce\xb7s\x18\xa58rL\x15\xb2\xd5P\xe9<^\xd4\x0c 
\xd5\x0e\xee\xbdxK#\x11\xb3\xf31\xcf\xeb_>\xa9/;`\xaav!\xc8\x12wD\x89\x98^Ax\xd7&=\xe6\xff\xd5\xd3\x86r\x89\xd5v\xf7\\e;\xa4\xd6w\xc2d\x19\xdb>*L]\xbdAS\xe2\xb2v\x0dp\x85\xdd\x83\xae1B\xaa\xd8\xa3\xefB\xea\x9f~rm\xe6o\xce\x98\xb4~Rz\x13\x91\x19'\x11^\xb9\x8f\xaf\x98\xf9\xeb\xac\x18c\xceKQF%\x11h\xffMv)\xfc\xefWT\xeaL\xec\xa2E\x8a\x9a\x0e\xd7fm\xd9\xdd\xef\xb2\xeb\xef\xf8\x89\x994\x8c\xee\xf4a\x00[u\x15\x83\xbb3k=a\x0d\xb3\xda@\xdbr\xd3u\xb9\xb2\xdd\xb9^S\xd2\xfd\x97k\x85\xed\x0b\xcef\xf6J\xf3l\xce\xb7\xcfe\xbb\xbe`\xcb`\x1dU\x02\xafF\x1a\xcc\x84\xa7\x9d2|Ht\xf4\x18o\x19\xb570\xde\x0c\xeb\xb5\xe5\xf0N\\\xd6m\"a\xd7>\x10\xa2\x9d\xa1V\xbb\x0b\xc9\x0c\x87-\x0d\x10Q\xd4\x04\x01\x11\xbaC\xc3{\xd1I\xcb1\xab\x1d6\xa8\x8b\xe1X\xae\x0d\x8e\xe6\xd9\xee\x92\x10\xe5\x7f\x88\x18\xeb\x9b\x8fz/f\x97\xcc\x8f\xc9H\xcfv\x0c\xb8R\x1a\xba`\x13S\n\x11\xc5\xcd.$\xdf\xa6\x87\xb5\xae\x9c\xbbF]|#\xbe\xba\x03\xb6P\xa5\x90\xd2\xd8\x0d\xef\xc7[\n=\x9a\x81B\x94\xd7y\x0bDx\xc1\xab\xf5\x0e\x08FA\xfa\xf5)\x83zx\xaf\x97\x80\n\xdec\xac\x1a0\xdd\xe8\x05w\xcbE\xe1\xb0\x12\xa1\xe2\xa5G\xd3\xf4\xc6\xd9\x8c\x04\xd6\xde\x03!x\x83\x9d\x18\xad\xa8\x92f\x98~*\xed\x10\x91\x9a\xfd=i@\xa0G\xccu\x03[t\xe5`h\xe9\x83M\xf8\xbb6\xb5\xa0\xf3~\xa7\xd4|\x89\xea\xe7\xdc\x87\xfb>O-\x04Y\xbe\xdd9!\xc2\xb0\xf3\xe1\xe9\xee\xc6\x10\xa6Z1N\xc6\xec\x1e\xcd\xa7\xb6\x10\x99\xf6\xe5\xff\x19HD\xcc\xf6\xfd\x19\xf5N\xb5\xad\x13(\xd4\xf4\x0dti\x9f\x97H\x1b9Q\xce\xeaf\x9f\xcd4#\x11\x7fX\x18\x9bi\xcfh\xeb\x03\x15\xde>\xa5\xa1\x1b~\x1bi\xd0\xdcO\x01v@\xa1&\xe4\xd3\xc4\xdc\xa1\x95\x97\xf3\xdc\xc0/x:\xe3/\x1a/e:\xb6\xd3\xd5\xbeRv% C@\n\x94W1se\x92\x8c\xb5\x8f\x17R6\"b\x92\x02/Y\xba\x8a{nA\x8e\xe3'\x90\x14\xda]\xde\x12:\xd9_@\xd9\x90#\x83A\xa9\x85\x03\x1e\xc6?j\xee\x11\x8e\x17=\xf6\xc9\xf5\x1fJ\xcd\xc6\xc3\xab\xe5H\x16,5\xd7\xec\xa8\xf4&\"\xc1V\xec\xb6}x\x9b=\xec8_)\xfdUr}\x17\xe7\x0ey\x88\xa5\x1eN\x94\xb6\xc0\xe2\xb8\xebz\xf7\xcd\x02\xc4\xda\xcb\xc9u\x1dC\xa3\xc2\xe9+\xe4\xab\xe81\xcf\xac?\x13N\xa6ED\x146A`\x82\x19\xbe\xd6\x96\x81\xb9\xfdG\xf0\x12iNy\xba\xe1\x83\xa1\xc2O@\x85\x9e(jq!\x08\x08L\xca\x05\xda\xde\x0d\xa8\x11\nw\xa4F\x0b4q\x17j\x1c\x9c\xd0j\xe7\xc6\x89\x99ZO+\xe7%\xcf\xf1\x97\xb2\xe9\x01r\xd4\x03)\x1dH\x08\x8c\x1a\x11\xc0\xe4\x086>\xc4\xd2\x08{3\xaa\xc1-.\xd6\xae[\xdcNC\xe5\xd0\xd7\xfa\xf2\xeeO\x14y\xf5\xbf\x8f77\x07\x1b\xab\x87\xd6*\x8b\\\xdbq\xfaq\x0f\xaa\x1f'\xe5:\x08_\xf5!\x8a\xda\x94%\xe7\xb3\n\xab\xfa\xee@vG?\xf7%S\xca\xedH\xab\xbc\xb3\xeaj\xc1\xe0\x11$p\x18\xaa\xe4\xdf\xd5\xc5\xbd\xde\x12\xe6\x93\x91C\xc0g>\xfcZW\xe1\xca\xc3\xdc\x9a\xdb\x7f\x82\x8f\x13s\xb81\x02\xea\xee\xa23&\x0e\xc1@e\xdf]\xee\xcdhc\x0f W/#\xe1\xaa\xb7\xb1\xce\xffL\xf6ctx{\xef\xed=\xebSAE@\x10EQ:\xd2{ -$$$\xa4\xf7d\xfb\xee|\xe7\xcc.\x1a @v\x130\x819\xbf\xdf%d2\xf7\xce\x9d\x99;\xe7\x7f\xce\xb9\xa7\x08xc1\xfeB\xeaD\xedJ\x88\x18\x06Q\x8c\xa1\xff\xdb\xa1\x10\x0e\xd2\xcfE\xd4\xbe\xa6V\x85\xd6N6'\x82\x83\xf5\x983\xa97\xa2\x8d\x1aT\xd9]\x90I\xa6s\x91\x92y\x9d\xef/\xc4?\x17m\x03\xb4j\xd0\xb7,?\x94s\x9cT\x7f\xd9\x95\x05\xe1Ux<\xf7\xa2\xc6\x06\x02\x0f@\xad\xf2\xfet\xb8\xba@\xab\x9a\x0c\xa3\xee\xdft\xe0>\x02\x97/\xe5\xd7$\x93L2\xc9$\x03\x88\x97\x14\xc2O\xb0:\xc6\xb1t\xde\xa7m\x0cF\xa7F\"\xce\xa8\x83\xd3#b\x7fE-\xe6\xec-@IQe\x14\x82\x0d_@\xa9\x88&`yU~U2\xc9$\x93L\xe7;\x80\x08\xc2[\xb0\x10x\x10X<6\xba+n\xe9\x99\x8c\x04\x9d\xf2\x8f?{\xa8\xdd\xda= 
\x0f/\xdf\x8bE;r\x81`\xfd+\x048\x9b\xe8\xf0\n\xf9u\xc9$\x93L2\x9d\xbf\x00\xd2\x0e\x1e\xcf\xddp\xb8\xf0/\x02\x8f\x7f\x0fh\x83JB\x8c\xf7v\x17`\xfe\x9e|\x98u\x1a\\\xdf3\x05\xa3\xe3\x83\xf1\xbf\x89=1\xd5\xed\xc6\xca\xddy@h\xd0\xe7\xa4\x85\xa4\xc8\xafK&\x99d\x92\xa9\xe5\x90\xe2,_\xef\xef\xa8\xb1\xa2g\xbb\x18\xdc\xdb;\x05\x15.\xe0!\xd24\xee\x9c\xbf\x19%\x16\x076\x1d\xa9\xc0\x84Yk\xf0\xf9\xde\"D\x10\xb4=:\xa8=\xb4!F\xc0\xeeL\xa6\xbe#\xe5\xd7%\x93L2\xc9t\xbe\x02\x88\xe4m%`rj4\xa2\xd4\x02>\xdf\x9d\x8f\x0f\xd7\x1c\xc0;\x17\xf7\xc4\xaak\x06a\xcb\xcd\xc3qQZ\x14n\\\xb4\x1d\x9b\xca\xac\xe8\x1bm\xc2\xa0\x94H\xc0\xea\xe0\xde\xa3\xe5\xd7%\x93L2\xc9t~\x02\x88\x16\x1eO<\x0c\x1a\xa4\x04\xeb\xa4\x03\x9fg\xe4\xa1Wz4n\xef\x16\x8f\xadE\xb5\xc8\xad\xb2\xe1\x8d\xd1\xdd\xa0\xd3(1{\xdf\x11\x98\x95@G\xb3\xde\xeb\x9d\x05\xa44\xe1\xda4\x08\xd2\xa9\xa5\xe2\xaf\xf4<\x93I&\x99d\x92\x01$@\xfd\x83\xf7\xc8 \x0cD\xdf\x01\xb5B\x80\xc3\xe5\x91\xfe\xafQ\n\x92\x17V~\x8d\x15\x0e\xb7\x08\xad\xd2;5\xb7\xf8G\x7fO\x00\xd7\x9cN\xedG\xb8=\xd9t\xa1\xfd\xd4\x0e\xc2#f\xd31v\x0d\x1e*\xbf~\x99d\x92I\xa6\xd6\x01 \x0e(\x14\x87aq \xb3\xd2\"\x1d\xb8\xb5[2v\x1d*\xc1\x13k\x0e\">\xc4\x88\xb6\xa4m\xcc\xdc\x9b\x8f \xb5\x12\x97\xa6\xc7\xa2\xcc\x05\xec\xae\xa8c\xcf->=\xd3\x8fk\x05A!,\x84\xd3\xfd-*\xea\xc6\xd3\xcfh\x8d^\x03%\x077Y\x1d \xa8\xb4\xfc\x8d\x80d%\x8d\xfb\xb6\xbc\x04d\x92I&\x99\x02\xa3\xb3k\xce\x11\xf0\x1b1\xf6\x9es\x0e\x14\xe2\xb6\xee\xc9\xb8\xbcC4\xb2.\xec\x8cg\x96\xee\xc6\x97\xdbs\xf1\xf2\xd8n\xb8\xac],F%E\xa0C\x98\x0e\xbf\x1e\xae\xc4\xba\xec\x12\xc0\xa0\x85\xa4I4\x8e\xd4t\x8d\x0d\xa8\xb1ud\xe0\xb9\xbc_\x1a\xc6\xa4F!\xc6\xa8%EDDn\xb5\x15\xf3\xf6\x17`IF>\xa0U\xdd\x05\xbd&\x96\xc0d\x9a\xbc\x14d\x92I&\x99Z2\x80\x00o!X\xff\xc0\xde\x83\xc5xf\xdd\x01\xbc}A{<:0\x15\xdd\xa3C\xf0#1\xf4\x12\xab\x03\xfdb\xccl\xe7\xc2\xf6\xd2:<\xb9j/\\u6\xc0l\\\x07Q\\\xdf8\x90\x12f\xa1\xd6\xd6Q\xa5U\xe1\xbdq\xddqe\xa7X\x18\xe9p\x0d5\x8e61 \x14\x97w\x8c\xc3\xeb\xb1\xa1xny\x06\xa7\x1a\x99J@r\x0f]\xf2My9\xc8$\x93L2\xb5\\\x00\xc9#\x06\xff\x18i\x14O\x7f\xb8z\x1f\x94\xa4\x11\xdc\xd5;\x15\xd3S\xc3q\x19\xb5j\x0fPbs!R\xa7\xc2\xb3\xeb\x0ez\xb6l\xcfU >\x8c7\xd1\xefl\xe4\xf8\xbd\xe1rO\xe5 \xc5\x17Fw\xc5\xcd\x04\x1e\x87\xadn<\xb1)\x0b\xf3\xf7\x1e\x81Z\xa9\xc4\x8d=\x93qC\xb7D<=(\x15Ev'>^\xb9\x07\xd0\xa8^\xa3\xbe\x1fS\xab\x95\x97\x84L2\xc9$S\xcb\x04\x10\x06\x83g\xa0S\xa5C\xc05\xef\x91\x86\xb1\xe8P F\xb5\x89D|\x90\x1e\xd56'r*\xeb\xc4\x07\x07\xa6\xe3\xae\x1e\x89\xc27\x19ybM\x95E\x80I\x7f-\xf5\xdb\xd2\x88\xd1o#\xed\x03\x83:'\xe0\xfaN\xf1(\xb4\x8b\xb8\xf9\x97\x1d\xf8\x85\xb4\x9b+z$K\xe3\xdf\xff\xe3V\xe4\xd29/\x0eM\xc7=\xbd\xda\xe0\xc7\x83E(,\xa9V\x10\xa8]\xee\x03\x11\x99d\x92I&\x99Z$\x800y\xc4kI\xea\xdf \xb5\xf1\xf1\xec\xc2*\xd3\x7fs\xcbh&J>\x0eT\xd5 5\x80\xfb\x97\x89\xdd\x15\xcf\x8e\xe8$\xfc\xfd\xbb\x0d \xad\xe2\xefP*\x96Q\xcf\xdf\xe1\xb5D\x15\x9e\x04\x9cz\xf1\xbe\xc7\xd8\xe4\x08\x84\xd1\x9d=\xb1%\x07\xbf\xec\xca\xc3\x82k\x86`|R\xa8\xd4\xf1\x99\xc4p<\xb6t7F\xd0\xcf\x8bS\xc20\")\x02\xb3\xf2\xcay\x9fe\xa8\x1f\x002\x9cZWj\x9cA8\x9f\xda6jkO2'(\x94\n\xc91\xc0-\x8a\xf2\x8a\x93\xe9\x9c%\xb5B\x81j\x87K\xca@\x0d\xbdF~ 2\x80\x9cQM\xe4\x15\xfa\xf7\x0b\x18\xb5S\xa9]@\xbf\x87\x11\xf3\xd7\xc2\xa8\x19\xb2x\xeb!\xc5\x87mc\xbd9\xd4^;O\xd7,\x83v7x\xe3\x82\x92\x1bX\xc3u\xd4\xaa}-\x8b\xdar\xdf\xf1\x9e\xd4\xde\xf1\xe3:?P{Af\x11\x8d\xa7\n\xbb\x13}\xa3C\xa0\n\xd1\xc3\xc5\x9e\x96aA\xf4M\xfa\xed}\xaf\xf0\xad\xef\xa8\xb39u\xb0y\x1d(\xa0\xb6\x8bZ\x06\xb5=g\xf0zm|\xdf\xfd_AVj\xa5>\xc1\xf7\x08\xbc\xde\xade\xad\x0f@\xbcTD\xed=\xa9 
>\xae\xaaQ\xcd\x82\xc51\xe3\xa1\xe5\x19\xc2\xa5\xa9\x11\x9e\x8fGuR\xb4\xcf.\x86XP\x01el\xa8\x8eA\xc0Z^\xdb\x1bjeo\x18u3h\x81\xde\x0e\xde\xfb`\xe6\xeet\xc5\xa3\xb4\x06\xb9\xd56\xd4\xb8\xd9M8 qA:X\xe9\xff!\x04,Q\x04,G,\x0e\x94\xd6\xda\x90h\xf2\x063\xda\xdd\x1e\xf8\x02S\x9c\xa7^\xd6\xc2l\x02\xb0i\xa8\xb5C\x17aB\xef\xd4H\x98i\xcc2\xab\x03\xeb\x0b*!\x96\xd7u\x87I?\x8bPj\x08iRw\xfd\xd1O\xa3\x82\xa3\xac\x06\xef\xef\xc8\xc5\x17\xf1\xdd\x1b*\x91\xd0\x9b\xda ?\x9f[\xdey\xc6\x9bB\xa8]Em*\xb5\x81\xd4t\x8d\xec\xb7\xd3\x076G\x81\xc7\x9f\xe7\\\x06\x99\xfc\xfb\x98IH\xea\x12i\xc2wS\xfbc\xda7k\xe1\xaa\xa8\x05B\x8d^\xcbB\xe3\x89\xbf\x90)-\xe0v\xf6Q[Cm\xa1\xaf9\x9aq\xec\x8e\xd4\x1el!\xaf\xad\xce\x07\x96\x1b\xa8\xfd\x02\xaf\xb7\xab\xfb\xaf\x06\x10\x96\x1eb\xa9q\xc0\x07\x07\xee5\xbe\x8a\x12\x9b\xb7B\x0c\x93*\xf3+\x0c7/\xdd\xe3\xfe~\\\x17<>\xac\x03\x0e\x16T\x8a\x7f\xeb\x91\xe2\xd1\xaa\x14\xc2\x86\xc2J\xc5\xb3\xab\xf7\xa1\xa6\xd6\xda\x0d\x06\xed\xef\xa8\xb6\xb2\x99\x0bZZ\xac\xa3\x06\xb4\xf5\x0c\x8a\x0f\x15\x8eX\x9c\xc2\x94\xb61\xd8E\x80\xf2\xc4\x8a\xbd\xb8\xa8],*IB\xbar\xdeF\xc4\x98\xf4\xb8\xaac\x1c\xca\x0826\x95\xd4x\x97\xacWZ=\x19x|\x8b:\xfb4\xfe\x10n\x1d\xda\x01Wt\x8eG\x07\x92\xae\x824\x02i\x14\x1e\xe9\x1a\x1fo\xcb\xc57[\xe9V=\xea;I\x0b\xb2\x93\xa6t\xdfQ\x13\x16\x08\xc4~\xcb*\xc6\xd6\xa2*$\x93tVn=\x06\xabJ\x03x\xbe\xc5\xe7\x11_b\x01\xe1)j\x91\x01\n(\xf5%\xaf@\xfb\xca\xd4\x18fB\xd2QV\x95\x05\xc3\xe2\xcd\xf8n\xc6@L\xfdz-\xdc\x1c\xc7\xe5?\x88\x1c\xa6\x96\xf8\x17\xdfN{_\xbb\xde'\xb0\xb1e\xe1C\xf8\x17\x8fv2jI\xce:\xec\xa4\xda\xc7\xd7\xee\xa0\x96C\xedSj\xec\x95Z\xd9\x18u\xb19\x89'\xb0\x8a\x98y\x0el\xcem\xb0\xbb\xf6\xd3\xc2\xc9\"\x06\xfd\x19\x1d\x1f\xd0\xc81\xd86t\x0d\x81\x08\xe6o9\xa4\xfc`w\x81\xe7\x9f=\x93\xf1\xf4\x98\xaeB\x9a\xd9\xa0\x8c1h\x15\xf7\xf7L\xc4g\x13{\x91\xce@@YV\x8bN\x89\xe1xpT\x17\xfc~\xf5\x10,\x9c\xda[h\x1bj\x14Km\x0e(I\xab)&\x8dc\xee\xfe\x02\\5w\x03\xc6\xcdZCZ\x83\x06_O\xec\x89\xb4 5\x16\x1f.\xc3\x9a\x03\x85\xf4\x08\xa58\x93oO2\x9f\xc9p\xb8\xa6\x834\x95\xa7\xc7t\xc3;#;`P\x8c \x07HE\xffao\x11\xf2H\xea\x1a\x15\x1f\x82O&t\xc5]CI\xb0`pp{\xee=\xe6~I\x0bA\xb5\x05+\x0e\x97#\\'\xdb\x86\xfd\xa0\xaf}\x1ajd\x80\xfd=\xf2#<\xbb\xa4\xa0on\x07 e\xc3b D.\x1f\x08%\xaf\xf7\nKk\xafN\x98@\xed~j\xfb\xc1\xa1\x08@\xc49\xfc\n\xd9,\xfc\xa4\xef^\xaf9[\x1aH;IJ\xb78\xbaK\x89\x0fM:\xa9\x8c+\x9b\x87\xecU\x96Db\xa8\xd7J\x9eTJ\xc5\xf3$\x91?|\xda\xd1D\xcc\x81N\xf5=jmS\xde\xdezH\x18\x9d\x12\x892\x02\x02\xbb\xc7#)\x0bk \x1c\xbaE\x98\xf0\x001\xf4\x18\x95\x02\xb7\xf7I\xe6dW\x9e\xadeV\xcfC\xab2\xc5Y\xdbr\x14i\xb1!\xf8\xfc\xe2^\xb8\xbc}\x14\xbaF\x05\xe3\xf7\x9c\x12hUJ\x8c!\xad$E\xa7\xc0\xb6J;\x1e]\xba\x9b+ \x02z\xc3\x1a\x9a\xd7\xe6\x93(\xd4/p\xd5\xc4\xa9\x03\xda\xe2\xc1>I(w\x88xi]&\xde\xdd\x94\x8d\x10\x8d\x02\xb5.\x0f\x1e\x1a\xdc\x0ew\xf7J\xc13C\xd3\xb1\xa3\xac\x06+w\x92\x00e6\xfc\x9bz\x8f\xa9\xa7Y!\xa7\xc6\nA\xdeGo,\xb1*=^~\x0c\xad\x8b\x84\xa3 BZ\xf9\xf083f\xcf\x18\x80i_\xaf\x83\x87A$\xd4\xe0\xaf&\xd2\x12o\xefnx\xcd\xa9,$~v\x0e\xbfJ\x16\xda>\xa7\xd6\xd7w\xcfg\x0c@\x12i\xc5lA\xa5\xc5(\x18\xb4\xb8\xf3\x82\x8e\x18L\x1aA,I\xf5\x0c 
\xfb\xca\xeb\xf0MF>~\xdfw\x84=3\xfe\x0f:\xb5\x99\x16\xd1\x1d\x8d0e}M:\xf1\x14%\xadF'\x01\x87\x07G-M\x90\xbc\x99\nI\xf2\x7fi@\x1b\xb1\x8a\xce\xfc:\xa3\xc03w_\xa1\xf0[n\xa9\xcaF\xd7#\xa0Bny-\xae\xa4\xeb\xffk`\xba\x14\x9cxg\x8fDi\x8c|\x8b\x0b_\xee+\xc1\xa3+\xf6\"\xa7\xa0\xd2\xab^\x8b\xe2\x95'Uc\x1d\xee\x0eJ\xd2\x86\xae\xeb\x14\x0f\xd6\x1d^\xdc\x98\x85\xd7\x96g\xe0\xf1\xd1]1,%\x02s\xe8\xde\x1e\xfbi;\xdd\x96\x12\xf7\xd35n\xe8\x92\x80\x95\xfb\x0bY\x0b\x19E\xf3\x08\x85w\x83N\xdaL?DZH\xa5\xc3)\xa9\xfa.\x8f\x8c$\xa7\xa0\x17e\xf0h\xdd\\V\xe9\x03\x91\x0b\xe2B%\x10\x99\xfe\xf5Z\x02\x91\x80\xccY-\x91B}f\x9e\xe1>\x13\xd7\xb9L\xbc\x9f\xab\xf4Y\x97\xce\x00\x80py\xda*\x8b1*<\x08\x9f\x92\xc4?6\xc9,]\xad\x98=\xf9\xd4$\x82\xc7\x87\xe0\xb2\xf6\xb1xn\xed\x01\xbc\xb5j/\x9f\x7f;\xd4\xca\xa5\xa4e|w\x9a\x91\xeb\x98\xe9\xf2\xc6\x9c\x93\x80\xc0\xacU\xa1\xd8b\xa7\xc5)H\xdeTI&=\x9e\xdc\x94\xe3\x99\xb5\xe5\x10\xf6\x97T\xab%M\xc2\xa8\x03\xc2\x8c\x1b!\x08\xdbh\x91\xde\xbczo\x01V\xe7U`hr\x04\x12\x83u\x12\xd3\xdeO\x8bx\xdb\xa1Ri\xcf\x84\x16\xb3\x95\xc0c\xbc\xcf\xee\xd7\x10\xf5\x82\xdd\x894\xea\xdf>,\x08\xbb\xab\xecxcs\x16\xee\xb8\xa0\x13\x9e\x1a\x98\x8a=\xd5\x0e\xbc7\xb2#\xf2\xacN<\xb7\xfe \xae\xee\x18\x8f\x8et^\x18=\x8b\xf2\xb2\x1a\x81\x00\x937\xcb\xd6\x1c\x1d\xccN\xda\x8a\x8c\x1b\xa7%v\x8f~P~\x0c\xe7\x889\x8b@dD|(\xbe\xbd|\xa0\x04\"\"{g\x99\x8d\x81xg\xb5D\xba\x8eZgj\x17\xe2\xdc\x0eB\xe6}H\x0e\xa1\xf8\xea\x84w\xdc\xc4\x81/\x83\xcd\xd9E\xa5S\xe3\xbd =1\x9e\xc0#\xa3\xd2\x8e\xab\x17\xedD\xefO\x96c\xf0\xa7\xab\xf1\xfe\xae\x02\x18\xd4\x02^\x1a\xde\x0e\x97\xf4N\x05\x9b\x83HFy\xbf\x11c/\x83Ac-.\xaa\xc2\xab$\xf53\xdf\xd5\x13\xa0\xb41\x1b\xd0/\xd2\x88\x05\x99Ex\xea\x87\xad\xca\xfd\xf9\xe5J\x18\xb5V\x84\x9bfA\xab\x1aG\xa7\xf5#P\xb8\x85p\xe6B\x02\x88\xdfy\x13{UF\x1ef\xae\xcd\xc4\xb7\xc4\xe4\xb7\x1d,f\x97[ \xc40\x97\xfe\xd6\x05\x7f\xbay6D&\xeeo\xd6(\xa58\x0e\xd6z<\x0e7\xd2B\xf4\xc8\xb1\xb8QL\xbfs\xc2\xc7! 
\xa1\xb08\xe9w\xab\x0bF:/D\xad<*e\x1d\xe3-\xc4\x9a\x87 \xc8\x8c\xe54$\xa7\x949\xd7\xccY%5\xb8\x90Ad\xc6@\x08\xbc\x1fXI\x9a\x88Bq\xae\xdc&\x9bx\xd8\x83)\xe8\x1c\x7f\x9d\x9f5t\x8fM}\x8b7\x83\x98\xe8\xd4n\xc9\x98\x98\x1cJ\x12\xb9\x1d3\x16l\xc6\xec\x8c|LJ\x8fA3\xd4\xcf\xc7\x00\x90(\x0e!mg\x08\x81\xc5\xbf`6\xbc)5\x93\xee.(\x15]\xe9o\xec\x12\x9au\x9a9\x14\xb19,\xaf\xce\x81R\xd22\xc2\x08(\xa7uN@(\xcd)D\xabD\xd7(\x13\x0ctc\x1fm\xcbAr\xb0\x1e\xa9\xc1\x1a\xe9\xbc\"\xde\x07RI\x8f\xf6\x18/\x06\x87\x9b5\x10\x112\x86\x9c\x94\xb8^\xcb\x88f\x1c/M~\xa4-\x04DH\x13\x19I\x82\xd67\x97\x0f8\x17A\xa4\xa3OB?\x97\x89Y\xf8\x03\xcdi\xc2\xd2\x107\xec\xc1L|dB\x98\xb4?\xf0\x95>\xfc{\x13^\xee:\x020\xcb\x91\xe2j\xc3\xda#\x15\x18\xdf&\x12\xcf\x0dm/\xb9\xe1\xde\xb34\x03A\xa4Q,\xcf)Cfy\x0d\xe6O\x1f\x00#}\x0f\x1b\x8b*a)\xaba\xd7]\xce\xdf\xb8\xbb\xde\xbd \x88>\x1c\x8e\xd6\x95]\x83NJ\x97\x06\xd0\x87\xfd\xf3\xffGm+5\x9b\x8fg\xf1rL\xa1\x96+?\xd2\x96\xa5\x89\x8c\"^\xf15\x81\xc8\xe5\xdf\xac\xf5\x82\x88\xd9\x80s\xc4\xae\xcb\xf1Fs\x03\\\xc3\xad\x858\xf8\xf1\x89\xe6\x02\x103\xdc\xa2 \x1a5BI2\xe7@\x8f\xa5\xb9e\x98\xd8!\x0e\xed\x08'\x8d\xe47b\xb4\x83\xa3\x83\x10\xc7\xa9\xd9\xbd\x0b\xe6\xf4\xae\x99\x82p;\xa7E\x18\x9d\x12\xe1I6\xa8\x15\xf7\x1e(\xf4\xc0\xe1R \xd2\xc4\x11\x94\xcf\xd38\xef\x9c\xe1\x07VD\x1a\xcc\nT[\xc7=\xbf>\x13\xc3i\xf1+\xb9\x08\x16i\x12\xec\xef\x9eYP\x85\xa4P\x03\xbe\xbfl\x80\x94*e_\xad\x13\xefo\xce\xf6~2\na&D\xd8\xff\x18\xc9\xe3\x91bGB\xe8y\xb1)L\xa6\x06i\xa0\xdf\xef\xc7[\x18\xec\x80\xfc\xe8Z\x07\x88\xec$Md4 \x91__6\x103\xbe]G b9\x97@\xe4\x12x\xf7\x0b\xde?\x03c\xb35c\x96\x9f}\xe8\xc1Jr<\xbb!'\xa1\xe9\xb15\x1c\xdf\xc7\xf1\"\x9b\x9a\x03@,\xb4*\x9cp{\xb4\xecm\xc5\x03\xb1\xe7\xd5\x9e\xd2\x1a/\xba\x10x\xf0\x9e\xc5v\xd2H,v'RB\xf4R\xa8\xb7\x857\xaf\xbd6\x9c\xd3sQ\xb7g\x02\x9b\xb0\xc6$G\n\xe5NQ\\]P\xa9\x807M\xc9?q\xf2\xb8\x8d\xe6ehnO\x1f\xb6;\xb5\x0b\x0b\x12\xf5j\xa5\xc2E\x93o\xcb\xa01\xa5\x8f\x94\xf7'\xca\xa0\x92\"qxC\xfd\xd6E\xdb\x91u\xb8\xdc\xbbI(\x8a\xc7 5\xab\xeb\xa9!\x068EY\xff8\x05\xb5\xf5\xf3\xfc\x07d\xf0h] \xc2\x9b\x80;I\x13\x19C 2sz\x7f\\5{=<\xbc/\xca\x16\x85\xa6\x81\x08\x7fo\xd9\xa79G\xe7\xd3N9\xf0+\x9eZ\x1c\xbc\x9b\xe0]\x80f\xb3,s\xdc\x12\xa7bi\xee`_\x0ef\xbc\xa3 \xfd\xd5>\xe6\xcfIc\xefi\xc28\x83\x9b\x0b@j\xa1R\x1cB\x95\xa5KFy\xad\xa4P<\xd4\xbf\xad\x94\xc2\xe0\x9a\xef\xb7\xe2\x91\x0b:J\xdeRW\xff\xb4\x1d]\xa3C01-\x12\xd95Nla\x80\xe1M\xe6\xfa\xe6\x9dc\x89m\xe0\x13Hz\xbf\x00\x16\x87\xde\x1ci\xf2\\\x9c\x1a)\xfc\x98]*\x16\x17W\xb3g\x13\x9b)\x16\x9eb^\xc3\xe0\x8d\xbfH\xf7-\x18NI\xb1\x9a\xda<\x1cu\xa9m\x1c\x85\xd3b_\x8c\xd2\x9a\xa0n\x1d\xe3=\xbfL\xee\x81e\x855\x9eg\x97e\x08\xc3\xd2\xa2\x85\xa1\x89\xe10iT8P^\x87\x1d\xc5Uxwk\x0e\x0eqRF\x02J\x08\xe2t\x9a\x7f\xd1\x1f_M\xad\x1d!\x91\xc1\x92\x19\x8f=\xc9d:)\x85\xf9q.#\xf1/\xf2#k\xa5 B|`|J8>$A\xec\xe6y\x1b\xd9E\xf1(_\x08\x94\xd8\xf9\xa2*\xc0\xbe\x9c\x9bj\x14\xd8)\xc8\xfb\xb3\xa9\xf4 \xb5\x8b\x9b\xf9\xd155\x02\x99\xe5\xf7\xb5\xbe\xf6\x11\xb5E>\xcd\xc4_j\xd7\\&,H\x8c\\\xa9\xe82sw\x1e\xae\xed\x9c\x88\x89m\xc2\xf0\xfe\xc5\xbdp\xf7\xfcMH%\xd0\xb8\xbaS\x02\xa6\xb7\x8b\xc1\xf5]\x12\x11\xa6\x12\xf0\x9f\x83E8\x94[*\xa5\xf6 
Zp\xdcXq\xa4\xe3~\x00\x87k\"3\\\xc9]\xa9\xda\x82\xc1\xdd\x12\xc5\x18\xad\x02\xf3\x0f\x16\x8a\xecR\x0b\xa3\xf6[\x92\xee-\x0dJ\xaf\\\xa2\xd6\xe9\xba\x887\xf6\xffHt\xc8\xb9\xae4\xaa\xeb\xa9\xdfK\xb4z\x9f\xa3\xe3\x8dKB\xa8\x10~Cy]Ph\x8c\xd9\xbdpJo\xc1&Bq\xc7/;\xc4\xbd\xbb\xf3\x84%\xd9%0\x85\x1a\x11B\x00Rju\xc0\xc6\xfe\xed|\xadP\x03\xcf\xeb\x86c]\x94\x05.\xa3\x8bk\x87v@\x87p#id5\x92\x8f\xbcL'\xe7/~\x98\xafJ\xe4G\xd6\x9a5\x91Z\\\xd5>\n\xf3\xba%\xe2\xa7\xb5\x99$\x83\x06{S\xff\x04F\xe9\xf5%c?\x895\x97\xff\xfa\x1a\x0b\xa0O\xa2i\xce\x1c\x13\xe0M\xde\xb9\xb5\x85\xbe\x02N\x189\xc8\xa7\xbdk\xfd\xec\x1b\xdd\x9c\x00\xf2:\x81\xc1C\x87\x0e\x97\xe1\x81e\xbb\xf1\xc9\xf8\xee\xb8\xadK,\xba\xc6\x8cDa\x8d\x15\x9cN\xe4U\xd2D\x18\x00~\xca\xab\xc2\xf3+\xf6x=/\x14\x8aOi\xa1\x14\x1d\xf3\xf2\x15\xc2ZTY\xc3Y\n\x19\xdb# }\xa2\xcdP\xd2b\x1a\x98\x10\xa68T\xeb\xc4OY\xc5\xde\xd8\x0d\x01\xdd\x89A\xb3\x17W\xfd\x1cR\xfdhA.'\xc0\xd1C\xad\xc2\x05\x04X]\"L\xd0\x92\x06TB\x12\xffr\x02\xad\xdc\xbc\xf2p\x02\x91W\xa1U\xf7\"U\xf9\xaaS\xdc\x13\x07F\xbe\x8e\x1a[\x0fhU\xe2\xac)\xbd\x91hP)F\xcf\xdf&\xee\xcd,\x12|\x05\xae\xb2jj\xac\xa95\x0cNJ%\xab\xdfYt\xfd\x054\xaf7P?\xa6D\xd2>l\x08\x89\x0b\xc5\xb5\x9d\xe2q\x98Tu\x85\x0c\x1e\xa7\xd3*\x1aK\xac\x89\xcaQ5\xad\x18D\x18+\n\xea\\\xb8\x95\x84\xcf\x9fv\xe7C\x12\x105\x7fu~W\xac\x847\xae\xe3_hZ6\xe6\x97\xa8\x8dn\xc1\xaf\x80\xf3\x8d\xbdL\xedQ?\xfb\x19\x9b\x13@\x8ai%\xdc\x0c\x93\xfe\xbf\x0b\xb6\x1e\xc2\x0c\x87\x0b\xf7\xf5KC\xaf83\x12\x0cZ\x1c\xaa\xb6\xd2\x02\xb1\xe3\xeb\xbd\xe5xly\x06j T\x10b(&\x06~\xe71\xb69\x85\xb0\x02\x95\x96\xf0\x98\xf0 \xbc5\xb6\x1b\x86%\x86#R#H\xd9\xef\xb2*lB\x91\xcd\x89W\x86w\x12\x1e\\\xba\x0buU\xd6\xee\x08\xd1\xffJc\xf4\xf4\xf5\x8f\xa4\xfe\xabPQ\xa7I\x8e1\xe3\x95\x91\x9d1\x90\x98|\x84N!\xdd\\\x1daNN\xb5\x0d\x1fn\xcb\xc1\xbbk\xf6\xb3\x9d\xf5o\xd0i\x8e\xd0\xea=1XM\x10^\x82\xc7\xf3w\xd2<\xb4py\xf0\xd2\xf4\xfe\x9e\xb1\xf1!\xca\xbf\xaf9\x88\xdf6g R\x14\xad\x80\xe9\xf4\xcfw\x04D\xdd}\xcf\x8f\xa7\x99q\xd2\xcf\x84\xe6~\x0d\x01Z\x9aY\x8fm\xadO\xfbP\xf9TV\x96\xee\x0c\xf5\x8e\xb3\x1d\x8e\x1d\x19\xf2\xd0\xbc\x99\x81\xdd\x90\xe9\xfc\x01\x11\xfa\x14\xf2j\xad\x18\x18\x1b\x8aK\xe8\x1b\x99\xb7\xe1`K\x00\x90\xa3\xc4\xd9\x10hB\x98\x1d`\xffQ>\xd3Xv\x0b~\x05\xdf\x04\x00 \x8a\xe6\x04\x10\x96\x01?\x82J\x19C \xf2\xf4o\xbb\xf2\xf0\x1bI\xfb\x03\xc3M\x98\xd4%A\xbc\xb3{\xa2\xe7\xc6\xc5\xbb=\xb3W\xecQ#X\xcf\xe0\x91C\x0c|$\xeagF\x15\xf08I\xe9\xb1\x9c;\xeb\xabK\xfbb@\x8c \x15v\x0f\xbe\xc9,\x85\x85\x98x\x9f\xe8\x10\x84\xeb\xd4\xb8\x8e4\x9b\x08\xa3\x16W~\xb7\x1en\xab\x93\xb5\x03N!\xf0)\xad\xc2\x0fHs\xd1\xc4E\x85`\xf6\xd4~\xe8\x1c\xaeG\xa5C\xc4/9\x15(&\xe9\xbf7\x81I\x9b`\x1d^\x1c\xd1\x1eZZ\x9c\xafq\xfe+\xb5\xea\x01\x02\x9d\xef|*\xaf\xc2\x07b\x1f\x13\xb3\xbf\x82m\xb1\xa9\xa4JO\xeb\xd5\xc6\xf3@\xb7x\xc5\x93\x1b\x0e\xe1\xed_v\x90\x96\xa1\xe7t$\x8f\x12\xf0\x1c5Om?\xad\x88E\xd7\x0f\x8e5\xe3\xda\xce\xf1\x12\x88\x9da\xed\x83=$:\xc0\x9b\xf9\xf8T\x929\x1b\x9a\xebN\xa1^\xf3\xf3 \x90\x94\xb6\xe6\x05\x10\xe9S\x15\x9f!\xee\xb8\x8d$\xf4\xc7\xe0p\xf5[K\xda\x88\xa0\xd7\x08\x0f\xf5J\x12\xcbl\x0e'I\x15j\x04\xe9Xk\xe0\xe0\xbd\x9ac\xb4\x0f\x0f\xee\x81\xc3\x8d\xbbG\xa6c0\x81G\xa1\xc5\x8d\xe7IS\xf8p\xd5^\xbc0\xa5\x0f\xac.7\x96\x10(\x8dN\x8e\x90*\x08^\xd77\x0d\x1f\xaf\xc8\xe08\x92\xcb\xa4\x85\xee\xf6\\\xca\xfb\x0f\x8f\x0f\xef\x80\xae\x04\x1ey\xb5N<\xbdz\x1f\xbe!\x95X\xa7V@\xa7R\xe2\xe5Q]0&%\x12\xffG\xda\xd1\xca\xfcrl\xda\x9d\xc7\x9eR\x9c@\xf1\xb0\xc4\xcc\x04AC\xc0\x11\xc3\xfb%\x0f\x8e\xe8\x88\x9bz\xa4@\xa7\x10\x14\xb9\x16\x17j\x9c.\x91\x83 
%\xff]Q\\\xed\x97\x92n%\xedch\x02\xda\x86\x9c\x15\xed\x83\x9f\xed{\x8d\xd6\x1c\x8f\xb3e\xfa\x88\xbd\xdb\xee\xf6IN\x8dc\x00@'_\xbb\xde\xa7\x89\xb1\xda\xff\xc5I\xce7\xf8\x807\xb41\xd2\xcdi\x88\xbdh\x0e\x9d\x86\x11\xa6\xfa@\xa4\xb9h\x81\xef\x9e\xfd\xd1\xe0\x9a\x02`\x13\xe1\xcd\xfe\xdaXz\xe58\x86ERO@ \xffn\x81w?\xc0_b\xc0\x1c\xe6\xa7-\xeb\x05\xfa.fI\xae\xedl\xa2n9\x1a\xc8Q\xe2\x9cW\x17\xc1\xbb\xc1\xee/]\xd9\xc2\x01\xc4\xed\x13t\xfc\x01\x90\xc2@?\xd8\xd3\xd1Bb\xb0\xfd\xa1V\xbe\xce\xa6\x1e\x9dW\x8aPF\x19\xb4:i\x83Y\x14\x97\x1d\x07\x1eL\xfd`w\x9a\x82\xa2\x82q\x11\x01\x84\x93d\xb5\x0f\xb6\xe7\xe0\xc3u\x99x\xfb\x92\xbe\xb8\x86\xeb\x9a\xd3\xc2\xbao\xc9n\xfc\x9c]\"\xa9\xbc\x93\xdbD\x82\x93\x1b\xc2\xe9f&w\x1f,v\xb4M\n\xc7\x05 \xe1\xa8s\x02\xcfR\xdf\xffm\xce\xc6\x9bc\xbb\xe2\xa7+\x06\xa2#iC\xd7\xcc\xdb\xc4\x91\xec\x08W\xd3*h\x17\xebu\x17\xb4;\x95\xb4`S\xa8%\x11\xe8\xc5pP\xd3\xe5\xbdR\xf0\xfc\xe0\xb6\xa4\x88\xb8$\xf3[N\x8d\x0dO\x0cl\x8bk\x87\xb4\x97\xb4 \xbf\xd4=\x9awH\x1ci\x1f\x1d\xe3\x91{v\xf6>\xfc\xd9?8\xbe8\x0e\x9b\x03\xb9,\xefk~\x80GC\xc4@\xc2A}?\xa0\xe1\xd4\x0e\x82\x0f<\x14'i\xfe\x92\xe2\x14M\x87\xe6/W\x90\xef\xc7\xb9\xcd\x01\\\xfe\xe6W:\xbe~\xc3O\xd4\x1e \xe0\xba\xff\x81\xff\x15\x01o\xf2\x1b<\x80\xc5\xd4\xfeOM\xfc\xe1\x07\xde\xe3d\x0fEe\x8b4\xf1^\x87\xc0\nJ\xc5\xf9\xa9A\x9emR\xe3X\xd3tchO\xf3k \xc7\xd2z\xe6\xf4V\x97Gdx\xd3\xab\x14\n\x9foMC>\xfe\xed8 b\xba\xd9\x88\x18\xa3\x16\x956\xb7\x04 \xd3\xfb\xa6\xe2\xae\x1e \xf8\xbd\xa8\x16\x83\xe2B\xd1/!\x0c\x1f\xed<\x8c\x9b\xbb%\"\xde\xa4Cl\x88\x11y\x85\x15\x11\x04V\xe3Yj\xe9\x16\x16\x84H\xbd\x1a\x99\x95\x16|\xb6+\x0f\x0f\x8f\xee\x8a\x9b\xbb\xc4!\xb3\xc6\x81\x8f&\xf6\xc2\x92C%\xb0\xb9\xdd\xd8Vn\xc1`\x1ao\xd6u\xc3|\xde\x1eB}-\n\xa9\x04|\xdb\xca\xac\xa8%\x00a\x86/\x95h\xb7\xbb\x85 \xa4\xbd|\xbe%\x9b\xd5\xebNRB\xab\xd3\x99%xl\x9a\xd7?\xe9>8fd[q\x8d\xaf\xdb\x19%\x97\x1f\xe7\xda\x8f\x93r\x174\xf3\\.\xf6i\x1a\xecw^q\x9cY\xa9\xea\x14\x1aHs\x92\x03\xcd_\x0f\xc4}\x86\xce=\x95\x19\xae\xa9\xe7?\xe73G\xf6\xf1s,\x8ee\x18\xda\xc8s#\x02\xd0XX\xf2\x9d\x12\xacV\xe2`U\x1dV\xe7\x97{\xeb\xa8\xb7L\xb7\x08\x9e\xeb\xab\x0cv\x01\xf4e\xb3\xe7\xc6\x16\n I\x01\x00\xc8\x923\xa5\x81\x1c\xa5j\x06\x10\x8b\xd3\xcd\xae\xafB\xa8V\xed\x92<\xaf\xc4\x93\xd7\x1cg\xe1\xbc>WV\xf9x\xadV\xa9D\x94\xc6\x9b}\xd7Y/\xc8H\xf0~*\x1b\xab\xf7\xe4\xe3\xee\x9e\xc9\x04(\xc0\x91Z\x1b\x8eT\xd7\xb1\xad\x94\x03\x04W\xf1\xfe\xc4\x81*+*\xe8b\xedI\x83\x18H\xda\xca\x83\xbf\xee\xc4\x81J+\xda\x87\x1b\xf1\xde\xa6l\xf4\xfax9\xcalN\xf4\x8f1aKQ5\xfa\xffg)\xfa\x7f\xb1\x1a\xfd\xbfZ\xe3m_\xfe\x8e\xfe\xef\xfe\x8a\x0fI{\xe9\x1f\x1d\x04\xb5\xb4\xdd\x01)\xf1a\x8cQ\x85\xd5\x85\x95R0 ]+\xbbQR-\xdf/\x81\xd1\x9b\x1b\x0fboY\x1d\"\x0d\x9a\x96&X\xf1^\x85 \xa7\xcbE\xd6t\xe2\xfc@\xff\x96\xf9\xf7_NlN\xbb9\x80~\\\xc25\xa6\x11\xe6\x9d\xe1~\x8e\xcb\xfb\n?\xb1\\\xc9\x02\xda\x02\xc9E\xdfsf\xc4\xd9\xe6#\xe67\xbf\x05\xd0\xaf'Z\xe6\x9d1?\xbe\xcf\xcf>O\x1d\x7f\xa0\xf9nL\x10\xc6\x12\x97\\\x85*k?N?\xd21IZ\xed\xa7 \xe6V\x9f6r\xfe\x03\xebNL\xf29\xb8\xdc\xe3%\x8f\x08\x97\xcfZ\xa3\xa25\xc2I\x0cUJ.\xc2\xc4\x11\x9a\x99\xc7\xf5\xfb'j\xac\x8a\xae)\x91\x989\xa97\xad(\x01y5V)\xf2zbj4V\xf7O\xc3\x17\xbf\xef\xe78\x90\xa7\x8f\x03\x10o\xd2A\x93\xee\xa6\xca\x8a\xba\xb0+\xe6l\xc4;\x17u\xc3\x90\xf80LO\x8f\xc4\x15\xd4xg\xb8\xcc!\xe2\xbb}\xc5\xb8\xfb\x97\x1dp\xda\x9c\x9coj/\x89\xf4\x1f\xfb\xfa\xdf\x88`\xc3\xd6\x0d\xfb\nq\xdd\x0f[\xf02i m\xcd:<\xdc/E\xfa3\xf7\xaf\xa2._d\x14\xe2\xef\\C\x84oI\xab\xfa\x8d\xfa\x1f\xbb\xe9\xe7\x11\x87\xc0\xa8\xfd\x86\x80n\xd2\x12b\xfaK\x0e\x14!B?\xc8\xf3\xc6\x90\xb6\xcab\x9b\x0b\xb3\xd8u8<\xe8}z>\x19>\xe0 
\xf3\xbd\x10N\xa7Q\xde\xe0c7jaa-dw\x1e^\x1c\xd2N\x8a\xccoe\x81\x84\xcd\xb5\x85s\xad\x0f@x3r\x91O\x03j\x88\xfa\xa2\xf1\xa9\x15\xf8\x83:\xd5\xe6\xa4\x94\xbbS\xc6\x8cc\x88\xbd g\x9cF\xabh\x88>\xf0I\xdf\xf5=\xcb\xae\xa06\xd6\xcfq\x9e\x83/\xdd\x08o)\xda=n,\xcb+\xf3\xa2I\xeb\xc8)\xc0\xf9\xfb\xd83\xce\xdf\xe2QI\xcdp\xedXj\x0f\x05\xd0O\xef\xe3U\xed\xe1\x8d\xeb\n\xa4\xf0\xd5p\x9c$ \xf2O\x00\x110\x19\x1c\x0c\xc69\xfa\xb5*\xf4L\x8b&i\xde y\"\x1d\xa8\xa8\xc3\xce\xdc2vM\xbd\x94@`\x02\xbdp\xce\xf5\xb2\xa4\x1e\x08\\\xcc\x0b\xe0\xaa\xce \x1e\xa3J\xa18\xeau$\xf2.\x99\xdb\x83\xab;\xc4\xe3kb\xcaN\xa7;\x85$\x7fv\xaf\xac\x1f\x8cb\xa3\x8b\x0cG\xa8q]^I\xb5q\xca\xec\xf5\xb8\xa4c\x9f\xce\xbf\xa0^\xffm\xb4\x1a\xef@\xb0\xfe\xbd\xc5t\x8d\xed\xa4\x12\xcf\xe8\x94\x80N\xe1F\xe8\x94J\x14[\xedX\x99S\x86\x1f2\xf2\xbc\x8b4X\xb7\x8f\xfaOn\xe0Y\xd8\xa4\xe3*\xc5p\xc4\x98\x9fG\xa5e\xe0\x0d\xf37{:_?\xcc=sT\x07\xe5\x01\x1aw\xd3\xee\xc3\x9c\xafg\x11\xdd\xd86\x02\xd9\xaet\xbe\x89\xaeM7\xab\xdc\x0bo\xf0\xd5\x9b\xa8\xefz\xe9\xd3B\xbe\xdcy\x187\xd0\x9c\"\xf4\x1a\xe9~Z0\x84\xac\x827 \x90]zs}\xf8\xab\xf4}\x00\xbc\xf8\xae\xf3IS\xfe\x12'm\xeb\xec\xfb\x00OU\xeb\xfc\x10\xb5\xe4F\x8e\xc9\x91\xf0C!\x93\xbf\xc4\x9a\xfb\xb7\xf0?\xaea.\xfeL\xb7\x1f\xdc\x909\xa3\x11\xcc\xf7\x0f\x97\xe20\xbd\x1a\xfb\xcb\xeb\xb0!\xbf\xc2k\xbej\x1d\xc4\x02\x10\x9b\xce{\xf9\xd9/\xa6\x19\xae\xcd\x1a\xc3\xf3g\xf9~9-\xd3\xf4S jG\x01\xa4\x171\xd7\xef97\x7f\xc7\xe4\x08<6\xb8\x1d\x06\x91\x16\x10gPJ<7\xaf\xce\x85\xd5$)<\xb1r\x1f\x0e\x1d\xa9\xd0\x12\xb3\xff\x8d\x98#\x9b\xa3\xdaH\x0c\xc1\xe5\xe9\x0e\xbbS\xb4\xb9<\x1e\x9b[T\x1cuYeWX\xbb\xcb\x0d\xab\xd3\x05\xa7[<\x9a(\x8d\x0b\xae\xcc\xcf\x0ei\x8f2\xab\xb3%\x96\xb5e\x0d\xea\xfaS\xd8x9\xa5\x03\xc7\xf1p\x90\xe0\xbb\xf0\x16\x96\xf1\x97\x06\xe3\xe4\x19\x98\xff\x14c\x1aO\xe7L9\xbb\xbf\x80\xfe\x06o\x9e&\x7f\xcc\x89\x03\xa8]\x03o\x9c\xcfW~\x9a\xc1\x98&\xd6\x97\xab\xd8|\xf5\xd9\xae|8*j\xbd\xe5\x0fZ\x0f\xe5\x04\x00 a\xadl}\x94\xf9x\xf4\xc38M,\x92\xca\xc7\xe9\xe7\x81\xa4\x81\x1e\xa9Q\x98}i_\xb45\xaa\x90iq\xe3\xd3]G\x88\x07+0\x92\x8e_\xd3>\x9a\xb4\x023\xa6\xcf\xdd\x88}\xac\x8d\xe85_r\x96YI\xac\xa7\xc5\x90\x14\x17*\x84hU*\xc2\x10\xc9L\xc3\xde\x15\xcc\x0e\xb4\x04\x02\xa1z\x8dh\xd2\xa9\xc5\x9a#\x15\nD\x98\xbe\x82B\xc1%h\x9f\xfe\xc3l\xe6\xddo\xe1\n\x87\xb9\xa4\xa1\xac!\x80\xe2=\x95\x02p\xf5W\x9d&\x8a\xfe\xdd(I\xc6\x1eq\xe5)\x8c-t\xc3\xe2\"b\xd87\xd2U\xc7\xd0\xb9\xe94F\x0d\xfd\x9e\xe1\xe3\xd8\x07\xe8\x18\x9b\xde8:\x94\x139n\xc0\x89\x81\x8dG\xc9E\xe7\x8e@X\xd0\xbe\x9cC%\xca\x8b\xe7ou/\xbf\xa4\xa7\xf8\xc1\xb8\xee\xf8\xbf\x9f\xb7\x8b\xfd\xdbD \xa3R\"a&M\xad\x82\x00j{q\xb5T\xd6\xb6\xac\xa4&\x96\x80g\x11\xcdw<\xcdg\xd1\x1fV \x02\x9aY{\x0bpS\xd7D\x984J\xd49[T\xca\xa7jj\xbdq\xea\xa8\xee?\x0c}\xf0\x16\xcca\x8d\xc2\xdft\xd5]\x1aq\x8e?\x00\"o\xc8\x07N.\x9f \xb7\xcc\xcf~,@\xa4\x06\xf0\xee\xef\xaao\x02Q)\x05T\xd3w\xf3Cv\xf1\x89>\xfc-\x9f\x02 \x105\xb4\xb2\xb5\xc1\xdeVo7\xe6d\x06\x90)\x04\x04I\xba\x10=^\x1a\xd5E\x02\x8f\x1fs+q\xd7\xcf\xdbQN\x00\xc1f(\xaem\xfe\xdc\xc8\xce\x98\x94\x12\x86\x97\xe8\xe7\xe4\x99k\xc4 
[binary data omitted: remainder of a zip-packed PNG image from the embedded admin-UI asset archive]
[zip archive entry: index.html — "InfluxDB - Admin Interface"; the page's HTML markup did not survive extraction, so only the headings summarized below remain]
[index.html body omitted: only whitespace and a few text fragments survive extraction. The recoverable headings are a "Connection Settings" form, a menu whose item labels were lost, the banner "InfluxDB Admin UI: Server:", a "Write Data to InfluxDB" modal, and a "URL for the Query" modal.]
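The admin page outlined above is driven by js/admin.js, the next entry in the archive: it issues GET requests to /query with q and db parameters (including "SHOW DATABASES" to populate the database selector), POSTs the raw text of the "Write Data to InfluxDB" modal to /write?db=..., and reads the X-InfluxDB-Version response header, with 8086 as the default port. What follows is a minimal command-line sketch of those same calls, assuming a local server on the default port; the database "mydb" and the measurement, tag, and field names are illustrative placeholders, not taken from the diff.

    # Query endpoint, as used by getDatabases() and handleSubmit() in admin.js.
    # -i shows response headers, including X-InfluxDB-Version, which the UI
    # reads to display the server version.
    curl -i -G "http://localhost:8086/query" --data-urlencode "q=SHOW DATABASES"
    curl -G "http://localhost:8086/query" \
         --data-urlencode "db=mydb" \
         --data-urlencode "q=SELECT * FROM cpu LIMIT 5"

    # Write endpoint, as used by the "Write Data to InfluxDB" modal
    # (body is InfluxDB line protocol).
    curl -X POST "http://localhost:8086/write?db=mydb" \
         --data-binary "cpu,host=server01 value=0.64"

admin.js builds the same base URL through its connectionString() helper, adding a Basic Authorization header when a username is configured.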
    \n\n \n \n \n \n\n\n\nPK\x07\x08@\xaf\xe3\x8f\xde-\x00\x00\xde-\x00\x00PK\x03\x04\x14\x00\x08\x00\x00\x00n\xa8[I\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00js/admin.js// allow the user to store recent queries for quick retrieval\nvar recentQueries = [];\nvar queryPointer = null;\n\n// keep track of the databases that exist on the server\nvar availableDatabases = [];\nvar currentlySelectedDatabase = null;\n\n// connection settings for the server, with sensible defaults\nvar connectionSettings = {\n hostname: (window.location.hostname ? window.location.hostname: \"localhost\"),\n port: \"8086\",\n username: \"\",\n password: \"\",\n ssl: ('https:' == window.location.protocol ? true : false)\n}\n\nvar connectionString = function() {\n var protocol = (connectionSettings.ssl ? \"https\" : \"http\");\n var host = connectionSettings.hostname + \":\" + connectionSettings.port;\n\n if (connectionSettings.username !== \"\") {\n $.ajaxSetup({\n headers: {\n 'Authorization': \"Basic \" + btoa(connectionSettings.username + \":\" + connectionSettings.password)\n }\n });\n }\n\n return protocol + \"://\" + host;\n}\n\nvar getSeriesFromJSON = function(data) {\n var results = [];\n data.results.forEach(function(result) {\n if (result.series) {\n result.series.forEach(function(s) {\n results.push(s);\n });\n }\n });\n return results.length > 0 ? results : null;\n}\n\n// gets settings from the browser's localStorage and sets defaults if they aren't found\nvar loadSettings = function() {\n var cs = localStorage.getItem(\"connectionSettings\");\n\n if (cs != null) { connectionSettings = JSON.parse(cs); }\n\n document.getElementById('hostname').value = connectionSettings.hostname;\n document.getElementById('port').value = connectionSettings.port;\n document.getElementById('username').value = connectionSettings.username;\n document.getElementById('password').value = connectionSettings.password;\n document.getElementById('ssl').checked = connectionSettings.ssl;\n\n getClientVersion();\n getDatabases();\n}\n\nvar updateSettings = function() {\n var hostname = document.getElementById('hostname').value;\n var port = document.getElementById('port').value;\n var username = document.getElementById('username').value;\n var password = document.getElementById('password').value;\n var ssl = document.getElementById('ssl').checked;\n\n if (hostname == \"\") { hostname = \"localhost\"; }\n\n if (port == \"\") { port = \"8086\"; }\n\n connectionSettings.hostname = hostname;\n connectionSettings.port = port;\n connectionSettings.username = username;\n connectionSettings.password = password;\n connectionSettings.ssl = ssl;\n\n localStorage.setItem(\"connectionSettings\", JSON.stringify(connectionSettings));\n\n getDatabases();\n}\n\nvar showSettings = function() {\n $(\"#settings\").show();\n $(\"input#query\").prop('disabled', true);\n}\n\nvar hideSettings = function() {\n $(\"#settings\").hide();\n $(\"input#query\").prop('disabled', false);\n}\n\n// hide errors within the Write Data modal\nvar hideModalError = function() {\n $(\"div#modal-error\").empty().hide();\n}\n\n// show errors within the Write Data modal\nvar showModalError = function(message) {\n hideModalSuccess();\n\n $(\"div#modal-error\").html(\"

    \" + message + \"

    \").show();\n}\n\n// hide success messages within the Write Data modal\nvar hideModalSuccess = function() {\n $(\"div#modal-success\").empty().hide();\n}\n\n// show success messages within the Write Data modal\nvar showModalSuccess = function(message) {\n hideModalError();\n\n $(\"div#modal-success\").html(\"

    \" + message + \"

    \").show();\n}\n\n// hide errors from queries\nvar hideQueryError = function() {\n $(\"div#query-error\").empty().hide();\n}\n\n// show errors from queries\nvar showQueryError = function(message) {\n hideQuerySuccess();\n\n $(\"div#query-error\").html(\"

    \" + message + \"

    \").show();\n}\n\n// hide success messages from queries\nvar hideQuerySuccess = function() {\n $(\"div#query-success\").empty().hide();\n}\n\n// show success messages from queries\nvar showQuerySuccess = function(message) {\n hideQueryError();\n\n $(\"div#query-success\").html(\"

    \" + message + \"

    \").show();\n}\n\n// hide warning from database lookup\nvar hideDatabaseWarning = function() {\n $(\"div#database-warning\").empty().hide();\n}\n\n// show warning from database lookup\nvar showDatabaseWarning = function(message) {\n $(\"div#database-warning\").html(\"

    \" + message + \"

    \").show();\n}\n\n// clear out the results table\nvar clearResults = function() {\n $(\"div#table\").empty();\n}\n\n// handle submissions of the query bar\nvar handleSubmit = function(e) {\n var queryElement = document.getElementById('query');\n var q = queryElement.value;\n\n clearResults();\n hideQueryError();\n hideQuerySuccess();\n\n if (q == \"\") { return };\n\n var query = $.get(connectionString() + \"/query\", {q: q, db: currentlySelectedDatabase}, function() {\n });\n\n recentQueries.push(q);\n queryPointer = recentQueries.length - 1;\n\n query.fail(handleRequestError);\n\n query.done(function (data) {\n var firstRow = data.results[0];\n if (firstRow.error) {\n showQueryError(\"Server returned error: \" + firstRow.error);\n return\n }\n\n var series = getSeriesFromJSON(data);\n\n if (series == null) {\n showQuerySuccess(\"Success! (no results to display)\");\n getDatabases();\n return\n }\n\n hideDatabaseWarning();\n React.render(\n React.createElement(DataTable, {series: series}),\n document.getElementById('table')\n );\n });\n\n if (e != null) { e.preventDefault(); }\n return false;\n};\n\nvar handleRequestError = function(e) {\n var errorText = e.status + \" \" + e.statusText;\n showDatabaseWarning(\"Unable to fetch list of databases.\");\n\n if (\"responseText\" in e) {\n try { errorText = \"Server returned error: \" + JSON.parse(e.responseText).error; } catch(e) {}\n }\n\n if (e.status == 400) {\n hideSettings();\n } else if (e.status == 401) {\n if (errorText.indexOf(\"error authorizing query\") > -1) {\n hideSettings();\n $(\"input#query\").val(\"CREATE USER WITH PASSWORD '' WITH ALL PRIVILEGES\").focus();\n } else {\n showSettings();\n $(\"input#username\").focus();\n }\n } else {\n showSettings();\n $(\"input#hostname\").focus();\n showDatabaseWarning(\"Hint: the InfluxDB API runs on port 8086 by default\");\n errorText = e.status + \" \" + e.statusText + \" - Could not connect to \" + connectionString();\n }\n showQueryError(errorText);\n};\n\nvar handleKeypress = function(e) {\n var queryElement = document.getElementById('query');\n\n // Enable/Disable the generate permalink button\n if(queryElement.value == \"\" && !$(\"#generate-query-url\").hasClass(\"disabled\")) {\n $(\"#generate-query-url\").addClass(\"disabled\");\n } else {\n $(\"#generate-query-url\").removeClass(\"disabled\");\n }\n\n // key press == enter\n if (e.keyCode == 13) {\n e.preventDefault();\n handleSubmit();\n return false;\n }\n\n // if we don't have any recent queries, ignore the arrow keys\n if (recentQueries.length == 0 ) { return }\n\n // key press == up arrow\n if (e.keyCode == 38) {\n clearResults()\n hideQuerySuccess()\n hideQueryError()\n\n // TODO: stash the current query, if there is one?\n if (queryPointer == recentQueries.length - 1) {\n // this is buggy.\n //recentQueries.push(queryElement.value);\n //queryPointer = recentQueries.length - 1;\n }\n\n if (queryPointer != null && queryPointer > 0) {\n queryPointer -= 1;\n queryElement.value = recentQueries[queryPointer];\n }\n }\n\n // key press == down arrow\n if (e.keyCode == 40) {\n if (queryPointer != null && queryPointer < recentQueries.length - 1) {\n queryPointer += 1;\n queryElement.value = recentQueries[queryPointer];\n }\n }\n};\n\nvar QueryError = React.createClass({\n render: function() {\n return React.createElement(\"div\", {className: \"alert alert-danger\"}, this.props.message)\n }\n});\n\nvar stringifyTags = function(tags) {\n var tagStrings = [];\n\n for(var index in tags) {\n tagStrings.push(index + \":\" + 
tags[index]);\n }\n\n return tagStrings.join(\", \");\n}\n\nvar DataTable = React.createClass({\n render: function() {\n var tables = this.props.series.map(function(series) {\n return React.createElement(\"div\", null,\n React.createElement(\"h1\", null, series.name),\n React.createElement(\"h2\", null, stringifyTags(series.tags)),\n React.createElement(\"table\", {className: \"table\"},\n React.createElement(TableHeader, {data: series.columns}),\n React.createElement(TableBody, {data: series})\n )\n );\n });\n\n return React.createElement(\"div\", null, tables);\n }\n});\n\nvar TableHeader = React.createClass({\n render: function() {\n var headers = this.props.data.map(function(column) {\n return React.createElement(\"th\", null, column);\n });\n\n return React.createElement(\"tr\", null, headers);\n }\n});\n\nvar TableBody = React.createClass({\n render: function() {\n if (this.props.data.values) {\n var tableRows = this.props.data.values.map(function (row) {\n return React.createElement(TableRow, {data: row});\n });\n }\n\n return React.createElement(\"tbody\", null, tableRows);\n }\n});\n\nvar TableRow = React.createClass({\n render: function() {\n var tableData = this.props.data.map(function (data, index) {\n if (index == 0) {\n return React.createElement(\"td\", {className: \"timestamp\"}, null, data);\n } else {\n return React.createElement(\"td\", null, pretty(data));\n }\n });\n\n return React.createElement(\"tr\", null, tableData);\n }\n});\n\nvar pretty = function(val) {\n if (typeof val == 'string') {\n return \"\\\"\" + val + \"\\\"\";\n } else if (typeof val == 'boolean' ){\n return val.toString();\n } else {\n return val;\n }\n}\n\nvar truncateVersion = function (version) {\n var parts = version.split(\".\")\n if (parts.length > 2) {\n parts = parts.slice(0, 2)\n }\n return parts.join(\".\")\n}\n\nvar getClientVersion = function () {\n var query = $.get(window.location.origin + window.location.pathname);\n\n query.fail(handleRequestError);\n\n query.done(function (data, status, xhr) {\n var version = xhr.getResponseHeader('X-InfluxDB-Version');\n if (version.indexOf(\"unknown\") == -1) {\n console.log('got client version v'+version);\n version = 'v' + truncateVersion(version);\n $('#influxdb-doc-link').attr('href', 'https://docs.influxdata.com/influxdb/'+version+'/introduction/getting_started/');\n }\n $('.influxdb-client-version').html(version);\n });\n}\n\nvar chooseDatabase = function (databaseName) {\n currentlySelectedDatabase = databaseName;\n document.getElementById(\"content-current-database\").innerHTML = currentlySelectedDatabase;\n}\n\nvar getDatabases = function () {\n var q = \"SHOW DATABASES\";\n var query = $.get(connectionString() + \"/query\", {q: q, db: currentlySelectedDatabase});\n\n query.fail(handleRequestError);\n\n query.done(function (data, status, xhr) {\n // Set version of the InfluxDB server\n var version = xhr.getResponseHeader('X-InfluxDB-Version');\n if (version.indexOf(\"unknown\") == -1) {\n version = \"v\" + version;\n }\n $('.influxdb-version').html(version);\n\n hideSettings();\n hideDatabaseWarning();\n\n var firstRow = data.results[0];\n if (firstRow.error) {\n showDatabaseWarning(firstRow.error);\n return;\n }\n\n var series = getSeriesFromJSON(data);\n var values = series[0].values;\n\n if ((values == null) || (values.length == 0)) {\n availableDatabases = [];\n updateDatabaseList();\n\n showDatabaseWarning(\"No databases found.\")\n } else {\n availableDatabases = values.map(function(value) {\n return value[0];\n }).sort();\n\n if 
(currentlySelectedDatabase == null) {\n chooseDatabase(availableDatabases[0]);\n } else if (availableDatabases.indexOf(currentlySelectedDatabase) == -1) {\n chooseDatabase(availableDatabases[0]);\n }\n updateDatabaseList();\n }\n });\n}\n\nvar updateDatabaseList = function() {\n var databaseList = $(\"ul#content-database-list\");\n\n databaseList.empty();\n availableDatabases.forEach(function(database) {\n var li = $(\"
  • \" + database + \"
  • \");\n databaseList.append(li);\n });\n\n if (availableDatabases.length == 0) {\n document.getElementById(\"content-current-database\").innerHTML = \"…\";\n }\n}\n\nvar generateQueryURL = function() {\n var q = document.getElementById('query').value;\n\n var query = connectionString() + \"/query?\";\n var queryParams = {q: q, db: currentlySelectedDatabase};\n query += $.param(queryParams);\n\n var textarea = $(\"#query-url\");\n textarea.val(query);\n}\n\n// when the page is ready, start everything up\n$(document).ready(function () {\n loadSettings();\n\n // bind to the settings cog in the navbar\n $(\"#action-settings\").click(function (e) {\n $(\"#settings\").toggle();\n });\n\n // bind to the save button in the settings form\n $(\"#form-settings\").submit(function (e) {\n updateSettings();\n });\n\n // bind to the items in the query template dropdown\n $(\"ul#action-template label\").click(function (e) {\n var el = $(e.target);\n $(\"input#query\").val(el.data(\"query\")).focus();\n });\n\n $(\"ul#content-database-list\").on(\"click\", function(e) {\n if (e.target.tagName != \"A\") { return; }\n\n chooseDatabase(e.target.innerHTML);\n e.preventDefault();\n })\n\n // load the Write Data modal\n $(\"button#action-send\").click(function (e) {\n var data = $(\"textarea#content-data\").val();\n\n var startTime = new Date().getTime();\n var write = $.post(connectionString() + \"/write?db=\" + currentlySelectedDatabase, data, function() {\n });\n\n write.fail(function (e) {\n if (e.status == 400) {\n showModalError(\"Failed to write: \" + e.responseText)\n }\n else {\n showModalError(\"Failed to contact server: \" + e.statusText)\n }\n });\n\n write.done(function (data) {\n var endTime = new Date().getTime();\n var elapsed = endTime - startTime;\n showModalSuccess(\"Write succeeded. 
(\" + elapsed + \"ms)\");\n });\n\n });\n\n // Enable auto select of the text in modal\n $('#queryURLModal').on('shown.bs.modal', function () {\n var textarea = $(\"#query-url\");\n textarea.focus();\n textarea.select();\n });\n\n //bind to the generate permalink button\n $(\"#generate-query-url\").click(function (e) {\n generateQueryURL();\n });\n\n // handle submit actions on the query bar\n var form = document.getElementById('query-form');\n form.addEventListener(\"submit\", handleSubmit);\n\n // handle keypresses on the query bar so we can get arrow keys and enter\n var query = document.getElementById('query');\n query.addEventListener(\"keydown\", handleKeypress);\n\n // make sure we start out with the query bar in focus\n document.getElementById('query').focus();\n})\nPK\x07\x08SM\xc0\x10*=\x00\x00*=\x00\x00PK\x03\x04\x14\x00\x08\x00\x00\x00L\x84JI\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00js/vendor/bootstrap-3.3.5.min.js/*!\n * Bootstrap v3.3.5 (http://getbootstrap.com)\n * Copyright 2011-2015 Twitter, Inc.\n * Licensed under the MIT license\n */\nif(\"undefined\"==typeof jQuery)throw new Error(\"Bootstrap's JavaScript requires jQuery\");+function(a){\"use strict\";var b=a.fn.jquery.split(\" \")[0].split(\".\");if(b[0]<2&&b[1]<9||1==b[0]&&9==b[1]&&b[2]<1)throw new Error(\"Bootstrap's JavaScript requires jQuery version 1.9.1 or higher\")}(jQuery),+function(a){\"use strict\";function b(){var a=document.createElement(\"bootstrap\"),b={WebkitTransition:\"webkitTransitionEnd\",MozTransition:\"transitionend\",OTransition:\"oTransitionEnd otransitionend\",transition:\"transitionend\"};for(var c in b)if(void 0!==a.style[c])return{end:b[c]};return!1}a.fn.emulateTransitionEnd=function(b){var c=!1,d=this;a(this).one(\"bsTransitionEnd\",function(){c=!0});var e=function(){c||a(d).trigger(a.support.transition.end)};return setTimeout(e,b),this},a(function(){a.support.transition=b(),a.support.transition&&(a.event.special.bsTransitionEnd={bindType:a.support.transition.end,delegateType:a.support.transition.end,handle:function(b){return a(b.target).is(this)?b.handleObj.handler.apply(this,arguments):void 0}})})}(jQuery),+function(a){\"use strict\";function b(b){return this.each(function(){var c=a(this),e=c.data(\"bs.alert\");e||c.data(\"bs.alert\",e=new d(this)),\"string\"==typeof b&&e[b].call(c)})}var c='[data-dismiss=\"alert\"]',d=function(b){a(b).on(\"click\",c,this.close)};d.VERSION=\"3.3.5\",d.TRANSITION_DURATION=150,d.prototype.close=function(b){function c(){g.detach().trigger(\"closed.bs.alert\").remove()}var e=a(this),f=e.attr(\"data-target\");f||(f=e.attr(\"href\"),f=f&&f.replace(/.*(?=#[^\\s]*$)/,\"\"));var g=a(f);b&&b.preventDefault(),g.length||(g=e.closest(\".alert\")),g.trigger(b=a.Event(\"close.bs.alert\")),b.isDefaultPrevented()||(g.removeClass(\"in\"),a.support.transition&&g.hasClass(\"fade\")?g.one(\"bsTransitionEnd\",c).emulateTransitionEnd(d.TRANSITION_DURATION):c())};var e=a.fn.alert;a.fn.alert=b,a.fn.alert.Constructor=d,a.fn.alert.noConflict=function(){return a.fn.alert=e,this},a(document).on(\"click.bs.alert.data-api\",c,d.prototype.close)}(jQuery),+function(a){\"use strict\";function b(b){return this.each(function(){var d=a(this),e=d.data(\"bs.button\"),f=\"object\"==typeof b&&b;e||d.data(\"bs.button\",e=new c(this,f)),\"toggle\"==b?e.toggle():b&&e.setState(b)})}var c=function(b,d){this.$element=a(b),this.options=a.extend({},c.DEFAULTS,d),this.isLoading=!1};c.VERSION=\"3.3.5\",c.DEFAULTS={loadingText:\"loading...\"},c.prototype.setState=function(b){var 
c=\"disabled\",d=this.$element,e=d.is(\"input\")?\"val\":\"html\",f=d.data();b+=\"Text\",null==f.resetText&&d.data(\"resetText\",d[e]()),setTimeout(a.proxy(function(){d[e](null==f[b]?this.options[b]:f[b]),\"loadingText\"==b?(this.isLoading=!0,d.addClass(c).attr(c,c)):this.isLoading&&(this.isLoading=!1,d.removeClass(c).removeAttr(c))},this),0)},c.prototype.toggle=function(){var a=!0,b=this.$element.closest('[data-toggle=\"buttons\"]');if(b.length){var c=this.$element.find(\"input\");\"radio\"==c.prop(\"type\")?(c.prop(\"checked\")&&(a=!1),b.find(\".active\").removeClass(\"active\"),this.$element.addClass(\"active\")):\"checkbox\"==c.prop(\"type\")&&(c.prop(\"checked\")!==this.$element.hasClass(\"active\")&&(a=!1),this.$element.toggleClass(\"active\")),c.prop(\"checked\",this.$element.hasClass(\"active\")),a&&c.trigger(\"change\")}else this.$element.attr(\"aria-pressed\",!this.$element.hasClass(\"active\")),this.$element.toggleClass(\"active\")};var d=a.fn.button;a.fn.button=b,a.fn.button.Constructor=c,a.fn.button.noConflict=function(){return a.fn.button=d,this},a(document).on(\"click.bs.button.data-api\",'[data-toggle^=\"button\"]',function(c){var d=a(c.target);d.hasClass(\"btn\")||(d=d.closest(\".btn\")),b.call(d,\"toggle\"),a(c.target).is('input[type=\"radio\"]')||a(c.target).is('input[type=\"checkbox\"]')||c.preventDefault()}).on(\"focus.bs.button.data-api blur.bs.button.data-api\",'[data-toggle^=\"button\"]',function(b){a(b.target).closest(\".btn\").toggleClass(\"focus\",/^focus(in)?$/.test(b.type))})}(jQuery),+function(a){\"use strict\";function b(b){return this.each(function(){var d=a(this),e=d.data(\"bs.carousel\"),f=a.extend({},c.DEFAULTS,d.data(),\"object\"==typeof b&&b),g=\"string\"==typeof b?b:f.slide;e||d.data(\"bs.carousel\",e=new c(this,f)),\"number\"==typeof b?e.to(b):g?e[g]():f.interval&&e.pause().cycle()})}var c=function(b,c){this.$element=a(b),this.$indicators=this.$element.find(\".carousel-indicators\"),this.options=c,this.paused=null,this.sliding=null,this.interval=null,this.$active=null,this.$items=null,this.options.keyboard&&this.$element.on(\"keydown.bs.carousel\",a.proxy(this.keydown,this)),\"hover\"==this.options.pause&&!(\"ontouchstart\"in document.documentElement)&&this.$element.on(\"mouseenter.bs.carousel\",a.proxy(this.pause,this)).on(\"mouseleave.bs.carousel\",a.proxy(this.cycle,this))};c.VERSION=\"3.3.5\",c.TRANSITION_DURATION=600,c.DEFAULTS={interval:5e3,pause:\"hover\",wrap:!0,keyboard:!0},c.prototype.keydown=function(a){if(!/input|textarea/i.test(a.target.tagName)){switch(a.which){case 37:this.prev();break;case 39:this.next();break;default:return}a.preventDefault()}},c.prototype.cycle=function(b){return b||(this.paused=!1),this.interval&&clearInterval(this.interval),this.options.interval&&!this.paused&&(this.interval=setInterval(a.proxy(this.next,this),this.options.interval)),this},c.prototype.getItemIndex=function(a){return this.$items=a.parent().children(\".item\"),this.$items.index(a||this.$active)},c.prototype.getItemForDirection=function(a,b){var c=this.getItemIndex(b),d=\"prev\"==a&&0===c||\"next\"==a&&c==this.$items.length-1;if(d&&!this.options.wrap)return b;var e=\"prev\"==a?-1:1,f=(c+e)%this.$items.length;return this.$items.eq(f)},c.prototype.to=function(a){var b=this,c=this.getItemIndex(this.$active=this.$element.find(\".item.active\"));return a>this.$items.length-1||0>a?void 
0:this.sliding?this.$element.one(\"slid.bs.carousel\",function(){b.to(a)}):c==a?this.pause().cycle():this.slide(a>c?\"next\":\"prev\",this.$items.eq(a))},c.prototype.pause=function(b){return b||(this.paused=!0),this.$element.find(\".next, .prev\").length&&a.support.transition&&(this.$element.trigger(a.support.transition.end),this.cycle(!0)),this.interval=clearInterval(this.interval),this},c.prototype.next=function(){return this.sliding?void 0:this.slide(\"next\")},c.prototype.prev=function(){return this.sliding?void 0:this.slide(\"prev\")},c.prototype.slide=function(b,d){var e=this.$element.find(\".item.active\"),f=d||this.getItemForDirection(b,e),g=this.interval,h=\"next\"==b?\"left\":\"right\",i=this;if(f.hasClass(\"active\"))return this.sliding=!1;var j=f[0],k=a.Event(\"slide.bs.carousel\",{relatedTarget:j,direction:h});if(this.$element.trigger(k),!k.isDefaultPrevented()){if(this.sliding=!0,g&&this.pause(),this.$indicators.length){this.$indicators.find(\".active\").removeClass(\"active\");var l=a(this.$indicators.children()[this.getItemIndex(f)]);l&&l.addClass(\"active\")}var m=a.Event(\"slid.bs.carousel\",{relatedTarget:j,direction:h});return a.support.transition&&this.$element.hasClass(\"slide\")?(f.addClass(b),f[0].offsetWidth,e.addClass(h),f.addClass(h),e.one(\"bsTransitionEnd\",function(){f.removeClass([b,h].join(\" \")).addClass(\"active\"),e.removeClass([\"active\",h].join(\" \")),i.sliding=!1,setTimeout(function(){i.$element.trigger(m)},0)}).emulateTransitionEnd(c.TRANSITION_DURATION)):(e.removeClass(\"active\"),f.addClass(\"active\"),this.sliding=!1,this.$element.trigger(m)),g&&this.cycle(),this}};var d=a.fn.carousel;a.fn.carousel=b,a.fn.carousel.Constructor=c,a.fn.carousel.noConflict=function(){return a.fn.carousel=d,this};var e=function(c){var d,e=a(this),f=a(e.attr(\"data-target\")||(d=e.attr(\"href\"))&&d.replace(/.*(?=#[^\\s]+$)/,\"\"));if(f.hasClass(\"carousel\")){var g=a.extend({},f.data(),e.data()),h=e.attr(\"data-slide-to\");h&&(g.interval=!1),b.call(f,g),h&&f.data(\"bs.carousel\").to(h),c.preventDefault()}};a(document).on(\"click.bs.carousel.data-api\",\"[data-slide]\",e).on(\"click.bs.carousel.data-api\",\"[data-slide-to]\",e),a(window).on(\"load\",function(){a('[data-ride=\"carousel\"]').each(function(){var c=a(this);b.call(c,c.data())})})}(jQuery),+function(a){\"use strict\";function b(b){var c,d=b.attr(\"data-target\")||(c=b.attr(\"href\"))&&c.replace(/.*(?=#[^\\s]+$)/,\"\");return a(d)}function c(b){return this.each(function(){var c=a(this),e=c.data(\"bs.collapse\"),f=a.extend({},d.DEFAULTS,c.data(),\"object\"==typeof b&&b);!e&&f.toggle&&/show|hide/.test(b)&&(f.toggle=!1),e||c.data(\"bs.collapse\",e=new d(this,f)),\"string\"==typeof b&&e[b]()})}var d=function(b,c){this.$element=a(b),this.options=a.extend({},d.DEFAULTS,c),this.$trigger=a('[data-toggle=\"collapse\"][href=\"#'+b.id+'\"],[data-toggle=\"collapse\"][data-target=\"#'+b.id+'\"]'),this.transitioning=null,this.options.parent?this.$parent=this.getParent():this.addAriaAndCollapsedClass(this.$element,this.$trigger),this.options.toggle&&this.toggle()};d.VERSION=\"3.3.5\",d.TRANSITION_DURATION=350,d.DEFAULTS={toggle:!0},d.prototype.dimension=function(){var a=this.$element.hasClass(\"width\");return a?\"width\":\"height\"},d.prototype.show=function(){if(!this.transitioning&&!this.$element.hasClass(\"in\")){var b,e=this.$parent&&this.$parent.children(\".panel\").children(\".in, .collapsing\");if(!(e&&e.length&&(b=e.data(\"bs.collapse\"),b&&b.transitioning))){var 
f=a.Event(\"show.bs.collapse\");if(this.$element.trigger(f),!f.isDefaultPrevented()){e&&e.length&&(c.call(e,\"hide\"),b||e.data(\"bs.collapse\",null));var g=this.dimension();this.$element.removeClass(\"collapse\").addClass(\"collapsing\")[g](0).attr(\"aria-expanded\",!0),this.$trigger.removeClass(\"collapsed\").attr(\"aria-expanded\",!0),this.transitioning=1;var h=function(){this.$element.removeClass(\"collapsing\").addClass(\"collapse in\")[g](\"\"),this.transitioning=0,this.$element.trigger(\"shown.bs.collapse\")};if(!a.support.transition)return h.call(this);var i=a.camelCase([\"scroll\",g].join(\"-\"));this.$element.one(\"bsTransitionEnd\",a.proxy(h,this)).emulateTransitionEnd(d.TRANSITION_DURATION)[g](this.$element[0][i])}}}},d.prototype.hide=function(){if(!this.transitioning&&this.$element.hasClass(\"in\")){var b=a.Event(\"hide.bs.collapse\");if(this.$element.trigger(b),!b.isDefaultPrevented()){var c=this.dimension();this.$element[c](this.$element[c]())[0].offsetHeight,this.$element.addClass(\"collapsing\").removeClass(\"collapse in\").attr(\"aria-expanded\",!1),this.$trigger.addClass(\"collapsed\").attr(\"aria-expanded\",!1),this.transitioning=1;var e=function(){this.transitioning=0,this.$element.removeClass(\"collapsing\").addClass(\"collapse\").trigger(\"hidden.bs.collapse\")};return a.support.transition?void this.$element[c](0).one(\"bsTransitionEnd\",a.proxy(e,this)).emulateTransitionEnd(d.TRANSITION_DURATION):e.call(this)}}},d.prototype.toggle=function(){this[this.$element.hasClass(\"in\")?\"hide\":\"show\"]()},d.prototype.getParent=function(){return a(this.options.parent).find('[data-toggle=\"collapse\"][data-parent=\"'+this.options.parent+'\"]').each(a.proxy(function(c,d){var e=a(d);this.addAriaAndCollapsedClass(b(e),e)},this)).end()},d.prototype.addAriaAndCollapsedClass=function(a,b){var c=a.hasClass(\"in\");a.attr(\"aria-expanded\",c),b.toggleClass(\"collapsed\",!c).attr(\"aria-expanded\",c)};var e=a.fn.collapse;a.fn.collapse=c,a.fn.collapse.Constructor=d,a.fn.collapse.noConflict=function(){return a.fn.collapse=e,this},a(document).on(\"click.bs.collapse.data-api\",'[data-toggle=\"collapse\"]',function(d){var e=a(this);e.attr(\"data-target\")||d.preventDefault();var f=b(e),g=f.data(\"bs.collapse\"),h=g?\"toggle\":e.data();c.call(f,h)})}(jQuery),+function(a){\"use strict\";function b(b){var c=b.attr(\"data-target\");c||(c=b.attr(\"href\"),c=c&&/#[A-Za-z]/.test(c)&&c.replace(/.*(?=#[^\\s]*$)/,\"\"));var d=c&&a(c);return d&&d.length?d:b.parent()}function c(c){c&&3===c.which||(a(e).remove(),a(f).each(function(){var d=a(this),e=b(d),f={relatedTarget:this};e.hasClass(\"open\")&&(c&&\"click\"==c.type&&/input|textarea/i.test(c.target.tagName)&&a.contains(e[0],c.target)||(e.trigger(c=a.Event(\"hide.bs.dropdown\",f)),c.isDefaultPrevented()||(d.attr(\"aria-expanded\",\"false\"),e.removeClass(\"open\").trigger(\"hidden.bs.dropdown\",f))))}))}function d(b){return this.each(function(){var c=a(this),d=c.data(\"bs.dropdown\");d||c.data(\"bs.dropdown\",d=new g(this)),\"string\"==typeof b&&d[b].call(c)})}var e=\".dropdown-backdrop\",f='[data-toggle=\"dropdown\"]',g=function(b){a(b).on(\"click.bs.dropdown\",this.toggle)};g.VERSION=\"3.3.5\",g.prototype.toggle=function(d){var e=a(this);if(!e.is(\".disabled, :disabled\")){var f=b(e),g=f.hasClass(\"open\");if(c(),!g){\"ontouchstart\"in document.documentElement&&!f.closest(\".navbar-nav\").length&&a(document.createElement(\"div\")).addClass(\"dropdown-backdrop\").insertAfter(a(this)).on(\"click\",c);var 
h={relatedTarget:this};if(f.trigger(d=a.Event(\"show.bs.dropdown\",h)),d.isDefaultPrevented())return;e.trigger(\"focus\").attr(\"aria-expanded\",\"true\"),f.toggleClass(\"open\").trigger(\"shown.bs.dropdown\",h)}return!1}},g.prototype.keydown=function(c){if(/(38|40|27|32)/.test(c.which)&&!/input|textarea/i.test(c.target.tagName)){var d=a(this);if(c.preventDefault(),c.stopPropagation(),!d.is(\".disabled, :disabled\")){var e=b(d),g=e.hasClass(\"open\");if(!g&&27!=c.which||g&&27==c.which)return 27==c.which&&e.find(f).trigger(\"focus\"),d.trigger(\"click\");var h=\" li:not(.disabled):visible a\",i=e.find(\".dropdown-menu\"+h);if(i.length){var j=i.index(c.target);38==c.which&&j>0&&j--,40==c.which&&jdocument.documentElement.clientHeight;this.$element.css({paddingLeft:!this.bodyIsOverflowing&&a?this.scrollbarWidth:\"\",paddingRight:this.bodyIsOverflowing&&!a?this.scrollbarWidth:\"\"})},c.prototype.resetAdjustments=function(){this.$element.css({paddingLeft:\"\",paddingRight:\"\"})},c.prototype.checkScrollbar=function(){var a=window.innerWidth;if(!a){var b=document.documentElement.getBoundingClientRect();a=b.right-Math.abs(b.left)}this.bodyIsOverflowing=document.body.clientWidth
    ',trigger:\"hover focus\",title:\"\",delay:0,html:!1,container:!1,viewport:{selector:\"body\",padding:0}},c.prototype.init=function(b,c,d){if(this.enabled=!0,this.type=b,this.$element=a(c),this.options=this.getOptions(d),this.$viewport=this.options.viewport&&a(a.isFunction(this.options.viewport)?this.options.viewport.call(this,this.$element):this.options.viewport.selector||this.options.viewport),this.inState={click:!1,hover:!1,focus:!1},this.$element[0]instanceof document.constructor&&!this.options.selector)throw new Error(\"`selector` option must be specified when initializing \"+this.type+\" on the window.document object!\");for(var e=this.options.trigger.split(\" \"),f=e.length;f--;){var g=e[f];if(\"click\"==g)this.$element.on(\"click.\"+this.type,this.options.selector,a.proxy(this.toggle,this));else if(\"manual\"!=g){var h=\"hover\"==g?\"mouseenter\":\"focusin\",i=\"hover\"==g?\"mouseleave\":\"focusout\";this.$element.on(h+\".\"+this.type,this.options.selector,a.proxy(this.enter,this)),this.$element.on(i+\".\"+this.type,this.options.selector,a.proxy(this.leave,this))}}this.options.selector?this._options=a.extend({},this.options,{trigger:\"manual\",selector:\"\"}):this.fixTitle()},c.prototype.getDefaults=function(){return c.DEFAULTS},c.prototype.getOptions=function(b){return b=a.extend({},this.getDefaults(),this.$element.data(),b),b.delay&&\"number\"==typeof b.delay&&(b.delay={show:b.delay,hide:b.delay}),b},c.prototype.getDelegateOptions=function(){var b={},c=this.getDefaults();return this._options&&a.each(this._options,function(a,d){c[a]!=d&&(b[a]=d)}),b},c.prototype.enter=function(b){var c=b instanceof this.constructor?b:a(b.currentTarget).data(\"bs.\"+this.type);return c||(c=new this.constructor(b.currentTarget,this.getDelegateOptions()),a(b.currentTarget).data(\"bs.\"+this.type,c)),b instanceof a.Event&&(c.inState[\"focusin\"==b.type?\"focus\":\"hover\"]=!0),c.tip().hasClass(\"in\")||\"in\"==c.hoverState?void(c.hoverState=\"in\"):(clearTimeout(c.timeout),c.hoverState=\"in\",c.options.delay&&c.options.delay.show?void(c.timeout=setTimeout(function(){\"in\"==c.hoverState&&c.show()},c.options.delay.show)):c.show())},c.prototype.isInStateTrue=function(){for(var a in this.inState)if(this.inState[a])return!0;return!1},c.prototype.leave=function(b){var c=b instanceof this.constructor?b:a(b.currentTarget).data(\"bs.\"+this.type);return c||(c=new this.constructor(b.currentTarget,this.getDelegateOptions()),a(b.currentTarget).data(\"bs.\"+this.type,c)),b instanceof a.Event&&(c.inState[\"focusout\"==b.type?\"focus\":\"hover\"]=!1),c.isInStateTrue()?void 0:(clearTimeout(c.timeout),c.hoverState=\"out\",c.options.delay&&c.options.delay.hide?void(c.timeout=setTimeout(function(){\"out\"==c.hoverState&&c.hide()},c.options.delay.hide)):c.hide())},c.prototype.show=function(){var b=a.Event(\"show.bs.\"+this.type);if(this.hasContent()&&this.enabled){this.$element.trigger(b);var d=a.contains(this.$element[0].ownerDocument.documentElement,this.$element[0]);if(b.isDefaultPrevented()||!d)return;var e=this,f=this.tip(),g=this.getUID(this.type);this.setContent(),f.attr(\"id\",g),this.$element.attr(\"aria-describedby\",g),this.options.animation&&f.addClass(\"fade\");var h=\"function\"==typeof 
[Embedded static asset archive (binary ZIP content rendered as text, not reproduced): minified Bootstrap v3.3.5 plugins (tooltip, popover, scrollspy, tab, affix) followed by js/vendor/jquery-2.1.4.min.js (jQuery v2.1.4 with the Sizzle selector engine), bundled with the influxdb 1.1.1 admin UI assets.]