diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/CHANGELOG.md prometheus-mysqld-exporter-0.11.0+ds/CHANGELOG.md
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/CHANGELOG.md	2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/CHANGELOG.md	2018-10-15 19:28:15.000000000 +0000
@@ -1,3 +1,30 @@
+## v0.11.0 / 2018-06-29
+
+### BREAKING CHANGES:
+* Flags now use the Kingpin library, and require double-dashes. #222
+
+This also changes the behavior of boolean flags.
+* Enable: `--collector.global_status`
+* Disable: `--no-collector.global_status`
+
+### Changes:
+* [CHANGE] Limit number and lifetime of connections #208
+* [ENHANCEMENT] Move session params to DSN #259
+* [ENHANCEMENT] Use native DB.Ping() instead of self-written implementation #210
+* [FEATURE] Add collector duration metrics #197
+* [FEATURE] Add 'collect[]' URL parameter to filter enabled collectors #235
+* [FEATURE] Set a `lock_wait_timeout` on the MySQL connection #252
+* [FEATURE] Set `last_scrape_error` when an error occurs #237
+* [FEATURE] Collect metrics from `performance_schema.replication_group_member_stats` #271
+* [FEATURE] Add innodb compression statistic #275
+* [FEATURE] Add metrics for the output of `SHOW SLAVE HOSTS` #279
+* [FEATURE] Support custom CA truststore and client SSL keypair. #255
+* [BUGFIX] Fix perfEventsStatementsQuery #213
+* [BUGFIX] Fix `file_instances` metric collector #205
+* [BUGFIX] Fix prefix removal in `perf_schema_file_instances` #257
+* [BUGFIX] Fix 32bit compile issue #273
+* [BUGFIX] Ignore boolean keys in my.cnf. #283
+
 ## v0.10.0 / 2017-04-25
 
 BREAKING CHANGES:
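The Kingpin migration noted above is the upgrade-breaking item: flags that previously parsed with a single dash now require GNU-style double dashes, and boolean flags are disabled with an auto-generated `--no-` prefix rather than a `=false` value. A minimal sketch of a collector toggle in this style (the flag name is taken from the changelog entry above; the exporter's actual flag set lives in mysqld_exporter.go, outside this excerpt):

```go
package main

import (
	"fmt"

	"gopkg.in/alecthomas/kingpin.v2"
)

// Boolean collector toggle in the Kingpin style this release adopts.
// Enable with --collector.global_status, disable with the negation flag
// --no-collector.global_status that kingpin generates for Bool flags.
var collectGlobalStatus = kingpin.Flag(
	"collector.global_status",
	"Collect from SHOW GLOBAL STATUS",
).Default("true").Bool()

func main() {
	kingpin.Parse()
	fmt.Println("collector.global_status:", *collectGlobalStatus)
}
```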
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/.circleci/config.yml prometheus-mysqld-exporter-0.11.0+ds/.circleci/config.yml
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/.circleci/config.yml	1970-01-01 00:00:00.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/.circleci/config.yml	2018-10-15 19:28:15.000000000 +0000
@@ -0,0 +1,129 @@
+---
+version: 2
+
+jobs:
+  test:
+    docker:
+      - image: circleci/golang:1.10
+    working_directory: /go/src/github.com/prometheus/mysqld_exporter
+
+    steps:
+      - checkout
+      - run: make promu
+      - run: make
+      - run: rm -v mysqld_exporter
+
+  codespell:
+    docker:
+      - image: circleci/python
+
+    steps:
+      - checkout
+      - run: sudo pip install codespell
+      - run: codespell --skip=".git,./vendor,ttar"
+
+  build:
+    machine: true
+    working_directory: /home/circleci/.go_workspace/src/github.com/prometheus/mysqld_exporter
+
+    steps:
+      - checkout
+      - run: make promu
+      - run: promu crossbuild -v
+      - persist_to_workspace:
+          root: .
+          paths:
+            - .build
+
+  docker_hub_master:
+    docker:
+      - image: circleci/golang:1.10
+    working_directory: /go/src/github.com/prometheus/mysqld_exporter
+
+    environment:
+      DOCKER_IMAGE_NAME: prom/mysqld-exporter
+      QUAY_IMAGE_NAME: quay.io/prometheus/mysqld-exporter
+
+    steps:
+      - checkout
+      - setup_remote_docker
+      - attach_workspace:
+          at: .
+      - run: ln -s .build/linux-amd64/mysqld_exporter mysqld_exporter
+      - run: make docker DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME
+      - run: make docker DOCKER_IMAGE_NAME=$QUAY_IMAGE_NAME
+      - run: docker images
+      - run: docker login -u $DOCKER_LOGIN -p $DOCKER_PASSWORD
+      - run: docker login -u $QUAY_LOGIN -p $QUAY_PASSWORD quay.io
+      - run: docker push $DOCKER_IMAGE_NAME
+      - run: docker push $QUAY_IMAGE_NAME
+
+  docker_hub_release_tags:
+    docker:
+      - image: circleci/golang:1.10
+    working_directory: /go/src/github.com/prometheus/mysqld_exporter
+
+    environment:
+      DOCKER_IMAGE_NAME: prom/mysqld-exporter
+      QUAY_IMAGE_NAME: quay.io/prometheus/mysqld-exporter
+
+    steps:
+      - checkout
+      - setup_remote_docker
+      - run: mkdir -v -p ${HOME}/bin
+      - run: curl -L 'https://github.com/aktau/github-release/releases/download/v0.7.2/linux-amd64-github-release.tar.bz2' | tar xvjf - --strip-components 3 -C ${HOME}/bin
+      - run: echo 'export PATH=${HOME}/bin:${PATH}' >> ${BASH_ENV}
+      - attach_workspace:
+          at: .
+      - run: make promu
+      - run: promu crossbuild tarballs
+      - run: promu checksum .tarballs
+      - run: promu release .tarballs
+      - store_artifacts:
+          path: .tarballs
+          destination: releases
+      - run: ln -s .build/linux-amd64/mysqld_exporter mysqld_exporter
+      - run: make docker DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME DOCKER_IMAGE_TAG=$CIRCLE_TAG
+      - run: make docker DOCKER_IMAGE_NAME=$QUAY_IMAGE_NAME DOCKER_IMAGE_TAG=$CIRCLE_TAG
+      - run: docker login -u $DOCKER_LOGIN -p $DOCKER_PASSWORD
+      - run: docker login -u $QUAY_LOGIN -p $QUAY_PASSWORD quay.io
+      - run: |
+          if [[ "$CIRCLE_TAG" =~ ^v[0-9]+(\.[0-9]+){2}$ ]]; then
+            docker tag "$DOCKER_IMAGE_NAME:$CIRCLE_TAG" "$DOCKER_IMAGE_NAME:latest"
+            docker tag "$QUAY_IMAGE_NAME:$CIRCLE_TAG" "$QUAY_IMAGE_NAME:latest"
+          fi
+      - run: docker push $DOCKER_IMAGE_NAME
+      - run: docker push $QUAY_IMAGE_NAME
+
+workflows:
+  version: 2
+  mysqld_exporter:
+    jobs:
+      - test:
+          filters:
+            tags:
+              only: /.*/
+      - build:
+          filters:
+            tags:
+              only: /.*/
+      - codespell:
+          filters:
+            tags:
+              only: /.*/
+      - docker_hub_master:
+          requires:
+            - test
+            - build
+          filters:
+            branches:
+              only: master
+      - docker_hub_release_tags:
+          requires:
+            - test
+            - build
+          filters:
+            tags:
+              only: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/
+            branches:
+              ignore: /.*/
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/circle.yml prometheus-mysqld-exporter-0.11.0+ds/circle.yml
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/circle.yml	2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/circle.yml	1970-01-01 00:00:00.000000000 +0000
@@ -1,62 +0,0 @@
-machine:
-  environment:
-    DOCKER_IMAGE_NAME: prom/mysqld-exporter
-    QUAY_IMAGE_NAME: quay.io/prometheus/mysqld-exporter
-    DOCKER_TEST_IMAGE_NAME: quay.io/prometheus/golang-builder:1.8-base
-    REPO_PATH: github.com/prometheus/mysqld_exporter
-  pre:
-    - sudo curl -L -o /usr/bin/docker 'https://s3-external-1.amazonaws.com/circle-downloads/docker-1.9.1-circleci'
-    - sudo chmod 0755 /usr/bin/docker
-    - sudo curl -L 'https://github.com/aktau/github-release/releases/download/v0.6.2/linux-amd64-github-release.tar.bz2' | tar xvjf - --strip-components 3 -C $HOME/bin
-  services:
-    - docker
-
-dependencies:
-  pre:
-    - make promu
-    - docker info
-  override:
-    - promu crossbuild
-    - ln -s .build/linux-amd64/mysqld_exporter mysqld_exporter
-    - |
-      if [ -n "$CIRCLE_TAG" ]; then
-        make docker DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME DOCKER_IMAGE_TAG=$CIRCLE_TAG
-        make docker DOCKER_IMAGE_NAME=$QUAY_IMAGE_NAME DOCKER_IMAGE_TAG=$CIRCLE_TAG
-      else
-        make docker DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME
-        make docker DOCKER_IMAGE_NAME=$QUAY_IMAGE_NAME
-      fi
-  post:
-    - mkdir $CIRCLE_ARTIFACTS/binaries/ && cp -a .build/* $CIRCLE_ARTIFACTS/binaries/
-    - docker images
-
-test:
-  override:
-    - docker run --rm -t -v "$(pwd):/app" "${DOCKER_TEST_IMAGE_NAME}" -i "${REPO_PATH}" -T
-
-deployment:
-  hub_branch:
-    branch: master
-    owner: prometheus
-    commands:
-      - docker login -e $DOCKER_EMAIL -u $DOCKER_LOGIN -p $DOCKER_PASSWORD
-      - docker login -e $QUAY_EMAIL -u $QUAY_LOGIN -p $QUAY_PASSWORD quay.io
-      - docker push $DOCKER_IMAGE_NAME
-      - docker push $QUAY_IMAGE_NAME
-  hub_tag:
-    tag: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/
-    owner: prometheus
-    commands:
-      - promu crossbuild tarballs
-      - promu checksum .tarballs
-      - promu release .tarballs
-      - mkdir $CIRCLE_ARTIFACTS/releases/ && cp -a .tarballs/* $CIRCLE_ARTIFACTS/releases/
-      - docker login -e $DOCKER_EMAIL -u $DOCKER_LOGIN -p $DOCKER_PASSWORD
-      - docker login -e $QUAY_EMAIL -u $QUAY_LOGIN -p $QUAY_PASSWORD quay.io
-      - |
-        if [[ "$CIRCLE_TAG" =~ ^v[0-9]+(\.[0-9]+){2}$ ]]; then
-          docker tag "$DOCKER_IMAGE_NAME:$CIRCLE_TAG" "$DOCKER_IMAGE_NAME:latest"
-          docker tag "$QUAY_IMAGE_NAME:$CIRCLE_TAG" "$QUAY_IMAGE_NAME:latest"
-        fi
-      - docker push $DOCKER_IMAGE_NAME
-      - docker push $QUAY_IMAGE_NAME
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/binlog.go prometheus-mysqld-exporter-0.11.0+ds/collector/binlog.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/binlog.go	2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/binlog.go	2018-10-15 19:28:15.000000000 +0000
@@ -38,7 +38,20 @@
 )
 
 // ScrapeBinlogSize colects from `SHOW BINARY LOGS`.
-func ScrapeBinlogSize(db *sql.DB, ch chan<- prometheus.Metric) error {
+type ScrapeBinlogSize struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeBinlogSize) Name() string {
+	return "binlog_size"
+}
+
+// Help describes the role of the Scraper.
+func (ScrapeBinlogSize) Help() string {
+	return "Collect the current size of all registered binlog files"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapeBinlogSize) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 	var logBin uint8
 	err := db.QueryRow(logbinQuery).Scan(&logBin)
 	if err != nil {
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/binlog_test.go prometheus-mysqld-exporter-0.11.0+ds/collector/binlog_test.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/binlog_test.go	2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/binlog_test.go	2018-10-15 19:28:15.000000000 +0000
@@ -27,7 +27,7 @@
 
 	ch := make(chan prometheus.Metric)
 	go func() {
-		if err = ScrapeBinlogSize(db, ch); err != nil {
+		if err = (ScrapeBinlogSize{}).Scrape(db, ch); err != nil {
 			t.Errorf("error calling function on test: %s", err)
 		}
 		close(ch)
@@ -47,6 +47,6 @@
 
 	// Ensure all SQL queries were executed
 	if err := mock.ExpectationsWereMet(); err != nil {
-		t.Errorf("there were unfulfilled expections: %s", err)
+		t.Errorf("there were unfulfilled exceptions: %s", err)
 	}
 }
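As the binlog diffs above illustrate, every collector in this release is converted from a free function into a small struct with Name, Help and Scrape methods. The shared interface those methods satisfy is defined in collector/scraper.go, which is not part of this excerpt; a sketch consistent with the method sets shown throughout this diff would be:

```go
package collector

import (
	"database/sql"

	"github.com/prometheus/client_golang/prometheus"
)

// Scraper is the common interface each converted collector satisfies. This
// sketch is inferred from the Name/Help/Scrape methods added in this diff,
// not copied from the release.
type Scraper interface {
	// Name of the Scraper. Should be unique.
	Name() string
	// Help describes the role of the Scraper.
	Help() string
	// Scrape collects data from a database connection and sends it over
	// the channel as Prometheus metrics.
	Scrape(db *sql.DB, ch chan<- prometheus.Metric) error
}
```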
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/engine_innodb.go prometheus-mysqld-exporter-0.11.0+ds/collector/engine_innodb.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/engine_innodb.go	2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/engine_innodb.go	2018-10-15 19:28:15.000000000 +0000
@@ -19,7 +19,20 @@
 )
 
 // ScrapeEngineInnodbStatus scrapes from `SHOW ENGINE INNODB STATUS`.
-func ScrapeEngineInnodbStatus(db *sql.DB, ch chan<- prometheus.Metric) error {
+type ScrapeEngineInnodbStatus struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeEngineInnodbStatus) Name() string {
+	return "engine_innodb_status"
+}
+
+// Help describes the role of the Scraper.
+func (ScrapeEngineInnodbStatus) Help() string {
+	return "Collect from SHOW ENGINE INNODB STATUS"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapeEngineInnodbStatus) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 	rows, err := db.Query(engineInnodbStatusQuery)
 	if err != nil {
 		return err
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/engine_innodb_test.go prometheus-mysqld-exporter-0.11.0+ds/collector/engine_innodb_test.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/engine_innodb_test.go	2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/engine_innodb_test.go	2018-10-15 19:28:15.000000000 +0000
@@ -140,7 +140,7 @@
 
 	ch := make(chan prometheus.Metric)
 	go func() {
-		if err = ScrapeEngineInnodbStatus(db, ch); err != nil {
+		if err = (ScrapeEngineInnodbStatus{}).Scrape(db, ch); err != nil {
 			t.Errorf("error calling function on test: %s", err)
 		}
 		close(ch)
@@ -160,6 +160,6 @@
 
 	// Ensure all SQL queries were executed
 	if err := mock.ExpectationsWereMet(); err != nil {
-		t.Errorf("there were unfulfilled expections: %s", err)
+		t.Errorf("there were unfulfilled exceptions: %s", err)
 	}
 }
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/engine_tokudb.go prometheus-mysqld-exporter-0.11.0+ds/collector/engine_tokudb.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/engine_tokudb.go	2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/engine_tokudb.go	2018-10-15 19:28:15.000000000 +0000
@@ -16,26 +16,21 @@
 	engineTokudbStatusQuery = `SHOW ENGINE TOKUDB STATUS`
 )
 
-func sanitizeTokudbMetric(metricName string) string {
-	replacements := map[string]string{
-		">": "",
-		",": "",
-		":": "",
-		"(": "",
-		")": "",
-		" ": "_",
-		"-": "_",
-		"+": "and",
-		"/": "and",
-	}
-	for r := range replacements {
-		metricName = strings.Replace(metricName, r, replacements[r], -1)
-	}
-	return metricName
+// ScrapeEngineTokudbStatus scrapes from `SHOW ENGINE TOKUDB STATUS`.
+type ScrapeEngineTokudbStatus struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeEngineTokudbStatus) Name() string {
+	return "engine_tokudb_status"
 }
 
-// ScrapeEngineTokudbStatus scrapes from `SHOW ENGINE TOKUDB STATUS`.
-func ScrapeEngineTokudbStatus(db *sql.DB, ch chan<- prometheus.Metric) error {
+// Help describes the role of the Scraper.
+func (ScrapeEngineTokudbStatus) Help() string {
+	return "Collect from SHOW ENGINE TOKUDB STATUS"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapeEngineTokudbStatus) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 	tokudbRows, err := db.Query(engineTokudbStatusQuery)
 	if err != nil {
 		return err
@@ -60,3 +55,21 @@
 	}
 	return nil
 }
+
+func sanitizeTokudbMetric(metricName string) string {
+	replacements := map[string]string{
+		">": "",
+		",": "",
+		":": "",
+		"(": "",
+		")": "",
+		" ": "_",
+		"-": "_",
+		"+": "and",
+		"/": "and",
+	}
+	for r := range replacements {
+		metricName = strings.Replace(metricName, r, replacements[r], -1)
+	}
+	return metricName
+}
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/engine_tokudb_test.go prometheus-mysqld-exporter-0.11.0+ds/collector/engine_tokudb_test.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/engine_tokudb_test.go	2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/engine_tokudb_test.go	2018-10-15 19:28:15.000000000 +0000
@@ -44,7 +44,7 @@
 
 	ch := make(chan prometheus.Metric)
 	go func() {
-		if err = ScrapeEngineTokudbStatus(db, ch); err != nil {
+		if err = (ScrapeEngineTokudbStatus{}).Scrape(db, ch); err != nil {
 			t.Errorf("error calling function on test: %s", err)
 		}
 		close(ch)
@@ -64,6 +64,6 @@
 
 	// Ensure all SQL queries were executed
 	if err := mock.ExpectationsWereMet(); err != nil {
-		t.Errorf("there were unfulfilled expections: %s", err)
+		t.Errorf("there were unfulfilled exceptions: %s", err)
 	}
 }
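The relocated sanitizeTokudbMetric helper keeps its behavior: it deletes punctuation from TokuDB status names and maps separators to underscores or "and" so the result is a valid Prometheus metric name. A couple of worked examples, written as a test in the same package (the input strings are hypothetical status names, not captured from a TokuDB server):

```go
package collector

import "testing"

// Worked examples for the sanitizeTokudbMetric helper shown above; inputs
// are hypothetical TokuDB status names.
func TestSanitizeTokudbMetricExamples(t *testing.T) {
	cases := map[string]string{
		"txn: begin+commit":         "txn_beginandcommit",
		"cachetable size (current)": "cachetable_size_current",
	}
	for in, want := range cases {
		if got := sanitizeTokudbMetric(in); got != want {
			t.Errorf("sanitizeTokudbMetric(%q) = %q, want %q", in, got, want)
		}
	}
}
```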
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/exporter.go prometheus-mysqld-exporter-0.11.0+ds/collector/exporter.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/exporter.go	2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/exporter.go	2018-10-15 19:28:15.000000000 +0000
@@ -4,6 +4,7 @@
 	"database/sql"
 	"fmt"
 	"strings"
+	"sync"
 	"time"
 
 	_ "github.com/go-sql-driver/mysql"
@@ -24,11 +25,9 @@
 	// See: https://github.com/go-sql-driver/mysql#system-variables
 	sessionSettingsParam = `log_slow_filter=%27tmp_table_on_disk,filesort_on_disk%27`
 	timeoutParam         = `lock_wait_timeout=%d`
-
-	upQuery = `SELECT 1`
 )
 
-// Metric descriptors.
+// Tunable flags.
 var (
 	exporterLockTimeout = kingpin.Flag(
 		"exporter.lock_wait_timeout",
@@ -38,7 +37,10 @@
 		"exporter.log_slow_filter",
 		"Add a log_slow_filter to avoid slow query logging of scrapes. NOTE: Not supported by Oracle MySQL.",
 	).Default("false").Bool()
+)
 
+// Metric descriptors.
+var (
 	scrapeDurationDesc = prometheus.NewDesc(
 		prometheus.BuildFQName(namespace, exporter, "collector_duration_seconds"),
 		"Collector time duration.",
@@ -46,47 +48,15 @@
 	)
 )
 
-// Collect defines which metrics we should collect
-type Collect struct {
-	Processlist          bool
-	TableSchema          bool
-	InnodbTablespaces    bool
-	InnodbMetrics        bool
-	GlobalStatus         bool
-	GlobalVariables      bool
-	SlaveStatus          bool
-	AutoIncrementColumns bool
-	BinlogSize           bool
-	PerfTableIOWaits     bool
-	PerfIndexIOWaits     bool
-	PerfTableLockWaits   bool
-	PerfEventsStatements bool
-	PerfEventsWaits      bool
-	PerfFileEvents       bool
-	PerfFileInstances    bool
-	UserStat             bool
-	ClientStat           bool
-	TableStat            bool
-	QueryResponseTime    bool
-	EngineTokudbStatus   bool
-	EngineInnodbStatus   bool
-	Heartbeat            bool
-	HeartbeatDatabase    string
-	HeartbeatTable       string
-}
-
 // Exporter collects MySQL metrics. It implements prometheus.Collector.
 type Exporter struct {
-	dsn          string
-	collect      Collect
-	error        prometheus.Gauge
-	totalScrapes prometheus.Counter
-	scrapeErrors *prometheus.CounterVec
-	mysqldUp     prometheus.Gauge
+	dsn      string
+	scrapers []Scraper
+	metrics  Metrics
 }
 
 // New returns a new MySQL exporter for the provided DSN.
-func New(dsn string, collect Collect) *Exporter {
+func New(dsn string, metrics Metrics, scrapers []Scraper) *Exporter {
 	// Setup extra params for the DSN, default to having a lock timeout.
 	dsnParams := []string{fmt.Sprintf(timeoutParam, *exporterLockTimeout)}
 
@@ -102,81 +72,39 @@
 	dsn += strings.Join(dsnParams, "&")
 
 	return &Exporter{
-		dsn:     dsn,
-		collect: collect,
-		totalScrapes: prometheus.NewCounter(prometheus.CounterOpts{
-			Namespace: namespace,
-			Subsystem: exporter,
-			Name:      "scrapes_total",
-			Help:      "Total number of times MySQL was scraped for metrics.",
-		}),
-		scrapeErrors: prometheus.NewCounterVec(prometheus.CounterOpts{
-			Namespace: namespace,
-			Subsystem: exporter,
-			Name:      "scrape_errors_total",
-			Help:      "Total number of times an error occurred scraping a MySQL.",
-		}, []string{"collector"}),
-		error: prometheus.NewGauge(prometheus.GaugeOpts{
-			Namespace: namespace,
-			Subsystem: exporter,
-			Name:      "last_scrape_error",
-			Help:      "Whether the last scrape of metrics from MySQL resulted in an error (1 for error, 0 for success).",
-		}),
-		mysqldUp: prometheus.NewGauge(prometheus.GaugeOpts{
-			Namespace: namespace,
-			Name:      "up",
-			Help:      "Whether the MySQL server is up.",
-		}),
+		dsn:      dsn,
+		scrapers: scrapers,
+		metrics:  metrics,
 	}
 }
 
 // Describe implements prometheus.Collector.
 func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
-	// We cannot know in advance what metrics the exporter will generate
-	// from MySQL. So we use the poor man's describe method: Run a collect
-	// and send the descriptors of all the collected metrics. The problem
-	// here is that we need to connect to the MySQL DB. If it is currently
-	// unavailable, the descriptors will be incomplete. Since this is a
-	// stand-alone exporter and not used as a library within other code
-	// implementing additional metrics, the worst that can happen is that we
-	// don't detect inconsistent metrics created by this exporter
-	// itself. Also, a change in the monitored MySQL instance may change the
-	// exported metrics during the runtime of the exporter.
-
-	metricCh := make(chan prometheus.Metric)
-	doneCh := make(chan struct{})
-
-	go func() {
-		for m := range metricCh {
-			ch <- m.Desc()
-		}
-		close(doneCh)
-	}()
-
-	e.Collect(metricCh)
-	close(metricCh)
-	<-doneCh
+	ch <- e.metrics.TotalScrapes.Desc()
+	ch <- e.metrics.Error.Desc()
+	e.metrics.ScrapeErrors.Describe(ch)
+	ch <- e.metrics.MySQLUp.Desc()
 }
 
 // Collect implements prometheus.Collector.
 func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
 	e.scrape(ch)
 
-	ch <- e.totalScrapes
-	ch <- e.error
-	e.scrapeErrors.Collect(ch)
-	ch <- e.mysqldUp
+	ch <- e.metrics.TotalScrapes
+	ch <- e.metrics.Error
+	e.metrics.ScrapeErrors.Collect(ch)
+	ch <- e.metrics.MySQLUp
 }
 
 func (e *Exporter) scrape(ch chan<- prometheus.Metric) {
-	e.totalScrapes.Inc()
+	e.metrics.TotalScrapes.Inc()
 	var err error
 
 	scrapeTime := time.Now()
 	db, err := sql.Open("mysql", e.dsn)
 	if err != nil {
 		log.Errorln("Error opening connection to database:", err)
-		e.error.Set(1)
+		e.metrics.Error.Set(1)
 		return
 	}
 	defer db.Close()
@@ -187,223 +115,69 @@
 	// Set max lifetime for a connection.
 	db.SetConnMaxLifetime(1 * time.Minute)
 
-	isUpRows, err := db.Query(upQuery)
-	if err != nil {
+	if err := db.Ping(); err != nil {
 		log.Errorln("Error pinging mysqld:", err)
-		e.mysqldUp.Set(0)
-		e.error.Set(1)
+		e.metrics.MySQLUp.Set(0)
+		e.metrics.Error.Set(1)
 		return
 	}
-	isUpRows.Close()
-	e.mysqldUp.Set(1)
+	e.metrics.MySQLUp.Set(1)
 
 	ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "connection")
 
-	if e.collect.GlobalStatus {
-		scrapeTime = time.Now()
-		if err = ScrapeGlobalStatus(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.global_status:", err)
-			e.scrapeErrors.WithLabelValues("collect.global_status").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.global_status")
-	}
-	if e.collect.GlobalVariables {
-		scrapeTime = time.Now()
-		if err = ScrapeGlobalVariables(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.global_variables:", err)
-			e.scrapeErrors.WithLabelValues("collect.global_variables").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.global_variables")
-	}
-	if e.collect.SlaveStatus {
-		scrapeTime = time.Now()
-		if err = ScrapeSlaveStatus(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.slave_status:", err)
-			e.scrapeErrors.WithLabelValues("collect.slave_status").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.slave_status")
-	}
-	if e.collect.Processlist {
-		scrapeTime = time.Now()
-		if err = ScrapeProcesslist(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.info_schema.processlist:", err)
-			e.scrapeErrors.WithLabelValues("collect.info_schema.processlist").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.info_schema.processlist")
-	}
-	if e.collect.TableSchema {
-		scrapeTime = time.Now()
-		if err = ScrapeTableSchema(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.info_schema.tables:", err)
-			e.scrapeErrors.WithLabelValues("collect.info_schema.tables").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.info_schema.tables")
-	}
-	if e.collect.InnodbTablespaces {
-		scrapeTime = time.Now()
-		if err = ScrapeInfoSchemaInnodbTablespaces(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.info_schema.innodb_sys_tablespaces:", err)
-			e.scrapeErrors.WithLabelValues("collect.info_schema.innodb_sys_tablespaces").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.info_schema.innodb_sys_tablespaces")
-	}
-	if e.collect.InnodbMetrics {
-		if err = ScrapeInnodbMetrics(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.info_schema.innodb_metrics:", err)
-			e.scrapeErrors.WithLabelValues("collect.info_schema.innodb_metrics").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.info_schema.innodb_metrics")
-	}
-	if e.collect.AutoIncrementColumns {
-		scrapeTime = time.Now()
-		if err = ScrapeAutoIncrementColumns(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.auto_increment.columns:", err)
-			e.scrapeErrors.WithLabelValues("collect.auto_increment.columns").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.auto_increment.columns")
-	}
-	if e.collect.BinlogSize {
-		scrapeTime = time.Now()
-		if err = ScrapeBinlogSize(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.binlog_size:", err)
-			e.scrapeErrors.WithLabelValues("collect.binlog_size").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.binlog_size")
+	wg := &sync.WaitGroup{}
+	defer wg.Wait()
+	for _, scraper := range e.scrapers {
+		wg.Add(1)
+		go func(scraper Scraper) {
+			defer wg.Done()
+			label := "collect." + scraper.Name()
+			scrapeTime := time.Now()
+			if err := scraper.Scrape(db, ch); err != nil {
+				log.Errorln("Error scraping for "+label+":", err)
+				e.metrics.ScrapeErrors.WithLabelValues(label).Inc()
+				e.metrics.Error.Set(1)
+			}
+			ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), label)
+		}(scraper)
 	}
-	if e.collect.PerfTableIOWaits {
-		scrapeTime = time.Now()
-		if err = ScrapePerfTableIOWaits(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.perf_schema.tableiowaits:", err)
-			e.scrapeErrors.WithLabelValues("collect.perf_schema.tableiowaits").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.perf_schema.tableiowaits")
-	}
-	if e.collect.PerfIndexIOWaits {
-		scrapeTime = time.Now()
-		if err = ScrapePerfIndexIOWaits(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.perf_schema.indexiowaits:", err)
-			e.scrapeErrors.WithLabelValues("collect.perf_schema.indexiowaits").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.perf_schema.indexiowaits")
-	}
-	if e.collect.PerfTableLockWaits {
-		scrapeTime = time.Now()
-		if err = ScrapePerfTableLockWaits(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.perf_schema.tablelocks:", err)
-			e.scrapeErrors.WithLabelValues("collect.perf_schema.tablelocks").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.perf_schema.tablelocks")
-	}
-	if e.collect.PerfEventsStatements {
-		scrapeTime = time.Now()
-		if err = ScrapePerfEventsStatements(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.perf_schema.eventsstatements:", err)
-			e.scrapeErrors.WithLabelValues("collect.perf_schema.eventsstatements").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.perf_schema.eventsstatements")
-	}
-	if e.collect.PerfEventsWaits {
-		scrapeTime = time.Now()
-		if err = ScrapePerfEventsWaits(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.perf_schema.eventswaits:", err)
-			e.scrapeErrors.WithLabelValues("collect.perf_schema.eventswaits").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.perf_schema.eventswaits")
-	}
-	if e.collect.PerfFileEvents {
-		scrapeTime = time.Now()
-		if err = ScrapePerfFileEvents(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.perf_schema.file_events:", err)
-			e.scrapeErrors.WithLabelValues("collect.perf_schema.file_events").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.perf_schema.file_events")
-	}
-	if e.collect.PerfFileInstances {
-		scrapeTime = time.Now()
-		if err = ScrapePerfFileInstances(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.perf_schema.file_instances:", err)
-			e.scrapeErrors.WithLabelValues("collect.perf_schema.file_instances").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.perf_schema.file_instances")
-	}
-	if e.collect.UserStat {
-		scrapeTime = time.Now()
-		if err = ScrapeUserStat(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.info_schema.userstats:", err)
-			e.scrapeErrors.WithLabelValues("collect.info_schema.userstats").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.info_schema.userstats")
-	}
-	if e.collect.ClientStat {
-		scrapeTime = time.Now()
-		if err = ScrapeClientStat(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.info_schema.clientstats:", err)
-			e.scrapeErrors.WithLabelValues("collect.info_schema.clientstats").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.info_schema.clientstats")
-	}
-	if e.collect.TableStat {
-		scrapeTime = time.Now()
-		if err = ScrapeTableStat(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.info_schema.tablestats:", err)
-			e.scrapeErrors.WithLabelValues("collect.info_schema.tablestats").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.info_schema.tablestats")
-	}
-	if e.collect.QueryResponseTime {
-		scrapeTime = time.Now()
-		if err = ScrapeQueryResponseTime(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.info_schema.query_response_time:", err)
-			e.scrapeErrors.WithLabelValues("collect.info_schema.query_response_time").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.info_schema.query_response_time")
-	}
-	if e.collect.EngineTokudbStatus {
-		scrapeTime = time.Now()
-		if err = ScrapeEngineTokudbStatus(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.engine_tokudb_status:", err)
-			e.scrapeErrors.WithLabelValues("collect.engine_tokudb_status").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.engine_tokudb_status")
-	}
-	if e.collect.EngineInnodbStatus {
-		scrapeTime = time.Now()
-		if err = ScrapeEngineInnodbStatus(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.engine_innodb_status:", err)
-			e.scrapeErrors.WithLabelValues("collect.engine_innodb_status").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.engine_innodb_status")
-	}
-	if e.collect.Heartbeat {
-		scrapeTime = time.Now()
-		if err = ScrapeHeartbeat(db, ch, e.collect.HeartbeatDatabase, e.collect.HeartbeatTable); err != nil {
-			log.Errorln("Error scraping for collect.heartbeat:", err)
-			e.scrapeErrors.WithLabelValues("collect.heartbeat").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.heartbeat")
+}
+
+// Metrics represents exporter metrics which values can be carried between http requests.
+type Metrics struct {
+	TotalScrapes prometheus.Counter
+	ScrapeErrors *prometheus.CounterVec
+	Error        prometheus.Gauge
+	MySQLUp      prometheus.Gauge
+}
+
+// NewMetrics creates new Metrics instance.
+func NewMetrics() Metrics {
+	subsystem := exporter
+	return Metrics{
+		TotalScrapes: prometheus.NewCounter(prometheus.CounterOpts{
+			Namespace: namespace,
+			Subsystem: subsystem,
+			Name:      "scrapes_total",
+			Help:      "Total number of times MySQL was scraped for metrics.",
+		}),
+		ScrapeErrors: prometheus.NewCounterVec(prometheus.CounterOpts{
+			Namespace: namespace,
+			Subsystem: subsystem,
+			Name:      "scrape_errors_total",
+			Help:      "Total number of times an error occurred scraping a MySQL.",
+		}, []string{"collector"}),
+		Error: prometheus.NewGauge(prometheus.GaugeOpts{
+			Namespace: namespace,
+			Subsystem: subsystem,
+			Name:      "last_scrape_error",
+			Help:      "Whether the last scrape of metrics from MySQL resulted in an error (1 for error, 0 for success).",
+		}),
+		MySQLUp: prometheus.NewGauge(prometheus.GaugeOpts{
+			Namespace: namespace,
+			Name:      "up",
+			Help:      "Whether the MySQL server is up.",
+		}),
 	}
 }
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/exporter_test.go prometheus-mysqld-exporter-0.11.0+ds/collector/exporter_test.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/exporter_test.go	2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/exporter_test.go	2018-10-15 19:28:15.000000000 +0000
@@ -15,9 +15,12 @@
 		t.Skip("-short is passed, skipping test")
 	}
 
-	exporter := New(dsn, Collect{
-		GlobalStatus: true,
-	})
+	exporter := New(
+		dsn,
+		NewMetrics(),
+		[]Scraper{
+			ScrapeGlobalStatus{},
+		})
 
 	convey.Convey("Metrics describing", t, func() {
 		ch := make(chan *prometheus.Desc)
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/global_status.go prometheus-mysqld-exporter-0.11.0+ds/collector/global_status.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/global_status.go	2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/global_status.go	2018-10-15 19:28:15.000000000 +0000
@@ -11,15 +11,16 @@
 )
 
 const (
-	// Scrape query
+	// Scrape query.
 	globalStatusQuery = `SHOW GLOBAL STATUS`
-	// Subsytem.
+	// Subsystem.
 	globalStatus = "global_status"
 )
 
 // Regexp to match various groups of status vars.
 var globalStatusRE = regexp.MustCompile(`^(com|handler|connection_errors|innodb_buffer_pool_pages|innodb_rows|performance_schema)_(.*)$`)
 
+// Metric descriptors.
 var (
 	globalCommandsDesc = prometheus.NewDesc(
 		prometheus.BuildFQName(namespace, globalStatus, "commands_total"),
@@ -59,7 +60,20 @@
 	)
 )
 
 // ScrapeGlobalStatus collects from `SHOW GLOBAL STATUS`.
-func ScrapeGlobalStatus(db *sql.DB, ch chan<- prometheus.Metric) error {
+type ScrapeGlobalStatus struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeGlobalStatus) Name() string {
+	return globalStatus
+}
+
+// Help describes the role of the Scraper.
+func (ScrapeGlobalStatus) Help() string {
+	return "Collect from SHOW GLOBAL STATUS"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapeGlobalStatus) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 	globalStatusRows, err := db.Query(globalStatusQuery)
 	if err != nil {
 		return err
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/global_status_test.go prometheus-mysqld-exporter-0.11.0+ds/collector/global_status_test.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/global_status_test.go	2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/global_status_test.go	2018-10-15 19:28:15.000000000 +0000
@@ -38,7 +38,7 @@
 
 	ch := make(chan prometheus.Metric)
 	go func() {
-		if err = ScrapeGlobalStatus(db, ch); err != nil {
+		if err = (ScrapeGlobalStatus{}).Scrape(db, ch); err != nil {
 			t.Errorf("error calling function on test: %s", err)
 		}
 		close(ch)
@@ -68,6 +68,6 @@
 
 	// Ensure all SQL queries were executed
 	if err := mock.ExpectationsWereMet(); err != nil {
-		t.Errorf("there were unfulfilled expections: %s", err)
+		t.Errorf("there were unfulfilled exceptions: %s", err)
 	}
 }
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/global_variables.go prometheus-mysqld-exporter-0.11.0+ds/collector/global_variables.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/global_variables.go	2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/global_variables.go	2018-10-15 19:28:15.000000000 +0000
@@ -19,7 +19,20 @@
 )
 
 // ScrapeGlobalVariables collects from `SHOW GLOBAL VARIABLES`.
-func ScrapeGlobalVariables(db *sql.DB, ch chan<- prometheus.Metric) error {
+type ScrapeGlobalVariables struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeGlobalVariables) Name() string {
+	return globalVariables
+}
+
+// Help describes the role of the Scraper.
+func (ScrapeGlobalVariables) Help() string {
+	return "Collect from SHOW GLOBAL VARIABLES"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapeGlobalVariables) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 	globalVariablesRows, err := db.Query(globalVariablesQuery)
 	if err != nil {
 		return err
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/global_variables_test.go prometheus-mysqld-exporter-0.11.0+ds/collector/global_variables_test.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/global_variables_test.go	2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/global_variables_test.go	2018-10-15 19:28:15.000000000 +0000
@@ -37,7 +37,7 @@
 
 	ch := make(chan prometheus.Metric)
 	go func() {
-		if err = ScrapeGlobalVariables(db, ch); err != nil {
+		if err = (ScrapeGlobalVariables{}).Scrape(db, ch); err != nil {
 			t.Errorf("error calling function on test: %s", err)
 		}
 		close(ch)
@@ -64,7 +64,7 @@
 
 	// Ensure all SQL queries were executed
 	if err := mock.ExpectationsWereMet(); err != nil {
-		t.Errorf("there were unfulfilled expections: %s", err)
+		t.Errorf("there were unfulfilled exceptions: %s", err)
 	}
 }
 
@@ -76,7 +76,7 @@
 	convey.Convey("Parse wsrep_provider_options", t, func() {
 		convey.So(parseWsrepProviderOptions(testE), convey.ShouldEqual, 0)
 		convey.So(parseWsrepProviderOptions(testM), convey.ShouldEqual, 128*1024*1024)
-		convey.So(parseWsrepProviderOptions(testG), convey.ShouldEqual, 2*1024*1024*1024)
+		convey.So(parseWsrepProviderOptions(testG), convey.ShouldEqual, int64(2*1024*1024*1024))
 		convey.So(parseWsrepProviderOptions(testB), convey.ShouldEqual, 131072)
 	})
 }
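The int64 conversion in the testG assertion is the 32-bit fix from #273 in the changelog: 2*1024*1024*1024 is 2147483648, one past the largest int32, so as an untyped constant it fails to compile wherever Go's int is 32 bits wide. A self-contained illustration of the failure mode and the fix:

```go
package main

import "fmt"

func main() {
	// On a 32-bit platform the next line would not compile:
	//     constant 2147483648 overflows int
	// because an untyped integer constant defaults to the platform-sized int.
	// n := 2 * 1024 * 1024 * 1024

	// Converting the constant to a 64-bit type makes it legal everywhere.
	n := int64(2 * 1024 * 1024 * 1024)
	fmt.Println(n) // 2147483648
}
```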
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/heartbeat.go prometheus-mysqld-exporter-0.11.0+ds/collector/heartbeat.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/heartbeat.go	2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/heartbeat.go	2018-10-15 19:28:15.000000000 +0000
@@ -8,6 +8,7 @@
 	"strconv"
 
 	"github.com/prometheus/client_golang/prometheus"
+	"gopkg.in/alecthomas/kingpin.v2"
 )
 
 const (
@@ -20,6 +21,17 @@
 	heartbeatQuery = "SELECT UNIX_TIMESTAMP(ts), UNIX_TIMESTAMP(NOW(6)), server_id from `%s`.`%s`"
 )
 
+var (
+	collectHeartbeatDatabase = kingpin.Flag(
+		"collect.heartbeat.database",
+		"Database from where to collect heartbeat data",
+	).Default("heartbeat").String()
+	collectHeartbeatTable = kingpin.Flag(
+		"collect.heartbeat.table",
+		"Table from where to collect heartbeat data",
+	).Default("heartbeat").String()
+)
+
 // Metric descriptors.
 var (
 	HeartbeatStoredDesc = prometheus.NewDesc(
@@ -41,8 +53,21 @@
 //  ts varchar(26) NOT NULL,
 //  server_id int unsigned NOT NULL PRIMARY KEY,
 // );
-func ScrapeHeartbeat(db *sql.DB, ch chan<- prometheus.Metric, collectDatabase, collectTable string) error {
-	query := fmt.Sprintf(heartbeatQuery, collectDatabase, collectTable)
+type ScrapeHeartbeat struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeHeartbeat) Name() string {
+	return "heartbeat"
+}
+
+// Help describes the role of the Scraper.
+func (ScrapeHeartbeat) Help() string {
+	return "Collect from heartbeat"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapeHeartbeat) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
+	query := fmt.Sprintf(heartbeatQuery, *collectHeartbeatDatabase, *collectHeartbeatTable)
 	heartbeatRows, err := db.Query(query)
 	if err != nil {
 		return err
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/heartbeat_test.go prometheus-mysqld-exporter-0.11.0+ds/collector/heartbeat_test.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/heartbeat_test.go	2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/heartbeat_test.go	2018-10-15 19:28:15.000000000 +0000
@@ -7,9 +7,18 @@
 	dto "github.com/prometheus/client_model/go"
 	"github.com/smartystreets/goconvey/convey"
 	"gopkg.in/DATA-DOG/go-sqlmock.v1"
+	"gopkg.in/alecthomas/kingpin.v2"
 )
 
 func TestScrapeHeartbeat(t *testing.T) {
+	_, err := kingpin.CommandLine.Parse([]string{
+		"--collect.heartbeat.database", "heartbeat-test",
+		"--collect.heartbeat.table", "heartbeat-test",
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	db, mock, err := sqlmock.New()
 	if err != nil {
 		t.Fatalf("error opening a stub database connection: %s", err)
@@ -19,13 +28,11 @@
 	columns := []string{"UNIX_TIMESTAMP(ts)", "UNIX_TIMESTAMP(NOW(6))", "server_id"}
 	rows := sqlmock.NewRows(columns).
 		AddRow("1487597613.001320", "1487598113.448042", 1)
-	mock.ExpectQuery(sanitizeQuery("SELECT UNIX_TIMESTAMP(ts), UNIX_TIMESTAMP(NOW(6)), server_id from `heartbeat`.`heartbeat`")).WillReturnRows(rows)
+	mock.ExpectQuery(sanitizeQuery("SELECT UNIX_TIMESTAMP(ts), UNIX_TIMESTAMP(NOW(6)), server_id from `heartbeat-test`.`heartbeat-test`")).WillReturnRows(rows)
 
 	ch := make(chan prometheus.Metric)
 	go func() {
-		database := "heartbeat"
-		table := "heartbeat"
-		if err = ScrapeHeartbeat(db, ch, database, table); err != nil {
+		if err = (ScrapeHeartbeat{}).Scrape(db, ch); err != nil {
 			t.Errorf("error calling function on test: %s", err)
 		}
 		close(ch)
@@ -44,6 +51,6 @@
 
 	// Ensure all SQL queries were executed
 	if err := mock.ExpectationsWereMet(); err != nil {
-		t.Errorf("there were unfulfilled expections: %s", err)
+		t.Errorf("there were unfulfilled exceptions: %s", err)
 	}
 }
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_auto_increment.go prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_auto_increment.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_auto_increment.go	2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_auto_increment.go	2018-10-15 19:28:15.000000000 +0000
@@ -22,6 +22,7 @@
 		  WHERE c.extra = 'auto_increment' AND t.auto_increment IS NOT NULL
 		`
 
+// Metric descriptors.
 var (
 	globalInfoSchemaAutoIncrementDesc = prometheus.NewDesc(
 		prometheus.BuildFQName(namespace, informationSchema, "auto_increment_column"),
@@ -36,7 +37,20 @@
 )
 
 // ScrapeAutoIncrementColumns collects auto_increment column information.
-func ScrapeAutoIncrementColumns(db *sql.DB, ch chan<- prometheus.Metric) error {
+type ScrapeAutoIncrementColumns struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeAutoIncrementColumns) Name() string {
+	return "auto_increment.columns"
+}
+
+// Help describes the role of the Scraper.
+func (ScrapeAutoIncrementColumns) Help() string {
+	return "Collect auto_increment columns and max values from information_schema"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapeAutoIncrementColumns) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 	autoIncrementRows, err := db.Query(infoSchemaAutoIncrementQuery)
 	if err != nil {
 		return err
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_clientstats.go prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_clientstats.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_clientstats.go	2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_clientstats.go	2018-10-15 19:28:15.000000000 +0000
@@ -128,7 +128,20 @@
 )
 
 // ScrapeClientStat collects from `information_schema.client_statistics`.
-func ScrapeClientStat(db *sql.DB, ch chan<- prometheus.Metric) error {
+type ScrapeClientStat struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeClientStat) Name() string {
+	return "info_schema.clientstats"
+}
+
+// Help describes the role of the Scraper.
+func (ScrapeClientStat) Help() string {
+	return "If running with userstat=1, set to true to collect client statistics"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapeClientStat) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 	var varName, varVal string
 	err := db.QueryRow(userstatCheckQuery).Scan(&varName, &varVal)
 	if err != nil {
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_clientstats_test.go prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_clientstats_test.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_clientstats_test.go	2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_clientstats_test.go	2018-10-15 19:28:15.000000000 +0000
@@ -26,7 +26,7 @@
 
 	ch := make(chan prometheus.Metric)
 	go func() {
-		if err = ScrapeClientStat(db, ch); err != nil {
+		if err = (ScrapeClientStat{}).Scrape(db, ch); err != nil {
 			t.Errorf("error calling function on test: %s", err)
 		}
 		close(ch)
@@ -65,6 +65,6 @@
 
 	// Ensure all SQL queries were executed
 	if err := mock.ExpectationsWereMet(); err != nil {
-		t.Errorf("there were unfulfilled expections: %s", err)
+		t.Errorf("there were unfulfilled exceptions: %s", err)
 	}
 }
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_innodb_cmp.go prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_innodb_cmp.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_innodb_cmp.go	1970-01-01 00:00:00.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_innodb_cmp.go	2018-10-15 19:28:15.000000000 +0000
@@ -0,0 +1,90 @@
+// Scrape `information_schema.INNODB_CMP`.
+
+package collector
+
+import (
+	"database/sql"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+const innodbCmpQuery = `
+		SELECT
+		  page_size, compress_ops, compress_ops_ok, compress_time, uncompress_ops, uncompress_time
+		  FROM information_schema.innodb_cmp
+		`
+
+// Metric descriptors.
+var (
+	infoSchemaInnodbCmpCompressOps = prometheus.NewDesc(
+		prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_compress_ops_total"),
+		"Number of times a B-tree page of the size PAGE_SIZE has been compressed.",
+		[]string{"page_size"}, nil,
+	)
+	infoSchemaInnodbCmpCompressOpsOk = prometheus.NewDesc(
+		prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_compress_ops_ok_total"),
+		"Number of times a B-tree page of the size PAGE_SIZE has been successfully compressed.",
+		[]string{"page_size"}, nil,
+	)
+	infoSchemaInnodbCmpCompressTime = prometheus.NewDesc(
+		prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_compress_time_seconds_total"),
+		"Total time in seconds spent in attempts to compress B-tree pages.",
+		[]string{"page_size"}, nil,
+	)
+	infoSchemaInnodbCmpUncompressOps = prometheus.NewDesc(
+		prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_uncompress_ops_total"),
+		"Number of times a B-tree page of the size PAGE_SIZE has been uncompressed.",
+		[]string{"page_size"}, nil,
+	)
+	infoSchemaInnodbCmpUncompressTime = prometheus.NewDesc(
+		prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_uncompress_time_seconds_total"),
+		"Total time in seconds spent in uncompressing B-tree pages.",
+		[]string{"page_size"}, nil,
+	)
+)
+
+// ScrapeInnodbCmp collects from `information_schema.innodb_cmp`.
+type ScrapeInnodbCmp struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeInnodbCmp) Name() string {
+	return informationSchema + ".innodb_cmp"
+}
+
+// Help describes the role of the Scraper.
+func (ScrapeInnodbCmp) Help() string {
+	return "Collect metrics from information_schema.innodb_cmp"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapeInnodbCmp) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
+
+	informationSchemaInnodbCmpRows, err := db.Query(innodbCmpQuery)
+	if err != nil {
+		return err
+	}
+	defer informationSchemaInnodbCmpRows.Close()
+
+	var (
+		page_size                                                                      string
+		compress_ops, compress_ops_ok, compress_time, uncompress_ops, uncompress_time float64
+	)
+
+	for informationSchemaInnodbCmpRows.Next() {
+
+		if err := informationSchemaInnodbCmpRows.Scan(
+			&page_size, &compress_ops, &compress_ops_ok, &compress_time, &uncompress_ops, &uncompress_time,
+		); err != nil {
+			return err
+		}
+
+		ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpCompressOps, prometheus.CounterValue, compress_ops, page_size)
+		ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpCompressOpsOk, prometheus.CounterValue, compress_ops_ok, page_size)
+		ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpCompressTime, prometheus.CounterValue, compress_time, page_size)
+		ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpUncompressOps, prometheus.CounterValue, uncompress_ops, page_size)
+		ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpUncompressTime, prometheus.CounterValue, uncompress_time, page_size)
+
+	}
+
+	return nil
+}
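For reference, the fully qualified metric names these descriptors produce follow the usual namespace/subsystem join. Assuming the package's namespace and informationSchema constants are "mysql" and "info_schema", their values elsewhere in the exporter, the first descriptor above becomes mysql_info_schema_innodb_cmp_compress_ops_total:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Mirrors the descriptor names built above: namespace, subsystem and
	// metric name joined with underscores. The constant values are assumed,
	// not shown in this diff.
	fmt.Println(prometheus.BuildFQName("mysql", "info_schema", "innodb_cmp_compress_ops_total"))
	// Output: mysql_info_schema_innodb_cmp_compress_ops_total
}
```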
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_innodb_cmpmem.go prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_innodb_cmpmem.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_innodb_cmpmem.go	1970-01-01 00:00:00.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_innodb_cmpmem.go	2018-10-15 19:28:15.000000000 +0000
@@ -0,0 +1,82 @@
+// Scrape `information_schema.INNODB_CMPMEM`.
+
+package collector
+
+import (
+	"database/sql"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+const innodbCmpMemQuery = `
+		SELECT
+		  page_size, buffer_pool_instance, pages_used, pages_free, relocation_ops, relocation_time
+		  FROM information_schema.innodb_cmpmem
+		`
+
+// Metric descriptors.
+var (
+	infoSchemaInnodbCmpMemPagesRead = prometheus.NewDesc(
+		prometheus.BuildFQName(namespace, informationSchema, "innodb_cmpmem_pages_used_total"),
+		"Number of blocks of the size PAGE_SIZE that are currently in use.",
+		[]string{"page_size", "buffer_pool"}, nil,
+	)
+	infoSchemaInnodbCmpMemPagesFree = prometheus.NewDesc(
+		prometheus.BuildFQName(namespace, informationSchema, "innodb_cmpmem_pages_free_total"),
+		"Number of blocks of the size PAGE_SIZE that are currently available for allocation.",
+		[]string{"page_size", "buffer_pool"}, nil,
+	)
+	infoSchemaInnodbCmpMemRelocationOps = prometheus.NewDesc(
+		prometheus.BuildFQName(namespace, informationSchema, "innodb_cmpmem_relocation_ops_total"),
+		"Number of times a block of the size PAGE_SIZE has been relocated.",
+		[]string{"page_size", "buffer_pool"}, nil,
+	)
+	infoSchemaInnodbCmpMemRelocationTime = prometheus.NewDesc(
+		prometheus.BuildFQName(namespace, informationSchema, "innodb_cmpmem_relocation_time_seconds_total"),
+		"Total time in seconds spent in relocating blocks.",
+		[]string{"page_size", "buffer_pool"}, nil,
+	)
+)
+
+// ScrapeInnodbCmp collects from `information_schema.innodb_cmp`.
+type ScrapeInnodbCmpMem struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeInnodbCmpMem) Name() string {
+	return informationSchema + ".innodb_cmpmem"
+}
+
+// Help describes the role of the Scraper.
+func (ScrapeInnodbCmpMem) Help() string {
+	return "Collect metrics from information_schema.innodb_cmpmem"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapeInnodbCmpMem) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
+
+	informationSchemaInnodbCmpMemRows, err := db.Query(innodbCmpMemQuery)
+	if err != nil {
+		return err
+	}
+	defer informationSchemaInnodbCmpMemRows.Close()
+
+	var (
+		page_size, buffer_pool                                   string
+		pages_used, pages_free, relocation_ops, relocation_time float64
+	)
+
+	for informationSchemaInnodbCmpMemRows.Next() {
+		if err := informationSchemaInnodbCmpMemRows.Scan(
+			&page_size, &buffer_pool, &pages_used, &pages_free, &relocation_ops, &relocation_time,
+		); err != nil {
+			return err
+		}
+
+		ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpMemPagesRead, prometheus.CounterValue, pages_used, page_size, buffer_pool)
+		ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpMemPagesFree, prometheus.CounterValue, pages_free, page_size, buffer_pool)
+		ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpMemRelocationOps, prometheus.CounterValue, relocation_ops, page_size, buffer_pool)
+		ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpMemRelocationTime, prometheus.CounterValue, (relocation_time / 1000), page_size, buffer_pool)
+
+	}
+	return nil
+}
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_innodb_cmpmem_test.go prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_innodb_cmpmem_test.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_innodb_cmpmem_test.go	1970-01-01 00:00:00.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_innodb_cmpmem_test.go	2018-10-15 19:28:15.000000000 +0000
@@ -0,0 +1,49 @@
+package collector
+
+import (
+	"testing"
+
+	"github.com/prometheus/client_golang/prometheus"
+	dto "github.com/prometheus/client_model/go"
+	"github.com/smartystreets/goconvey/convey"
+	"gopkg.in/DATA-DOG/go-sqlmock.v1"
+)
+
+func TestScrapeInnodbCmpMem(t *testing.T) {
+	db, mock, err := sqlmock.New()
+	if err != nil {
+		t.Fatalf("error opening a stub database connection: %s", err)
+	}
+	defer db.Close()
+
+	columns := []string{"page_size", "buffer_pool", "pages_used", "pages_free", "relocation_ops", "relocation_time"}
+	rows := sqlmock.NewRows(columns).
+		AddRow("1024", "0", 30, 40, 50, 6000)
+	mock.ExpectQuery(sanitizeQuery(innodbCmpMemQuery)).WillReturnRows(rows)
+
+	ch := make(chan prometheus.Metric)
+	go func() {
+		if err = (ScrapeInnodbCmpMem{}).Scrape(db, ch); err != nil {
+			t.Errorf("error calling function on test: %s", err)
+		}
+		close(ch)
+	}()
+
+	expected := []MetricResult{
+		{labels: labelMap{"page_size": "1024", "buffer_pool": "0"}, value: 30, metricType: dto.MetricType_COUNTER},
+		{labels: labelMap{"page_size": "1024", "buffer_pool": "0"}, value: 40, metricType: dto.MetricType_COUNTER},
+		{labels: labelMap{"page_size": "1024", "buffer_pool": "0"}, value: 50, metricType: dto.MetricType_COUNTER},
+		{labels: labelMap{"page_size": "1024", "buffer_pool": "0"}, value: 6, metricType: dto.MetricType_COUNTER},
+	}
+	convey.Convey("Metrics comparison", t, func() {
+		for _, expect := range expected {
+			got := readMetric(<-ch)
+			convey.So(expect, convey.ShouldResemble, got)
+		}
+	})
+
+	// Ensure all SQL queries were executed
+	if err := mock.ExpectationsWereMet(); err != nil {
+		t.Errorf("there were unfulfilled exceptions: %s", err)
+	}
+}
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_innodb_cmp_test.go prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_innodb_cmp_test.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_innodb_cmp_test.go	1970-01-01 00:00:00.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_innodb_cmp_test.go	2018-10-15 19:28:15.000000000 +0000
@@ -0,0 +1,50 @@
+package collector
+
+import (
+	"testing"
+
+	"github.com/prometheus/client_golang/prometheus"
+	dto "github.com/prometheus/client_model/go"
+	"github.com/smartystreets/goconvey/convey"
+	"gopkg.in/DATA-DOG/go-sqlmock.v1"
+)
+
+func TestScrapeInnodbCmp(t *testing.T) {
+	db, mock, err := sqlmock.New()
+	if err != nil {
+		t.Fatalf("error opening a stub database connection: %s", err)
+	}
+	defer db.Close()
+
+	columns := []string{"page_size", "compress_ops", "compress_ops_ok", "compress_time", "uncompress_ops", "uncompress_time"}
+	rows := sqlmock.NewRows(columns).
+		AddRow("1024", 10, 20, 30, 40, 50)
+	mock.ExpectQuery(sanitizeQuery(innodbCmpQuery)).WillReturnRows(rows)
+
+	ch := make(chan prometheus.Metric)
+	go func() {
+		if err = (ScrapeInnodbCmp{}).Scrape(db, ch); err != nil {
+			t.Errorf("error calling function on test: %s", err)
+		}
+		close(ch)
+	}()
+
+	expected := []MetricResult{
+		{labels: labelMap{"page_size": "1024"}, value: 10, metricType: dto.MetricType_COUNTER},
+		{labels: labelMap{"page_size": "1024"}, value: 20, metricType: dto.MetricType_COUNTER},
+		{labels: labelMap{"page_size": "1024"}, value: 30, metricType: dto.MetricType_COUNTER},
+		{labels: labelMap{"page_size": "1024"}, value: 40, metricType: dto.MetricType_COUNTER},
+		{labels: labelMap{"page_size": "1024"}, value: 50, metricType: dto.MetricType_COUNTER},
+	}
+	convey.Convey("Metrics comparison", t, func() {
+		for _, expect := range expected {
+			got := readMetric(<-ch)
+			convey.So(expect, convey.ShouldResemble, got)
+		}
+	})
+
+	// Ensure all SQL queries were executed
+	if err := mock.ExpectationsWereMet(); err != nil {
+		t.Errorf("there were unfulfilled exceptions: %s", err)
+	}
+}
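Both compression tests pass their SQL through sanitizeQuery before handing it to mock.ExpectQuery, because go-sqlmock interprets the expectation as a regular expression and these queries contain metacharacters such as parentheses. The helper itself lives in the package's shared test code, outside this diff; an illustrative stand-in with the behavior the tests rely on:

```go
package collector

import "regexp"

// quoteSQLForMock escapes a literal SQL string so it can be used as a
// go-sqlmock ExpectQuery pattern. This is a hypothetical stand-in for the
// package's real sanitizeQuery helper, which is defined outside this diff.
func quoteSQLForMock(query string) string {
	return regexp.QuoteMeta(query)
}
```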
+func (ScrapeInnodbMetrics) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 innodbMetricsRows, err := db.Query(infoSchemaInnodbMetricsQuery)
 if err != nil {
 return err
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_innodb_metrics_test.go prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_innodb_metrics_test.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_innodb_metrics_test.go 2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_innodb_metrics_test.go 2018-10-15 19:28:15.000000000 +0000
@@ -40,7 +40,7 @@
 ch := make(chan prometheus.Metric)
 go func() {
- if err = ScrapeInnodbMetrics(db, ch); err != nil {
+ if err = (ScrapeInnodbMetrics{}).Scrape(db, ch); err != nil {
 t.Errorf("error calling function on test: %s", err)
 }
 close(ch)
@@ -64,6 +64,6 @@
 
 // Ensure all SQL queries were executed
 if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expections: %s", err)
+ t.Errorf("there were unfulfilled expectations: %s", err)
 }
 }
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_innodb_sys_tablespaces.go prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_innodb_sys_tablespaces.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_innodb_sys_tablespaces.go 2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_innodb_sys_tablespaces.go 2018-10-15 19:28:15.000000000 +0000
@@ -20,6 +20,7 @@
 FROM information_schema.innodb_sys_tablespaces
 `
 
+// Metric descriptors.
 var (
 infoSchemaInnodbTablesspaceInfoDesc = prometheus.NewDesc(
 prometheus.BuildFQName(namespace, informationSchema, "innodb_tablespace_space_info"),
@@ -39,7 +40,20 @@
 )
 
 // ScrapeInfoSchemaInnodbTablespaces collects from `information_schema.innodb_sys_tablespaces`.
-func ScrapeInfoSchemaInnodbTablespaces(db *sql.DB, ch chan<- prometheus.Metric) error {
+type ScrapeInfoSchemaInnodbTablespaces struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeInfoSchemaInnodbTablespaces) Name() string {
+ return informationSchema + ".innodb_tablespaces"
+}
+
+// Help describes the role of the Scraper.
+func (ScrapeInfoSchemaInnodbTablespaces) Help() string {
+ return "Collect metrics from information_schema.innodb_sys_tablespaces"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapeInfoSchemaInnodbTablespaces) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 tablespacesRows, err := db.Query(innodbTablespacesQuery)
 if err != nil {
 return err
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_innodb_sys_tablespaces_test.go prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_innodb_sys_tablespaces_test.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_innodb_sys_tablespaces_test.go 2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_innodb_sys_tablespaces_test.go 2018-10-15 19:28:15.000000000 +0000
@@ -24,7 +24,7 @@
 ch := make(chan prometheus.Metric)
 go func() {
- if err = ScrapeInfoSchemaInnodbTablespaces(db, ch); err != nil {
+ if err = (ScrapeInfoSchemaInnodbTablespaces{}).Scrape(db, ch); err != nil {
 t.Errorf("error calling function on test: %s", err)
 }
 close(ch)
@@ -47,6 +47,6 @@
 
 // Ensure all SQL queries were executed
 if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expections: %s", err)
+ t.Errorf("there were unfulfilled expectations: %s", err)
 }
 }
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_processlist.go prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_processlist.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_processlist.go 2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_processlist.go 2018-10-15 19:28:15.000000000 +0000
@@ -20,13 +20,16 @@
 ORDER BY null
 `
 
+// Tunable flags.
 var (
- // Tunable flags.
 processlistMinTime = kingpin.Flag(
 "collect.info_schema.processlist.min_time",
 "Minimum time a thread must be in each state to be counted",
 ).Default("0").Int()
- // Prometheus descriptors.
+)
+
+// Metric descriptors.
+var (
 processlistCountDesc = prometheus.NewDesc(
 prometheus.BuildFQName(namespace, informationSchema, "threads"),
 "The number of threads (connections) split by current state.",
@@ -118,37 +121,21 @@
 }
 )
 
-func deriveThreadState(command string, state string) string {
- var normCmd = strings.Replace(strings.ToLower(command), "_", " ", -1)
- var normState = strings.Replace(strings.ToLower(state), "_", " ", -1)
- // check if it's already a valid state
- _, knownState := threadStateCounterMap[normState]
- if knownState {
- return normState
- }
- // check if plain mapping applies
- mappedState, canMap := threadStateMapping[normState]
- if canMap {
- return mappedState
- }
- // check special waiting for XYZ lock
- if strings.Contains(normState, "waiting for") && strings.Contains(normState, "lock") {
- return "waiting for lock"
- }
- if normCmd == "sleep" && normState == "" {
- return "idle"
- }
- if normCmd == "query" {
- return "executing"
- }
- if normCmd == "binlog dump" {
- return "replication master"
- }
- return "other"
+// ScrapeProcesslist collects from `information_schema.processlist`.
+type ScrapeProcesslist struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeProcesslist) Name() string {
+ return informationSchema + ".processlist"
 }
 
-// ScrapeProcesslist collects from `information_schema.processlist`.
-func ScrapeProcesslist(db *sql.DB, ch chan<- prometheus.Metric) error {
+// Help describes the role of the Scraper.
+func (ScrapeProcesslist) Help() string { + return "Collect current thread state counts from the information_schema.processlist" +} + +// Scrape collects data from database connection and sends it over channel as prometheus metric. +func (ScrapeProcesslist) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error { processQuery := fmt.Sprintf( infoSchemaProcesslistQuery, *processlistMinTime, @@ -191,3 +178,32 @@ return nil } + +func deriveThreadState(command string, state string) string { + var normCmd = strings.Replace(strings.ToLower(command), "_", " ", -1) + var normState = strings.Replace(strings.ToLower(state), "_", " ", -1) + // check if it's already a valid state + _, knownState := threadStateCounterMap[normState] + if knownState { + return normState + } + // check if plain mapping applies + mappedState, canMap := threadStateMapping[normState] + if canMap { + return mappedState + } + // check special waiting for XYZ lock + if strings.Contains(normState, "waiting for") && strings.Contains(normState, "lock") { + return "waiting for lock" + } + if normCmd == "sleep" && normState == "" { + return "idle" + } + if normCmd == "query" { + return "executing" + } + if normCmd == "binlog dump" { + return "replication master" + } + return "other" +} diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_query_response_time.go prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_query_response_time.go --- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_query_response_time.go 2018-03-14 14:59:10.000000000 +0000 +++ prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_query_response_time.go 2018-10-15 19:28:15.000000000 +0000 @@ -86,7 +86,20 @@ } // ScrapeQueryResponseTime collects from `information_schema.query_response_time`. -func ScrapeQueryResponseTime(db *sql.DB, ch chan<- prometheus.Metric) error { +type ScrapeQueryResponseTime struct{} + +// Name of the Scraper. Should be unique. +func (ScrapeQueryResponseTime) Name() string { + return "info_schema.query_response_time" +} + +// Help describes the role of the Scraper. +func (ScrapeQueryResponseTime) Help() string { + return "Collect query response time distribution if query_response_time_stats is ON." +} + +// Scrape collects data from database connection and sends it over channel as prometheus metric. 
+func (ScrapeQueryResponseTime) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 var queryStats uint8
 err := db.QueryRow(queryResponseCheckQuery).Scan(&queryStats)
 if err != nil {
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_query_response_time_test.go prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_query_response_time_test.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_query_response_time_test.go 2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_query_response_time_test.go 2018-10-15 19:28:15.000000000 +0000
@@ -37,7 +37,7 @@
 ch := make(chan prometheus.Metric)
 go func() {
- if err = ScrapeQueryResponseTime(db, ch); err != nil {
+ if err = (ScrapeQueryResponseTime{}).Scrape(db, ch); err != nil {
 t.Errorf("error calling function on test: %s", err)
 }
 close(ch)
@@ -73,6 +73,6 @@
 
 // Ensure all SQL queries were executed
 if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expections: %s", err)
+ t.Errorf("there were unfulfilled expectations: %s", err)
 }
 }
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_tables.go prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_tables.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_tables.go 2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_tables.go 2018-10-15 19:28:15.000000000 +0000
@@ -36,11 +36,16 @@
 `
 )
 
+// Tunable flags.
 var (
 tableSchemaDatabases = kingpin.Flag(
 "collect.info_schema.tables.databases",
 "The list of databases to collect table stats for, or '*' for all",
 ).Default("*").String()
+)
+
+// Metric descriptors.
+var (
 infoSchemaTablesVersionDesc = prometheus.NewDesc(
 prometheus.BuildFQName(namespace, informationSchema, "table_version"),
 "The version number of the table's .frm file",
@@ -59,7 +64,20 @@
 )
 
 // ScrapeTableSchema collects from `information_schema.tables`.
-func ScrapeTableSchema(db *sql.DB, ch chan<- prometheus.Metric) error {
+type ScrapeTableSchema struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeTableSchema) Name() string {
+ return informationSchema + ".tables"
+}
+
+// Help describes the role of the Scraper.
+func (ScrapeTableSchema) Help() string {
+ return "Collect metrics from information_schema.tables"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapeTableSchema) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 var dbList []string
 if *tableSchemaDatabases == "*" {
 dbListRows, err := db.Query(dbListQuery)
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_tablestats.go prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_tablestats.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_tablestats.go 2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_tablestats.go 2018-10-15 19:28:15.000000000 +0000
@@ -19,6 +19,7 @@
 FROM information_schema.table_statistics
 `
 
+// Metric descriptors.
 var (
 infoSchemaTableStatsRowsReadDesc = prometheus.NewDesc(
 prometheus.BuildFQName(namespace, informationSchema, "table_statistics_rows_read_total"),
@@ -38,7 +39,20 @@
 )
 
 // ScrapeTableStat collects from `information_schema.table_statistics`.
-func ScrapeTableStat(db *sql.DB, ch chan<- prometheus.Metric) error {
+type ScrapeTableStat struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeTableStat) Name() string {
+ return "info_schema.tablestats"
+}
+
+// Help describes the role of the Scraper.
+func (ScrapeTableStat) Help() string {
+ return "If running with userstat=1, set to true to collect table statistics"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapeTableStat) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 var varName, varVal string
 err := db.QueryRow(userstatCheckQuery).Scan(&varName, &varVal)
 if err != nil {
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_tablestats_test.go prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_tablestats_test.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_tablestats_test.go 2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_tablestats_test.go 2018-10-15 19:28:15.000000000 +0000
@@ -27,7 +27,7 @@
 ch := make(chan prometheus.Metric)
 go func() {
- if err = ScrapeTableStat(db, ch); err != nil {
+ if err = (ScrapeTableStat{}).Scrape(db, ch); err != nil {
 t.Errorf("error calling function on test: %s", err)
 }
 close(ch)
@@ -53,6 +53,6 @@
 
 // Ensure all SQL queries were executed
 if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expections: %s", err)
+ t.Errorf("there were unfulfilled expectations: %s", err)
 }
 }
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_userstats.go prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_userstats.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_userstats.go 2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_userstats.go 2018-10-16 00:53:55.000000000 +0000
@@ -124,7 +124,20 @@
 )
 
 // ScrapeUserStat collects from `information_schema.user_statistics`.
-func ScrapeUserStat(db *sql.DB, ch chan<- prometheus.Metric) error {
+type ScrapeUserStat struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeUserStat) Name() string {
+ return "info_schema.userstats"
+}
+
+// Help describes the role of the Scraper.
+func (ScrapeUserStat) Help() string {
+ return "If running with userstat=1, set to true to collect user statistics"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapeUserStat) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 var varName, varVal string
 err := db.QueryRow(userstatCheckQuery).Scan(&varName, &varVal)
 if err != nil {
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_userstats_test.go prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_userstats_test.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/info_schema_userstats_test.go 2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_userstats_test.go 2018-10-15 19:28:15.000000000 +0000
@@ -26,7 +26,7 @@
 ch := make(chan prometheus.Metric)
 go func() {
- if err = ScrapeUserStat(db, ch); err != nil {
+ if err = (ScrapeUserStat{}).Scrape(db, ch); err != nil {
 t.Errorf("error calling function on test: %s", err)
 }
 close(ch)
@@ -65,6 +65,6 @@
 
 // Ensure all SQL queries were executed
 if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expections: %s", err)
+ t.Errorf("there were unfulfilled expectations: %s", err)
 }
 }
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/perf_schema_events_statements.go prometheus-mysqld-exporter-0.11.0+ds/collector/perf_schema_events_statements.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/perf_schema_events_statements.go 2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/perf_schema_events_statements.go 2018-10-15 19:28:15.000000000 +0000
@@ -54,7 +54,7 @@
 LIMIT %d
 `
 
-// Tuning flags.
+// Tunable flags.
 var (
 perfEventsStatementsLimit = kingpin.Flag(
 "collect.perf_schema.eventsstatements.limit",
@@ -135,7 +135,20 @@
 )
 
 // ScrapePerfEventsStatements collects from `performance_schema.events_statements_summary_by_digest`.
-func ScrapePerfEventsStatements(db *sql.DB, ch chan<- prometheus.Metric) error {
+type ScrapePerfEventsStatements struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapePerfEventsStatements) Name() string {
+ return "perf_schema.eventsstatements"
+}
+
+// Help describes the role of the Scraper.
+func (ScrapePerfEventsStatements) Help() string {
+ return "Collect metrics from performance_schema.events_statements_summary_by_digest"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapePerfEventsStatements) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 perfQuery := fmt.Sprintf(
 perfEventsStatementsQuery,
 *perfEventsStatementsDigestTextLimit,
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/perf_schema_events_waits.go prometheus-mysqld-exporter-0.11.0+ds/collector/perf_schema_events_waits.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/perf_schema_events_waits.go 2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/perf_schema_events_waits.go 2018-10-15 19:28:15.000000000 +0000
@@ -28,7 +28,20 @@
 )
 
 // ScrapePerfEventsWaits collects from `performance_schema.events_waits_summary_global_by_event_name`.
-func ScrapePerfEventsWaits(db *sql.DB, ch chan<- prometheus.Metric) error {
+type ScrapePerfEventsWaits struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapePerfEventsWaits) Name() string {
+ return "perf_schema.eventswaits"
+}
+
+// Help describes the role of the Scraper.
+func (ScrapePerfEventsWaits) Help() string { + return "Collect metrics from performance_schema.events_waits_summary_global_by_event_name" +} + +// Scrape collects data from database connection and sends it over channel as prometheus metric. +func (ScrapePerfEventsWaits) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error { // Timers here are returned in picoseconds. perfSchemaEventsWaitsRows, err := db.Query(perfEventsWaitsQuery) if err != nil { diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/perf_schema_file_events.go prometheus-mysqld-exporter-0.11.0+ds/collector/perf_schema_file_events.go --- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/perf_schema_file_events.go 2018-03-14 14:59:10.000000000 +0000 +++ prometheus-mysqld-exporter-0.11.0+ds/collector/perf_schema_file_events.go 2018-10-15 19:28:15.000000000 +0000 @@ -37,7 +37,20 @@ ) // ScrapePerfFileEvents collects from `performance_schema.file_summary_by_event_name`. -func ScrapePerfFileEvents(db *sql.DB, ch chan<- prometheus.Metric) error { +type ScrapePerfFileEvents struct{} + +// Name of the Scraper. Should be unique. +func (ScrapePerfFileEvents) Name() string { + return "perf_schema.file_events" +} + +// Help describes the role of the Scraper. +func (ScrapePerfFileEvents) Help() string { + return "Collect metrics from performance_schema.file_summary_by_event_name" +} + +// Scrape collects data from database connection and sends it over channel as prometheus metric. +func (ScrapePerfFileEvents) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error { // Timers here are returned in picoseconds. perfSchemaFileEventsRows, err := db.Query(perfFileEventsQuery) if err != nil { diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/perf_schema_file_instances.go prometheus-mysqld-exporter-0.11.0+ds/collector/perf_schema_file_instances.go --- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/perf_schema_file_instances.go 2018-03-14 14:59:10.000000000 +0000 +++ prometheus-mysqld-exporter-0.11.0+ds/collector/perf_schema_file_instances.go 2018-10-15 19:28:15.000000000 +0000 @@ -19,13 +19,16 @@ where FILE_NAME REGEXP ? ` -// Metric descriptors. +// Tunable flags. var ( performanceSchemaFileInstancesFilter = kingpin.Flag( "collect.perf_schema.file_instances.filter", "RegEx file_name filter for performance_schema.file_summary_by_instance", ).Default(".*").String() +) +// Metric descriptors. +var ( performanceSchemaFileInstancesRemovePrefix = kingpin.Flag( "collect.perf_schema.file_instances.remove_prefix", "Remove path prefix in performance_schema.file_summary_by_instance", @@ -43,8 +46,21 @@ ) ) -// ScrapePerfFileEvents collects from `performance_schema.file_summary_by_event_name`. -func ScrapePerfFileInstances(db *sql.DB, ch chan<- prometheus.Metric) error { +// ScrapePerfFileInstances collects from `performance_schema.file_summary_by_instance`. +type ScrapePerfFileInstances struct{} + +// Name of the Scraper. Should be unique. +func (ScrapePerfFileInstances) Name() string { + return "perf_schema.file_instances" +} + +// Help describes the role of the Scraper. +func (ScrapePerfFileInstances) Help() string { + return "Collect metrics from performance_schema.file_summary_by_instance" +} + +// Scrape collects data from database connection and sends it over channel as prometheus metric. +func (ScrapePerfFileInstances) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error { // Timers here are returned in picoseconds. 
 perfSchemaFileInstancesRows, err := db.Query(perfFileInstancesQuery, *performanceSchemaFileInstancesFilter)
 if err != nil {
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/perf_schema_file_instances_test.go prometheus-mysqld-exporter-0.11.0+ds/collector/perf_schema_file_instances_test.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/perf_schema_file_instances_test.go 2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/perf_schema_file_instances_test.go 2018-10-15 19:28:15.000000000 +0000
@@ -33,7 +33,7 @@
 ch := make(chan prometheus.Metric)
 go func() {
- if err = ScrapePerfFileInstances(db, ch); err != nil {
+ if err = (ScrapePerfFileInstances{}).Scrape(db, ch); err != nil {
 panic(fmt.Sprintf("error calling function on test: %s", err))
 }
 close(ch)
@@ -62,6 +62,6 @@
 
 // Ensure all SQL queries were executed
 if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expections: %s", err)
+ t.Errorf("there were unfulfilled expectations: %s", err)
 }
 }
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/perf_schema_index_io_waits.go prometheus-mysqld-exporter-0.11.0+ds/collector/perf_schema_index_io_waits.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/perf_schema_index_io_waits.go 2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/perf_schema_index_io_waits.go 2018-10-15 19:28:15.000000000 +0000
@@ -31,7 +31,20 @@
 )
 
 // ScrapePerfIndexIOWaits collects for `performance_schema.table_io_waits_summary_by_index_usage`.
-func ScrapePerfIndexIOWaits(db *sql.DB, ch chan<- prometheus.Metric) error {
+type ScrapePerfIndexIOWaits struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapePerfIndexIOWaits) Name() string {
+ return "perf_schema.indexiowaits"
+}
+
+// Help describes the role of the Scraper.
+func (ScrapePerfIndexIOWaits) Help() string {
+ return "Collect metrics from performance_schema.table_io_waits_summary_by_index_usage"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapePerfIndexIOWaits) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 perfSchemaIndexWaitsRows, err := db.Query(perfIndexIOWaitsQuery)
 if err != nil {
 return err
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/perf_schema_index_io_waits_test.go prometheus-mysqld-exporter-0.11.0+ds/collector/perf_schema_index_io_waits_test.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/perf_schema_index_io_waits_test.go 2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/perf_schema_index_io_waits_test.go 2018-10-15 19:28:15.000000000 +0000
@@ -25,7 +25,7 @@
 ch := make(chan prometheus.Metric)
 go func() {
- if err = ScrapePerfIndexIOWaits(db, ch); err != nil {
+ if err = (ScrapePerfIndexIOWaits{}).Scrape(db, ch); err != nil {
 t.Errorf("error calling function on test: %s", err)
 }
 close(ch)
@@ -56,6 +56,6 @@
 
 // Ensure all SQL queries were executed
 if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expections: %s", err)
+ t.Errorf("there were unfulfilled expectations: %s", err)
 }
 }
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/perf_schema_replication_group_member_stats.go prometheus-mysqld-exporter-0.11.0+ds/collector/perf_schema_replication_group_member_stats.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/perf_schema_replication_group_member_stats.go 1970-01-01 00:00:00.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/perf_schema_replication_group_member_stats.go 2018-10-15 19:28:15.000000000 +0000
@@ -0,0 +1,91 @@
+package collector
+
+import (
+ "database/sql"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const perfReplicationGroupMemberStatsQuery = `
+ SELECT MEMBER_ID,COUNT_TRANSACTIONS_IN_QUEUE,COUNT_TRANSACTIONS_CHECKED,COUNT_CONFLICTS_DETECTED,COUNT_TRANSACTIONS_ROWS_VALIDATING
+ FROM performance_schema.replication_group_member_stats
+ `
+
+// Metric descriptors.
+var (
+ performanceSchemaReplicationGroupMemberStatsTransInQueueDesc = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, performanceSchema, "transaction_in_queue"),
+ "The number of transactions in the queue pending conflict detection checks. Once the "+
+ "transactions have been checked for conflicts, if they pass the check, they are queued to be applied as well.",
+ []string{"member_id"}, nil,
+ )
+ performanceSchemaReplicationGroupMemberStatsTransCheckedDesc = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, performanceSchema, "transaction_checked"),
+ "The number of transactions that have been checked for conflicts.",
+ []string{"member_id"}, nil,
+ )
+ performanceSchemaReplicationGroupMemberStatsConflictsDetectedDesc = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, performanceSchema, "conflicts_detected"),
+ "The number of transactions that did not pass the conflict detection check.",
+ []string{"member_id"}, nil,
+ )
+ performanceSchemaReplicationGroupMemberStatsTransRowValidatingDesc = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, performanceSchema, "transaction_rows_validating"),
+ "The current size of the conflict detection database (against which each transaction is certified).",
+ []string{"member_id"}, nil,
+ )
+)
+
+// ScrapePerfReplicationGroupMemberStats collects from `performance_schema.replication_group_member_stats`.
+type ScrapePerfReplicationGroupMemberStats struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapePerfReplicationGroupMemberStats) Name() string {
+ return performanceSchema + ".replication_group_member_stats"
+}
+
+// Help describes the role of the Scraper.
+func (ScrapePerfReplicationGroupMemberStats) Help() string {
+ return "Collect metrics from performance_schema.replication_group_member_stats"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapePerfReplicationGroupMemberStats) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
+ perfReplicationGroupMemberStatsRows, err := db.Query(perfReplicationGroupMemberStatsQuery)
+ if err != nil {
+ return err
+ }
+ defer perfReplicationGroupMemberStatsRows.Close()
+
+ var (
+ memberId string
+ countTransactionsInQueue, countTransactionsChecked uint64
+ countConflictsDetected, countTransactionsRowsValidating uint64
+ )
+
+ for perfReplicationGroupMemberStatsRows.Next() {
+ if err := perfReplicationGroupMemberStatsRows.Scan(
+ &memberId, &countTransactionsInQueue, &countTransactionsChecked,
+ &countConflictsDetected, &countTransactionsRowsValidating,
+ ); err != nil {
+ return err
+ }
+ ch <- prometheus.MustNewConstMetric(
+ performanceSchemaReplicationGroupMemberStatsTransInQueueDesc, prometheus.CounterValue, float64(countTransactionsInQueue),
+ memberId,
+ )
+ ch <- prometheus.MustNewConstMetric(
+ performanceSchemaReplicationGroupMemberStatsTransCheckedDesc, prometheus.CounterValue, float64(countTransactionsChecked),
+ memberId,
+ )
+ ch <- prometheus.MustNewConstMetric(
+ performanceSchemaReplicationGroupMemberStatsConflictsDetectedDesc, prometheus.CounterValue, float64(countConflictsDetected),
+ memberId,
+ )
+ ch <- prometheus.MustNewConstMetric(
+ performanceSchemaReplicationGroupMemberStatsTransRowValidatingDesc, prometheus.CounterValue, float64(countTransactionsRowsValidating),
+ memberId,
+ )
+ }
+ return nil
+}
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/perf_schema_table_io_waits.go prometheus-mysqld-exporter-0.11.0+ds/collector/perf_schema_table_io_waits.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/perf_schema_table_io_waits.go 2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/perf_schema_table_io_waits.go 2018-10-15 19:28:15.000000000 +0000
@@ -32,7 +32,20 @@
 )
 
 // ScrapePerfTableIOWaits collects from `performance_schema.table_io_waits_summary_by_table`.
-func ScrapePerfTableIOWaits(db *sql.DB, ch chan<- prometheus.Metric) error {
+type ScrapePerfTableIOWaits struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapePerfTableIOWaits) Name() string {
+ return "perf_schema.tableiowaits"
+}
+
+// Help describes the role of the Scraper.
+func (ScrapePerfTableIOWaits) Help() string {
+ return "Collect metrics from performance_schema.table_io_waits_summary_by_table"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapePerfTableIOWaits) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 perfSchemaTableWaitsRows, err := db.Query(perfTableIOWaitsQuery)
 if err != nil {
 return err
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/perf_schema_table_lock_waits.go prometheus-mysqld-exporter-0.11.0+ds/collector/perf_schema_table_lock_waits.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/perf_schema_table_lock_waits.go 2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/perf_schema_table_lock_waits.go 2018-10-15 19:28:15.000000000 +0000
@@ -61,7 +61,20 @@
 )
 
 // ScrapePerfTableLockWaits collects from `performance_schema.table_lock_waits_summary_by_table`.
-func ScrapePerfTableLockWaits(db *sql.DB, ch chan<- prometheus.Metric) error {
+type ScrapePerfTableLockWaits struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapePerfTableLockWaits) Name() string {
+ return "perf_schema.tablelocks"
+}
+
+// Help describes the role of the Scraper.
+func (ScrapePerfTableLockWaits) Help() string {
+ return "Collect metrics from performance_schema.table_lock_waits_summary_by_table"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapePerfTableLockWaits) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 perfSchemaTableLockWaitsRows, err := db.Query(perfTableLockWaitsQuery)
 if err != nil {
 return err
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/scraper.go prometheus-mysqld-exporter-0.11.0+ds/collector/scraper.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/scraper.go 1970-01-01 00:00:00.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/scraper.go 2018-10-15 19:28:15.000000000 +0000
@@ -0,0 +1,19 @@
+package collector
+
+import (
+ "database/sql"
+
+ _ "github.com/go-sql-driver/mysql"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// Scraper is a minimal interface that lets you add new prometheus metrics to mysqld_exporter.
+type Scraper interface {
+ // Name of the Scraper. Should be unique.
+ Name() string
+ // Help describes the role of the Scraper.
+ // Example: "Collect from SHOW ENGINE INNODB STATUS"
+ Help() string
+ // Scrape collects data from database connection and sends it over channel as prometheus metric.
+ Scrape(db *sql.DB, ch chan<- prometheus.Metric) error
+}
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/slave_hosts.go prometheus-mysqld-exporter-0.11.0+ds/collector/slave_hosts.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/slave_hosts.go 1970-01-01 00:00:00.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/slave_hosts.go 2018-10-15 19:28:15.000000000 +0000
@@ -0,0 +1,98 @@
+// Scrape slave hosts data.
+
+package collector
+
+import (
+ "database/sql"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/satori/go.uuid"
+)
+
+const (
+ // slavehosts is the Metric subsystem we use.
+ slavehosts = "slave_hosts"
+ // slaveHostsQuery is the query used to list the hosts currently
+ // registered as replication slaves. Newer servers report a slave
+ // UUID in the last column, older ones report Rpl_recovery_rank;
+ // Scrape below handles both layouts.
+ slaveHostsQuery = "SHOW SLAVE HOSTS"
+)
+
+// Metric descriptors.
+var ( + SlaveHostsInfo = prometheus.NewDesc( + prometheus.BuildFQName(namespace, heartbeat, "mysql_slave_hosts_info"), + "Information about running slaves", + []string{"server_id", "slave_host", "port", "master_id", "slave_uuid"}, nil, + ) +) + +// ScrapeSlaveHosts scrapes metrics about the replicating slaves. +type ScrapeSlaveHosts struct{} + +// Name of the Scraper. Should be unique. +func (ScrapeSlaveHosts) Name() string { + return slavehosts +} + +// Help describes the role of the Scraper. +func (ScrapeSlaveHosts) Help() string { + return "Scrape information from 'SHOW SLAVE HOSTS'" +} + +// Scrape collects data from database connection and sends it over channel as prometheus metric. +func (ScrapeSlaveHosts) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error { + slaveHostsRows, err := db.Query(slaveHostsQuery) + if err != nil { + return err + } + defer slaveHostsRows.Close() + + // fields of row + var serverId string + var host string + var port string + var rrrOrMasterId string + var slaveUuidOrMasterId string + + // Depends on the version of MySQL being scraped + var masterId string + var slaveUuid string + + for slaveHostsRows.Next() { + // Newer versions of mysql have the following + // Server_id, Host, Port, Master_id, Slave_UUID + // Older versions of mysql have the following + // Server_id, Host, Port, Rpl_recovery_rank, Master_id + err := slaveHostsRows.Scan(&serverId, &host, &port, &rrrOrMasterId, &slaveUuidOrMasterId) + if err != nil { + return err + } + + // Check to see if slaveUuidOrMasterId resembles a UUID or not + // to find out if we are using an old version of MySQL + if _, err = uuid.FromString(slaveUuidOrMasterId); err != nil { + // We are running an older version of MySQL with no slave UUID + slaveUuid = "" + masterId = slaveUuidOrMasterId + } else { + // We are running a more recent version of MySQL + slaveUuid = slaveUuidOrMasterId + masterId = rrrOrMasterId + } + + ch <- prometheus.MustNewConstMetric( + SlaveHostsInfo, + prometheus.GaugeValue, + 1, + serverId, + host, + port, + masterId, + slaveUuid, + ) + } + + return nil +} diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/slave_hosts_test.go prometheus-mysqld-exporter-0.11.0+ds/collector/slave_hosts_test.go --- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/slave_hosts_test.go 1970-01-01 00:00:00.000000000 +0000 +++ prometheus-mysqld-exporter-0.11.0+ds/collector/slave_hosts_test.go 2018-10-15 19:28:15.000000000 +0000 @@ -0,0 +1,86 @@ +package collector + +import ( + "testing" + + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + "github.com/smartystreets/goconvey/convey" + "gopkg.in/DATA-DOG/go-sqlmock.v1" +) + +func TestScrapeSlaveHostsOldFormat(t *testing.T) { + db, mock, err := sqlmock.New() + if err != nil { + t.Fatalf("error opening a stub database connection: %s", err) + } + defer db.Close() + + columns := []string{"Server_id", "Host", "Port", "Rpl_recovery_rank", "Master_id"} + rows := sqlmock.NewRows(columns). + AddRow("380239978", "backup_server_1", "0", "1", "192168011"). 
+ AddRow("11882498", "backup_server_2", "0", "1", "192168011")
+ mock.ExpectQuery(sanitizeQuery("SHOW SLAVE HOSTS")).WillReturnRows(rows)
+
+ ch := make(chan prometheus.Metric)
+ go func() {
+ if err = (ScrapeSlaveHosts{}).Scrape(db, ch); err != nil {
+ t.Errorf("error calling function on test: %s", err)
+ }
+ close(ch)
+ }()
+
+ counterExpected := []MetricResult{
+ {labels: labelMap{"server_id": "380239978", "slave_host": "backup_server_1", "port": "0", "master_id": "192168011", "slave_uuid": ""}, value: 1, metricType: dto.MetricType_GAUGE},
+ {labels: labelMap{"server_id": "11882498", "slave_host": "backup_server_2", "port": "0", "master_id": "192168011", "slave_uuid": ""}, value: 1, metricType: dto.MetricType_GAUGE},
+ }
+ convey.Convey("Metrics comparison", t, func() {
+ for _, expect := range counterExpected {
+ got := readMetric(<-ch)
+ convey.So(got, convey.ShouldResemble, expect)
+ }
+ })
+
+ // Ensure all SQL queries were executed
+ if err := mock.ExpectationsWereMet(); err != nil {
+ t.Errorf("there were unfulfilled expectations: %s", err)
+ }
+}
+
+func TestScrapeSlaveHostsNewFormat(t *testing.T) {
+ db, mock, err := sqlmock.New()
+ if err != nil {
+ t.Fatalf("error opening a stub database connection: %s", err)
+ }
+ defer db.Close()
+
+ columns := []string{"Server_id", "Host", "Port", "Master_id", "Slave_UUID"}
+ rows := sqlmock.NewRows(columns).
+ AddRow("192168010", "iconnect2", "3306", "192168011", "14cb6624-7f93-11e0-b2c0-c80aa9429562").
+ AddRow("1921680101", "athena", "3306", "192168011", "07af4990-f41f-11df-a566-7ac56fdaf645")
+ mock.ExpectQuery(sanitizeQuery("SHOW SLAVE HOSTS")).WillReturnRows(rows)
+
+ ch := make(chan prometheus.Metric)
+ go func() {
+ if err = (ScrapeSlaveHosts{}).Scrape(db, ch); err != nil {
+ t.Errorf("error calling function on test: %s", err)
+ }
+ close(ch)
+ }()
+
+ counterExpected := []MetricResult{
+ {labels: labelMap{"server_id": "192168010", "slave_host": "iconnect2", "port": "3306", "master_id": "192168011", "slave_uuid": "14cb6624-7f93-11e0-b2c0-c80aa9429562"}, value: 1, metricType: dto.MetricType_GAUGE},
+ {labels: labelMap{"server_id": "1921680101", "slave_host": "athena", "port": "3306", "master_id": "192168011", "slave_uuid": "07af4990-f41f-11df-a566-7ac56fdaf645"}, value: 1, metricType: dto.MetricType_GAUGE},
+ }
+ convey.Convey("Metrics comparison", t, func() {
+ for _, expect := range counterExpected {
+ got := readMetric(<-ch)
+ convey.So(got, convey.ShouldResemble, expect)
+ }
+ })
+
+ // Ensure all SQL queries were executed
+ if err := mock.ExpectationsWereMet(); err != nil {
+ t.Errorf("there were unfulfilled expectations: %s", err)
+ }
+}
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/slave_status.go prometheus-mysqld-exporter-0.11.0+ds/collector/slave_status.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/slave_status.go 2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/slave_status.go 2018-10-15 19:28:15.000000000 +0000
@@ -36,7 +36,20 @@
 }
 
 // ScrapeSlaveStatus collects from `SHOW SLAVE STATUS`.
-func ScrapeSlaveStatus(db *sql.DB, ch chan<- prometheus.Metric) error {
+type ScrapeSlaveStatus struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeSlaveStatus) Name() string {
+ return slaveStatus
+}
+
+// Help describes the role of the Scraper.
+func (ScrapeSlaveStatus) Help() string {
+ return "Collect from SHOW SLAVE STATUS"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapeSlaveStatus) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 var (
 slaveStatusRows *sql.Rows
 err error
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/slave_status_test.go prometheus-mysqld-exporter-0.11.0+ds/collector/slave_status_test.go
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/collector/slave_status_test.go 2018-03-14 14:59:10.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/collector/slave_status_test.go 2018-10-15 19:28:15.000000000 +0000
@@ -23,7 +23,7 @@
 ch := make(chan prometheus.Metric)
 go func() {
- if err = ScrapeSlaveStatus(db, ch); err != nil {
+ if err = (ScrapeSlaveStatus{}).Scrape(db, ch); err != nil {
 t.Errorf("error calling function on test: %s", err)
 }
 close(ch)
@@ -44,6 +44,6 @@
 
 // Ensure all SQL queries were executed
 if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expections: %s", err)
+ t.Errorf("there were unfulfilled expectations: %s", err)
 }
 }
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/debian/changelog prometheus-mysqld-exporter-0.11.0+ds/debian/changelog
--- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/debian/changelog 2018-03-27 00:49:40.000000000 +0000
+++ prometheus-mysqld-exporter-0.11.0+ds/debian/changelog 2018-10-16 00:53:55.000000000 +0000
@@ -1,3 +1,22 @@
+prometheus-mysqld-exporter (0.11.0+ds-1) unstable; urgency=medium
+
+ [ Alexandre Viau ]
+ * Point Vcs-* urls to salsa.debian.org.
+
+ [ Martín Ferrari ]
+ * New upstream release.
+ * Better document data source configuration.
+ * debian/init: always export DATA_SOURCE_NAME and use --inherit in
+ daemon call.
+ * debian/control: Update Standards-Version with no changes.
+ * Stop cme messing with Files-Excluded.
+ * Remove patch already applied upstream.
+ * debian/control: Add new dependency.
+ * Patch test to avoid depending on a new version of go-convey.
+ * debian/default: Update flag description.
+
+ -- Martín Ferrari Tue, 16 Oct 2018 00:53:55 +0000
+
 prometheus-mysqld-exporter (0.10.0+git20180201.a71f4bb+ds-2) unstable; urgency=high
 
 * Use non-recursive chown.
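Taken together, the hunks above show the central upstream refactoring of this release: every collector is converted from a bare ScrapeX(db, ch) function into an empty struct satisfying the new Scraper interface from collector/scraper.go (Name, Help, Scrape). For orientation, here is a minimal sketch of what one further implementation could look like. It is illustrative only and not part of this diff: the query, type name, and metric are hypothetical; only the Scraper method set and the package-level namespace and informationSchema constants are taken from the code above.

package collector

import (
 "database/sql"

 "github.com/prometheus/client_golang/prometheus"
)

// exampleTableCountQuery is a hypothetical query, used only in this sketch.
const exampleTableCountQuery = `SELECT COUNT(*) FROM information_schema.tables`

// exampleTableCountDesc describes the single gauge the sketch would export.
var exampleTableCountDesc = prometheus.NewDesc(
 prometheus.BuildFQName(namespace, informationSchema, "example_table_count"),
 "Illustrative only: number of tables visible in information_schema.tables.",
 nil, nil,
)

// ScrapeExampleTableCount is a hypothetical Scraper implementation.
type ScrapeExampleTableCount struct{}

// Name of the Scraper. Should be unique.
func (ScrapeExampleTableCount) Name() string {
 return informationSchema + ".example_table_count"
}

// Help describes the role of the Scraper.
func (ScrapeExampleTableCount) Help() string {
 return "Example: collect the number of tables from information_schema.tables"
}

// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeExampleTableCount) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 var count float64
 if err := db.QueryRow(exampleTableCountQuery).Scan(&count); err != nil {
  return err
 }
 ch <- prometheus.MustNewConstMetric(exampleTableCountDesc, prometheus.GaugeValue, count)
 return nil
}

A type like this would presumably also have to be registered in the exporter's list of scrapers and gain a --collect.* toggle, like the ones catalogued in the debian/default hunk further down, before its metrics appear on /metrics.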
diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/debian/control prometheus-mysqld-exporter-0.11.0+ds/debian/control --- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/debian/control 2018-03-27 00:49:40.000000000 +0000 +++ prometheus-mysqld-exporter-0.11.0+ds/debian/control 2018-10-16 00:53:55.000000000 +0000 @@ -14,15 +14,16 @@ golang-github-jtolds-gls-dev, golang-github-prometheus-client-golang-dev, golang-github-prometheus-common-dev, + golang-github-satori-go.uuid-dev, golang-github-sirupsen-logrus-dev, golang-github-smartystreets-assertions-dev, golang-github-smartystreets-goconvey-dev, golang-gopkg-ini.v1-dev, golang-goprotobuf-dev, golang-protobuf-extensions-dev, -Standards-Version: 4.1.3 -Vcs-Browser: https://anonscm.debian.org/cgit/pkg-go/packages/prometheus-mysqld-exporter.git -Vcs-Git: https://anonscm.debian.org/git/pkg-go/packages/prometheus-mysqld-exporter.git +Standards-Version: 4.2.1 +Vcs-Browser: https://salsa.debian.org/go-team/packages/prometheus-mysqld-exporter +Vcs-Git: https://salsa.debian.org/go-team/packages/prometheus-mysqld-exporter.git Homepage: https://github.com/prometheus/mysqld_exporter XS-Go-Import-Path: github.com/prometheus/mysqld_exporter diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/debian/copyright prometheus-mysqld-exporter-0.11.0+ds/debian/copyright --- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/debian/copyright 2018-03-27 00:49:40.000000000 +0000 +++ prometheus-mysqld-exporter-0.11.0+ds/debian/copyright 2018-10-16 00:53:55.000000000 +0000 @@ -2,7 +2,7 @@ Upstream-Name: github.com/prometheus/mysqld_exporter Source: https://github.com/prometheus/mysqld_exporter Files-Excluded: - vendor/*/ + vendor/*/* Files: * Copyright: 2013-2015 The Prometheus Authors diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/debian/default prometheus-mysqld-exporter-0.11.0+ds/debian/default --- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/debian/default 2018-03-27 00:49:40.000000000 +0000 +++ prometheus-mysqld-exporter-0.11.0+ds/debian/default 2018-10-16 00:53:55.000000000 +0000 @@ -1,9 +1,20 @@ # By default the connection string will be read from -# $HOME/my.cnf or -config.my-cnf. -# To set a connection string from the environment instead, uncomment the -# following line. +# $HOME/my.cnf or from the file specified with the -config.my-cnf parameter. -# export DATA_SOURCE_NAME="login:password@(hostname:port)/dbname" +# To set a connection string from the environment instead, uncomment one of the +# following lines. + +# Using UNIX domain sockets and authentication: +# DATA_SOURCE_NAME="prometheus:nopassword@unix(/run/mysqld/mysqld.sock)/" + +# Using a TCP connection and password authentication: +# DATA_SOURCE_NAME="login:password@(hostname:port)/dbname" + +# Note the user must be granted enough privileges for the exporter to run. +# Example to create a user to connect with the UNIX socket: +# +# CREATE USER IF NOT EXISTS 'prometheus'@'localhost' IDENTIFIED VIA unix_socket; +# GRANT PROCESS, REPLICATION CLIENT, SELECT ON *.* TO 'prometheus'@'localhost'; # Set the command-line arguments to pass to the exporter. 
# ARGS='-config.my-cnf /etc/mysql/debian.cnf' @@ -11,55 +22,99 @@ # Usage of prometheus-mysqld-exporter: # -collect.auto_increment.columns # Collect auto_increment columns and max values from information_schema -# -collect.binlog_size -# Collect the current size of all registered binlog files -# -collect.global_status -# Collect from SHOW GLOBAL STATUS (default true) -# -collect.global_variables -# Collect from SHOW GLOBAL VARIABLES (default true) -# -collect.info_schema.innodb_metrics -# Collect metrics from information_schema.innodb_metrics -# -collect.info_schema.processlist -# Collect current thread state counts from the information_schema.processlist -# -collect.info_schema.processlist.min_time int -# Minimum time a thread must be in each state to be counted -# -collect.info_schema.query_response_time -# Collect query response time distribution if query_response_time_stats is ON. -# -collect.info_schema.tables -# Collect metrics from information_schema.tables (default true) -# -collect.info_schema.tables.databases string -# The list of databases to collect table stats for, or '*' for all (default "*") -# -collect.info_schema.tablestats -# If running with userstat=1, set to true to collect table statistics -# -collect.info_schema.userstats -# If running with userstat=1, set to true to collect user statistics -# -collect.perf_schema.eventsstatements -# Collect metrics from performance_schema.events_statements_summary_by_digest -# -collect.perf_schema.eventsstatements.digest_text_limit int -# Maximum length of the normalized statement text (default 120) -# -collect.perf_schema.eventsstatements.limit int -# Limit the number of events statements digests by response time (default 250) -# -collect.perf_schema.eventsstatements.timelimit int -# Limit how old the 'last_seen' events statements can be, in seconds (default 86400) -# -collect.perf_schema.eventswaits -# Collect metrics from performance_schema.events_waits_summary_global_by_event_name -# -collect.perf_schema.file_events -# Collect metrics from performance_schema.file_summary_by_event_name -# -collect.perf_schema.indexiowaits -# Collect metrics from performance_schema.table_io_waits_summary_by_index_usage -# -collect.perf_schema.tableiowaits -# Collect metrics from performance_schema.table_io_waits_summary_by_table -# -collect.perf_schema.tablelocks -# Collect metrics from performance_schema.table_lock_waits_summary_by_table -# -collect.slave_status -# Collect from SHOW SLAVE STATUS (default true) -# -config.my-cnf string -# Path to .my.cnf file to read MySQL credentials from. (default "~/.my.cnf") -# -log.level value -# Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal, panic]. (default info) -# -log_slow_filter -# Add a log_slow_filter to avoid exessive MySQL slow logging. NOTE: Not supported by Oracle MySQL. -# -web.listen-address string -# Address to listen on for web interface and telemetry. (default ":9104") -# -web.telemetry-path string -# Path under which to expose metrics. (default "/metrics") +# --exporter.lock_wait_timeout=2 +# Set a lock_wait_timeout on the connection to avoid long metadata +# locking. +# --exporter.log_slow_filter +# Add a log_slow_filter to avoid slow query logging of scrapes. NOTE: Not +# supported by Oracle MySQL. 
+# --collect.heartbeat.database="heartbeat" +# Database from where to collect heartbeat data +# --collect.heartbeat.table="heartbeat" +# Table from where to collect heartbeat data +# --collect.info_schema.processlist.min_time=0 +# Minimum time a thread must be in each state to be counted +# --collect.info_schema.tables.databases="*" +# The list of databases to collect table stats for, or '*' for all +# --collect.perf_schema.eventsstatements.limit=250 +# Limit the number of events statements digests by response time +# --collect.perf_schema.eventsstatements.timelimit=86400 +# Limit how old the 'last_seen' events statements can be, in seconds +# --collect.perf_schema.eventsstatements.digest_text_limit=120 +# Maximum length of the normalized statement text +# --collect.perf_schema.file_instances.filter=".*" +# RegEx file_name filter for performance_schema.file_summary_by_instance +# --collect.perf_schema.file_instances.remove_prefix="/var/lib/mysql/" +# Remove path prefix in performance_schema.file_summary_by_instance +# --web.listen-address=":9104" +# Address to listen on for web interface and telemetry. +# --web.telemetry-path="/metrics" +# Path under which to expose metrics. +# --config.my-cnf="$HOME/.my.cnf" +# Path to .my.cnf file to read MySQL credentials from. +# --collect.global_variables +# Collect from SHOW GLOBAL VARIABLES +# --collect.slave_status +# Collect from SHOW SLAVE STATUS +# --collect.info_schema.processlist +# Collect current thread state counts from the +# information_schema.processlist +# --collect.info_schema.tables +# Collect metrics from information_schema.tables +# --collect.info_schema.innodb_tablespaces +# Collect metrics from information_schema.innodb_sys_tablespaces +# --collect.info_schema.innodb_metrics +# Collect metrics from information_schema.innodb_metrics +# --collect.auto_increment.columns +# Collect auto_increment columns and max values from information_schema +# --collect.global_status +# Collect from SHOW GLOBAL STATUS +# --collect.perf_schema.tableiowaits +# Collect metrics from performance_schema.table_io_waits_summary_by_table +# --collect.perf_schema.indexiowaits +# Collect metrics from +# performance_schema.table_io_waits_summary_by_index_usage +# --collect.perf_schema.tablelocks +# Collect metrics from +# performance_schema.table_lock_waits_summary_by_table +# --collect.perf_schema.eventsstatements +# Collect metrics from +# performance_schema.events_statements_summary_by_digest +# --collect.perf_schema.eventswaits +# Collect metrics from +# performance_schema.events_waits_summary_global_by_event_name +# --collect.perf_schema.file_events +# Collect metrics from performance_schema.file_summary_by_event_name +# --collect.perf_schema.file_instances +# Collect metrics from performance_schema.file_summary_by_instance +# --collect.binlog_size +# Collect the current size of all registered binlog files +# --collect.info_schema.userstats +# If running with userstat=1, set to true to collect user statistics +# --collect.info_schema.clientstats +# If running with userstat=1, set to true to collect client statistics +# --collect.info_schema.tablestats +# If running with userstat=1, set to true to collect table statistics +# --collect.info_schema.innodb_cmp +# Collect metrics from information_schema.innodb_cmp +# --collect.info_schema.innodb_cmpmem +# Collect metrics from information_schema.innodb_cmpmem +# --collect.info_schema.query_response_time +# Collect query response time distribution if query_response_time_stats +# is ON. 
+# --collect.engine_tokudb_status +# Collect from SHOW ENGINE TOKUDB STATUS +# --collect.perf_schema.replication_group_member_stats +# Collect metrics from performance_schema.replication_group_member_stats +# --collect.heartbeat +# Collect from heartbeat +# --collect.slave_hosts +# Scrape information from 'SHOW SLAVE HOSTS' +# --collect.engine_innodb_status +# Collect from SHOW ENGINE INNODB STATUS +# --log.level="info" +# Only log messages with the given severity or above. Valid levels: +# [debug, info, warn, error, fatal] +# --log.format="logger:stderr" +# Set the log target and format. Example: +# "logger:syslog?appname=bob&local=7" or "logger:stdout?json=true" diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/debian/gitlab-ci.yml prometheus-mysqld-exporter-0.11.0+ds/debian/gitlab-ci.yml --- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/debian/gitlab-ci.yml 1970-01-01 00:00:00.000000000 +0000 +++ prometheus-mysqld-exporter-0.11.0+ds/debian/gitlab-ci.yml 2018-10-16 00:53:55.000000000 +0000 @@ -0,0 +1,28 @@ + +# auto-generated, DO NOT MODIFY. +# The authoritative copy of this file lives at: +# https://salsa.debian.org/go-team/ci/blob/master/cmd/ci/gitlabciyml.go + +# TODO: publish under debian-go-team/ci +image: stapelberg/ci2 + +test_the_archive: + artifacts: + paths: + - before-applying-commit.json + - after-applying-commit.json + script: + # Create an overlay to discard writes to /srv/gopath/src after the build: + - "rm -rf /cache/overlay/{upper,work}" + - "mkdir -p /cache/overlay/{upper,work}" + - "mount -t overlay overlay -o lowerdir=/srv/gopath/src,upperdir=/cache/overlay/upper,workdir=/cache/overlay/work /srv/gopath/src" + - "export GOPATH=/srv/gopath" + - "export GOCACHE=/cache/go" + # Build the world as-is: + - "ci-build -exemptions=/var/lib/ci-build/exemptions.json > before-applying-commit.json" + # Copy this package into the overlay: + - "GBP_CONF_FILES=:debian/gbp.conf gbp buildpackage --git-no-pristine-tar --git-ignore-branch --git-ignore-new --git-export-dir=/tmp/export --git-no-overlay --git-tarball-dir=/nonexistant --git-cleaner=/bin/true --git-builder='dpkg-buildpackage -S -d --no-sign'" + - "pgt-gopath -dsc /tmp/export/*.dsc" + # Rebuild the world: + - "ci-build -exemptions=/var/lib/ci-build/exemptions.json > after-applying-commit.json" + - "ci-diff before-applying-commit.json after-applying-commit.json" diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/debian/init prometheus-mysqld-exporter-0.11.0+ds/debian/init --- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/debian/init 2018-03-27 00:49:40.000000000 +0000 +++ prometheus-mysqld-exporter-0.11.0+ds/debian/init 2018-10-16 00:53:55.000000000 +0000 @@ -22,16 +22,19 @@ LOGFILE=/var/log/prometheus/prometheus-mysqld-exporter.log ARGS="" +DATA_SOURCE_NAME="" [ -r /etc/default/$NAME ] && . 
/etc/default/$NAME HELPER=/usr/bin/daemon -HELPER_ARGS="--name=$NAME --output=$LOGFILE --pidfile=$PIDFILE --user=$USER" +HELPER_ARGS="--name=$NAME --output=$LOGFILE --pidfile=$PIDFILE --user=$USER + --inherit" do_start_prepare() { mkdir -p `dirname $PIDFILE` || true chown $USER: `dirname $LOGFILE` chown $USER: `dirname $PIDFILE` + export DATA_SOURCE_NAME } do_start_cmd() diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/debian/patches/01-Avoid_new_dependency.patch prometheus-mysqld-exporter-0.11.0+ds/debian/patches/01-Avoid_new_dependency.patch --- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/debian/patches/01-Avoid_new_dependency.patch 1970-01-01 00:00:00.000000000 +0000 +++ prometheus-mysqld-exporter-0.11.0+ds/debian/patches/01-Avoid_new_dependency.patch 2018-10-16 00:53:55.000000000 +0000 @@ -0,0 +1,27 @@ +Description: Avoid requiring a new go-convey version with a workaround. +--- a/mysqld_exporter_test.go ++++ b/mysqld_exporter_test.go +@@ -104,19 +104,19 @@ + }) + convey.Convey("Missed user", func() { + _, err := parseMycnf([]byte(badConfig)) +- convey.So(err, convey.ShouldBeError, fmt.Errorf("no user or password specified under [client] in %s", badConfig)) ++ convey.So(err.Error(), convey.ShouldEqual, fmt.Errorf("no user or password specified under [client] in %s", badConfig).Error()) + }) + convey.Convey("Missed password", func() { + _, err := parseMycnf([]byte(badConfig2)) +- convey.So(err, convey.ShouldBeError, fmt.Errorf("no user or password specified under [client] in %s", badConfig2)) ++ convey.So(err.Error(), convey.ShouldEqual, fmt.Errorf("no user or password specified under [client] in %s", badConfig2).Error()) + }) + convey.Convey("No [client] section", func() { + _, err := parseMycnf([]byte(badConfig3)) +- convey.So(err, convey.ShouldBeError, fmt.Errorf("no user or password specified under [client] in %s", badConfig3)) ++ convey.So(err.Error(), convey.ShouldEqual, fmt.Errorf("no user or password specified under [client] in %s", badConfig3).Error()) + }) + convey.Convey("Invalid config", func() { + _, err := parseMycnf([]byte(badConfig4)) +- convey.So(err, convey.ShouldBeError, fmt.Errorf("failed reading ini file: unclosed section: %s", badConfig4)) ++ convey.So(err.Error(), convey.ShouldEqual, fmt.Errorf("failed reading ini file: unclosed section: %s", badConfig4).Error()) + }) + }) + } diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/debian/patches/01-Fix_int_size_in_32bits.patch prometheus-mysqld-exporter-0.11.0+ds/debian/patches/01-Fix_int_size_in_32bits.patch --- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/debian/patches/01-Fix_int_size_in_32bits.patch 2018-03-27 00:49:40.000000000 +0000 +++ prometheus-mysqld-exporter-0.11.0+ds/debian/patches/01-Fix_int_size_in_32bits.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,17 +0,0 @@ -Description: Usage of a long integer literal fails to compile in 32bit arches. 
-Bug-Debian: https://bugs.debian.org/893051 -Forwarded: https://github.com/prometheus/mysqld_exporter/issues/272 -Author: Martín Ferrari -Last-Update: 2018-02-27 - ---- a/collector/global_variables_test.go -+++ b/collector/global_variables_test.go -@@ -76,7 +76,7 @@ - convey.Convey("Parse wsrep_provider_options", t, func() { - convey.So(parseWsrepProviderOptions(testE), convey.ShouldEqual, 0) - convey.So(parseWsrepProviderOptions(testM), convey.ShouldEqual, 128*1024*1024) -- convey.So(parseWsrepProviderOptions(testG), convey.ShouldEqual, 2*1024*1024*1024) -+ convey.So(parseWsrepProviderOptions(testG), convey.ShouldEqual, int64(2*1024*1024*1024)) - convey.So(parseWsrepProviderOptions(testB), convey.ShouldEqual, 131072) - }) - } diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/debian/patches/series prometheus-mysqld-exporter-0.11.0+ds/debian/patches/series --- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/debian/patches/series 2018-03-27 00:49:40.000000000 +0000 +++ prometheus-mysqld-exporter-0.11.0+ds/debian/patches/series 2018-10-16 00:53:55.000000000 +0000 @@ -1 +1 @@ -01-Fix_int_size_in_32bits.patch +01-Avoid_new_dependency.patch diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/example.rules prometheus-mysqld-exporter-0.11.0+ds/example.rules --- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/example.rules 2018-03-14 14:59:10.000000000 +0000 +++ prometheus-mysqld-exporter-0.11.0+ds/example.rules 2018-10-15 19:28:15.000000000 +0000 @@ -22,102 +22,69 @@ ### # Galera Alerts -# Alert: Galera node is not "ready". -ALERT MySQLGaleraNotReady - IF mysql_global_status_wsrep_ready != 1 - FOR 5m - LABELS { - severity = "warning" - } - ANNOTATIONS { - summary = "Galera cluster node not ready", - description = "{{$labels.job}} on {{$labels.instance}} is not ready.", - } - -# Alert: Galera node state is not synced. -ALERT MySQLGaleraOutOfSync - IF (mysql_global_status_wsrep_local_state != 4 AND mysql_global_variables_wsrep_desync == 0) - FOR 5m - LABELS { - severity = "warning" - } - ANNOTATIONS { - summary = "Galera cluster node out of sync", - description = "{{$labels.job}} on {{$labels.instance}} is not in sync ({{$value}} != 4).", - } - -# Alert: Galera node is in "doner" state, and is behind applying transactions. -ALERT MySQLGaleraDonorFallingBehind - IF (mysql_global_status_wsrep_local_state == 2 AND mysql_global_status_wsrep_local_recv_queue > 100) - FOR 5m - LABELS { - severity = "warning" - } - ANNOTATIONS { - summary = "xtradb cluster donor node falling behind", - description = "{{$labels.job}} on {{$labels.instance}} is a donor (hotbackup) and is falling behind (queue size {{$value}}).", - } - - -### -# Replication Alerts - -# Alert: The replication IO or SQL threads are stopped. -ALERT MySQLReplicationNotRunning - IF mysql_slave_status_slave_io_running == 0 OR mysql_slave_status_slave_sql_running == 0 - FOR 2m - LABELS { - severity = "critical" - } - ANNOTATIONS { - summary = "Slave replication is not running", - description = "Slave replication (IO or SQL) has been down for more than 2 minutes.", - } - -# Alert: The replication lag is non-zero and it predicted to not recover within -# 2 minutes. This allows for a small amount of replication lag. -# NOTE: This alert depends on the recording rule at the top of the file. 
-ALERT MySQLReplicationLag - IF - (mysql_slave_lag_seconds > 30) - AND on (instance) - (predict_linear(mysql_slave_lag_seconds[5m], 60*2) > 0) - FOR 1m - LABELS { - severity = "critical" - } - ANNOTATIONS { - summary = "MySQL slave replication is lagging", - description = "The mysql slave replication has fallen behind and is not recovering", - } - -# Alert: The replication lag is non-zero and it predicted to not recover within -# 2 minutes. This allows for a small amount of replication lag. -# NOTE: This alert depends on the recording rule at the top of the file. -ALERT MySQLReplicationLag - IF - (mysql_heartbeat_lag_seconds > 30) - AND on (instance) - (predict_linear(mysql_heartbeat_lag_seconds[5m], 60*2) > 0) - FOR 1m - LABELS { - severity = "critical" - } - ANNOTATIONS { - summary = "MySQL slave replication is lagging", - description = "The mysql slave replication has fallen behind and is not recovering", - } - -### -# Performance Alerts - -# Alert: InnoDB log writes are stalling. -ALERT MySQLInnoDBLogWaits - IF rate(mysql_global_status_innodb_log_waits[15m]) > 10 - LABELS { - severity = "warning" - } - ANNOTATIONS { - summary = "MySQL innodb log writes stalling", - description = "The innodb logs are waiting for disk at a rate of {{$value}} / second", - } +groups: +- name: GaleraAlerts + rules: + - alert: MySQLGaleraNotReady + expr: mysql_global_status_wsrep_ready != 1 + for: 5m + labels: + severity: warning + annotations: + description: '{{$labels.job}} on {{$labels.instance}} is not ready.' + summary: Galera cluster node not ready + - alert: MySQLGaleraOutOfSync + expr: (mysql_global_status_wsrep_local_state != 4 and mysql_global_variables_wsrep_desync + == 0) + for: 5m + labels: + severity: warning + annotations: + description: '{{$labels.job}} on {{$labels.instance}} is not in sync ({{$value}} + != 4).' + summary: Galera cluster node out of sync + - alert: MySQLGaleraDonorFallingBehind + expr: (mysql_global_status_wsrep_local_state == 2 and mysql_global_status_wsrep_local_recv_queue + > 100) + for: 5m + labels: + severity: warning + annotations: + description: '{{$labels.job}} on {{$labels.instance}} is a donor (hotbackup) + and is falling behind (queue size {{$value}}).' + summary: xtradb cluster donor node falling behind + - alert: MySQLReplicationNotRunning + expr: mysql_slave_status_slave_io_running == 0 or mysql_slave_status_slave_sql_running + == 0 + for: 2m + labels: + severity: critical + annotations: + description: Slave replication (IO or SQL) has been down for more than 2 minutes. 
+ summary: Slave replication is not running + - alert: MySQLReplicationLag + expr: (mysql_slave_lag_seconds > 30) and on(instance) (predict_linear(mysql_slave_lag_seconds[5m], + 60 * 2) > 0) + for: 1m + labels: + severity: critical + annotations: + description: The mysql slave replication has fallen behind and is not recovering + summary: MySQL slave replication is lagging + - alert: MySQLReplicationLag + expr: (mysql_heartbeat_lag_seconds > 30) and on(instance) (predict_linear(mysql_heartbeat_lag_seconds[5m], + 60 * 2) > 0) + for: 1m + labels: + severity: critical + annotations: + description: The mysql slave replication has fallen behind and is not recovering + summary: MySQL slave replication is lagging + - alert: MySQLInnoDBLogWaits + expr: rate(mysql_global_status_innodb_log_waits[15m]) > 10 + labels: + severity: warning + annotations: + description: The innodb logs are waiting for disk at a rate of {{$value}} / + second + summary: MySQL innodb log writes stalling diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/example.rules.yml prometheus-mysqld-exporter-0.11.0+ds/example.rules.yml --- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/example.rules.yml 1970-01-01 00:00:00.000000000 +0000 +++ prometheus-mysqld-exporter-0.11.0+ds/example.rules.yml 2018-10-15 19:28:15.000000000 +0000 @@ -0,0 +1,73 @@ +groups: +- name: example.rules + rules: + - record: mysql_slave_lag_seconds + expr: mysql_slave_status_seconds_behind_master - mysql_slave_status_sql_delay + - record: mysql_heartbeat_lag_seconds + expr: mysql_heartbeat_now_timestamp_seconds - mysql_heartbeat_stored_timestamp_seconds + - record: job:mysql_transactions:rate5m + expr: sum(rate(mysql_global_status_commands_total{command=~"(commit|rollback)"}[5m])) + WITHOUT (command) + - alert: MySQLGaleraNotReady + expr: mysql_global_status_wsrep_ready != 1 + for: 5m + labels: + severity: warning + annotations: + description: '{{$labels.job}} on {{$labels.instance}} is not ready.' + summary: Galera cluster node not ready + - alert: MySQLGaleraOutOfSync + expr: (mysql_global_status_wsrep_local_state != 4 and mysql_global_variables_wsrep_desync + == 0) + for: 5m + labels: + severity: warning + annotations: + description: '{{$labels.job}} on {{$labels.instance}} is not in sync ({{$value}} + != 4).' + summary: Galera cluster node out of sync + - alert: MySQLGaleraDonorFallingBehind + expr: (mysql_global_status_wsrep_local_state == 2 and mysql_global_status_wsrep_local_recv_queue + > 100) + for: 5m + labels: + severity: warning + annotations: + description: '{{$labels.job}} on {{$labels.instance}} is a donor (hotbackup) + and is falling behind (queue size {{$value}}).' + summary: xtradb cluster donor node falling behind + - alert: MySQLReplicationNotRunning + expr: mysql_slave_status_slave_io_running == 0 or mysql_slave_status_slave_sql_running + == 0 + for: 2m + labels: + severity: critical + annotations: + description: Slave replication (IO or SQL) has been down for more than 2 minutes. 
+ summary: Slave replication is not running + - alert: MySQLReplicationLag + expr: (mysql_slave_lag_seconds > 30) and ON(instance) (predict_linear(mysql_slave_lag_seconds[5m], + 60 * 2) > 0) + for: 1m + labels: + severity: critical + annotations: + description: The mysql slave replication has fallen behind and is not recovering + summary: MySQL slave replication is lagging + - alert: MySQLReplicationLag + expr: (mysql_heartbeat_lag_seconds > 30) and ON(instance) (predict_linear(mysql_heartbeat_lag_seconds[5m], + 60 * 2) > 0) + for: 1m + labels: + severity: critical + annotations: + description: The mysql slave replication has fallen behind and is not recovering + summary: MySQL slave replication is lagging + - alert: MySQLInnoDBLogWaits + expr: rate(mysql_global_status_innodb_log_waits[15m]) > 10 + labels: + severity: warning + annotations: + description: The innodb logs are waiting for disk at a rate of {{$value}} / + second + summary: MySQL innodb log writes stalling diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/Makefile prometheus-mysqld-exporter-0.11.0+ds/Makefile --- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/Makefile 2018-03-14 14:59:10.000000000 +0000 +++ prometheus-mysqld-exporter-0.11.0+ds/Makefile 2018-10-15 19:28:15.000000000 +0000 @@ -11,55 +11,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -GO := go -FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) -PROMU := $(FIRST_GOPATH)/bin/promu -pkgs = $(shell $(GO) list ./... | grep -v /vendor/) +all: vet -PREFIX ?= $(shell pwd) -BIN_DIR ?= $(shell pwd) -DOCKER_IMAGE_NAME ?= mysqld-exporter -DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) +include Makefile.common +STATICCHECK_IGNORE = \ + github.com/prometheus/mysqld_exporter/mysqld_exporter.go:SA1019 -all: format build test-short +DOCKER_IMAGE_NAME ?= mysqld-exporter -style: - @echo ">> checking code style" - @! gofmt -d $(shell find . -path ./vendor -prune -o -name '*.go' -print) | grep '^' +test-docker: + @echo ">> testing docker image" + ./test_image.sh "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" 9104 -test-short: - @echo ">> running short tests" - @$(GO) test -short -race $(pkgs) - -test: - @echo ">> running tests" - @$(GO) test -race $(pkgs) - -format: - @echo ">> formatting code" - @$(GO) fmt $(pkgs) - -vet: - @echo ">> vetting code" - @$(GO) vet $(pkgs) - -build: promu - @echo ">> building binaries" - @$(PROMU) build --prefix $(PREFIX) - -tarball: promu - @echo ">> building release tarball" - @$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) - -docker: - @echo ">> building docker image" - @docker build -t "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" . - -promu: - @GOOS=$(shell uname -s | tr A-Z a-z) \ - GOARCH=$(subst x86_64,amd64,$(patsubst i%86,386,$(shell uname -m))) \ - $(GO) get -u github.com/prometheus/promu - - -.PHONY: all style format build test vet tarball docker promu +.PHONY: test-docker diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/Makefile.common prometheus-mysqld-exporter-0.11.0+ds/Makefile.common --- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/Makefile.common 1970-01-01 00:00:00.000000000 +0000 +++ prometheus-mysqld-exporter-0.11.0+ds/Makefile.common 2018-10-15 19:28:15.000000000 +0000 @@ -0,0 +1,100 @@ +# Copyright 2018 The Prometheus Authors +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# A common Makefile that includes rules to be reused in different prometheus projects. +# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! + +# Example usage : +# Create the main Makefile in the root project directory. +# include Makefile.common +# customTarget: +# @echo ">> Running customTarget" +# + +# Ensure GOBIN is not set during build so that promu is installed to the correct path +unexport GOBIN + +GO ?= go +GOFMT ?= $(GO)fmt +FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) +PROMU := $(FIRST_GOPATH)/bin/promu +STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck +GOVENDOR := $(FIRST_GOPATH)/bin/govendor +pkgs = ./... + +PREFIX ?= $(shell pwd) +BIN_DIR ?= $(shell pwd) +DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) + +all: style staticcheck unused build test + +style: + @echo ">> checking code style" + ! $(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print) | grep '^' + +check_license: + @echo ">> checking license header" + @licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \ + awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ + done); \ + if [ -n "$${licRes}" ]; then \ + echo "license header checking failed:"; echo "$${licRes}"; \ + exit 1; \ + fi + +test-short: + @echo ">> running short tests" + $(GO) test -short $(pkgs) + +test: + @echo ">> running all tests" + $(GO) test -race $(pkgs) + +format: + @echo ">> formatting code" + $(GO) fmt $(pkgs) + +vet: + @echo ">> vetting code" + $(GO) vet $(pkgs) + +staticcheck: $(STATICCHECK) + @echo ">> running staticcheck" + $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs) + +unused: $(GOVENDOR) + @echo ">> running check for unused packages" + @$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages' + +build: promu + @echo ">> building binaries" + $(PROMU) build --prefix $(PREFIX) + +tarball: promu + @echo ">> building release tarball" + $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) + +docker: + docker build -t "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" . 
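+
+# Note: the tool-install targets below run with GOOS/GOARCH cleared so that
+# promu, staticcheck and govendor are always built for the host platform,
+# even in the middle of a crossbuild.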
+ +promu: + GOOS= GOARCH= $(GO) get -u github.com/prometheus/promu + +$(FIRST_GOPATH)/bin/staticcheck: + GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck + +$(FIRST_GOPATH)/bin/govendor: + GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor + +.PHONY: all style check_license format build test vet assets tarball docker promu staticcheck $(FIRST_GOPATH)/bin/staticcheck govendor $(FIRST_GOPATH)/bin/govendor \ No newline at end of file diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/mysqld_exporter.go prometheus-mysqld-exporter-0.11.0+ds/mysqld_exporter.go --- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/mysqld_exporter.go 2018-03-14 14:59:10.000000000 +0000 +++ prometheus-mysqld-exporter-0.11.0+ds/mysqld_exporter.go 2018-10-15 19:28:15.000000000 +0000 @@ -1,11 +1,15 @@ package main import ( + "crypto/tls" + "crypto/x509" "fmt" + "io/ioutil" "net/http" "os" "path" + "github.com/go-sql-driver/mysql" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/common/log" @@ -29,112 +33,47 @@ "config.my-cnf", "Path to .my.cnf file to read MySQL credentials from.", ).Default(path.Join(os.Getenv("HOME"), ".my.cnf")).String() - collectProcesslist = kingpin.Flag( - "collect.info_schema.processlist", - "Collect current thread state counts from the information_schema.processlist", - ).Default("false").Bool() - collectTableSchema = kingpin.Flag( - "collect.info_schema.tables", - "Collect metrics from information_schema.tables", - ).Default("true").Bool() - collectInnodbTablespaces = kingpin.Flag( - "collect.info_schema.innodb_tablespaces", - "Collect metrics from information_schema.innodb_sys_tablespaces", - ).Default("false").Bool() - collectInnodbMetrics = kingpin.Flag( - "collect.info_schema.innodb_metrics", - "Collect metrics from information_schema.innodb_metrics", - ).Default("false").Bool() - collectGlobalStatus = kingpin.Flag( - "collect.global_status", - "Collect from SHOW GLOBAL STATUS", - ).Default("true").Bool() - collectGlobalVariables = kingpin.Flag( - "collect.global_variables", - "Collect from SHOW GLOBAL VARIABLES", - ).Default("true").Bool() - collectSlaveStatus = kingpin.Flag( - "collect.slave_status", - "Collect from SHOW SLAVE STATUS", - ).Default("true").Bool() - collectAutoIncrementColumns = kingpin.Flag( - "collect.auto_increment.columns", - "Collect auto_increment columns and max values from information_schema", - ).Default("false").Bool() - collectBinlogSize = kingpin.Flag( - "collect.binlog_size", - "Collect the current size of all registered binlog files", - ).Default("false").Bool() - collectPerfTableIOWaits = kingpin.Flag( - "collect.perf_schema.tableiowaits", - "Collect metrics from performance_schema.table_io_waits_summary_by_table", - ).Default("false").Bool() - collectPerfIndexIOWaits = kingpin.Flag( - "collect.perf_schema.indexiowaits", - "Collect metrics from performance_schema.table_io_waits_summary_by_index_usage", - ).Default("false").Bool() - collectPerfTableLockWaits = kingpin.Flag( - "collect.perf_schema.tablelocks", - "Collect metrics from performance_schema.table_lock_waits_summary_by_table", - ).Default("false").Bool() - collectPerfEventsStatements = kingpin.Flag( - "collect.perf_schema.eventsstatements", - "Collect metrics from performance_schema.events_statements_summary_by_digest", - ).Default("false").Bool() - collectPerfEventsWaits = kingpin.Flag( - "collect.perf_schema.eventswaits", - "Collect metrics from 
performance_schema.events_waits_summary_global_by_event_name", - ).Default("false").Bool() - collectPerfFileEvents = kingpin.Flag( - "collect.perf_schema.file_events", - "Collect metrics from performance_schema.file_summary_by_event_name", - ).Default("false").Bool() - collectPerfFileInstances = kingpin.Flag( - "collect.perf_schema.file_instances", - "Collect metrics from performance_schema.file_summary_by_instance", - ).Default("false").Bool() - collectUserStat = kingpin.Flag( - "collect.info_schema.userstats", - "If running with userstat=1, set to true to collect user statistics", - ).Default("false").Bool() - collectClientStat = kingpin.Flag( - "collect.info_schema.clientstats", - "If running with userstat=1, set to true to collect client statistics", - ).Default("false").Bool() - collectTableStat = kingpin.Flag( - "collect.info_schema.tablestats", - "If running with userstat=1, set to true to collect table statistics", - ).Default("false").Bool() - collectQueryResponseTime = kingpin.Flag( - "collect.info_schema.query_response_time", - "Collect query response time distribution if query_response_time_stats is ON.", - ).Default("false").Bool() - collectEngineTokudbStatus = kingpin.Flag( - "collect.engine_tokudb_status", - "Collect from SHOW ENGINE TOKUDB STATUS", - ).Default("false").Bool() - collectEngineInnodbStatus = kingpin.Flag( - "collect.engine_innodb_status", - "Collect from SHOW ENGINE INNODB STATUS", - ).Default("false").Bool() - collectHeartbeat = kingpin.Flag( - "collect.heartbeat", - "Collect from heartbeat", - ).Default("false").Bool() - collectHeartbeatDatabase = kingpin.Flag( - "collect.heartbeat.database", - "Database from where to collect heartbeat data", - ).Default("heartbeat").String() - collectHeartbeatTable = kingpin.Flag( - "collect.heartbeat.table", - "Table from where to collect heartbeat data", - ).Default("heartbeat").String() dsn string ) +// scrapers lists all possible collection methods and if they should be enabled by default. +var scrapers = map[collector.Scraper]bool{ + collector.ScrapeGlobalStatus{}: true, + collector.ScrapeGlobalVariables{}: true, + collector.ScrapeSlaveStatus{}: true, + collector.ScrapeProcesslist{}: false, + collector.ScrapeTableSchema{}: true, + collector.ScrapeInfoSchemaInnodbTablespaces{}: false, + collector.ScrapeInnodbMetrics{}: false, + collector.ScrapeAutoIncrementColumns{}: false, + collector.ScrapeBinlogSize{}: false, + collector.ScrapePerfTableIOWaits{}: false, + collector.ScrapePerfIndexIOWaits{}: false, + collector.ScrapePerfTableLockWaits{}: false, + collector.ScrapePerfEventsStatements{}: false, + collector.ScrapePerfEventsWaits{}: false, + collector.ScrapePerfFileEvents{}: false, + collector.ScrapePerfFileInstances{}: false, + collector.ScrapePerfReplicationGroupMemberStats{}: false, + collector.ScrapeUserStat{}: false, + collector.ScrapeClientStat{}: false, + collector.ScrapeTableStat{}: false, + collector.ScrapeInnodbCmp{}: false, + collector.ScrapeInnodbCmpMem{}: false, + collector.ScrapeQueryResponseTime{}: false, + collector.ScrapeEngineTokudbStatus{}: false, + collector.ScrapeEngineInnodbStatus{}: false, + collector.ScrapeHeartbeat{}: false, + collector.ScrapeSlaveHosts{}: false, +} + func parseMycnf(config interface{}) (string, error) { var dsn string - cfg, err := ini.Load(config) + opts := ini.LoadOptions{ + // MySQL ini file can have boolean keys. 
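+		// Without this option a bare key such as skip-auto-rehash, which is
+		// common in my.cnf files, would make parsing fail instead of being
+		// ignored.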
+ AllowBooleanKeys: true, + } + cfg, err := ini.LoadSources(opts, config) if err != nil { return dsn, fmt.Errorf("failed reading ini file: %s", err) } @@ -151,74 +90,103 @@ } else { dsn = fmt.Sprintf("%s:%s@tcp(%s:%d)/", user, password, host, port) } + sslCA := cfg.Section("client").Key("ssl-ca").String() + sslCert := cfg.Section("client").Key("ssl-cert").String() + sslKey := cfg.Section("client").Key("ssl-key").String() + if sslCA != "" { + if tlsErr := customizeTLS(sslCA, sslCert, sslKey); tlsErr != nil { + tlsErr = fmt.Errorf("failed to register a custom TLS configuration for mysql dsn: %s", tlsErr) + return dsn, tlsErr + } + dsn = fmt.Sprintf("%s?tls=custom", dsn) + } + log.Debugln(dsn) return dsn, nil } +func customizeTLS(sslCA string, sslCert string, sslKey string) error { + var tlsCfg tls.Config + caBundle := x509.NewCertPool() + pemCA, err := ioutil.ReadFile(sslCA) + if err != nil { + return err + } + if ok := caBundle.AppendCertsFromPEM(pemCA); ok { + tlsCfg.RootCAs = caBundle + } else { + return fmt.Errorf("failed parse pem-encoded CA certificates from %s", sslCA) + } + if sslCert != "" && sslKey != "" { + certPairs := make([]tls.Certificate, 0, 1) + keypair, err := tls.LoadX509KeyPair(sslCert, sslKey) + if err != nil { + return fmt.Errorf("failed to parse pem-encoded SSL cert %s or SSL key %s: %s", + sslCert, sslKey, err) + } + certPairs = append(certPairs, keypair) + tlsCfg.Certificates = certPairs + } + mysql.RegisterTLSConfig("custom", &tlsCfg) + return nil +} + func init() { prometheus.MustRegister(version.NewCollector("mysqld_exporter")) } -func filter(filters map[string]bool, name string, flag bool) bool { - if len(filters) > 0 { - return flag && filters[name] - } - return flag -} +func newHandler(metrics collector.Metrics, scrapers []collector.Scraper) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + filteredScrapers := scrapers + params := r.URL.Query()["collect[]"] + log.Debugln("collect query:", params) + + // Check if we have some "collect[]" query parameters. + if len(params) > 0 { + filters := make(map[string]bool) + for _, param := range params { + filters[param] = true + } + + filteredScrapers = nil + for _, scraper := range scrapers { + if filters[scraper.Name()] { + filteredScrapers = append(filteredScrapers, scraper) + } + } + } -func handler(w http.ResponseWriter, r *http.Request) { - var filters map[string]bool - params := r.URL.Query()["collect[]"] - log.Debugln("collect query:", params) - - if len(params) > 0 { - filters = make(map[string]bool) - for _, param := range params { - filters[param] = true + registry := prometheus.NewRegistry() + registry.MustRegister(collector.New(dsn, metrics, filteredScrapers)) + + gatherers := prometheus.Gatherers{ + prometheus.DefaultGatherer, + registry, } + // Delegate http serving to Prometheus client library, which will call collector.Collect. 
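+		// A per-request registry scopes the collect[] filtering to this scrape,
+		// while prometheus.DefaultGatherer still contributes the exporter's own
+		// process, Go runtime and version metrics.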
+ h := promhttp.HandlerFor(gatherers, promhttp.HandlerOpts{}) + h.ServeHTTP(w, r) } - - collect := collector.Collect{ - Processlist: filter(filters, "info_schema.processlist", *collectProcesslist), - TableSchema: filter(filters, "info_schema.tables", *collectTableSchema), - InnodbTablespaces: filter(filters, "info_schema.innodb_tablespaces", *collectInnodbTablespaces), - InnodbMetrics: filter(filters, "info_schema.innodb_metrics", *collectInnodbMetrics), - GlobalStatus: filter(filters, "global_status", *collectGlobalStatus), - GlobalVariables: filter(filters, "global_variables", *collectGlobalVariables), - SlaveStatus: filter(filters, "slave_status", *collectSlaveStatus), - AutoIncrementColumns: filter(filters, "auto_increment.columns", *collectAutoIncrementColumns), - BinlogSize: filter(filters, "binlog_size", *collectBinlogSize), - PerfTableIOWaits: filter(filters, "perf_schema.tableiowaits", *collectPerfTableIOWaits), - PerfIndexIOWaits: filter(filters, "perf_schema.indexiowaits", *collectPerfIndexIOWaits), - PerfTableLockWaits: filter(filters, "perf_schema.tablelocks", *collectPerfTableLockWaits), - PerfEventsStatements: filter(filters, "perf_schema.eventsstatements", *collectPerfEventsStatements), - PerfEventsWaits: filter(filters, "perf_schema.eventswaits", *collectPerfEventsWaits), - PerfFileEvents: filter(filters, "perf_schema.file_events", *collectPerfFileEvents), - PerfFileInstances: filter(filters, "perf_schema.file_instances", *collectPerfFileInstances), - UserStat: filter(filters, "info_schema.userstats", *collectUserStat), - ClientStat: filter(filters, "info_schema.clientstats", *collectClientStat), - TableStat: filter(filters, "info_schema.tablestats", *collectTableStat), - QueryResponseTime: filter(filters, "info_schema.query_response_time", *collectQueryResponseTime), - EngineTokudbStatus: filter(filters, "engine_tokudb_status", *collectEngineTokudbStatus), - EngineInnodbStatus: filter(filters, "engine_innodb_status", *collectEngineInnodbStatus), - Heartbeat: filter(filters, "heartbeat", *collectHeartbeat), - HeartbeatDatabase: *collectHeartbeatDatabase, - HeartbeatTable: *collectHeartbeatTable, - } - - registry := prometheus.NewRegistry() - registry.MustRegister(collector.New(dsn, collect)) - - gatherers := prometheus.Gatherers{ - prometheus.DefaultGatherer, - registry, - } - // Delegate http serving to Prometheus client library, which will call collector.Collect. - h := promhttp.HandlerFor(gatherers, promhttp.HandlerOpts{}) - h.ServeHTTP(w, r) } func main() { + // Generate ON/OFF flags for all scrapers. + scraperFlags := map[collector.Scraper]*bool{} + for scraper, enabledByDefault := range scrapers { + defaultOn := "false" + if enabledByDefault { + defaultOn = "true" + } + + f := kingpin.Flag( + "collect."+scraper.Name(), + scraper.Help(), + ).Default(defaultOn).Bool() + + scraperFlags[scraper] = f + } + + // Parse flags. log.AddFlags(kingpin.CommandLine) kingpin.Version(version.Print("mysqld_exporter")) kingpin.HelpFlag.Short('h') @@ -246,7 +214,17 @@ } } - http.HandleFunc(*metricPath, prometheus.InstrumentHandlerFunc("metrics", handler)) + // Register only scrapers enabled by flag. 
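+	// These flags were generated from the same scrapers map at the top of
+	// main(), so flag names and scraper names cannot drift apart.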
+ log.Infof("Enabled scrapers:") + enabledScrapers := []collector.Scraper{} + for scraper, enabled := range scraperFlags { + if *enabled { + log.Infof(" --collect.%s", scraper.Name()) + enabledScrapers = append(enabledScrapers, scraper) + } + } + handlerFunc := newHandler(collector.NewMetrics(), enabledScrapers) + http.HandleFunc(*metricPath, prometheus.InstrumentHandlerFunc("metrics", handlerFunc)) http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { w.Write(landingPage) }) diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/mysqld_exporter_test.go prometheus-mysqld-exporter-0.11.0+ds/mysqld_exporter_test.go --- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/mysqld_exporter_test.go 2018-03-14 14:59:10.000000000 +0000 +++ prometheus-mysqld-exporter-0.11.0+ds/mysqld_exporter_test.go 2018-10-15 19:28:15.000000000 +0000 @@ -54,6 +54,14 @@ host = 1.2.3.4 port = 3307 ` + ignoreBooleanKeys = ` + [client] + user = root + password = abc123 + + [mysql] + skip-auto-rehash + ` badConfig = ` [client] user = root @@ -67,10 +75,7 @@ [hello] world = ismine ` - badConfig4 = ` - [hello] - world - ` + badConfig4 = `[hello` ) convey.Convey("Various .my.cnf configurations", t, func() { convey.Convey("Local tcp connection", func() { @@ -93,21 +98,25 @@ dsn, _ := parseMycnf([]byte(remoteConfig)) convey.So(dsn, convey.ShouldEqual, "dude:nopassword@tcp(1.2.3.4:3307)/") }) + convey.Convey("Ignore boolean keys", func() { + dsn, _ := parseMycnf([]byte(ignoreBooleanKeys)) + convey.So(dsn, convey.ShouldEqual, "root:abc123@tcp(localhost:3306)/") + }) convey.Convey("Missed user", func() { _, err := parseMycnf([]byte(badConfig)) - convey.So(err, convey.ShouldNotBeNil) + convey.So(err, convey.ShouldBeError, fmt.Errorf("no user or password specified under [client] in %s", badConfig)) }) convey.Convey("Missed password", func() { _, err := parseMycnf([]byte(badConfig2)) - convey.So(err, convey.ShouldNotBeNil) + convey.So(err, convey.ShouldBeError, fmt.Errorf("no user or password specified under [client] in %s", badConfig2)) }) convey.Convey("No [client] section", func() { _, err := parseMycnf([]byte(badConfig3)) - convey.So(err, convey.ShouldNotBeNil) + convey.So(err, convey.ShouldBeError, fmt.Errorf("no user or password specified under [client] in %s", badConfig3)) }) convey.Convey("Invalid config", func() { _, err := parseMycnf([]byte(badConfig4)) - convey.So(err, convey.ShouldNotBeNil) + convey.So(err, convey.ShouldBeError, fmt.Errorf("failed reading ini file: unclosed section: %s", badConfig4)) }) }) } diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/README.md prometheus-mysqld-exporter-0.11.0+ds/README.md --- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/README.md 2018-03-14 14:59:10.000000000 +0000 +++ prometheus-mysqld-exporter-0.11.0+ds/README.md 2018-10-15 19:28:15.000000000 +0000 @@ -28,7 +28,7 @@ Running using an environment variable: - export DATA_SOURCE_NAME='login:password@(hostname:port)/' + export DATA_SOURCE_NAME='user:password@(hostname:3306)/' ./mysqld_exporter Running using ~/.my.cnf: @@ -58,6 +58,8 @@ collect.info_schema.clientstats | 5.5 | If running with userstat=1, set to true to collect client statistics. collect.info_schema.innodb_metrics | 5.6 | Collect metrics from information_schema.innodb_metrics. collect.info_schema.innodb_tablespaces | 5.7 | Collect metrics from information_schema.innodb_sys_tablespaces. +collect.info_schema.innodb_cmp | 5.5 | Collect InnoDB compressed tables metrics from information_schema.innodb_cmp. 
+collect.info_schema.innodb_cmpmem | 5.5 | Collect InnoDB buffer pool compression metrics from information_schema.innodb_cmpmem.
 collect.info_schema.processlist | 5.1 | Collect thread state counts from information_schema.processlist.
 collect.info_schema.processlist.min_time | 5.1 | Minimum time a thread must be in each state to be counted. (default: 0)
 collect.info_schema.query_response_time | 5.5 | Collect query response time distribution if query_response_time_stats is ON.
@@ -75,7 +77,9 @@
 collect.perf_schema.indexiowaits | 5.6 | Collect metrics from performance_schema.table_io_waits_summary_by_index_usage.
 collect.perf_schema.tableiowaits | 5.6 | Collect metrics from performance_schema.table_io_waits_summary_by_table.
 collect.perf_schema.tablelocks | 5.6 | Collect metrics from performance_schema.table_lock_waits_summary_by_table.
+collect.perf_schema.replication_group_member_stats | 5.7 | Collect metrics from performance_schema.replication_group_member_stats.
 collect.slave_status | 5.1 | Collect from SHOW SLAVE STATUS (Enabled by default)
+collect.slave_hosts | 5.1 | Collect from SHOW SLAVE HOSTS
 collect.heartbeat | 5.1 | Collect from [heartbeat](#heartbeat).
 collect.heartbeat.database | 5.1 | Database from where to collect heartbeat data. (default: heartbeat)
 collect.heartbeat.table | 5.1 | Table from where to collect heartbeat data. (default: heartbeat)
@@ -98,6 +102,24 @@
 must be set via the `DATA_SOURCE_NAME` environment variable. The format of this
 variable is described at https://github.com/go-sql-driver/mysql#dsn-data-source-name.
+
+## Customizing Configuration for an SSL Connection
+If the MySQL server supports SSL, you may need to specify a CA truststore to verify the server's chain of trust. You may also need to specify an SSL keypair for the client side of the SSL connection. To configure the mysqld exporter to use a custom CA certificate, add the following to the MySQL cnf file:
+
+```
+ssl-ca=/path/to/ca/file
+```
+
+To specify the client SSL keypair, add the following to the cnf file:
+
+```
+ssl-key=/path/to/ssl/client/key
+ssl-cert=/path/to/ssl/client/cert
+```
+
+Customizing the SSL configuration is only supported in the MySQL cnf file; it is not supported if you set the server's data source name via the DATA_SOURCE_NAME environment variable.
+
+
 ## Using Docker
 
 You can deploy this exporter using the [prom/mysqld-exporter](https://registry.hub.docker.com/u/prom/mysqld-exporter/) Docker image.
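
[Editor's note: the ssl-ca, ssl-cert and ssl-key keys above are wired through to go-sql-driver/mysql's named TLS configuration mechanism by the customizeTLS helper added to mysqld_exporter.go later in this diff. A minimal, self-contained sketch of that mechanism, using placeholder paths rather than the exporter's actual code:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"database/sql"
	"io/ioutil"
	"log"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// CA bundle named by ssl-ca (placeholder path).
	pemCA, err := ioutil.ReadFile("/path/to/ca/file")
	if err != nil {
		log.Fatal(err)
	}
	caBundle := x509.NewCertPool()
	if !caBundle.AppendCertsFromPEM(pemCA) {
		log.Fatal("failed to parse PEM-encoded CA certificates")
	}
	// Client keypair named by ssl-cert/ssl-key (placeholder paths).
	keypair, err := tls.LoadX509KeyPair("/path/to/ssl/client/cert", "/path/to/ssl/client/key")
	if err != nil {
		log.Fatal(err)
	}
	// Register the configuration under a name the DSN can refer to; the
	// exporter always uses the fixed name "custom".
	if err := mysql.RegisterTLSConfig("custom", &tls.Config{
		RootCAs:      caBundle,
		Certificates: []tls.Certificate{keypair},
	}); err != nil {
		log.Fatal(err)
	}
	// The tls=custom DSN parameter selects the registered configuration.
	db, err := sql.Open("mysql", "user:password@tcp(hostname:3306)/?tls=custom")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}
```

The name passed to RegisterTLSConfig must match the tls= parameter in the DSN. Since only parseMycnf performs this registration, custom SSL settings take effect only through the cnf file, not through a DSN supplied in DATA_SOURCE_NAME.]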
@@ -105,10 +127,14 @@ For example: ```bash +docker network create my-mysql-network docker pull prom/mysqld-exporter -docker run -d -p 9104:9104 --link=my_mysql_container:bdd \ - -e DATA_SOURCE_NAME="user:password@(bdd:3306)/database" prom/mysqld-exporter +docker run -d \ + -p 9104:9104 \ + --network my-mysql-network \ + -e DATA_SOURCE_NAME="user:password@(my-mysql-network:3306)/" \ + prom/mysqld-exporter ``` ## heartbeat diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/test_image.sh prometheus-mysqld-exporter-0.11.0+ds/test_image.sh --- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/test_image.sh 1970-01-01 00:00:00.000000000 +0000 +++ prometheus-mysqld-exporter-0.11.0+ds/test_image.sh 2018-10-15 19:28:15.000000000 +0000 @@ -0,0 +1,36 @@ +#!/bin/bash +set -exo pipefail + +docker_image=$1 +port=$2 + +container_id='' + +wait_start() { + for in in {1..10}; do + if /usr/bin/curl -s -m 5 -f "http://localhost:${port}/metrics" > /dev/null; then + docker_cleanup + exit 0 + else + sleep 1 + fi + done + + exit 1 +} + +docker_start() { + container_id=$(docker run -d --network mysql-test -e DATA_SOURCE_NAME="root:secret@(mysql-test:3306)/" -p "${port}":"${port}" "${docker_image}") +} + +docker_cleanup() { + docker kill "${container_id}" +} + +if [[ "$#" -ne 2 ]] ; then + echo "Usage: $0 quay.io/prometheus/mysqld-exporter:v0.10.0 9104" >&2 + exit 1 +fi + +docker_start +wait_start diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/.travis.yml prometheus-mysqld-exporter-0.11.0+ds/.travis.yml --- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/.travis.yml 2018-03-14 14:59:10.000000000 +0000 +++ prometheus-mysqld-exporter-0.11.0+ds/.travis.yml 2018-10-15 19:28:15.000000000 +0000 @@ -3,9 +3,8 @@ language: go go: -- 1.8.x - 1.9.x -- master +- 1.10.x env: - MYSQL_IMAGE=mysql/mysql-server:5.5 diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/vendor/vendor.json prometheus-mysqld-exporter-0.11.0+ds/vendor/vendor.json --- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/vendor/vendor.json 2018-03-14 14:59:10.000000000 +0000 +++ prometheus-mysqld-exporter-0.11.0+ds/vendor/vendor.json 2018-10-15 19:28:15.000000000 +0000 @@ -31,18 +31,20 @@ "revisionTime": "2016-08-04T10:47:26Z" }, { - "checksumSHA1": "xmTgJXWHguGKHPuZE50FIbs88L0=", + "checksumSHA1": "kxzfnN+IPF0sViI9iiUnI7aZVWM=", "path": "github.com/go-sql-driver/mysql", - "revision": "a0583e0143b1624142adab07e0e97fe106d99561", - "revisionTime": "2016-12-01T11:50:36Z", - "version": "v1.3", - "versionExact": "v1.3" + "revision": "d523deb1b23d913de5bdada721a6071e71283618", + "revisionTime": "2018-06-03T12:45:54Z", + "version": "v1.4", + "versionExact": "v1.4.0" }, { - "checksumSHA1": "yqF125xVSkmfLpIVGrLlfE05IUk=", + "checksumSHA1": "Pyou8mceOASSFxc7GeXZuVdSMi0=", "path": "github.com/golang/protobuf/proto", - "revision": "1e59b77b52bf8e4b449a57e6f79f21226d571845", - "revisionTime": "2017-11-13T18:07:20Z" + "revision": "b4deda0973fb4c70b50d226b1af49f3da59f5265", + "revisionTime": "2018-04-30T18:52:41Z", + "version": "v1.1.0", + "versionExact": "v1.1.0" }, { "checksumSHA1": "yIkYzW7bzAD81zHyuCNmEj4+oxQ=", @@ -62,23 +64,21 @@ "checksumSHA1": "bKMZjd2wPw13VwoE7mBeSv5djFA=", "path": "github.com/matttproud/golang_protobuf_extensions/pbutil", "revision": "c12348ce28de40eed0136aa2b644d0ee0650e56c", - "revisionTime": "2016-04-24T11:30:07Z" + "revisionTime": "2016-04-24T11:30:07Z", + "version": "v1.0.1", + "versionExact": "v1.0.1" }, { - "checksumSHA1": "WSCsSdD3I7Are0ntaIq6zR9tl0s=", + 
"checksumSHA1": "WVgL9pNO2RZCCcaXfSYSNEPgtCo=", "path": "github.com/prometheus/client_golang/prometheus", - "revision": "f02bfc3484a6b03d1fc00d72d86add103ef9567b", - "revisionTime": "2018-01-11T15:59:30Z", - "version": "v0.8.0", - "versionExact": "v0.8.0" + "revision": "77e8f2ddcfed59ece3a8151879efb2304b5cbbcf", + "revisionTime": "2018-06-23T15:59:54Z" }, { - "checksumSHA1": "0AZfw0mlRpOkYPR+d4ftlPUwtlc=", + "checksumSHA1": "MYqKV5uVTfCxP9zBug7naBQ1vr8=", "path": "github.com/prometheus/client_golang/prometheus/promhttp", - "revision": "f02bfc3484a6b03d1fc00d72d86add103ef9567b", - "revisionTime": "2018-01-11T15:59:30Z", - "version": "v0.8.0", - "versionExact": "v0.8.0" + "revision": "77e8f2ddcfed59ece3a8151879efb2304b5cbbcf", + "revisionTime": "2018-06-23T15:59:54Z" }, { "checksumSHA1": "DvwvOlPNAgRntBzt3b3OSRMS2N4=", @@ -87,120 +87,158 @@ "revisionTime": "2017-11-17T10:05:41Z" }, { - "checksumSHA1": "xfnn0THnqNwjwimeTClsxahYrIo=", + "checksumSHA1": "vPdC/DzEm7YbzRir2wwnpLPfay8=", "path": "github.com/prometheus/common/expfmt", - "revision": "89604d197083d4781071d3c65855d24ecfb0a563", - "revisionTime": "2018-01-10T21:49:58Z" + "revision": "7600349dcfe1abd18d72d3a1770870d9800a7801", + "revisionTime": "2018-05-18T15:47:59Z" }, { "checksumSHA1": "GWlM3d2vPYyNATtTFgftS10/A9w=", "path": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg", - "revision": "89604d197083d4781071d3c65855d24ecfb0a563", - "revisionTime": "2018-01-10T21:49:58Z" + "revision": "7600349dcfe1abd18d72d3a1770870d9800a7801", + "revisionTime": "2018-05-18T15:47:59Z" }, { "checksumSHA1": "MGnqHnmEqc1fjnYiWReSiW8C27A=", "path": "github.com/prometheus/common/log", - "revision": "89604d197083d4781071d3c65855d24ecfb0a563", - "revisionTime": "2018-01-10T21:49:58Z" + "revision": "7600349dcfe1abd18d72d3a1770870d9800a7801", + "revisionTime": "2018-05-18T15:47:59Z" }, { - "checksumSHA1": "YU+/K48IMawQnToO4ETE6a+hhj4=", + "checksumSHA1": "EXTRY7DL9gFW8c341Dk6LDXCBn8=", "path": "github.com/prometheus/common/model", - "revision": "89604d197083d4781071d3c65855d24ecfb0a563", - "revisionTime": "2018-01-10T21:49:58Z" + "revision": "7600349dcfe1abd18d72d3a1770870d9800a7801", + "revisionTime": "2018-05-18T15:47:59Z" }, { "checksumSHA1": "91KYK0SpvkaMJJA2+BcxbVnyRO0=", "path": "github.com/prometheus/common/version", - "revision": "89604d197083d4781071d3c65855d24ecfb0a563", - "revisionTime": "2018-01-10T21:49:58Z" + "revision": "7600349dcfe1abd18d72d3a1770870d9800a7801", + "revisionTime": "2018-05-18T15:47:59Z" }, { - "checksumSHA1": "53Wwu3eXhNXZl5GsiOJJyVAY+6s=", + "checksumSHA1": "s8OGVwKHbYx/oNKNyZ8f7wWK0dA=", "path": "github.com/prometheus/procfs", - "revision": "b15cd069a83443be3154b719d0cc9fe8117f09fb", - "revisionTime": "2017-12-26T18:39:07Z" + "revision": "7d6f385de8bea29190f15ba9931442a0eaef9af7", + "revisionTime": "2018-06-12T22:21:13Z" }, { - "checksumSHA1": "xCiFAAwVTrjsfZT1BIJQ3DgeNCY=", + "checksumSHA1": "lv9rIcjbVEGo8AT1UCUZXhXrfQc=", + "path": "github.com/prometheus/procfs/internal/util", + "revision": "7d6f385de8bea29190f15ba9931442a0eaef9af7", + "revisionTime": "2018-06-12T22:21:13Z" + }, + { + "checksumSHA1": "HSP5hVT0CNMRa8+Xtz4z2Ic5U0E=", + "path": "github.com/prometheus/procfs/nfs", + "revision": "7d6f385de8bea29190f15ba9931442a0eaef9af7", + "revisionTime": "2018-06-12T22:21:13Z" + }, + { + "checksumSHA1": "yItvTQLUVqm/ArLEbvEhqG0T5a0=", "path": "github.com/prometheus/procfs/xfs", - "revision": "b15cd069a83443be3154b719d0cc9fe8117f09fb", - "revisionTime": "2017-12-26T18:39:07Z" + "revision": 
"7d6f385de8bea29190f15ba9931442a0eaef9af7", + "revisionTime": "2018-06-12T22:21:13Z" }, { - "checksumSHA1": "ySaT8G3I3y4MmnoXOYAAX0rC+p8=", + "checksumSHA1": "+nVM+CEZGAopOrYlLifgWP+X01E=", + "path": "github.com/satori/go.uuid", + "revision": "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3", + "revisionTime": "2018-01-03T03:42:45Z", + "version": "v1.2.0", + "versionExact": "v1.2.0" + }, + { + "checksumSHA1": "GWtDi0sYbtCQzF/ZaVhaHvCMvuk=", "path": "github.com/sirupsen/logrus", - "revision": "d682213848ed68c0a260ca37d6dd5ace8423f5ba", - "revisionTime": "2017-12-05T20:32:29Z", - "version": "v1.0.4", - "versionExact": "v1.0.4" + "revision": "c155da19408a8799da419ed3eeb0cb5db0ad5dbc", + "revisionTime": "2018-03-11T22:51:37Z", + "version": "v1.0.5", + "versionExact": "v1.0.5" }, { - "checksumSHA1": "ozRFnFdTG0IjkTE0RkZn71XO4gw=", + "checksumSHA1": "wVmkBavCZSwHYTDGxa1xOD3RKe0=", "path": "github.com/smartystreets/assertions", - "revision": "0b37b35ec7434b77e77a4bb29b79677cced992ea", - "revisionTime": "2017-09-25T17:21:51Z" + "revision": "7678a5452ebea5b7090a6b163f844c133f523da2", + "revisionTime": "2018-03-01T16:12:46Z", + "version": "1.8.3", + "versionExact": "1.8.3" }, { - "checksumSHA1": "Vzb+dEH/LTYbvr8RXHmt6xJHz04=", + "checksumSHA1": "v6W3GIQMzr3QSXB2NtBa9X7SwiI=", "path": "github.com/smartystreets/assertions/internal/go-render/render", - "revision": "0b37b35ec7434b77e77a4bb29b79677cced992ea", - "revisionTime": "2017-09-25T17:21:51Z" + "revision": "7678a5452ebea5b7090a6b163f844c133f523da2", + "revisionTime": "2018-03-01T16:12:46Z", + "version": "1.8.3", + "versionExact": "1.8.3" }, { "checksumSHA1": "r6FauVdOTFnwYQgrKGFuWUbIAJE=", "path": "github.com/smartystreets/assertions/internal/oglematchers", - "revision": "0b37b35ec7434b77e77a4bb29b79677cced992ea", - "revisionTime": "2017-09-25T17:21:51Z" + "revision": "7678a5452ebea5b7090a6b163f844c133f523da2", + "revisionTime": "2018-03-01T16:12:46Z", + "version": "1.8.3", + "versionExact": "1.8.3" }, { "checksumSHA1": "f4m09DHEetaanti/GqUJzyCBTaI=", "path": "github.com/smartystreets/goconvey/convey", - "revision": "e5b2b7c9111590d019a696c7800593f666e1a7f4", - "revisionTime": "2017-08-25T22:14:26Z" + "revision": "9e8dc3f972df6c8fcc0375ef492c24d0bb204857", + "revisionTime": "2017-06-02T16:46:21Z", + "version": "1.6.3", + "versionExact": "1.6.3" }, { "checksumSHA1": "9LakndErFi5uCXtY1KWl0iRnT4c=", "path": "github.com/smartystreets/goconvey/convey/gotest", - "revision": "e5b2b7c9111590d019a696c7800593f666e1a7f4", - "revisionTime": "2017-08-25T22:14:26Z" + "revision": "9e8dc3f972df6c8fcc0375ef492c24d0bb204857", + "revisionTime": "2017-06-02T16:46:21Z", + "version": "1.6.3", + "versionExact": "1.6.3" }, { "checksumSHA1": "FWDhk37bhAwZ2363D/L2xePwR64=", "path": "github.com/smartystreets/goconvey/convey/reporting", - "revision": "e5b2b7c9111590d019a696c7800593f666e1a7f4", - "revisionTime": "2017-08-25T22:14:26Z" + "revision": "9e8dc3f972df6c8fcc0375ef492c24d0bb204857", + "revisionTime": "2017-06-02T16:46:21Z", + "version": "1.6.3", + "versionExact": "1.6.3" }, { - "checksumSHA1": "X1NTlfcau2XcV6WtAHF6b/DECOA=", + "checksumSHA1": "BGm8lKZmvJbf/YOJLeL1rw2WVjA=", "path": "golang.org/x/crypto/ssh/terminal", - "revision": "13931e22f9e72ea58bb73048bc752b48c6d4d4ac", - "revisionTime": "2018-01-11T11:10:38Z" + "revision": "a49355c7e3f8fe157a85be2f77e6e269a0f89602", + "revisionTime": "2018-06-20T09:14:27Z" }, { - "checksumSHA1": "gRu2kYXXhEN+A+NpuAuMrjuiud0=", + "checksumSHA1": "e66DmNWQKgI97tvj4BH7rHYnyJs=", "path": "golang.org/x/sys/unix", - "revision": 
"810d7000345868fc619eb81f46307107118f4ae1", - "revisionTime": "2018-01-09T14:25:55Z" + "revision": "7138fd3d9dc8335c567ca206f4333fb75eb05d56", + "revisionTime": "2018-06-27T13:57:12Z" }, { - "checksumSHA1": "eQq+ZoTWPjyizS9XalhZwfGjQao=", + "checksumSHA1": "zc2NI38L40/N4+pjd9P2ESz68/0=", "path": "golang.org/x/sys/windows", - "revision": "810d7000345868fc619eb81f46307107118f4ae1", - "revisionTime": "2018-01-09T14:25:55Z" + "revision": "7138fd3d9dc8335c567ca206f4333fb75eb05d56", + "revisionTime": "2018-06-27T13:57:12Z" }, { - "checksumSHA1": "ZdFZFaXmCgEEaEhVPkyXrnhKhsg=", + "checksumSHA1": "P9OIhD26uWlIST/me4TYnvseCoY=", "path": "golang.org/x/sys/windows/registry", - "revision": "810d7000345868fc619eb81f46307107118f4ae1", - "revisionTime": "2018-01-09T14:25:55Z" + "revision": "7138fd3d9dc8335c567ca206f4333fb75eb05d56", + "revisionTime": "2018-06-27T13:57:12Z" }, { "checksumSHA1": "uVlUSSKplihZG7N+QJ6fzDZ4Kh8=", "path": "golang.org/x/sys/windows/svc/eventlog", - "revision": "810d7000345868fc619eb81f46307107118f4ae1", - "revisionTime": "2018-01-09T14:25:55Z" + "revision": "7138fd3d9dc8335c567ca206f4333fb75eb05d56", + "revisionTime": "2018-06-27T13:57:12Z" + }, + { + "checksumSHA1": "LiyXfqOzaeQ8vgYZH3t2hUEdVTw=", + "path": "google.golang.org/appengine/cloudsql", + "revision": "b1f26356af11148e710935ed1ac8a7f5702c7612", + "revisionTime": "2018-05-21T22:34:13Z" }, { "checksumSHA1": "WfiM+grLatLDuXKj1roCcRDod/4=", @@ -215,10 +253,10 @@ "revisionTime": "2017-12-17T18:08:21Z" }, { - "checksumSHA1": "P4LsadWqFYNCs98XRAiacey3q1U=", + "checksumSHA1": "nv0VcXrE4cc3r10jK27JZYbr17E=", "path": "gopkg.in/ini.v1", - "revision": "32e4c1e6bc4e7d0d8451aa6b75200d19e37a536a", - "revisionTime": "2017-11-19T05:34:21Z" + "revision": "06f5f3d67269ccec1fe5fe4134ba6e982984f7f5", + "revisionTime": "2018-05-26T22:45:42Z" } ], "rootPath": "github.com/prometheus/mysqld_exporter" diff -Nru prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/VERSION prometheus-mysqld-exporter-0.11.0+ds/VERSION --- prometheus-mysqld-exporter-0.10.0+git20180201.a71f4bb+ds/VERSION 2018-03-14 14:59:10.000000000 +0000 +++ prometheus-mysqld-exporter-0.11.0+ds/VERSION 2018-10-15 19:28:15.000000000 +0000 @@ -1 +1 @@ -0.10.0 +0.11.0