diff -Nru ruby-sidekiq-6.4.1+dfsg/bin/sidekiqload ruby-sidekiq-6.5.7+dfsg3/bin/sidekiqload --- ruby-sidekiq-6.4.1+dfsg/bin/sidekiqload 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/bin/sidekiqload 2022-10-26 06:38:14.000000000 +0000 @@ -11,6 +11,10 @@ require_relative "../lib/sidekiq/cli" require_relative "../lib/sidekiq/launcher" +if ENV["SIDEKIQ_REDIS_CLIENT"] + Sidekiq::RedisConnection.adapter = :redis_client +end + Sidekiq.configure_server do |config| config.options[:concurrency] = 10 config.redis = {db: 13, port: 6380} @@ -36,7 +40,6 @@ # brew tap shopify/shopify # brew install toxiproxy -# gem install toxiproxy # run `toxiproxy-server` in a separate terminal window. require "toxiproxy" # simulate a non-localhost network for realer-world conditions. @@ -86,16 +89,11 @@ `ps -o rss= -p #{Process.pid}`.chomp.to_i end -iter = 50 +iter = 10 count = 10_000 iter.times do - arr = Array.new(count) do - [] - end - count.times do |idx| - arr[idx][0] = idx - end + arr = Array.new(count) { |idx| [idx] } Sidekiq::Client.push_bulk("class" => LoadWorker, "args" => arr) end Sidekiq.logger.error "Created #{count * iter} jobs" @@ -124,16 +122,24 @@ end end +def with_latency(latency, &block) + Sidekiq.logger.error "Simulating #{latency}ms of latency between Sidekiq and redis" + if latency > 0 + Toxiproxy[:redis].downstream(:latency, latency: latency).apply(&block) + else + yield + end +end + begin # RubyProf::exclude_threads = [ Monitoring ] # RubyProf.start events = Sidekiq.options[:lifecycle_events][:startup] events.each(&:call) events.clear - - Sidekiq.logger.error "Simulating 1ms of latency between Sidekiq and redis" - Toxiproxy[:redis].downstream(:latency, latency: 1).apply do - launcher = Sidekiq::Launcher.new(Sidekiq.options) + + with_latency(Integer(ENV.fetch("LATENCY", "1"))) do + launcher = Sidekiq::Launcher.new(Sidekiq) launcher.run while readable_io = IO.select([self_read]) diff -Nru ruby-sidekiq-6.4.1+dfsg/Changes.md 
ruby-sidekiq-6.5.7+dfsg3/Changes.md --- ruby-sidekiq-6.4.1+dfsg/Changes.md 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/Changes.md 2022-10-26 06:38:14.000000000 +0000 @@ -2,7 +2,86 @@ [Sidekiq Changes](https://github.com/mperham/sidekiq/blob/main/Changes.md) | [Sidekiq Pro Changes](https://github.com/mperham/sidekiq/blob/main/Pro-Changes.md) | [Sidekiq Enterprise Changes](https://github.com/mperham/sidekiq/blob/main/Ent-Changes.md) -HEAD +6.5.7 +---------- + +- Updates for JA and ZH locales +- Further optimizations for scheduled polling [#5513] + +6.5.6 +---------- + +- Fix deprecation warnings with redis-rb 4.8.0 [#5484] +- Lock redis-rb to < 5.0 as we are moving to redis-client in Sidekiq 7.0 + +6.5.5 +---------- + +- Fix require issue with job_retry.rb [#5462] +- Improve Sidekiq::Web compatibility with Rack 3.x + +6.5.4 +---------- + +- Fix invalid code on Ruby 2.5 [#5460] +- Fix further metrics dependency issues [#5457] + +6.5.3 +---------- + +- Don't require metrics code without explicit opt-in [#5456] + +6.5.2 +---------- + +- [Job Metrics are under active development, help wanted!](https://github.com/mperham/sidekiq/wiki/Metrics#contributing) **BETA** +- Add `Context` column on queue page which shows any CurrentAttributes [#5450] +- `sidekiq_retry_in` may now return `:discard` or `:kill` to dynamically stop job retries [#5406] +- Smarter sorting of processes in /busy Web UI [#5398] +- Fix broken hamburger menu in mobile UI [#5428] +- Require redis-rb 4.5.0. Note that Sidekiq will break if you use the + [`Redis.exists_returns_integer = false`](https://github.com/redis/redis-rb/blob/master/CHANGELOG.md#450) flag. 
[#5394] + +6.5.1 +---------- + +- Fix `push_bulk` breakage [#5387] + +6.5.0 +--------- + +- Substantial refactoring of Sidekiq server internals, part of a larger effort + to reduce Sidekiq's internal usage of global methods and data, see [docs/global_to_local.md](docs/global_to_local.md) and [docs/middleware.md](docs/middleware.md). +- **Add beta support for the `redis-client` gem**. This will become the default Redis driver in Sidekiq 7.0. [#5298] + Read more: https://github.com/mperham/sidekiq/wiki/Using-redis-client +- **Add beta support for DB transaction-aware client** [#5291] + Add this line to your initializer and any jobs created during a transaction + will only be pushed to Redis **after the transaction commits**. You will need to add the + `after_commit_everywhere` gem to your Gemfile. +```ruby +Sidekiq.transactional_push! +``` + This feature does not have a lot of production usage yet; please try it out and let us + know if you have any issues. It will be fully supported in Sidekiq 7.0 or removed if it + proves problematic. +- Fix regression with middleware arguments [#5312] + +6.4.2 +--------- + +- Strict argument checking now runs after client-side middleware [#5246] +- Fix page events with live polling [#5184] +- Many under-the-hood changes to remove all usage of the term "worker" + from the Sidekiq codebase and APIs. This mostly involved RDoc and local + variable names but a few constants and public APIs were changed. The old + APIs will be removed in Sidekiq 7.0. +``` +Sidekiq::DEFAULT_WORKER_OPTIONS -> Sidekiq.default_job_options +Sidekiq.default_worker_options -> Sidekiq.default_job_options +Sidekiq::Queues["default"].jobs_by_worker(HardJob) -> Sidekiq::Queues["default"].jobs_by_class(HardJob) +``` + +6.4.1 --------- - Fix pipeline/multi deprecations in redis-rb 4.6 @@ -319,6 +398,13 @@ - Integrate the StandardRB code formatter to ensure consistent code styling. [#4114, gearnode] +5.2.10 +--------- + +- Backport fix for CVE-2022-23837. 
+- Migrate to `exists?` for redis-rb. +- Lock redis-rb to <4.6 to avoid deprecations. + 5.2.9 --------- diff -Nru ruby-sidekiq-6.4.1+dfsg/debian/changelog ruby-sidekiq-6.5.7+dfsg3/debian/changelog --- ruby-sidekiq-6.4.1+dfsg/debian/changelog 2022-04-03 13:57:01.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/debian/changelog 2023-07-23 13:12:19.000000000 +0000 @@ -1,3 +1,30 @@ +ruby-sidekiq (6.5.7+dfsg3-3) unstable; urgency=medium + + * Team upload. + * d/control: fix dep name for ruby-connection-pool (Closes: #1039921) + + -- Mohammed Bilal Sun, 23 Jul 2023 18:42:19 +0530 + +ruby-sidekiq (6.5.7+dfsg3-2) unstable; urgency=medium + + * Reupload to unstable + * Update minimum version of ruby-connection-pool to 2.2.5~ + * Drop obsolete X{S,B}-Ruby-Versions fields + + -- Pirate Praveen Tue, 13 Jun 2023 01:36:29 +0530 + +ruby-sidekiq (6.5.7+dfsg3-1) experimental; urgency=medium + + [ Debian Janitor ] + * Remove constraints unnecessary since buster (oldstable) + + [ Pirate Praveen ] + * New upstream version 6.5.7+dfsg + * Bump Standards-Version to 4.6.2 (no changes needed) + * Exclude generated javascript assets and regenerate them + + -- Pirate Praveen Sun, 26 Feb 2023 02:15:36 +0530 + ruby-sidekiq (6.4.1+dfsg-1) unstable; urgency=medium * New upstream version 6.4.1+dfsg diff -Nru ruby-sidekiq-6.4.1+dfsg/debian/control ruby-sidekiq-6.5.7+dfsg3/debian/control --- ruby-sidekiq-6.4.1+dfsg/debian/control 2022-04-03 13:57:01.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/debian/control 2023-07-23 13:12:19.000000000 +0000 @@ -4,39 +4,38 @@ Maintainer: Debian Ruby Team Uploaders: Pirate Praveen Build-Depends: debhelper-compat (= 13), - gem2deb, + gem2deb (>= 2.1~), libjs-d3, libjs-jquery, libjs-jquery-timeago, libjs-rickshaw, libjs-bootstrap, + libjs-chart.js (>= 3.9.1+~cs3.1.2~), procps, redis-server, ruby-actionmailer (>= 2:6.0.2), ruby-activerecord (>= 2:6.0.2), ruby-celluloid, ruby-concurrent, - ruby-connection-pool (<< 3.0), - ruby-connection-pool, + ruby-connection-pool (>= 
2.2.5~), ruby-coveralls, ruby-railties (>= 2:6.0.2), ruby-redis, ruby-redis-namespace (<< 2.0), ruby-redis-namespace, - ruby-rack-protection (>= 1.5.0), + ruby-rack-protection, ruby-sinatra, ruby-tilt -Standards-Version: 4.6.0 +Standards-Version: 4.6.2 Vcs-Git: https://salsa.debian.org/ruby-team/ruby-sidekiq.git Vcs-Browser: https://salsa.debian.org/ruby-team/ruby-sidekiq Homepage: http://sidekiq.org Testsuite: autopkgtest-pkg-ruby -XS-Ruby-Versions: all Package: ruby-sidekiq Architecture: all -XB-Ruby-Versions: ${ruby:Versions} Depends: libjs-bootstrap, + libjs-chart.js (>= 3.9.1+~cs3.1.2~), ${ruby:Depends}, ${misc:Depends}, ${shlibs:Depends} diff -Nru ruby-sidekiq-6.4.1+dfsg/debian/copyright ruby-sidekiq-6.5.7+dfsg3/debian/copyright --- ruby-sidekiq-6.4.1+dfsg/debian/copyright 2022-04-03 13:57:01.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/debian/copyright 2023-07-23 13:12:19.000000000 +0000 @@ -5,6 +5,9 @@ web/assets/javascripts/application.js web/assets/javascripts/dashboard.js web/assets/stylesheets/bootstrap.css + web/assets/javascripts/chart.min.js + web/assets/javascripts/chartjs-plugin-annotation.min.js + web/assets/javascripts/graph.js Files: * Copyright: Contributed Systems LLC diff -Nru ruby-sidekiq-6.4.1+dfsg/debian/generate-graph-js ruby-sidekiq-6.5.7+dfsg3/debian/generate-graph-js --- ruby-sidekiq-6.4.1+dfsg/debian/generate-graph-js 1970-01-01 00:00:00.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/debian/generate-graph-js 2023-07-23 13:12:19.000000000 +0000 @@ -0,0 +1,13 @@ +#!/bin/sh +set -e +mkdir -p web/assets/javascripts +echo "// D3 3.5.16" > web/assets/javascripts/graph.js +cat /usr/share/javascript/d3/d3.min.js >> web/assets/javascripts/graph.js +echo "" >>graph.js +echo "// Match width of graphs with summary bar" >> web/assets/javascripts/graph.js +echo "var responsiveWidth = function() {" >> web/assets/javascripts/graph.js +echo " return document.getElementsByClassName('summary_bar')[0].clientWidth - 30;" >> web/assets/javascripts/graph.js 
+echo "};" >> web/assets/javascripts/graph.js +echo "" >> web/assets/javascripts/graph.js +echo "// Rickshaw 1.6.0" >> web/assets/javascripts/graph.js +cat /usr/share/javascript/rickshaw/rickshaw.min.js >> web/assets/javascripts/graph.js diff -Nru ruby-sidekiq-6.4.1+dfsg/debian/rules ruby-sidekiq-6.5.7+dfsg3/debian/rules --- ruby-sidekiq-6.4.1+dfsg/debian/rules 2022-04-03 13:57:01.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/debian/rules 2023-07-23 13:12:19.000000000 +0000 @@ -19,6 +19,7 @@ override_dh_auto_build: dh_auto_build -O--buildsystem=ruby chmod -x web/assets/images/logo.png + debian/generate-graph-js override_dh_auto_clean: dh_auto_clean -O--buildsystem=ruby diff -Nru ruby-sidekiq-6.4.1+dfsg/debian/salsa-ci.yml ruby-sidekiq-6.5.7+dfsg3/debian/salsa-ci.yml --- ruby-sidekiq-6.4.1+dfsg/debian/salsa-ci.yml 1970-01-01 00:00:00.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/debian/salsa-ci.yml 2023-07-23 13:12:19.000000000 +0000 @@ -0,0 +1,4 @@ +--- +include: + - https://salsa.debian.org/salsa-ci-team/pipeline/raw/master/salsa-ci.yml + - https://salsa.debian.org/salsa-ci-team/pipeline/raw/master/pipeline-jobs.yml diff -Nru ruby-sidekiq-6.4.1+dfsg/debian/symlink-js ruby-sidekiq-6.5.7+dfsg3/debian/symlink-js --- ruby-sidekiq-6.4.1+dfsg/debian/symlink-js 2022-04-03 13:57:01.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/debian/symlink-js 2023-07-23 13:12:19.000000000 +0000 @@ -1,3 +1,5 @@ #!/bin/sh version=$(dpkg-parsechangelog -Sversion|cut -d+ -f1) ln -sf /usr/share/javascript/bootstrap/css/bootstrap.min.css debian/ruby-sidekiq/usr/share/rubygems-integration/all/gems/sidekiq-${version}/web/assets/stylesheets/bootstrap.css +ln -sf /usr/share/javascript/chart.js/chart.min.js debian/ruby-sidekiq/usr/share/rubygems-integration/all/gems/sidekiq-${version}/web/assets/javascripts/chart.min.js +ln -sf /usr/share/javascript/chartjs-plugin-annotation/chartjs-plugin-annotation.min.js 
debian/ruby-sidekiq/usr/share/rubygems-integration/all/gems/sidekiq-${version}/web/assets/javascripts/chartjs-plugin-annotation.min.js diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/api.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/api.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/api.rb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/api.rb 2022-10-26 06:38:14.000000000 +0000 @@ -3,9 +3,31 @@ require "sidekiq" require "zlib" +require "set" require "base64" +if ENV["SIDEKIQ_METRICS_BETA"] + require "sidekiq/metrics/deploy" + require "sidekiq/metrics/query" +end + +# +# Sidekiq's Data API provides a Ruby object model on top +# of Sidekiq's runtime data in Redis. This API should never +# be used within application code for business logic. +# +# The Sidekiq server process never uses this API: all data +# manipulation is done directly for performance reasons to +# ensure we are using Redis as efficiently as possible at +# every callsite. +# + module Sidekiq + # Retrieve runtime statistics from Redis regarding + # this Sidekiq cluster. + # + # stat = Sidekiq::Stats.new + # stat.processed class Stats def initialize fetch_stats_fast! @@ -52,6 +74,7 @@ end # O(1) redis calls + # @api private def fetch_stats_fast! pipe1_res = Sidekiq.redis { |conn| conn.pipelined do |pipeline| @@ -91,6 +114,7 @@ end # O(number of processes + number of queues) redis calls + # @api private def fetch_stats_slow! processes = Sidekiq.redis { |conn| conn.sscan_each("processes").to_a @@ -116,11 +140,13 @@ @stats end + # @api private def fetch_stats! fetch_stats_fast! fetch_stats_slow! end + # @api private def reset(*stats) all = %w[failed processed] stats = stats.empty? ? all : all & stats.flatten.compact.map(&:to_s) @@ -191,7 +217,7 @@ stat_hash[dates[idx]] = value ? value.to_i : 0 end end - rescue Redis::CommandError + rescue RedisConnection.adapter::CommandError # mget will trigger a CROSSSLOT error when run against a Cluster # TODO Someone want to add Cluster support? 
end @@ -202,9 +228,10 @@ end ## - # Encapsulates a queue within Sidekiq. + # Represents a queue within Sidekiq. # Allows enumeration of all jobs within the queue - # and deletion of jobs. + # and deletion of jobs. NB: this queue data is real-time + # and is changing within Redis moment by moment. # # queue = Sidekiq::Queue.new("mailer") # queue.each do |job| @@ -212,29 +239,34 @@ # job.args # => [1, 2, 3] # job.delete if job.jid == 'abcdef1234567890' # end - # class Queue include Enumerable ## - # Return all known queues within Redis. + # Fetch all known queues within Redis. # + # @return [Array] def self.all Sidekiq.redis { |c| c.sscan_each("queues").to_a }.sort.map { |q| Sidekiq::Queue.new(q) } end attr_reader :name + # @param name [String] the name of the queue def initialize(name = "default") @name = name.to_s @rname = "queue:#{name}" end + # The current size of the queue within Redis. + # This value is real-time and can change between calls. + # + # @return [Integer] the size def size Sidekiq.redis { |con| con.llen(@rname) } end - # Sidekiq Pro overrides this + # @return [Boolean] if the queue is currently paused def paused? false end @@ -243,7 +275,7 @@ # Calculates this queue's latency, the difference in seconds since the oldest # job in the queue was enqueued. # - # @return Float + # @return [Float] in seconds def latency entry = Sidekiq.redis { |conn| conn.lrange(@rname, -1, -1) @@ -279,34 +311,54 @@ ## # Find the job with the given JID within this queue. # - # This is a slow, inefficient operation. Do not use under + # This is a *slow, inefficient* operation. Do not use under # normal conditions. 
+ # + # @param jid [String] the job_id to look for + # @return [Sidekiq::JobRecord] + # @return [nil] if not found def find_job(jid) detect { |j| j.jid == jid } end + # delete all jobs within this queue + # @return [Boolean] true def clear Sidekiq.redis do |conn| conn.multi do |transaction| transaction.unlink(@rname) - transaction.srem("queues", name) + transaction.srem("queues", [name]) end end + true end alias_method :💣, :clear + + # :nodoc: + # @api private + def as_json(options = nil) + {name: name} # 5336 + end end ## - # Encapsulates a pending job within a Sidekiq queue or - # sorted set. + # Represents a pending job within a Sidekiq queue. # # The job should be considered immutable but may be # removed from the queue via JobRecord#delete. - # class JobRecord + # the parsed Hash of job data + # @!attribute [r] Item attr_reader :item + # the underlying String in Redis + # @!attribute [r] Value attr_reader :value + # the queue associated with this job + # @!attribute [r] Queue + attr_reader :queue + # :nodoc: + # @api private def initialize(item, queue_name = nil) @args = nil @value = item @@ -314,6 +366,8 @@ @queue = queue_name || @item["queue"] end + # :nodoc: + # @api private def parse(item) Sidekiq.load_json(item) rescue JSON::ParserError @@ -325,6 +379,8 @@ {} end + # This is the job class which Sidekiq will execute. If using ActiveJob, + # this class will be the ActiveJob adapter class rather than a specific job. def klass self["class"] end @@ -354,31 +410,31 @@ def display_args # Unwrap known wrappers so they show up in a human-friendly manner in the Web UI @display_args ||= case klass - when /\ASidekiq::Extensions::Delayed/ - safe_load(args[0], args) do |_, _, arg, kwarg| - if !kwarg || kwarg.empty? - arg - else - [arg, kwarg] - end - end - when "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper" - job_args = self["wrapped"] ? 
args[0]["arguments"] : [] - if (self["wrapped"] || args[0]) == "ActionMailer::DeliveryJob" - # remove MailerClass, mailer_method and 'deliver_now' - job_args.drop(3) - elsif (self["wrapped"] || args[0]) == "ActionMailer::MailDeliveryJob" - # remove MailerClass, mailer_method and 'deliver_now' - job_args.drop(3).first["args"] - else - job_args - end - else - if self["encrypt"] - # no point in showing 150+ bytes of random garbage - args[-1] = "[encrypted data]" - end - args + when /\ASidekiq::Extensions::Delayed/ + safe_load(args[0], args) do |_, _, arg, kwarg| + if !kwarg || kwarg.empty? + arg + else + [arg, kwarg] + end + end + when "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper" + job_args = self["wrapped"] ? args[0]["arguments"] : [] + if (self["wrapped"] || args[0]) == "ActionMailer::DeliveryJob" + # remove MailerClass, mailer_method and 'deliver_now' + job_args.drop(3) + elsif (self["wrapped"] || args[0]) == "ActionMailer::MailDeliveryJob" + # remove MailerClass, mailer_method and 'deliver_now' + job_args.drop(3).first["args"] + else + job_args + end + else + if self["encrypt"] + # no point in showing 150+ bytes of random garbage + args[-1] = "[encrypted data]" + end + args end end @@ -412,15 +468,12 @@ end end - attr_reader :queue - def latency now = Time.now.to_f now - (@item["enqueued_at"] || @item["created_at"] || now) end - ## - # Remove this job from the queue. + # Remove this job from the queue def delete count = Sidekiq.redis { |conn| conn.lrem("queue:#{@queue}", 1, @value) @@ -428,6 +481,7 @@ count != 0 end + # Access arbitrary attributes within the job hash def [](name) # nil will happen if the JSON fails to parse. # We don't guarantee Sidekiq will work with bad job JSON but we should @@ -442,6 +496,7 @@ rescue => ex # #1761 in dev mode, it's possible to have jobs enqueued which haven't been loaded into # memory yet so the YAML can't be loaded. + # TODO is this still necessary? Zeitwerk reloader should handle? 
Sidekiq.logger.warn "Unable to load YAML: #{ex.message}" unless Sidekiq.options[:environment] == "development" default end @@ -464,20 +519,28 @@ end end + # Represents a job within a Redis sorted set where the score + # represents a timestamp associated with the job. This timestamp + # could be the scheduled time for it to run (e.g. scheduled set), + # or the expiration date after which the entry should be deleted (e.g. dead set). class SortedEntry < JobRecord attr_reader :score attr_reader :parent + # :nodoc: + # @api private def initialize(parent, score, item) super(item) - @score = score + @score = Float(score) @parent = parent end + # The timestamp associated with this entry def at Time.at(score).utc end + # remove this entry from the sorted set def delete if @value @parent.delete_by_value(@parent.name, @value) @@ -486,12 +549,17 @@ end end + # Change the scheduled time for this job. + # + # @param at [Time] the new timestamp for this job def reschedule(at) Sidekiq.redis do |conn| conn.zincrby(@parent.name, at.to_f - @score, Sidekiq.dump_json(@item)) end end + # Enqueue this job from the scheduled or dead set so it will + # be executed at some point in the near future. def add_to_queue remove_job do |message| msg = Sidekiq.load_json(message) @@ -499,6 +567,8 @@ end end + # enqueue this job from the retry set so it will be executed + # at some point in the near future. def retry remove_job do |message| msg = Sidekiq.load_json(message) @@ -507,8 +577,7 @@ end end - ## - # Place job in the dead set + # Move this job from its current set into the Dead set. def kill remove_job do |message| DeadSet.new.kill(message) @@ -556,20 +625,32 @@ end end + # Base class for all sorted sets within Sidekiq. 
class SortedSet include Enumerable + # Redis key of the set + # @!attribute [r] Name attr_reader :name + # :nodoc: + # @api private def initialize(name) @name = name @_size = size end + # real-time size of the set, will change def size Sidekiq.redis { |c| c.zcard(name) } end + # Scan through each element of the sorted set, yielding each to the supplied block. + # Please see Redis's SCAN documentation for implementation details. + # + # @param match [String] a snippet or regexp to filter matches. + # @param count [Integer] number of elements to retrieve at a time, default 100 + # @yieldparam [Sidekiq::SortedEntry] each entry def scan(match, count = 100) return to_enum(:scan, match, count) unless block_given? @@ -581,18 +662,32 @@ end end + # @return [Boolean] always true def clear Sidekiq.redis do |conn| conn.unlink(name) end + true end alias_method :💣, :clear + + # :nodoc: + # @api private + def as_json(options = nil) + {name: name} # 5336 + end end + # Base class for all sorted sets which contain jobs, e.g. scheduled, retry and dead. + # Sidekiq Pro and Enterprise add additional sorted sets which do not contain job data, + # e.g. Batches. class JobSet < SortedSet - def schedule(timestamp, message) + # Add a job with the associated timestamp to this set. + # @param timestamp [Time] the score for the job + # @param job [Hash] the job data + def schedule(timestamp, job) Sidekiq.redis do |conn| - conn.zadd(name, timestamp.to_f.to_s, Sidekiq.dump_json(message)) + conn.zadd(name, timestamp.to_f.to_s, Sidekiq.dump_json(job)) end end @@ -606,7 +701,7 @@ range_start = page * page_size + offset_size range_end = range_start + page_size - 1 elements = Sidekiq.redis { |conn| - conn.zrange name, range_start, range_end, with_scores: true + conn.zrange name, range_start, range_end, withscores: true } break if elements.empty? page -= 1 @@ -620,6 +715,10 @@ ## # Fetch jobs that match a given time or Range. Job ID is an # optional second argument. 
+ # + # @param score [Time,Range] a specific timestamp or range + # @param jid [String, optional] find a specific JID within the score + # @return [Array] any results found, can be empty def fetch(score, jid = nil) begin_score, end_score = if score.is_a?(Range) @@ -629,7 +728,7 @@ end elements = Sidekiq.redis { |conn| - conn.zrangebyscore(name, begin_score, end_score, with_scores: true) + conn.zrangebyscore(name, begin_score, end_score, withscores: true) } elements.each_with_object([]) do |element, result| @@ -641,7 +740,10 @@ ## # Find the job with the given JID within this sorted set. - # This is a slower O(n) operation. Do not use for app logic. + # *This is a slow O(n) operation*. Do not use for app logic. + # + # @param jid [String] the job identifier + # @return [SortedEntry] the record or nil def find_job(jid) Sidekiq.redis do |conn| conn.zscan_each(name, match: "*#{jid}*", count: 100) do |entry, score| @@ -653,6 +755,8 @@ nil end + # :nodoc: + # @api private def delete_by_value(name, value) Sidekiq.redis do |conn| ret = conn.zrem(name, value) @@ -661,6 +765,8 @@ end end + # :nodoc: + # @api private def delete_by_jid(score, jid) Sidekiq.redis do |conn| elements = conn.zrangebyscore(name, score, score) @@ -681,10 +787,10 @@ end ## - # Allows enumeration of scheduled jobs within Sidekiq. + # The set of scheduled jobs within Sidekiq. # Based on this, you can search/filter for jobs. Here's an - # example where I'm selecting all jobs of a certain type - # and deleting them from the schedule queue. + # example where I'm selecting jobs based on some complex logic + # and deleting them from the scheduled set. # # r = Sidekiq::ScheduledSet.new # r.select do |scheduled| @@ -699,7 +805,7 @@ end ## - # Allows enumeration of retries within Sidekiq. + # The set of retries within Sidekiq. # Based on this, you can search/filter for jobs. Here's an # example where I'm selecting all jobs of a certain type # and deleting them from the retry queue. 
@@ -715,23 +821,29 @@ super "retry" end + # Enqueues all jobs pending within the retry set. def retry_all each(&:retry) while size > 0 end + # Kills all jobs pending within the retry set. def kill_all each(&:kill) while size > 0 end end ## - # Allows enumeration of dead jobs within Sidekiq. + # The set of dead jobs within Sidekiq. Dead jobs have failed all of + # their retries and are helding in this set pending some sort of manual + # fix. They will be removed after 6 months (dead_timeout) if not. # class DeadSet < JobSet def initialize super "dead" end + # Add the given job to the Dead set. + # @param message [String] the job data as JSON def kill(message, opts = {}) now = Time.now.to_f Sidekiq.redis do |conn| @@ -753,16 +865,21 @@ true end + # Enqueue all dead jobs def retry_all each(&:retry) while size > 0 end + # The maximum size of the Dead set. Older entries will be trimmed + # to stay within this limit. Default value is 10,000. def self.max_jobs - Sidekiq.options[:dead_max_jobs] + Sidekiq[:dead_max_jobs] end + # The time limit for entries within the Dead set. Older entries will be thrown away. + # Default value is six months. def self.timeout - Sidekiq.options[:dead_timeout_in_seconds] + Sidekiq[:dead_timeout_in_seconds] end end @@ -771,21 +888,28 @@ # right now. Each process sends a heartbeat to Redis every 5 seconds # so this set should be relatively accurate, barring network partitions. # - # Yields a Sidekiq::Process. + # @yieldparam [Sidekiq::Process] # class ProcessSet include Enumerable + # :nodoc: + # @api private def initialize(clean_plz = true) cleanup if clean_plz end # Cleans up dead processes recorded in Redis. # Returns the number of processes cleaned. 
+ # :nodoc: + # @api private def cleanup + # dont run cleanup more than once per minute + return 0 unless Sidekiq.redis { |conn| conn.set("process_cleanup", "1", nx: true, ex: 60) } + count = 0 Sidekiq.redis do |conn| - procs = conn.sscan_each("processes").to_a.sort + procs = conn.sscan_each("processes").to_a heartbeats = conn.pipelined { |pipeline| procs.each do |key| pipeline.hget(key, "info") @@ -836,6 +960,7 @@ # based on current heartbeat. #each does that and ensures the set only # contains Sidekiq processes which have sent a heartbeat within the last # 60 seconds. + # @return [Integer] current number of registered Sidekiq processes def size Sidekiq.redis { |conn| conn.scard("processes") } end @@ -843,10 +968,12 @@ # Total number of threads available to execute jobs. # For Sidekiq Enterprise customers this number (in production) must be # less than or equal to your licensed concurrency. + # @return [Integer] the sum of process concurrency def total_concurrency sum { |x| x["concurrency"].to_i } end + # @return [Integer] total amount of RSS memory consumed by Sidekiq processes def total_rss_in_kb sum { |x| x["rss"].to_i } end @@ -855,6 +982,8 @@ # Returns the identity of the current cluster leader or "" if no leader. # This is a Sidekiq Enterprise feature, will always return "" in Sidekiq # or Sidekiq Pro. + # @return [String] Identity of cluster leader + # @return [String] empty string if no leader def leader @leader ||= begin x = Sidekiq.redis { |c| c.get("dear-leader") } @@ -881,6 +1010,8 @@ # 'identity' => , # } class Process + # :nodoc: + # @api private def initialize(hash) @attribs = hash end @@ -905,18 +1036,31 @@ self["queues"] end + # Signal this process to stop processing new jobs. + # It will continue to execute jobs it has already fetched. + # This method is *asynchronous* and it can take 5-10 + # seconds for the process to quiet. def quiet! signal("TSTP") end + # Signal this process to shutdown. 
+ # It will shutdown within its configured :timeout value, default 25 seconds. + # This method is *asynchronous* and it can take 5-10 + # seconds for the process to start shutting down. def stop! signal("TERM") end + # Signal this process to log backtraces for all threads. + # Useful if you have a frozen or deadlocked process which is + # still sending a heartbeat. + # This method is *asynchronous* and it can take 5-10 seconds. def dump_threads signal("TTIN") end + # @return [Boolean] true if this process is quiet or shutting down def stopping? self["quiet"] == "true" end @@ -964,7 +1108,7 @@ procs.sort.each do |key| valid, workers = conn.pipelined { |pipeline| pipeline.exists?(key) - pipeline.hgetall("#{key}:workers") + pipeline.hgetall("#{key}:work") } next unless valid workers.each_pair do |tid, json| diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/client.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/client.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/client.rb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/client.rb 2022-10-26 06:38:14.000000000 +0000 @@ -15,7 +15,7 @@ # client.middleware do |chain| # chain.use MyClientMiddleware # end - # client.push('class' => 'SomeWorker', 'args' => [1,2,3]) + # client.push('class' => 'SomeJob', 'args' => [1,2,3]) # # All client instances default to the globally-defined # Sidekiq.client_middleware but you can change as necessary. @@ -49,16 +49,16 @@ # The main method used to push a job to Redis. Accepts a number of options: # # queue - the named queue to use, default 'default' - # class - the worker class to call, required + # class - the job class to call, required # args - an array of simple arguments to the perform method, must be JSON-serializable # at - timestamp to schedule the job (optional), must be Numeric (e.g. 
Time.now.to_f) # retry - whether to retry this job if it fails, default true or an integer number of retries # backtrace - whether to save any error backtrace, default false # # If class is set to the class name, the jobs' options will be based on Sidekiq's default - # worker options. Otherwise, they will be based on the job class's options. + # job options. Otherwise, they will be based on the job class's options. # - # Any options valid for a worker class's sidekiq_options are also available here. + # Any options valid for a job class's sidekiq_options are also available here. # # All options must be strings, not symbols. NB: because we are serializing to JSON, all # symbols in 'args' will be converted to strings. Note that +backtrace: true+ can take quite a bit of @@ -67,13 +67,15 @@ # Returns a unique Job ID. If middleware stops the job, nil will be returned instead. # # Example: - # push('queue' => 'my_queue', 'class' => MyWorker, 'args' => ['foo', 1, :bat => 'bar']) + # push('queue' => 'my_queue', 'class' => MyJob, 'args' => ['foo', 1, :bat => 'bar']) # def push(item) normed = normalize_item(item) - payload = process_single(item["class"], normed) - + payload = middleware.invoke(item["class"], normed, normed["queue"], @redis_pool) do + normed + end if payload + verify_json(payload) raw_push([payload]) payload["jid"] end @@ -101,12 +103,17 @@ raise ArgumentError, "Job 'at' must be a Numeric or an Array of Numeric timestamps" if at && (Array(at).empty? || !Array(at).all? { |entry| entry.is_a?(Numeric) }) raise ArgumentError, "Job 'at' Array must have same size as 'args' Array" if at.is_a?(Array) && at.size != args.size + jid = items.delete("jid") + raise ArgumentError, "Explicitly passing 'jid' when pushing more than one job is not supported" if jid && args.size > 1 + normed = normalize_item(items) payloads = args.map.with_index { |job_args, index| copy = normed.merge("args" => job_args, "jid" => SecureRandom.hex(12)) copy["at"] = (at.is_a?(Array) ? 
at[index] : at) if at - - result = process_single(items["class"], copy) + result = middleware.invoke(items["class"], copy, copy["queue"], @redis_pool) do + verify_json(copy) + copy + end result || nil }.compact @@ -119,8 +126,8 @@ # # pool = ConnectionPool.new { Redis.new } # Sidekiq::Client.via(pool) do - # SomeWorker.perform_async(1,2,3) - # SomeOtherWorker.perform_async(1,2,3) + # SomeJob.perform_async(1,2,3) + # SomeOtherJob.perform_async(1,2,3) # end # # Generally this is only needed for very large Sidekiq installs processing @@ -145,10 +152,10 @@ end # Resque compatibility helpers. Note all helpers - # should go through Worker#client_push. + # should go through Sidekiq::Job#client_push. # # Example usage: - # Sidekiq::Client.enqueue(MyWorker, 'foo', 1, :bat => 'bar') + # Sidekiq::Client.enqueue(MyJob, 'foo', 1, :bat => 'bar') # # Messages are enqueued to the 'default' queue. # @@ -157,14 +164,14 @@ end # Example usage: - # Sidekiq::Client.enqueue_to(:queue_name, MyWorker, 'foo', 1, :bat => 'bar') + # Sidekiq::Client.enqueue_to(:queue_name, MyJob, 'foo', 1, :bat => 'bar') # def enqueue_to(queue, klass, *args) klass.client_push("queue" => queue, "class" => klass, "args" => args) end # Example usage: - # Sidekiq::Client.enqueue_to_in(:queue_name, 3.minutes, MyWorker, 'foo', 1, :bat => 'bar') + # Sidekiq::Client.enqueue_to_in(:queue_name, 3.minutes, MyJob, 'foo', 1, :bat => 'bar') # def enqueue_to_in(queue, interval, klass, *args) int = interval.to_f @@ -178,7 +185,7 @@ end # Example usage: - # Sidekiq::Client.enqueue_in(3.minutes, MyWorker, 'foo', 1, :bat => 'bar') + # Sidekiq::Client.enqueue_in(3.minutes, MyJob, 'foo', 1, :bat => 'bar') # def enqueue_in(interval, klass, *args) klass.perform_in(interval, *args) @@ -194,7 +201,7 @@ conn.pipelined do |pipeline| atomic_push(pipeline, payloads) end - rescue Redis::BaseError => ex + rescue RedisConnection.adapter::BaseError => ex # 2550 Failover can cause the server to become a replica, need # to disconnect and 
reopen the socket to get back to the primary. # 4495 Use the same logic if we have a "Not enough replicas" error from the primary @@ -213,7 +220,7 @@ def atomic_push(conn, payloads) if payloads.first.key?("at") - conn.zadd("schedule", payloads.map { |hash| + conn.zadd("schedule", payloads.flat_map { |hash| at = hash.delete("at").to_s [at, Sidekiq.dump_json(hash)] }) @@ -224,17 +231,9 @@ entry["enqueued_at"] = now Sidekiq.dump_json(entry) } - conn.sadd("queues", queue) + conn.sadd("queues", [queue]) conn.lpush("queue:#{queue}", to_push) end end - - def process_single(worker_class, item) - queue = item["queue"] - - middleware.invoke(worker_class, item, queue, @redis_pool) do - item - end - end end end diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/cli.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/cli.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/cli.rb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/cli.rb 2022-10-26 06:38:14.000000000 +0000 @@ -9,18 +9,23 @@ require "fileutils" require "sidekiq" +require "sidekiq/component" require "sidekiq/launcher" -require "sidekiq/util" -module Sidekiq +module Sidekiq # :nodoc: class CLI - include Util + include Sidekiq::Component include Singleton unless $TESTING attr_accessor :launcher attr_accessor :environment + attr_accessor :config + + def parse(args = ARGV.dup) + @config = Sidekiq + @config[:error_handlers].clear + @config[:error_handlers] << @config.method(:default_error_handler) - def parse(args = ARGV) setup_options(args) initialize_logger validate! @@ -36,7 +41,7 @@ def run(boot_app: true) boot_application if boot_app - if environment == "development" && $stdout.tty? && Sidekiq.log_formatter.is_a?(Sidekiq::Logger::Formatters::Pretty) + if environment == "development" && $stdout.tty? && @config.log_formatter.is_a?(Sidekiq::Logger::Formatters::Pretty) print_banner end logger.info "Booted Rails #{::Rails.version} application in #{environment} environment" if rails_app? 
@@ -67,7 +72,7 @@ # touch the connection pool so it is created before we # fire startup and start multithreading. - info = Sidekiq.redis_info + info = @config.redis_info ver = info["redis_version"] raise "You are connecting to Redis v#{ver}, Sidekiq requires Redis v4.0.0 or greater" if ver < "4" @@ -85,22 +90,22 @@ # Since the user can pass us a connection pool explicitly in the initializer, we # need to verify the size is large enough or else Sidekiq's performance is dramatically slowed. - cursize = Sidekiq.redis_pool.size - needed = Sidekiq.options[:concurrency] + 2 + cursize = @config.redis_pool.size + needed = @config[:concurrency] + 2 raise "Your pool of #{cursize} Redis connections is too small, please increase the size to at least #{needed}" if cursize < needed # cache process identity - Sidekiq.options[:identity] = identity + @config[:identity] = identity # Touch middleware so it isn't lazy loaded by multiple threads, #3043 - Sidekiq.server_middleware + @config.server_middleware # Before this point, the process is initializing with just the main thread. # Starting here the process will now have multiple threads running. 
fire_event(:startup, reverse: false, reraise: true) - logger.debug { "Client Middleware: #{Sidekiq.client_middleware.map(&:klass).join(", ")}" } - logger.debug { "Server Middleware: #{Sidekiq.server_middleware.map(&:klass).join(", ")}" } + logger.debug { "Client Middleware: #{@config.client_middleware.map(&:klass).join(", ")}" } + logger.debug { "Server Middleware: #{@config.server_middleware.map(&:klass).join(", ")}" } launch(self_read) end @@ -110,13 +115,13 @@ logger.info "Starting processing, hit Ctrl-C to stop" end - @launcher = Sidekiq::Launcher.new(options) + @launcher = Sidekiq::Launcher.new(@config) begin launcher.run - while (readable_io = self_read.wait_readable) - signal = readable_io.gets.strip + while self_read.wait_readable + signal = self_read.gets.strip handle_signal(signal) end rescue Interrupt @@ -173,25 +178,25 @@ # Heroku sends TERM and then waits 30 seconds for process to exit. "TERM" => ->(cli) { raise Interrupt }, "TSTP" => ->(cli) { - Sidekiq.logger.info "Received TSTP, no longer accepting new work" + cli.logger.info "Received TSTP, no longer accepting new work" cli.launcher.quiet }, "TTIN" => ->(cli) { Thread.list.each do |thread| - Sidekiq.logger.warn "Thread TID-#{(thread.object_id ^ ::Process.pid).to_s(36)} #{thread.name}" + cli.logger.warn "Thread TID-#{(thread.object_id ^ ::Process.pid).to_s(36)} #{thread.name}" if thread.backtrace - Sidekiq.logger.warn thread.backtrace.join("\n") + cli.logger.warn thread.backtrace.join("\n") else - Sidekiq.logger.warn "" + cli.logger.warn "" end end } } - UNHANDLED_SIGNAL_HANDLER = ->(cli) { Sidekiq.logger.info "No signal handler registered, ignoring" } + UNHANDLED_SIGNAL_HANDLER = ->(cli) { cli.logger.info "No signal handler registered, ignoring" } SIGNAL_HANDLERS.default = UNHANDLED_SIGNAL_HANDLER def handle_signal(sig) - Sidekiq.logger.debug "Got #{sig} signal" + logger.debug "Got #{sig} signal" SIGNAL_HANDLERS[sig].call(self) end @@ -237,7 +242,7 @@ config_dir = if 
File.directory?(opts[:require].to_s) File.join(opts[:require], "config") else - File.join(options[:require], "config") + File.join(@config[:require], "config") end %w[sidekiq.yml sidekiq.yml.erb].each do |config_file| @@ -254,27 +259,23 @@ opts[:concurrency] = Integer(ENV["RAILS_MAX_THREADS"]) if opts[:concurrency].nil? && ENV["RAILS_MAX_THREADS"] # merge with defaults - options.merge!(opts) - end - - def options - Sidekiq.options + @config.merge!(opts) end def boot_application ENV["RACK_ENV"] = ENV["RAILS_ENV"] = environment - if File.directory?(options[:require]) + if File.directory?(@config[:require]) require "rails" if ::Rails::VERSION::MAJOR < 5 raise "Sidekiq no longer supports this version of Rails" else require "sidekiq/rails" - require File.expand_path("#{options[:require]}/config/environment.rb") + require File.expand_path("#{@config[:require]}/config/environment.rb") end - options[:tag] ||= default_tag + @config[:tag] ||= default_tag else - require options[:require] + require @config[:require] end end @@ -291,18 +292,18 @@ end def validate! - if !File.exist?(options[:require]) || - (File.directory?(options[:require]) && !File.exist?("#{options[:require]}/config/application.rb")) + if !File.exist?(@config[:require]) || + (File.directory?(@config[:require]) && !File.exist?("#{@config[:require]}/config/application.rb")) logger.info "==================================================================" logger.info " Please point Sidekiq to a Rails application or a Ruby file " - logger.info " to load your worker classes with -r [DIR|FILE]." + logger.info " to load your job classes with -r [DIR|FILE]." 
logger.info "==================================================================" logger.info @parser die(1) end [:concurrency, :timeout].each do |opt| - raise ArgumentError, "#{opt}: #{options[opt]} is not a valid value" if options.key?(opt) && options[opt].to_i <= 0 + raise ArgumentError, "#{opt}: #{@config[opt]} is not a valid value" if @config[opt].to_i <= 0 end end @@ -336,7 +337,7 @@ parse_queue opts, queue, weight end - o.on "-r", "--require [PATH|DIR]", "Location of Rails application with workers or file to require" do |arg| + o.on "-r", "--require [PATH|DIR]", "Location of Rails application with jobs or file to require" do |arg| opts[:require] = arg end @@ -376,7 +377,7 @@ end def initialize_logger - Sidekiq.logger.level = ::Logger::DEBUG if options[:verbose] + @config.logger.level = ::Logger::DEBUG if @config[:verbose] end def parse_config(path) @@ -425,3 +426,4 @@ end require "sidekiq/systemd" +require "sidekiq/metrics/tracking" if ENV["SIDEKIQ_METRICS_BETA"] diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/component.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/component.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/component.rb 1970-01-01 00:00:00.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/component.rb 2022-10-26 06:38:14.000000000 +0000 @@ -0,0 +1,65 @@ +module Sidekiq + ## + # Sidekiq::Component assumes a config instance is available at @config + module Component # :nodoc: + attr_reader :config + + def watchdog(last_words) + yield + rescue Exception => ex + handle_exception(ex, {context: last_words}) + raise ex + end + + def safe_thread(name, &block) + Thread.new do + Thread.current.name = name + watchdog(name, &block) + end + end + + def logger + config.logger + end + + def redis(&block) + config.redis(&block) + end + + def tid + Thread.current["sidekiq_tid"] ||= (Thread.current.object_id ^ ::Process.pid).to_s(36) + end + + def hostname + ENV["DYNO"] || Socket.gethostname + end + + def process_nonce + @@process_nonce ||= SecureRandom.hex(6) + end + 
+ def identity + @@identity ||= "#{hostname}:#{::Process.pid}:#{process_nonce}" + end + + def handle_exception(ex, ctx = {}) + config.handle_exception(ex, ctx) + end + + def fire_event(event, options = {}) + oneshot = options.fetch(:oneshot, true) + reverse = options[:reverse] + reraise = options[:reraise] + + arr = config[:lifecycle_events][event] + arr.reverse! if reverse + arr.each do |block| + block.call + rescue => ex + handle_exception(ex, {context: "Exception during Sidekiq lifecycle event.", event: event}) + raise ex if reraise + end + arr.clear if oneshot # once we've fired an event, we never fire it again + end + end +end diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/delay.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/delay.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/delay.rb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/delay.rb 2022-10-26 06:38:14.000000000 +0000 @@ -1,6 +1,6 @@ # frozen_string_literal: true -module Sidekiq +module Sidekiq # :nodoc: module Extensions def self.enable_delay! warn "Sidekiq's Delayed Extensions will be removed in Sidekiq 7.0", uplevel: 1 diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/exception_handler.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/exception_handler.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/exception_handler.rb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/exception_handler.rb 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -# frozen_string_literal: true - -require "sidekiq" - -module Sidekiq - module ExceptionHandler - class Logger - def call(ex, ctx) - Sidekiq.logger.warn(Sidekiq.dump_json(ctx)) unless ctx.empty? - Sidekiq.logger.warn("#{ex.class.name}: #{ex.message}") - Sidekiq.logger.warn(ex.backtrace.join("\n")) unless ex.backtrace.nil? 
- end - - Sidekiq.error_handlers << Sidekiq::ExceptionHandler::Logger.new - end - - def handle_exception(ex, ctx = {}) - Sidekiq.error_handlers.each do |handler| - handler.call(ex, ctx) - rescue => ex - Sidekiq.logger.error "!!! ERROR HANDLER THREW AN ERROR !!!" - Sidekiq.logger.error ex - Sidekiq.logger.error ex.backtrace.join("\n") unless ex.backtrace.nil? - end - end - end -end diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/extensions/generic_proxy.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/extensions/generic_proxy.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/extensions/generic_proxy.rb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/extensions/generic_proxy.rb 2022-10-26 06:38:14.000000000 +0000 @@ -10,7 +10,7 @@ def initialize(performable, target, options = {}) @performable = performable @target = target - @opts = options + @opts = options.transform_keys(&:to_s) end def method_missing(name, *args) diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/fetch.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/fetch.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/fetch.rb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/fetch.rb 2022-10-26 06:38:14.000000000 +0000 @@ -1,14 +1,16 @@ # frozen_string_literal: true require "sidekiq" +require "sidekiq/component" -module Sidekiq +module Sidekiq # :nodoc: class BasicFetch + include Sidekiq::Component # We want the fetch operation to timeout every few seconds so the thread # can check if the process is shutting down. 
TIMEOUT = 2 - UnitOfWork = Struct.new(:queue, :job) { + UnitOfWork = Struct.new(:queue, :job, :config) { def acknowledge # nothing to do end @@ -18,20 +20,20 @@ end def requeue - Sidekiq.redis do |conn| + config.redis do |conn| conn.rpush(queue, job) end end } - def initialize(options) - raise ArgumentError, "missing queue list" unless options[:queues] - @options = options - @strictly_ordered_queues = !!@options[:strict] - @queues = @options[:queues].map { |q| "queue:#{q}" } + def initialize(config) + raise ArgumentError, "missing queue list" unless config[:queues] + @config = config + @strictly_ordered_queues = !!@config[:strict] + @queues = @config[:queues].map { |q| "queue:#{q}" } if @strictly_ordered_queues @queues.uniq! - @queues << TIMEOUT + @queues << {timeout: TIMEOUT} end end @@ -44,30 +46,30 @@ return nil end - work = Sidekiq.redis { |conn| conn.brpop(*qs) } - UnitOfWork.new(*work) if work + queue, job = redis { |conn| conn.brpop(*qs) } + UnitOfWork.new(queue, job, config) if queue end def bulk_requeue(inprogress, options) return if inprogress.empty? - Sidekiq.logger.debug { "Re-queueing terminated jobs" } + logger.debug { "Re-queueing terminated jobs" } jobs_to_requeue = {} inprogress.each do |unit_of_work| jobs_to_requeue[unit_of_work.queue] ||= [] jobs_to_requeue[unit_of_work.queue] << unit_of_work.job end - Sidekiq.redis do |conn| + redis do |conn| conn.pipelined do |pipeline| jobs_to_requeue.each do |queue, jobs| pipeline.rpush(queue, jobs) end end end - Sidekiq.logger.info("Pushed #{inprogress.size} jobs back to Redis") + logger.info("Pushed #{inprogress.size} jobs back to Redis") rescue => ex - Sidekiq.logger.warn("Failed to requeue #{inprogress.size} jobs: #{ex.message}") + logger.warn("Failed to requeue #{inprogress.size} jobs: #{ex.message}") end # Creating the Redis#brpop command takes into account any @@ -81,7 +83,7 @@ else permute = @queues.shuffle permute.uniq! 
- permute << TIMEOUT + permute << {timeout: TIMEOUT} permute end end diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/job_retry.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/job_retry.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/job_retry.rb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/job_retry.rb 2022-10-26 06:38:14.000000000 +0000 @@ -1,10 +1,8 @@ # frozen_string_literal: true -require "sidekiq/scheduled" -require "sidekiq/api" - require "zlib" require "base64" +require "sidekiq/component" module Sidekiq ## @@ -25,11 +23,11 @@ # # A job looks like: # - # { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => true } + # { 'class' => 'HardJob', 'args' => [1, 2, 'foo'], 'retry' => true } # # The 'retry' option also accepts a number (in place of 'true'): # - # { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => 5 } + # { 'class' => 'HardJob', 'args' => [1, 2, 'foo'], 'retry' => 5 } # # The job will be retried this number of times before giving up. (If simply # 'true', Sidekiq retries 25 times) @@ -53,11 +51,11 @@ # # Sidekiq.options[:max_retries] = 7 # - # or limit the number of retries for a particular worker and send retries to + # or limit the number of retries for a particular job and send retries to # a low priority queue with: # - # class MyWorker - # include Sidekiq::Worker + # class MyJob + # include Sidekiq::Job # sidekiq_options retry: 10, retry_queue: 'low' # end # @@ -66,17 +64,18 @@ class Skip < Handled; end - include Sidekiq::Util + include Sidekiq::Component DEFAULT_MAX_RETRY_ATTEMPTS = 25 - def initialize(options = {}) - @max_retries = Sidekiq.options.merge(options).fetch(:max_retries, DEFAULT_MAX_RETRY_ATTEMPTS) + def initialize(options) + @config = options + @max_retries = @config[:max_retries] || DEFAULT_MAX_RETRY_ATTEMPTS end # The global retry handler requires only the barest of data. # We want to be able to retry as much as possible so we don't - # require the worker to be instantiated. 
+ # require the job to be instantiated. def global(jobstr, queue) yield rescue Handled => ex @@ -90,7 +89,7 @@ msg = Sidekiq.load_json(jobstr) if msg["retry"] - attempt_retry(nil, msg, queue, e) + process_retry(nil, msg, queue, e) else Sidekiq.death_handlers.each do |handler| handler.call(msg, e) @@ -103,14 +102,14 @@ end # The local retry support means that any errors that occur within - # this block can be associated with the given worker instance. + # this block can be associated with the given job instance. # This is required to support the `sidekiq_retries_exhausted` block. # # Note that any exception from the block is wrapped in the Skip # exception so the global block does not reprocess the error. The # Skip exception is unwrapped within Sidekiq::Processor#process before # calling the handle_exception handlers. - def local(worker, jobstr, queue) + def local(jobinst, jobstr, queue) yield rescue Handled => ex raise ex @@ -123,11 +122,11 @@ msg = Sidekiq.load_json(jobstr) if msg["retry"].nil? - msg["retry"] = worker.class.get_sidekiq_options["retry"] + msg["retry"] = jobinst.class.get_sidekiq_options["retry"] end raise e unless msg["retry"] - attempt_retry(worker, msg, queue, e) + process_retry(jobinst, msg, queue, e) # We've handled this error associated with this job, don't # need to handle it at the global level raise Skip @@ -135,10 +134,10 @@ private - # Note that +worker+ can be nil here if an error is raised before we can - # instantiate the worker instance. All access must be guarded and + # Note that +jobinst+ can be nil here if an error is raised before we can + # instantiate the job instance. All access must be guarded and # best effort. 
- def attempt_retry(worker, msg, queue, exception) + def process_retry(jobinst, msg, queue, exception) max_retry_attempts = retry_attempts_from(msg["retry"], @max_retries) msg["queue"] = (msg["retry_queue"] || queue) @@ -169,24 +168,54 @@ msg["error_backtrace"] = compress_backtrace(lines) end - if count < max_retry_attempts - delay = delay_for(worker, count, exception) - # Logging here can break retries if the logging device raises ENOSPC #3979 - # logger.debug { "Failure! Retry #{count} in #{delay} seconds" } - retry_at = Time.now.to_f + delay - payload = Sidekiq.dump_json(msg) - Sidekiq.redis do |conn| - conn.zadd("retry", retry_at.to_s, payload) - end - else - # Goodbye dear message, you (re)tried your best I'm sure. - retries_exhausted(worker, msg, exception) + # Goodbye dear message, you (re)tried your best I'm sure. + return retries_exhausted(jobinst, msg, exception) if count >= max_retry_attempts + + strategy, delay = delay_for(jobinst, count, exception) + case strategy + when :discard + return # poof! + when :kill + return retries_exhausted(jobinst, msg, exception) + end + + # Logging here can break retries if the logging device raises ENOSPC #3979 + # logger.debug { "Failure! Retry #{count} in #{delay} seconds" } + jitter = rand(10) * (count + 1) + retry_at = Time.now.to_f + delay + jitter + payload = Sidekiq.dump_json(msg) + redis do |conn| + conn.zadd("retry", retry_at.to_s, payload) + end + end + + # returns (strategy, seconds) + def delay_for(jobinst, count, exception) + rv = begin + # sidekiq_retry_in can return two different things: + # 1. When to retry next, as an integer of seconds + # 2. A symbol which re-routes the job elsewhere, e.g. 
:discard, :kill, :default + jobinst&.sidekiq_retry_in_block&.call(count, exception) + rescue Exception => e + handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{jobinst.class.name}, falling back to default"}) + nil + end + + delay = (count**4) + 15 + if Integer === rv && rv > 0 + delay = rv + elsif rv == :discard + return [:discard, nil] # do nothing, job goes poof + elsif rv == :kill + return [:kill, nil] end + + [:default, delay] end - def retries_exhausted(worker, msg, exception) + def retries_exhausted(jobinst, msg, exception) begin - block = worker&.sidekiq_retries_exhausted_block + block = jobinst&.sidekiq_retries_exhausted_block block&.call(msg, exception) rescue => e handle_exception(e, {context: "Error calling retries_exhausted", job: msg}) @@ -194,7 +223,7 @@ send_to_morgue(msg) unless msg["dead"] == false - Sidekiq.death_handlers.each do |handler| + config.death_handlers.each do |handler| handler.call(msg, exception) rescue => e handle_exception(e, {context: "Error calling death handler", job: msg}) @@ -204,7 +233,15 @@ def send_to_morgue(msg) logger.info { "Adding dead #{msg["class"]} job #{msg["jid"]}" } payload = Sidekiq.dump_json(msg) - DeadSet.new.kill(payload, notify_failure: false) + now = Time.now.to_f + + config.redis do |conn| + conn.multi do |xa| + xa.zadd("dead", now.to_s, payload) + xa.zremrangebyscore("dead", "-inf", now - config[:dead_timeout_in_seconds]) + xa.zremrangebyrank("dead", 0, - config[:dead_max_jobs]) + end + end end def retry_attempts_from(msg_retry, default) @@ -215,22 +252,6 @@ end end - def delay_for(worker, count, exception) - jitter = rand(10) * (count + 1) - if worker&.sidekiq_retry_in_block - custom_retry_in = retry_in(worker, count, exception).to_i - return custom_retry_in + jitter if custom_retry_in > 0 - end - (count**4) + 15 + jitter - end - - def retry_in(worker, count, exception) - worker.sidekiq_retry_in_block.call(count, exception) - rescue Exception => e - 
handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default"}) - nil - end - def exception_caused_by_shutdown?(e, checked_causes = []) return false unless e.cause diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/job_util.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/job_util.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/job_util.rb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/job_util.rb 2022-10-26 06:38:14.000000000 +0000 @@ -4,7 +4,8 @@ module Sidekiq module JobUtil # These functions encapsulate various job utilities. - # They must be simple and free from side effects. + + TRANSIENT_ATTRIBUTES = %w[] def validate(item) raise(ArgumentError, "Job must be a Hash with 'class' and 'args' keys: `#{item}`") unless item.is_a?(Hash) && item.key?("class") && item.key?("args") @@ -12,16 +13,19 @@ raise(ArgumentError, "Job class must be either a Class or String representation of the class name: `#{item}`") unless item["class"].is_a?(Class) || item["class"].is_a?(String) raise(ArgumentError, "Job 'at' must be a Numeric timestamp: `#{item}`") if item.key?("at") && !item["at"].is_a?(Numeric) raise(ArgumentError, "Job tags must be an Array: `#{item}`") if item["tags"] && !item["tags"].is_a?(Array) + end - if Sidekiq.options[:on_complex_arguments] == :raise + def verify_json(item) + job_class = item["wrapped"] || item["class"] + if Sidekiq[:on_complex_arguments] == :raise msg = <<~EOM - Job arguments to #{item["class"]} must be native JSON types, see https://github.com/mperham/sidekiq/wiki/Best-Practices. + Job arguments to #{job_class} must be native JSON types, see https://github.com/mperham/sidekiq/wiki/Best-Practices. To disable this error, remove `Sidekiq.strict_args!` from your initializer. 
EOM raise(ArgumentError, msg) unless json_safe?(item) - elsif Sidekiq.options[:on_complex_arguments] == :warn + elsif Sidekiq[:on_complex_arguments] == :warn Sidekiq.logger.warn <<~EOM unless json_safe?(item) - Job arguments to #{item["class"]} do not serialize to JSON safely. This will raise an error in + Job arguments to #{job_class} do not serialize to JSON safely. This will raise an error in Sidekiq 7.0. See https://github.com/mperham/sidekiq/wiki/Best-Practices or raise an error today by calling `Sidekiq.strict_args!` during Sidekiq initialization. EOM @@ -39,20 +43,22 @@ raise(ArgumentError, "Job must include a valid queue name") if item["queue"].nil? || item["queue"] == "" + # remove job attributes which aren't necessary to persist into Redis + TRANSIENT_ATTRIBUTES.each { |key| item.delete(key) } + + item["jid"] ||= SecureRandom.hex(12) item["class"] = item["class"].to_s item["queue"] = item["queue"].to_s - item["jid"] ||= SecureRandom.hex(12) item["created_at"] ||= Time.now.to_f - item end def normalized_hash(item_class) if item_class.is_a?(Class) - raise(ArgumentError, "Message must include a Sidekiq::Worker class, not class name: #{item_class.ancestors.inspect}") unless item_class.respond_to?(:get_sidekiq_options) + raise(ArgumentError, "Message must include a Sidekiq::Job class, not class name: #{item_class.ancestors.inspect}") unless item_class.respond_to?(:get_sidekiq_options) item_class.get_sidekiq_options else - Sidekiq.default_worker_options + Sidekiq.default_job_options end end diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/launcher.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/launcher.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/launcher.rb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/launcher.rb 2022-10-26 06:38:14.000000000 +0000 @@ -3,11 +3,12 @@ require "sidekiq/manager" require "sidekiq/fetch" require "sidekiq/scheduled" +require "sidekiq/ring_buffer" module Sidekiq # The Launcher starts the Manager and Poller threads 
and provides the process heartbeat. class Launcher - include Util + include Sidekiq::Component STATS_TTL = 5 * 365 * 24 * 60 * 60 # 5 years @@ -15,18 +16,18 @@ proc { "sidekiq" }, proc { Sidekiq::VERSION }, proc { |me, data| data["tag"] }, - proc { |me, data| "[#{Processor::WORKER_STATE.size} of #{data["concurrency"]} busy]" }, + proc { |me, data| "[#{Processor::WORK_STATE.size} of #{data["concurrency"]} busy]" }, proc { |me, data| "stopping" if me.stopping? } ] attr_accessor :manager, :poller, :fetcher def initialize(options) + @config = options options[:fetch] ||= BasicFetch.new(options) @manager = Sidekiq::Manager.new(options) - @poller = Sidekiq::Scheduled::Poller.new + @poller = Sidekiq::Scheduled::Poller.new(options) @done = false - @options = options end def run @@ -43,11 +44,9 @@ @poller.terminate end - # Shuts down the process. This method does not - # return until all work is complete and cleaned up. - # It can take up to the timeout to complete. + # Shuts down this Sidekiq instance. Waits up to the deadline for all jobs to complete. def stop - deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @options[:timeout] + deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @config[:timeout] @done = true @manager.quiet @@ -55,10 +54,10 @@ @manager.stop(deadline) - # Requeue everything in case there was a worker who grabbed work while stopped + # Requeue everything in case there was a thread which fetched a job while the process was stopped. # This call is a no-op in Sidekiq but necessary for Sidekiq Pro. - strategy = @options[:fetch] - strategy.bulk_requeue([], @options) + strategy = @config[:fetch] + strategy.bulk_requeue([], @config) clear_heartbeat end @@ -76,17 +75,19 @@ heartbeat sleep BEAT_PAUSE end - Sidekiq.logger.info("Heartbeat stopping...") + logger.info("Heartbeat stopping...") end def clear_heartbeat + flush_stats + # Remove record from Redis since we are shutting down. 
# Note we don't stop the heartbeat thread; if the process # doesn't actually exit, it'll reappear in the Web UI. - Sidekiq.redis do |conn| + redis do |conn| conn.pipelined do |pipeline| - pipeline.srem("processes", identity) - pipeline.unlink("#{identity}:workers") + pipeline.srem("processes", [identity]) + pipeline.unlink("#{identity}:work") end end rescue @@ -99,7 +100,7 @@ ❤ end - def self.flush_stats + def flush_stats fails = Processor::FAILURE.reset procd = Processor::PROCESSED.reset return if fails + procd == 0 @@ -123,7 +124,6 @@ Sidekiq.logger.warn("Unable to flush stats: #{ex}") end end - at_exit(&method(:flush_stats)) def ❤ key = identity @@ -132,12 +132,11 @@ begin fails = Processor::FAILURE.reset procd = Processor::PROCESSED.reset - curstate = Processor::WORKER_STATE.dup + curstate = Processor::WORK_STATE.dup - workers_key = "#{key}:workers" nowdate = Time.now.utc.strftime("%Y-%m-%d") - Sidekiq.redis do |conn| + redis do |conn| conn.multi do |transaction| transaction.incrby("stat:processed", procd) transaction.incrby("stat:processed:#{nowdate}", procd) @@ -146,12 +145,16 @@ transaction.incrby("stat:failed", fails) transaction.incrby("stat:failed:#{nowdate}", fails) transaction.expire("stat:failed:#{nowdate}", STATS_TTL) + end - transaction.unlink(workers_key) + # work is the current set of executing jobs + work_key = "#{key}:work" + conn.pipelined do |transaction| + transaction.unlink(work_key) curstate.each_pair do |tid, hash| - transaction.hset(workers_key, tid, Sidekiq.dump_json(hash)) + transaction.hset(work_key, tid, Sidekiq.dump_json(hash)) end - transaction.expire(workers_key, 60) + transaction.expire(work_key, 60) end end @@ -160,15 +163,15 @@ fails = procd = 0 kb = memory_usage(::Process.pid) - _, exists, _, _, msg = Sidekiq.redis { |conn| + _, exists, _, _, msg = redis { |conn| conn.multi { |transaction| - transaction.sadd("processes", key) + transaction.sadd("processes", [key]) transaction.exists?(key) transaction.hmset(key, "info", to_json, 
"busy", curstate.size, "beat", Time.now.to_f, "rtt_us", rtt, - "quiet", @done, + "quiet", @done.to_s, "rss", kb) transaction.expire(key, 60) transaction.rpop("#{key}-signals") @@ -177,6 +180,7 @@ # first heartbeat or recovering from an outage and need to reestablish our heartbeat fire_event(:heartbeat) unless exists + fire_event(:beat, oneshot: false) return unless msg @@ -198,7 +202,7 @@ def check_rtt a = b = 0 - Sidekiq.redis do |x| + redis do |x| a = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :microsecond) x.ping b = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :microsecond) @@ -209,12 +213,12 @@ # Workable is < 10,000µs # Log a warning if it's a disaster. if RTT_READINGS.all? { |x| x > RTT_WARNING_LEVEL } - Sidekiq.logger.warn <<~EOM + logger.warn <<~EOM Your Redis network connection is performing extremely poorly. Last RTT readings were #{RTT_READINGS.buffer.inspect}, ideally these should be < 1000. Ensure Redis is running in the same AZ or datacenter as Sidekiq. If these values are close to 100,000, that means your Sidekiq process may be - CPU overloaded; see https://github.com/mperham/sidekiq/discussions/5039 + CPU-saturated; reduce your concurrency and/or see https://github.com/mperham/sidekiq/discussions/5039 EOM RTT_READINGS.reset end @@ -246,10 +250,10 @@ "hostname" => hostname, "started_at" => Time.now.to_f, "pid" => ::Process.pid, - "tag" => @options[:tag] || "", - "concurrency" => @options[:concurrency], - "queues" => @options[:queues].uniq, - "labels" => @options[:labels], + "tag" => @config[:tag] || "", + "concurrency" => @config[:concurrency], + "queues" => @config[:queues].uniq, + "labels" => @config[:labels], "identity" => identity } end diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/logger.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/logger.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/logger.rb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/logger.rb 2022-10-26 06:38:14.000000000 +0000 @@ -18,7 +18,7 @@ end 
def self.add(k, v) - Thread.current[:sidekiq_context][k] = v + current[k] = v end end @@ -35,24 +35,10 @@ nil end - def debug? - level <= 0 - end - - def info? - level <= 1 - end - - def warn? - level <= 2 - end - - def error? - level <= 3 - end - - def fatal? - level <= 4 + LEVELS.each do |level, numeric_level| + define_method("#{level}?") do + local_level.nil? ? super() : local_level <= numeric_level + end end def local_level diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/manager.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/manager.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/manager.rb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/manager.rb 2022-10-26 06:38:14.000000000 +0000 @@ -1,6 +1,5 @@ # frozen_string_literal: true -require "sidekiq/util" require "sidekiq/processor" require "sidekiq/fetch" require "set" @@ -21,37 +20,34 @@ # the shutdown process. The other tasks are performed by other threads. # class Manager - include Util + include Sidekiq::Component attr_reader :workers - attr_reader :options def initialize(options = {}) + @config = options logger.debug { options.inspect } - @options = options @count = options[:concurrency] || 10 raise ArgumentError, "Concurrency of #{@count} is not supported" if @count < 1 @done = false @workers = Set.new @count.times do - @workers << Processor.new(self, options) + @workers << Processor.new(@config, &method(:processor_result)) end @plock = Mutex.new end def start - @workers.each do |x| - x.start - end + @workers.each(&:start) end def quiet return if @done @done = true - logger.info { "Terminating quiet workers" } - @workers.each { |x| x.terminate } + logger.info { "Terminating quiet threads" } + @workers.each(&:terminate) fire_event(:quiet, reverse: true) end @@ -65,24 +61,18 @@ sleep PAUSE_TIME return if @workers.empty? - logger.info { "Pausing to allow workers to finish..." } + logger.info { "Pausing to allow jobs to finish..." } wait_for(deadline) { @workers.empty? } return if @workers.empty? 
hard_shutdown end - def processor_stopped(processor) - @plock.synchronize do - @workers.delete(processor) - end - end - - def processor_died(processor, reason) + def processor_result(processor, reason = nil) @plock.synchronize do @workers.delete(processor) unless @done - p = Processor.new(self, options) + p = Processor.new(@config, &method(:processor_result)) @workers << p p.start end @@ -96,7 +86,7 @@ private def hard_shutdown - # We've reached the timeout and we still have busy workers. + # We've reached the timeout and we still have busy threads. # They must die but their jobs shall live on. cleanup = nil @plock.synchronize do @@ -106,17 +96,17 @@ if cleanup.size > 0 jobs = cleanup.map { |p| p.job }.compact - logger.warn { "Terminating #{cleanup.size} busy worker threads" } - logger.warn { "Work still in progress #{jobs.inspect}" } + logger.warn { "Terminating #{cleanup.size} busy threads" } + logger.debug { "Jobs still in progress #{jobs.inspect}" } # Re-enqueue unfinished jobs # NOTE: You may notice that we may push a job back to redis before - # the worker thread is terminated. This is ok because Sidekiq's + # the thread is terminated. This is ok because Sidekiq's # contract says that jobs are run AT LEAST once. Process termination # is delayed until we're certain the jobs are back in Redis because # it is worse to lose a job than to run it twice. - strategy = @options[:fetch] - strategy.bulk_requeue(jobs, @options) + strategy = @config[:fetch] + strategy.bulk_requeue(jobs, @config) end cleanup.each do |processor| @@ -129,5 +119,18 @@ deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + 3 wait_for(deadline) { @workers.empty? } end + + # hack for quicker development / testing environment #2774 + PAUSE_TIME = $stdout.tty? ? 0.1 : 0.5 + + # Wait for the orblock to be true or the deadline passed. 
+ def wait_for(deadline, &condblock) + remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + while remaining > PAUSE_TIME + return if condblock.call + sleep PAUSE_TIME + remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + end + end end end diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/metrics/deploy.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/metrics/deploy.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/metrics/deploy.rb 1970-01-01 00:00:00.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/metrics/deploy.rb 2022-10-26 06:38:14.000000000 +0000 @@ -0,0 +1,47 @@ +require "sidekiq" +require "time" + +# This file is designed to be required within the user's +# deployment script; it should need a bare minimum of dependencies. +# +# require "sidekiq/metrics/deploy" +# gitdesc = `git log -1 --format="%h %s"`.strip +# d = Sidekiq::Metrics::Deploy.new +# d.mark(label: gitdesc) +# +# Note that you cannot mark more than once per minute. This is a feature, not a bug. +module Sidekiq + module Metrics + class Deploy + MARK_TTL = 90 * 24 * 60 * 60 # 90 days + + def initialize(pool = Sidekiq.redis_pool) + @pool = pool + end + + def mark(at: Time.now, label: "") + # we need to round the timestamp so that we gracefully + # handle an excepted common error in marking deploys: + # having every process mark its deploy, leading + # to N marks for each deploy. Instead we round the time + # to the minute so that multple marks within that minute + # will all naturally rollup into one mark per minute. 
+ whence = at.utc + floor = Time.utc(whence.year, whence.month, whence.mday, whence.hour, whence.min, 0) + datecode = floor.strftime("%Y%m%d") + key = "#{datecode}-marks" + @pool.with do |c| + c.pipelined do |pipe| + pipe.hsetnx(key, floor.iso8601, label) + pipe.expire(key, MARK_TTL) + end + end + end + + def fetch(date = Time.now.utc.to_date) + datecode = date.strftime("%Y%m%d") + @pool.with { |c| c.hgetall("#{datecode}-marks") } + end + end + end +end diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/metrics/query.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/metrics/query.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/metrics/query.rb 1970-01-01 00:00:00.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/metrics/query.rb 2022-10-26 06:38:14.000000000 +0000 @@ -0,0 +1,153 @@ +require "sidekiq" +require "date" +require "set" + +require "sidekiq/metrics/shared" + +module Sidekiq + module Metrics + # Allows caller to query for Sidekiq execution metrics within Redis. + # Caller sets a set of attributes to act as filters. {#fetch} will call + # Redis and return a Hash of results. + # + # NB: all metrics and times/dates are UTC only. We specifically do not + # support timezones. 
+ class Query + def initialize(pool: Sidekiq.redis_pool, now: Time.now) + @time = now.utc + @pool = pool + @klass = nil + end + + # Get metric data for all jobs from the last hour + def top_jobs(minutes: 60) + result = Result.new + + time = @time + redis_results = @pool.with do |conn| + conn.pipelined do |pipe| + minutes.times do |idx| + key = "j|#{time.strftime("%Y%m%d")}|#{time.hour}:#{time.min}" + pipe.hgetall key + result.prepend_bucket time + time -= 60 + end + end + end + + time = @time + redis_results.each do |hash| + hash.each do |k, v| + kls, metric = k.split("|") + result.job_results[kls].add_metric metric, time, v.to_i + end + time -= 60 + end + + result.marks = fetch_marks(result.starts_at..result.ends_at) + + result + end + + def for_job(klass, minutes: 60) + result = Result.new + + time = @time + redis_results = @pool.with do |conn| + conn.pipelined do |pipe| + minutes.times do |idx| + key = "j|#{time.strftime("%Y%m%d")}|#{time.hour}:#{time.min}" + pipe.hmget key, "#{klass}|ms", "#{klass}|p", "#{klass}|f" + result.prepend_bucket time + time -= 60 + end + end + end + + time = @time + @pool.with do |conn| + redis_results.each do |(ms, p, f)| + result.job_results[klass].add_metric "ms", time, ms.to_i if ms + result.job_results[klass].add_metric "p", time, p.to_i if p + result.job_results[klass].add_metric "f", time, f.to_i if f + result.job_results[klass].add_hist time, Histogram.new(klass).fetch(conn, time) + time -= 60 + end + end + + result.marks = fetch_marks(result.starts_at..result.ends_at) + + result + end + + class Result < Struct.new(:starts_at, :ends_at, :size, :buckets, :job_results, :marks) + def initialize + super + self.buckets = [] + self.marks = [] + self.job_results = Hash.new { |h, k| h[k] = JobResult.new } + end + + def prepend_bucket(time) + buckets.unshift time.strftime("%H:%M") + self.ends_at ||= time + self.starts_at = time + end + end + + class JobResult < Struct.new(:series, :hist, :totals) + def initialize + super + self.series 
= Hash.new { |h, k| h[k] = Hash.new(0) } + self.hist = Hash.new { |h, k| h[k] = [] } + self.totals = Hash.new(0) + end + + def add_metric(metric, time, value) + totals[metric] += value + series[metric][time.strftime("%H:%M")] += value + + # Include timing measurements in seconds for convenience + add_metric("s", time, value / 1000.0) if metric == "ms" + end + + def add_hist(time, hist_result) + hist[time.strftime("%H:%M")] = hist_result + end + + def total_avg(metric = "ms") + completed = totals["p"] - totals["f"] + totals[metric].to_f / completed + end + + def series_avg(metric = "ms") + series[metric].each_with_object(Hash.new(0)) do |(bucket, value), result| + completed = series.dig("p", bucket) - series.dig("f", bucket) + result[bucket] = completed == 0 ? 0 : value.to_f / completed + end + end + end + + class MarkResult < Struct.new(:time, :label) + def bucket + time.strftime("%H:%M") + end + end + + private + + def fetch_marks(time_range) + [].tap do |result| + marks = @pool.with { |c| c.hgetall("#{@time.strftime("%Y%m%d")}-marks") } + + marks.each do |timestamp, label| + time = Time.parse(timestamp) + if time_range.cover? time + result << MarkResult.new(time, label) + end + end + end + end + end + end +end diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/metrics/shared.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/metrics/shared.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/metrics/shared.rb 1970-01-01 00:00:00.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/metrics/shared.rb 2022-10-26 06:38:14.000000000 +0000 @@ -0,0 +1,94 @@ +require "concurrent" + +module Sidekiq + module Metrics + # TODO Support apps without concurrent-ruby + Counter = ::Concurrent::AtomicFixnum + + # Implements space-efficient but statistically useful histogram storage. + # A precise time histogram stores every time. Instead we break times into a set of + # known buckets and increment counts of the associated time bucket. 
Even if we call + # the histogram a million times, we'll still only store 26 buckets. + # NB: needs to be thread-safe or resiliant to races. + # + # To store this data, we use Redis' BITFIELD command to store unsigned 16-bit counters + # per bucket per klass per minute. It's unlikely that most people will be executing more + # than 1000 job/sec for a full minute of a specific type. + class Histogram + include Enumerable + + # This number represents the maximum milliseconds for this bucket. + # 20 means all job executions up to 20ms, e.g. if a job takes + # 280ms, it'll increment bucket[7]. Note we can track job executions + # up to about 5.5 minutes. After that, it's assumed you're probably + # not too concerned with its performance. + BUCKET_INTERVALS = [ + 20, 30, 45, 65, 100, + 150, 225, 335, 500, 750, + 1100, 1700, 2500, 3800, 5750, + 8500, 13000, 20000, 30000, 45000, + 65000, 100000, 150000, 225000, 335000, + Float::INFINITY # the "maybe your job is too long" bucket + ] + LABELS = [ + "20ms", "30ms", "45ms", "65ms", "100ms", + "150ms", "225ms", "335ms", "500ms", "750ms", + "1.1s", "1.7s", "2.5s", "3.8s", "5.75s", + "8.5s", "13s", "20s", "30s", "45s", + "65s", "100s", "150s", "225s", "335s", + "Slow" + ] + + FETCH = "GET u16 #0 GET u16 #1 GET u16 #2 GET u16 #3 \ + GET u16 #4 GET u16 #5 GET u16 #6 GET u16 #7 \ + GET u16 #8 GET u16 #9 GET u16 #10 GET u16 #11 \ + GET u16 #12 GET u16 #13 GET u16 #14 GET u16 #15 \ + GET u16 #16 GET u16 #17 GET u16 #18 GET u16 #19 \ + GET u16 #20 GET u16 #21 GET u16 #22 GET u16 #23 \ + GET u16 #24 GET u16 #25".split + + def each + buckets.each { |counter| yield counter.value } + end + + def label(idx) + LABELS[idx] + end + + attr_reader :buckets + def initialize(klass) + @klass = klass + @buckets = Array.new(BUCKET_INTERVALS.size) { Counter.new } + end + + def record_time(ms) + index_to_use = BUCKET_INTERVALS.each_index do |idx| + break idx if ms < BUCKET_INTERVALS[idx] + end + + @buckets[index_to_use].increment + end + + def 
fetch(conn, now = Time.now) + window = now.utc.strftime("%d-%H:%-M") + key = "#{@klass}-#{window}" + conn.bitfield(key, *FETCH) + end + + def persist(conn, now = Time.now) + buckets, @buckets = @buckets, [] + window = now.utc.strftime("%d-%H:%-M") + key = "#{@klass}-#{window}" + cmd = [key, "OVERFLOW", "SAT"] + buckets.each_with_index do |counter, idx| + val = counter.value + cmd << "INCRBY" << "u16" << "##{idx}" << val.to_s if val > 0 + end + + conn.bitfield(*cmd) if cmd.size > 3 + conn.expire(key, 86400) + key + end + end + end +end diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/metrics/tracking.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/metrics/tracking.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/metrics/tracking.rb 1970-01-01 00:00:00.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/metrics/tracking.rb 2022-10-26 06:38:14.000000000 +0000 @@ -0,0 +1,134 @@ +require "time" +require "sidekiq" +require "sidekiq/metrics/shared" + +# This file contains the components which track execution metrics within Sidekiq. +module Sidekiq + module Metrics + class ExecutionTracker + include Sidekiq::Component + + def initialize(config) + @config = config + @jobs = Hash.new(0) + @totals = Hash.new(0) + @grams = Hash.new { |hash, key| hash[key] = Histogram.new(key) } + @lock = Mutex.new + end + + def track(queue, klass) + start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :millisecond) + time_ms = 0 + begin + begin + yield + ensure + finish = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :millisecond) + time_ms = finish - start + end + # We don't track time for failed jobs as they can have very unpredictable + # execution times. more important to know average time for successful jobs so we + # can better recognize when a perf regression is introduced. 
+ @lock.synchronize { + @grams[klass].record_time(time_ms) + @jobs["#{klass}|ms"] += time_ms + @totals["ms"] += time_ms + } + rescue Exception + @lock.synchronize { + @jobs["#{klass}|f"] += 1 + @totals["f"] += 1 + } + raise + ensure + @lock.synchronize { + @jobs["#{klass}|p"] += 1 + @totals["p"] += 1 + } + end + end + + LONG_TERM = 90 * 24 * 60 * 60 + MID_TERM = 7 * 24 * 60 * 60 + SHORT_TERM = 8 * 60 * 60 + + def flush(time = Time.now) + totals, jobs, grams = reset + procd = totals["p"] + fails = totals["f"] + return if procd == 0 && fails == 0 + + now = time.utc + nowdate = now.strftime("%Y%m%d") + nowhour = now.strftime("%Y%m%d|%-H") + nowmin = now.strftime("%Y%m%d|%-H:%-M") + count = 0 + + redis do |conn| + if grams.size > 0 + conn.pipelined do |pipe| + grams.each do |_, gram| + gram.persist(pipe, now) + end + end + end + + [ + ["j", jobs, nowdate, LONG_TERM], + ["j", jobs, nowhour, MID_TERM], + ["j", jobs, nowmin, SHORT_TERM] + ].each do |prefix, data, bucket, ttl| + # Quietly seed the new 7.0 stats format so migration is painless. 
+ conn.pipelined do |xa| + stats = "#{prefix}|#{bucket}" + # logger.debug "Flushing metrics #{stats}" + data.each_pair do |key, value| + xa.hincrby stats, key, value + count += 1 + end + xa.expire(stats, ttl) + end + end + logger.info "Flushed #{count} metrics" + count + end + end + + private + + def reset + @lock.synchronize { + array = [@totals, @jobs, @grams] + @totals = Hash.new(0) + @jobs = Hash.new(0) + @grams = Hash.new { |hash, key| hash[key] = Histogram.new(key) } + array + } + end + end + + class Middleware + include Sidekiq::ServerMiddleware + + def initialize(options) + @exec = options + end + + def call(_instance, hash, queue, &block) + @exec.track(queue, hash["wrapped"] || hash["class"], &block) + end + end + end +end + +if ENV["SIDEKIQ_METRICS_BETA"] == "1" + Sidekiq.configure_server do |config| + exec = Sidekiq::Metrics::ExecutionTracker.new(config) + config.server_middleware do |chain| + chain.add Sidekiq::Metrics::Middleware, exec + end + config.on(:beat) do + exec.flush + end + end +end diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/middleware/chain.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/middleware/chain.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/middleware/chain.rb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/middleware/chain.rb 2022-10-26 06:38:14.000000000 +0000 @@ -1,82 +1,102 @@ # frozen_string_literal: true +require "sidekiq/middleware/modules" + module Sidekiq # Middleware is code configured to run before/after - # a message is processed. It is patterned after Rack + # a job is processed. It is patterned after Rack # middleware. Middleware exists for the client side # (pushing jobs onto the queue) as well as the server # side (when jobs are actually processed). # + # Callers will register middleware Classes and Sidekiq will + # create new instances of the middleware for every job. This + # is important so that instance state is not shared accidentally + # between job executions. 
+ # # To add middleware for the client: # - # Sidekiq.configure_client do |config| - # config.client_middleware do |chain| - # chain.add MyClientHook + # Sidekiq.configure_client do |config| + # config.client_middleware do |chain| + # chain.add MyClientHook + # end # end - # end # # To modify middleware for the server, just call # with another block: # - # Sidekiq.configure_server do |config| - # config.server_middleware do |chain| - # chain.add MyServerHook - # chain.remove ActiveRecord + # Sidekiq.configure_server do |config| + # config.server_middleware do |chain| + # chain.add MyServerHook + # chain.remove ActiveRecord + # end # end - # end # # To insert immediately preceding another entry: # - # Sidekiq.configure_client do |config| - # config.client_middleware do |chain| - # chain.insert_before ActiveRecord, MyClientHook + # Sidekiq.configure_client do |config| + # config.client_middleware do |chain| + # chain.insert_before ActiveRecord, MyClientHook + # end # end - # end # # To insert immediately after another entry: # - # Sidekiq.configure_client do |config| - # config.client_middleware do |chain| - # chain.insert_after ActiveRecord, MyClientHook + # Sidekiq.configure_client do |config| + # config.client_middleware do |chain| + # chain.insert_after ActiveRecord, MyClientHook + # end # end - # end # # This is an example of a minimal server middleware: # - # class MyServerHook - # def call(worker_instance, msg, queue) - # puts "Before work" - # yield - # puts "After work" + # class MyServerHook + # include Sidekiq::ServerMiddleware + # + # def call(job_instance, msg, queue) + # logger.info "Before job" + # redis {|conn| conn.get("foo") } # do something in Redis + # yield + # logger.info "After job" + # end # end - # end # # This is an example of a minimal client middleware, note # the method must return the result or the job will not push # to Redis: # - # class MyClientHook - # def call(worker_class, msg, queue, redis_pool) - # puts "Before push" - # result = 
yield - # puts "After push" - # result + # class MyClientHook + # include Sidekiq::ClientMiddleware + # + # def call(job_class, msg, queue, redis_pool) + # logger.info "Before push" + # result = yield + # logger.info "After push" + # result + # end # end - # end # module Middleware class Chain include Enumerable + # A unique instance of the middleware chain is created for + # each job executed in order to be thread-safe. + # @param copy [Sidekiq::Middleware::Chain] New instance of Chain + # @returns nil def initialize_copy(copy) copy.instance_variable_set(:@entries, entries.dup) + nil end + # Iterate through each middleware in the chain def each(&block) entries.each(&block) end - def initialize + # @api private + def initialize(config = nil) # :nodoc: + @config = config @entries = nil yield self if block_given? end @@ -85,38 +105,55 @@ @entries ||= [] end + # Remove all middleware matching the given Class + # @param klass [Class] def remove(klass) entries.delete_if { |entry| entry.klass == klass } end + # Add the given middleware to the end of the chain. + # Sidekiq will call `klass.new(*args)` to create a clean + # copy of your middleware for every job executed. + # + # chain.add(Statsd::Metrics, { collector: "localhost:8125" }) + # + # @param klass [Class] Your middleware class + # @param *args [Array] Set of arguments to pass to every instance of your middleware def add(klass, *args) remove(klass) - entries << Entry.new(klass, *args) + entries << Entry.new(@config, klass, *args) end + # Identical to {#add} except the middleware is added to the front of the chain. def prepend(klass, *args) remove(klass) - entries.insert(0, Entry.new(klass, *args)) + entries.insert(0, Entry.new(@config, klass, *args)) end + # Inserts +newklass+ before +oldklass+ in the chain. + # Useful if one middleware must run before another middleware. def insert_before(oldklass, newklass, *args) i = entries.index { |entry| entry.klass == newklass } - new_entry = i.nil? ? 
Entry.new(newklass, *args) : entries.delete_at(i) + new_entry = i.nil? ? Entry.new(@config, newklass, *args) : entries.delete_at(i) i = entries.index { |entry| entry.klass == oldklass } || 0 entries.insert(i, new_entry) end + # Inserts +newklass+ after +oldklass+ in the chain. + # Useful if one middleware must run after another middleware. def insert_after(oldklass, newklass, *args) i = entries.index { |entry| entry.klass == newklass } - new_entry = i.nil? ? Entry.new(newklass, *args) : entries.delete_at(i) + new_entry = i.nil? ? Entry.new(@config, newklass, *args) : entries.delete_at(i) i = entries.index { |entry| entry.klass == oldklass } || entries.count - 1 entries.insert(i + 1, new_entry) end + # @return [Boolean] if the given class is already in the chain def exists?(klass) any? { |entry| entry.klass == klass } end + # @return [Boolean] if the chain contains no middleware def empty? @entries.nil? || @entries.empty? end @@ -129,6 +166,8 @@ entries.clear end + # Used by Sidekiq to execute the middleware at runtime + # @api private def invoke(*args) return yield if empty? 
@@ -146,16 +185,21 @@ private + # Represents each link in the middleware chain + # @api private class Entry attr_reader :klass - def initialize(klass, *args) + def initialize(config, klass, *args) + @config = config @klass = klass @args = args end def make_new - @klass.new(*@args) + x = @klass.new(*@args) + x.config = @config if @config && x.respond_to?(:config=) + x end end end diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/middleware/current_attributes.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/middleware/current_attributes.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/middleware/current_attributes.rb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/middleware/current_attributes.rb 2022-10-26 06:38:14.000000000 +0000 @@ -15,22 +15,28 @@ # module CurrentAttributes class Save + include Sidekiq::ClientMiddleware + def initialize(cattr) @klass = cattr end def call(_, job, _, _) attrs = @klass.attributes - if job.has_key?("cattr") - job["cattr"].merge!(attrs) - else - job["cattr"] = attrs + if attrs.any? + if job.has_key?("cattr") + job["cattr"].merge!(attrs) + else + job["cattr"] = attrs + end end yield end end class Load + include Sidekiq::ServerMiddleware + def initialize(cattr) @klass = cattr end diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/middleware/i18n.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/middleware/i18n.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/middleware/i18n.rb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/middleware/i18n.rb 2022-10-26 06:38:14.000000000 +0000 @@ -10,16 +10,18 @@ # Get the current locale and store it in the message # to be sent to Sidekiq. class Client - def call(_worker, msg, _queue, _redis) - msg["locale"] ||= I18n.locale + include Sidekiq::ClientMiddleware + def call(_jobclass, job, _queue, _redis) + job["locale"] ||= I18n.locale yield end end # Pull the msg locale out and set the current thread to use it. 
class Server - def call(_worker, msg, _queue, &block) - I18n.with_locale(msg.fetch("locale", I18n.default_locale), &block) + include Sidekiq::ServerMiddleware + def call(_jobclass, job, _queue, &block) + I18n.with_locale(job.fetch("locale", I18n.default_locale), &block) end end end diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/middleware/modules.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/middleware/modules.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/middleware/modules.rb 1970-01-01 00:00:00.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/middleware/modules.rb 2022-10-26 06:38:14.000000000 +0000 @@ -0,0 +1,21 @@ +module Sidekiq + # Server-side middleware must import this Module in order + # to get access to server resources during `call`. + module ServerMiddleware + attr_accessor :config + def redis_pool + config.redis_pool + end + + def logger + config.logger + end + + def redis(&block) + config.redis(&block) + end + end + + # no difference for now + ClientMiddleware = ServerMiddleware +end diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/monitor.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/monitor.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/monitor.rb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/monitor.rb 2022-10-26 06:38:14.000000000 +0000 @@ -17,7 +17,7 @@ end send(section) rescue => e - puts "Couldn't get status: #{e}" + abort "Couldn't get status: #{e}" end def all diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/paginator.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/paginator.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/paginator.rb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/paginator.rb 2022-10-26 06:38:14.000000000 +0000 @@ -19,9 +19,9 @@ total_size, items = conn.multi { |transaction| transaction.zcard(key) if rev - transaction.zrevrange(key, starting, ending, with_scores: true) + transaction.zrevrange(key, starting, ending, withscores: true) else - transaction.zrange(key, starting, ending, with_scores: 
true) + transaction.zrange(key, starting, ending, withscores: true) end } [current_page, total_size, items] diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/processor.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/processor.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/processor.rb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/processor.rb 2022-10-26 06:38:14.000000000 +0000 @@ -1,6 +1,5 @@ # frozen_string_literal: true -require "sidekiq/util" require "sidekiq/fetch" require "sidekiq/job_logger" require "sidekiq/job_retry" @@ -11,33 +10,34 @@ # # 1. fetches a job from Redis # 2. executes the job - # a. instantiate the Worker + # a. instantiate the job class # b. run the middleware chain # c. call #perform # - # A Processor can exit due to shutdown (processor_stopped) - # or due to an error during job execution (processor_died) + # A Processor can exit due to shutdown or due to + # an error during job execution. # # If an error occurs in the job execution, the # Processor calls the Manager to create a new one # to replace itself and exits. 
# class Processor - include Util + include Sidekiq::Component attr_reader :thread attr_reader :job - def initialize(mgr, options) - @mgr = mgr + def initialize(options, &block) + @callback = block @down = false @done = false @job = nil @thread = nil + @config = options @strategy = options[:fetch] @reloader = options[:reloader] || proc { |&block| block.call } @job_logger = (options[:job_logger] || Sidekiq::JobLogger).new - @retrier = Sidekiq::JobRetry.new + @retrier = Sidekiq::JobRetry.new(options) end def terminate(wait = false) @@ -66,26 +66,26 @@ def run process_one until @done - @mgr.processor_stopped(self) + @callback.call(self) rescue Sidekiq::Shutdown - @mgr.processor_stopped(self) + @callback.call(self) rescue Exception => ex - @mgr.processor_died(self, ex) + @callback.call(self, ex) end - def process_one + def process_one(&block) @job = fetch process(@job) if @job @job = nil end def get_one - work = @strategy.retrieve_work + uow = @strategy.retrieve_work if @down logger.info { "Redis is online, #{::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - @down} sec downtime" } @down = nil end - work + uow rescue Sidekiq::Shutdown rescue => ex handle_fetch_exception(ex) @@ -130,10 +130,10 @@ # Effectively this block denotes a "unit of work" to Rails. @reloader.call do klass = constantize(job_hash["class"]) - worker = klass.new - worker.jid = job_hash["jid"] - @retrier.local(worker, jobstr, queue) do - yield worker + inst = klass.new + inst.jid = job_hash["jid"] + @retrier.local(inst, jobstr, queue) do + yield inst end end end @@ -142,9 +142,9 @@ end end - def process(work) - jobstr = work.job - queue = work.queue_name + def process(uow) + jobstr = uow.job + queue = uow.queue_name # Treat malformed JSON as a special case: job goes straight to the morgue. 
job_hash = nil @@ -152,16 +152,22 @@ job_hash = Sidekiq.load_json(jobstr) rescue => ex handle_exception(ex, {context: "Invalid JSON for job", jobstr: jobstr}) - # we can't notify because the job isn't a valid hash payload. - DeadSet.new.kill(jobstr, notify_failure: false) - return work.acknowledge + now = Time.now.to_f + config.redis do |conn| + conn.multi do |xa| + xa.zadd("dead", now.to_s, jobstr) + xa.zremrangebyscore("dead", "-inf", now - config[:dead_timeout_in_seconds]) + xa.zremrangebyrank("dead", 0, - config[:dead_max_jobs]) + end + end + return uow.acknowledge end ack = false begin - dispatch(job_hash, queue, jobstr) do |worker| - Sidekiq.server_middleware.invoke(worker, job_hash, queue) do - execute_job(worker, job_hash["args"]) + dispatch(job_hash, queue, jobstr) do |inst| + @config.server_middleware.invoke(inst, job_hash, queue) do + execute_job(inst, job_hash["args"]) end end ack = true @@ -174,7 +180,7 @@ # signals that we created a retry successfully. We can acknowlege the job. ack = true e = h.cause || h - handle_exception(e, {context: "Job raised exception", job: job_hash, jobstr: jobstr}) + handle_exception(e, {context: "Job raised exception", job: job_hash}) raise e rescue Exception => ex # Unexpected error! This is very bad and indicates an exception that got past @@ -186,14 +192,14 @@ if ack # We don't want a shutdown signal to interrupt job acknowledgment. 
Thread.handle_interrupt(Sidekiq::Shutdown => :never) do - work.acknowledge + uow.acknowledge end end end end - def execute_job(worker, cloned_args) - worker.perform(*cloned_args) + def execute_job(inst, cloned_args) + inst.perform(*cloned_args) end # Ruby doesn't provide atomic counters out of the box so we'll @@ -219,39 +225,39 @@ end # jruby's Hash implementation is not threadsafe, so we wrap it in a mutex here - class SharedWorkerState + class SharedWorkState def initialize - @worker_state = {} + @work_state = {} @lock = Mutex.new end def set(tid, hash) - @lock.synchronize { @worker_state[tid] = hash } + @lock.synchronize { @work_state[tid] = hash } end def delete(tid) - @lock.synchronize { @worker_state.delete(tid) } + @lock.synchronize { @work_state.delete(tid) } end def dup - @lock.synchronize { @worker_state.dup } + @lock.synchronize { @work_state.dup } end def size - @lock.synchronize { @worker_state.size } + @lock.synchronize { @work_state.size } end def clear - @lock.synchronize { @worker_state.clear } + @lock.synchronize { @work_state.clear } end end PROCESSED = Counter.new FAILURE = Counter.new - WORKER_STATE = SharedWorkerState.new + WORK_STATE = SharedWorkState.new def stats(jobstr, queue) - WORKER_STATE.set(tid, {queue: queue, payload: jobstr, run_at: Time.now.to_i}) + WORK_STATE.set(tid, {queue: queue, payload: jobstr, run_at: Time.now.to_i}) begin yield @@ -259,7 +265,7 @@ FAILURE.incr raise ensure - WORKER_STATE.delete(tid) + WORK_STATE.delete(tid) PROCESSED.incr end end diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/rails.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/rails.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/rails.rb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/rails.rb 2022-10-26 06:38:14.000000000 +0000 @@ -1,6 +1,6 @@ # frozen_string_literal: true -require "sidekiq/worker" +require "sidekiq/job" module Sidekiq class Rails < ::Rails::Engine @@ -33,28 +33,35 @@ # end initializer "sidekiq.active_job_integration" do 
ActiveSupport.on_load(:active_job) do - include ::Sidekiq::Worker::Options unless respond_to?(:sidekiq_options) + include ::Sidekiq::Job::Options unless respond_to?(:sidekiq_options) end end initializer "sidekiq.rails_logger" do - Sidekiq.configure_server do |_| - # This is the integration code necessary so that if code uses `Rails.logger.info "Hello"`, + Sidekiq.configure_server do |config| + # This is the integration code necessary so that if a job uses `Rails.logger.info "Hello"`, # it will appear in the Sidekiq console with all of the job context. See #5021 and # https://github.com/rails/rails/blob/b5f2b550f69a99336482739000c58e4e04e033aa/railties/lib/rails/commands/server/server_command.rb#L82-L84 - unless ::Rails.logger == ::Sidekiq.logger || ::ActiveSupport::Logger.logger_outputs_to?(::Rails.logger, $stdout) - ::Rails.logger.extend(::ActiveSupport::Logger.broadcast(::Sidekiq.logger)) + unless ::Rails.logger == config.logger || ::ActiveSupport::Logger.logger_outputs_to?(::Rails.logger, $stdout) + ::Rails.logger.extend(::ActiveSupport::Logger.broadcast(config.logger)) end end end + config.before_configuration do + dep = ActiveSupport::Deprecation.new("7.0", "Sidekiq") + dep.deprecate_methods(Sidekiq.singleton_class, + default_worker_options: :default_job_options, + "default_worker_options=": :default_job_options=) + end + # This hook happens after all initializers are run, just before returning # from config/environment.rb back to sidekiq/cli.rb. # # None of this matters on the client-side, only within the Sidekiq process itself. 
config.after_initialize do - Sidekiq.configure_server do |_| - Sidekiq.options[:reloader] = Sidekiq::Rails::Reloader.new + Sidekiq.configure_server do |config| + config[:reloader] = Sidekiq::Rails::Reloader.new end end end diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/redis_client_adapter.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/redis_client_adapter.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/redis_client_adapter.rb 1970-01-01 00:00:00.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/redis_client_adapter.rb 2022-10-26 06:38:14.000000000 +0000 @@ -0,0 +1,154 @@ +# frozen_string_literal: true + +require "connection_pool" +require "redis_client" +require "redis_client/decorator" +require "uri" + +module Sidekiq + class RedisClientAdapter + BaseError = RedisClient::Error + CommandError = RedisClient::CommandError + + module CompatMethods + def info + @client.call("INFO") { |i| i.lines(chomp: true).map { |l| l.split(":", 2) }.select { |l| l.size == 2 }.to_h } + end + + def evalsha(sha, keys, argv) + @client.call("EVALSHA", sha, keys.size, *keys, *argv) + end + + def brpoplpush(*args) + @client.blocking_call(false, "BRPOPLPUSH", *args) + end + + def brpop(*args) + @client.blocking_call(false, "BRPOP", *args) + end + + def set(*args) + @client.call("SET", *args) { |r| r == "OK" } + end + ruby2_keywords :set if respond_to?(:ruby2_keywords, true) + + def sismember(*args) + @client.call("SISMEMBER", *args) { |c| c > 0 } + end + + def exists?(key) + @client.call("EXISTS", key) { |c| c > 0 } + end + + private + + def method_missing(*args, &block) + @client.call(*args, *block) + end + ruby2_keywords :method_missing if respond_to?(:ruby2_keywords, true) + + def respond_to_missing?(name, include_private = false) + super # Appease the linter. We can't tell what is a valid command. 
+ end + end + + CompatClient = RedisClient::Decorator.create(CompatMethods) + + class CompatClient + %i[scan sscan zscan hscan].each do |method| + alias_method :"#{method}_each", method + undef_method method + end + + def disconnect! + @client.close + end + + def connection + {id: @client.id} + end + + def redis + self + end + + def _client + @client + end + + def message + yield nil, @queue.pop + end + + # NB: this method does not return + def subscribe(chan) + @queue = ::Queue.new + + pubsub = @client.pubsub + pubsub.call("subscribe", chan) + + loop do + evt = pubsub.next_event + next if evt.nil? + next unless evt[0] == "message" && evt[1] == chan + + (_, _, msg) = evt + @queue << msg + yield self + end + end + end + + def initialize(options) + opts = client_opts(options) + @config = if opts.key?(:sentinels) + RedisClient.sentinel(**opts) + else + RedisClient.config(**opts) + end + end + + def new_client + CompatClient.new(@config.new_client) + end + + private + + def client_opts(options) + opts = options.dup + + if opts[:namespace] + Sidekiq.logger.error("Your Redis configuration uses the namespace '#{opts[:namespace]}' but this feature isn't supported by redis-client. " \ + "Either use the redis adapter or remove the namespace.") + Kernel.exit(-127) + end + + opts.delete(:size) + opts.delete(:pool_timeout) + + if opts[:network_timeout] + opts[:timeout] = opts[:network_timeout] + opts.delete(:network_timeout) + end + + if opts[:driver] + opts[:driver] = opts[:driver].to_sym + end + + opts[:name] = opts.delete(:master_name) if opts.key?(:master_name) + opts[:role] = opts[:role].to_sym if opts.key?(:role) + opts.delete(:url) if opts.key?(:sentinels) + + # Issue #3303, redis-rb will silently retry an operation. + # This can lead to duplicate jobs if Sidekiq::Client's LPUSH + # is performed twice but I believe this is much, much rarer + # than the reconnect silently fixing a problem; we keep it + # on by default. 
+ opts[:reconnect_attempts] ||= 1 + + opts + end + end +end + +Sidekiq::RedisConnection.adapter = Sidekiq::RedisClientAdapter diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/redis_connection.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/redis_connection.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/redis_connection.rb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/redis_connection.rb 2022-10-26 06:38:14.000000000 +0000 @@ -5,8 +5,79 @@ require "uri" module Sidekiq - class RedisConnection + module RedisConnection + class RedisAdapter + BaseError = Redis::BaseError + CommandError = Redis::CommandError + + def initialize(options) + warn("Usage of the 'redis' gem within Sidekiq itself is deprecated, Sidekiq 7.0 will only use the new, simpler 'redis-client' gem", caller) if ENV["SIDEKIQ_REDIS_CLIENT"] == "1" + @options = options + end + + def new_client + namespace = @options[:namespace] + + client = Redis.new client_opts(@options) + if namespace + begin + require "redis/namespace" + Redis::Namespace.new(namespace, redis: client) + rescue LoadError + Sidekiq.logger.error("Your Redis configuration uses the namespace '#{namespace}' but the redis-namespace gem is not included in the Gemfile." \ + "Add the gem to your Gemfile to continue using a namespace. Otherwise, remove the namespace parameter.") + exit(-127) + end + else + client + end + end + + private + + def client_opts(options) + opts = options.dup + if opts[:namespace] + opts.delete(:namespace) + end + + if opts[:network_timeout] + opts[:timeout] = opts[:network_timeout] + opts.delete(:network_timeout) + end + + # Issue #3303, redis-rb will silently retry an operation. + # This can lead to duplicate jobs if Sidekiq::Client's LPUSH + # is performed twice but I believe this is much, much rarer + # than the reconnect silently fixing a problem; we keep it + # on by default. 
+ opts[:reconnect_attempts] ||= 1 + + opts + end + end + + @adapter = RedisAdapter + class << self + attr_reader :adapter + + # RedisConnection.adapter = :redis + # RedisConnection.adapter = :redis_client + def adapter=(adapter) + raise "no" if adapter == self + result = case adapter + when :redis + RedisAdapter + when Class + adapter + else + require "sidekiq/#{adapter}_adapter" + nil + end + @adapter = result if result + end + def create(options = {}) symbolized_options = options.transform_keys(&:to_sym) @@ -19,26 +90,27 @@ elsif Sidekiq.server? # Give ourselves plenty of connections. pool is lazy # so we won't create them until we need them. - Sidekiq.options[:concurrency] + 5 + Sidekiq[:concurrency] + 5 elsif ENV["RAILS_MAX_THREADS"] Integer(ENV["RAILS_MAX_THREADS"]) else 5 end - verify_sizing(size, Sidekiq.options[:concurrency]) if Sidekiq.server? + verify_sizing(size, Sidekiq[:concurrency]) if Sidekiq.server? pool_timeout = symbolized_options[:pool_timeout] || 1 log_info(symbolized_options) + redis_config = adapter.new(symbolized_options) ConnectionPool.new(timeout: pool_timeout, size: size) do - build_client(symbolized_options) + redis_config.new_client end end private - # Sidekiq needs a lot of concurrent Redis connections. + # Sidekiq needs many concurrent Redis connections. # # We need a connection for each Processor. # We need a connection for Pro's real-time change listener @@ -47,48 +119,7 @@ # - enterprise's leader election # - enterprise's cron support def verify_sizing(size, concurrency) - raise ArgumentError, "Your Redis connection pool is too small for Sidekiq to work. 
Your pool has #{size} connections but must have at least #{concurrency + 2}" if size < (concurrency + 2) - end - - def build_client(options) - namespace = options[:namespace] - - client = Redis.new client_opts(options) - if namespace - begin - require "redis/namespace" - Redis::Namespace.new(namespace, redis: client) - rescue LoadError - Sidekiq.logger.error("Your Redis configuration uses the namespace '#{namespace}' but the redis-namespace gem is not included in the Gemfile." \ - "Add the gem to your Gemfile to continue using a namespace. Otherwise, remove the namespace parameter.") - exit(-127) - end - else - client - end - end - - def client_opts(options) - opts = options.dup - if opts[:namespace] - opts.delete(:namespace) - end - - if opts[:network_timeout] - opts[:timeout] = opts[:network_timeout] - opts.delete(:network_timeout) - end - - opts[:driver] ||= Redis::Connection.drivers.last || "ruby" - - # Issue #3303, redis-rb will silently retry an operation. - # This can lead to duplicate jobs if Sidekiq::Client's LPUSH - # is performed twice but I believe this is much, much rarer - # than the reconnect silently fixing a problem; we keep it - # on by default. - opts[:reconnect_attempts] ||= 1 - - opts + raise ArgumentError, "Your Redis connection pool is too small for Sidekiq. Your pool has #{size} connections but must have at least #{concurrency + 2}" if size < (concurrency + 2) end def log_info(options) @@ -110,9 +141,9 @@ sentinel[:password] = redacted if sentinel[:password] end if Sidekiq.server? 
- Sidekiq.logger.info("Booting Sidekiq #{Sidekiq::VERSION} with redis options #{scrubbed_options}") + Sidekiq.logger.info("Booting Sidekiq #{Sidekiq::VERSION} with #{adapter.name} options #{scrubbed_options}") else - Sidekiq.logger.debug("#{Sidekiq::NAME} client with redis options #{scrubbed_options}") + Sidekiq.logger.debug("#{Sidekiq::NAME} client with #{adapter.name} options #{scrubbed_options}") end end diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/ring_buffer.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/ring_buffer.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/ring_buffer.rb 1970-01-01 00:00:00.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/ring_buffer.rb 2022-10-26 06:38:14.000000000 +0000 @@ -0,0 +1,29 @@ +require "forwardable" + +module Sidekiq + class RingBuffer + include Enumerable + extend Forwardable + def_delegators :@buf, :[], :each, :size + + def initialize(size, default = 0) + @size = size + @buf = Array.new(size, default) + @index = 0 + end + + def <<(element) + @buf[@index % @size] = element + @index += 1 + element + end + + def buffer + @buf + end + + def reset(default = 0) + @buf.fill(default) + end + end +end diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/scheduled.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/scheduled.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/scheduled.rb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/scheduled.rb 2022-10-26 06:38:14.000000000 +0000 @@ -1,8 +1,7 @@ # frozen_string_literal: true require "sidekiq" -require "sidekiq/util" -require "sidekiq/api" +require "sidekiq/component" module Sidekiq module Scheduled @@ -52,8 +51,8 @@ @lua_zpopbyscore_sha = raw_conn.script(:load, LUA_ZPOPBYSCORE) end - conn.evalsha(@lua_zpopbyscore_sha, keys: keys, argv: argv) - rescue Redis::CommandError => e + conn.evalsha(@lua_zpopbyscore_sha, keys, argv) + rescue RedisConnection.adapter::CommandError => e raise unless e.message.start_with?("NOSCRIPT") @lua_zpopbyscore_sha = nil @@ -67,12 +66,13 @@ # just pops 
the job back onto its original queue so the # workers can pick it up like any other job. class Poller - include Util + include Sidekiq::Component INITIAL_WAIT = 10 - def initialize - @enq = (Sidekiq.options[:scheduled_enq] || Sidekiq::Scheduled::Enq).new + def initialize(options) + @config = options + @enq = (options[:scheduled_enq] || Sidekiq::Scheduled::Enq).new @sleeper = ConnectionPool::TimedStack.new @done = false @thread = nil @@ -100,7 +100,7 @@ enqueue wait end - Sidekiq.logger.info("Scheduler exiting...") + logger.info("Scheduler exiting...") } end @@ -147,13 +147,16 @@ # As we run more processes, the scheduling interval average will approach an even spread # between 0 and poll interval so we don't need this artifical boost. # - if process_count < 10 + count = process_count + interval = poll_interval_average(count) + + if count < 10 # For small clusters, calculate a random interval that is ±50% the desired average. - poll_interval_average * rand + poll_interval_average.to_f / 2 + interval * rand + interval.to_f / 2 else # With 10+ processes, we should have enough randomness to get decent polling # across the entire timespan - poll_interval_average * rand + interval * rand end end @@ -170,38 +173,64 @@ # the same time: the thundering herd problem. # # We only do this if poll_interval_average is unset (the default). - def poll_interval_average - Sidekiq.options[:poll_interval_average] ||= scaled_poll_interval + def poll_interval_average(count) + @config[:poll_interval_average] || scaled_poll_interval(count) end # Calculates an average poll interval based on the number of known Sidekiq processes. # This minimizes a single point of failure by dispersing check-ins but without taxing # Redis if you run many Sidekiq processes. 
- def scaled_poll_interval - process_count * Sidekiq.options[:average_scheduled_poll_interval] + def scaled_poll_interval(process_count) + process_count * @config[:average_scheduled_poll_interval] end def process_count - # The work buried within Sidekiq::ProcessSet#cleanup can be - # expensive at scale. Cut it down by 90% with this counter. - # NB: This method is only called by the scheduler thread so we - # don't need to worry about the thread safety of +=. - pcount = Sidekiq::ProcessSet.new(@count_calls % 10 == 0).size + pcount = Sidekiq.redis { |conn| conn.scard("processes") } pcount = 1 if pcount == 0 - @count_calls += 1 pcount end + # A copy of Sidekiq::ProcessSet#cleanup because server + # should never depend on sidekiq/api. + def cleanup + # dont run cleanup more than once per minute + return 0 unless Sidekiq.redis { |conn| conn.set("process_cleanup", "1", nx: true, ex: 60) } + + count = 0 + Sidekiq.redis do |conn| + procs = conn.sscan_each("processes").to_a + heartbeats = conn.pipelined { |pipeline| + procs.each do |key| + pipeline.hget(key, "info") + end + } + + # the hash named key has an expiry of 60 seconds. + # if it's not found, that means the process has not reported + # in to Redis and probably died. + to_prune = procs.select.with_index { |proc, i| + heartbeats[i].nil? + } + count = conn.srem("processes", to_prune) unless to_prune.empty? + end + count + end + def initial_wait - # Have all processes sleep between 5-15 seconds. 10 seconds - # to give time for the heartbeat to register (if the poll interval is going to be calculated by the number + # Have all processes sleep between 5-15 seconds. 10 seconds to give time for + # the heartbeat to register (if the poll interval is going to be calculated by the number # of workers), and 5 random seconds to ensure they don't all hit Redis at the same time. 
total = 0 - total += INITIAL_WAIT unless Sidekiq.options[:poll_interval_average] + total += INITIAL_WAIT unless @config[:poll_interval_average] total += (5 * rand) @sleeper.pop(total) rescue Timeout::Error + ensure + # periodically clean out the `processes` set in Redis which can collect + # references to dead processes over time. The process count affects how + # often we scan for scheduled jobs. + cleanup end end end diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/testing/inline.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/testing/inline.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/testing/inline.rb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/testing/inline.rb 2022-10-26 06:38:14.000000000 +0000 @@ -4,7 +4,7 @@ ## # The Sidekiq inline infrastructure overrides perform_async so that it -# actually calls perform instead. This allows workers to be run inline in a +# actually calls perform instead. This allows jobs to be run inline in a # testing environment. # # This is similar to `Resque.inline = true` functionality. @@ -15,8 +15,8 @@ # # $external_variable = 0 # -# class ExternalWorker -# include Sidekiq::Worker +# class ExternalJob +# include Sidekiq::Job # # def perform # $external_variable = 1 @@ -24,7 +24,7 @@ # end # # assert_equal 0, $external_variable -# ExternalWorker.perform_async +# ExternalJob.perform_async # assert_equal 1, $external_variable # Sidekiq::Testing.inline! diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/testing.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/testing.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/testing.rb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/testing.rb 2022-10-26 06:38:14.000000000 +0000 @@ -101,20 +101,20 @@ ## # The Queues class is only for testing the fake queue implementation. # There are 2 data structures involved in tandem. This is due to the - # Rspec syntax of change(QueueWorker.jobs, :size). It keeps a reference + # Rspec syntax of change(HardJob.jobs, :size). 
It keeps a reference # to the array. Because the array was dervied from a filter of the total # jobs enqueued, it appeared as though the array didn't change. # # To solve this, we'll keep 2 hashes containing the jobs. One with keys based - # on the queue, and another with keys of the worker names, so the array for - # QueueWorker.jobs is a straight reference to a real array. + # on the queue, and another with keys of the job type, so the array for + # HardJob.jobs is a straight reference to a real array. # # Queue-based hash: # # { # "default"=>[ # { - # "class"=>"TestTesting::QueueWorker", + # "class"=>"TestTesting::HardJob", # "args"=>[1, 2], # "retry"=>true, # "queue"=>"default", @@ -124,12 +124,12 @@ # ] # } # - # Worker-based hash: + # Job-based hash: # # { - # "TestTesting::QueueWorker"=>[ + # "TestTesting::HardJob"=>[ # { - # "class"=>"TestTesting::QueueWorker", + # "class"=>"TestTesting::HardJob", # "args"=>[1, 2], # "retry"=>true, # "queue"=>"default", @@ -144,14 +144,14 @@ # require 'sidekiq/testing' # # assert_equal 0, Sidekiq::Queues["default"].size - # HardWorker.perform_async(:something) + # HardJob.perform_async(:something) # assert_equal 1, Sidekiq::Queues["default"].size # assert_equal :something, Sidekiq::Queues["default"].first['args'][0] # - # You can also clear all workers' jobs: + # You can also clear all jobs: # # assert_equal 0, Sidekiq::Queues["default"].size - # HardWorker.perform_async(:something) + # HardJob.perform_async(:something) # Sidekiq::Queues.clear_all # assert_equal 0, Sidekiq::Queues["default"].size # @@ -170,35 +170,36 @@ def push(queue, klass, job) jobs_by_queue[queue] << job - jobs_by_worker[klass] << job + jobs_by_class[klass] << job end def jobs_by_queue @jobs_by_queue ||= Hash.new { |hash, key| hash[key] = [] } end - def jobs_by_worker - @jobs_by_worker ||= Hash.new { |hash, key| hash[key] = [] } + def jobs_by_class + @jobs_by_class ||= Hash.new { |hash, key| hash[key] = [] } end + alias_method :jobs_by_worker, 
:jobs_by_class def delete_for(jid, queue, klass) jobs_by_queue[queue.to_s].delete_if { |job| job["jid"] == jid } - jobs_by_worker[klass].delete_if { |job| job["jid"] == jid } + jobs_by_class[klass].delete_if { |job| job["jid"] == jid } end def clear_for(queue, klass) - jobs_by_queue[queue].clear - jobs_by_worker[klass].clear + jobs_by_queue[queue.to_s].clear + jobs_by_class[klass].clear end def clear_all jobs_by_queue.clear - jobs_by_worker.clear + jobs_by_class.clear end end end - module Worker + module Job ## # The Sidekiq testing infrastructure overrides perform_async # so that it does not actually touch the network. Instead it @@ -212,16 +213,16 @@ # # require 'sidekiq/testing' # - # assert_equal 0, HardWorker.jobs.size - # HardWorker.perform_async(:something) - # assert_equal 1, HardWorker.jobs.size - # assert_equal :something, HardWorker.jobs[0]['args'][0] + # assert_equal 0, HardJob.jobs.size + # HardJob.perform_async(:something) + # assert_equal 1, HardJob.jobs.size + # assert_equal :something, HardJob.jobs[0]['args'][0] # # assert_equal 0, Sidekiq::Extensions::DelayedMailer.jobs.size # MyMailer.delay.send_welcome_email('foo@example.com') # assert_equal 1, Sidekiq::Extensions::DelayedMailer.jobs.size # - # You can also clear and drain all workers' jobs: + # You can also clear and drain all job types: # # assert_equal 0, Sidekiq::Extensions::DelayedMailer.jobs.size # assert_equal 0, Sidekiq::Extensions::DelayedModel.jobs.size @@ -241,14 +242,14 @@ # # RSpec.configure do |config| # config.before(:each) do - # Sidekiq::Worker.clear_all + # Sidekiq::Job.clear_all # end # end # # or for acceptance testing, i.e. 
with cucumber: # # AfterStep do - # Sidekiq::Worker.drain_all + # Sidekiq::Job.drain_all # end # # When I sign up as "foo@example.com" @@ -262,7 +263,7 @@ # Jobs queued for this worker def jobs - Queues.jobs_by_worker[to_s] + Queues.jobs_by_class[to_s] end # Clear all jobs for this worker @@ -288,11 +289,11 @@ end def process_job(job) - worker = new - worker.jid = job["jid"] - worker.bid = job["bid"] if worker.respond_to?(:bid=) - Sidekiq::Testing.server_middleware.invoke(worker, job, job["queue"]) do - execute_job(worker, job["args"]) + inst = new + inst.jid = job["jid"] + inst.bid = job["bid"] if inst.respond_to?(:bid=) + Sidekiq::Testing.server_middleware.invoke(inst, job, job["queue"]) do + execute_job(inst, job["args"]) end end @@ -306,18 +307,18 @@ Queues.jobs_by_queue.values.flatten end - # Clear all queued jobs across all workers + # Clear all queued jobs def clear_all Queues.clear_all end - # Drain all queued jobs across all workers + # Drain (execute) all queued jobs def drain_all while jobs.any? 
- worker_classes = jobs.map { |job| job["class"] }.uniq + job_classes = jobs.map { |job| job["class"] }.uniq - worker_classes.each do |worker_class| - Sidekiq::Testing.constantize(worker_class).drain + job_classes.each do |job_class| + Sidekiq::Testing.constantize(job_class).drain end end end diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/transaction_aware_client.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/transaction_aware_client.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/transaction_aware_client.rb 1970-01-01 00:00:00.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/transaction_aware_client.rb 2022-10-26 06:38:14.000000000 +0000 @@ -0,0 +1,45 @@ +# frozen_string_literal: true + +require "securerandom" +require "sidekiq/client" + +module Sidekiq + class TransactionAwareClient + def initialize(redis_pool) + @redis_client = Client.new(redis_pool) + end + + def push(item) + # pre-allocate the JID so we can return it immediately and + # save it to the database as part of the transaction. + item["jid"] ||= SecureRandom.hex(12) + AfterCommitEverywhere.after_commit { @redis_client.push(item) } + item["jid"] + end + + ## + # We don't provide transactionality for push_bulk because we don't want + # to hold potentially hundreds of thousands of job records in memory due to + # a long running enqueue process. + def push_bulk(items) + @redis_client.push_bulk(items) + end + end +end + +## +# Use `Sidekiq.transactional_push!` in your sidekiq.rb initializer +module Sidekiq + def self.transactional_push! 
+ begin + require "after_commit_everywhere" + rescue LoadError + Sidekiq.logger.error("You need to add after_commit_everywhere to your Gemfile to use Sidekiq's transactional client") + raise + end + + default_job_options["client_class"] = Sidekiq::TransactionAwareClient + Sidekiq::JobUtil::TRANSIENT_ATTRIBUTES << "client_class" + true + end +end diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/util.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/util.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/util.rb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/util.rb 1970-01-01 00:00:00.000000000 +0000 @@ -1,108 +0,0 @@ -# frozen_string_literal: true - -require "forwardable" -require "socket" -require "securerandom" -require "sidekiq/exception_handler" - -module Sidekiq - ## - # This module is part of Sidekiq core and not intended for extensions. - # - - class RingBuffer - include Enumerable - extend Forwardable - def_delegators :@buf, :[], :each, :size - - def initialize(size, default = 0) - @size = size - @buf = Array.new(size, default) - @index = 0 - end - - def <<(element) - @buf[@index % @size] = element - @index += 1 - element - end - - def buffer - @buf - end - - def reset(default = 0) - @buf.fill(default) - end - end - - module Util - include ExceptionHandler - - # hack for quicker development / testing environment #2774 - PAUSE_TIME = $stdout.tty? ? 0.1 : 0.5 - - # Wait for the orblock to be true or the deadline passed. 
- def wait_for(deadline, &condblock) - remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - while remaining > PAUSE_TIME - return if condblock.call - sleep PAUSE_TIME - remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - end - end - - def watchdog(last_words) - yield - rescue Exception => ex - handle_exception(ex, {context: last_words}) - raise ex - end - - def safe_thread(name, &block) - Thread.new do - Thread.current.name = name - watchdog(name, &block) - end - end - - def logger - Sidekiq.logger - end - - def redis(&block) - Sidekiq.redis(&block) - end - - def tid - Thread.current["sidekiq_tid"] ||= (Thread.current.object_id ^ ::Process.pid).to_s(36) - end - - def hostname - ENV["DYNO"] || Socket.gethostname - end - - def process_nonce - @@process_nonce ||= SecureRandom.hex(6) - end - - def identity - @@identity ||= "#{hostname}:#{::Process.pid}:#{process_nonce}" - end - - def fire_event(event, options = {}) - reverse = options[:reverse] - reraise = options[:reraise] - - arr = Sidekiq.options[:lifecycle_events][event] - arr.reverse! 
if reverse - arr.each do |block| - block.call - rescue => ex - handle_exception(ex, {context: "Exception during Sidekiq lifecycle event.", event: event}) - raise ex if reraise - end - arr.clear - end - end -end diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/version.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/version.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/version.rb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/version.rb 2022-10-26 06:38:14.000000000 +0000 @@ -1,5 +1,5 @@ # frozen_string_literal: true module Sidekiq - VERSION = "6.4.1" + VERSION = "6.5.7" end diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/web/action.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/web/action.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/web/action.rb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/web/action.rb 2022-10-26 06:38:14.000000000 +0000 @@ -15,11 +15,11 @@ end def halt(res) - throw :halt, [res, {"Content-Type" => "text/plain"}, [res.to_s]] + throw :halt, [res, {"content-type" => "text/plain"}, [res.to_s]] end def redirect(location) - throw :halt, [302, {"Location" => "#{request.base_url}#{location}"}, []] + throw :halt, [302, {"location" => "#{request.base_url}#{location}"}, []] end def params @@ -68,7 +68,7 @@ end def json(payload) - [200, {"Content-Type" => "application/json", "Cache-Control" => "private, no-store"}, [Sidekiq.dump_json(payload)]] + [200, {"content-type" => "application/json", "cache-control" => "private, no-store"}, [Sidekiq.dump_json(payload)]] end def initialize(env, block) diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/web/application.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/web/application.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/web/application.rb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/web/application.rb 2022-10-26 06:38:14.000000000 +0000 @@ -60,6 +60,19 @@ erb(:dashboard) end + get "/metrics" do + q = Sidekiq::Metrics::Query.new + @query_result = q.top_jobs + 
erb(:metrics) + end + + get "/metrics/:name" do + @name = route_params[:name] + q = Sidekiq::Metrics::Query.new + @query_result = q.for_job(@name) + erb(:metrics_for_job) + end + get "/busy" do erb(:busy) end @@ -299,7 +312,7 @@ def call(env) action = self.class.match(env) - return [404, {"Content-Type" => "text/plain", "X-Cascade" => "pass"}, ["Not Found"]] unless action + return [404, {"content-type" => "text/plain", "x-cascade" => "pass"}, ["Not Found"]] unless action app = @klass resp = catch(:halt) do @@ -316,10 +329,10 @@ else # rendered content goes here headers = { - "Content-Type" => "text/html", - "Cache-Control" => "private, no-store", - "Content-Language" => action.locale, - "Content-Security-Policy" => CSP_HEADER + "content-type" => "text/html", + "cache-control" => "private, no-store", + "content-language" => action.locale, + "content-security-policy" => CSP_HEADER } # we'll let Rack calculate Content-Length for us. [200, headers, [resp]] diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/web/csrf_protection.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/web/csrf_protection.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/web/csrf_protection.rb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/web/csrf_protection.rb 2022-10-26 06:38:14.000000000 +0000 @@ -143,7 +143,7 @@ one_time_pad = SecureRandom.random_bytes(token.length) encrypted_token = xor_byte_strings(one_time_pad, token) masked_token = one_time_pad + encrypted_token - Base64.strict_encode64(masked_token) + Base64.urlsafe_encode64(masked_token) end # Essentially the inverse of +mask_token+. 
@@ -169,7 +169,7 @@ end def decode_token(token) - Base64.strict_decode64(token) + Base64.urlsafe_decode64(token) end def xor_byte_strings(s1, s2) diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/web/helpers.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/web/helpers.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/web/helpers.rb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/web/helpers.rb 2022-10-26 06:38:14.000000000 +0000 @@ -15,7 +15,7 @@ # so extensions can be localized @strings[lang] ||= settings.locales.each_with_object({}) do |path, global| find_locale_files(lang).each do |file| - strs = YAML.load(File.open(file)) + strs = YAML.safe_load(File.open(file)) global.merge!(strs[lang]) end end @@ -140,14 +140,37 @@ params[:direction] == "asc" ? "↑" : "↓" end - def workers - @workers ||= Sidekiq::Workers.new + def workset + @work ||= Sidekiq::WorkSet.new end def processes @processes ||= Sidekiq::ProcessSet.new end + # Sorts processes by hostname following the natural sort order so that + # 'worker.1' < 'worker.2' < 'worker.10' < 'worker.20' + # '2.1.1.1' < '192.168.0.2' < '192.168.0.10' + def sorted_processes + @sorted_processes ||= begin + return processes unless processes.all? { |p| p["hostname"] } + + split_characters = /[._-]+/ + + padding = processes.flat_map { |p| p["hostname"].split(split_characters) }.map(&:size).max + + processes.to_a.sort_by do |process| + process["hostname"].split(split_characters).map do |substring| + # Left-pad the substring with '0' if it starts with a number or 'a' + # otherwise, so that '25' < 192' < 'a' ('025' < '192' < 'aaa') + padding_char = substring[0].match?(/\d/) ? "0" : "a" + + substring.rjust(padding, padding_char) + end + end + end + end + def stats @stats ||= Sidekiq::Stats.new end @@ -175,7 +198,7 @@ end def current_status - workers.size == 0 ? "idle" : "active" + workset.size == 0 ? 
"idle" : "active" end def relative_time(time) @@ -301,7 +324,7 @@ end def environment_title_prefix - environment = Sidekiq.options[:environment] || ENV["APP_ENV"] || ENV["RAILS_ENV"] || ENV["RACK_ENV"] || "development" + environment = Sidekiq[:environment] || ENV["APP_ENV"] || ENV["RAILS_ENV"] || ENV["RACK_ENV"] || "development" "[#{environment.upcase}] " unless environment == "production" end diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/web.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/web.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/web.rb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/web.rb 2022-10-26 06:38:14.000000000 +0000 @@ -33,6 +33,10 @@ "Dead" => "morgue" } + if ENV["SIDEKIQ_METRICS_BETA"] == "1" + DEFAULT_TABS["Metrics"] = "metrics" + end + class << self def settings self @@ -144,7 +148,7 @@ m = middlewares rules = [] - rules = [[:all, {"Cache-Control" => "public, max-age=86400"}]] unless ENV["SIDEKIQ_WEB_TESTING"] + rules = [[:all, {"cache-control" => "public, max-age=86400"}]] unless ENV["SIDEKIQ_WEB_TESTING"] ::Rack::Builder.new do use Rack::Static, urls: ["/stylesheets", "/images", "/javascripts"], diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/worker.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/worker.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq/worker.rb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq/worker.rb 2022-10-26 06:38:14.000000000 +0000 @@ -82,7 +82,7 @@ end def get_sidekiq_options # :nodoc: - self.sidekiq_options_hash ||= Sidekiq.default_worker_options + self.sidekiq_options_hash ||= Sidekiq.default_job_options end def sidekiq_class_attribute(*attrs) @@ -175,16 +175,18 @@ def initialize(klass, opts) @klass = klass - @opts = opts + # NB: the internal hash always has stringified keys + @opts = opts.transform_keys(&:to_s) # ActiveJob compatibility - interval = @opts.delete(:wait_until) || @opts.delete(:wait) + interval = @opts.delete("wait_until") || @opts.delete("wait") at(interval) if interval 
end def set(options) - interval = options.delete(:wait_until) || options.delete(:wait) - @opts.merge!(options) + hash = options.transform_keys(&:to_s) + interval = hash.delete("wait_until") || @opts.delete("wait") + @opts.merge!(hash) at(interval) if interval self end @@ -200,7 +202,7 @@ # Explicit inline execution of a job. Returns nil if the job did not # execute, true otherwise. def perform_inline(*args) - raw = @opts.merge("args" => args, "class" => @klass).transform_keys(&:to_s) + raw = @opts.merge("args" => args, "class" => @klass) # validate and normalize payload item = normalize_item(raw) @@ -235,11 +237,9 @@ alias_method :perform_sync, :perform_inline def perform_bulk(args, batch_size: 1_000) - hash = @opts.transform_keys(&:to_s) - pool = Thread.current[:sidekiq_via_pool] || @klass.get_sidekiq_options["pool"] || Sidekiq.redis_pool - client = Sidekiq::Client.new(pool) + client = @klass.build_client result = args.each_slice(batch_size).flat_map do |slice| - client.push_bulk(hash.merge("class" => @klass, "args" => slice)) + client.push_bulk(@opts.merge("class" => @klass, "args" => slice)) end result.is_a?(Enumerator::Lazy) ? result.force : result @@ -293,6 +293,7 @@ def perform_inline(*args) Setter.new(self, {}).perform_inline(*args) end + alias_method :perform_sync, :perform_inline ## # Push a large number of jobs to Redis, while limiting the batch of @@ -339,7 +340,7 @@ # Legal options: # # queue - use a named queue for this Worker, default 'default' - # retry - enable the RetryJobs middleware for this Worker, *true* to use the default + # retry - enable retries via JobRetry, *true* to use the default # or *Integer* count # backtrace - whether to save any error backtrace in the retry payload to display in web UI, # can be true, false or an integer number of lines to save, default *false* @@ -347,15 +348,22 @@ # # In practice, any option is allowed. This is the main mechanism to configure the # options for a specific job. 
+ # + # These options will be saved into the serialized job when enqueued by + # the client. def sidekiq_options(opts = {}) super end def client_push(item) # :nodoc: - pool = Thread.current[:sidekiq_via_pool] || get_sidekiq_options["pool"] || Sidekiq.redis_pool - stringified_item = item.transform_keys(&:to_s) + raise ArgumentError, "Job payloads should contain no Symbols: #{item}" if item.any? { |k, v| k.is_a?(::Symbol) } + build_client.push(item) + end - Sidekiq::Client.new(pool).push(stringified_item) + def build_client # :nodoc: + pool = Thread.current[:sidekiq_via_pool] || get_sidekiq_options["pool"] || Sidekiq.redis_pool + client_class = get_sidekiq_options["client_class"] || Sidekiq::Client + client_class.new(pool) end end end diff -Nru ruby-sidekiq-6.4.1+dfsg/lib/sidekiq.rb ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq.rb --- ruby-sidekiq-6.4.1+dfsg/lib/sidekiq.rb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/lib/sidekiq.rb 2022-10-26 06:38:14.000000000 +0000 @@ -5,6 +5,7 @@ require "sidekiq/logger" require "sidekiq/client" +require "sidekiq/transaction_aware_client" require "sidekiq/worker" require "sidekiq/job" require "sidekiq/redis_connection" @@ -33,18 +34,16 @@ startup: [], quiet: [], shutdown: [], - heartbeat: [] + # triggers when we fire the first heartbeat on startup OR repairing a network partition + heartbeat: [], + # triggers on EVERY heartbeat call, every 10 seconds + beat: [] }, dead_max_jobs: 10_000, dead_timeout_in_seconds: 180 * 24 * 60 * 60, # 6 months reloader: proc { |&block| block.call } } - DEFAULT_WORKER_OPTIONS = { - "retry" => true, - "queue" => "default" - } - FAKE_INFO = { "redis_version" => "9.9.9", "uptime_in_days" => "9999", @@ -57,19 +56,84 @@ puts "Calm down, yo." 
end + # config.concurrency = 5 + def self.concurrency=(val) + self[:concurrency] = Integer(val) + end + + # config.queues = %w( high default low ) # strict + # config.queues = %w( high,3 default,2 low,1 ) # weighted + # config.queues = %w( feature1,1 feature2,1 feature3,1 ) # random + # + # With weighted priority, queue will be checked first (weight / total) of the time. + # high will be checked first (3/6) or 50% of the time. + # I'd recommend setting weights between 1-10. Weights in the hundreds or thousands + # are ridiculous and unnecessarily expensive. You can get random queue ordering + # by explicitly setting all weights to 1. + def self.queues=(val) + self[:queues] = Array(val).each_with_object([]) do |qstr, memo| + name, weight = qstr.split(",") + self[:strict] = false if weight.to_i > 0 + [weight.to_i, 1].max.times do + memo << name + end + end + end + + ### Private APIs + def self.default_error_handler(ex, ctx) + logger.warn(dump_json(ctx)) unless ctx.empty? + logger.warn("#{ex.class.name}: #{ex.message}") + logger.warn(ex.backtrace.join("\n")) unless ex.backtrace.nil? + end + + # DEFAULT_ERROR_HANDLER is a constant that allows the default error handler to + # be referenced. It must be defined here, after the default_error_handler + # method is defined. 
+ DEFAULT_ERROR_HANDLER = method(:default_error_handler) + + @config = DEFAULTS.dup def self.options - @options ||= DEFAULTS.dup + logger.warn "`config.options[:key] = value` is deprecated, use `config[:key] = value`: #{caller(1..2)}" + @config end def self.options=(opts) - @options = opts + logger.warn "config.options = hash` is deprecated, use `config.merge!(hash)`: #{caller(1..2)}" + @config = opts end + def self.[](key) + @config[key] + end + + def self.[]=(key, val) + @config[key] = val + end + + def self.merge!(hash) + @config.merge!(hash) + end + + def self.fetch(*args, &block) + @config.fetch(*args, &block) + end + + def self.handle_exception(ex, ctx = {}) + self[:error_handlers].each do |handler| + handler.call(ex, ctx) + rescue => ex + logger.error "!!! ERROR HANDLER THREW AN ERROR !!!" + logger.error ex + logger.error ex.backtrace.join("\n") unless ex.backtrace.nil? + end + end + ### + ## # Configuration for Sidekiq server, use like: # # Sidekiq.configure_server do |config| - # config.redis = { :namespace => 'myapp', :size => 25, :url => 'redis://myhost:8877/0' } # config.server_middleware do |chain| # chain.add MyServerHook # end @@ -82,7 +146,7 @@ # Configuration for Sidekiq client, use like: # # Sidekiq.configure_client do |config| - # config.redis = { :namespace => 'myapp', :size => 1, :url => 'redis://myhost:8877/0' } + # config.redis = { size: 1, url: 'redis://myhost:8877/0' } # end def self.configure_client yield self unless server? @@ -98,7 +162,7 @@ retryable = true begin yield conn - rescue Redis::BaseError => ex + rescue RedisConnection.adapter::BaseError => ex # 2550 Failover can cause the server to become a replica, need # to disconnect and reopen the socket to get back to the primary. 
# 4495 Use the same logic if we have a "Not enough replicas" error from the primary @@ -123,7 +187,7 @@ else conn.info end - rescue Redis::CommandError => ex + rescue RedisConnection.adapter::CommandError => ex # 2850 return fake version when INFO command has (probably) been renamed raise unless /unknown command/.match?(ex.message) FAKE_INFO @@ -131,19 +195,19 @@ end def self.redis_pool - @redis ||= Sidekiq::RedisConnection.create + @redis ||= RedisConnection.create end def self.redis=(hash) @redis = if hash.is_a?(ConnectionPool) hash else - Sidekiq::RedisConnection.create(hash) + RedisConnection.create(hash) end end def self.client_middleware - @client_chain ||= Middleware::Chain.new + @client_chain ||= Middleware::Chain.new(self) yield @client_chain if block_given? @client_chain end @@ -155,16 +219,23 @@ end def self.default_server_middleware - Middleware::Chain.new + Middleware::Chain.new(self) + end + + def self.default_worker_options=(hash) # deprecated + @default_job_options = default_job_options.merge(hash.transform_keys(&:to_s)) end - def self.default_worker_options=(hash) - # stringify - @default_worker_options = default_worker_options.merge(hash.transform_keys(&:to_s)) + def self.default_job_options=(hash) + @default_job_options = default_job_options.merge(hash.transform_keys(&:to_s)) end - def self.default_worker_options - defined?(@default_worker_options) ? 
@default_worker_options : DEFAULT_WORKER_OPTIONS + def self.default_worker_options # deprecated + @default_job_options ||= {"retry" => true, "queue" => "default"} + end + + def self.default_job_options + @default_job_options ||= {"retry" => true, "queue" => "default"} end ## @@ -177,7 +248,7 @@ # end # end def self.death_handlers - options[:death_handlers] + self[:death_handlers] end def self.load_json(string) @@ -202,7 +273,7 @@ end def self.logger - @logger ||= Sidekiq::Logger.new($stdout, level: Logger::INFO) + @logger ||= Sidekiq::Logger.new($stdout, level: :info) end def self.logger=(logger) @@ -220,13 +291,17 @@ defined?(Sidekiq::Pro) end + def self.ent? + defined?(Sidekiq::Enterprise) + end + # How frequently Redis should be checked by a random Sidekiq process for # scheduled and retriable jobs. Each individual process will take turns by # waiting some multiple of this value. # # See sidekiq/scheduled.rb for an in-depth explanation of this value def self.average_scheduled_poll_interval=(interval) - options[:average_scheduled_poll_interval] = interval + self[:average_scheduled_poll_interval] = interval end # Register a proc to handle any error which occurs within the Sidekiq process. @@ -237,7 +312,7 @@ # # The default error handler logs errors to Sidekiq.logger. def self.error_handlers - options[:error_handlers] + self[:error_handlers] end # Register a block to run at a point in the Sidekiq lifecycle. 
@@ -250,20 +325,20 @@ # end def self.on(event, &block) raise ArgumentError, "Symbols only please: #{event}" unless event.is_a?(Symbol) - raise ArgumentError, "Invalid event name: #{event}" unless options[:lifecycle_events].key?(event) - options[:lifecycle_events][event] << block + raise ArgumentError, "Invalid event name: #{event}" unless self[:lifecycle_events].key?(event) + self[:lifecycle_events][event] << block end def self.strict_args!(mode = :raise) - options[:on_complex_arguments] = mode + self[:on_complex_arguments] = mode end - # We are shutting down Sidekiq but what about workers that + # We are shutting down Sidekiq but what about threads that # are working on some long job? This error is - # raised in workers that have not finished within the hard + # raised in jobs that have not finished within the hard # timeout limit. This is needed to rollback db transactions, # otherwise Ruby's Thread#kill will commit. See #377. - # DO NOT RESCUE THIS ERROR IN YOUR WORKERS + # DO NOT RESCUE THIS ERROR IN YOUR JOBS class Shutdown < Interrupt; end end diff -Nru ruby-sidekiq-6.4.1+dfsg/README.md ruby-sidekiq-6.5.7+dfsg3/README.md --- ruby-sidekiq-6.4.1+dfsg/README.md 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/README.md 2022-10-26 06:38:14.000000000 +0000 @@ -36,7 +36,7 @@ Installation ----------------- - gem install sidekiq + bundle add sidekiq Getting Started diff -Nru ruby-sidekiq-6.4.1+dfsg/sidekiq.gemspec ruby-sidekiq-6.5.7+dfsg3/sidekiq.gemspec --- ruby-sidekiq-6.4.1+dfsg/sidekiq.gemspec 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/sidekiq.gemspec 2022-10-26 06:38:14.000000000 +0000 @@ -22,7 +22,7 @@ "source_code_uri" => "https://github.com/mperham/sidekiq" } - gem.add_dependency "redis", ">= 4.2.0" - gem.add_dependency "connection_pool", ">= 2.2.2" + gem.add_dependency "redis", "<5", ">= 4.5.0" + gem.add_dependency "connection_pool", ">= 2.2.5" gem.add_dependency "rack", "~> 2.0" end diff -Nru 
ruby-sidekiq-6.4.1+dfsg/web/assets/javascripts/metrics.js ruby-sidekiq-6.5.7+dfsg3/web/assets/javascripts/metrics.js --- ruby-sidekiq-6.4.1+dfsg/web/assets/javascripts/metrics.js 1970-01-01 00:00:00.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/web/assets/javascripts/metrics.js 2022-10-26 06:38:14.000000000 +0000 @@ -0,0 +1,262 @@ +if (window.matchMedia('(prefers-color-scheme: dark)').matches) { + Chart.defaults.borderColor = "#333" + Chart.defaults.color = "#aaa" +} + +class BaseChart { + constructor(id, options) { + this.ctx = document.getElementById(id); + this.options = options + this.fallbackColor = "#999"; + this.colors = [ + // Colors taken from https://www.chartjs.org/docs/latest/samples/utils.html + "#537bc4", + "#4dc9f6", + "#f67019", + "#f53794", + "#acc236", + "#166a8f", + "#00a950", + "#58595b", + "#8549ba", + "#991b1b", + ]; + + this.chart = new Chart(this.ctx, { + type: this.options.chartType, + data: { labels: this.options.labels, datasets: this.datasets }, + options: this.chartOptions, + }); + } + + addMarksToChart() { + this.options.marks.forEach(([bucket, label], i) => { + this.chart.options.plugins.annotation.annotations[`deploy-${i}`] = { + type: "line", + xMin: bucket, + xMax: bucket, + borderColor: "rgba(220, 38, 38, 0.4)", + borderWidth: 2, + }; + }); + } +} + +class JobMetricsOverviewChart extends BaseChart { + constructor(id, options) { + super(id, { ...options, chartType: "line" }); + this.swatches = []; + + this.addMarksToChart(); + this.chart.update(); + } + + registerSwatch(id) { + const el = document.getElementById(id); + el.onchange = () => this.toggle(el.value, el.checked); + this.swatches[el.value] = el; + this.updateSwatch(el.value); + } + + updateSwatch(kls) { + const el = this.swatches[kls]; + const ds = this.chart.data.datasets.find((ds) => ds.label == kls); + el.checked = !!ds; + el.style.color = ds ? 
ds.borderColor : null; + } + + toggle(kls, visible) { + if (visible) { + this.chart.data.datasets.push(this.dataset(kls)); + } else { + const i = this.chart.data.datasets.findIndex((ds) => ds.label == kls); + this.colors.unshift(this.chart.data.datasets[i].borderColor); + this.chart.data.datasets.splice(i, 1); + } + + this.updateSwatch(kls); + this.chart.update(); + } + + dataset(kls) { + const color = this.colors.shift() || this.fallbackColor; + + return { + label: kls, + data: this.options.series[kls], + borderColor: color, + backgroundColor: color, + borderWidth: 2, + pointRadius: 2, + }; + } + + get datasets() { + return Object.entries(this.options.series) + .filter(([kls, _]) => this.options.visible.includes(kls)) + .map(([kls, _]) => this.dataset(kls)); + } + + get chartOptions() { + return { + aspectRatio: 4, + scales: { + y: { + beginAtZero: true, + title: { + text: "Total execution time (sec)", + display: true, + }, + }, + }, + interaction: { + mode: "x", + }, + plugins: { + legend: { + display: false, + }, + tooltip: { + callbacks: { + title: (items) => `${items[0].label} UTC`, + label: (item) => + `${item.dataset.label}: ${item.parsed.y.toFixed(1)} seconds`, + footer: (items) => { + const bucket = items[0].label; + const marks = this.options.marks.filter(([b, _]) => b == bucket); + return marks.map(([b, msg]) => `Deploy: ${msg}`); + }, + }, + }, + }, + }; + } +} + +class HistTotalsChart extends BaseChart { + constructor(id, options) { + super(id, { ...options, chartType: "bar" }); + } + + get datasets() { + return [{ + data: this.options.series, + backgroundColor: this.colors[0], + borderWidth: 0, + }]; + } + + get chartOptions() { + return { + aspectRatio: 6, + scales: { + y: { + beginAtZero: true, + title: { + text: "Total jobs", + display: true, + }, + }, + }, + interaction: { + mode: "x", + }, + plugins: { + legend: { + display: false, + }, + tooltip: { + callbacks: { + label: (item) => `${item.parsed.y} jobs`, + }, + }, + }, + }; + } +} + +class 
HistBubbleChart extends BaseChart { + constructor(id, options) { + super(id, { ...options, chartType: "bubble" }); + + this.addMarksToChart(); + this.chart.update(); + } + + get datasets() { + const data = []; + let maxCount = 0; + + Object.entries(this.options.hist).forEach(([bucket, hist]) => { + hist.forEach((count, histBucket) => { + if (count > 0) { + data.push({ + x: bucket, + // histogram data is ordered fastest to slowest, but this.histIntervals is + // slowest to fastest (so it displays correctly on the chart). + y: + this.options.histIntervals[this.options.histIntervals.length - 1 - histBucket] / + 1000, + count: count, + }); + + if (count > maxCount) maxCount = count; + } + }); + }); + + // Chart.js will not calculate the bubble size. We have to do that. + const maxRadius = this.ctx.offsetWidth / this.options.labels.length; + const minRadius = 1 + const multiplier = (maxRadius / maxCount) * 1.5; + data.forEach((entry) => { + entry.r = entry.count * multiplier + minRadius; + }); + + return [{ + data: data, + backgroundColor: "#537bc4", + borderColor: "#537bc4", + }]; + } + + get chartOptions() { + return { + aspectRatio: 3, + scales: { + x: { + type: "category", + labels: this.options.labels, + }, + y: { + title: { + text: "Execution time (sec)", + display: true, + }, + }, + }, + interaction: { + mode: "x", + }, + plugins: { + legend: { + display: false, + }, + tooltip: { + callbacks: { + title: (items) => `${items[0].raw.x} UTC`, + label: (item) => + `${item.parsed.y} seconds: ${item.raw.count} job${ + item.raw.count == 1 ? 
"" : "s" + }`, + footer: (items) => { + const bucket = items[0].raw.x; + const marks = this.options.marks.filter(([b, _]) => b == bucket); + return marks.map(([b, msg]) => `Deploy: ${msg}`); + }, + }, + }, + }, + }; + } +} diff -Nru ruby-sidekiq-6.4.1+dfsg/web/assets/stylesheets/application.css ruby-sidekiq-6.5.7+dfsg3/web/assets/stylesheets/application.css --- ruby-sidekiq-6.4.1+dfsg/web/assets/stylesheets/application.css 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/web/assets/stylesheets/application.css 2022-10-26 06:38:14.000000000 +0000 @@ -67,10 +67,15 @@ padding: 0 20px; } -h3 { +h1, h2, h3 { + font-size: 24px; line-height: 45px; } +.header-with-subheader h2 { + margin-top: -18px; +} + .centered { text-align: center; } @@ -202,8 +207,7 @@ .navbar .navbar-brand .status { color: #585454; - display: inline-block; - width: 75px; + display: inline; } @@ -955,3 +959,41 @@ padding: 3px 7px; margin-left: 5px; } + +.metrics-swatch-wrapper { + display: flex; + align-items: center; + gap: 6px; +} + +.metrics-swatch[type=checkbox] { + display: inline-block; + width: 16px; + height: 16px; + margin: 0; + border-radius: 2px; + appearance: none; + -webkit-appearance: none; + -moz-appearance: none; + border: 1px solid #bbb; + color: white; + background-color: currentColor; +} + +/* We need to add the checkmark since we've taken over the appearance */ +.metrics-swatch[type=checkbox]:checked { + border-color: currentColor; + background-image: url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3cpath d='M12.207 4.793a1 1 0 010 1.414l-5 5a1 1 0 01-1.414 0l-2-2a1 1 0 011.414-1.414L6.5 9.086l4.293-4.293a1 1 0 011.414 0z'/%3e%3c/svg%3e"); + background-size: 100% 100%; + background-position: center; + background-repeat: no-repeat; +} + +.metrics-swatch[type=checkbox]:focus { + outline: 1px solid #888; + outline-offset: 2px; +} + +canvas { + margin: 20px 0 30px; +} diff -Nru 
ruby-sidekiq-6.4.1+dfsg/web/locales/el.yml ruby-sidekiq-6.5.7+dfsg3/web/locales/el.yml --- ruby-sidekiq-6.4.1+dfsg/web/locales/el.yml 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/web/locales/el.yml 2022-10-26 06:38:14.000000000 +0000 @@ -6,11 +6,12 @@ Namespace: Namespace Realtime: Τρέχουσα Κατάσταση History: Ιστορικό - Busy: Απασχολημένο - Processed: Επεξεργάστηκε - Failed: Απέτυχε - Scheduled: Προγραματίστηκε - Retries: Προσπάθειες + Busy: Υπό επεξεργασία + Utilization: Σε χρήση + Processed: Επεξεργάστηκαν + Failed: Απέτυχαν + Scheduled: Προγραμματισμένα + Retries: Επαναλήψεις Enqueued: Μπήκαν στην στοίβα Worker: Εργάτης LivePoll: Τρέχουσα Κατάσταση @@ -20,40 +21,42 @@ Job: Εργασία Arguments: Ορίσματα Extras: Extras - Started: Ξεκίνησαν + Started: Ξεκίνησε ShowAll: Εμφάνιση Όλων CurrentMessagesInQueue: Τρέχουσες εργασίες %{queue} Delete: Διαγραφή AddToQueue: Προσθήκη στην στοίβα - AreYouSureDeleteJob: Θέλετε να διαγράψετε την εργασία αυτη; - AreYouSureDeleteQueue: Θέλετε να διαγράψετε την %{queue} στοίβα? + AreYouSureDeleteJob: Θέλετε να διαγράψετε αυτή την εργασία; + AreYouSureDeleteQueue: Θέλετε να διαγράψετε την στοίβα %{queue}; Αυτό θα διαγράψει όλες τις εργασίες εντός της στοίβας, θα εμφανιστεί ξανά εάν προωθήσετε περισσότερες εργασίες σε αυτήν στο μέλλον. Queues: Στοίβες Size: Μέγεθος Actions: Ενέργειες - NextRetry: Επόμενη προσπάθεια - RetryCount: Αριθμός προσπαθειών - RetryNow: Προσπάθησε τώρα - LastRetry: Τελευταία προσπάθεια + NextRetry: Επόμενη Προσπάθεια + RetryCount: Αριθμός Προσπαθειών + RetryNow: Επανάληψη Τώρα + # Kill: Kill + LastRetry: Τελευταία Προσπάθεια OriginallyFailed: Αρχικές Αποτυχίες - AreYouSure: Είστε σίγουρος? 
- DeleteAll: Διαγραφή όλων + AreYouSure: Είστε σίγουρος; + DeleteAll: Διαγραφή Όλων RetryAll: Επανάληψη Όλων - NoRetriesFound: Δεν βρέθηκαν προσπάθειες + # KillAll: Kill All + NoRetriesFound: Δεν βρέθηκαν εργασίες προς επαναλήψη Error: Σφάλμα ErrorClass: Κλάση σφάλματος ErrorMessage: Μήνυμα Σφάλματος - ErrorBacktrace: Σφάλμα Backtrace + ErrorBacktrace: Backtrace Σφάλματος GoBack: ← Πίσω NoScheduledFound: Δεν βρέθηκαν προγραμματισμένες εργασίες When: Πότε ScheduledJobs: Προγραμματισμένες Εργασίες - idle: αδρανής - active: ενεργή + idle: αδρανές + active: ενεργό Version: Έκδοση Connections: Συνδέσεις MemoryUsage: Χρήση Μνήμης PeakMemoryUsage: Μέγιστη Χρήση Μνήμης - Uptime: Διάρκεια Λειτουργείας (ημέρες) + Uptime: Ημέρες Λειτουργίας OneWeek: 1 εβδομάδα OneMonth: 1 μήνας ThreeMonths: 3 μήνες @@ -62,7 +65,28 @@ DeadJobs: Αδρανείς Εργασίες NoDeadJobsFound: Δεν βρέθηκαν αδρανείς εργασίες Dead: Αδρανείς + Process: Διεργασία Processes: Διεργασίες + Name: Όνομα Thread: Νήμα Threads: Νήματα Jobs: Εργασίες + Paused: Σε παύση + Stop: Διακοπή + Quiet: Σίγαση + StopAll: Διακοπή Όλων + QuietAll: Σίγαση Όλων + PollingInterval: Συχνότητα Ανανέωσης + Plugins: Πρόσθετα + NotYetEnqueued: Δεν προστέθηκε στην στοίβα ακόμη + CreatedAt: Δημιουργήθηκε στις + BackToApp: Πίσω στην Εφαρμογή + Latency: Καθυστέρηση + Pause: Παύση + Unpause: Κατάργηση Παύσης + Metrics: Μετρήσεις + NoDataFound: Δεν βρέθηκαν δεδομένα + ExecutionTime: Συνολικός Χρόνος Εκτέλεσης + AvgExecutionTime: Μέσος Χρόνος Εκτέλεσης + # Context: Context + diff -Nru ruby-sidekiq-6.4.1+dfsg/web/locales/en.yml ruby-sidekiq-6.5.7+dfsg3/web/locales/en.yml --- ruby-sidekiq-6.4.1+dfsg/web/locales/en.yml 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/web/locales/en.yml 2022-10-26 06:38:14.000000000 +0000 @@ -84,3 +84,10 @@ Latency: Latency Pause: Pause Unpause: Unpause + Metrics: Metrics + NoDataFound: No data found + ExecutionTime: Total Execution Time + AvgExecutionTime: Average Execution Time + Context: Context + 
Bucket: Bucket + NoJobMetricsFound: No recent job metrics were found diff -Nru ruby-sidekiq-6.4.1+dfsg/web/locales/ja.yml ruby-sidekiq-6.5.7+dfsg3/web/locales/ja.yml --- ruby-sidekiq-6.4.1+dfsg/web/locales/ja.yml 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/web/locales/ja.yml 2022-10-26 06:38:14.000000000 +0000 @@ -84,3 +84,10 @@ Latency: レイテンシ Pause: 一時停止 Unpause: 一時停止を解除 + Metrics: メトリクス + NoDataFound: データが見つかりませんでした + ExecutionTime: 合計実行時間 + AvgExecutionTime: 平均実行時間 + Context: コンテキスト + Bucket: バケット + NoJobMetricsFound: 直近のジョブメトリクスが見つかりませんでした diff -Nru ruby-sidekiq-6.4.1+dfsg/web/locales/pt-br.yml ruby-sidekiq-6.5.7+dfsg3/web/locales/pt-br.yml --- ruby-sidekiq-6.4.1+dfsg/web/locales/pt-br.yml 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/web/locales/pt-br.yml 2022-10-26 06:38:14.000000000 +0000 @@ -8,6 +8,7 @@ History: Histórico Busy: Ocupados Processed: Processados + Utilization: Utilização Failed: Falhas Scheduled: Agendados Retries: Tentativas @@ -26,18 +27,20 @@ Delete: Apagar AddToQueue: Adicionar à fila AreYouSureDeleteJob: Deseja deletar esta tarefa? - AreYouSureDeleteQueue: Deseja deletar a %{queue} fila? + AreYouSureDeleteQueue: Deseja deletar a fila %{queue}? Isso irá deletar todas as tarefas desta fila. Queues: Filas Size: Tamanho Actions: Ações NextRetry: Próxima Tentativa RetryCount: Número de Tentativas RetryNow: Tentar novamente agora + Kill: Matar LastRetry: Última tentativa OriginallyFailed: Falhou originalmente AreYouSure: Tem certeza? 
DeleteAll: Apagar tudo RetryAll: Tentar tudo novamente + KillAll: Matar todas NoRetriesFound: Nenhuma tentativa encontrada Error: Erro ErrorClass: Classe de erro @@ -58,11 +61,26 @@ OneMonth: 1 mês ThreeMonths: 3 meses SixMonths: 6 meses - Failures : Falhas - DeadJobs : Tarefas mortas - NoDeadJobsFound : Nenhuma tarefa morta foi encontrada - Dead : Morta - Processes : Processos - Thread : Thread - Threads : Threads - Jobs : Tarefas + Failures: Falhas + DeadJobs: Tarefas mortas + NoDeadJobsFound: Nenhuma tarefa morta foi encontrada + Dead: Morta + Process: Processo + Processes: Processos + Name: Nome + Thread: Thread + Threads: Threads + Jobs: Tarefas + Paused: Pausado + Stop: Parar + Quiet: Silenciar + StopAll: Parar Todos + QuietAll: Silenciar Todos + PollingInterval: Intervalo de Polling + Plugins: Plug-ins + NotYetEnqueued: Ainda não enfileirado + CreatedAt: Criado em + BackToApp: De volta ao aplicativo + Latency: Latência + Pause: Pausar + Unpause: Despausar diff -Nru ruby-sidekiq-6.4.1+dfsg/web/locales/zh-cn.yml ruby-sidekiq-6.5.7+dfsg3/web/locales/zh-cn.yml --- ruby-sidekiq-6.4.1+dfsg/web/locales/zh-cn.yml 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/web/locales/zh-cn.yml 2022-10-26 06:38:14.000000000 +0000 @@ -7,6 +7,7 @@ Realtime: 实时 History: 历史记录 Busy: 执行中 + Utilization: 利用率 Processed: 已处理 Failed: 已失败 Scheduled: 已计划 @@ -17,15 +18,15 @@ StopPolling: 停止轮询 Queue: 队列 Class: 类别 - Job: 作业 + Job: 任务 Arguments: 参数 Extras: 额外的 Started: 已开始 ShowAll: 显示全部 - CurrentMessagesInQueue: 目前在%{queue}的作业 + CurrentMessagesInQueue: 目前在%{queue}的任务 Delete: 删除 AddToQueue: 添加至队列 - AreYouSureDeleteJob: 你确定要删除这个作业么? + AreYouSureDeleteJob: 你确定要删除这个任务么? AreYouSureDeleteQueue: 你确定要删除%{queue}这个队列? Queues: 队列 Size: 容量 @@ -33,20 +34,22 @@ NextRetry: 下次重试 RetryCount: 重试次數 RetryNow: 现在重试 + Kill: 终止 LastRetry: 最后一次重试 OriginallyFailed: 原本已失败 AreYouSure: 你确定? 
- DeleteAll: 删除全部 - RetryAll: 重试全部 + DeleteAll: 全部删除 + RetryAll: 全部重试 + KillAll: 全部终止 NoRetriesFound: 沒有发现可重试 Error: 错误 ErrorClass: 错误类别 ErrorMessage: 错误消息 - ErrorBacktrace: 错误的回调追踪 + ErrorBacktrace: 错误细节 GoBack: ← 返回 - NoScheduledFound: 沒有发现计划作业 + NoScheduledFound: 沒有发现计划任务 When: 当 - ScheduledJobs: 计划作业 + ScheduledJobs: 计划任务 idle: 闲置 active: 活动中 Version: 版本 @@ -59,10 +62,32 @@ ThreeMonths: 三个月 SixMonths: 六个月 Failures: 失败 - DeadJobs: 已停滞作业 - NoDeadJobsFound: 沒有发现任何已停滞的作业 + DeadJobs: 已停滞任务 + NoDeadJobsFound: 沒有发现任何已停滞的任务 Dead: 已停滞 + Process: 进程 Processes: 处理中 + Name: 名称 Thread: 线程 Threads: 线程 - Jobs: 作业 + Jobs: 任务 + Paused: 已暫停 + Stop: 強制暫停 + Quiet: 暫停 + StopAll: 全部強制暫停 + QuietAll: 全部暫停 + PollingInterval: 輪詢週期 + Plugins: 套件 + NotYetEnqueued: 尚未進入佇列 + CreatedAt: 建立時間 + BackToApp: 回首頁 + Latency: 延時 + Pause: 暫停 + Unpause: 取消暂停 + Metrics: 指标 + NoDataFound: 无数据 + ExecutionTime: 总执行时间 + AvgExecutionTime: 平均执行时间 + Context: 上下文 + Bucket: 桶 + NoJobMetricsFound: 无任务相关指标数据 diff -Nru ruby-sidekiq-6.4.1+dfsg/web/locales/zh-tw.yml ruby-sidekiq-6.5.7+dfsg3/web/locales/zh-tw.yml --- ruby-sidekiq-6.4.1+dfsg/web/locales/zh-tw.yml 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/web/locales/zh-tw.yml 2022-10-26 06:38:14.000000000 +0000 @@ -7,6 +7,7 @@ Realtime: 即時 History: 歷史資料 Busy: 忙碌 + Utilization: 使用率 Processed: 已處理 Failed: 已失敗 Scheduled: 已排程 @@ -25,26 +26,28 @@ CurrentMessagesInQueue: 目前在%{queue}的工作 Delete: 刪除 AddToQueue: 增加至佇列 - AreYouSureDeleteJob: 你確定要刪除這個工作嗎? - AreYouSureDeleteQueue: 你確定要刪除%{queue}這個佇列? + AreYouSureDeleteJob: 確定要刪除這個工作嗎? + AreYouSureDeleteQueue: 確定要刪除%{queue}佇列?這會刪除佇列裡的所有工作,佇列將會在有新工作時重新出現。 Queues: 佇列 Size: 容量 Actions: 動作 NextRetry: 下次重試 RetryCount: 重試次數 RetryNow: 馬上重試 + Kill: 取消 LastRetry: 最後一次重試 OriginallyFailed: 原本已失敗 AreYouSure: 你確定? 
- DeleteAll: 刪除全部 - RetryAll: 重試全部 - NoRetriesFound: 沒有發現可重試 + DeleteAll: 全部刪除 + RetryAll: 全部重試 + KillAll: 全部取消 + NoRetriesFound: 找無可重試的工作 Error: 錯誤 ErrorClass: 錯誤類別 ErrorMessage: 錯誤訊息 - ErrorBacktrace: 錯誤的回調追踨 + ErrorBacktrace: 詳細錯誤訊息 GoBack: ← 返回 - NoScheduledFound: 沒有發現已排程的工作 + NoScheduledFound: 找無已排程的工作 When: 當 ScheduledJobs: 已排程的工作 idle: 閒置 @@ -62,7 +65,29 @@ DeadJobs: 停滯工作 NoDeadJobsFound: 沒有發現任何停滯的工作 Dead: 停滯 + Process: 程序 Processes: 處理中 + Name: 名稱 Thread: 執行緒 Threads: 執行緒 Jobs: 工作 + Paused: 已暫停 + Stop: 強制暫停 + Quiet: 暫停 + StopAll: 全部強制暫停 + QuietAll: 全部暫停 + PollingInterval: 輪詢週期 + Plugins: 套件 + NotYetEnqueued: 尚未進入佇列 + CreatedAt: 建立時間 + BackToApp: 回首頁 + Latency: 延時 + Pause: 暫停 + Unpause: 取消暫停 + Metrics: 計量 + NoDataFound: 找無資料 + ExecutionTime: 總執行時間 + AvgExecutionTime: 平均執行時間 + Context: 上下文 + Bucket: 桶 + NoJobMetricsFound: 找無工作相關計量資料 diff -Nru ruby-sidekiq-6.4.1+dfsg/web/views/busy.erb ruby-sidekiq-6.5.7+dfsg3/web/views/busy.erb --- ruby-sidekiq-6.4.1+dfsg/web/views/busy.erb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/web/views/busy.erb 2022-10-26 06:38:14.000000000 +0000 @@ -15,7 +15,7 @@

<%= t('Threads') %>

-

<%= ws = workers.size; number_with_delimiter(ws) %>

+

<%= ws = workset.size; number_with_delimiter(ws) %>

<%= t('Busy') %>

@@ -48,13 +48,13 @@ <%= t('Name') %> <%= t('Started') %> - <%= t('RSS') %>? + <%= t('RSS') %>? <%= t('Threads') %> <%= t('Busy') %>   <% lead = processes.leader %> - <% processes.each do |process| %> + <% sorted_processes.each do |process| %> <%= "#{process['hostname']}:#{process['pid']}" %> @@ -109,7 +109,7 @@ <%= t('Arguments') %> <%= t('Started') %> - <% workers.each do |process, thread, msg| %> + <% workset.each do |process, thread, msg| %> <% job = Sidekiq::JobRecord.new(msg['payload']) %> <%= process %> diff -Nru ruby-sidekiq-6.4.1+dfsg/web/views/dashboard.erb ruby-sidekiq-6.5.7+dfsg3/web/views/dashboard.erb --- ruby-sidekiq-6.4.1+dfsg/web/views/dashboard.erb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/web/views/dashboard.erb 2022-10-26 06:38:14.000000000 +0000 @@ -1,3 +1,4 @@ +

diff -Nru ruby-sidekiq-6.4.1+dfsg/web/views/metrics.erb ruby-sidekiq-6.5.7+dfsg3/web/views/metrics.erb --- ruby-sidekiq-6.4.1+dfsg/web/views/metrics.erb 1970-01-01 00:00:00.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/web/views/metrics.erb 2022-10-26 06:38:14.000000000 +0000 @@ -0,0 +1,69 @@ + + + + +

Total execution time

+ +<% + table_limit = 20 + chart_limit = 5 + job_results = @query_result.job_results.sort_by { |(kls, jr)| jr.totals["s"] }.reverse.first(table_limit) + visible_kls = job_results.first(chart_limit).map(&:first) +%> + + + + + +

Most Time-Consuming Jobs

+ +
+ + + + + + + + + + <% if job_results.any? %> + <% job_results.each_with_index do |(kls, jr), i| %> + + + + + + + + <% end %> + <% else %> + + <% end %> + +
<%= t('Name') %><%= t('Processed') %><%= t('Failed') %><%= t('ExecutionTime') %><%= t('AvgExecutionTime') %>
+
+ <% id = "metrics-swatch-#{kls}" %> + + <%= kls %> +
+ +
<%= jr.dig("totals", "p") %><%= jr.dig("totals", "f") %><%= jr.dig("totals", "s").round(2) %> seconds<%= jr.total_avg("s").round(2) %> seconds
<%= t("NoDataFound") %>
+
+ +

Data from <%= @query_result.starts_at %> to <%= @query_result.ends_at %>

diff -Nru ruby-sidekiq-6.4.1+dfsg/web/views/metrics_for_job.erb ruby-sidekiq-6.5.7+dfsg3/web/views/metrics_for_job.erb --- ruby-sidekiq-6.4.1+dfsg/web/views/metrics_for_job.erb 1970-01-01 00:00:00.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/web/views/metrics_for_job.erb 2022-10-26 06:38:14.000000000 +0000 @@ -0,0 +1,87 @@ + + + + +<% + job_result = @query_result.job_results[@name] + hist_totals = job_result.hist.values.first.zip(*job_result.hist.values[1..-1]).map(&:sum) + bucket_labels =Sidekiq::Metrics::Histogram::LABELS + bucket_intervals =Sidekiq::Metrics::Histogram::BUCKET_INTERVALS.reverse + + # Replace INFINITY since it can't be represented as JSON + bucket_intervals[0] = bucket_intervals[1] * 2 +%> + +<% if job_result.totals["s"] > 0 %> +
+

+ <%= t(:metrics).to_s.titleize %> / + <%= h @name %> +

+

Histogram summary

+
+ + + + + +

Performance over time

+ + + + + +
+ + + + + + + + + + <% @query_result.buckets.reverse.each do |bucket| %> + + + + + <% if (total_sec = job_result.series.dig("s", bucket)) > 0 %> + + + <% else %> + + + <% end %> + + <% end %> + +
<%= t('Time') %><%= t('Processed') %><%= t('Failed') %><%= t('ExecutionTime') %><%= t('AvgExecutionTime') %>
<%= bucket %><%= job_result.series.dig("p", bucket) %><%= job_result.series.dig("f", bucket) %><%= total_sec.round(2) %> seconds<%= job_result.series_avg("s")[bucket].round(2) %> seconds
+
+

Data from <%= @query_result.starts_at %> to <%= @query_result.ends_at %>

+<% else %> +

+ <%= t(:metrics).to_s.titleize %> / + <%= h @name %> +

+ +
<%= t('NoJobMetricsFound') %>
+<% end %> diff -Nru ruby-sidekiq-6.4.1+dfsg/web/views/_nav.erb ruby-sidekiq-6.5.7+dfsg3/web/views/_nav.erb --- ruby-sidekiq-6.4.1+dfsg/web/views/_nav.erb 2022-04-03 13:56:51.000000000 +0000 +++ ruby-sidekiq-6.5.7+dfsg3/web/views/_nav.erb 2022-10-26 06:38:14.000000000 +0000 @@ -1,7 +1,7 @@