Skip to content

Instantly share code, notes, and snippets.

@dany1468
Created June 14, 2017 05:00
Show Gist options
  • Select an option

  • Save dany1468/6f1855ff97bf99f0e6b28ff4dd972e32 to your computer and use it in GitHub Desktop.

Select an option

Save dany1468/6f1855ff97bf99f0e6b28ff4dd972e32 to your computer and use it in GitHub Desktop.
Sidekiq で最大回数リトライ後に失敗した場合出すログに例外のバックトレースを含める ref: http://qiita.com/dany1468/items/f3fcbfbee615074ced45
# Refund flow for a card charge: load the record, void its payment
# transaction, then email the customer a refund notice.
def perform(card_charge_id)
  record = CardCharge.find(card_charge_id)
  record.void_transaction
  Emailer.charge_refunded(record).deliver
end
# Demonstration worker that always raises, so its retries eventually
# run out and the exhausted hook fires.
class FailingWorker
  include Sidekiq::Worker

  # Called once the retry budget is spent; logs which job died and why.
  sidekiq_retries_exhausted do |msg, e|
    Sidekiq.logger.warn("Failed #{msg['class']} with #{msg['args']}: #{msg['error_message']}")
  end

  def perform(*args)
    raise "or I don't work"
  end
end
# Global fallback handler: used when a worker class does not define its
# own sidekiq_retries_exhausted block.
Sidekiq.configure_server do |config|
  config.default_retries_exhausted = lambda do |job, ex|
    Sidekiq.logger.info "#{job['class']} job is now dead"
  end
end
# Demonstration worker whose exhausted hook also logs the exception's
# backtrace (the point of this gist).
class FailingWorker
  include Sidekiq::Worker

  # FIX: guard the backtrace access. `e` can be nil (some Sidekiq versions
  # invoke the hook without an exception object) and `Exception#backtrace`
  # is nil for an exception that was never raised — either case made the
  # original `e.backtrace.join` blow up inside the exhausted hook itself.
  sidekiq_retries_exhausted do |msg, e|
    trace = (e && e.backtrace) ? e.backtrace.join("\n") : ''
    Sidekiq.logger.warn "Failed #{msg['class']} with #{msg['args']}: #{msg['error_message']} : #{trace}"
  end

  def perform(*args)
    raise "or I don't work"
  end
end
class FailingWorker
include Sidekiq::Worker
# The `backtrace` option controls whether the exception backtrace is
# persisted into the job payload ('error_backtrace').
sidekiq_options backtrace: true # default is false
sidekiq_options backtrace: 100 # when an Integer, the number of backtrace lines to keep (the backtrace is available as an array)
{
"retry_count": 2, // number of times we've retried so far
"error_message": "wrong number of arguments (2 for 3)", // the exception message
"error_class": "ArgumentError", // the exception class
"error_backtrace": ["line 0", "line 1", ...], // some or all of the exception's backtrace, optional, array of strings
"failed_at": 1234567890, // the first time the job failed
"retried_at": 1234567890 // the last time the job failed
}
module Sidekiq
class JobRetry
# 🏁 The middleware chain and the worker's job processing run inside `yield`,
# so any exception raised by user-written code is rescued here.
# Runs the job (via yield) and routes any failure into the retry machinery.
# +worker+ is the worker instance, +msg+ the job payload hash, +queue+ the
# queue name. Re-raises Skip/Shutdown untouched; everything else is handed
# to attempt_retry when retries are enabled.
def local(worker, msg, queue)
yield
rescue Skip => ex
raise ex
rescue Sidekiq::Shutdown => ey
# ignore, will be pushed back onto queue during hard_shutdown
raise ey
rescue Exception => e
# ignore, will be pushed back onto queue during hard_shutdown
raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
# Fall back to the worker class's configured 'retry' option when the
# payload itself does not carry one.
if msg['retry'] == nil
msg['retry'] = worker.class.get_sidekiq_options['retry']
end
raise e unless msg['retry']
# 🏁 If retrying is enabled, proceed to attempt_retry.
attempt_retry(worker, msg, queue, e)
# We've handled this error associated with this job, don't
# need to handle it at the global level
raise Skip
end
# Note that +worker+ can be nil here if an error is raised before we can
# instantiate the worker instance. All access must be guarded and
# best effort.
# Records error details on the job payload, then either schedules the job
# into the 'retry' sorted set or, once the retry budget is spent, hands it
# to retries_exhausted.
def attempt_retry(worker, msg, queue, exception)
max_retry_attempts = retry_attempts_from(msg['retry'], @max_retries)
# Retries go back to 'retry_queue' when configured, else the original queue.
msg['queue'] = if msg['retry_queue']
msg['retry_queue']
else
queue
end
# 📝 Below, the message and class are extracted from the exception and
# stored into the payload's error_message / error_class fields.
# App code can stuff all sorts of crazy binary data into the error message
# that won't convert to JSON.
m = exception.message.to_s[0, 10_000]
if m.respond_to?(:scrub!)
m.force_encoding("utf-8")
m.scrub!
end
msg['error_message'] = m
msg['error_class'] = exception.class.name
# First failure stamps 'failed_at'; subsequent ones bump 'retry_count'
# and stamp 'retried_at'.
count = if msg['retry_count']
msg['retried_at'] = Time.now.to_f
msg['retry_count'] += 1
else
msg['failed_at'] = Time.now.to_f
msg['retry_count'] = 0
end
# 📝 Only when the 'backtrace' option is true / an Integer does the
# exception backtrace get stored into the payload's error_backtrace.
if msg['backtrace'] == true
msg['error_backtrace'] = exception.backtrace
elsif !msg['backtrace']
# do nothing
elsif msg['backtrace'].to_i != 0
msg['error_backtrace'] = exception.backtrace[0...msg['backtrace'].to_i]
end
# 🏁 Once the maximum retry count is exceeded, move on to retries_exhausted.
if count < max_retry_attempts
delay = delay_for(worker, count, exception)
logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
retry_at = Time.now.to_f + delay
payload = Sidekiq.dump_json(msg)
Sidekiq.redis do |conn|
conn.zadd('retry', retry_at.to_s, payload)
end
else
# Goodbye dear message, you (re)tried your best I'm sure.
retries_exhausted(worker, msg, exception)
end
end
# Invoked when a job has used up all retries: runs the configured
# exhausted callback (best effort — its own errors are logged, not raised)
# and then moves the job to the dead set unless 'dead' is explicitly false.
def retries_exhausted(worker, msg, exception)
logger.debug { "Retries exhausted for job" }
begin
# 🏁 This is where sidekiq_retries_exhausted or default_retries_exhausted is called.
block = worker && worker.sidekiq_retries_exhausted_block || Sidekiq.default_retries_exhausted
block.call(msg, exception) if block
rescue => e
handle_exception(e, { context: "Error calling retries_exhausted for #{msg['class']}", job: msg })
end
# 💀 The job is sent to the morgue (dead set).
send_to_morgue(msg) unless msg['dead'] == false
end
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment