From 7dad1281442d2f27990bc7e7cd3aad5594fa29d8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Andrius=20Laukavi=C4=8Dius?=
Date: Sun, 28 Jan 2024 16:03:29 +0200
Subject: [PATCH] [FIX] queue_job: max retry

When a job fails because of a concurrent update error, it does not
respect the max retries set on the job. The problem is that the
``perform`` method logic that handles retries is never called, because
``runjob`` in the controller that triggers jobs catches the expected
exception and silences it (this is done deliberately, to avoid
polluting the logs).

So for now, add an extra check before the job is run, to make sure the
max retries limit is enforced once it is reached.
---
 queue_job/controllers/main.py | 2 ++
 queue_job/job.py              | 4 ++++
 2 files changed, 6 insertions(+)

diff --git a/queue_job/controllers/main.py b/queue_job/controllers/main.py
index da42b53e99..61478335df 100644
--- a/queue_job/controllers/main.py
+++ b/queue_job/controllers/main.py
@@ -122,6 +122,8 @@ def retry_postpone(job, message, seconds=None):
             job.store()
             env.cr.commit()
 
+        # FIXME: this exception never propagates up, so it never reaches
+        # the `perform` method that handles retries.
         except RetryableJobError as err:
             # delay the job later, requeue
             retry_postpone(job, str(err), seconds=err.seconds)
diff --git a/queue_job/job.py b/queue_job/job.py
index 1a61881e30..a1a65f5997 100644
--- a/queue_job/job.py
+++ b/queue_job/job.py
@@ -512,6 +512,10 @@ def perform(self):
 
         The job is executed with the user which has initiated it.
         """
+        if self.max_retries and self.retry >= self.max_retries:
+            raise FailedJobError(
+                "Max. retries (%d) reached: %s" % (self.max_retries, self._uuid)
+            )
         self.retry += 1
         try:
             self.result = self.func(*tuple(self.args), **self.kwargs)