From a4b7a995a6e410617c3d18ec933cf1c0593ed45c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Fri, 2 Jan 2026 19:36:25 +0100 Subject: [PATCH 01/14] [FIX] queue_job: set exec_time readonly --- queue_job/models/queue_job.py | 1 + 1 file changed, 1 insertion(+) diff --git a/queue_job/models/queue_job.py b/queue_job/models/queue_job.py index df33e2c7c5..c7257d074b 100644 --- a/queue_job/models/queue_job.py +++ b/queue_job/models/queue_job.py @@ -104,6 +104,7 @@ class QueueJob(models.Model): exec_time = fields.Float( string="Execution Time (avg)", group_operator="avg", + readonly=True, help="Time required to execute this job in seconds. Average when grouped.", ) date_cancelled = fields.Datetime(readonly=True) From a9013ec4b1c39d58e93a49846a5eda6e9f132015 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Fri, 2 Jan 2026 15:12:40 +0100 Subject: [PATCH 02/14] queue_job: declare sbidoul as maintainer --- queue_job/README.rst | 13 +++--- queue_job/__manifest__.py | 2 +- queue_job/static/description/index.html | 54 +++++++++++-------------- test_queue_job/__manifest__.py | 1 + 4 files changed, 32 insertions(+), 38 deletions(-) diff --git a/queue_job/README.rst b/queue_job/README.rst index f22fd7bc10..bb46ba8374 100644 --- a/queue_job/README.rst +++ b/queue_job/README.rst @@ -1,7 +1,3 @@ -.. image:: https://odoo-community.org/readme-banner-image - :target: https://odoo-community.org/get-involved?utm_source=readme - :alt: Odoo Community Association - ========= Job Queue ========= @@ -17,7 +13,7 @@ Job Queue .. |badge1| image:: https://img.shields.io/badge/maturity-Mature-brightgreen.png :target: https://odoo-community.org/page/development-status :alt: Mature -.. |badge2| image:: https://img.shields.io/badge/license-LGPL--3-blue.png +.. |badge2| image:: https://img.shields.io/badge/licence-LGPL--3-blue.png :target: http://www.gnu.org/licenses/lgpl-3.0-standalone.html :alt: License: LGPL-3 .. |badge3| image:: https://img.shields.io/badge/github-OCA%2Fqueue-lightgray.png?logo=github @@ -697,10 +693,13 @@ promote its widespread use. .. |maintainer-guewen| image:: https://github.com/guewen.png?size=40px :target: https://github.com/guewen :alt: guewen +.. |maintainer-sbidoul| image:: https://github.com/sbidoul.png?size=40px + :target: https://github.com/sbidoul + :alt: sbidoul -Current `maintainer `__: +Current `maintainers `__: -|maintainer-guewen| +|maintainer-guewen| |maintainer-sbidoul| This module is part of the `OCA/queue `_ project on GitHub. diff --git a/queue_job/__manifest__.py b/queue_job/__manifest__.py index f32b20e2e2..5a88b8e7fa 100644 --- a/queue_job/__manifest__.py +++ b/queue_job/__manifest__.py @@ -29,7 +29,7 @@ }, "installable": True, "development_status": "Mature", - "maintainers": ["guewen"], + "maintainers": ["guewen", "sbidoul"], "post_init_hook": "post_init_hook", "post_load": "post_load", } diff --git a/queue_job/static/description/index.html b/queue_job/static/description/index.html index 82bed11d0f..3dcf44c283 100644 --- a/queue_job/static/description/index.html +++ b/queue_job/static/description/index.html @@ -3,7 +3,7 @@ -README.rst +Job Queue -
[diff of queue_job/static/description/index.html omitted: regenerated HTML mirroring the README.rst changes above; the page title changes from "README.rst" to "Job Queue", the OCA readme banner is removed, and "Current maintainer: guewen" becomes "Current maintainers: guewen sbidoul".]
-
diff --git a/test_queue_job/__manifest__.py b/test_queue_job/__manifest__.py index c3a29bf0c5..76d624e69d 100644 --- a/test_queue_job/__manifest__.py +++ b/test_queue_job/__manifest__.py @@ -14,5 +14,6 @@ "data/queue_job_function_data.xml", "security/ir.model.access.csv", ], + "maintainers": ["sbidoul"], "installable": True, } From d18a882d8bae7ff2964a91c692510e05b27bdafd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Fri, 2 Jan 2026 13:54:38 +0100 Subject: [PATCH 03/14] [IMP] queue_job: add job_duration parameter to test job This allows creating test job with a long duration for stress testing. --- queue_job/controllers/main.py | 15 +++++++++++++-- queue_job/models/queue_job.py | 5 ++++- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/queue_job/controllers/main.py b/queue_job/controllers/main.py index 4addf1be23..3a309e43af 100644 --- a/queue_job/controllers/main.py +++ b/queue_job/controllers/main.py @@ -187,6 +187,7 @@ def create_test_job( description="Test job", size=1, failure_rate=0, + job_duration=0, ): """Create test jobs @@ -207,6 +208,12 @@ def create_test_job( except (ValueError, TypeError): failure_rate = 0 + if job_duration is not None: + try: + job_duration = float(job_duration) + except (ValueError, TypeError): + job_duration = 0 + if not (0 <= failure_rate <= 1): raise BadRequest("failure_rate must be between 0 and 1") @@ -235,6 +242,7 @@ def create_test_job( channel=channel, description=description, failure_rate=failure_rate, + job_duration=job_duration, ) if size > 1: @@ -245,6 +253,7 @@ def create_test_job( channel=channel, description=description, failure_rate=failure_rate, + job_duration=job_duration, ) return "" @@ -256,6 +265,7 @@ def _create_single_test_job( description="Test job", size=1, failure_rate=0, + job_duration=0, ): delayed = ( http.request.env["queue.job"] @@ -265,7 +275,7 @@ def _create_single_test_job( channel=channel, description=description, ) - ._test_job(failure_rate=failure_rate) + ._test_job(failure_rate=failure_rate, job_duration=job_duration) ) return "job uuid: %s" % (delayed.db_record().uuid,) @@ -279,6 +289,7 @@ def _create_graph_test_jobs( channel=None, description="Test job", failure_rate=0, + job_duration=0, ): model = http.request.env["queue.job"] current_count = 0 @@ -301,7 +312,7 @@ def _create_graph_test_jobs( max_retries=max_retries, channel=channel, description="%s #%d" % (description, current_count), - )._test_job(failure_rate=failure_rate) + )._test_job(failure_rate=failure_rate, job_duration=job_duration) ) grouping = random.choice(possible_grouping_methods) diff --git a/queue_job/models/queue_job.py b/queue_job/models/queue_job.py index c7257d074b..d538a2a75c 100644 --- a/queue_job/models/queue_job.py +++ b/queue_job/models/queue_job.py @@ -3,6 +3,7 @@ import logging import random +import time from datetime import datetime, timedelta from odoo import _, api, exceptions, fields, models @@ -458,7 +459,9 @@ def related_action_open_record(self): ) return action - def _test_job(self, failure_rate=0): + def _test_job(self, failure_rate=0, job_duration=0): _logger.info("Running test job.") if random.random() <= failure_rate: raise JobError("Job failed") + if job_duration: + time.sleep(job_duration) From c650e2876cf57b6620cd387d95105b032287a98f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Fri, 2 Jan 2026 15:00:27 +0100 Subject: [PATCH 04/14] [FIX] queue_job: fix retry mechanisme for job dependencies When a SerializationFailure occurs when updating the state of dependent 
jobs, the cursor is not usable anymore so the retry failed with `current transaction is aborted`. A savepoint fixes that. --- queue_job/controllers/main.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/queue_job/controllers/main.py b/queue_job/controllers/main.py index 3a309e43af..1e3e3860de 100644 --- a/queue_job/controllers/main.py +++ b/queue_job/controllers/main.py @@ -49,14 +49,15 @@ def _enqueue_dependent_jobs(self, env, job): tries = 0 while True: try: - job.enqueue_waiting() + with job.env.cr.savepoint(): + job.enqueue_waiting() except OperationalError as err: # Automatically retry the typical transaction serialization # errors if err.pgcode not in PG_CONCURRENCY_ERRORS_TO_RETRY: raise if tries >= DEPENDS_MAX_TRIES_ON_CONCURRENCY_FAILURE: - _logger.info( + _logger.error( "%s, maximum number of tries reached to update dependencies", errorcodes.lookup(err.pgcode), ) From c27d65353ecf3a6cb0b1f2c80d6821436fd86da5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Sat, 3 Jan 2026 11:34:18 +0100 Subject: [PATCH 05/14] [IMP] queue_job: use state constant in lock function --- queue_job/job.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/queue_job/job.py b/queue_job/job.py index 790e07d90e..b1b4edf9b5 100644 --- a/queue_job/job.py +++ b/queue_job/job.py @@ -278,11 +278,11 @@ def lock(self): queue_job WHERE uuid = %s - AND state='started' + AND state = %s ) FOR UPDATE; """, - [self.uuid], + [self.uuid, STARTED], ) # 1 job should be locked From 6948a2137ec707908603b3273035bf532247d2a4 Mon Sep 17 00:00:00 2001 From: Zina Rasoamanana Date: Mon, 15 Sep 2025 10:45:39 +0200 Subject: [PATCH 06/14] [RMV] queue_job: remove test_requeue_dead_job --- queue_job/tests/__init__.py | 1 - queue_job/tests/test_requeue_dead_job.py | 133 ----------------------- 2 files changed, 134 deletions(-) delete mode 100644 queue_job/tests/test_requeue_dead_job.py diff --git a/queue_job/tests/__init__.py b/queue_job/tests/__init__.py index 1062acdc25..16bcdff96b 100644 --- a/queue_job/tests/__init__.py +++ b/queue_job/tests/__init__.py @@ -8,4 +8,3 @@ from . import test_model_job_function from . import test_queue_job_protected_write from . import test_wizards -from . import test_requeue_dead_job diff --git a/queue_job/tests/test_requeue_dead_job.py b/queue_job/tests/test_requeue_dead_job.py deleted file mode 100644 index c6c82a2f4d..0000000000 --- a/queue_job/tests/test_requeue_dead_job.py +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright 2025 ACSONE SA/NV -# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl). 
-from contextlib import closing -from datetime import datetime, timedelta - -from odoo.tests.common import TransactionCase - -from odoo.addons.queue_job.job import Job -from odoo.addons.queue_job.jobrunner.runner import Database - - -class TestRequeueDeadJob(TransactionCase): - def create_dummy_job(self, uuid): - """ - Create dummy job for tests - """ - return ( - self.env["queue.job"] - .with_context( - _job_edit_sentinel=self.env["queue.job"].EDIT_SENTINEL, - ) - .create( - { - "uuid": uuid, - "user_id": self.env.user.id, - "state": "pending", - "model_name": "queue.job", - "method_name": "write", - } - ) - ) - - def get_locks(self, uuid, cr=None): - """ - Retrieve lock rows - """ - if cr is None: - cr = self.env.cr - - cr.execute( - """ - SELECT - queue_job_id - FROM - queue_job_lock - WHERE - queue_job_id IN ( - SELECT - id - FROM - queue_job - WHERE - uuid = %s - ) - FOR UPDATE SKIP LOCKED - """, - [uuid], - ) - - return cr.fetchall() - - def test_add_lock_record(self): - queue_job = self.create_dummy_job("test_add_lock") - job_obj = Job.load(self.env, queue_job.uuid) - - job_obj.set_started() - self.assertEqual(job_obj.state, "started") - - locks = self.get_locks(job_obj.uuid) - - self.assertEqual(1, len(locks)) - - def test_lock(self): - queue_job = self.create_dummy_job("test_lock") - job_obj = Job.load(self.env, queue_job.uuid) - - job_obj.set_started() - job_obj.store() - - locks = self.get_locks(job_obj.uuid) - - self.assertEqual(1, len(locks)) - - # commit to update queue_job records in DB - self.env.cr.commit() # pylint: disable=E8102 - - job_obj.lock() - - with closing(self.env.registry.cursor()) as new_cr: - locks = self.get_locks(job_obj.uuid, new_cr) - - # Row should be locked - self.assertEqual(0, len(locks)) - - # clean up - queue_job.unlink() - - self.env.cr.commit() # pylint: disable=E8102 - - # because we committed the cursor, the savepoint of the test method is - # gone, and this would break TransactionCase cleanups - self.cr.execute("SAVEPOINT test_%d" % self._savepoint_id) - - def test_requeue_dead_jobs(self): - uuid = "test_requeue_dead_jobs" - - queue_job = self.create_dummy_job(uuid) - job_obj = Job.load(self.env, queue_job.uuid) - - job_obj.set_enqueued() - # simulate enqueuing was in the past - job_obj.date_enqueued = datetime.now() - timedelta(minutes=1) - job_obj.set_started() - - job_obj.store() - self.env.cr.commit() # pylint: disable=E8102 - - # requeue dead jobs using current cursor - query = Database(self.env.cr.dbname)._query_requeue_dead_jobs() - self.env.cr.execute(query) - - uuids_requeued = self.env.cr.fetchall() - - self.assertEqual(len(uuids_requeued), 1) - self.assertEqual(uuids_requeued[0][0], uuid) - - # clean up - queue_job.unlink() - self.env.cr.commit() # pylint: disable=E8102 - - # because we committed the cursor, the savepoint of the test method is - # gone, and this would break TransactionCase cleanups - self.cr.execute("SAVEPOINT test_%d" % self._savepoint_id) From 430ba229f737b21baa79249c9e20c395c50421d7 Mon Sep 17 00:00:00 2001 From: Zina Rasoamanana Date: Tue, 3 Jun 2025 16:11:21 +0200 Subject: [PATCH 07/14] [IMP] queue_job: remove DB commits within test of requeue --- test_queue_job/__manifest__.py | 1 + test_queue_job/data/queue_job_test_job.xml | 18 ++++ test_queue_job/models/test_models.py | 31 ++++++ test_queue_job/tests/__init__.py | 1 + test_queue_job/tests/test_autovacuum.py | 20 +++- test_queue_job/tests/test_requeue_dead_job.py | 101 ++++++++++++++++++ 6 files changed, 167 insertions(+), 5 deletions(-) create mode 100644 
test_queue_job/data/queue_job_test_job.xml create mode 100644 test_queue_job/tests/test_requeue_dead_job.py diff --git a/test_queue_job/__manifest__.py b/test_queue_job/__manifest__.py index 76d624e69d..484f21d855 100644 --- a/test_queue_job/__manifest__.py +++ b/test_queue_job/__manifest__.py @@ -13,6 +13,7 @@ "data/queue_job_channel_data.xml", "data/queue_job_function_data.xml", "security/ir.model.access.csv", + "data/queue_job_test_job.xml", ], "maintainers": ["sbidoul"], "installable": True, diff --git a/test_queue_job/data/queue_job_test_job.xml b/test_queue_job/data/queue_job_test_job.xml new file mode 100644 index 0000000000..8a28ab70a0 --- /dev/null +++ b/test_queue_job/data/queue_job_test_job.xml @@ -0,0 +1,18 @@ + + + + + + diff --git a/test_queue_job/models/test_models.py b/test_queue_job/models/test_models.py index 03fa792137..2e85be8057 100644 --- a/test_queue_job/models/test_models.py +++ b/test_queue_job/models/test_models.py @@ -1,6 +1,8 @@ # Copyright 2016 Camptocamp SA # License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html) +from datetime import datetime, timedelta + from odoo import api, fields, models from odoo.addons.queue_job.delay import chain @@ -29,6 +31,35 @@ def testing_related__url(self, **kwargs): "url": kwargs["url"].format(subject=subject), } + @api.model + def _create_test_started_job(self, uuid): + """Create started jobs to be used within tests""" + self.env["queue.job"].with_context( + _job_edit_sentinel=self.env["queue.job"].EDIT_SENTINEL, + ).create( + { + "uuid": uuid, + "state": "started", + "model_name": "queue.job", + "method_name": "write", + } + ) + + @api.model + def _create_test_enqueued_job(self, uuid): + """Create enqueued jobs to be used within tests""" + self.env["queue.job"].with_context( + _job_edit_sentinel=self.env["queue.job"].EDIT_SENTINEL, + ).create( + { + "uuid": uuid, + "state": "enqueued", + "model_name": "queue.job", + "method_name": "write", + "date_enqueued": datetime.now() - timedelta(minutes=1), + } + ) + class ModelTestQueueJob(models.Model): diff --git a/test_queue_job/tests/__init__.py b/test_queue_job/tests/__init__.py index 0405022ce0..62347148e5 100644 --- a/test_queue_job/tests/__init__.py +++ b/test_queue_job/tests/__init__.py @@ -7,3 +7,4 @@ from . import test_job_function from . import test_related_actions from . import test_delay_mocks +from . 
import test_requeue_dead_job diff --git a/test_queue_job/tests/test_autovacuum.py b/test_queue_job/tests/test_autovacuum.py index 09730a4fea..97aebcba1e 100644 --- a/test_queue_job/tests/test_autovacuum.py +++ b/test_queue_job/tests/test_autovacuum.py @@ -28,12 +28,16 @@ def test_autovacuum(self): date_done = datetime.now() - timedelta(days=29) stored.write({"date_done": date_done}) self.env["queue.job"].autovacuum() - self.assertEqual(len(self.env["queue.job"].search([])), 1) + self.assertEqual( + len(self.env["queue.job"].search([("channel", "!=", False)])), 1 + ) date_done = datetime.now() - timedelta(days=31) stored.write({"date_done": date_done}) self.env["queue.job"].autovacuum() - self.assertEqual(len(self.env["queue.job"].search([])), 0) + self.assertEqual( + len(self.env["queue.job"].search([("channel", "!=", False)])), 0 + ) def test_autovacuum_multi_channel(self): root_channel = self.env.ref("queue_job.channel_root") @@ -48,11 +52,17 @@ def test_autovacuum_multi_channel(self): {"channel": channel_60days.complete_name, "date_done": date_done} ) - self.assertEqual(len(self.env["queue.job"].search([])), 2) + self.assertEqual( + len(self.env["queue.job"].search([("channel", "!=", False)])), 2 + ) self.env["queue.job"].autovacuum() - self.assertEqual(len(self.env["queue.job"].search([])), 1) + self.assertEqual( + len(self.env["queue.job"].search([("channel", "!=", False)])), 1 + ) date_done = datetime.now() - timedelta(days=61) job_60days.write({"date_done": date_done}) self.env["queue.job"].autovacuum() - self.assertEqual(len(self.env["queue.job"].search([])), 0) + self.assertEqual( + len(self.env["queue.job"].search([("channel", "!=", False)])), 0 + ) diff --git a/test_queue_job/tests/test_requeue_dead_job.py b/test_queue_job/tests/test_requeue_dead_job.py new file mode 100644 index 0000000000..a6328fed76 --- /dev/null +++ b/test_queue_job/tests/test_requeue_dead_job.py @@ -0,0 +1,101 @@ +# Copyright 2025 ACSONE SA/NV +# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl). 
+from contextlib import closing +from datetime import datetime, timedelta + +from odoo.tests import tagged + +from odoo.addons.queue_job.job import Job +from odoo.addons.queue_job.jobrunner.runner import Database + +from .common import JobCommonCase + + +@tagged("post_install", "-at_install") +class TestRequeueDeadJob(JobCommonCase): + def _get_demo_job(self, uuid): + # job created during load of demo data + job = self.env["queue.job"].search( + [ + ("uuid", "=", uuid), + ], + limit=1, + ) + + self.assertTrue( + job, + f"Demo data queue job {uuid} should be loaded in order" + " to make this tests work", + ) + + return job + + def get_locks(self, uuid, cr=None): + """ + Retrieve lock rows + """ + if cr is None: + cr = self.env.cr + + cr.execute( + """ + SELECT + queue_job_id + FROM + queue_job_lock + WHERE + queue_job_id IN ( + SELECT + id + FROM + queue_job + WHERE + uuid = %s + ) + FOR UPDATE SKIP LOCKED + """, + [uuid], + ) + + return cr.fetchall() + + def test_add_lock_record(self): + queue_job = self._get_demo_job("test_started_job") + self.assertEqual(len(queue_job), 1) + job_obj = Job.load(self.env, queue_job.uuid) + + job_obj.set_started() + self.assertEqual(job_obj.state, "started") + + locks = self.get_locks(job_obj.uuid) + + self.assertEqual(1, len(locks)) + + def test_lock(self): + queue_job = self._get_demo_job("test_started_job") + job_obj = Job.load(self.env, queue_job.uuid) + + job_obj.set_started() + job_obj.lock() + + with closing(self.env.registry.cursor()) as new_cr: + locks = self.get_locks(job_obj.uuid, new_cr) + + # Row should be locked + self.assertEqual(0, len(locks)) + + def test_requeue_dead_jobs(self): + queue_job = self._get_demo_job("test_enqueued_job") + job_obj = Job.load(self.env, queue_job.uuid) + + job_obj.set_enqueued() + job_obj.set_started() + job_obj.date_enqueued = datetime.now() - timedelta(minutes=1) + job_obj.store() + + # requeue dead jobs using current cursor + query = Database(self.env.cr.dbname)._query_requeue_dead_jobs() + self.env.cr.execute(query) + + uuids_requeued = self.env.cr.fetchall() + self.assertTrue(queue_job.uuid in j[0] for j in uuids_requeued) From 4e25ff0088bf129c2754c59f2df7be51e7cb43b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Fri, 2 Jan 2026 12:18:38 +0100 Subject: [PATCH 08/14] [IMP] queue_job: refactor job acquisition In this commit we cleanly separate the job acquisition (i.e. verifying the job is in the exepected state, marking it started and locking it) from job execution. We also avoid trying to start the job if it is already locked by using SKIP LOCKED and exiting early. Indeed in such situations the job is likely already being handled by another worker so there is no point trying to start it, so we exit early and let it be handled either by the other worker or the dead job requeuer. 
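The SKIP LOCKED behaviour this message relies on can be seen with two plain cursors. A minimal sketch follows; the database name "mydb" and the uuid "abc" are made up, and the statement mirrors the one added to _acquire_job in the diff below:

import odoo

registry = odoo.registry("mydb")  # hypothetical database name

ACQUIRE = (
    "SELECT uuid FROM queue_job "
    "WHERE uuid = %s AND state = 'enqueued' "
    "FOR UPDATE SKIP LOCKED"
)

with registry.cursor() as cr_a, registry.cursor() as cr_b:
    # Worker A acquires the row and keeps its transaction open.
    cr_a.execute(ACQUIRE, ("abc",))
    print(cr_a.fetchone())  # ('abc',): worker A now holds the row lock

    # Worker B runs the same statement while A's transaction is still open.
    cr_b.execute(ACQUIRE, ("abc",))
    print(cr_b.fetchone())  # None: the locked row is skipped instead of
                            # blocking, so worker B can exit early
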
--- queue_job/controllers/main.py | 57 ++++++++++++------- queue_job/job.py | 19 +++---- test_queue_job/tests/__init__.py | 1 + test_queue_job/tests/common.py | 10 ++++ test_queue_job/tests/test_acquire_job.py | 51 +++++++++++++++++ test_queue_job/tests/test_requeue_dead_job.py | 17 ------ 6 files changed, 107 insertions(+), 48 deletions(-) create mode 100644 test_queue_job/tests/test_acquire_job.py diff --git a/queue_job/controllers/main.py b/queue_job/controllers/main.py index 1e3e3860de..e964bc5edd 100644 --- a/queue_job/controllers/main.py +++ b/queue_job/controllers/main.py @@ -26,15 +26,47 @@ class RunJobController(http.Controller): - def _try_perform_job(self, env, job): - """Try to perform the job.""" + @classmethod + def _acquire_job(cls, env: api.Environment, job_uuid: str) -> Job | None: + """Acquire a job for execution. + + - make sure it is in ENQUEUED state + - mark it as STARTED and commit the state change + - acquire the job lock + + If successful, return the Job instance, otherwise return None. This + function may fail to acquire the job is not in the expected state or is + already locked by another worker. + """ + env.cr.execute( + "SELECT uuid FROM queue_job WHERE uuid=%s AND state=%s " + "FOR UPDATE SKIP LOCKED", + (job_uuid, ENQUEUED), + ) + if not env.cr.fetchone(): + _logger.warning( + "was requested to run job %s, but it does not exist, " + "or is not in state %s, or is being handled by another worker", + job_uuid, + ENQUEUED, + ) + return None + job = Job.load(env, job_uuid) + assert job and job.state == ENQUEUED job.set_started() job.store() env.cr.commit() - job.lock() + if not job.lock(): + _logger.warning( + "was requested to run job %s, but it could not be locked", + job_uuid, + ) + return None + return job + def _try_perform_job(self, env, job): + """Try to perform the job, mark it done and commit if successful.""" _logger.debug("%s started", job) - job.perform() # Triggers any stored computed fields before calling 'set_done' # so that will be part of the 'exec_time' @@ -94,23 +126,10 @@ def retry_postpone(job, message, seconds=None): job.set_pending(reset_retry=False) job.store() - # ensure the job to run is in the correct state and lock the record - env.cr.execute( - "SELECT state FROM queue_job WHERE uuid=%s AND state=%s FOR UPDATE", - (job_uuid, ENQUEUED), - ) - if not env.cr.fetchone(): - _logger.warning( - "was requested to run job %s, but it does not exist, " - "or is not in state %s", - job_uuid, - ENQUEUED, - ) + job = self._acquire_job(env, job_uuid) + if not job: return "" - job = Job.load(env, job_uuid) - assert job and job.state == ENQUEUED - try: try: self._try_perform_job(env, job) diff --git a/queue_job/job.py b/queue_job/job.py index b1b4edf9b5..755d37ab38 100644 --- a/queue_job/job.py +++ b/queue_job/job.py @@ -236,7 +236,7 @@ def load_many(cls, env, job_uuids): recordset = cls.db_records_from_uuids(env, job_uuids) return {cls._load_from_db_record(record) for record in recordset} - def add_lock_record(self): + def add_lock_record(self) -> None: """ Create row in db to be locked while the job is being performed. """ @@ -256,13 +256,11 @@ def add_lock_record(self): [self.uuid], ) - def lock(self): - """ - Lock row of job that is being performed + def lock(self) -> bool: + """Lock row of job that is being performed. - If a job cannot be locked, - it means that the job wasn't started, - a RetryableJobError is thrown. + Return False if a job cannot be locked: it means that the job is not in + STARTED state or is already locked by another worker. 
""" self.env.cr.execute( """ @@ -280,16 +278,13 @@ def lock(self): uuid = %s AND state = %s ) - FOR UPDATE; + FOR UPDATE SKIP LOCKED; """, [self.uuid, STARTED], ) # 1 job should be locked - if 1 != len(self.env.cr.fetchall()): - raise RetryableJobError( - f"Trying to lock job that wasn't started, uuid: {self.uuid}" - ) + return bool(self.env.cr.fetchall()) @classmethod def _load_from_db_record(cls, job_db_record): diff --git a/test_queue_job/tests/__init__.py b/test_queue_job/tests/__init__.py index 62347148e5..0cfacebdf3 100644 --- a/test_queue_job/tests/__init__.py +++ b/test_queue_job/tests/__init__.py @@ -1,3 +1,4 @@ +from . import test_acquire_job from . import test_autovacuum from . import test_delayable from . import test_dependencies diff --git a/test_queue_job/tests/common.py b/test_queue_job/tests/common.py index a32fcc380a..c1f7d88ca0 100644 --- a/test_queue_job/tests/common.py +++ b/test_queue_job/tests/common.py @@ -20,3 +20,13 @@ def _create_job(self): stored = Job.db_record_from_uuid(self.env, test_job.uuid) self.assertEqual(len(stored), 1) return stored + + def _get_demo_job(self, uuid): + # job created during load of demo data + job = self.env["queue.job"].search([("uuid", "=", uuid)], limit=1) + self.assertTrue( + job, + f"Demo data queue job {uuid!r} should be loaded in order " + "to make this test work", + ) + return job diff --git a/test_queue_job/tests/test_acquire_job.py b/test_queue_job/tests/test_acquire_job.py new file mode 100644 index 0000000000..3f0c92a2be --- /dev/null +++ b/test_queue_job/tests/test_acquire_job.py @@ -0,0 +1,51 @@ +# Copyright 2026 ACSONE SA/NV +# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl). +import logging +from unittest import mock + +from odoo.tests import tagged + +from odoo.addons.queue_job.controllers.main import RunJobController + +from .common import JobCommonCase + + +@tagged("post_install", "-at_install") +class TestRequeueDeadJob(JobCommonCase): + def test_acquire_enqueued_job(self): + job_record = self._get_demo_job(uuid="test_enqueued_job") + self.assertFalse( + self.env["queue.job.lock"].search( + [("queue_job_id", "=", job_record.id)], + ), + "A job lock record should not exist at this point", + ) + with mock.patch.object( + self.env.cr, "commit", mock.Mock(side_effect=self.env.flush_all) + ) as mock_commit: + job = RunJobController._acquire_job(self.env, job_uuid="test_enqueued_job") + mock_commit.assert_called_once() + self.assertIsNotNone(job) + self.assertEqual(job.uuid, "test_enqueued_job") + self.assertEqual(job.state, "started") + self.assertTrue( + self.env["queue.job.lock"].search( + [("queue_job_id", "=", job_record.id)] + ), + "A job lock record should exist at this point", + ) + + def test_acquire_started_job(self): + with ( + mock.patch.object( + self.env.cr, "commit", mock.Mock(side_effect=self.env.flush_all) + ) as mock_commit, + self.assertLogs(level=logging.WARNING) as logs, + ): + job = RunJobController._acquire_job(self.env, "test_started_job") + mock_commit.assert_not_called() + self.assertIsNone(job) + self.assertIn( + "was requested to run job test_started_job, but it does not exist", + logs.output[0], + ) diff --git a/test_queue_job/tests/test_requeue_dead_job.py b/test_queue_job/tests/test_requeue_dead_job.py index a6328fed76..3be5f6ffc6 100644 --- a/test_queue_job/tests/test_requeue_dead_job.py +++ b/test_queue_job/tests/test_requeue_dead_job.py @@ -13,23 +13,6 @@ @tagged("post_install", "-at_install") class TestRequeueDeadJob(JobCommonCase): - def _get_demo_job(self, uuid): - # job 
created during load of demo data - job = self.env["queue.job"].search( - [ - ("uuid", "=", uuid), - ], - limit=1, - ) - - self.assertTrue( - job, - f"Demo data queue job {uuid} should be loaded in order" - " to make this tests work", - ) - - return job - def get_locks(self, uuid, cr=None): """ Retrieve lock rows From ba7d2181696ea0ea63f6bc6d384d5478a385b1f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Sat, 3 Jan 2026 11:26:02 +0100 Subject: [PATCH 09/14] [IMP] queue_job: refactor runjob Extract the logic to run one job out of the /queue_job/runjob route. Towards making this logic reusable in other job executors. --- queue_job/controllers/main.py | 35 +++++++++++++++++------------------ 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/queue_job/controllers/main.py b/queue_job/controllers/main.py index e964bc5edd..42fc0598e0 100644 --- a/queue_job/controllers/main.py +++ b/queue_job/controllers/main.py @@ -107,17 +107,7 @@ def _enqueue_dependent_jobs(self, env, job): else: break - @http.route( - "/queue_job/runjob", - type="http", - auth="none", - save_session=False, - readonly=False, - ) - def runjob(self, db, job_uuid, **kw): - http.request.session.db = db - env = http.request.env(user=SUPERUSER_ID) - + def _runjob(self, env: api.Environment, job: Job) -> None: def retry_postpone(job, message, seconds=None): job.env.clear() with registry(job.env.cr.dbname).cursor() as new_cr: @@ -126,10 +116,6 @@ def retry_postpone(job, message, seconds=None): job.set_pending(reset_retry=False) job.store() - job = self._acquire_job(env, job_uuid) - if not job: - return "" - try: try: self._try_perform_job(env, job) @@ -161,7 +147,6 @@ def retry_postpone(job, message, seconds=None): # traceback in the logs we should have the traceback when all # retries are exhausted env.cr.rollback() - return "" except (FailedJobError, Exception) as orig_exception: buff = StringIO() @@ -181,8 +166,6 @@ def retry_postpone(job, message, seconds=None): self._enqueue_dependent_jobs(env, job) _logger.debug("%s enqueue depends done", job) - return "" - def _get_failure_values(self, job, traceback_txt, orig_exception): """Collect relevant data from exception.""" exception_name = orig_exception.__class__.__name__ @@ -197,6 +180,22 @@ def _get_failure_values(self, job, traceback_txt, orig_exception): "exc_message": exc_message, } + @http.route( + "/queue_job/runjob", + type="http", + auth="none", + save_session=False, + readonly=False, + ) + def runjob(self, db, job_uuid, **kw): + http.request.session.db = db + env = http.request.env(user=SUPERUSER_ID) + job = self._acquire_job(env, job_uuid) + if not job: + return "" + self._runjob(env, job) + return "" + # flake8: noqa: C901 @http.route("/queue_job/create_test_job", type="http", auth="user") def create_test_job( From 672220813341c066445ac50ef2dc8c75839d8328 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Sat, 3 Jan 2026 11:39:13 +0100 Subject: [PATCH 10/14] [IMP] queue_job: convert job execution logic to class method Towards making this logic reusable. 
--- queue_job/controllers/main.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/queue_job/controllers/main.py b/queue_job/controllers/main.py index 42fc0598e0..05ab92fca4 100644 --- a/queue_job/controllers/main.py +++ b/queue_job/controllers/main.py @@ -64,7 +64,8 @@ def _acquire_job(cls, env: api.Environment, job_uuid: str) -> Job | None: return None return job - def _try_perform_job(self, env, job): + @classmethod + def _try_perform_job(cls, env, job): """Try to perform the job, mark it done and commit if successful.""" _logger.debug("%s started", job) job.perform() @@ -77,7 +78,8 @@ def _try_perform_job(self, env, job): env.cr.commit() _logger.debug("%s done", job) - def _enqueue_dependent_jobs(self, env, job): + @classmethod + def _enqueue_dependent_jobs(cls, env, job): tries = 0 while True: try: @@ -107,7 +109,8 @@ def _enqueue_dependent_jobs(self, env, job): else: break - def _runjob(self, env: api.Environment, job: Job) -> None: + @classmethod + def _runjob(cls, env: api.Environment, job: Job) -> None: def retry_postpone(job, message, seconds=None): job.env.clear() with registry(job.env.cr.dbname).cursor() as new_cr: @@ -118,7 +121,7 @@ def retry_postpone(job, message, seconds=None): try: try: - self._try_perform_job(env, job) + cls._try_perform_job(env, job) except OperationalError as err: # Automatically retry the typical transaction serialization # errors @@ -156,17 +159,18 @@ def retry_postpone(job, message, seconds=None): job.env.clear() with registry(job.env.cr.dbname).cursor() as new_cr: job.env = job.env(cr=new_cr) - vals = self._get_failure_values(job, traceback_txt, orig_exception) + vals = cls._get_failure_values(job, traceback_txt, orig_exception) job.set_failed(**vals) job.store() buff.close() raise _logger.debug("%s enqueue depends started", job) - self._enqueue_dependent_jobs(env, job) + cls._enqueue_dependent_jobs(env, job) _logger.debug("%s enqueue depends done", job) - def _get_failure_values(self, job, traceback_txt, orig_exception): + @classmethod + def _get_failure_values(cls, job, traceback_txt, orig_exception): """Collect relevant data from exception.""" exception_name = orig_exception.__class__.__name__ if hasattr(orig_exception, "__module__"): From efe252eac50f5c8c02246fb2bfcff2ff551a089e Mon Sep 17 00:00:00 2001 From: hoangtrann Date: Sat, 22 Nov 2025 06:05:08 +0700 Subject: [PATCH 11/14] [IMP] queue_job: requeue orphaned jobs --- queue_job/jobrunner/runner.py | 37 +++++++++++++++++++ test_queue_job/tests/test_requeue_dead_job.py | 22 +++++++++++ 2 files changed, 59 insertions(+) diff --git a/queue_job/jobrunner/runner.py b/queue_job/jobrunner/runner.py index 846682a666..e4f2ab9c78 100644 --- a/queue_job/jobrunner/runner.py +++ b/queue_job/jobrunner/runner.py @@ -382,6 +382,35 @@ def _query_requeue_dead_jobs(self): RETURNING uuid """ + def _query_requeue_orphaned_jobs(self): + """Query to requeue jobs stuck in 'enqueued' state without a lock. + + This handles the edge case where the runner marks a job as 'enqueued' + but the HTTP request to start the job never reaches the Odoo server + (e.g., due to server shutdown/crash between setting enqueued and + the controller receiving the request). These jobs have no lock record + because set_started() was never called, so they are invisible to + _query_requeue_dead_jobs(). 
+ """ + return """ + UPDATE + queue_job + SET + state='pending' + WHERE + state = 'enqueued' + AND date_enqueued < (now() AT TIME ZONE 'utc' - INTERVAL '10 sec') + AND NOT EXISTS ( + SELECT + 1 + FROM + queue_job_lock + WHERE + queue_job_id = queue_job.id + ) + RETURNING uuid + """ + def requeue_dead_jobs(self): """ Set started and enqueued jobs but not locked to pending @@ -410,6 +439,14 @@ def requeue_dead_jobs(self): for (uuid,) in cr.fetchall(): _logger.warning("Re-queued dead job with uuid: %s", uuid) + # Requeue orphaned jobs (enqueued but never started, no lock) + query = self._query_requeue_orphaned_jobs() + cr.execute(query) + for (uuid,) in cr.fetchall(): + _logger.warning( + "Re-queued orphaned job (enqueued without lock) with uuid: %s", uuid + ) + class QueueJobRunner: def __init__( diff --git a/test_queue_job/tests/test_requeue_dead_job.py b/test_queue_job/tests/test_requeue_dead_job.py index 3be5f6ffc6..9f1105d2a8 100644 --- a/test_queue_job/tests/test_requeue_dead_job.py +++ b/test_queue_job/tests/test_requeue_dead_job.py @@ -82,3 +82,25 @@ def test_requeue_dead_jobs(self): uuids_requeued = self.env.cr.fetchall() self.assertTrue(queue_job.uuid in j[0] for j in uuids_requeued) + + def test_requeue_orphaned_jobs(self): + queue_job = self._get_demo_job("test_enqueued_job") + job_obj = Job.load(self.env, queue_job.uuid) + + # Only enqueued job, don't set it to started to simulate the scenario + # that system shutdown before job is starting + job_obj.set_enqueued() + job_obj.date_enqueued = datetime.now() - timedelta(minutes=1) + job_obj.store() + + # job ins't actually picked up by the first requeue attempt + query = Database(self.env.cr.dbname)._query_requeue_dead_jobs() + self.env.cr.execute(query) + uuids_requeued = self.env.cr.fetchall() + self.assertFalse(uuids_requeued) + + # job is picked up by the 2nd requeue attempt + query = Database(self.env.cr.dbname)._query_requeue_orphaned_jobs() + self.env.cr.execute(query) + uuids_requeued = self.env.cr.fetchall() + self.assertTrue(queue_job.uuid in j[0] for j in uuids_requeued) From afc351ce970e8c09b63a01727ebff3bba51a86a5 Mon Sep 17 00:00:00 2001 From: hoangtrann Date: Wed, 31 Dec 2025 19:27:16 +0700 Subject: [PATCH 12/14] [IMP] queue_job: query orphaned dead job not exist in lock table --- queue_job/jobrunner/runner.py | 78 ++++++------------- test_queue_job/tests/test_requeue_dead_job.py | 8 +- 2 files changed, 26 insertions(+), 60 deletions(-) diff --git a/queue_job/jobrunner/runner.py b/queue_job/jobrunner/runner.py index e4f2ab9c78..396d23bf69 100644 --- a/queue_job/jobrunner/runner.py +++ b/queue_job/jobrunner/runner.py @@ -361,52 +361,26 @@ def _query_requeue_dead_jobs(self): ELSE exc_info END) WHERE - id in ( - SELECT - queue_job_id - FROM - queue_job_lock - WHERE - queue_job_id in ( - SELECT - id - FROM - queue_job - WHERE - state IN ('enqueued','started') - AND date_enqueued < - (now() AT TIME ZONE 'utc' - INTERVAL '10 sec') - ) - FOR UPDATE SKIP LOCKED - ) - RETURNING uuid - """ - - def _query_requeue_orphaned_jobs(self): - """Query to requeue jobs stuck in 'enqueued' state without a lock. - - This handles the edge case where the runner marks a job as 'enqueued' - but the HTTP request to start the job never reaches the Odoo server - (e.g., due to server shutdown/crash between setting enqueued and - the controller receiving the request). These jobs have no lock record - because set_started() was never called, so they are invisible to - _query_requeue_dead_jobs(). 
- """ - return """ - UPDATE - queue_job - SET - state='pending' - WHERE - state = 'enqueued' + state IN ('enqueued','started') AND date_enqueued < (now() AT TIME ZONE 'utc' - INTERVAL '10 sec') - AND NOT EXISTS ( - SELECT - 1 - FROM - queue_job_lock - WHERE - queue_job_id = queue_job.id + AND ( + id in ( + SELECT + queue_job_id + FROM + queue_job_lock + WHERE + queue_job_lock.queue_job_id = queue_job.id + FOR UPDATE SKIP LOCKED + ) + OR NOT EXISTS ( + SELECT + 1 + FROM + queue_job_lock + WHERE + queue_job_lock.queue_job_id = queue_job.id + ) ) RETURNING uuid """ @@ -429,6 +403,12 @@ def requeue_dead_jobs(self): However, when the Odoo server crashes or is otherwise force-stopped, running jobs are interrupted while the runner has no chance to know they have been aborted. + + This also handles orphaned jobs (enqueued but never started, no lock). + This edge case occurs when the runner marks a job as 'enqueued' + but the HTTP request to start the job never reaches the Odoo server + (e.g., due to server shutdown/crash between setting enqueued and + the controller receiving the request). """ with closing(self.conn.cursor()) as cr: @@ -439,14 +419,6 @@ def requeue_dead_jobs(self): for (uuid,) in cr.fetchall(): _logger.warning("Re-queued dead job with uuid: %s", uuid) - # Requeue orphaned jobs (enqueued but never started, no lock) - query = self._query_requeue_orphaned_jobs() - cr.execute(query) - for (uuid,) in cr.fetchall(): - _logger.warning( - "Re-queued orphaned job (enqueued without lock) with uuid: %s", uuid - ) - class QueueJobRunner: def __init__( diff --git a/test_queue_job/tests/test_requeue_dead_job.py b/test_queue_job/tests/test_requeue_dead_job.py index 9f1105d2a8..510276be63 100644 --- a/test_queue_job/tests/test_requeue_dead_job.py +++ b/test_queue_job/tests/test_requeue_dead_job.py @@ -93,14 +93,8 @@ def test_requeue_orphaned_jobs(self): job_obj.date_enqueued = datetime.now() - timedelta(minutes=1) job_obj.store() - # job ins't actually picked up by the first requeue attempt + # job is now picked up by the requeue query (which includes orphaned jobs) query = Database(self.env.cr.dbname)._query_requeue_dead_jobs() self.env.cr.execute(query) uuids_requeued = self.env.cr.fetchall() - self.assertFalse(uuids_requeued) - - # job is picked up by the 2nd requeue attempt - query = Database(self.env.cr.dbname)._query_requeue_orphaned_jobs() - self.env.cr.execute(query) - uuids_requeued = self.env.cr.fetchall() self.assertTrue(queue_job.uuid in j[0] for j in uuids_requeued) From 227a5e7b455f62b228722fd190876a2548a12c1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Sun, 4 Jan 2026 13:38:48 +0100 Subject: [PATCH 13/14] [IMP] queue_job: take weaker locks Since we are not going to delete records nor modify foreign keys, we can take a weaker lock. 
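The semantic difference between the two lock strengths can be illustrated with the sketch below; the ids and database name are made up, and it assumes queue_job_lock.queue_job_id is a foreign key to queue_job.id, as in this module. FOR NO KEY UPDATE stays compatible with the FOR KEY SHARE locks taken by foreign-key checks on referencing rows, whereas FOR UPDATE blocks them.

import odoo

registry = odoo.registry("mydb")  # hypothetical database name

with registry.cursor() as cr_a, registry.cursor() as cr_b:
    # Worker A locks a job row without claiming the right to change its key.
    cr_a.execute("SELECT id FROM queue_job WHERE id = 42 FOR NO KEY UPDATE")

    # Worker B inserts a lock record referencing the same job. The foreign-key
    # check only takes a KEY SHARE lock on the queue_job row, which is
    # compatible with NO KEY UPDATE, so this insert does not wait. Had worker A
    # used FOR UPDATE, this statement would block until A's transaction ended.
    cr_b.execute("INSERT INTO queue_job_lock (queue_job_id) VALUES (42)")
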
--- queue_job/controllers/main.py | 2 +- queue_job/job.py | 2 +- queue_job/jobrunner/runner.py | 2 +- test_queue_job/tests/test_requeue_dead_job.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/queue_job/controllers/main.py b/queue_job/controllers/main.py index 05ab92fca4..adc450d52d 100644 --- a/queue_job/controllers/main.py +++ b/queue_job/controllers/main.py @@ -40,7 +40,7 @@ def _acquire_job(cls, env: api.Environment, job_uuid: str) -> Job | None: """ env.cr.execute( "SELECT uuid FROM queue_job WHERE uuid=%s AND state=%s " - "FOR UPDATE SKIP LOCKED", + "FOR NO KEY UPDATE SKIP LOCKED", (job_uuid, ENQUEUED), ) if not env.cr.fetchone(): diff --git a/queue_job/job.py b/queue_job/job.py index 755d37ab38..86407be3bb 100644 --- a/queue_job/job.py +++ b/queue_job/job.py @@ -278,7 +278,7 @@ def lock(self) -> bool: uuid = %s AND state = %s ) - FOR UPDATE SKIP LOCKED; + FOR NO KEY UPDATE SKIP LOCKED; """, [self.uuid, STARTED], ) diff --git a/queue_job/jobrunner/runner.py b/queue_job/jobrunner/runner.py index 396d23bf69..586c251128 100644 --- a/queue_job/jobrunner/runner.py +++ b/queue_job/jobrunner/runner.py @@ -371,7 +371,7 @@ def _query_requeue_dead_jobs(self): queue_job_lock WHERE queue_job_lock.queue_job_id = queue_job.id - FOR UPDATE SKIP LOCKED + FOR NO KEY UPDATE SKIP LOCKED ) OR NOT EXISTS ( SELECT diff --git a/test_queue_job/tests/test_requeue_dead_job.py b/test_queue_job/tests/test_requeue_dead_job.py index 510276be63..a267c43c87 100644 --- a/test_queue_job/tests/test_requeue_dead_job.py +++ b/test_queue_job/tests/test_requeue_dead_job.py @@ -35,7 +35,7 @@ def get_locks(self, uuid, cr=None): WHERE uuid = %s ) - FOR UPDATE SKIP LOCKED + FOR NO KEY UPDATE SKIP LOCKED """, [uuid], ) From c25e2d1685cfd424bcefe79f6076122de8545e21 Mon Sep 17 00:00:00 2001 From: Tom Date: Thu, 8 Jan 2026 15:57:09 +0100 Subject: [PATCH 14/14] [FIX] test_queue_job: also add an _unregister_hook function that reverts patches Otherwise monkey-patches could be added twice when instantiating new test classes. --- test_queue_job/models/test_models.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/test_queue_job/models/test_models.py b/test_queue_job/models/test_models.py index 2e85be8057..585e2fd593 100644 --- a/test_queue_job/models/test_models.py +++ b/test_queue_job/models/test_models.py @@ -141,6 +141,13 @@ def _register_hook(self): ) return super()._register_hook() + def _unregister_hook(self): + """Remove the patches installed by _register_hook()""" + self._revert_method("delay_me") + self._revert_method("delay_me_options") + self._revert_method("delay_me_context_key") + return super()._unregister_hook() + def _job_store_values(self, job): value = "JUST_TESTING" if job.state == "failed":
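
Taken together, patches 08 to 10 turn job acquisition and execution into class methods on RunJobController, which makes them callable outside the /queue_job/runjob route. A rough, illustrative sketch of such reuse follows; the database name, uuid and cursor handling are assumptions, and only _acquire_job and _runjob come from the module:

import odoo
from odoo import SUPERUSER_ID, api

from odoo.addons.queue_job.controllers.main import RunJobController


def run_one_job(db_name, job_uuid):
    """Acquire one enqueued job and run it, skipping it when another
    worker already holds it (the SKIP LOCKED acquisition makes this safe)."""
    registry = odoo.registry(db_name)
    with registry.cursor() as cr:
        env = api.Environment(cr, SUPERUSER_ID, {})
        job = RunJobController._acquire_job(env, job_uuid)
        if job is None:
            # Not ENQUEUED any more, or locked by another worker.
            return False
        # Performs the job, commits, handles retries and failures, and
        # enqueues dependent jobs, as the /queue_job/runjob route does.
        RunJobController._runjob(env, job)
        return True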