# Copyright (c) 2015-2016 ACSONE SA/NV (<http://acsone.eu>)
# Copyright 2013-2016 Camptocamp SA
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)

import logging
import random
import time
import traceback
from io import StringIO

from psycopg2 import OperationalError, errorcodes
from werkzeug.exceptions import BadRequest, Forbidden

from odoo import SUPERUSER_ID, _, api, http
from odoo.modules.registry import Registry
from odoo.service.model import PG_CONCURRENCY_ERRORS_TO_RETRY

from ..delay import chain, group
from ..exception import FailedJobError, RetryableJobError
from ..job import ENQUEUED, Job

_logger = logging.getLogger(__name__)

PG_RETRY = 5  # seconds

DEPENDS_MAX_TRIES_ON_CONCURRENCY_FAILURE = 5


class RunJobController(http.Controller):
    @classmethod
    def _acquire_job(
        cls, env: api.Environment, job_uuid: str | None = None
    ) -> Job | None:
        """Acquire a job for execution.

        - make sure it is in ENQUEUED state
        - mark it as STARTED and commit the state change
        - acquire the job lock

        If successful, return the Job instance, otherwise return None. This
        function may fail to acquire the job if it is not in the expected
        state or if it is already locked by another worker.

        If no job_uuid is given, acquire any available job in ENQUEUED state.
        """
        if job_uuid:
            env.cr.execute(
                "SELECT uuid FROM queue_job WHERE uuid=%s AND state=%s "
                "FOR UPDATE SKIP LOCKED",
                (job_uuid, ENQUEUED),
            )
        else:
            env.cr.execute(
                "SELECT uuid FROM queue_job WHERE state=%s LIMIT 1 "
                "FOR UPDATE SKIP LOCKED",
                (ENQUEUED,),
            )
        job_row = env.cr.fetchone()
        if not job_row:
            _logger.debug("no job to run")
            return None
        job = Job.load(env, job_uuid=job_row[0])
        assert job and job.state == ENQUEUED
        job.set_started()
        job.store()
        env.cr.commit()
        if not job.lock():
            _logger.debug("could not acquire lock for job %s", job.uuid)
            return None
        return job
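
    # Illustrative sketch (not part of the module): a hypothetical runner
    # could drain the queue by acquiring jobs until none is left, assuming
    # `env` is a valid api.Environment:
    #
    #     while (job := RunJobController._acquire_job(env)) is not None:
    #         RunJobController._runjob(env, job)
    #
    # Note that _acquire_job commits the STARTED state *before* taking the
    # job lock, and FOR UPDATE SKIP LOCKED means a concurrent worker
    # selecting the same row simply skips it instead of blocking.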

    @classmethod
    def _try_perform_job(cls, env, job):
        """Try to perform the job, mark it done and commit if successful."""
        _logger.debug("%s started", job)
        job.perform()
        # Trigger any stored computed fields before calling 'set_done'
        # so that they are part of the 'exec_time'
        env.flush_all()
        job.set_done()
        job.store()
        env.flush_all()
        env.cr.commit()
        _logger.debug("%s done", job)

    @classmethod
    def _enqueue_dependent_jobs(cls, env, job):
        """Enqueue the jobs waiting on this job, retrying on transaction
        serialization errors with a random exponential backoff.
        """
        tries = 0
        while True:
            try:
                with job.env.cr.savepoint():
                    job.enqueue_waiting()
            except OperationalError as err:
                # Automatically retry the typical transaction serialization
                # errors
                if err.pgcode not in PG_CONCURRENCY_ERRORS_TO_RETRY:
                    raise
                if tries >= DEPENDS_MAX_TRIES_ON_CONCURRENCY_FAILURE:
                    _logger.error(
                        "%s, maximum number of tries reached to update dependencies",
                        errorcodes.lookup(err.pgcode),
                    )
                    raise
                wait_time = random.uniform(0.0, 2**tries)
                tries += 1
                _logger.info(
                    "%s, retry %d/%d in %.04f sec...",
                    errorcodes.lookup(err.pgcode),
                    tries,
                    DEPENDS_MAX_TRIES_ON_CONCURRENCY_FAILURE,
                    wait_time,
                )
                time.sleep(wait_time)
            else:
                break
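
    # Backoff illustration: wait_time is drawn uniformly from [0, 2**tries),
    # so successive retries wait at most ~1s, 2s, 4s, 8s and 16s; a sixth
    # failure re-raises (DEPENDS_MAX_TRIES_ON_CONCURRENCY_FAILURE = 5).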

    @classmethod
    def _runjob(cls, env: api.Environment, job: Job) -> None:
        def retry_postpone(job, message, seconds=None):
            job.env.clear()
            with Registry(job.env.cr.dbname).cursor() as new_cr:
                job.env = api.Environment(new_cr, SUPERUSER_ID, {})
                job.postpone(result=message, seconds=seconds)
                job.set_pending(reset_retry=False)
                job.store()

        try:
            try:
                cls._try_perform_job(env, job)
            except OperationalError as err:
                # Automatically retry the typical transaction serialization
                # errors
                if err.pgcode not in PG_CONCURRENCY_ERRORS_TO_RETRY:
                    raise
                _logger.debug("%s OperationalError, postponed", job)
                raise RetryableJobError(err.pgerror, seconds=PG_RETRY) from err
        except RetryableJobError as err:
            # Delay the job for later and requeue it.
            retry_postpone(job, str(err), seconds=err.seconds)
            _logger.debug("%s postponed", job)
            # Do not re-raise the error: we don't want an exception traceback
            # in the logs; we should have the traceback only when all retries
            # are exhausted.
            env.cr.rollback()
        except (FailedJobError, Exception) as orig_exception:
            buff = StringIO()
            traceback.print_exc(file=buff)
            traceback_txt = buff.getvalue()
            _logger.error(traceback_txt)
            job.env.clear()
            with Registry(job.env.cr.dbname).cursor() as new_cr:
                job.env = job.env(cr=new_cr)
                vals = cls._get_failure_values(job, traceback_txt, orig_exception)
                job.set_failed(**vals)
                job.store()
                buff.close()
            raise

        _logger.debug("%s enqueue depends started", job)
        cls._enqueue_dependent_jobs(env, job)
        env.cr.commit()
        _logger.debug("%s enqueue depends done", job)

    @classmethod
    def _get_failure_values(cls, job, traceback_txt, orig_exception):
        """Collect relevant data from exception."""
        exception_name = orig_exception.__class__.__name__
        if hasattr(orig_exception, "__module__"):
            exception_name = orig_exception.__module__ + "." + exception_name
        exc_message = (
            orig_exception.args[0] if orig_exception.args else str(orig_exception)
        )
        return {
            "exc_info": traceback_txt,
            "exc_name": exception_name,
            "exc_message": exc_message,
        }
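
    # Example: for ValueError("boom") this returns
    # {"exc_info": <traceback text>, "exc_name": "builtins.ValueError",
    #  "exc_message": "boom"}, since ValueError.__module__ is "builtins".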

    @http.route(
        "/queue_job/runjob",
        type="http",
        auth="none",
        save_session=False,
        readonly=False,
    )
    def runjob(self, db: str, job_uuid: str | None, **kw):
        http.request.session.db = db
        env = http.request.env(user=SUPERUSER_ID)

        run_as = env["ir.config_parameter"].get_param("queue_job.run_as")
        if run_as == "cron":
            crons = env["ir.cron"].search(
                env["queue.job.executor"]._executor_cron_domain()
            )
            assert crons, "No queue_job executor cron found"
            for cron in crons:
                # TODO Waking all of them is a bit wasteful, although not very
                # costly. Ideally we should wake only one that is not already
                # running.
                cron._trigger()
        else:
            # Run in this http worker
            job = self._acquire_job(env, job_uuid)
            if not job:
                return ""
            self._runjob(env, job)
        return ""

    # flake8: noqa: C901
    @http.route("/queue_job/create_test_job", type="http", auth="user")
    def create_test_job(
        self,
        priority=None,
        max_retries=None,
        channel=None,
        description="Test job",
        size=1,
        failure_rate=0,
        job_duration=0,
    ):
        if not http.request.env.user.has_group("base.group_erp_manager"):
            raise Forbidden(_("Access Denied"))

        if failure_rate is not None:
            try:
                failure_rate = float(failure_rate)
            except (ValueError, TypeError):
                failure_rate = 0

        if job_duration is not None:
            try:
                job_duration = float(job_duration)
            except (ValueError, TypeError):
                job_duration = 0

        if not (0 <= failure_rate <= 1):
            raise BadRequest("failure_rate must be between 0 and 1")

        if size is not None:
            try:
                size = int(size)
            except (ValueError, TypeError):
                size = 1

        if priority is not None:
            try:
                priority = int(priority)
            except ValueError:
                priority = None

        if max_retries is not None:
            try:
                max_retries = int(max_retries)
            except ValueError:
                max_retries = None

        if size == 1:
            return self._create_single_test_job(
                priority=priority,
                max_retries=max_retries,
                channel=channel,
                description=description,
                failure_rate=failure_rate,
                job_duration=job_duration,
            )

        if size > 1:
            return self._create_graph_test_jobs(
                size,
                priority=priority,
                max_retries=max_retries,
                channel=channel,
                description=description,
                failure_rate=failure_rate,
                job_duration=job_duration,
            )
        return ""

    def _create_single_test_job(
        self,
        priority=None,
        max_retries=None,
        channel=None,
        description="Test job",
        size=1,
        failure_rate=0,
        job_duration=0,
    ):
        delayed = (
            http.request.env["queue.job"]
            .with_delay(
                priority=priority,
                max_retries=max_retries,
                channel=channel,
                description=description,
            )
            ._test_job(failure_rate=failure_rate, job_duration=job_duration)
        )
        return f"job uuid: {delayed.db_record().uuid}"

    TEST_GRAPH_MAX_PER_GROUP = 5

    def _create_graph_test_jobs(
        self,
        size,
        priority=None,
        max_retries=None,
        channel=None,
        description="Test job",
        failure_rate=0,
        job_duration=0,
    ):
        model = http.request.env["queue.job"]
        current_count = 0

        possible_grouping_methods = (chain, group)
        tails = []  # we can connect new graph chains/groups to tails
        root_delayable = None
        while current_count < size:
            jobs_count = min(
                size - current_count, random.randint(1, self.TEST_GRAPH_MAX_PER_GROUP)
            )

            jobs = []
            for __ in range(jobs_count):
                current_count += 1
                jobs.append(
                    model.delayable(
                        priority=priority,
                        max_retries=max_retries,
                        channel=channel,
                        description="%s #%d" % (description, current_count),
                    )._test_job(failure_rate=failure_rate, job_duration=job_duration)
                )

            grouping = random.choice(possible_grouping_methods)
            delayable = grouping(*jobs)
            if not root_delayable:
                root_delayable = delayable
            else:
                tail_delayable = random.choice(tails)
                tail_delayable.on_done(delayable)
            tails.append(delayable)

        root_delayable.delay()

        return (
            f"graph uuid: {list(root_delayable._head())[0]._generated_job.graph_uuid}"
        )
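
    # Shape illustration: for size=5 this might build chain(j1, j2) as the
    # root, then attach group(j3, j4, j5) via on_done() to a randomly chosen
    # tail, so the group only starts once that chain has finished.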