Merge "Change 'editfont' default preference to 'monospace'"
includes/jobqueue/JobQueueDB.php
<?php
/**
 * Database-backed job queue code.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 * http://www.gnu.org/copyleft/gpl.html
 *
 * @file
 */
use Wikimedia\Rdbms\IDatabase;
use Wikimedia\Rdbms\DBConnRef;
use Wikimedia\Rdbms\DBConnectionError;
use Wikimedia\Rdbms\DBError;
use MediaWiki\MediaWikiServices;
use Wikimedia\ScopedCallback;

/**
 * Class to handle job queues stored in the DB
 *
 * @ingroup JobQueue
 * @since 1.21
 */
class JobQueueDB extends JobQueue {
	const CACHE_TTL_SHORT = 30; // integer; seconds to cache info without re-validating
	const MAX_AGE_PRUNE = 604800; // integer; seconds a job can live once claimed
	const MAX_JOB_RANDOM = 2147483647; // integer; 2^31 - 1, used for job_random
	const MAX_OFFSET = 255; // integer; maximum number of rows to skip

	/** @var WANObjectCache */
	protected $cache;

	/** @var bool|string Name of an external DB cluster. False if not set */
	protected $cluster = false;

	/**
	 * Additional parameters include:
	 *   - cluster : The name of an external cluster registered via LBFactory.
	 *      If not specified, the primary DB cluster for the wiki will be used.
	 *      This can be overridden with a custom cluster so that DB handles will
	 *      be retrieved via LBFactory::getExternalLB() and getConnection().
	 * @param array $params
	 */
	protected function __construct( array $params ) {
		parent::__construct( $params );

		$this->cluster = isset( $params['cluster'] ) ? $params['cluster'] : false;
		$this->cache = ObjectCache::getMainWANInstance();
	}
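
	/*
	 * Illustrative sketch (not part of the original file): how a wiki might route a job
	 * type to this class, including the optional 'cluster' parameter documented above.
	 * The job type 'refreshLinks' and cluster name 'extension1' are hypothetical examples.
	 *
	 * @code
	 * $wgJobTypeConf['refreshLinks'] = [
	 *     'class' => 'JobQueueDB',
	 *     'order' => 'random',
	 *     'claimTTL' => 3600, // seconds before recycleAndDeleteStaleJobs() may release a claim
	 *     'cluster' => 'extension1', // external cluster registered via LBFactory (optional)
	 * ];
	 * @endcode
	 */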

	protected function supportedOrders() {
		return [ 'random', 'timestamp', 'fifo' ];
	}

	protected function optimalOrder() {
		return 'random';
	}

	/**
	 * @see JobQueue::doIsEmpty()
	 * @return bool
	 */
	protected function doIsEmpty() {
		$dbr = $this->getReplicaDB();
		try {
			$found = $dbr->selectField( // unclaimed job
				'job', '1', [ 'job_cmd' => $this->type, 'job_token' => '' ], __METHOD__
			);
		} catch ( DBError $e ) {
			$this->throwDBException( $e );
		}

		return !$found;
	}

	/**
	 * @see JobQueue::doGetSize()
	 * @return int
	 */
	protected function doGetSize() {
		$key = $this->getCacheKey( 'size' );

		$size = $this->cache->get( $key );
		if ( is_int( $size ) ) {
			return $size;
		}

		try {
			$dbr = $this->getReplicaDB();
			$size = (int)$dbr->selectField( 'job', 'COUNT(*)',
				[ 'job_cmd' => $this->type, 'job_token' => '' ],
				__METHOD__
			);
		} catch ( DBError $e ) {
			$this->throwDBException( $e );
		}
		$this->cache->set( $key, $size, self::CACHE_TTL_SHORT );

		return $size;
	}

	/**
	 * @see JobQueue::doGetAcquiredCount()
	 * @return int
	 */
	protected function doGetAcquiredCount() {
		if ( $this->claimTTL <= 0 ) {
			return 0; // no acknowledgements
		}

		$key = $this->getCacheKey( 'acquiredcount' );

		$count = $this->cache->get( $key );
		if ( is_int( $count ) ) {
			return $count;
		}

		$dbr = $this->getReplicaDB();
		try {
			$count = (int)$dbr->selectField( 'job', 'COUNT(*)',
				[ 'job_cmd' => $this->type, "job_token != {$dbr->addQuotes( '' )}" ],
				__METHOD__
			);
		} catch ( DBError $e ) {
			$this->throwDBException( $e );
		}
		$this->cache->set( $key, $count, self::CACHE_TTL_SHORT );

		return $count;
	}

	/**
	 * @see JobQueue::doGetAbandonedCount()
	 * @return int
	 * @throws MWException
	 */
	protected function doGetAbandonedCount() {
		if ( $this->claimTTL <= 0 ) {
			return 0; // no acknowledgements
		}

		$key = $this->getCacheKey( 'abandonedcount' );

		$count = $this->cache->get( $key );
		if ( is_int( $count ) ) {
			return $count;
		}

		$dbr = $this->getReplicaDB();
		try {
			$count = (int)$dbr->selectField( 'job', 'COUNT(*)',
				[
					'job_cmd' => $this->type,
					"job_token != {$dbr->addQuotes( '' )}",
					"job_attempts >= " . $dbr->addQuotes( $this->maxTries )
				],
				__METHOD__
			);
		} catch ( DBError $e ) {
			$this->throwDBException( $e );
		}

		$this->cache->set( $key, $count, self::CACHE_TTL_SHORT );

		return $count;
	}

	/**
	 * @see JobQueue::doBatchPush()
	 * @param IJobSpecification[] $jobs
	 * @param int $flags
	 * @throws DBError|Exception
	 * @return void
	 */
	protected function doBatchPush( array $jobs, $flags ) {
		$dbw = $this->getMasterDB();
		// In general, there will be two cases here:
		// a) sqlite; DB connection is probably a regular round-aware handle.
		// If the connection is busy with a transaction, then defer the job writes
		// until right before the main round commit step. Any errors that bubble
		// up will rollback the main commit round.
		// b) mysql/postgres; DB connection is generally a separate CONN_TRX_AUTO handle.
		// No transaction is active nor will be started by writes, so enqueue the jobs
		// now so that any errors will show up immediately as the interface expects. Any
		// errors that bubble up will rollback the main commit round.
		$fname = __METHOD__;
		$dbw->onTransactionPreCommitOrIdle(
			function () use ( $dbw, $jobs, $flags, $fname ) {
				$this->doBatchPushInternal( $dbw, $jobs, $flags, $fname );
			},
			$fname
		);
	}
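
	/*
	 * Illustrative sketch (not part of the original file): how callers typically reach
	 * doBatchPush(). Jobs are handed to JobQueueGroup, which resolves the queue configured
	 * for the job type and pushes to it. The job type, parameters and title are hypothetical.
	 *
	 * @code
	 * JobQueueGroup::singleton()->push( new JobSpecification(
	 *     'refreshLinks',
	 *     [ 'table' => 'templatelinks' ], // job parameters
	 *     [], // options
	 *     Title::makeTitle( NS_MAIN, 'Example_page' )
	 * ) );
	 * @endcode
	 */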

	/**
	 * This function should *not* be called outside of JobQueueDB
	 *
	 * @param IDatabase $dbw
	 * @param IJobSpecification[] $jobs
	 * @param int $flags
	 * @param string $method
	 * @throws DBError
	 * @return void
	 */
	public function doBatchPushInternal( IDatabase $dbw, array $jobs, $flags, $method ) {
		if ( !count( $jobs ) ) {
			return;
		}

		$rowSet = []; // (sha1 => job) map for jobs that are de-duplicated
		$rowList = []; // list of jobs for jobs that are not de-duplicated
		foreach ( $jobs as $job ) {
			$row = $this->insertFields( $job );
			if ( $job->ignoreDuplicates() ) {
				$rowSet[$row['job_sha1']] = $row;
			} else {
				$rowList[] = $row;
			}
		}

		if ( $flags & self::QOS_ATOMIC ) {
			$dbw->startAtomic( $method ); // wrap all the job additions in one transaction
		}
		try {
			// Strip out any duplicate jobs that are already in the queue...
			if ( count( $rowSet ) ) {
				$res = $dbw->select( 'job', 'job_sha1',
					[
						// No job_type condition since it's part of the job_sha1 hash
						'job_sha1' => array_keys( $rowSet ),
						'job_token' => '' // unclaimed
					],
					$method
				);
				foreach ( $res as $row ) {
					wfDebug( "Job with hash '{$row->job_sha1}' is a duplicate.\n" );
					unset( $rowSet[$row->job_sha1] ); // already enqueued
				}
			}
			// Build the full list of job rows to insert
			$rows = array_merge( $rowList, array_values( $rowSet ) );
			// Insert the job rows in chunks to avoid replica DB lag...
			foreach ( array_chunk( $rows, 50 ) as $rowBatch ) {
				$dbw->insert( 'job', $rowBatch, $method );
			}
			JobQueue::incrStats( 'inserts', $this->type, count( $rows ) );
			JobQueue::incrStats( 'dupe_inserts', $this->type,
				count( $rowSet ) + count( $rowList ) - count( $rows )
			);
		} catch ( DBError $e ) {
			$this->throwDBException( $e );
		}
		if ( $flags & self::QOS_ATOMIC ) {
			$dbw->endAtomic( $method );
		}

		return;
	}

	/**
	 * @see JobQueue::doPop()
	 * @return Job|bool
	 */
	protected function doPop() {
		$dbw = $this->getMasterDB();
		try {
			$autoTrx = $dbw->getFlag( DBO_TRX ); // get current setting
			$dbw->clearFlag( DBO_TRX ); // make each query its own transaction
			$scopedReset = new ScopedCallback( function () use ( $dbw, $autoTrx ) {
				$dbw->setFlag( $autoTrx ? DBO_TRX : 0 ); // restore old setting
			} );

			$uuid = wfRandomString( 32 ); // pop attempt
			$job = false; // job popped off
			do { // retry when our row is invalid or deleted as a duplicate
				// Try to reserve a row in the DB...
				if ( in_array( $this->order, [ 'fifo', 'timestamp' ] ) ) {
					$row = $this->claimOldest( $uuid );
				} else { // random first
					$rand = mt_rand( 0, self::MAX_JOB_RANDOM ); // encourage concurrent UPDATEs
					$gte = (bool)mt_rand( 0, 1 ); // find rows with rand before/after $rand
					$row = $this->claimRandom( $uuid, $rand, $gte );
				}
				// Check if we found a row to reserve...
				if ( !$row ) {
					break; // nothing to do
				}
				JobQueue::incrStats( 'pops', $this->type );
				// Get the job object from the row...
				$title = Title::makeTitle( $row->job_namespace, $row->job_title );
				$job = Job::factory( $row->job_cmd, $title,
					self::extractBlob( $row->job_params ), $row->job_id );
				$job->metadata['id'] = $row->job_id;
				$job->metadata['timestamp'] = $row->job_timestamp;
				break; // done
			} while ( true );

			if ( !$job || mt_rand( 0, 9 ) == 0 ) {
				// Handle jobs that need to be recycled/deleted;
				// any recycled jobs will be picked up next attempt
				$this->recycleAndDeleteStaleJobs();
			}
		} catch ( DBError $e ) {
			$this->throwDBException( $e );
		}

		return $job;
	}
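
	/*
	 * Illustrative sketch (not part of the original file): the basic pop/run/ack cycle a
	 * job runner performs against this queue. On ack() the claimed row is deleted by
	 * doAck(); if a runner dies first, the claim times out and recycleAndDeleteStaleJobs()
	 * eventually makes the job claimable again. Real runners (e.g. JobRunner) add error
	 * handling and retry accounting around this.
	 *
	 * @code
	 * $queue = JobQueueGroup::singleton()->get( 'refreshLinks' ); // hypothetical job type
	 * while ( ( $job = $queue->pop() ) !== false ) {
	 *     $job->run();
	 *     $queue->ack( $job ); // remove the claimed row
	 * }
	 * @endcode
	 */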

	/**
	 * Reserve a row with a single UPDATE without holding row locks over RTTs...
	 *
	 * @param string $uuid 32 char hex string
	 * @param int $rand Random unsigned integer (31 bits)
	 * @param bool $gte Search for job_random >= $random (otherwise job_random <= $random)
	 * @return stdClass|bool Row|false
	 */
	protected function claimRandom( $uuid, $rand, $gte ) {
		$dbw = $this->getMasterDB();
		// Check cache to see if the queue has <= OFFSET items
		$tinyQueue = $this->cache->get( $this->getCacheKey( 'small' ) );

		$row = false; // the row acquired
		$invertedDirection = false; // whether one job_random direction was already scanned
		// This uses a replication safe method for acquiring jobs. One could use UPDATE+LIMIT
		// instead, but that either uses ORDER BY (in which case it deadlocks in MySQL) or is
		// not replication safe. Due to https://bugs.mysql.com/bug.php?id=6980, subqueries cannot
		// be used here with MySQL.
		do {
			if ( $tinyQueue ) { // queue has <= MAX_OFFSET rows
				// For small queues, using OFFSET will overshoot and return no rows more often.
				// Instead, this uses job_random to pick a row (possibly checking both directions).
				$ineq = $gte ? '>=' : '<=';
				$dir = $gte ? 'ASC' : 'DESC';
				$row = $dbw->selectRow( 'job', self::selectFields(), // find a random job
					[
						'job_cmd' => $this->type,
						'job_token' => '', // unclaimed
						"job_random {$ineq} {$dbw->addQuotes( $rand )}" ],
					__METHOD__,
					[ 'ORDER BY' => "job_random {$dir}" ]
				);
				if ( !$row && !$invertedDirection ) {
					$gte = !$gte;
					$invertedDirection = true;
					continue; // try the other direction
				}
			} else { // table *may* have >= MAX_OFFSET rows
				// T44614: "ORDER BY job_random" with a job_random inequality causes high CPU
				// in MySQL if there are many rows for some reason. This uses a small OFFSET
				// instead of job_random for reducing excess claim retries.
				$row = $dbw->selectRow( 'job', self::selectFields(), // find a random job
					[
						'job_cmd' => $this->type,
						'job_token' => '', // unclaimed
					],
					__METHOD__,
					[ 'OFFSET' => mt_rand( 0, self::MAX_OFFSET ) ]
				);
				if ( !$row ) {
					$tinyQueue = true; // we know the queue must have <= MAX_OFFSET rows
					$this->cache->set( $this->getCacheKey( 'small' ), 1, 30 );
					continue; // use job_random
				}
			}

			if ( $row ) { // claim the job
				$dbw->update( 'job', // update by PK
					[
						'job_token' => $uuid,
						'job_token_timestamp' => $dbw->timestamp(),
						'job_attempts = job_attempts+1' ],
					[ 'job_cmd' => $this->type, 'job_id' => $row->job_id, 'job_token' => '' ],
					__METHOD__
				);
				// This might get raced out by another runner when claiming the previously
				// selected row. The use of job_random should minimize this problem, however.
				if ( !$dbw->affectedRows() ) {
					$row = false; // raced out
				}
			} else {
				break; // nothing to do
			}
		} while ( !$row );

		return $row;
	}
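
	/*
	 * Descriptive note (not part of the original file): with placeholder values, the claim
	 * above amounts to roughly this two-step pattern, a SELECT to pick a candidate row and
	 * an UPDATE by primary key that only succeeds if the row is still unclaimed:
	 *
	 *   SELECT ... FROM job WHERE job_cmd = '<type>' AND job_token = ''
	 *       AND job_random >= <rand> ORDER BY job_random ASC LIMIT 1;
	 *   UPDATE job SET job_token = '<uuid>', job_token_timestamp = '<now>',
	 *       job_attempts = job_attempts+1
	 *       WHERE job_cmd = '<type>' AND job_id = <id> AND job_token = '';
	 */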

	/**
	 * Reserve a row with a single UPDATE without holding row locks over RTTs...
	 *
	 * @param string $uuid 32 char hex string
	 * @return stdClass|bool Row|false
	 */
	protected function claimOldest( $uuid ) {
		$dbw = $this->getMasterDB();

		$row = false; // the row acquired
		do {
			if ( $dbw->getType() === 'mysql' ) {
				// Per https://bugs.mysql.com/bug.php?id=6980, we can't use subqueries on the
				// same table being changed in an UPDATE query in MySQL (gives Error: 1093).
				// Oracle and Postgres have no such limitation. However, MySQL offers an
				// alternative here by supporting ORDER BY + LIMIT for UPDATE queries.
				$dbw->query( "UPDATE {$dbw->tableName( 'job' )} " .
					"SET " .
						"job_token = {$dbw->addQuotes( $uuid )}, " .
						"job_token_timestamp = {$dbw->addQuotes( $dbw->timestamp() )}, " .
						"job_attempts = job_attempts+1 " .
					"WHERE ( " .
						"job_cmd = {$dbw->addQuotes( $this->type )} " .
						"AND job_token = {$dbw->addQuotes( '' )} " .
					") ORDER BY job_id ASC LIMIT 1",
					__METHOD__
				);
			} else {
				// Use a subquery to find the job, within an UPDATE to claim it.
				// This uses as many of the DB wrapper functions as possible.
				$dbw->update( 'job',
					[
						'job_token' => $uuid,
						'job_token_timestamp' => $dbw->timestamp(),
						'job_attempts = job_attempts+1' ],
					[ 'job_id = (' .
						$dbw->selectSQLText( 'job', 'job_id',
							[ 'job_cmd' => $this->type, 'job_token' => '' ],
							__METHOD__,
							[ 'ORDER BY' => 'job_id ASC', 'LIMIT' => 1 ] ) .
						')'
					],
					__METHOD__
				);
			}
			// Fetch any row that we just reserved...
			if ( $dbw->affectedRows() ) {
				$row = $dbw->selectRow( 'job', self::selectFields(),
					[ 'job_cmd' => $this->type, 'job_token' => $uuid ], __METHOD__
				);
				if ( !$row ) { // raced out by duplicate job removal
					wfDebug( "Row deleted as duplicate by another process.\n" );
				}
			} else {
				break; // nothing to do
			}
		} while ( !$row );

		return $row;
	}

	/**
	 * @see JobQueue::doAck()
	 * @param Job $job
	 * @throws MWException
	 */
	protected function doAck( Job $job ) {
		if ( !isset( $job->metadata['id'] ) ) {
			throw new MWException( "Job of type '{$job->getType()}' has no ID." );
		}

		$dbw = $this->getMasterDB();
		try {
			$autoTrx = $dbw->getFlag( DBO_TRX ); // get current setting
			$dbw->clearFlag( DBO_TRX ); // make each query its own transaction
			$scopedReset = new ScopedCallback( function () use ( $dbw, $autoTrx ) {
				$dbw->setFlag( $autoTrx ? DBO_TRX : 0 ); // restore old setting
			} );

			// Delete a row with a single DELETE without holding row locks over RTTs...
			$dbw->delete( 'job',
				[ 'job_cmd' => $this->type, 'job_id' => $job->metadata['id'] ], __METHOD__ );

			JobQueue::incrStats( 'acks', $this->type );
		} catch ( DBError $e ) {
			$this->throwDBException( $e );
		}
	}

	/**
	 * @see JobQueue::doDeduplicateRootJob()
	 * @param IJobSpecification $job
	 * @throws MWException
	 * @return bool
	 */
	protected function doDeduplicateRootJob( IJobSpecification $job ) {
		$params = $job->getParams();
		if ( !isset( $params['rootJobSignature'] ) ) {
			throw new MWException( "Cannot register root job; missing 'rootJobSignature'." );
		} elseif ( !isset( $params['rootJobTimestamp'] ) ) {
			throw new MWException( "Cannot register root job; missing 'rootJobTimestamp'." );
		}
		$key = $this->getRootJobCacheKey( $params['rootJobSignature'] );
		// Callers should call JobQueue::batchPush() and then this function so that if the
		// insert fails, the de-duplication registration will be aborted. Since the insert is
		// deferred till "transaction idle", do the same here, so that the ordering is
		// maintained. Having only the de-duplication registration succeed would cause
		// jobs to become no-ops without any actual jobs that made them redundant.
		$dbw = $this->getMasterDB();
		$cache = $this->dupCache;
		$dbw->onTransactionIdle(
			function () use ( $cache, $params, $key, $dbw ) {
				$timestamp = $cache->get( $key ); // current last timestamp of this job
				if ( $timestamp && $timestamp >= $params['rootJobTimestamp'] ) {
					return true; // a newer version of this root job was enqueued
				}

				// Update the timestamp of the last root job started at the location...
				return $cache->set( $key, $params['rootJobTimestamp'], JobQueueDB::ROOTJOB_TTL );
			},
			__METHOD__
		);

		return true;
	}
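
	/*
	 * Illustrative sketch (not part of the original file): how the root job parameters
	 * checked above are typically produced. Job::newRootJobParams() yields the
	 * 'rootJobSignature'/'rootJobTimestamp' pair, which callers merge into the params of
	 * every job fanned out from the same root operation. The job type, extra parameter and
	 * root key are hypothetical.
	 *
	 * @code
	 * $params = [ 'table' => 'templatelinks' ] + Job::newRootJobParams( 'refreshlinks:Example_page' );
	 * JobQueueGroup::singleton()->push( new JobSpecification( 'refreshLinks', $params ) );
	 * @endcode
	 */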

	/**
	 * @see JobQueue::doDelete()
	 * @return bool
	 */
	protected function doDelete() {
		$dbw = $this->getMasterDB();
		try {
			$dbw->delete( 'job', [ 'job_cmd' => $this->type ], __METHOD__ );
		} catch ( DBError $e ) {
			$this->throwDBException( $e );
		}

		return true;
	}

	/**
	 * @see JobQueue::doWaitForBackups()
	 * @return void
	 */
	protected function doWaitForBackups() {
		$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
		$lbFactory->waitForReplication( [ 'wiki' => $this->wiki, 'cluster' => $this->cluster ] );
	}

	/**
	 * @return void
	 */
	protected function doFlushCaches() {
		foreach ( [ 'size', 'acquiredcount' ] as $type ) {
			$this->cache->delete( $this->getCacheKey( $type ) );
		}
	}

	/**
	 * @see JobQueue::getAllQueuedJobs()
	 * @return Iterator
	 */
	public function getAllQueuedJobs() {
		return $this->getJobIterator( [ 'job_cmd' => $this->getType(), 'job_token' => '' ] );
	}

	/**
	 * @see JobQueue::getAllAcquiredJobs()
	 * @return Iterator
	 */
	public function getAllAcquiredJobs() {
		return $this->getJobIterator( [ 'job_cmd' => $this->getType(), "job_token > ''" ] );
	}

	/**
	 * @param array $conds Query conditions
	 * @return Iterator
	 */
	protected function getJobIterator( array $conds ) {
		$dbr = $this->getReplicaDB();
		try {
			return new MappedIterator(
				$dbr->select( 'job', self::selectFields(), $conds, __METHOD__ ),
				function ( $row ) {
					$job = Job::factory(
						$row->job_cmd,
						Title::makeTitle( $row->job_namespace, $row->job_title ),
						strlen( $row->job_params ) ? unserialize( $row->job_params ) : []
					);
					$job->metadata['id'] = $row->job_id;
					$job->metadata['timestamp'] = $row->job_timestamp;

					return $job;
				}
			);
		} catch ( DBError $e ) {
			$this->throwDBException( $e );
		}
	}

	public function getCoalesceLocationInternal() {
		return $this->cluster
			? "DBCluster:{$this->cluster}:{$this->wiki}"
			: "LBFactory:{$this->wiki}";
	}

	protected function doGetSiblingQueuesWithJobs( array $types ) {
		$dbr = $this->getReplicaDB();
		// @note: this does not check whether the jobs are claimed or not.
		// This is useful so JobQueueGroup::pop() also sees queues that only
		// have stale jobs. This lets recycleAndDeleteStaleJobs() re-enqueue
		// failed jobs so that they can be popped again for that edge case.
		$res = $dbr->select( 'job', 'DISTINCT job_cmd',
			[ 'job_cmd' => $types ], __METHOD__ );

		$types = [];
		foreach ( $res as $row ) {
			$types[] = $row->job_cmd;
		}

		return $types;
	}

	protected function doGetSiblingQueueSizes( array $types ) {
		$dbr = $this->getReplicaDB();
		$res = $dbr->select( 'job', [ 'job_cmd', 'COUNT(*) AS count' ],
			[ 'job_cmd' => $types ], __METHOD__, [ 'GROUP BY' => 'job_cmd' ] );

		$sizes = [];
		foreach ( $res as $row ) {
			$sizes[$row->job_cmd] = (int)$row->count;
		}

		return $sizes;
	}

	/**
	 * Recycle or destroy any jobs that have been claimed for too long
	 *
	 * @return int Number of jobs recycled/deleted
	 */
	public function recycleAndDeleteStaleJobs() {
		$now = time();
		$count = 0; // affected rows
		$dbw = $this->getMasterDB();

		try {
			if ( !$dbw->lock( "jobqueue-recycle-{$this->type}", __METHOD__, 1 ) ) {
				return $count; // already in progress
			}

			// Remove claims on jobs acquired for too long if enabled...
			if ( $this->claimTTL > 0 ) {
				$claimCutoff = $dbw->timestamp( $now - $this->claimTTL );
				// Get the IDs of jobs that have been claimed but not finished after too long.
				// These jobs can be recycled into the queue by expiring the claim. Selecting
				// the IDs first means that the UPDATE can be done by primary key (less deadlocks).
				$res = $dbw->select( 'job', 'job_id',
					[
						'job_cmd' => $this->type,
						"job_token != {$dbw->addQuotes( '' )}", // was acquired
						"job_token_timestamp < {$dbw->addQuotes( $claimCutoff )}", // stale
						"job_attempts < {$dbw->addQuotes( $this->maxTries )}" ], // retries left
					__METHOD__
				);
				$ids = array_map(
					function ( $o ) {
						return $o->job_id;
					}, iterator_to_array( $res )
				);
				if ( count( $ids ) ) {
					// Reset job_token for these jobs so that other runners will pick them up.
					// Set the timestamp to the current time, as it is useful to know that the job
					// was already tried before (the timestamp becomes the "released" time).
					$dbw->update( 'job',
						[
							'job_token' => '',
							'job_token_timestamp' => $dbw->timestamp( $now ) ], // time of release
						[
							'job_id' => $ids ],
						__METHOD__
					);
					$affected = $dbw->affectedRows();
					$count += $affected;
					JobQueue::incrStats( 'recycles', $this->type, $affected );
					$this->aggr->notifyQueueNonEmpty( $this->wiki, $this->type );
				}
			}

			// Just destroy any stale jobs...
			$pruneCutoff = $dbw->timestamp( $now - self::MAX_AGE_PRUNE );
			$conds = [
				'job_cmd' => $this->type,
				"job_token != {$dbw->addQuotes( '' )}", // was acquired
				"job_token_timestamp < {$dbw->addQuotes( $pruneCutoff )}" // stale
			];
			if ( $this->claimTTL > 0 ) { // only prune jobs attempted too many times...
				$conds[] = "job_attempts >= {$dbw->addQuotes( $this->maxTries )}";
			}
			// Get the IDs of jobs that are considered stale and should be removed. Selecting
			// the IDs first means that the DELETE can be done by primary key (less deadlocks).
			$res = $dbw->select( 'job', 'job_id', $conds, __METHOD__ );
			$ids = array_map(
				function ( $o ) {
					return $o->job_id;
				}, iterator_to_array( $res )
			);
			if ( count( $ids ) ) {
				$dbw->delete( 'job', [ 'job_id' => $ids ], __METHOD__ );
				$affected = $dbw->affectedRows();
				$count += $affected;
				JobQueue::incrStats( 'abandons', $this->type, $affected );
			}

			$dbw->unlock( "jobqueue-recycle-{$this->type}", __METHOD__ );
		} catch ( DBError $e ) {
			$this->throwDBException( $e );
		}

		return $count;
	}

	/**
	 * @param IJobSpecification $job
	 * @return array
	 */
	protected function insertFields( IJobSpecification $job ) {
		$dbw = $this->getMasterDB();

		return [
			// Fields that describe the nature of the job
			'job_cmd' => $job->getType(),
			'job_namespace' => $job->getTitle()->getNamespace(),
			'job_title' => $job->getTitle()->getDBkey(),
			'job_params' => self::makeBlob( $job->getParams() ),
			// Additional job metadata
			'job_id' => $dbw->nextSequenceValue( 'job_job_id_seq' ),
			'job_timestamp' => $dbw->timestamp(),
			'job_sha1' => Wikimedia\base_convert(
				sha1( serialize( $job->getDeduplicationInfo() ) ),
				16, 36, 31
			),
			'job_random' => mt_rand( 0, self::MAX_JOB_RANDOM )
		];
	}
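
	/*
	 * Descriptive note (not part of the original file): the shape of a row produced by
	 * insertFields() for a hypothetical job, with placeholder values.
	 *
	 * @code
	 * [
	 *     'job_cmd' => 'refreshLinks',
	 *     'job_namespace' => 0,
	 *     'job_title' => 'Example_page',
	 *     'job_params' => 'a:1:{s:5:"table";s:13:"templatelinks";}', // serialized params blob
	 *     'job_id' => null, // null on MySQL; next sequence value where sequences are used
	 *     'job_timestamp' => '20170101000000',
	 *     'job_sha1' => '0123456789abcdefghijklmnopqrstu', // 31-char base-36 SHA-1 of dedupe info
	 *     'job_random' => 123456789,
	 * ]
	 * @endcode
	 */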

	/**
	 * @throws JobQueueConnectionError
	 * @return DBConnRef
	 */
	protected function getReplicaDB() {
		try {
			return $this->getDB( DB_REPLICA );
		} catch ( DBConnectionError $e ) {
			throw new JobQueueConnectionError( "DBConnectionError:" . $e->getMessage() );
		}
	}

	/**
	 * @throws JobQueueConnectionError
	 * @return DBConnRef
	 */
	protected function getMasterDB() {
		try {
			return $this->getDB( DB_MASTER );
		} catch ( DBConnectionError $e ) {
			throw new JobQueueConnectionError( "DBConnectionError:" . $e->getMessage() );
		}
	}

	/**
	 * @param int $index (DB_REPLICA/DB_MASTER)
	 * @return DBConnRef
	 */
	protected function getDB( $index ) {
		$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
		$lb = ( $this->cluster !== false )
			? $lbFactory->getExternalLB( $this->cluster )
			: $lbFactory->getMainLB( $this->wiki );

		return ( $lb->getServerType( $lb->getWriterIndex() ) !== 'sqlite' )
			// Keep a separate connection to avoid contention and deadlocks;
			// however, SQLite has the opposite behavior due to DB-level locking.
			? $lb->getConnectionRef( $index, [], $this->wiki, $lb::CONN_TRX_AUTO )
			// Job insertion will be deferred until the PRESEND stage to reduce contention.
			: $lb->getConnectionRef( $index, [], $this->wiki );
	}

	/**
	 * @param string $property
	 * @return string
	 */
	private function getCacheKey( $property ) {
		list( $db, $prefix ) = wfSplitWikiID( $this->wiki );
		$cluster = is_string( $this->cluster ) ? $this->cluster : 'main';

		return wfForeignMemcKey( $db, $prefix, 'jobqueue', $cluster, $this->type, $property );
	}

	/**
	 * @param array|bool $params
	 * @return string
	 */
	protected static function makeBlob( $params ) {
		if ( $params !== false ) {
			return serialize( $params );
		} else {
			return '';
		}
	}

	/**
	 * @param string $blob
	 * @return bool|mixed
	 */
	protected static function extractBlob( $blob ) {
		if ( (string)$blob !== '' ) {
			return unserialize( $blob );
		} else {
			return false;
		}
	}

	/**
	 * @param DBError $e
	 * @throws JobQueueError
	 */
	protected function throwDBException( DBError $e ) {
		throw new JobQueueError( get_class( $e ) . ": " . $e->getMessage() );
	}

	/**
	 * Return the list of job fields that should be selected.
	 * @since 1.23
	 * @return array
	 */
	public static function selectFields() {
		return [
			'job_id',
			'job_cmd',
			'job_namespace',
			'job_title',
			'job_timestamp',
			'job_params',
			'job_random',
			'job_attempts',
			'job_token',
			'job_token_timestamp',
			'job_sha1',
		];
	}
}