Merge "Add skipping to nth page option/ability for dump importing process"
[lhc/web/wiklou.git] / includes / jobqueue / JobRunner.php
1 <?php
2 /**
3 * Job queue runner utility methods
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 * http://www.gnu.org/copyleft/gpl.html
19 *
20 * @file
21 * @ingroup JobQueue
22 */
23
24 use MediaWiki\MediaWikiServices;
25 use MediaWiki\Logger\LoggerFactory;
26 use Liuggio\StatsdClient\Factory\StatsdDataFactory;
27 use Psr\Log\LoggerAwareInterface;
28 use Psr\Log\LoggerInterface;
29 use Wikimedia\ScopedCallback;
30 use Wikimedia\Rdbms\LBFactory;
31 use Wikimedia\Rdbms\DBError;
32 use Wikimedia\Rdbms\DBReplicationWaitError;
33
34 /**
35 * Job queue runner utility methods
36 *
37 * @ingroup JobQueue
38 * @since 1.24
39 */
class JobRunner implements LoggerAwareInterface {
	/** @var callable|null Debug output handler; set via setDebugHandler() */
	protected $debug;

	/**
	 * @var LoggerInterface $logger Logger for job status lines; set via setLogger()
	 */
	protected $logger;

	const MAX_ALLOWED_LAG = 3; // abort if more than this much DB lag is present
	const LAG_CHECK_PERIOD = 1.0; // check replica DB lag this many seconds
	const ERROR_BACKOFF_TTL = 1; // seconds to back off a queue due to errors
	const READONLY_BACKOFF_TTL = 30; // seconds to back off a queue due to read-only errors
53
	/**
	 * Set the callback that receives debug output lines (see debugCallback())
	 *
	 * @param callable $debug Optional debug output handler
	 */
	public function setDebugHandler( $debug ) {
		$this->debug = $debug;
	}
60
	/**
	 * Set the logger used for job status and error reporting
	 *
	 * @param LoggerInterface $logger
	 * @return void
	 */
	public function setLogger( LoggerInterface $logger ) {
		$this->logger = $logger;
	}
68
69 /**
70 * @param LoggerInterface $logger
71 */
72 public function __construct( LoggerInterface $logger = null ) {
73 if ( $logger === null ) {
74 $logger = LoggerFactory::getInstance( 'runJobs' );
75 }
76 $this->setLogger( $logger );
77 }
78
	/**
	 * Run jobs of the specified number/type for the specified time
	 *
	 * The response map has a 'job' field that lists status of each job, including:
	 *   - type : the job type
	 *   - status : ok/failed
	 *   - error : any error message string
	 *   - time : the job run time in ms
	 * The response map also has:
	 *   - backoffs : the (job type => seconds) map of backoff times
	 *   - elapsed : the total time spent running tasks in ms
	 *   - reached : the reason the script finished, one of (none-ready, job-limit, time-limit,
	 *  memory-limit)
	 *
	 * This method outputs status information only if a debug handler was set.
	 * Any exceptions are caught and logged, but are not reported as output.
	 *
	 * @param array $options Map of parameters:
	 *    - type : the job type (or false for the default types)
	 *    - maxJobs : maximum number of jobs to run
	 *    - maxTime : maximum time in seconds before stopping
	 *    - throttle : whether to respect job backoff configuration
	 * @return array Summary response that can easily be JSON serialized
	 */
	public function run( array $options ) {
		global $wgJobClasses, $wgTrxProfilerLimits;

		$response = [ 'jobs' => [], 'reached' => 'none-ready' ];

		$type = isset( $options['type'] ) ? $options['type'] : false;
		$maxJobs = isset( $options['maxJobs'] ) ? $options['maxJobs'] : false;
		$maxTime = isset( $options['maxTime'] ) ? $options['maxTime'] : false;
		$noThrottle = isset( $options['throttle'] ) && !$options['throttle'];

		// Bail if job type is invalid
		if ( $type !== false && !isset( $wgJobClasses[$type] ) ) {
			$response['reached'] = 'none-possible';
			return $response;
		}
		// Bail out if DB is in read-only mode
		if ( wfReadOnly() ) {
			$response['reached'] = 'read-only';
			return $response;
		}

		$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
		// Bail out if there is too much DB lag.
		// This check should not block as we want to try other wiki queues.
		list( , $maxLag ) = $lbFactory->getMainLB( wfWikiID() )->getMaxLag();
		if ( $maxLag >= self::MAX_ALLOWED_LAG ) {
			$response['reached'] = 'replica-lag-limit';
			return $response;
		}

		// Flush any pending DB writes for sanity
		$lbFactory->commitAll( __METHOD__ );

		// Catch huge single updates that lead to replica DB lag
		$trxProfiler = Profiler::instance()->getTransactionProfiler();
		$trxProfiler->setLogger( LoggerFactory::getInstance( 'DBPerformance' ) );
		$trxProfiler->setExpectations( $wgTrxProfilerLimits['JobRunner'], __METHOD__ );

		// Some jobs types should not run until a certain timestamp
		$backoffs = []; // map of (type => UNIX expiry)
		$backoffDeltas = []; // map of (type => seconds)
		$wait = 'wait'; // block to read backoffs the first time

		$group = JobQueueGroup::singleton();
		$stats = MediaWikiServices::getInstance()->getStatsdDataFactory();
		$jobsPopped = 0;
		$timeMsTotal = 0;
		$startTime = microtime( true ); // time since jobs started running
		$lastCheckTime = 1; // timestamp of last replica DB check
		do {
			// Sync the persistent backoffs with concurrent runners
			$backoffs = $this->syncBackoffDeltas( $backoffs, $backoffDeltas, $wait );
			$blacklist = $noThrottle ? [] : array_keys( $backoffs );
			$wait = 'nowait'; // less important now

			if ( $type === false ) {
				// Pop a job from any default queue, skipping types in backoff
				$job = $group->pop(
					JobQueueGroup::TYPE_DEFAULT,
					JobQueueGroup::USE_CACHE,
					$blacklist
				);
			} elseif ( in_array( $type, $blacklist ) ) {
				$job = false; // requested queue in backoff state
			} else {
				$job = $group->pop( $type ); // job from a single queue
			}
			$lbFactory->commitMasterChanges( __METHOD__ ); // flush any JobQueueDB writes

			if ( $job ) { // found a job
				++$jobsPopped;
				$popTime = time();
				$jType = $job->getType();

				WebRequest::overrideRequestId( $job->getRequestId() );

				// Back off of certain jobs for a while (for throttling and for errors)
				$ttw = $this->getBackoffTimeToWait( $job );
				if ( $ttw > 0 ) {
					// Always add the delta for other runners in case the time running the
					// job negated the backoff for each individually but not collectively.
					$backoffDeltas[$jType] = isset( $backoffDeltas[$jType] )
						? $backoffDeltas[$jType] + $ttw
						: $ttw;
					$backoffs = $this->syncBackoffDeltas( $backoffs, $backoffDeltas, $wait );
				}

				$info = $this->executeJob( $job, $lbFactory, $stats, $popTime );
				if ( $info['status'] !== false || !$job->allowRetries() ) {
					$group->ack( $job ); // succeeded or job cannot be retried
					$lbFactory->commitMasterChanges( __METHOD__ ); // flush any JobQueueDB writes
				}

				// Back off of certain jobs for a while (for throttling and for errors)
				// NOTE: only sampled (1-in-50) to limit contention on the backoff file
				if ( $info['status'] === false && mt_rand( 0, 49 ) == 0 ) {
					$ttw = max( $ttw, $this->getErrorBackoffTTL( $info['error'] ) );
					$backoffDeltas[$jType] = isset( $backoffDeltas[$jType] )
						? $backoffDeltas[$jType] + $ttw
						: $ttw;
				}

				$response['jobs'][] = [
					'type' => $jType,
					'status' => ( $info['status'] === false ) ? 'failed' : 'ok',
					'error' => $info['error'],
					'time' => $info['timeMs']
				];
				$timeMsTotal += $info['timeMs'];

				// Break out if we hit the job count or wall time limits...
				if ( $maxJobs && $jobsPopped >= $maxJobs ) {
					$response['reached'] = 'job-limit';
					break;
				} elseif ( $maxTime && ( microtime( true ) - $startTime ) > $maxTime ) {
					$response['reached'] = 'time-limit';
					break;
				}

				// Don't let any of the main DB replica DBs get backed up.
				// This only waits for so long before exiting and letting
				// other wikis in the farm (on different masters) get a chance.
				$timePassed = microtime( true ) - $lastCheckTime;
				if ( $timePassed >= self::LAG_CHECK_PERIOD || $timePassed < 0 ) {
					try {
						$lbFactory->waitForReplication( [
							'ifWritesSince' => $lastCheckTime,
							'timeout' => self::MAX_ALLOWED_LAG
						] );
					} catch ( DBReplicationWaitError $e ) {
						$response['reached'] = 'replica-lag-limit';
						break;
					}
					$lastCheckTime = microtime( true );
				}
				// Don't let any queue replica DBs/backups fall behind
				if ( $jobsPopped > 0 && ( $jobsPopped % 100 ) == 0 ) {
					$group->waitForBackups();
				}

				// Bail if near-OOM instead of in a job
				if ( !$this->checkMemoryOK() ) {
					$response['reached'] = 'memory-limit';
					break;
				}
			}
		} while ( $job ); // stop when there are no jobs

		// Sync the persistent backoffs for the next runJobs.php pass
		if ( $backoffDeltas ) {
			$this->syncBackoffDeltas( $backoffs, $backoffDeltas, 'wait' );
		}

		$response['backoffs'] = $backoffs;
		$response['elapsed'] = $timeMsTotal;

		return $response;
	}
259
260 /**
261 * @param string $error
262 * @return int TTL in seconds
263 */
264 private function getErrorBackoffTTL( $error ) {
265 return strpos( $error, 'DBReadOnlyError' ) !== false
266 ? self::READONLY_BACKOFF_TTL
267 : self::ERROR_BACKOFF_TTL;
268 }
269
	/**
	 * Run a single job, commit its DB changes, and run deferred updates
	 *
	 * @param Job $job The popped job to run
	 * @param LBFactory $lbFactory
	 * @param StatsdDataFactory $stats For tracking job timing/RSS metrics
	 * @param float $popTime UNIX timestamp of when the job was popped
	 * @return array Map of status/error/timeMs
	 */
	private function executeJob( Job $job, LBFactory $lbFactory, $stats, $popTime ) {
		$jType = $job->getType();
		$msg = $job->toString() . " STARTING";
		$this->logger->debug( $msg );
		$this->debugCallback( $msg );

		// Run the job...
		$rssStart = $this->getMaxRssKb();
		$jobStartTime = microtime( true );
		try {
			$fnameTrxOwner = get_class( $job ) . '::run'; // give run() outer scope
			$lbFactory->beginMasterChanges( $fnameTrxOwner );
			$status = $job->run();
			$error = $job->getLastError();
			$this->commitMasterChanges( $lbFactory, $job, $fnameTrxOwner );
			// Push lazily-pushed jobs
			// Important: this must be the last deferred update added (T100085, T154425)
			DeferredUpdates::addCallableUpdate( [ 'JobQueueGroup', 'pushLazyJobs' ] );
			// Run any deferred update tasks; doUpdates() manages transactions itself
			DeferredUpdates::doUpdates();
		} catch ( Exception $e ) {
			// Roll back any uncommitted master changes and treat the job as failed
			MWExceptionHandler::rollbackMasterChangesAndLog( $e );
			$status = false;
			$error = get_class( $e ) . ': ' . $e->getMessage();
		}
		// Always attempt to call teardown() even if Job throws exception.
		try {
			$job->teardown( $status );
		} catch ( Exception $e ) {
			MWExceptionHandler::logException( $e );
		}

		// Commit all outstanding connections that are in a transaction
		// to get a fresh repeatable read snapshot on every connection.
		// Note that jobs are still responsible for handling replica DB lag.
		$lbFactory->flushReplicaSnapshots( __METHOD__ );
		// Clear out title cache data from prior snapshots
		MediaWikiServices::getInstance()->getLinkCache()->clear();
		$timeMs = intval( ( microtime( true ) - $jobStartTime ) * 1000 );
		$rssEnd = $this->getMaxRssKb();

		// Record how long jobs wait before getting popped
		$readyTs = $job->getReadyTimestamp();
		if ( $readyTs ) {
			$pickupDelay = max( 0, $popTime - $readyTs );
			$stats->timing( 'jobqueue.pickup_delay.all', 1000 * $pickupDelay );
			$stats->timing( "jobqueue.pickup_delay.$jType", 1000 * $pickupDelay );
		}
		// Record root job age for jobs being run
		$rootTimestamp = $job->getRootJobParams()['rootJobTimestamp'];
		if ( $rootTimestamp ) {
			$age = max( 0, $popTime - wfTimestamp( TS_UNIX, $rootTimestamp ) );
			$stats->timing( "jobqueue.pickup_root_age.$jType", 1000 * $age );
		}
		// Track the execution time for jobs
		$stats->timing( "jobqueue.run.$jType", $timeMs );
		// Track RSS increases for jobs (in case of memory leaks)
		if ( $rssStart && $rssEnd ) {
			$stats->updateCount( "jobqueue.rss_delta.$jType", $rssEnd - $rssStart );
		}

		if ( $status === false ) {
			$msg = $job->toString() . " t=$timeMs error={$error}";
			$this->logger->error( $msg );
			$this->debugCallback( $msg );
		} else {
			$msg = $job->toString() . " t=$timeMs good";
			$this->logger->info( $msg );
			$this->debugCallback( $msg );
		}

		return [ 'status' => $status, 'error' => $error, 'timeMs' => $timeMs ];
	}
350
351 /**
352 * @return int|null Max memory RSS in kilobytes
353 */
354 private function getMaxRssKb() {
355 $info = wfGetRusage() ?: [];
356 // see https://linux.die.net/man/2/getrusage
357 return isset( $info['ru_maxrss'] ) ? (int)$info['ru_maxrss'] : null;
358 }
359
360 /**
361 * @param Job $job
362 * @return int Seconds for this runner to avoid doing more jobs of this type
363 * @see $wgJobBackoffThrottling
364 */
365 private function getBackoffTimeToWait( Job $job ) {
366 global $wgJobBackoffThrottling;
367
368 if ( !isset( $wgJobBackoffThrottling[$job->getType()] ) ||
369 $job instanceof DuplicateJob // no work was done
370 ) {
371 return 0; // not throttled
372 }
373
374 $itemsPerSecond = $wgJobBackoffThrottling[$job->getType()];
375 if ( $itemsPerSecond <= 0 ) {
376 return 0; // not throttled
377 }
378
379 $seconds = 0;
380 if ( $job->workItemCount() > 0 ) {
381 $exactSeconds = $job->workItemCount() / $itemsPerSecond;
382 // use randomized rounding
383 $seconds = floor( $exactSeconds );
384 $remainder = $exactSeconds - $seconds;
385 $seconds += ( mt_rand() / mt_getrandmax() < $remainder ) ? 1 : 0;
386 }
387
388 return (int)$seconds;
389 }
390
391 /**
392 * Get the previous backoff expiries from persistent storage
393 * On I/O or lock acquisition failure this returns the original $backoffs.
394 *
395 * @param array $backoffs Map of (job type => UNIX timestamp)
396 * @param string $mode Lock wait mode - "wait" or "nowait"
397 * @return array Map of (job type => backoff expiry timestamp)
398 */
399 private function loadBackoffs( array $backoffs, $mode = 'wait' ) {
400 $file = wfTempDir() . '/mw-runJobs-backoffs.json';
401 if ( is_file( $file ) ) {
402 $noblock = ( $mode === 'nowait' ) ? LOCK_NB : 0;
403 $handle = fopen( $file, 'rb' );
404 if ( !flock( $handle, LOCK_SH | $noblock ) ) {
405 fclose( $handle );
406 return $backoffs; // don't wait on lock
407 }
408 $content = stream_get_contents( $handle );
409 flock( $handle, LOCK_UN );
410 fclose( $handle );
411 $ctime = microtime( true );
412 $cBackoffs = json_decode( $content, true ) ?: [];
413 foreach ( $cBackoffs as $type => $timestamp ) {
414 if ( $timestamp < $ctime ) {
415 unset( $cBackoffs[$type] );
416 }
417 }
418 } else {
419 $cBackoffs = [];
420 }
421
422 return $cBackoffs;
423 }
424
425 /**
426 * Merge the current backoff expiries from persistent storage
427 *
428 * The $deltas map is set to an empty array on success.
429 * On I/O or lock acquisition failure this returns the original $backoffs.
430 *
431 * @param array $backoffs Map of (job type => UNIX timestamp)
432 * @param array $deltas Map of (job type => seconds)
433 * @param string $mode Lock wait mode - "wait" or "nowait"
434 * @return array The new backoffs account for $backoffs and the latest file data
435 */
436 private function syncBackoffDeltas( array $backoffs, array &$deltas, $mode = 'wait' ) {
437 if ( !$deltas ) {
438 return $this->loadBackoffs( $backoffs, $mode );
439 }
440
441 $noblock = ( $mode === 'nowait' ) ? LOCK_NB : 0;
442 $file = wfTempDir() . '/mw-runJobs-backoffs.json';
443 $handle = fopen( $file, 'wb+' );
444 if ( !flock( $handle, LOCK_EX | $noblock ) ) {
445 fclose( $handle );
446 return $backoffs; // don't wait on lock
447 }
448 $ctime = microtime( true );
449 $content = stream_get_contents( $handle );
450 $cBackoffs = json_decode( $content, true ) ?: [];
451 foreach ( $deltas as $type => $seconds ) {
452 $cBackoffs[$type] = isset( $cBackoffs[$type] ) && $cBackoffs[$type] >= $ctime
453 ? $cBackoffs[$type] + $seconds
454 : $ctime + $seconds;
455 }
456 foreach ( $cBackoffs as $type => $timestamp ) {
457 if ( $timestamp < $ctime ) {
458 unset( $cBackoffs[$type] );
459 }
460 }
461 ftruncate( $handle, 0 );
462 fwrite( $handle, json_encode( $cBackoffs ) );
463 flock( $handle, LOCK_UN );
464 fclose( $handle );
465
466 $deltas = [];
467
468 return $cBackoffs;
469 }
470
471 /**
472 * Make sure that this script is not too close to the memory usage limit.
473 * It is better to die in between jobs than OOM right in the middle of one.
474 * @return bool
475 */
476 private function checkMemoryOK() {
477 static $maxBytes = null;
478 if ( $maxBytes === null ) {
479 $m = [];
480 if ( preg_match( '!^(\d+)(k|m|g|)$!i', ini_get( 'memory_limit' ), $m ) ) {
481 list( , $num, $unit ) = $m;
482 $conv = [ 'g' => 1073741824, 'm' => 1048576, 'k' => 1024, '' => 1 ];
483 $maxBytes = $num * $conv[strtolower( $unit )];
484 } else {
485 $maxBytes = 0;
486 }
487 }
488 $usedBytes = memory_get_usage();
489 if ( $maxBytes && $usedBytes >= 0.95 * $maxBytes ) {
490 $msg = "Detected excessive memory usage ($usedBytes/$maxBytes).";
491 $this->debugCallback( $msg );
492 $this->logger->error( $msg );
493
494 return false;
495 }
496
497 return true;
498 }
499
500 /**
501 * Log the job message
502 * @param string $msg The message to log
503 */
504 private function debugCallback( $msg ) {
505 if ( $this->debug ) {
506 call_user_func_array( $this->debug, [ wfTimestamp( TS_DB ) . " $msg\n" ] );
507 }
508 }
509
	/**
	 * Issue a commit on all masters who are currently in a transaction and have
	 * made changes to the database. It also supports sometimes waiting for the
	 * local wiki's replica DBs to catch up. See the documentation for
	 * $wgJobSerialCommitThreshold for more.
	 *
	 * @param LBFactory $lbFactory
	 * @param Job $job The job whose changes are being committed (for logging)
	 * @param string $fnameTrxOwner Caller name that owns the transaction round
	 * @throws DBError If the serial-commit lock cannot be acquired in time
	 */
	private function commitMasterChanges( LBFactory $lbFactory, Job $job, $fnameTrxOwner ) {
		global $wgJobSerialCommitThreshold;

		$time = false;
		$lb = $lbFactory->getMainLB( wfWikiID() );
		if ( $wgJobSerialCommitThreshold !== false && $lb->getServerCount() > 1 ) {
			// Generally, there is one master connection to the local DB
			$dbwSerial = $lb->getAnyOpenConnection( $lb->getWriterIndex() );
			// We need natively blocking fast locks
			if ( $dbwSerial && $dbwSerial->namedLocksEnqueue() ) {
				$time = $dbwSerial->pendingWriteQueryDuration( $dbwSerial::ESTIMATE_DB_APPLY );
				// Fast-writing jobs commit in parallel; only slow ones serialize
				if ( $time < $wgJobSerialCommitThreshold ) {
					$dbwSerial = false;
				}
			} else {
				$dbwSerial = false;
			}
		} else {
			// There are no replica DBs or writes are all to foreign DB (we don't handle that)
			$dbwSerial = false;
		}

		if ( !$dbwSerial ) {
			// Plain parallel commit; no serialization needed
			$lbFactory->commitMasterChanges( $fnameTrxOwner );
			return;
		}

		$ms = intval( 1000 * $time );
		$msg = $job->toString() . " COMMIT ENQUEUED [{$ms}ms of writes]";
		$this->logger->info( $msg );
		$this->debugCallback( $msg );

		// Wait for an exclusive lock to commit
		if ( !$dbwSerial->lock( 'jobrunner-serial-commit', __METHOD__, 30 ) ) {
			// This will trigger a rollback in the main loop
			throw new DBError( $dbwSerial, "Timed out waiting on commit queue." );
		}
		// Release the named lock when this scope exits, even on exception
		$unlocker = new ScopedCallback( function () use ( $dbwSerial ) {
			$dbwSerial->unlock( 'jobrunner-serial-commit', __METHOD__ );
		} );

		// Wait for the replica DBs to catch up
		$pos = $lb->getMasterPos();
		if ( $pos ) {
			$lb->waitForAll( $pos );
		}

		// Actually commit the DB master changes
		$lbFactory->commitMasterChanges( $fnameTrxOwner );
		ScopedCallback::consume( $unlocker );
	}
572 }