Merge "Use {{int:}} on MediaWiki:Blockedtext and MediaWiki:Autoblockedtext"
[lhc/web/wiklou.git] / includes / jobqueue / JobRunner.php
1 <?php
2 /**
3 * Job queue runner utility methods
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 * http://www.gnu.org/copyleft/gpl.html
19 *
20 * @file
21 * @ingroup JobQueue
22 */
23
24 use MediaWiki\MediaWikiServices;
25 use MediaWiki\Logger\LoggerFactory;
26 use Liuggio\StatsdClient\Factory\StatsdDataFactory;
27 use Psr\Log\LoggerAwareInterface;
28 use Psr\Log\LoggerInterface;
29 use Wikimedia\ScopedCallback;
30 use Wikimedia\Rdbms\LBFactory;
31 use Wikimedia\Rdbms\DBError;
32 use Wikimedia\Rdbms\DBReplicationWaitError;
33
34 /**
35 * Job queue runner utility methods
36 *
37 * @ingroup JobQueue
38 * @since 1.24
39 */
class JobRunner implements LoggerAwareInterface {
	/** @var Config Main site configuration (wired from MediaWikiServices in the constructor) */
	protected $config;
	/** @var callable|null Debug output handler; receives one formatted line per event */
	protected $debug;

	/**
	 * @var LoggerInterface $logger Destination for job start/finish/error log entries
	 */
	protected $logger;

	const MAX_ALLOWED_LAG = 3; // abort if more than this much DB lag is present
	const LAG_CHECK_PERIOD = 1.0; // check replica DB lag this many seconds
	const ERROR_BACKOFF_TTL = 1; // seconds to back off a queue due to errors
	const READONLY_BACKOFF_TTL = 30; // seconds to back off a queue due to read-only errors
	/**
	 * Set a handler that receives timestamped debug/status lines as jobs run.
	 *
	 * When unset, run() produces no direct output (see debugCallback()).
	 *
	 * @param callable $debug Optional debug output handler; called with a single string
	 */
	public function setDebugHandler( $debug ) {
		$this->debug = $debug;
	}
62
	/**
	 * Set the logger used for job start/finish/error entries.
	 *
	 * @param LoggerInterface $logger
	 * @return void
	 */
	public function setLogger( LoggerInterface $logger ) {
		$this->logger = $logger;
	}
70
71 /**
72 * @param LoggerInterface $logger
73 */
74 public function __construct( LoggerInterface $logger = null ) {
75 if ( $logger === null ) {
76 $logger = LoggerFactory::getInstance( 'runJobs' );
77 }
78 $this->setLogger( $logger );
79 $this->config = MediaWikiServices::getInstance()->getMainConfig();
80 }
81
	/**
	 * Run jobs of the specified number/type for the specified time
	 *
	 * The response map has a 'jobs' field that lists the status of each job, including:
	 *   - type : the job type
	 *   - status : ok/failed
	 *   - error : any error message string
	 *   - time : the job run time in ms
	 * The response map also has:
	 *   - backoffs : the (job type => seconds) map of backoff times
	 *   - elapsed : the total time spent running tasks in ms
	 *   - reached : the reason the script finished, one of (none-ready, none-possible,
	 *   read-only, replica-lag-limit, job-limit, time-limit, memory-limit)
	 *
	 * This method outputs status information only if a debug handler was set.
	 * Any exceptions are caught and logged, but are not reported as output.
	 *
	 * @param array $options Map of parameters:
	 *    - type : the job type (or false for the default types)
	 *    - maxJobs : maximum number of jobs to run
	 *    - maxTime : maximum time in seconds before stopping
	 *    - throttle : whether to respect job backoff configuration
	 * @return array Summary response that can easily be JSON serialized
	 */
	public function run( array $options ) {
		$jobClasses = $this->config->get( 'JobClasses' );
		$profilerLimits = $this->config->get( 'TrxProfilerLimits' );

		$response = [ 'jobs' => [], 'reached' => 'none-ready' ];

		$type = isset( $options['type'] ) ? $options['type'] : false;
		$maxJobs = isset( $options['maxJobs'] ) ? $options['maxJobs'] : false;
		$maxTime = isset( $options['maxTime'] ) ? $options['maxTime'] : false;
		$noThrottle = isset( $options['throttle'] ) && !$options['throttle'];

		// Bail if job type is invalid
		if ( $type !== false && !isset( $jobClasses[$type] ) ) {
			$response['reached'] = 'none-possible';
			return $response;
		}

		// Bail out if DB is in read-only mode
		if ( wfReadOnly() ) {
			$response['reached'] = 'read-only';
			return $response;
		}

		$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
		if ( $lbFactory->hasTransactionRound() ) {
			// Jobs manage their own transaction rounds below; a pre-existing round
			// would make the begin/commit calls in executeJob() misbehave
			throw new LogicException( __METHOD__ . ' called with an active transaction round.' );
		}
		// Bail out if there is too much DB lag.
		// This check should not block as we want to try other wiki queues.
		list( , $maxLag ) = $lbFactory->getMainLB( wfWikiID() )->getMaxLag();
		if ( $maxLag >= self::MAX_ALLOWED_LAG ) {
			$response['reached'] = 'replica-lag-limit';
			return $response;
		}

		// Catch huge single updates that lead to replica DB lag
		$trxProfiler = Profiler::instance()->getTransactionProfiler();
		$trxProfiler->setLogger( LoggerFactory::getInstance( 'DBPerformance' ) );
		$trxProfiler->setExpectations( $profilerLimits['JobRunner'], __METHOD__ );

		// Some jobs types should not run until a certain timestamp
		$backoffs = []; // map of (type => UNIX expiry)
		$backoffDeltas = []; // map of (type => seconds)
		$wait = 'wait'; // block to read backoffs the first time

		$group = JobQueueGroup::singleton();
		$stats = MediaWikiServices::getInstance()->getStatsdDataFactory();
		$jobsPopped = 0;
		$timeMsTotal = 0;
		$startTime = microtime( true ); // time since jobs started running
		$lastCheckTime = 1; // timestamp of last replica DB check (1 forces a check on the first pass)
		do {
			// Sync the persistent backoffs with concurrent runners
			$backoffs = $this->syncBackoffDeltas( $backoffs, $backoffDeltas, $wait );
			$blacklist = $noThrottle ? [] : array_keys( $backoffs );
			$wait = 'nowait'; // less important now

			if ( $type === false ) {
				// No specific type requested; pop from any of the default queues
				$job = $group->pop(
					JobQueueGroup::TYPE_DEFAULT,
					JobQueueGroup::USE_CACHE,
					$blacklist
				);
			} elseif ( in_array( $type, $blacklist ) ) {
				$job = false; // requested queue in backoff state
			} else {
				$job = $group->pop( $type ); // job from a single queue
			}

			if ( $job ) { // found a job
				++$jobsPopped;
				$popTime = time();
				$jType = $job->getType();

				// Propagate the request ID of whatever enqueued the job, for log correlation
				WebRequest::overrideRequestId( $job->getRequestId() );

				// Back off of certain jobs for a while (for throttling and for errors)
				$ttw = $this->getBackoffTimeToWait( $job );
				if ( $ttw > 0 ) {
					// Always add the delta for other runners in case the time running the
					// job negated the backoff for each individually but not collectively.
					$backoffDeltas[$jType] = isset( $backoffDeltas[$jType] )
						? $backoffDeltas[$jType] + $ttw
						: $ttw;
					$backoffs = $this->syncBackoffDeltas( $backoffs, $backoffDeltas, $wait );
				}

				$info = $this->executeJob( $job, $lbFactory, $stats, $popTime );
				if ( $info['status'] !== false || !$job->allowRetries() ) {
					$group->ack( $job ); // succeeded or job cannot be retried
				}

				// Back off of certain jobs for a while (for throttling and for errors).
				// Only ~1 in 50 failures triggers this, to limit backoff churn.
				if ( $info['status'] === false && mt_rand( 0, 49 ) == 0 ) {
					$ttw = max( $ttw, $this->getErrorBackoffTTL( $info['error'] ) );
					$backoffDeltas[$jType] = isset( $backoffDeltas[$jType] )
						? $backoffDeltas[$jType] + $ttw
						: $ttw;
				}

				$response['jobs'][] = [
					'type' => $jType,
					'status' => ( $info['status'] === false ) ? 'failed' : 'ok',
					'error' => $info['error'],
					'time' => $info['timeMs']
				];
				$timeMsTotal += $info['timeMs'];

				// Break out if we hit the job count or wall time limits...
				if ( $maxJobs && $jobsPopped >= $maxJobs ) {
					$response['reached'] = 'job-limit';
					break;
				} elseif ( $maxTime && ( microtime( true ) - $startTime ) > $maxTime ) {
					$response['reached'] = 'time-limit';
					break;
				}

				// Don't let any of the main DB replica DBs get backed up.
				// This only waits for so long before exiting and letting
				// other wikis in the farm (on different masters) get a chance.
				$timePassed = microtime( true ) - $lastCheckTime;
				if ( $timePassed >= self::LAG_CHECK_PERIOD || $timePassed < 0 ) {
					try {
						$lbFactory->waitForReplication( [
							'ifWritesSince' => $lastCheckTime,
							'timeout' => self::MAX_ALLOWED_LAG
						] );
					} catch ( DBReplicationWaitError $e ) {
						$response['reached'] = 'replica-lag-limit';
						break;
					}
					$lastCheckTime = microtime( true );
				}
				// Don't let any queue replica DBs/backups fall behind
				if ( $jobsPopped > 0 && ( $jobsPopped % 100 ) == 0 ) {
					$group->waitForBackups();
				}

				// Bail if near-OOM instead of in a job
				if ( !$this->checkMemoryOK() ) {
					$response['reached'] = 'memory-limit';
					break;
				}
			}
		} while ( $job ); // stop when there are no jobs

		// Sync the persistent backoffs for the next runJobs.php pass
		if ( $backoffDeltas ) {
			$this->syncBackoffDeltas( $backoffs, $backoffDeltas, 'wait' );
		}

		$response['backoffs'] = $backoffs;
		$response['elapsed'] = $timeMsTotal;

		return $response;
	}
262
263 /**
264 * @param string $error
265 * @return int TTL in seconds
266 */
267 private function getErrorBackoffTTL( $error ) {
268 return strpos( $error, 'DBReadOnlyError' ) !== false
269 ? self::READONLY_BACKOFF_TTL
270 : self::ERROR_BACKOFF_TTL;
271 }
272
	/**
	 * Run a single job inside its own transaction round, collect timing/memory
	 * statistics, and return a status summary.
	 *
	 * Exceptions thrown by the job are caught, rolled back, and logged; the job
	 * is then reported as failed rather than propagating the exception.
	 *
	 * @param Job $job
	 * @param LBFactory $lbFactory
	 * @param StatsdDataFactory $stats
	 * @param float $popTime UNIX timestamp of when the job was popped off the queue
	 * @return array Map of status/error/timeMs
	 */
	private function executeJob( Job $job, LBFactory $lbFactory, $stats, $popTime ) {
		$jType = $job->getType();
		$msg = $job->toString() . " STARTING";
		$this->logger->debug( $msg, [
			'job_type' => $job->getType(),
		] );
		$this->debugCallback( $msg );

		// Run the job...
		$rssStart = $this->getMaxRssKb();
		$jobStartTime = microtime( true );
		try {
			$fnameTrxOwner = get_class( $job ) . '::run'; // give run() outer scope
			if ( !$job->hasExecutionFlag( $job::JOB_NO_EXPLICIT_TRX_ROUND ) ) {
				$lbFactory->beginMasterChanges( $fnameTrxOwner );
			}
			$status = $job->run();
			$error = $job->getLastError();
			$this->commitMasterChanges( $lbFactory, $job, $fnameTrxOwner );
			// Important: this must be the last deferred update added (T100085, T154425)
			DeferredUpdates::addCallableUpdate( [ JobQueueGroup::class, 'pushLazyJobs' ] );
			// Run any deferred update tasks; doUpdates() manages transactions itself
			DeferredUpdates::doUpdates();
		} catch ( Exception $e ) {
			MWExceptionHandler::rollbackMasterChangesAndLog( $e );
			$status = false;
			$error = get_class( $e ) . ': ' . $e->getMessage();
		}
		// Always attempt to call teardown() even if Job throws exception.
		try {
			$job->teardown( $status );
		} catch ( Exception $e ) {
			MWExceptionHandler::logException( $e );
		}

		// Commit all outstanding connections that are in a transaction
		// to get a fresh repeatable read snapshot on every connection.
		// Note that jobs are still responsible for handling replica DB lag.
		$lbFactory->flushReplicaSnapshots( __METHOD__ );
		// Clear out title cache data from prior snapshots
		MediaWikiServices::getInstance()->getLinkCache()->clear();
		$timeMs = intval( ( microtime( true ) - $jobStartTime ) * 1000 );
		$rssEnd = $this->getMaxRssKb();

		// Record how long jobs wait before getting popped
		$readyTs = $job->getReadyTimestamp();
		if ( $readyTs ) {
			$pickupDelay = max( 0, $popTime - $readyTs );
			$stats->timing( 'jobqueue.pickup_delay.all', 1000 * $pickupDelay );
			$stats->timing( "jobqueue.pickup_delay.$jType", 1000 * $pickupDelay );
		}
		// Record root job age for jobs being run
		$rootTimestamp = $job->getRootJobParams()['rootJobTimestamp'];
		if ( $rootTimestamp ) {
			$age = max( 0, $popTime - wfTimestamp( TS_UNIX, $rootTimestamp ) );
			$stats->timing( "jobqueue.pickup_root_age.$jType", 1000 * $age );
		}
		// Track the execution time for jobs
		$stats->timing( "jobqueue.run.$jType", $timeMs );
		// Track RSS increases for jobs (in case of memory leaks)
		if ( $rssStart && $rssEnd ) {
			$stats->updateCount( "jobqueue.rss_delta.$jType", $rssEnd - $rssStart );
		}

		if ( $status === false ) {
			// Structured log entry (placeholders) plus a plain-text debug line
			$msg = $job->toString() . " t={job_duration} error={job_error}";
			$this->logger->error( $msg, [
				'job_type' => $job->getType(),
				'job_duration' => $timeMs,
				'job_error' => $error,
			] );

			$msg = $job->toString() . " t=$timeMs error={$error}";
			$this->debugCallback( $msg );
		} else {
			$msg = $job->toString() . " t={job_duration} good";
			$this->logger->info( $msg, [
				'job_type' => $job->getType(),
				'job_duration' => $timeMs,
			] );

			$msg = $job->toString() . " t=$timeMs good";
			$this->debugCallback( $msg );
		}

		return [ 'status' => $status, 'error' => $error, 'timeMs' => $timeMs ];
	}
367
368 /**
369 * @return int|null Max memory RSS in kilobytes
370 */
371 private function getMaxRssKb() {
372 $info = wfGetRusage() ?: [];
373 // see https://linux.die.net/man/2/getrusage
374 return isset( $info['ru_maxrss'] ) ? (int)$info['ru_maxrss'] : null;
375 }
376
377 /**
378 * @param Job $job
379 * @return int Seconds for this runner to avoid doing more jobs of this type
380 * @see $wgJobBackoffThrottling
381 */
382 private function getBackoffTimeToWait( Job $job ) {
383 $throttling = $this->config->get( 'JobBackoffThrottling' );
384
385 if ( !isset( $throttling[$job->getType()] ) || $job instanceof DuplicateJob ) {
386 return 0; // not throttled
387 }
388
389 $itemsPerSecond = $throttling[$job->getType()];
390 if ( $itemsPerSecond <= 0 ) {
391 return 0; // not throttled
392 }
393
394 $seconds = 0;
395 if ( $job->workItemCount() > 0 ) {
396 $exactSeconds = $job->workItemCount() / $itemsPerSecond;
397 // use randomized rounding
398 $seconds = floor( $exactSeconds );
399 $remainder = $exactSeconds - $seconds;
400 $seconds += ( mt_rand() / mt_getrandmax() < $remainder ) ? 1 : 0;
401 }
402
403 return (int)$seconds;
404 }
405
406 /**
407 * Get the previous backoff expiries from persistent storage
408 * On I/O or lock acquisition failure this returns the original $backoffs.
409 *
410 * @param array $backoffs Map of (job type => UNIX timestamp)
411 * @param string $mode Lock wait mode - "wait" or "nowait"
412 * @return array Map of (job type => backoff expiry timestamp)
413 */
414 private function loadBackoffs( array $backoffs, $mode = 'wait' ) {
415 $file = wfTempDir() . '/mw-runJobs-backoffs.json';
416 if ( is_file( $file ) ) {
417 $noblock = ( $mode === 'nowait' ) ? LOCK_NB : 0;
418 $handle = fopen( $file, 'rb' );
419 if ( !flock( $handle, LOCK_SH | $noblock ) ) {
420 fclose( $handle );
421 return $backoffs; // don't wait on lock
422 }
423 $content = stream_get_contents( $handle );
424 flock( $handle, LOCK_UN );
425 fclose( $handle );
426 $ctime = microtime( true );
427 $cBackoffs = json_decode( $content, true ) ?: [];
428 foreach ( $cBackoffs as $type => $timestamp ) {
429 if ( $timestamp < $ctime ) {
430 unset( $cBackoffs[$type] );
431 }
432 }
433 } else {
434 $cBackoffs = [];
435 }
436
437 return $cBackoffs;
438 }
439
440 /**
441 * Merge the current backoff expiries from persistent storage
442 *
443 * The $deltas map is set to an empty array on success.
444 * On I/O or lock acquisition failure this returns the original $backoffs.
445 *
446 * @param array $backoffs Map of (job type => UNIX timestamp)
447 * @param array $deltas Map of (job type => seconds)
448 * @param string $mode Lock wait mode - "wait" or "nowait"
449 * @return array The new backoffs account for $backoffs and the latest file data
450 */
451 private function syncBackoffDeltas( array $backoffs, array &$deltas, $mode = 'wait' ) {
452 if ( !$deltas ) {
453 return $this->loadBackoffs( $backoffs, $mode );
454 }
455
456 $noblock = ( $mode === 'nowait' ) ? LOCK_NB : 0;
457 $file = wfTempDir() . '/mw-runJobs-backoffs.json';
458 $handle = fopen( $file, 'wb+' );
459 if ( !flock( $handle, LOCK_EX | $noblock ) ) {
460 fclose( $handle );
461 return $backoffs; // don't wait on lock
462 }
463 $ctime = microtime( true );
464 $content = stream_get_contents( $handle );
465 $cBackoffs = json_decode( $content, true ) ?: [];
466 foreach ( $deltas as $type => $seconds ) {
467 $cBackoffs[$type] = isset( $cBackoffs[$type] ) && $cBackoffs[$type] >= $ctime
468 ? $cBackoffs[$type] + $seconds
469 : $ctime + $seconds;
470 }
471 foreach ( $cBackoffs as $type => $timestamp ) {
472 if ( $timestamp < $ctime ) {
473 unset( $cBackoffs[$type] );
474 }
475 }
476 ftruncate( $handle, 0 );
477 fwrite( $handle, json_encode( $cBackoffs ) );
478 flock( $handle, LOCK_UN );
479 fclose( $handle );
480
481 $deltas = [];
482
483 return $cBackoffs;
484 }
485
486 /**
487 * Make sure that this script is not too close to the memory usage limit.
488 * It is better to die in between jobs than OOM right in the middle of one.
489 * @return bool
490 */
491 private function checkMemoryOK() {
492 static $maxBytes = null;
493 if ( $maxBytes === null ) {
494 $m = [];
495 if ( preg_match( '!^(\d+)(k|m|g|)$!i', ini_get( 'memory_limit' ), $m ) ) {
496 list( , $num, $unit ) = $m;
497 $conv = [ 'g' => 1073741824, 'm' => 1048576, 'k' => 1024, '' => 1 ];
498 $maxBytes = $num * $conv[strtolower( $unit )];
499 } else {
500 $maxBytes = 0;
501 }
502 }
503 $usedBytes = memory_get_usage();
504 if ( $maxBytes && $usedBytes >= 0.95 * $maxBytes ) {
505 $msg = "Detected excessive memory usage ({used_bytes}/{max_bytes}).";
506 $this->logger->error( $msg, [
507 'used_bytes' => $usedBytes,
508 'max_bytes' => $maxBytes,
509 ] );
510
511 $msg = "Detected excessive memory usage ($usedBytes/$maxBytes).";
512 $this->debugCallback( $msg );
513
514 return false;
515 }
516
517 return true;
518 }
519
520 /**
521 * Log the job message
522 * @param string $msg The message to log
523 */
524 private function debugCallback( $msg ) {
525 if ( $this->debug ) {
526 call_user_func_array( $this->debug, [ wfTimestamp( TS_DB ) . " $msg\n" ] );
527 }
528 }
529
	/**
	 * Issue a commit on all masters who are currently in a transaction and have
	 * made changes to the database. It also supports sometimes waiting for the
	 * local wiki's replica DBs to catch up. See the documentation for
	 * $wgJobSerialCommitThreshold for more.
	 *
	 * Large writes (>= the threshold) are serialized via a named DB lock so only
	 * one runner commits a big batch at a time, after waiting for replicas.
	 *
	 * @param LBFactory $lbFactory
	 * @param Job $job
	 * @param string $fnameTrxOwner Transaction round owner name (Class::run)
	 * @throws DBError On commit failure or serial-commit lock timeout
	 */
	private function commitMasterChanges( LBFactory $lbFactory, Job $job, $fnameTrxOwner ) {
		$syncThreshold = $this->config->get( 'JobSerialCommitThreshold' );

		$time = false;
		$lb = $lbFactory->getMainLB( wfWikiID() );
		// Decide whether this commit is large enough to serialize ($dbwSerial is
		// either the master connection to serialize on, or false for a plain commit)
		if ( $syncThreshold !== false && $lb->getServerCount() > 1 ) {
			// Generally, there is one master connection to the local DB
			$dbwSerial = $lb->getAnyOpenConnection( $lb->getWriterIndex() );
			// We need natively blocking fast locks
			if ( $dbwSerial && $dbwSerial->namedLocksEnqueue() ) {
				$time = $dbwSerial->pendingWriteQueryDuration( $dbwSerial::ESTIMATE_DB_APPLY );
				if ( $time < $syncThreshold ) {
					$dbwSerial = false;
				}
			} else {
				$dbwSerial = false;
			}
		} else {
			// There are no replica DBs or writes are all to foreign DB (we don't handle that)
			$dbwSerial = false;
		}

		if ( !$dbwSerial ) {
			// Fast path: commit without serialization
			$lbFactory->commitMasterChanges(
				$fnameTrxOwner,
				// Abort if any transaction was too big
				[ 'maxWriteDuration' => $this->config->get( 'MaxJobDBWriteDuration' ) ]
			);

			return;
		}

		$ms = intval( 1000 * $time );

		// Structured log entry (placeholders) plus a plain-text debug line
		$msg = $job->toString() . " COMMIT ENQUEUED [{job_commit_write_ms}ms of writes]";
		$this->logger->info( $msg, [
			'job_type' => $job->getType(),
			'job_commit_write_ms' => $ms,
		] );

		$msg = $job->toString() . " COMMIT ENQUEUED [{$ms}ms of writes]";
		$this->debugCallback( $msg );

		// Wait for an exclusive lock to commit
		if ( !$dbwSerial->lock( 'jobrunner-serial-commit', __METHOD__, 30 ) ) {
			// This will trigger a rollback in the main loop
			throw new DBError( $dbwSerial, "Timed out waiting on commit queue." );
		}
		// Release the lock on scope exit, even if waitForAll()/commit below throws
		$unlocker = new ScopedCallback( function () use ( $dbwSerial ) {
			$dbwSerial->unlock( 'jobrunner-serial-commit', __METHOD__ );
		} );

		// Wait for the replica DBs to catch up
		$pos = $lb->getMasterPos();
		if ( $pos ) {
			$lb->waitForAll( $pos );
		}

		// Actually commit the DB master changes
		$lbFactory->commitMasterChanges(
			$fnameTrxOwner,
			// Abort if any transaction was too big
			[ 'maxWriteDuration' => $this->config->get( 'MaxJobDBWriteDuration' ) ]
		);
		ScopedCallback::consume( $unlocker );
	}
607 }