Update weblinks in comments from HTTP to HTTPS
[lhc/web/wiklou.git] / includes / jobqueue / JobRunner.php
1 <?php
2 /**
3 * Job queue runner utility methods
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 * https://www.gnu.org/copyleft/gpl.html
19 *
20 * @file
21 * @ingroup JobQueue
22 */
23
24 use MediaWiki\MediaWikiServices;
25 use MediaWiki\Logger\LoggerFactory;
26 use Liuggio\StatsdClient\Factory\StatsdDataFactory;
27 use Psr\Log\LoggerAwareInterface;
28 use Psr\Log\LoggerInterface;
29
30 /**
31 * Job queue runner utility methods
32 *
33 * @ingroup JobQueue
34 * @since 1.24
35 */
36 class JobRunner implements LoggerAwareInterface {
	/** @var callable|null Debug output handler; receives one formatted line (see debugCallback()) */
	protected $debug;

	/**
	 * @var LoggerInterface $logger Destination for per-job status and error messages
	 */
	protected $logger;

	const MAX_ALLOWED_LAG = 3; // abort if more than this many seconds of DB lag is present
	const LAG_CHECK_PERIOD = 1.0; // check replica DB lag at most once per this many seconds
	const ERROR_BACKOFF_TTL = 1; // seconds to back off a queue due to errors
48
	/**
	 * Set a handler that receives debug status lines as jobs run
	 *
	 * The handler is called with a single string argument: a timestamped,
	 * newline-terminated message (see debugCallback()).
	 *
	 * @param callable $debug Optional debug output handler
	 */
	public function setDebugHandler( $debug ) {
		$this->debug = $debug;
	}
55
	/**
	 * Set the logger used for per-job status and error messages
	 *
	 * @param LoggerInterface $logger
	 * @return void
	 */
	public function setLogger( LoggerInterface $logger ) {
		$this->logger = $logger;
	}
63
64 /**
65 * @param LoggerInterface $logger
66 */
67 public function __construct( LoggerInterface $logger = null ) {
68 if ( $logger === null ) {
69 $logger = LoggerFactory::getInstance( 'runJobs' );
70 }
71 $this->setLogger( $logger );
72 }
73
	/**
	 * Run jobs of the specified number/type for the specified time
	 *
	 * The response map has a 'job' field that lists status of each job, including:
	 *   - type : the job type
	 *   - status : ok/failed
	 *   - error : any error message string
	 *   - time : the job run time in ms
	 * The response map also has:
	 *   - backoffs : the (job type => seconds) map of backoff times
	 *   - elapsed : the total time spent running tasks in ms
	 *   - reached : the reason the script finished, one of (none-ready, none-possible,
	 *  read-only, replica-lag-limit, job-limit, time-limit, memory-limit)
	 *
	 * This method outputs status information only if a debug handler was set.
	 * Any exceptions are caught and logged, but are not reported as output.
	 *
	 * @param array $options Map of parameters:
	 *    - type : the job type (or false for the default types)
	 *    - maxJobs : maximum number of jobs to run
	 *    - maxTime : maximum time in seconds before stopping
	 *    - throttle : whether to respect job backoff configuration
	 * @return array Summary response that can easily be JSON serialized
	 */
	public function run( array $options ) {
		global $wgJobClasses, $wgTrxProfilerLimits;

		$response = [ 'jobs' => [], 'reached' => 'none-ready' ];

		// Optional limits; false means "no limit" (or "default types" for 'type')
		$type = isset( $options['type'] ) ? $options['type'] : false;
		$maxJobs = isset( $options['maxJobs'] ) ? $options['maxJobs'] : false;
		$maxTime = isset( $options['maxTime'] ) ? $options['maxTime'] : false;
		$noThrottle = isset( $options['throttle'] ) && !$options['throttle'];

		// Bail if job type is invalid
		if ( $type !== false && !isset( $wgJobClasses[$type] ) ) {
			$response['reached'] = 'none-possible';
			return $response;
		}
		// Bail out if DB is in read-only mode
		if ( wfReadOnly() ) {
			$response['reached'] = 'read-only';
			return $response;
		}

		$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
		// Bail out if there is too much DB lag.
		// This check should not block as we want to try other wiki queues.
		list( , $maxLag ) = $lbFactory->getMainLB( wfWikiID() )->getMaxLag();
		if ( $maxLag >= self::MAX_ALLOWED_LAG ) {
			$response['reached'] = 'replica-lag-limit';
			return $response;
		}

		// Flush any pending DB writes for sanity
		$lbFactory->commitAll( __METHOD__ );

		// Catch huge single updates that lead to replica DB lag
		$trxProfiler = Profiler::instance()->getTransactionProfiler();
		$trxProfiler->setLogger( LoggerFactory::getInstance( 'DBPerformance' ) );
		$trxProfiler->setExpectations( $wgTrxProfilerLimits['JobRunner'], __METHOD__ );

		// Some jobs types should not run until a certain timestamp
		$backoffs = []; // map of (type => UNIX expiry)
		$backoffDeltas = []; // map of (type => seconds)
		$wait = 'wait'; // block to read backoffs the first time

		$group = JobQueueGroup::singleton();
		$stats = MediaWikiServices::getInstance()->getStatsdDataFactory();
		$jobsPopped = 0;
		$timeMsTotal = 0;
		$startTime = microtime( true ); // time since jobs started running
		$lastCheckTime = 1; // timestamp of last replica DB check (1 forces a check on the first pass)
		do {
			// Sync the persistent backoffs with concurrent runners
			$backoffs = $this->syncBackoffDeltas( $backoffs, $backoffDeltas, $wait );
			$blacklist = $noThrottle ? [] : array_keys( $backoffs );
			$wait = 'nowait'; // less important now

			if ( $type === false ) {
				// Pop from any of the default queues, skipping backed-off types
				$job = $group->pop(
					JobQueueGroup::TYPE_DEFAULT,
					JobQueueGroup::USE_CACHE,
					$blacklist
				);
			} elseif ( in_array( $type, $blacklist ) ) {
				$job = false; // requested queue in backoff state
			} else {
				$job = $group->pop( $type ); // job from a single queue
			}
			$lbFactory->commitMasterChanges( __METHOD__ ); // flush any JobQueueDB writes

			if ( $job ) { // found a job
				++$jobsPopped;
				$popTime = time();
				$jType = $job->getType();

				// Propagate the request ID of whatever enqueued this job for tracing
				WebRequest::overrideRequestId( $job->getRequestId() );

				// Back off of certain jobs for a while (for throttling and for errors)
				$ttw = $this->getBackoffTimeToWait( $job );
				if ( $ttw > 0 ) {
					// Always add the delta for other runners in case the time running the
					// job negated the backoff for each individually but not collectively.
					$backoffDeltas[$jType] = isset( $backoffDeltas[$jType] )
						? $backoffDeltas[$jType] + $ttw
						: $ttw;
					$backoffs = $this->syncBackoffDeltas( $backoffs, $backoffDeltas, $wait );
				}

				$info = $this->executeJob( $job, $lbFactory, $stats, $popTime );
				if ( $info['status'] !== false || !$job->allowRetries() ) {
					$group->ack( $job ); // succeeded or job cannot be retried
					$lbFactory->commitMasterChanges( __METHOD__ ); // flush any JobQueueDB writes
				}

				// Back off of certain jobs for a while (for throttling and for errors);
				// the 1-in-50 mt_rand() check limits how often the error backoff applies
				if ( $info['status'] === false && mt_rand( 0, 49 ) == 0 ) {
					$ttw = max( $ttw, self::ERROR_BACKOFF_TTL ); // too many errors
					$backoffDeltas[$jType] = isset( $backoffDeltas[$jType] )
						? $backoffDeltas[$jType] + $ttw
						: $ttw;
				}

				$response['jobs'][] = [
					'type' => $jType,
					'status' => ( $info['status'] === false ) ? 'failed' : 'ok',
					'error' => $info['error'],
					'time' => $info['timeMs']
				];
				$timeMsTotal += $info['timeMs'];

				// Break out if we hit the job count or wall time limits...
				if ( $maxJobs && $jobsPopped >= $maxJobs ) {
					$response['reached'] = 'job-limit';
					break;
				} elseif ( $maxTime && ( microtime( true ) - $startTime ) > $maxTime ) {
					$response['reached'] = 'time-limit';
					break;
				}

				// Don't let any of the main DB replica DBs get backed up.
				// This only waits for so long before exiting and letting
				// other wikis in the farm (on different masters) get a chance.
				$timePassed = microtime( true ) - $lastCheckTime;
				if ( $timePassed >= self::LAG_CHECK_PERIOD || $timePassed < 0 ) {
					try {
						$lbFactory->waitForReplication( [
							'ifWritesSince' => $lastCheckTime,
							'timeout' => self::MAX_ALLOWED_LAG
						] );
					} catch ( DBReplicationWaitError $e ) {
						$response['reached'] = 'replica-lag-limit';
						break;
					}
					$lastCheckTime = microtime( true );
				}
				// Don't let any queue replica DBs/backups fall behind
				if ( $jobsPopped > 0 && ( $jobsPopped % 100 ) == 0 ) {
					$group->waitForBackups();
				}

				// Bail if near-OOM instead of in a job
				if ( !$this->checkMemoryOK() ) {
					$response['reached'] = 'memory-limit';
					break;
				}
			}
		} while ( $job ); // stop when there are no jobs

		// Sync the persistent backoffs for the next runJobs.php pass
		if ( $backoffDeltas ) {
			$this->syncBackoffDeltas( $backoffs, $backoffDeltas, 'wait' );
		}

		$response['backoffs'] = $backoffs;
		$response['elapsed'] = $timeMsTotal;

		return $response;
	}
254
	/**
	 * Run a single job, handling DB commits, teardown, and statistics
	 *
	 * DB master changes made by the job are committed (possibly serialized via
	 * commitMasterChanges()) and deferred updates are run afterwards; on
	 * exception, master changes are rolled back and the error is recorded.
	 *
	 * @param Job $job
	 * @param LBFactory $lbFactory
	 * @param StatsdDataFactory $stats
	 * @param int|float $popTime UNIX timestamp of when the job was popped
	 * @return array Map of status/error/timeMs
	 */
	private function executeJob( Job $job, LBFactory $lbFactory, $stats, $popTime ) {
		$jType = $job->getType();
		$msg = $job->toString() . " STARTING";
		$this->logger->debug( $msg );
		$this->debugCallback( $msg );

		// Run the job...
		$rssStart = $this->getMaxRssKb();
		$jobStartTime = microtime( true );
		try {
			$fnameTrxOwner = get_class( $job ) . '::run'; // give run() outer scope
			$lbFactory->beginMasterChanges( $fnameTrxOwner );
			$status = $job->run();
			$error = $job->getLastError();
			$this->commitMasterChanges( $lbFactory, $job, $fnameTrxOwner );
			// Run any deferred update tasks; doUpdates() manages transactions itself
			DeferredUpdates::doUpdates();
		} catch ( Exception $e ) {
			MWExceptionHandler::rollbackMasterChangesAndLog( $e );
			$status = false;
			$error = get_class( $e ) . ': ' . $e->getMessage();
		}
		// Always attempt to call teardown() even if Job throws exception.
		try {
			$job->teardown( $status );
		} catch ( Exception $e ) {
			// teardown() failures are logged but do not change the job status
			MWExceptionHandler::logException( $e );
		}

		// Commit all outstanding connections that are in a transaction
		// to get a fresh repeatable read snapshot on every connection.
		// Note that jobs are still responsible for handling replica DB lag.
		$lbFactory->flushReplicaSnapshots( __METHOD__ );
		// Clear out title cache data from prior snapshots
		MediaWikiServices::getInstance()->getLinkCache()->clear();
		$timeMs = intval( ( microtime( true ) - $jobStartTime ) * 1000 );
		$rssEnd = $this->getMaxRssKb();

		// Record how long jobs wait before getting popped
		$readyTs = $job->getReadyTimestamp();
		if ( $readyTs ) {
			$pickupDelay = max( 0, $popTime - $readyTs );
			$stats->timing( 'jobqueue.pickup_delay.all', 1000 * $pickupDelay );
			$stats->timing( "jobqueue.pickup_delay.$jType", 1000 * $pickupDelay );
		}
		// Record root job age for jobs being run
		$rootTimestamp = $job->getRootJobParams()['rootJobTimestamp'];
		if ( $rootTimestamp ) {
			$age = max( 0, $popTime - wfTimestamp( TS_UNIX, $rootTimestamp ) );
			$stats->timing( "jobqueue.pickup_root_age.$jType", 1000 * $age );
		}
		// Track the execution time for jobs
		$stats->timing( "jobqueue.run.$jType", $timeMs );
		// Track RSS increases for jobs (in case of memory leaks)
		if ( $rssStart && $rssEnd ) {
			$stats->updateCount( "jobqueue.rss_delta.$jType", $rssEnd - $rssStart );
		}

		if ( $status === false ) {
			$msg = $job->toString() . " t=$timeMs error={$error}";
			$this->logger->error( $msg );
			$this->debugCallback( $msg );
		} else {
			$msg = $job->toString() . " t=$timeMs good";
			$this->logger->info( $msg );
			$this->debugCallback( $msg );
		}

		return [ 'status' => $status, 'error' => $error, 'timeMs' => $timeMs ];
	}
332
333 /**
334 * @return int|null Max memory RSS in kilobytes
335 */
336 private function getMaxRssKb() {
337 $info = wfGetRusage() ?: [];
338 // see https://linux.die.net/man/2/getrusage
339 return isset( $info['ru_maxrss'] ) ? (int)$info['ru_maxrss'] : null;
340 }
341
342 /**
343 * @param Job $job
344 * @return int Seconds for this runner to avoid doing more jobs of this type
345 * @see $wgJobBackoffThrottling
346 */
347 private function getBackoffTimeToWait( Job $job ) {
348 global $wgJobBackoffThrottling;
349
350 if ( !isset( $wgJobBackoffThrottling[$job->getType()] ) ||
351 $job instanceof DuplicateJob // no work was done
352 ) {
353 return 0; // not throttled
354 }
355
356 $itemsPerSecond = $wgJobBackoffThrottling[$job->getType()];
357 if ( $itemsPerSecond <= 0 ) {
358 return 0; // not throttled
359 }
360
361 $seconds = 0;
362 if ( $job->workItemCount() > 0 ) {
363 $exactSeconds = $job->workItemCount() / $itemsPerSecond;
364 // use randomized rounding
365 $seconds = floor( $exactSeconds );
366 $remainder = $exactSeconds - $seconds;
367 $seconds += ( mt_rand() / mt_getrandmax() < $remainder ) ? 1 : 0;
368 }
369
370 return (int)$seconds;
371 }
372
373 /**
374 * Get the previous backoff expiries from persistent storage
375 * On I/O or lock acquisition failure this returns the original $backoffs.
376 *
377 * @param array $backoffs Map of (job type => UNIX timestamp)
378 * @param string $mode Lock wait mode - "wait" or "nowait"
379 * @return array Map of (job type => backoff expiry timestamp)
380 */
381 private function loadBackoffs( array $backoffs, $mode = 'wait' ) {
382 $file = wfTempDir() . '/mw-runJobs-backoffs.json';
383 if ( is_file( $file ) ) {
384 $noblock = ( $mode === 'nowait' ) ? LOCK_NB : 0;
385 $handle = fopen( $file, 'rb' );
386 if ( !flock( $handle, LOCK_SH | $noblock ) ) {
387 fclose( $handle );
388 return $backoffs; // don't wait on lock
389 }
390 $content = stream_get_contents( $handle );
391 flock( $handle, LOCK_UN );
392 fclose( $handle );
393 $ctime = microtime( true );
394 $cBackoffs = json_decode( $content, true ) ?: [];
395 foreach ( $cBackoffs as $type => $timestamp ) {
396 if ( $timestamp < $ctime ) {
397 unset( $cBackoffs[$type] );
398 }
399 }
400 } else {
401 $cBackoffs = [];
402 }
403
404 return $cBackoffs;
405 }
406
407 /**
408 * Merge the current backoff expiries from persistent storage
409 *
410 * The $deltas map is set to an empty array on success.
411 * On I/O or lock acquisition failure this returns the original $backoffs.
412 *
413 * @param array $backoffs Map of (job type => UNIX timestamp)
414 * @param array $deltas Map of (job type => seconds)
415 * @param string $mode Lock wait mode - "wait" or "nowait"
416 * @return array The new backoffs account for $backoffs and the latest file data
417 */
418 private function syncBackoffDeltas( array $backoffs, array &$deltas, $mode = 'wait' ) {
419 if ( !$deltas ) {
420 return $this->loadBackoffs( $backoffs, $mode );
421 }
422
423 $noblock = ( $mode === 'nowait' ) ? LOCK_NB : 0;
424 $file = wfTempDir() . '/mw-runJobs-backoffs.json';
425 $handle = fopen( $file, 'wb+' );
426 if ( !flock( $handle, LOCK_EX | $noblock ) ) {
427 fclose( $handle );
428 return $backoffs; // don't wait on lock
429 }
430 $ctime = microtime( true );
431 $content = stream_get_contents( $handle );
432 $cBackoffs = json_decode( $content, true ) ?: [];
433 foreach ( $deltas as $type => $seconds ) {
434 $cBackoffs[$type] = isset( $cBackoffs[$type] ) && $cBackoffs[$type] >= $ctime
435 ? $cBackoffs[$type] + $seconds
436 : $ctime + $seconds;
437 }
438 foreach ( $cBackoffs as $type => $timestamp ) {
439 if ( $timestamp < $ctime ) {
440 unset( $cBackoffs[$type] );
441 }
442 }
443 ftruncate( $handle, 0 );
444 fwrite( $handle, json_encode( $cBackoffs ) );
445 flock( $handle, LOCK_UN );
446 fclose( $handle );
447
448 $deltas = [];
449
450 return $cBackoffs;
451 }
452
453 /**
454 * Make sure that this script is not too close to the memory usage limit.
455 * It is better to die in between jobs than OOM right in the middle of one.
456 * @return bool
457 */
458 private function checkMemoryOK() {
459 static $maxBytes = null;
460 if ( $maxBytes === null ) {
461 $m = [];
462 if ( preg_match( '!^(\d+)(k|m|g|)$!i', ini_get( 'memory_limit' ), $m ) ) {
463 list( , $num, $unit ) = $m;
464 $conv = [ 'g' => 1073741824, 'm' => 1048576, 'k' => 1024, '' => 1 ];
465 $maxBytes = $num * $conv[strtolower( $unit )];
466 } else {
467 $maxBytes = 0;
468 }
469 }
470 $usedBytes = memory_get_usage();
471 if ( $maxBytes && $usedBytes >= 0.95 * $maxBytes ) {
472 $msg = "Detected excessive memory usage ($usedBytes/$maxBytes).";
473 $this->debugCallback( $msg );
474 $this->logger->error( $msg );
475
476 return false;
477 }
478
479 return true;
480 }
481
482 /**
483 * Log the job message
484 * @param string $msg The message to log
485 */
486 private function debugCallback( $msg ) {
487 if ( $this->debug ) {
488 call_user_func_array( $this->debug, [ wfTimestamp( TS_DB ) . " $msg\n" ] );
489 }
490 }
491
	/**
	 * Issue a commit on all masters who are currently in a transaction and have
	 * made changes to the database. It also supports sometimes waiting for the
	 * local wiki's replica DBs to catch up. See the documentation for
	 * $wgJobSerialCommitThreshold for more.
	 *
	 * @param LBFactory $lbFactory
	 * @param Job $job
	 * @param string $fnameTrxOwner Transaction owner name (the job's Class::run)
	 * @throws DBError On timeout waiting for the serialized-commit lock
	 */
	private function commitMasterChanges( LBFactory $lbFactory, Job $job, $fnameTrxOwner ) {
		global $wgJobSerialCommitThreshold;

		$time = false;
		$lb = $lbFactory->getMainLB( wfWikiID() );
		if ( $wgJobSerialCommitThreshold !== false && $lb->getServerCount() > 1 ) {
			// Generally, there is one master connection to the local DB
			$dbwSerial = $lb->getAnyOpenConnection( $lb->getWriterIndex() );
			// We need natively blocking fast locks
			if ( $dbwSerial && $dbwSerial->namedLocksEnqueue() ) {
				$time = $dbwSerial->pendingWriteQueryDuration( $dbwSerial::ESTIMATE_DB_APPLY );
				if ( $time < $wgJobSerialCommitThreshold ) {
					// Writes are light enough; no need to serialize this commit
					$dbwSerial = false;
				}
			} else {
				$dbwSerial = false;
			}
		} else {
			// There are no replica DBs or writes are all to foreign DB (we don't handle that)
			$dbwSerial = false;
		}

		if ( !$dbwSerial ) {
			// Plain (non-serialized) commit path
			$lbFactory->commitMasterChanges( $fnameTrxOwner );
			return;
		}

		$ms = intval( 1000 * $time );
		$msg = $job->toString() . " COMMIT ENQUEUED [{$ms}ms of writes]";
		$this->logger->info( $msg );
		$this->debugCallback( $msg );

		// Wait for an exclusive lock to commit
		if ( !$dbwSerial->lock( 'jobrunner-serial-commit', __METHOD__, 30 ) ) {
			// This will trigger a rollback in the main loop
			throw new DBError( $dbwSerial, "Timed out waiting on commit queue." );
		}
		// Release the lock on scope exit, even if waiting/committing throws
		$unlocker = new ScopedCallback( function () use ( $dbwSerial ) {
			$dbwSerial->unlock( 'jobrunner-serial-commit', __METHOD__ );
		} );

		// Wait for the replica DBs to catch up
		$pos = $lb->getMasterPos();
		if ( $pos ) {
			$lb->waitForAll( $pos );
		}

		// Actually commit the DB master changes
		$lbFactory->commitMasterChanges( $fnameTrxOwner );
		ScopedCallback::consume( $unlocker );
	}
554 }