Merge "Make DBAccessBase use DBConnRef, rename $wiki, and hide getLoadBalancer()"
[lhc/web/wiklou.git] / includes / jobqueue / JobRunner.php
1 <?php
2 /**
3 * Job queue runner utility methods
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 * http://www.gnu.org/copyleft/gpl.html
19 *
20 * @file
21 * @ingroup JobQueue
22 */
23
24 use MediaWiki\MediaWikiServices;
25 use MediaWiki\Logger\LoggerFactory;
26 use Liuggio\StatsdClient\Factory\StatsdDataFactoryInterface;
27 use Psr\Log\LoggerAwareInterface;
28 use Psr\Log\LoggerInterface;
29 use Wikimedia\ScopedCallback;
30 use Wikimedia\Rdbms\LBFactory;
31 use Wikimedia\Rdbms\DBError;
32
33 /**
34 * Job queue runner utility methods
35 *
36 * @ingroup JobQueue
37 * @since 1.24
38 */
/**
 * Job queue runner utility methods
 *
 * @ingroup JobQueue
 * @since 1.24
 */
class JobRunner implements LoggerAwareInterface {
	/** @var Config */
	protected $config;
	/** @var callable|null Debug output handler */
	protected $debug;

	/**
	 * @var LoggerInterface $logger
	 */
	protected $logger;

	/** Abort the run if more than this many seconds of DB lag is present */
	const MAX_ALLOWED_LAG = 3;
	/** Check replica DB lag at most this often (seconds) between jobs */
	const LAG_CHECK_PERIOD = 1.0;
	/** Seconds to back off a queue due to errors */
	const ERROR_BACKOFF_TTL = 1;
	/** Seconds to back off a queue due to read-only errors */
	const READONLY_BACKOFF_TTL = 30;

	/**
	 * @param callable $debug Optional debug output handler
	 */
	public function setDebugHandler( $debug ) {
		$this->debug = $debug;
	}

	/**
	 * @param LoggerInterface $logger
	 * @return void
	 */
	public function setLogger( LoggerInterface $logger ) {
		$this->logger = $logger;
	}

	/**
	 * @param LoggerInterface|null $logger Defaults to the 'runJobs' channel
	 */
	public function __construct( LoggerInterface $logger = null ) {
		if ( $logger === null ) {
			$logger = LoggerFactory::getInstance( 'runJobs' );
		}
		$this->setLogger( $logger );
		$this->config = MediaWikiServices::getInstance()->getMainConfig();
	}

	/**
	 * Run jobs of the specified number/type for the specified time
	 *
	 * The response map has a 'job' field that lists status of each job, including:
	 *   - type : the job type
	 *   - status : ok/failed
	 *   - error : any error message string
	 *   - time : the job run time in ms
	 * The response map also has:
	 *   - backoffs : the (job type => seconds) map of backoff times
	 *   - elapsed : the total time spent running tasks in ms
	 *   - reached : the reason the script finished, one of (none-ready, job-limit, time-limit,
	 *  memory-limit)
	 *
	 * This method outputs status information only if a debug handler was set.
	 * Any exceptions are caught and logged, but are not reported as output.
	 *
	 * @param array $options Map of parameters:
	 *    - type : the job type (or false for the default types)
	 *    - maxJobs : maximum number of jobs to run
	 *    - maxTime : maximum time in seconds before stopping
	 *    - throttle : whether to respect job backoff configuration
	 * @return array Summary response that can easily be JSON serialized
	 */
	public function run( array $options ) {
		$jobClasses = $this->config->get( 'JobClasses' );
		$profilerLimits = $this->config->get( 'TrxProfilerLimits' );

		$response = [ 'jobs' => [], 'reached' => 'none-ready' ];

		$type = $options['type'] ?? false;
		$maxJobs = $options['maxJobs'] ?? false;
		$maxTime = $options['maxTime'] ?? false;
		$noThrottle = isset( $options['throttle'] ) && !$options['throttle'];

		// Bail if job type is invalid
		if ( $type !== false && !isset( $jobClasses[$type] ) ) {
			$response['reached'] = 'none-possible';
			return $response;
		}

		// Bail out if DB is in read-only mode
		if ( wfReadOnly() ) {
			$response['reached'] = 'read-only';
			return $response;
		}

		$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
		if ( $lbFactory->hasTransactionRound() ) {
			throw new LogicException( __METHOD__ . ' called with an active transaction round.' );
		}
		// Bail out if there is too much DB lag.
		// This check should not block as we want to try other wiki queues.
		list( , $maxLag ) = $lbFactory->getMainLB()->getMaxLag();
		if ( $maxLag >= self::MAX_ALLOWED_LAG ) {
			$response['reached'] = 'replica-lag-limit';
			return $response;
		}

		// Catch huge single updates that lead to replica DB lag
		$trxProfiler = Profiler::instance()->getTransactionProfiler();
		$trxProfiler->setLogger( LoggerFactory::getInstance( 'DBPerformance' ) );
		$trxProfiler->setExpectations( $profilerLimits['JobRunner'], __METHOD__ );

		// Some jobs types should not run until a certain timestamp
		$backoffs = []; // map of (type => UNIX expiry)
		$backoffDeltas = []; // map of (type => seconds)
		$wait = 'wait'; // block to read backoffs the first time

		$group = JobQueueGroup::singleton();
		$stats = MediaWikiServices::getInstance()->getStatsdDataFactory();
		$jobsPopped = 0;
		$timeMsTotal = 0;
		$startTime = microtime( true ); // time since jobs started running
		// Seed with 1 (not now) so that the first loop iteration always runs
		// the replication-lag check below
		$lastCheckTime = 1; // timestamp of last replica DB check
		do {
			// Sync the persistent backoffs with concurrent runners
			$backoffs = $this->syncBackoffDeltas( $backoffs, $backoffDeltas, $wait );
			$blacklist = $noThrottle ? [] : array_keys( $backoffs );
			$wait = 'nowait'; // less important now

			if ( $type === false ) {
				$job = $group->pop(
					JobQueueGroup::TYPE_DEFAULT,
					JobQueueGroup::USE_CACHE,
					$blacklist
				);
			} elseif ( in_array( $type, $blacklist ) ) {
				$job = false; // requested queue in backoff state
			} else {
				$job = $group->pop( $type ); // job from a single queue
			}

			if ( $job ) { // found a job
				++$jobsPopped;
				$popTime = time();
				$jType = $job->getType();

				WebRequest::overrideRequestId( $job->getRequestId() );

				// Back off of certain jobs for a while (for throttling and for errors)
				$ttw = $this->getBackoffTimeToWait( $job );
				if ( $ttw > 0 ) {
					// Always add the delta for other runners in case the time running the
					// job negated the backoff for each individually but not collectively.
					$backoffDeltas[$jType] = isset( $backoffDeltas[$jType] )
						? $backoffDeltas[$jType] + $ttw
						: $ttw;
					$backoffs = $this->syncBackoffDeltas( $backoffs, $backoffDeltas, $wait );
				}

				$info = $this->executeJob( $job, $lbFactory, $stats, $popTime );
				if ( $info['status'] !== false || !$job->allowRetries() ) {
					$group->ack( $job ); // succeeded or job cannot be retried
				}

				// Back off of certain jobs for a while (for throttling and for errors)
				// (sampled at ~2% so transient failures don't immediately stall a queue)
				if ( $info['status'] === false && mt_rand( 0, 49 ) == 0 ) {
					$ttw = max( $ttw, $this->getErrorBackoffTTL( $info['error'] ) );
					$backoffDeltas[$jType] = isset( $backoffDeltas[$jType] )
						? $backoffDeltas[$jType] + $ttw
						: $ttw;
				}

				$response['jobs'][] = [
					'type' => $jType,
					'status' => ( $info['status'] === false ) ? 'failed' : 'ok',
					'error' => $info['error'],
					'time' => $info['timeMs']
				];
				$timeMsTotal += $info['timeMs'];

				// Break out if we hit the job count or wall time limits...
				if ( $maxJobs && $jobsPopped >= $maxJobs ) {
					$response['reached'] = 'job-limit';
					break;
				} elseif ( $maxTime && ( microtime( true ) - $startTime ) > $maxTime ) {
					$response['reached'] = 'time-limit';
					break;
				}

				// Don't let any of the main DB replica DBs get backed up.
				// This only waits for so long before exiting and letting
				// other wikis in the farm (on different masters) get a chance.
				$timePassed = microtime( true ) - $lastCheckTime;
				if ( $timePassed >= self::LAG_CHECK_PERIOD || $timePassed < 0 ) {
					$success = $lbFactory->waitForReplication( [
						'ifWritesSince' => $lastCheckTime,
						'timeout' => self::MAX_ALLOWED_LAG,
					] );
					if ( !$success ) {
						$response['reached'] = 'replica-lag-limit';
						break;
					}
					$lastCheckTime = microtime( true );
				}

				// Bail if near-OOM instead of in a job
				if ( !$this->checkMemoryOK() ) {
					$response['reached'] = 'memory-limit';
					break;
				}
			}
		} while ( $job ); // stop when there are no jobs

		// Sync the persistent backoffs for the next runJobs.php pass
		if ( $backoffDeltas ) {
			$this->syncBackoffDeltas( $backoffs, $backoffDeltas, 'wait' );
		}

		$response['backoffs'] = $backoffs;
		$response['elapsed'] = $timeMsTotal;

		return $response;
	}

	/**
	 * Map a job error string to the backoff TTL for its queue
	 *
	 * @param string $error
	 * @return int TTL in seconds
	 */
	private function getErrorBackoffTTL( $error ) {
		// Read-only errors deserve a much longer backoff than generic failures
		return strpos( $error, 'DBReadOnlyError' ) !== false
			? self::READONLY_BACKOFF_TTL
			: self::ERROR_BACKOFF_TTL;
	}

	/**
	 * Run a single job, committing its changes and recording statsd metrics
	 *
	 * @param RunnableJob $job
	 * @param LBFactory $lbFactory
	 * @param StatsdDataFactoryInterface $stats
	 * @param float $popTime UNIX timestamp when the job was popped
	 * @return array Map of status/error/timeMs
	 */
	private function executeJob( RunnableJob $job, LBFactory $lbFactory, $stats, $popTime ) {
		$jType = $job->getType();
		$msg = $job->toString() . " STARTING";
		$this->logger->debug( $msg, [
			'job_type' => $job->getType(),
		] );
		$this->debugCallback( $msg );

		// Clear out title cache data from prior snapshots
		// (e.g. from before JobRunner was invoked in this process)
		MediaWikiServices::getInstance()->getLinkCache()->clear();

		// Run the job...
		$rssStart = $this->getMaxRssKb();
		$jobStartTime = microtime( true );
		try {
			$fnameTrxOwner = get_class( $job ) . '::run'; // give run() outer scope
			// Flush any pending changes left over from an implicit transaction round
			if ( $job->hasExecutionFlag( $job::JOB_NO_EXPLICIT_TRX_ROUND ) ) {
				$lbFactory->commitMasterChanges( $fnameTrxOwner ); // new implicit round
			} else {
				$lbFactory->beginMasterChanges( $fnameTrxOwner ); // new explicit round
			}
			// Clear any stale REPEATABLE-READ snapshots from replica DB connections
			$lbFactory->flushReplicaSnapshots( $fnameTrxOwner );
			$status = $job->run();
			$error = $job->getLastError();
			// Commit all pending changes from this job
			$this->commitMasterChanges( $lbFactory, $job, $fnameTrxOwner );
			// Run any deferred update tasks; doUpdates() manages transactions itself
			DeferredUpdates::doUpdates();
		} catch ( Exception $e ) {
			MWExceptionHandler::rollbackMasterChangesAndLog( $e );
			$status = false;
			$error = get_class( $e ) . ': ' . $e->getMessage();
		}
		// Always attempt to call teardown() even if Job throws exception.
		try {
			$job->tearDown( $status );
		} catch ( Exception $e ) {
			MWExceptionHandler::logException( $e );
		}

		$timeMs = intval( ( microtime( true ) - $jobStartTime ) * 1000 );
		$rssEnd = $this->getMaxRssKb();

		// Record how long jobs wait before getting popped
		$readyTs = $job->getReadyTimestamp();
		if ( $readyTs ) {
			$pickupDelay = max( 0, $popTime - $readyTs );
			$stats->timing( 'jobqueue.pickup_delay.all', 1000 * $pickupDelay );
			$stats->timing( "jobqueue.pickup_delay.$jType", 1000 * $pickupDelay );
		}
		// Record root job age for jobs being run
		$rootTimestamp = $job->getRootJobParams()['rootJobTimestamp'];
		if ( $rootTimestamp ) {
			$age = max( 0, $popTime - wfTimestamp( TS_UNIX, $rootTimestamp ) );
			$stats->timing( "jobqueue.pickup_root_age.$jType", 1000 * $age );
		}
		// Track the execution time for jobs
		$stats->timing( "jobqueue.run.$jType", $timeMs );
		// Track RSS increases for jobs (in case of memory leaks)
		if ( $rssStart && $rssEnd ) {
			$stats->updateCount( "jobqueue.rss_delta.$jType", $rssEnd - $rssStart );
		}

		if ( $status === false ) {
			$msg = $job->toString() . " t={job_duration} error={job_error}";
			$this->logger->error( $msg, [
				'job_type' => $job->getType(),
				'job_duration' => $timeMs,
				'job_error' => $error,
			] );

			$msg = $job->toString() . " t=$timeMs error={$error}";
			$this->debugCallback( $msg );
		} else {
			$msg = $job->toString() . " t={job_duration} good";
			$this->logger->info( $msg, [
				'job_type' => $job->getType(),
				'job_duration' => $timeMs,
			] );

			$msg = $job->toString() . " t=$timeMs good";
			$this->debugCallback( $msg );
		}

		return [ 'status' => $status, 'error' => $error, 'timeMs' => $timeMs ];
	}

	/**
	 * @return int|null Max memory RSS in kilobytes
	 */
	private function getMaxRssKb() {
		$info = wfGetRusage() ?: [];
		// see https://linux.die.net/man/2/getrusage
		return isset( $info['ru_maxrss'] ) ? (int)$info['ru_maxrss'] : null;
	}

	/**
	 * @param RunnableJob $job
	 * @return int Seconds for this runner to avoid doing more jobs of this type
	 * @see $wgJobBackoffThrottling
	 */
	private function getBackoffTimeToWait( RunnableJob $job ) {
		$throttling = $this->config->get( 'JobBackoffThrottling' );

		if ( !isset( $throttling[$job->getType()] ) || $job instanceof DuplicateJob ) {
			return 0; // not throttled
		}

		$itemsPerSecond = $throttling[$job->getType()];
		if ( $itemsPerSecond <= 0 ) {
			return 0; // not throttled
		}

		$seconds = 0;
		if ( $job->workItemCount() > 0 ) {
			$exactSeconds = $job->workItemCount() / $itemsPerSecond;
			// use randomized rounding
			$seconds = floor( $exactSeconds );
			$remainder = $exactSeconds - $seconds;
			$seconds += ( mt_rand() / mt_getrandmax() < $remainder ) ? 1 : 0;
		}

		return (int)$seconds;
	}

	/**
	 * Get the previous backoff expiries from persistent storage
	 * On I/O or lock acquisition failure this returns the original $backoffs.
	 *
	 * @param array $backoffs Map of (job type => UNIX timestamp)
	 * @param string $mode Lock wait mode - "wait" or "nowait"
	 * @return array Map of (job type => backoff expiry timestamp)
	 */
	private function loadBackoffs( array $backoffs, $mode = 'wait' ) {
		$file = wfTempDir() . '/mw-runJobs-backoffs.json';
		if ( is_file( $file ) ) {
			$noblock = ( $mode === 'nowait' ) ? LOCK_NB : 0;
			$handle = fopen( $file, 'rb' );
			if ( !$handle ) {
				return $backoffs; // could not open the file (I/O failure)
			}
			if ( !flock( $handle, LOCK_SH | $noblock ) ) {
				fclose( $handle );
				return $backoffs; // don't wait on lock
			}
			$content = stream_get_contents( $handle );
			flock( $handle, LOCK_UN );
			fclose( $handle );
			$ctime = microtime( true );
			$cBackoffs = json_decode( $content, true ) ?: [];
			// Purge expired backoffs
			foreach ( $cBackoffs as $type => $timestamp ) {
				if ( $timestamp < $ctime ) {
					unset( $cBackoffs[$type] );
				}
			}
		} else {
			$cBackoffs = [];
		}

		return $cBackoffs;
	}

	/**
	 * Merge the current backoff expiries from persistent storage
	 *
	 * The $deltas map is set to an empty array on success.
	 * On I/O or lock acquisition failure this returns the original $backoffs.
	 *
	 * @param array $backoffs Map of (job type => UNIX timestamp)
	 * @param array $deltas Map of (job type => seconds)
	 * @param string $mode Lock wait mode - "wait" or "nowait"
	 * @return array The new backoffs account for $backoffs and the latest file data
	 */
	private function syncBackoffDeltas( array $backoffs, array &$deltas, $mode = 'wait' ) {
		if ( !$deltas ) {
			return $this->loadBackoffs( $backoffs, $mode );
		}

		$noblock = ( $mode === 'nowait' ) ? LOCK_NB : 0;
		$file = wfTempDir() . '/mw-runJobs-backoffs.json';
		$handle = fopen( $file, 'wb+' );
		if ( !$handle ) {
			return $backoffs; // could not open the file (I/O failure)
		}
		if ( !flock( $handle, LOCK_EX | $noblock ) ) {
			fclose( $handle );
			return $backoffs; // don't wait on lock
		}
		$ctime = microtime( true );
		$content = stream_get_contents( $handle );
		$cBackoffs = json_decode( $content, true ) ?: [];
		// Push our deltas onto the expiries in the file, extending any still-active ones
		foreach ( $deltas as $type => $seconds ) {
			$cBackoffs[$type] = isset( $cBackoffs[$type] ) && $cBackoffs[$type] >= $ctime
				? $cBackoffs[$type] + $seconds
				: $ctime + $seconds;
		}
		// Purge expired backoffs
		foreach ( $cBackoffs as $type => $timestamp ) {
			if ( $timestamp < $ctime ) {
				unset( $cBackoffs[$type] );
			}
		}
		ftruncate( $handle, 0 );
		fwrite( $handle, json_encode( $cBackoffs ) );
		flock( $handle, LOCK_UN );
		fclose( $handle );

		$deltas = [];

		return $cBackoffs;
	}

	/**
	 * Make sure that this script is not too close to the memory usage limit.
	 * It is better to die in between jobs than OOM right in the middle of one.
	 * @return bool
	 */
	private function checkMemoryOK() {
		static $maxBytes = null;
		if ( $maxBytes === null ) {
			$m = [];
			if ( preg_match( '!^(\d+)(k|m|g|)$!i', ini_get( 'memory_limit' ), $m ) ) {
				list( , $num, $unit ) = $m;
				$conv = [ 'g' => 1073741824, 'm' => 1048576, 'k' => 1024, '' => 1 ];
				$maxBytes = $num * $conv[strtolower( $unit )];
			} else {
				// Unparsable limit (e.g. "-1" for unlimited); skip the check
				$maxBytes = 0;
			}
		}
		$usedBytes = memory_get_usage();
		if ( $maxBytes && $usedBytes >= 0.95 * $maxBytes ) {
			$msg = "Detected excessive memory usage ({used_bytes}/{max_bytes}).";
			$this->logger->error( $msg, [
				'used_bytes' => $usedBytes,
				'max_bytes' => $maxBytes,
			] );

			$msg = "Detected excessive memory usage ($usedBytes/$maxBytes).";
			$this->debugCallback( $msg );

			return false;
		}

		return true;
	}

	/**
	 * Log the job message
	 * @param string $msg The message to log
	 */
	private function debugCallback( $msg ) {
		if ( $this->debug ) {
			call_user_func( $this->debug, wfTimestamp( TS_DB ) . " $msg\n" );
		}
	}

	/**
	 * Issue a commit on all masters who are currently in a transaction and have
	 * made changes to the database. It also supports sometimes waiting for the
	 * local wiki's replica DBs to catch up. See the documentation for
	 * $wgJobSerialCommitThreshold for more.
	 *
	 * @param LBFactory $lbFactory
	 * @param RunnableJob $job
	 * @param string $fnameTrxOwner
	 * @throws DBError
	 */
	private function commitMasterChanges( LBFactory $lbFactory, RunnableJob $job, $fnameTrxOwner ) {
		$syncThreshold = $this->config->get( 'JobSerialCommitThreshold' );

		$time = false;
		$lb = $lbFactory->getMainLB();
		if ( $syncThreshold !== false && $lb->hasStreamingReplicaServers() ) {
			// Generally, there is one master connection to the local DB
			$dbwSerial = $lb->getAnyOpenConnection( $lb->getWriterIndex() );
			// We need natively blocking fast locks
			if ( $dbwSerial && $dbwSerial->namedLocksEnqueue() ) {
				$time = $dbwSerial->pendingWriteQueryDuration( $dbwSerial::ESTIMATE_DB_APPLY );
				if ( $time < $syncThreshold ) {
					// Writes are small enough to commit without serialization
					$dbwSerial = false;
				}
			} else {
				$dbwSerial = false;
			}
		} else {
			// There are no replica DBs or writes are all to foreign DB (we don't handle that)
			$dbwSerial = false;
		}

		if ( !$dbwSerial ) {
			$lbFactory->commitMasterChanges(
				$fnameTrxOwner,
				// Abort if any transaction was too big
				[ 'maxWriteDuration' => $this->config->get( 'MaxJobDBWriteDuration' ) ]
			);

			return;
		}

		$ms = intval( 1000 * $time );

		$msg = $job->toString() . " COMMIT ENQUEUED [{job_commit_write_ms}ms of writes]";
		$this->logger->info( $msg, [
			'job_type' => $job->getType(),
			'job_commit_write_ms' => $ms,
		] );

		$msg = $job->toString() . " COMMIT ENQUEUED [{$ms}ms of writes]";
		$this->debugCallback( $msg );

		// Wait for an exclusive lock to commit
		if ( !$dbwSerial->lock( 'jobrunner-serial-commit', $fnameTrxOwner, 30 ) ) {
			// This will trigger a rollback in the main loop
			throw new DBError( $dbwSerial, "Timed out waiting on commit queue." );
		}
		$unlocker = new ScopedCallback( function () use ( $dbwSerial, $fnameTrxOwner ) {
			$dbwSerial->unlock( 'jobrunner-serial-commit', $fnameTrxOwner );
		} );

		// Wait for the replica DBs to catch up
		$pos = $lb->getMasterPos();
		if ( $pos ) {
			$lb->waitForAll( $pos );
		}

		// Actually commit the DB master changes
		$lbFactory->commitMasterChanges(
			$fnameTrxOwner,
			// Abort if any transaction was too big
			[ 'maxWriteDuration' => $this->config->get( 'MaxJobDBWriteDuration' ) ]
		);
		ScopedCallback::consume( $unlocker );
	}
}
603 }