Merge "Drop index oi_name_archive_name on table oldimage"
[lhc/web/wiklou.git] / includes / jobqueue / JobRunner.php
1 <?php
2 /**
3 * Job queue runner utility methods
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 * http://www.gnu.org/copyleft/gpl.html
19 *
20 * @file
21 * @ingroup JobQueue
22 */
23
24 use MediaWiki\MediaWikiServices;
25 use MediaWiki\Logger\LoggerFactory;
26 use Liuggio\StatsdClient\Factory\StatsdDataFactory;
27 use Psr\Log\LoggerAwareInterface;
28 use Psr\Log\LoggerInterface;
29 use Wikimedia\ScopedCallback;
30 use Wikimedia\Rdbms\LBFactory;
31
/**
 * Job queue runner utility methods
 *
 * Pops jobs off the JobQueueGroup queues and executes them, tracking
 * per-type backoff state (shared across runners via a temp file) and
 * bailing out on replica DB lag, read-only mode, or excessive memory use.
 *
 * @ingroup JobQueue
 * @since 1.24
 */
class JobRunner implements LoggerAwareInterface {
	/** @var callable|null Debug output handler; invoked via debugCallback() */
	protected $debug;

	/**
	 * @var LoggerInterface $logger
	 */
	protected $logger;

	const MAX_ALLOWED_LAG = 3; // abort if more than this much DB lag is present
	const LAG_CHECK_PERIOD = 1.0; // check replica DB lag this many seconds
	const ERROR_BACKOFF_TTL = 1; // seconds to back off a queue due to errors
	const READONLY_BACKOFF_TTL = 30; // seconds to back off a queue due to read-only errors
/**
 * Set the handler that debugCallback() passes timestamped status lines to
 *
 * @param callable|null $debug Optional debug output handler
 */
public function setDebugHandler( $debug ) {
	$this->debug = $debug;
}
58
/**
 * Set the logger used for job status/error reporting
 *
 * @param LoggerInterface $logger
 * @return void
 */
public function setLogger( LoggerInterface $logger ) {
	$this->logger = $logger;
}
66
/**
 * @param LoggerInterface $logger Defaults to the 'runJobs' log channel
 */
public function __construct( LoggerInterface $logger = null ) {
	// A LoggerInterface instance is always truthy, so ?: only triggers the
	// fallback for the null default
	$this->setLogger( $logger ?: LoggerFactory::getInstance( 'runJobs' ) );
}
76
/**
 * Run jobs of the specified number/type for the specified time
 *
 * The response map has a 'job' field that lists status of each job, including:
 *   - type : the job type
 *   - status : ok/failed
 *   - error : any error message string
 *   - time : the job run time in ms
 * The response map also has:
 *   - backoffs : the (job type => seconds) map of backoff times
 *   - elapsed : the total time spent running tasks in ms
 *   - reached : the reason the script finished, one of (none-ready, job-limit, time-limit,
 *     memory-limit)
 *
 * This method outputs status information only if a debug handler was set.
 * Any exceptions are caught and logged, but are not reported as output.
 *
 * @param array $options Map of parameters:
 *   - type : the job type (or false for the default types)
 *   - maxJobs : maximum number of jobs to run
 *   - maxTime : maximum time in seconds before stopping
 *   - throttle : whether to respect job backoff configuration
 * @return array Summary response that can easily be JSON serialized
 */
public function run( array $options ) {
	global $wgJobClasses, $wgTrxProfilerLimits;

	$response = [ 'jobs' => [], 'reached' => 'none-ready' ];

	$type = isset( $options['type'] ) ? $options['type'] : false;
	$maxJobs = isset( $options['maxJobs'] ) ? $options['maxJobs'] : false;
	$maxTime = isset( $options['maxTime'] ) ? $options['maxTime'] : false;
	$noThrottle = isset( $options['throttle'] ) && !$options['throttle'];

	// Bail if job type is invalid
	if ( $type !== false && !isset( $wgJobClasses[$type] ) ) {
		$response['reached'] = 'none-possible';
		return $response;
	}
	// Bail out if DB is in read-only mode
	if ( wfReadOnly() ) {
		$response['reached'] = 'read-only';
		return $response;
	}

	$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
	// Bail out if there is too much DB lag.
	// This check should not block as we want to try other wiki queues.
	list( , $maxLag ) = $lbFactory->getMainLB( wfWikiID() )->getMaxLag();
	if ( $maxLag >= self::MAX_ALLOWED_LAG ) {
		$response['reached'] = 'replica-lag-limit';
		return $response;
	}

	// Flush any pending DB writes for sanity
	$lbFactory->commitAll( __METHOD__ );

	// Catch huge single updates that lead to replica DB lag
	$trxProfiler = Profiler::instance()->getTransactionProfiler();
	$trxProfiler->setLogger( LoggerFactory::getInstance( 'DBPerformance' ) );
	$trxProfiler->setExpectations( $wgTrxProfilerLimits['JobRunner'], __METHOD__ );

	// Some jobs types should not run until a certain timestamp
	$backoffs = []; // map of (type => UNIX expiry)
	$backoffDeltas = []; // map of (type => seconds)
	$wait = 'wait'; // block to read backoffs the first time

	$group = JobQueueGroup::singleton();
	$stats = MediaWikiServices::getInstance()->getStatsdDataFactory();
	$jobsPopped = 0;
	$timeMsTotal = 0;
	$startTime = microtime( true ); // time since jobs started running
	$lastCheckTime = 1; // timestamp of last replica DB check; 1 (~epoch) forces an initial check
	do {
		// Sync the persistent backoffs with concurrent runners
		$backoffs = $this->syncBackoffDeltas( $backoffs, $backoffDeltas, $wait );
		$blacklist = $noThrottle ? [] : array_keys( $backoffs );
		$wait = 'nowait'; // less important now

		if ( $type === false ) {
			$job = $group->pop(
				JobQueueGroup::TYPE_DEFAULT,
				JobQueueGroup::USE_CACHE,
				$blacklist
			);
		} elseif ( in_array( $type, $blacklist ) ) {
			$job = false; // requested queue in backoff state
		} else {
			$job = $group->pop( $type ); // job from a single queue
		}
		$lbFactory->commitMasterChanges( __METHOD__ ); // flush any JobQueueDB writes

		if ( $job ) { // found a job
			++$jobsPopped;
			$popTime = time();
			$jType = $job->getType();

			// Tag all log/profiling output for this job with the job's request ID
			WebRequest::overrideRequestId( $job->getRequestId() );

			// Back off of certain jobs for a while (for throttling and for errors)
			$ttw = $this->getBackoffTimeToWait( $job );
			if ( $ttw > 0 ) {
				// Always add the delta for other runners in case the time running the
				// job negated the backoff for each individually but not collectively.
				$backoffDeltas[$jType] = isset( $backoffDeltas[$jType] )
					? $backoffDeltas[$jType] + $ttw
					: $ttw;
				$backoffs = $this->syncBackoffDeltas( $backoffs, $backoffDeltas, $wait );
			}

			$info = $this->executeJob( $job, $lbFactory, $stats, $popTime );
			if ( $info['status'] !== false || !$job->allowRetries() ) {
				$group->ack( $job ); // succeeded or job cannot be retried
				$lbFactory->commitMasterChanges( __METHOD__ ); // flush any JobQueueDB writes
			}

			// Back off of certain jobs for a while (for throttling and for errors)
			// Sampled at a 1-in-50 rate (mt_rand( 0, 49 ) == 0) per failure
			if ( $info['status'] === false && mt_rand( 0, 49 ) == 0 ) {
				$ttw = max( $ttw, $this->getErrorBackoffTTL( $info['error'] ) );
				$backoffDeltas[$jType] = isset( $backoffDeltas[$jType] )
					? $backoffDeltas[$jType] + $ttw
					: $ttw;
			}

			$response['jobs'][] = [
				'type' => $jType,
				'status' => ( $info['status'] === false ) ? 'failed' : 'ok',
				'error' => $info['error'],
				'time' => $info['timeMs']
			];
			$timeMsTotal += $info['timeMs'];

			// Break out if we hit the job count or wall time limits...
			if ( $maxJobs && $jobsPopped >= $maxJobs ) {
				$response['reached'] = 'job-limit';
				break;
			} elseif ( $maxTime && ( microtime( true ) - $startTime ) > $maxTime ) {
				$response['reached'] = 'time-limit';
				break;
			}

			// Don't let any of the main DB replica DBs get backed up.
			// This only waits for so long before exiting and letting
			// other wikis in the farm (on different masters) get a chance.
			$timePassed = microtime( true ) - $lastCheckTime;
			if ( $timePassed >= self::LAG_CHECK_PERIOD || $timePassed < 0 ) {
				try {
					$lbFactory->waitForReplication( [
						'ifWritesSince' => $lastCheckTime,
						'timeout' => self::MAX_ALLOWED_LAG
					] );
				} catch ( DBReplicationWaitError $e ) {
					$response['reached'] = 'replica-lag-limit';
					break;
				}
				$lastCheckTime = microtime( true );
			}
			// Don't let any queue replica DBs/backups fall behind
			if ( $jobsPopped > 0 && ( $jobsPopped % 100 ) == 0 ) {
				$group->waitForBackups();
			}

			// Bail if near-OOM instead of in a job
			if ( !$this->checkMemoryOK() ) {
				$response['reached'] = 'memory-limit';
				break;
			}
		}
	} while ( $job ); // stop when there are no jobs

	// Sync the persistent backoffs for the next runJobs.php pass
	if ( $backoffDeltas ) {
		$this->syncBackoffDeltas( $backoffs, $backoffDeltas, 'wait' );
	}

	$response['backoffs'] = $backoffs;
	$response['elapsed'] = $timeMsTotal;

	return $response;
}
257
/**
 * Pick the backoff TTL for a failed job based on its error message
 *
 * @param string $error
 * @return int TTL in seconds
 */
private function getErrorBackoffTTL( $error ) {
	// Read-only failures warrant a much longer backoff than generic errors
	if ( strpos( $error, 'DBReadOnlyError' ) !== false ) {
		return self::READONLY_BACKOFF_TTL;
	}

	return self::ERROR_BACKOFF_TTL;
}
267
/**
 * Run a single job within its own master transaction round, commit or roll
 * back its DB changes, and record timing/memory statistics for it
 *
 * @param Job $job
 * @param LBFactory $lbFactory
 * @param StatsdDataFactory $stats
 * @param float $popTime UNIX timestamp of when the job was popped
 * @return array Map of status/error/timeMs
 */
private function executeJob( Job $job, LBFactory $lbFactory, $stats, $popTime ) {
	$jType = $job->getType();
	$msg = $job->toString() . " STARTING";
	$this->logger->debug( $msg );
	$this->debugCallback( $msg );

	// Run the job...
	$rssStart = $this->getMaxRssKb();
	$jobStartTime = microtime( true );
	try {
		$fnameTrxOwner = get_class( $job ) . '::run'; // give run() outer scope
		$lbFactory->beginMasterChanges( $fnameTrxOwner );
		$status = $job->run();
		$error = $job->getLastError();
		$this->commitMasterChanges( $lbFactory, $job, $fnameTrxOwner );
		// Run any deferred update tasks; doUpdates() manages transactions itself
		DeferredUpdates::doUpdates();
	} catch ( Exception $e ) {
		// Roll back any uncommitted job writes and treat the job as failed
		MWExceptionHandler::rollbackMasterChangesAndLog( $e );
		$status = false;
		$error = get_class( $e ) . ': ' . $e->getMessage();
	}
	// Always attempt to call teardown() even if Job throws exception.
	try {
		$job->teardown( $status );
	} catch ( Exception $e ) {
		MWExceptionHandler::logException( $e );
	}

	// Commit all outstanding connections that are in a transaction
	// to get a fresh repeatable read snapshot on every connection.
	// Note that jobs are still responsible for handling replica DB lag.
	$lbFactory->flushReplicaSnapshots( __METHOD__ );
	// Clear out title cache data from prior snapshots
	MediaWikiServices::getInstance()->getLinkCache()->clear();
	$timeMs = intval( ( microtime( true ) - $jobStartTime ) * 1000 );
	$rssEnd = $this->getMaxRssKb();

	// Record how long jobs wait before getting popped
	$readyTs = $job->getReadyTimestamp();
	if ( $readyTs ) {
		$pickupDelay = max( 0, $popTime - $readyTs );
		$stats->timing( 'jobqueue.pickup_delay.all', 1000 * $pickupDelay );
		$stats->timing( "jobqueue.pickup_delay.$jType", 1000 * $pickupDelay );
	}
	// Record root job age for jobs being run
	$rootTimestamp = $job->getRootJobParams()['rootJobTimestamp'];
	if ( $rootTimestamp ) {
		$age = max( 0, $popTime - wfTimestamp( TS_UNIX, $rootTimestamp ) );
		$stats->timing( "jobqueue.pickup_root_age.$jType", 1000 * $age );
	}
	// Track the execution time for jobs
	$stats->timing( "jobqueue.run.$jType", $timeMs );
	// Track RSS increases for jobs (in case of memory leaks)
	if ( $rssStart && $rssEnd ) {
		$stats->updateCount( "jobqueue.rss_delta.$jType", $rssEnd - $rssStart );
	}

	if ( $status === false ) {
		$msg = $job->toString() . " t=$timeMs error={$error}";
		$this->logger->error( $msg );
		$this->debugCallback( $msg );
	} else {
		$msg = $job->toString() . " t=$timeMs good";
		$this->logger->info( $msg );
		$this->debugCallback( $msg );
	}

	return [ 'status' => $status, 'error' => $error, 'timeMs' => $timeMs ];
}
345
/**
 * @return int|null Max memory RSS in kilobytes
 */
private function getMaxRssKb() {
	$usage = wfGetRusage();
	// see https://linux.die.net/man/2/getrusage
	if ( !$usage || !isset( $usage['ru_maxrss'] ) ) {
		return null;
	}

	return (int)$usage['ru_maxrss'];
}
354
/**
 * Compute how long this runner should avoid popping more jobs of this type,
 * based on the job's work item count and the configured items-per-second rate
 *
 * @param Job $job
 * @return int Seconds for this runner to avoid doing more jobs of this type
 * @see $wgJobBackoffThrottling
 */
private function getBackoffTimeToWait( Job $job ) {
	global $wgJobBackoffThrottling;

	$jType = $job->getType();
	// DuplicateJob did no real work, and unconfigured types are never throttled
	if ( $job instanceof DuplicateJob || !isset( $wgJobBackoffThrottling[$jType] ) ) {
		return 0;
	}

	$itemsPerSecond = $wgJobBackoffThrottling[$jType];
	if ( $itemsPerSecond <= 0 ) {
		return 0; // not throttled
	}

	$count = $job->workItemCount();
	if ( $count <= 0 ) {
		return 0;
	}

	$exactSeconds = $count / $itemsPerSecond;
	$seconds = floor( $exactSeconds );
	// Randomized rounding: carry the fractional second with matching probability
	$remainder = $exactSeconds - $seconds;
	if ( mt_rand() / mt_getrandmax() < $remainder ) {
		$seconds += 1;
	}

	return (int)$seconds;
}
385
/**
 * Get the previous backoff expiries from persistent storage
 *
 * Entries whose expiry timestamp has already passed are pruned from the
 * result. On I/O or lock acquisition failure this returns the original
 * $backoffs.
 *
 * @param array $backoffs Map of (job type => UNIX timestamp)
 * @param string $mode Lock wait mode - "wait" or "nowait"
 * @return array Map of (job type => backoff expiry timestamp)
 */
private function loadBackoffs( array $backoffs, $mode = 'wait' ) {
	$file = wfTempDir() . '/mw-runJobs-backoffs.json';
	if ( is_file( $file ) ) {
		$noblock = ( $mode === 'nowait' ) ? LOCK_NB : 0;
		$handle = fopen( $file, 'rb' );
		if ( !$handle ) {
			// The file vanished or became unreadable after the is_file()
			// check; treat it like any other I/O failure
			return $backoffs;
		}
		if ( !flock( $handle, LOCK_SH | $noblock ) ) {
			fclose( $handle );
			return $backoffs; // don't wait on lock
		}
		$content = stream_get_contents( $handle );
		flock( $handle, LOCK_UN );
		fclose( $handle );
		$ctime = microtime( true );
		$cBackoffs = json_decode( $content, true ) ?: [];
		// Drop any backoffs that have already expired
		foreach ( $cBackoffs as $type => $timestamp ) {
			if ( $timestamp < $ctime ) {
				unset( $cBackoffs[$type] );
			}
		}
	} else {
		$cBackoffs = [];
	}

	return $cBackoffs;
}
419
/**
 * Merge the current backoff expiries from persistent storage
 *
 * The $deltas map is set to an empty array on success.
 * On I/O or lock acquisition failure this returns the original $backoffs.
 *
 * @param array $backoffs Map of (job type => UNIX timestamp)
 * @param array $deltas Map of (job type => seconds)
 * @param string $mode Lock wait mode - "wait" or "nowait"
 * @return array The new backoffs account for $backoffs and the latest file data
 */
private function syncBackoffDeltas( array $backoffs, array &$deltas, $mode = 'wait' ) {
	if ( !$deltas ) {
		return $this->loadBackoffs( $backoffs, $mode );
	}

	$noblock = ( $mode === 'nowait' ) ? LOCK_NB : 0;
	$file = wfTempDir() . '/mw-runJobs-backoffs.json';
	// Open with "c" (create, no truncation) rather than "w": "w" would wipe
	// the file at open time, before the exclusive lock is acquired, losing
	// the backoffs persisted by concurrent runners and making the read-merge
	// below a no-op.
	$handle = fopen( $file, 'cb+' );
	if ( !$handle ) {
		return $backoffs; // I/O failure; keep $deltas for a later retry
	}
	if ( !flock( $handle, LOCK_EX | $noblock ) ) {
		fclose( $handle );
		return $backoffs; // don't wait on lock
	}
	$ctime = microtime( true );
	$content = stream_get_contents( $handle );
	$cBackoffs = json_decode( $content, true ) ?: [];
	// Extend unexpired expiries by the deltas; start fresh ones from now
	foreach ( $deltas as $type => $seconds ) {
		$cBackoffs[$type] = isset( $cBackoffs[$type] ) && $cBackoffs[$type] >= $ctime
			? $cBackoffs[$type] + $seconds
			: $ctime + $seconds;
	}
	// Prune entries that have already expired
	foreach ( $cBackoffs as $type => $timestamp ) {
		if ( $timestamp < $ctime ) {
			unset( $cBackoffs[$type] );
		}
	}
	// Rewrite the file in place; rewind first since stream_get_contents()
	// left the pointer at EOF and writing there would null-pad the file
	ftruncate( $handle, 0 );
	rewind( $handle );
	fwrite( $handle, json_encode( $cBackoffs ) );
	flock( $handle, LOCK_UN );
	fclose( $handle );

	$deltas = [];

	return $cBackoffs;
}
465
/**
 * Make sure that this script is not too close to the memory usage limit.
 * It is better to die in between jobs than OOM right in the middle of one.
 * @return bool
 */
private function checkMemoryOK() {
	static $maxBytes = null;
	if ( $maxBytes === null ) {
		// Parse "128M"-style shorthand from memory_limit; 0 disables the check
		$maxBytes = 0;
		$matches = [];
		if ( preg_match( '!^(\d+)(k|m|g|)$!i', ini_get( 'memory_limit' ), $matches ) ) {
			$multipliers = [ 'g' => 1073741824, 'm' => 1048576, 'k' => 1024, '' => 1 ];
			$maxBytes = $matches[1] * $multipliers[strtolower( $matches[2] )];
		}
	}

	$usedBytes = memory_get_usage();
	if ( !$maxBytes || $usedBytes < 0.95 * $maxBytes ) {
		return true;
	}

	$msg = "Detected excessive memory usage ($usedBytes/$maxBytes).";
	$this->debugCallback( $msg );
	$this->logger->error( $msg );

	return false;
}
494
/**
 * Log the job message
 * @param string $msg The message to log
 */
private function debugCallback( $msg ) {
	if ( !$this->debug ) {
		return;
	}
	call_user_func( $this->debug, wfTimestamp( TS_DB ) . " $msg\n" );
}
504
/**
 * Issue a commit on all masters who are currently in a transaction and have
 * made changes to the database. It also supports sometimes waiting for the
 * local wiki's replica DBs to catch up. See the documentation for
 * $wgJobSerialCommitThreshold for more.
 *
 * @param LBFactory $lbFactory
 * @param Job $job
 * @param string $fnameTrxOwner Transaction owner name ("<JobClass>::run")
 * @throws DBError If the serial-commit lock could not be acquired in time
 */
private function commitMasterChanges( LBFactory $lbFactory, Job $job, $fnameTrxOwner ) {
	global $wgJobSerialCommitThreshold;

	$time = false;
	$lb = $lbFactory->getMainLB( wfWikiID() );
	if ( $wgJobSerialCommitThreshold !== false && $lb->getServerCount() > 1 ) {
		// Generally, there is one master connection to the local DB
		$dbwSerial = $lb->getAnyOpenConnection( $lb->getWriterIndex() );
		// We need natively blocking fast locks
		if ( $dbwSerial && $dbwSerial->namedLocksEnqueue() ) {
			$time = $dbwSerial->pendingWriteQueryDuration( $dbwSerial::ESTIMATE_DB_APPLY );
			if ( $time < $wgJobSerialCommitThreshold ) {
				// Estimated write time is below the threshold; no need to serialize
				$dbwSerial = false;
			}
		} else {
			$dbwSerial = false;
		}
	} else {
		// There are no replica DBs or writes are all to foreign DB (we don't handle that)
		$dbwSerial = false;
	}

	if ( !$dbwSerial ) {
		// Fast path: commit in parallel with other runners
		$lbFactory->commitMasterChanges( $fnameTrxOwner );
		return;
	}

	$ms = intval( 1000 * $time );
	$msg = $job->toString() . " COMMIT ENQUEUED [{$ms}ms of writes]";
	$this->logger->info( $msg );
	$this->debugCallback( $msg );

	// Wait for an exclusive lock to commit
	if ( !$dbwSerial->lock( 'jobrunner-serial-commit', __METHOD__, 30 ) ) {
		// This will trigger a rollback in the main loop
		throw new DBError( $dbwSerial, "Timed out waiting on commit queue." );
	}
	// Release the named lock on scope exit, even if waiting/committing throws
	$unlocker = new ScopedCallback( function () use ( $dbwSerial ) {
		$dbwSerial->unlock( 'jobrunner-serial-commit', __METHOD__ );
	} );

	// Wait for the replica DBs to catch up
	$pos = $lb->getMasterPos();
	if ( $pos ) {
		$lb->waitForAll( $pos );
	}

	// Actually commit the DB master changes
	$lbFactory->commitMasterChanges( $fnameTrxOwner );
	ScopedCallback::consume( $unlocker );
}
567 }