1 <?php
2 /**
3 * Job queue runner utility methods
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 * http://www.gnu.org/copyleft/gpl.html
19 *
20 * @file
21 * @ingroup JobQueue
22 */
23
24 use MediaWiki\MediaWikiServices;
25 use MediaWiki\Logger\LoggerFactory;
26 use Psr\Log\LoggerAwareInterface;
27 use Psr\Log\LoggerInterface;
28
29 /**
30 * Job queue runner utility methods
31 *
32 * @ingroup JobQueue
33 * @since 1.24
34 */
35 class JobRunner implements LoggerAwareInterface {
36 /** @var callable|null Debug output handler */
37 protected $debug;
38
39 /**
40 * @var LoggerInterface $logger
41 */
42 protected $logger;
43
44 const MAX_ALLOWED_LAG = 3; // abort if more than this many seconds of DB slave lag is present
45 const LAG_CHECK_PERIOD = 1.0; // check slave lag again after this many seconds
46 const ERROR_BACKOFF_TTL = 1; // seconds to back off a queue due to errors
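// Note: MAX_ALLOWED_LAG also serves as the 'timeout' passed to waitForReplication()
// in run(), so it bounds both the tolerated lag and the time spent waiting on it.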
47
48 /**
49 * @param callable $debug Optional debug output handler
50 */
51 public function setDebugHandler( $debug ) {
52 $this->debug = $debug;
53 }
54
55 /**
56 * @param LoggerInterface $logger
57 * @return void
58 */
59 public function setLogger( LoggerInterface $logger ) {
60 $this->logger = $logger;
61 }
62
63 /**
64 * @param LoggerInterface|null $logger
65 */
66 public function __construct( LoggerInterface $logger = null ) {
67 if ( $logger === null ) {
68 $logger = LoggerFactory::getInstance( 'runJobs' );
69 }
70 $this->setLogger( $logger );
71 }
72
73 /**
74 * Run jobs of the specified number/type for the specified time
75 *
76 * The response map has a 'jobs' field that lists the status of each job, including:
77 * - type : the job type
78 * - status : ok/failed
79 * - error : any error message string
80 * - time : the job run time in ms
81 * The response map also has:
82 * - backoffs : the (job type => seconds) map of backoff times
83 * - elapsed : the total time spent running tasks in ms
84 * - reached : the reason the script finished, one of (none-ready, none-possible,
85 * read-only, slave-lag-limit, job-limit, time-limit, memory-limit)
86 *
87 * This method outputs status information only if a debug handler was set.
88 * Any exceptions are caught and logged, but are not reported as output.
89 *
90 * @param array $options Map of parameters:
91 * - type : the job type (or false for the default types)
92 * - maxJobs : maximum number of jobs to run
93 * - maxTime : maximum time in seconds before stopping
94 * - throttle : whether to respect job backoff configuration
95 * @return array Summary response that can easily be JSON serialized
96 */
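// Illustrative usage sketch (the option values below are hypothetical, not
// defaults of this class): a maintenance script or web runner might call:
//
//     $runner = new JobRunner();
//     $summary = $runner->run( [
//         'type' => 'refreshLinks', // or false to use the default job types
//         'maxJobs' => 100,
//         'maxTime' => 30,
//         'throttle' => true,
//     ] );
//     echo json_encode( $summary ) . "\n";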
97 public function run( array $options ) {
98 global $wgJobClasses, $wgTrxProfilerLimits;
99
100 $response = [ 'jobs' => [], 'reached' => 'none-ready' ];
101
102 $type = isset( $options['type'] ) ? $options['type'] : false;
103 $maxJobs = isset( $options['maxJobs'] ) ? $options['maxJobs'] : false;
104 $maxTime = isset( $options['maxTime'] ) ? $options['maxTime'] : false;
105 $noThrottle = isset( $options['throttle'] ) && !$options['throttle'];
106
107 // Bail if job type is invalid
108 if ( $type !== false && !isset( $wgJobClasses[$type] ) ) {
109 $response['reached'] = 'none-possible';
110 return $response;
111 }
112 // Bail out if DB is in read-only mode
113 if ( wfReadOnly() ) {
114 $response['reached'] = 'read-only';
115 return $response;
116 }
117 // Bail out if there is too much DB lag.
118 // This check should not block as we want to try other wiki queues.
119 list( , $maxLag ) = wfGetLB( wfWikiID() )->getMaxLag();
120 if ( $maxLag >= self::MAX_ALLOWED_LAG ) {
121 $response['reached'] = 'slave-lag-limit';
122 return $response;
123 }
124
125 // Flush any pending DB writes for sanity
126 $lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
127 $lbFactory->commitAll( __METHOD__ );
128
129 // Catch huge single updates that lead to slave lag
130 $trxProfiler = Profiler::instance()->getTransactionProfiler();
131 $trxProfiler->setLogger( LoggerFactory::getInstance( 'DBPerformance' ) );
132 $trxProfiler->setExpectations( $wgTrxProfilerLimits['JobRunner'], __METHOD__ );
133
134 // Some job types should not run until a certain timestamp
135 $backoffs = []; // map of (type => UNIX expiry)
136 $backoffDeltas = []; // map of (type => seconds)
137 $wait = 'wait'; // block to read backoffs the first time
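// Backoff state is shared between runner processes through a JSON file in the
// temp directory (see loadBackoffs() and syncBackoffDeltas() below), so a job
// type throttled by one runner is also skipped by concurrent runners.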
138
139 $group = JobQueueGroup::singleton();
140 $stats = RequestContext::getMain()->getStats();
141 $jobsPopped = 0;
142 $timeMsTotal = 0;
143 $startTime = microtime( true ); // UNIX timestamp when the job loop started
144 $lastCheckTime = 1; // timestamp of last slave check
145 do {
146 // Sync the persistent backoffs with concurrent runners
147 $backoffs = $this->syncBackoffDeltas( $backoffs, $backoffDeltas, $wait );
148 $blacklist = $noThrottle ? [] : array_keys( $backoffs );
149 $wait = 'nowait'; // don't block on the backoff file lock from now on
150
151 if ( $type === false ) {
152 $job = $group->pop(
153 JobQueueGroup::TYPE_DEFAULT,
154 JobQueueGroup::USE_CACHE,
155 $blacklist
156 );
157 } elseif ( in_array( $type, $blacklist ) ) {
158 $job = false; // requested queue in backoff state
159 } else {
160 $job = $group->pop( $type ); // job from a single queue
161 }
162
163 if ( $job ) { // found a job
164 ++$jobsPopped;
165 $popTime = time();
166 $jType = $job->getType();
167
168 WebRequest::overrideRequestId( $job->getRequestId() );
169
170 // Back off this job type for a while if throttling applies (see $wgJobBackoffThrottling)
171 $ttw = $this->getBackoffTimeToWait( $job );
172 if ( $ttw > 0 ) {
173 // Always record the delta for other runners; even if the time spent running
174 // each job outlasts its own backoff, the backoffs still add up collectively.
175 $backoffDeltas[$jType] = isset( $backoffDeltas[$jType] )
176 ? $backoffDeltas[$jType] + $ttw
177 : $ttw;
178 $backoffs = $this->syncBackoffDeltas( $backoffs, $backoffDeltas, $wait );
179 }
180
181 $lbFactory->commitMasterChanges( __METHOD__ ); // flush any JobQueueDB writes
182 $info = $this->executeJob( $job, $stats, $popTime );
183 if ( $info['status'] !== false || !$job->allowRetries() ) {
184 $group->ack( $job ); // succeeded or job cannot be retried
185 $lbFactory->commitMasterChanges( __METHOD__ ); // flush any JobQueueDB writes
186 }
187
188 // On failure, occasionally back off this job type (roughly 1 in 50 failed jobs)
189 if ( $info['status'] === false && mt_rand( 0, 49 ) == 0 ) {
190 $ttw = max( $ttw, self::ERROR_BACKOFF_TTL ); // too many errors
191 $backoffDeltas[$jType] = isset( $backoffDeltas[$jType] )
192 ? $backoffDeltas[$jType] + $ttw
193 : $ttw;
194 }
195
196 $response['jobs'][] = [
197 'type' => $jType,
198 'status' => ( $info['status'] === false ) ? 'failed' : 'ok',
199 'error' => $info['error'],
200 'time' => $info['timeMs']
201 ];
202 $timeMsTotal += $info['timeMs'];
203
204 // Break out if we hit the job count or wall time limits...
205 if ( $maxJobs && $jobsPopped >= $maxJobs ) {
206 $response['reached'] = 'job-limit';
207 break;
208 } elseif ( $maxTime && ( microtime( true ) - $startTime ) > $maxTime ) {
209 $response['reached'] = 'time-limit';
210 break;
211 }
212
213 // Don't let any of the main DB slaves get backed up.
214 // This only waits for so long before exiting and letting
215 // other wikis in the farm (on different masters) get a chance.
216 $timePassed = microtime( true ) - $lastCheckTime;
217 if ( $timePassed >= self::LAG_CHECK_PERIOD || $timePassed < 0 ) {
218 try {
219 $lbFactory->waitForReplication( [
220 'ifWritesSince' => $lastCheckTime,
221 'timeout' => self::MAX_ALLOWED_LAG
222 ] );
223 } catch ( DBReplicationWaitError $e ) {
224 $response['reached'] = 'slave-lag-limit';
225 break;
226 }
227 $lastCheckTime = microtime( true );
228 }
229 // Don't let any queue slaves/backups fall behind
230 if ( $jobsPopped > 0 && ( $jobsPopped % 100 ) == 0 ) {
231 $group->waitForBackups();
232 }
233
234 // Bail out between jobs if memory is nearly exhausted, rather than OOMing mid-job
235 if ( !$this->checkMemoryOK() ) {
236 $response['reached'] = 'memory-limit';
237 break;
238 }
239 }
240 } while ( $job ); // stop when there are no jobs
241
242 // Sync the persistent backoffs for the next runJobs.php pass
243 if ( $backoffDeltas ) {
244 $this->syncBackoffDeltas( $backoffs, $backoffDeltas, 'wait' );
245 }
246
247 $response['backoffs'] = $backoffs;
248 $response['elapsed'] = $timeMsTotal;
249
250 return $response;
251 }
252
253 /**
254 * @param Job $job
255 * @param BufferingStatsdDataFactory $stats
256 * @param float $popTime
257 * @return array Map of status/error/timeMs
258 */
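// Besides running the job, this method emits statsd timing metrics:
// jobqueue.pickup_delay.all, jobqueue.pickup_delay.<type>,
// jobqueue.pickup_root_age.<type> and jobqueue.run.<type>, plus a
// jobqueue.rss_delta.<type> counter for RSS growth.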
259 private function executeJob( Job $job, $stats, $popTime ) {
260 $jType = $job->getType();
261 $msg = $job->toString() . " STARTING";
262 $this->logger->debug( $msg );
263 $this->debugCallback( $msg );
264 $lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
265
266 // Run the job...
267 $rssStart = $this->getMaxRssKb();
268 $jobStartTime = microtime( true );
269 try {
270 $status = $job->run();
271 $error = $job->getLastError();
272 $this->commitMasterChanges( $job );
273
274 DeferredUpdates::doUpdates();
275 $this->commitMasterChanges( $job );
276 } catch ( Exception $e ) {
277 MWExceptionHandler::rollbackMasterChangesAndLog( $e );
278 $status = false;
279 $error = get_class( $e ) . ': ' . $e->getMessage();
280 MWExceptionHandler::logException( $e );
281 }
282 // Always attempt to call teardown(), even if the job threw an exception.
283 try {
284 $job->teardown();
285 } catch ( Exception $e ) {
286 MWExceptionHandler::logException( $e );
287 }
288
289 // Commit all outstanding connections that are in a transaction
290 // to get a fresh repeatable read snapshot on every connection.
291 // Note that jobs are still responsible for handling slave lag.
292 $lbFactory->commitAll( __METHOD__ );
293 // Clear out title cache data from prior snapshots
294 LinkCache::singleton()->clear();
295 $timeMs = intval( ( microtime( true ) - $jobStartTime ) * 1000 );
296 $rssEnd = $this->getMaxRssKb();
297
298 // Record how long jobs wait before getting popped
299 $readyTs = $job->getReadyTimestamp();
300 if ( $readyTs ) {
301 $pickupDelay = max( 0, $popTime - $readyTs );
302 $stats->timing( 'jobqueue.pickup_delay.all', 1000 * $pickupDelay );
303 $stats->timing( "jobqueue.pickup_delay.$jType", 1000 * $pickupDelay );
304 }
305 // Record root job age for jobs being run
306 $rootTimestamp = $job->getRootJobParams()['rootJobTimestamp'];
307 if ( $rootTimestamp ) {
308 $age = max( 0, $popTime - wfTimestamp( TS_UNIX, $rootTimestamp ) );
309 $stats->timing( "jobqueue.pickup_root_age.$jType", 1000 * $age );
310 }
311 // Track the execution time for jobs
312 $stats->timing( "jobqueue.run.$jType", $timeMs );
313 // Track RSS increases for jobs (in case of memory leaks)
314 if ( $rssStart && $rssEnd ) {
315 $stats->updateCount( "jobqueue.rss_delta.$jType", $rssEnd - $rssStart );
316 }
317
318 if ( $status === false ) {
319 $msg = $job->toString() . " t=$timeMs error={$error}";
320 $this->logger->error( $msg );
321 $this->debugCallback( $msg );
322 } else {
323 $msg = $job->toString() . " t=$timeMs good";
324 $this->logger->info( $msg );
325 $this->debugCallback( $msg );
326 }
327
328 return [ 'status' => $status, 'error' => $error, 'timeMs' => $timeMs ];
329 }
330
331 /**
332 * @return int|null Max memory RSS in kilobytes
333 */
334 private function getMaxRssKb() {
335 $info = wfGetRusage() ?: [];
336 // see http://linux.die.net/man/2/getrusage
337 return isset( $info['ru_maxrss'] ) ? (int)$info['ru_maxrss'] : null;
338 }
339
340 /**
341 * @param Job $job
342 * @return int Seconds for this runner to avoid doing more jobs of this type
343 * @see $wgJobBackoffThrottling
344 */
345 private function getBackoffTimeToWait( Job $job ) {
346 global $wgJobBackoffThrottling;
347
348 if ( !isset( $wgJobBackoffThrottling[$job->getType()] ) ||
349 $job instanceof DuplicateJob // no work was done
350 ) {
351 return 0; // not throttled
352 }
353
354 $itemsPerSecond = $wgJobBackoffThrottling[$job->getType()];
355 if ( $itemsPerSecond <= 0 ) {
356 return 0; // not throttled
357 }
358
359 $seconds = 0;
360 if ( $job->workItemCount() > 0 ) {
361 $exactSeconds = $job->workItemCount() / $itemsPerSecond;
362 // use randomized rounding
363 $seconds = floor( $exactSeconds );
364 $remainder = $exactSeconds - $seconds;
365 $seconds += ( mt_rand() / mt_getrandmax() < $remainder ) ? 1 : 0;
366 }
367
368 return (int)$seconds;
369 }
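// Illustrative only: $wgJobBackoffThrottling maps job types to a target rate in
// work items per second; the types and rates below are hypothetical examples,
// not shipped defaults.
//
//     $wgJobBackoffThrottling = [
//         'htmlCacheUpdate' => 50, // ~50 work items per second per runner
//         'enotifNotify' => 10,
//     ];
//
// A job with workItemCount() of 120 against a 50/s limit yields 2.4 seconds of
// backoff; the randomized rounding above returns 2 sixty percent of the time
// and 3 forty percent of the time, keeping the average at 2.4.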
370
371 /**
372 * Get the previous backoff expiries from persistent storage
373 * On I/O or lock acquisition failure this returns the original $backoffs.
374 *
375 * @param array $backoffs Map of (job type => UNIX timestamp)
376 * @param string $mode Lock wait mode - "wait" or "nowait"
377 * @return array Map of (job type => backoff expiry timestamp)
378 */
379 private function loadBackoffs( array $backoffs, $mode = 'wait' ) {
380 $file = wfTempDir() . '/mw-runJobs-backoffs.json';
381 if ( is_file( $file ) ) {
382 $noblock = ( $mode === 'nowait' ) ? LOCK_NB : 0;
383 $handle = fopen( $file, 'rb' );
384 if ( !flock( $handle, LOCK_SH | $noblock ) ) {
385 fclose( $handle );
386 return $backoffs; // don't wait on lock
387 }
388 $content = stream_get_contents( $handle );
389 flock( $handle, LOCK_UN );
390 fclose( $handle );
391 $ctime = microtime( true );
392 $cBackoffs = json_decode( $content, true ) ?: [];
393 foreach ( $cBackoffs as $type => $timestamp ) {
394 if ( $timestamp < $ctime ) {
395 unset( $cBackoffs[$type] );
396 }
397 }
398 } else {
399 $cBackoffs = [];
400 }
401
402 return $cBackoffs;
403 }
404
405 /**
406 * Merge the current backoff expiries from persistent storage
407 *
408 * The $deltas map is set to an empty array on success.
409 * On I/O or lock acquisition failure this returns the original $backoffs.
410 *
411 * @param array $backoffs Map of (job type => UNIX timestamp)
412 * @param array $deltas Map of (job type => seconds)
413 * @param string $mode Lock wait mode - "wait" or "nowait"
414 * @return array The new backoff expiries, accounting for $backoffs and the latest file data
415 */
416 private function syncBackoffDeltas( array $backoffs, array &$deltas, $mode = 'wait' ) {
417 if ( !$deltas ) {
418 return $this->loadBackoffs( $backoffs, $mode );
419 }
420
421 $noblock = ( $mode === 'nowait' ) ? LOCK_NB : 0;
422 $file = wfTempDir() . '/mw-runJobs-backoffs.json';
423 $handle = fopen( $file, 'cb+' ); // 'c+' rather than 'w+' so existing backoff data is not truncated before the read below
424 if ( !flock( $handle, LOCK_EX | $noblock ) ) {
425 fclose( $handle );
426 return $backoffs; // don't wait on lock
427 }
428 $ctime = microtime( true );
429 $content = stream_get_contents( $handle );
430 $cBackoffs = json_decode( $content, true ) ?: [];
431 foreach ( $deltas as $type => $seconds ) {
432 $cBackoffs[$type] = isset( $cBackoffs[$type] ) && $cBackoffs[$type] >= $ctime
433 ? $cBackoffs[$type] + $seconds
434 : $ctime + $seconds;
435 }
436 foreach ( $cBackoffs as $type => $timestamp ) {
437 if ( $timestamp < $ctime ) {
438 unset( $cBackoffs[$type] );
439 }
440 }
441 ftruncate( $handle, 0 );
442 fwrite( $handle, json_encode( $cBackoffs ) );
443 flock( $handle, LOCK_UN );
444 fclose( $handle );
445
446 $deltas = [];
447
448 return $cBackoffs;
449 }
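// Illustrative only: the shared backoff file is plain JSON mapping job types to
// UNIX expiry timestamps, along the lines of (hypothetical values):
//
//     { "htmlCacheUpdate": 1467381023, "enotifNotify": 1467381005 }
//
// Expired entries are pruned on both the read path (loadBackoffs) and the write
// path above.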
450
451 /**
452 * Make sure that this script is not too close to the memory usage limit.
453 * It is better to die in between jobs than OOM right in the middle of one.
454 * @return bool
455 */
456 private function checkMemoryOK() {
457 static $maxBytes = null;
458 if ( $maxBytes === null ) {
459 $m = [];
460 if ( preg_match( '!^(\d+)(k|m|g|)$!i', ini_get( 'memory_limit' ), $m ) ) {
461 list( , $num, $unit ) = $m;
462 $conv = [ 'g' => 1073741824, 'm' => 1048576, 'k' => 1024, '' => 1 ];
463 $maxBytes = $num * $conv[strtolower( $unit )];
464 } else {
465 $maxBytes = 0;
466 }
467 }
468 $usedBytes = memory_get_usage();
469 if ( $maxBytes && $usedBytes >= 0.95 * $maxBytes ) {
470 $msg = "Detected excessive memory usage ($usedBytes/$maxBytes).";
471 $this->debugCallback( $msg );
472 $this->logger->error( $msg );
473
474 return false;
475 }
476
477 return true;
478 }
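// Note: the memory_limit regexp above accepts php.ini shorthand such as "128M"
// or "1G". An unlimited value of "-1" does not match, leaving $maxBytes at 0,
// so the check is effectively disabled in that case.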
479
480 /**
481 * Pass the message to the debug output handler, if one is set
482 * @param string $msg The message to log
483 */
484 private function debugCallback( $msg ) {
485 if ( $this->debug ) {
486 call_user_func_array( $this->debug, [ wfTimestamp( TS_DB ) . " $msg\n" ] );
487 }
488 }
489
490 /**
491 * Issue a commit on all masters that are currently in a transaction and have
492 * made changes to the database. Depending on $wgJobSerialCommitThreshold, it
493 * may also wait for the local wiki's slaves to catch up before committing.
494 * See the documentation for $wgJobSerialCommitThreshold for more.
495 *
496 * @param Job $job
497 * @throws DBError
498 */
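// When the local wiki has multiple DB servers, named locks form queues, and the
// pending write time reaches $wgJobSerialCommitThreshold, commits are serialized
// through the named lock 'jobrunner-serial-commit' and the runner waits for the
// slaves to catch up to the master position before committing, which throttles
// large write bursts from jobs.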
499 private function commitMasterChanges( Job $job ) {
500 global $wgJobSerialCommitThreshold;
501
502 $lb = wfGetLB( wfWikiID() );
503 if ( $wgJobSerialCommitThreshold !== false && $lb->getServerCount() > 1 ) {
504 // Generally, there is one master connection to the local DB
505 $dbwSerial = $lb->getAnyOpenConnection( $lb->getWriterIndex() );
506 } else {
507 $dbwSerial = false;
508 }
509
510 if ( !$dbwSerial
511 || !$dbwSerial->namedLocksEnqueue()
512 || $dbwSerial->pendingWriteQueryDuration() < $wgJobSerialCommitThreshold
513 ) {
514 // Writes are all to foreign DBs, named locks don't form queues,
515 // or $wgJobSerialCommitThreshold is not reached; commit changes now
516 wfGetLBFactory()->commitMasterChanges( __METHOD__ );
517 return;
518 }
519
520 $ms = intval( 1000 * $dbwSerial->pendingWriteQueryDuration() );
521 $msg = $job->toString() . " COMMIT ENQUEUED [{$ms}ms of writes]";
522 $this->logger->info( $msg );
523 $this->debugCallback( $msg );
524
525 // Wait for an exclusive lock to commit
526 if ( !$dbwSerial->lock( 'jobrunner-serial-commit', __METHOD__, 30 ) ) {
527 // This will trigger a rollback in the main loop
528 throw new DBError( $dbwSerial, "Timed out waiting on commit queue." );
529 }
530 // Wait for the slave DBs to catch up
531 $pos = $lb->getMasterPos();
532 if ( $pos ) {
533 $lb->waitForAll( $pos );
534 }
535
536 // Actually commit the DB master changes
537 wfGetLBFactory()->commitMasterChanges( __METHOD__ );
538
539 // Release the lock
540 $dbwSerial->unlock( 'jobrunner-serial-commit', __METHOD__ );
541 }
542 }