Document memory-limit as a possible exit reason
includes/jobqueue/JobRunner.php
<?php
/**
 * Job queue runner utility methods
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 * http://www.gnu.org/copyleft/gpl.html
 *
 * @file
 * @ingroup JobQueue
 */

use MediaWiki\Logger\LoggerFactory;
use Psr\Log\LoggerAwareInterface;
use Psr\Log\LoggerInterface;

/**
 * Job queue runner utility methods
 *
 * @ingroup JobQueue
 * @since 1.24
 */
class JobRunner implements LoggerAwareInterface {
	/** @var callable|null Debug output handler */
	protected $debug;

	/**
	 * @var LoggerInterface $logger
	 */
	protected $logger;

	const MAX_ALLOWED_LAG = 3; // abort if more than this much DB lag is present
	const LAG_CHECK_PERIOD = 1.0; // check slave lag every this many seconds
	const ERROR_BACKOFF_TTL = 1; // seconds to back off a queue due to errors

	/**
	 * @param callable $debug Optional debug output handler
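	 *   (an illustrative, assumed example of such a handler would be a
	 *   closure like function ( $s ) { fwrite( STDERR, $s ); })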
	 */
	public function setDebugHandler( $debug ) {
		$this->debug = $debug;
	}

	/**
	 * @param LoggerInterface $logger
	 * @return void
	 */
	public function setLogger( LoggerInterface $logger ) {
		$this->logger = $logger;
	}

	/**
	 * @param LoggerInterface $logger
	 */
	public function __construct( LoggerInterface $logger = null ) {
		if ( $logger === null ) {
			$logger = LoggerFactory::getInstance( 'runJobs' );
		}
		$this->setLogger( $logger );
	}

	/**
	 * Run jobs of the specified number/type for the specified time
	 *
	 * The response map has a 'jobs' field that lists the status of each job, including:
	 *   - type : the job type
	 *   - status : ok/failed
	 *   - error : any error message string
	 *   - time : the job run time in ms
	 * The response map also has:
	 *   - backoffs : the (job type => seconds) map of backoff times
	 *   - elapsed : the total time spent running tasks in ms
	 *   - reached : the reason the script finished, one of (none-ready, job-limit, time-limit,
	 *     memory-limit)
	 *
	 * This method outputs status information only if a debug handler was set.
	 * Any exceptions are caught and logged, but are not reported as output.
	 *
	 * @param array $options Map of parameters:
	 *   - type : the job type (or false for the default types)
	 *   - maxJobs : maximum number of jobs to run
	 *   - maxTime : maximum time in seconds before stopping
	 *   - throttle : whether to respect job backoff configuration
	 * @return array Summary response that can easily be JSON serialized
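	 *
	 * A minimal usage sketch (illustrative only; the 'refreshLinks' job type
	 * and the option values are assumptions, not taken from this file):
	 * @code
	 * $runner = new JobRunner();
	 * $summary = $runner->run( [
	 *     'type' => 'refreshLinks', // or false for the default job types
	 *     'maxJobs' => 100,
	 *     'maxTime' => 30, // seconds
	 *     'throttle' => true
	 * ] );
	 * // e.g. $summary['reached'] may be 'none-ready', 'job-limit',
	 * // 'time-limit' or 'memory-limit'
	 * @endcode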
	 */
	public function run( array $options ) {
		global $wgJobClasses, $wgTrxProfilerLimits;

		$response = [ 'jobs' => [], 'reached' => 'none-ready' ];

		$type = isset( $options['type'] ) ? $options['type'] : false;
		$maxJobs = isset( $options['maxJobs'] ) ? $options['maxJobs'] : false;
		$maxTime = isset( $options['maxTime'] ) ? $options['maxTime'] : false;
		$noThrottle = isset( $options['throttle'] ) && !$options['throttle'];

		// Bail if job type is invalid
		if ( $type !== false && !isset( $wgJobClasses[$type] ) ) {
			$response['reached'] = 'none-possible';
			return $response;
		}
		// Bail out if DB is in read-only mode
		if ( wfReadOnly() ) {
			$response['reached'] = 'read-only';
			return $response;
		}
		// Bail out if there is too much DB lag.
		// This check should not block as we want to try other wiki queues.
		list( , $maxLag ) = wfGetLB( wfWikiID() )->getMaxLag();
		if ( $maxLag >= self::MAX_ALLOWED_LAG ) {
			$response['reached'] = 'slave-lag-limit';
			return $response;
		}

		// Flush any pending DB writes for sanity
		wfGetLBFactory()->commitAll( __METHOD__ );

		// Catch huge single updates that lead to slave lag
		$trxProfiler = Profiler::instance()->getTransactionProfiler();
		$trxProfiler->setLogger( LoggerFactory::getInstance( 'DBPerformance' ) );
		$trxProfiler->setExpectations( $wgTrxProfilerLimits['JobRunner'], __METHOD__ );

		// Some job types should not run until a certain timestamp
		$backoffs = []; // map of (type => UNIX expiry)
		$backoffDeltas = []; // map of (type => seconds)
		$wait = 'wait'; // block to read backoffs the first time

		$group = JobQueueGroup::singleton();
		$stats = RequestContext::getMain()->getStats();
		$jobsPopped = 0;
		$timeMsTotal = 0;
		$startTime = microtime( true ); // UNIX timestamp when the jobs started running
		$lastCheckTime = 1; // timestamp of last slave check
		do {
			// Sync the persistent backoffs with concurrent runners
			$backoffs = $this->syncBackoffDeltas( $backoffs, $backoffDeltas, $wait );
			$blacklist = $noThrottle ? [] : array_keys( $backoffs );
			$wait = 'nowait'; // less important now

			if ( $type === false ) {
				$job = $group->pop(
					JobQueueGroup::TYPE_DEFAULT,
					JobQueueGroup::USE_CACHE,
					$blacklist
				);
			} elseif ( in_array( $type, $blacklist ) ) {
				$job = false; // requested queue in backoff state
			} else {
				$job = $group->pop( $type ); // job from a single queue
			}

			if ( $job ) { // found a job
				++$jobsPopped;
				$popTime = time();
				$jType = $job->getType();

				// Back off of certain jobs for a while (for throttling and for errors)
				$ttw = $this->getBackoffTimeToWait( $job );
				if ( $ttw > 0 ) {
					// Always add the delta for other runners in case the time running the
					// job negated the backoff for each individually but not collectively.
					$backoffDeltas[$jType] = isset( $backoffDeltas[$jType] )
						? $backoffDeltas[$jType] + $ttw
						: $ttw;
					$backoffs = $this->syncBackoffDeltas( $backoffs, $backoffDeltas, $wait );
				}

				$info = $this->executeJob( $job, $stats, $popTime );
				if ( $info['status'] !== false || !$job->allowRetries() ) {
					$group->ack( $job ); // succeeded or job cannot be retried
				}

				// Back off of certain jobs for a while (for throttling and for errors)
				if ( $info['status'] === false && mt_rand( 0, 49 ) == 0 ) {
					$ttw = max( $ttw, self::ERROR_BACKOFF_TTL ); // too many errors
					$backoffDeltas[$jType] = isset( $backoffDeltas[$jType] )
						? $backoffDeltas[$jType] + $ttw
						: $ttw;
				}

				$response['jobs'][] = [
					'type' => $jType,
					'status' => ( $info['status'] === false ) ? 'failed' : 'ok',
					'error' => $info['error'],
					'time' => $info['timeMs']
				];
				$timeMsTotal += $info['timeMs'];

				// Break out if we hit the job count or wall time limits...
				if ( $maxJobs && $jobsPopped >= $maxJobs ) {
					$response['reached'] = 'job-limit';
					break;
				} elseif ( $maxTime && ( microtime( true ) - $startTime ) > $maxTime ) {
					$response['reached'] = 'time-limit';
					break;
				}

				// Don't let any of the main DB slaves get backed up.
				// This only waits for so long before exiting and letting
				// other wikis in the farm (on different masters) get a chance.
				$timePassed = microtime( true ) - $lastCheckTime;
				if ( $timePassed >= self::LAG_CHECK_PERIOD || $timePassed < 0 ) {
					try {
						wfGetLBFactory()->waitForReplication( [
							'ifWritesSince' => $lastCheckTime,
							'timeout' => self::MAX_ALLOWED_LAG
						] );
					} catch ( DBReplicationWaitError $e ) {
						$response['reached'] = 'slave-lag-limit';
						break;
					}
					$lastCheckTime = microtime( true );
				}
				// Don't let any queue slaves/backups fall behind
				if ( $jobsPopped > 0 && ( $jobsPopped % 100 ) == 0 ) {
					$group->waitForBackups();
				}

				// Bail if near-OOM instead of in a job
				if ( !$this->checkMemoryOK() ) {
					$response['reached'] = 'memory-limit';
					break;
				}
			}
		} while ( $job ); // stop when there are no jobs

		// Sync the persistent backoffs for the next runJobs.php pass
		if ( $backoffDeltas ) {
			$this->syncBackoffDeltas( $backoffs, $backoffDeltas, 'wait' );
		}

		$response['backoffs'] = $backoffs;
		$response['elapsed'] = $timeMsTotal;

		return $response;
	}

	/**
	 * @param Job $job
	 * @param BufferingStatsdDataFactory $stats
	 * @param float $popTime
	 * @return array Map of status/error/timeMs
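	 *   For example (illustrative values only), a failed run might yield
	 *   something like [ 'status' => false, 'error' => 'DBQueryError: ...', 'timeMs' => 1200 ].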
	 */
	private function executeJob( Job $job, $stats, $popTime ) {
		$jType = $job->getType();
		$msg = $job->toString() . " STARTING";
		$this->logger->debug( $msg );
		$this->debugCallback( $msg );

		// Run the job...
		$rssStart = $this->getMaxRssKb();
		$jobStartTime = microtime( true );
		try {
			$status = $job->run();
			$error = $job->getLastError();
			$this->commitMasterChanges( $job );

			DeferredUpdates::doUpdates();
			$this->commitMasterChanges( $job );
		} catch ( Exception $e ) {
			MWExceptionHandler::rollbackMasterChangesAndLog( $e );
			$status = false;
			$error = get_class( $e ) . ': ' . $e->getMessage();
			MWExceptionHandler::logException( $e );
		}
		// Commit all outstanding connections that are in a transaction
		// to get a fresh repeatable read snapshot on every connection.
		// Note that jobs are still responsible for handling slave lag.
		wfGetLBFactory()->commitAll( __METHOD__ );
		// Clear out title cache data from prior snapshots
		LinkCache::singleton()->clear();
		$timeMs = intval( ( microtime( true ) - $jobStartTime ) * 1000 );
		$rssEnd = $this->getMaxRssKb();

		// Record how long jobs wait before getting popped
		$readyTs = $job->getReadyTimestamp();
		if ( $readyTs ) {
			$pickupDelay = max( 0, $popTime - $readyTs );
			$stats->timing( 'jobqueue.pickup_delay.all', 1000 * $pickupDelay );
			$stats->timing( "jobqueue.pickup_delay.$jType", 1000 * $pickupDelay );
		}
		// Record root job age for jobs being run
		$root = $job->getRootJobParams();
		if ( $root['rootJobTimestamp'] ) {
			$age = max( 0, $popTime - wfTimestamp( TS_UNIX, $root['rootJobTimestamp'] ) );
			$stats->timing( "jobqueue.pickup_root_age.$jType", 1000 * $age );
		}
		// Track the execution time for jobs
		$stats->timing( "jobqueue.run.$jType", $timeMs );
		// Track RSS increases for jobs (in case of memory leaks)
		if ( $rssStart && $rssEnd ) {
			$stats->increment( "jobqueue.rss_delta.$jType", $rssEnd - $rssStart );
		}

		if ( $status === false ) {
			$msg = $job->toString() . " t=$timeMs error={$error}";
			$this->logger->error( $msg );
			$this->debugCallback( $msg );
		} else {
			$msg = $job->toString() . " t=$timeMs good";
			$this->logger->info( $msg );
			$this->debugCallback( $msg );
		}

		return [ 'status' => $status, 'error' => $error, 'timeMs' => $timeMs ];
	}

	/**
	 * @return int|null Max memory RSS in kilobytes
	 */
	private function getMaxRssKb() {
		$info = wfGetRusage() ?: [];
		// see http://linux.die.net/man/2/getrusage
		return isset( $info['ru_maxrss'] ) ? (int)$info['ru_maxrss'] : null;
	}

	/**
	 * @param Job $job
	 * @return int Seconds for this runner to avoid doing more jobs of this type
	 * @see $wgJobBackoffThrottling
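	 *
	 * Worked example (the configuration value here is a hypothetical
	 * illustration, not taken from this file): with
	 * $wgJobBackoffThrottling['htmlCacheUpdate'] = 20 items/sec and a popped
	 * job carrying 50 work items, $exactSeconds = 50 / 20 = 2.5, so the
	 * randomized rounding below yields a backoff of 2 or 3 seconds
	 * (3 with probability 0.5).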
	 */
	private function getBackoffTimeToWait( Job $job ) {
		global $wgJobBackoffThrottling;

		if ( !isset( $wgJobBackoffThrottling[$job->getType()] ) ||
			$job instanceof DuplicateJob // no work was done
		) {
			return 0; // not throttled
		}

		$itemsPerSecond = $wgJobBackoffThrottling[$job->getType()];
		if ( $itemsPerSecond <= 0 ) {
			return 0; // not throttled
		}

		$seconds = 0;
		if ( $job->workItemCount() > 0 ) {
			$exactSeconds = $job->workItemCount() / $itemsPerSecond;
			// use randomized rounding
			$seconds = floor( $exactSeconds );
			$remainder = $exactSeconds - $seconds;
			$seconds += ( mt_rand() / mt_getrandmax() < $remainder ) ? 1 : 0;
		}

		return (int)$seconds;
	}

	/**
	 * Get the previous backoff expiries from persistent storage
	 * On I/O or lock acquisition failure this returns the original $backoffs.
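	 *
	 * The file stores a flat JSON object of (job type => UNIX expiry); a
	 * plausible example of its contents (illustrative values only):
	 * @code
	 * { "htmlCacheUpdate": 1466703127.3, "enotifNotify": 1466703102.8 }
	 * @endcode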
	 *
	 * @param array $backoffs Map of (job type => UNIX timestamp)
	 * @param string $mode Lock wait mode - "wait" or "nowait"
	 * @return array Map of (job type => backoff expiry timestamp)
	 */
	private function loadBackoffs( array $backoffs, $mode = 'wait' ) {
		$file = wfTempDir() . '/mw-runJobs-backoffs.json';
		if ( is_file( $file ) ) {
			$noblock = ( $mode === 'nowait' ) ? LOCK_NB : 0;
			$handle = fopen( $file, 'rb' );
			if ( !flock( $handle, LOCK_SH | $noblock ) ) {
				fclose( $handle );
				return $backoffs; // don't wait on lock
			}
			$content = stream_get_contents( $handle );
			flock( $handle, LOCK_UN );
			fclose( $handle );
			$ctime = microtime( true );
			$cBackoffs = json_decode( $content, true ) ?: [];
			foreach ( $cBackoffs as $type => $timestamp ) {
				if ( $timestamp < $ctime ) {
					unset( $cBackoffs[$type] );
				}
			}
		} else {
			$cBackoffs = [];
		}

		return $cBackoffs;
	}

	/**
	 * Merge the current backoff expiries from persistent storage
	 *
	 * The $deltas map is set to an empty array on success.
	 * On I/O or lock acquisition failure this returns the original $backoffs.
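	 *
	 * For example (illustrative values only): if the file already holds an
	 * expiry of 1466703130 for 'enotifNotify' and $deltas adds 5 seconds,
	 * the stored expiry becomes 1466703135 when it is still in the future,
	 * or (now + 5) when it has already lapsed.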
	 *
	 * @param array $backoffs Map of (job type => UNIX timestamp)
	 * @param array $deltas Map of (job type => seconds)
	 * @param string $mode Lock wait mode - "wait" or "nowait"
	 * @return array The new backoffs, accounting for $backoffs and the latest file data
	 */
	private function syncBackoffDeltas( array $backoffs, array &$deltas, $mode = 'wait' ) {
		if ( !$deltas ) {
			return $this->loadBackoffs( $backoffs, $mode );
		}

		$noblock = ( $mode === 'nowait' ) ? LOCK_NB : 0;
		$file = wfTempDir() . '/mw-runJobs-backoffs.json';
		$handle = fopen( $file, 'wb+' );
		if ( !flock( $handle, LOCK_EX | $noblock ) ) {
			fclose( $handle );
			return $backoffs; // don't wait on lock
		}
		$ctime = microtime( true );
		$content = stream_get_contents( $handle );
		$cBackoffs = json_decode( $content, true ) ?: [];
		foreach ( $deltas as $type => $seconds ) {
			$cBackoffs[$type] = isset( $cBackoffs[$type] ) && $cBackoffs[$type] >= $ctime
				? $cBackoffs[$type] + $seconds
				: $ctime + $seconds;
		}
		foreach ( $cBackoffs as $type => $timestamp ) {
			if ( $timestamp < $ctime ) {
				unset( $cBackoffs[$type] );
			}
		}
		ftruncate( $handle, 0 );
		fwrite( $handle, json_encode( $cBackoffs ) );
		flock( $handle, LOCK_UN );
		fclose( $handle );

		$deltas = [];

		return $cBackoffs;
	}

	/**
	 * Make sure that this script is not too close to the memory usage limit.
	 * It is better to die in between jobs than OOM right in the middle of one.
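	 *
	 * Worked example (an assumed php.ini value, not taken from this file):
	 * with memory_limit = "50M", $maxBytes = 50 * 1048576 = 52428800, so the
	 * runner reports a memory-limit stop once memory_get_usage() reaches
	 * 0.95 * 52428800 = 49807360 bytes.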
	 * @return bool
	 */
	private function checkMemoryOK() {
		static $maxBytes = null;
		if ( $maxBytes === null ) {
			$m = [];
			if ( preg_match( '!^(\d+)(k|m|g|)$!i', ini_get( 'memory_limit' ), $m ) ) {
				list( , $num, $unit ) = $m;
				$conv = [ 'g' => 1073741824, 'm' => 1048576, 'k' => 1024, '' => 1 ];
				$maxBytes = $num * $conv[strtolower( $unit )];
			} else {
				$maxBytes = 0;
			}
		}
		$usedBytes = memory_get_usage();
		if ( $maxBytes && $usedBytes >= 0.95 * $maxBytes ) {
			$msg = "Detected excessive memory usage ($usedBytes/$maxBytes).";
			$this->debugCallback( $msg );
			$this->logger->error( $msg );

			return false;
		}

		return true;
	}

	/**
	 * Log the job message
	 * @param string $msg The message to log
	 */
	private function debugCallback( $msg ) {
		if ( $this->debug ) {
			call_user_func_array( $this->debug, [ wfTimestamp( TS_DB ) . " $msg\n" ] );
		}
	}

	/**
	 * Issue a commit on all masters that are currently in a transaction and
	 * have made changes to the database. It may also wait for the local
	 * wiki's slaves to catch up; see the documentation for
	 * $wgJobSerialCommitThreshold for details.
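	 *
	 * Illustrative configuration (a hypothetical value, not from this file):
	 * on a wiki with multiple DB servers, setting
	 * $wgJobSerialCommitThreshold = 0.5 makes jobs whose pending master
	 * writes took at least 0.5 seconds commit one at a time behind the
	 * 'jobrunner-serial-commit' lock, while cheaper jobs commit immediately.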
	 *
	 * @param Job $job
	 * @throws DBError
	 */
	private function commitMasterChanges( Job $job ) {
		global $wgJobSerialCommitThreshold;

		$lb = wfGetLB( wfWikiID() );
		if ( $wgJobSerialCommitThreshold !== false && $lb->getServerCount() > 1 ) {
			// Generally, there is one master connection to the local DB
			$dbwSerial = $lb->getAnyOpenConnection( $lb->getWriterIndex() );
		} else {
			$dbwSerial = false;
		}

		if ( !$dbwSerial
			|| !$dbwSerial->namedLocksEnqueue()
			|| $dbwSerial->pendingWriteQueryDuration() < $wgJobSerialCommitThreshold
		) {
			// Writes are all to foreign DBs, named locks don't form queues,
			// or $wgJobSerialCommitThreshold is not reached; commit changes now
			wfGetLBFactory()->commitMasterChanges( __METHOD__ );
			return;
		}

		$ms = intval( 1000 * $dbwSerial->pendingWriteQueryDuration() );
		$msg = $job->toString() . " COMMIT ENQUEUED [{$ms}ms of writes]";
		$this->logger->warning( $msg );
		$this->debugCallback( $msg );

		// Wait for an exclusive lock to commit
		if ( !$dbwSerial->lock( 'jobrunner-serial-commit', __METHOD__, 30 ) ) {
			// This will trigger a rollback in the main loop
			throw new DBError( $dbwSerial, "Timed out waiting on commit queue." );
		}
		// Wait for the generic slave to catch up
		$pos = $lb->getMasterPos();
		if ( $pos ) {
			$lb->waitForOne( $pos );
		}

		$fname = __METHOD__;
		// Re-ping all masters with transactions. This throws DBError if some
		// connection died while waiting on locks/slaves, triggering a rollback.
		wfGetLBFactory()->forEachLB( function ( LoadBalancer $lb ) use ( $fname ) {
			$lb->forEachOpenConnection( function ( IDatabase $conn ) use ( $fname ) {
				if ( $conn->writesOrCallbacksPending() ) {
					$conn->query( "SELECT 1", $fname );
				}
			} );
		} );

		// Actually commit the DB master changes
		wfGetLBFactory()->commitMasterChanges( __METHOD__ );

		// Release the lock
		$dbwSerial->unlock( 'jobrunner-serial-commit', __METHOD__ );
	}
}