// may still be a wikipage redirect to another article or URL.
$article = $this->initializeArticle();
if ( is_object( $article ) ) {
- $url = $request->getFullRequestURL(); // requested URL
- if (
- $request->getMethod() === 'GET' &&
- $url === $article->getTitle()->getCanonicalURL() &&
- $article->checkTouched() &&
- $output->checkLastModified( $article->getTouched() )
- ) {
- wfDebug( __METHOD__ . ": done 304\n" );
- return;
- }
$this->performAction( $article, $requestTitle );
} elseif ( is_string( $article ) ) {
$output->redirect( $article );
/**
* @see MediaWiki::preOutputCommit()
+ * @param callable|null $postCommitWork Forwarded to MediaWiki::preOutputCommit() [default: null]
* @since 1.26
*/
- public function doPreOutputCommit() {
- self::preOutputCommit( $this->context );
+ public function doPreOutputCommit( callable $postCommitWork = null ) {
+ self::preOutputCommit( $this->context, $postCommitWork );
}
/**
* the user can receive a response (in case commit fails)
*
* @param IContextSource $context
+ * @param callable|null $postCommitWork Callback run by LBFactory::shutdown() [default: null]
* @since 1.27
*/
- public static function preOutputCommit( IContextSource $context ) {
+ public static function preOutputCommit(
+ IContextSource $context, callable $postCommitWork = null
+ ) {
// Either all DBs should commit or none
ignore_user_abort( true );
$config = $context->getConfig();
+ $request = $context->getRequest();
+ $output = $context->getOutput();
+ $lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
- $factory = wfGetLBFactory();
// Commit all changes
- $factory->commitMasterChanges(
+ $lbFactory->commitMasterChanges(
__METHOD__,
// Abort if any transaction was too big
[ 'maxWriteDuration' => $config->get( 'MaxUserDBWriteDuration' ) ]
);
- // Record ChronologyProtector positions
- $factory->shutdown();
- wfDebug( __METHOD__ . ': all transactions committed' );
+ wfDebug( __METHOD__ . ': primary transaction round committed' );
+ // Run updates that need to block the user or affect output (this is the last chance)
DeferredUpdates::doUpdates( 'enqueue', DeferredUpdates::PRESEND );
wfDebug( __METHOD__ . ': pre-send deferred updates completed' );
+ // Decide when clients block on ChronologyProtector DB position writes
+ $urlDomainDistance = (
+ $request->wasPosted() &&
+ $output->getRedirect() &&
+ $lbFactory->hasOrMadeRecentMasterChanges( INF )
+ ) ? self::getUrlDomainDistance( $output->getRedirect(), $context ) : false;
+
+ if ( $urlDomainDistance === 'local' || $urlDomainDistance === 'remote' ) {
+ // OutputPage::output() will be fast; $postCommitWork will not be useful for
+ // masking the latency of syncing DB positions across all datacenters synchronously.
+ // Instead, make use of the time the client takes to follow the redirect.
+ $flags = $lbFactory::SHUTDOWN_CHRONPROT_ASYNC;
+ $cpPosTime = microtime( true );
+ // Client's next request should see 1+ positions with this DBMasterPos::asOf() time
+ if ( $urlDomainDistance === 'local' ) {
+ // Client will stay on this domain, so set an unobtrusive cookie
+ $expires = time() + ChronologyProtector::POSITION_TTL;
+ $options = [ 'prefix' => '' ];
+ $request->response()->setCookie( 'cpPosTime', $cpPosTime, $expires, $options );
+ } else {
+ // Cookies may not work across wiki domains, so use a URL parameter
+ $safeUrl = $lbFactory->appendPreShutdownTimeAsQuery(
+ $output->getRedirect(),
+ $cpPosTime
+ );
+ $output->redirect( $safeUrl );
+ }
+ } else {
+ // OutputPage::output() is fairly slow; run it in $postCommitWork to mask
+ // the latency of syncing DB positions across all datacenters synchronously
+ $flags = $lbFactory::SHUTDOWN_CHRONPROT_SYNC;
+ if ( $lbFactory->hasOrMadeRecentMasterChanges( INF ) ) {
+ $cpPosTime = microtime( true );
+ // Set a cookie in case the DB position store cannot sync across datacenters.
+ // This will at least cover the common case of the user staying on the domain.
+ $expires = time() + ChronologyProtector::POSITION_TTL;
+ $options = [ 'prefix' => '' ];
+ $request->response()->setCookie( 'cpPosTime', $cpPosTime, $expires, $options );
+ }
+ }
+ // Record ChronologyProtector positions for DBs affected in this request at this point
+ $lbFactory->shutdown( $flags, $postCommitWork );
+ wfDebug( __METHOD__ . ': LBFactory shutdown completed' );
+
// Set a cookie to tell all CDN edge nodes to "stick" the user to the DC that handles this
// POST request (e.g. the "master" data center). Also have the user briefly bypass CDN so
// ChronologyProtector works for cacheable URLs.
- $request = $context->getRequest();
- if ( $request->wasPosted() && $factory->hasOrMadeRecentMasterChanges() ) {
+ if ( $request->wasPosted() && $lbFactory->hasOrMadeRecentMasterChanges() ) {
$expires = time() + $config->get( 'DataCenterUpdateStickTTL' );
$options = [ 'prefix' => '' ];
$request->response()->setCookie( 'UseDC', 'master', $expires, $options );
$request->response()->setCookie( 'UseCDNCache', 'false', $expires, $options );
}
- // Avoid letting a few seconds of slave lag cause a month of stale data. This logic is
+ // Avoid letting a few seconds of replica DB lag cause a month of stale data. This logic is
// also intimately related to the value of $wgCdnReboundPurgeDelay.
- if ( $factory->laggedSlaveUsed() ) {
+ if ( $lbFactory->laggedReplicaUsed() ) {
$maxAge = $config->get( 'CdnMaxageLagged' );
- $context->getOutput()->lowerCdnMaxage( $maxAge );
+ $output->lowerCdnMaxage( $maxAge );
$request->response()->header( "X-Database-Lagged: true" );
wfDebugLog( 'replication', "Lagged DB used; CDN cache TTL limited to $maxAge seconds" );
}
// Avoid long-term cache pollution due to message cache rebuild timeouts (T133069)
if ( MessageCache::singleton()->isDisabled() ) {
$maxAge = $config->get( 'CdnMaxageSubstitute' );
- $context->getOutput()->lowerCdnMaxage( $maxAge );
+ $output->lowerCdnMaxage( $maxAge );
$request->response()->header( "X-Response-Substitute: true" );
}
}
+ /**
+ * Check whether a URL's host/port matches this wiki ("local"), another wiki
+ * in this wiki farm ("remote"), or neither (false)
+ *
+ * @param string $url
+ * @param IContextSource $context
+ * @return string|bool Either "local" or "remote" if in the farm, false otherwise
+ */
+ private static function getUrlDomainDistance( $url, IContextSource $context ) {
+ static $relevantKeys = [ 'host' => true, 'port' => true ];
+
+ $infoCandidate = wfParseUrl( $url );
+ if ( $infoCandidate === false ) {
+ return false;
+ }
+
+ // Compare on host/port only; scheme, path, and query are irrelevant here
+ $infoCandidate = array_intersect_key( $infoCandidate, $relevantKeys );
+ $clusterHosts = array_merge(
+ // Local wiki host (the most common case)
+ [ $context->getConfig()->get( 'CanonicalServer' ) ],
+ // Any local/remote wiki virtual hosts for this wiki farm
+ $context->getConfig()->get( 'LocalVirtualHosts' )
+ );
+
+ foreach ( $clusterHosts as $i => $clusterHost ) {
+ $parseUrl = wfParseUrl( $clusterHost );
+ if ( !$parseUrl ) {
+ continue;
+ }
+ $infoHost = array_intersect_key( $parseUrl, $relevantKeys );
+ if ( $infoCandidate === $infoHost ) {
+ // Index 0 is the canonical server itself; any other match is a farm sibling
+ return ( $i === 0 ) ? 'local' : 'remote';
+ }
+ }
+
+ return false;
+ }
+
/**
* This function does work that can be done *after* the
* user gets the HTTP response so they don't block on it
// Show visible profiling data if enabled (which cannot be post-send)
Profiler::instance()->logDataPageOutputOnly();
- $that = $this;
- $callback = function () use ( $that, $mode ) {
+ $callback = function () use ( $mode ) {
try {
- $that->restInPeace( $mode );
+ $this->restInPeace( $mode );
} catch ( Exception $e ) {
MWExceptionHandler::handleException( $e );
}
fastcgi_finish_request();
} else {
// Either all DB and deferred updates should happen or none.
- // The later should not be cancelled due to client disconnect.
+ // The latter should not be cancelled due to client disconnect.
ignore_user_abort( true );
}
private function main() {
global $wgTitle;
+ $output = $this->context->getOutput();
$request = $this->context->getRequest();
// Send Ajax requests to the Ajax dispatcher.
$dispatcher = new AjaxDispatcher( $this->config );
$dispatcher->performAction( $this->context->getUser() );
+
return;
}
// Setup dummy Title, otherwise OutputPage::redirect will fail
$title = Title::newFromText( 'REDIR', NS_MAIN );
$this->context->setTitle( $title );
- $output = $this->context->getOutput();
// Since we only do this redir to change proto, always send a vary header
$output->addVaryHeader( 'X-Forwarded-Proto' );
$output->redirect( $redirUrl );
$output->output();
+
return;
}
}
if ( $cache->isCacheGood( /* Assume up to date */ ) ) {
// Check incoming headers to see if client has this cached
$timestamp = $cache->cacheTimestamp();
- if ( !$this->context->getOutput()->checkLastModified( $timestamp ) ) {
+ if ( !$output->checkLastModified( $timestamp ) ) {
$cache->loadFromFileCache( $this->context );
}
// Do any stats increment/watchlist stuff
// Assume we're viewing the latest revision (this should always be the case with file cache)
$this->context->getWikiPage()->doViewUpdates( $this->context->getUser() );
// Tell OutputPage that output is taken care of
- $this->context->getOutput()->disable();
+ $output->disable();
+
return;
}
}
// Actually do the work of the request and build up any output
$this->performRequest();
+ // GUI-ify and stash the page output in MediaWiki::doPreOutputCommit() while
+ // ChronologyProtector synchronizes DB positions or replicas across all datacenters.
+ $buffer = null;
+ $outputWork = function () use ( $output, &$buffer ) {
+ if ( $buffer === null ) {
+ $buffer = $output->output( true );
+ }
+
+ return $buffer;
+ };
+
// Now commit any transactions, so that unreported errors after
// output() don't roll back the whole DB transaction and so that
// we avoid having both success and error text in the response
- $this->doPreOutputCommit();
+ $this->doPreOutputCommit( $outputWork );
- // Output everything!
- $this->context->getOutput()->output();
+ // Now send the actual output
+ print $outputWork();
}
/**
* @param string $mode Use 'fast' to always skip job running
*/
public function restInPeace( $mode = 'fast' ) {
- $factory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
+ $lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
// Assure deferred updates are not in the main transaction
- $factory->commitMasterChanges( __METHOD__ );
+ $lbFactory->commitMasterChanges( __METHOD__ );
// Loosen DB query expectations since the HTTP client is unblocked
$trxProfiler = Profiler::instance()->getTransactionProfiler();
wfLogProfilingData();
// Commit and close up!
- $factory->commitMasterChanges( __METHOD__ );
- $factory->shutdown( LBFactory::SHUTDOWN_NO_CHRONPROT );
+ $lbFactory->commitMasterChanges( __METHOD__ );
+ $lbFactory->shutdown( LBFactory::SHUTDOWN_NO_CHRONPROT );
wfDebug( "Request ended normally\n" );
}
$runJobsLogger = LoggerFactory::getInstance( 'runJobs' );
+ // Fall back to running the job(s) while the user waits if needed
if ( !$this->config->get( 'RunJobsAsync' ) ) {
- // Fall back to running the job here while the user waits
$runner = new JobRunner( $runJobsLogger );
- $runner->run( [ 'maxJobs' => $n ] );
+ $runner->run( [ 'maxJobs' => $n ] );
return;
}
+ // Do not send request if there are probably no jobs
try {
- if ( !JobQueueGroup::singleton()->queuesHaveJobs( JobQueueGroup::TYPE_DEFAULT ) ) {
- return; // do not send request if there are probably no jobs
+ $group = JobQueueGroup::singleton();
+ if ( !$group->queuesHaveJobs( JobQueueGroup::TYPE_DEFAULT ) ) {
+ return;
}
} catch ( JobQueueError $e ) {
MWExceptionHandler::logException( $e );
$errno = $errstr = null;
$info = wfParseUrl( $this->config->get( 'CanonicalServer' ) );
- MediaWiki\suppressWarnings();
- $host = $info['host'];
+ $host = $info ? $info['host'] : null;
$port = 80;
if ( isset( $info['scheme'] ) && $info['scheme'] == 'https' ) {
$host = "tls://" . $host;
if ( isset( $info['port'] ) ) {
$port = $info['port'];
}
- $sock = fsockopen(
+
+ MediaWiki\suppressWarnings();
+ $sock = $host ? fsockopen(
$host,
$port,
$errno,
$errstr,
- // If it takes more than 100ms to connect to ourselves there
- // is a problem elsewhere.
- 0.1
- );
+ // If it takes more than 100ms to connect to ourselves there is a problem...
+ 0.100
+ ) : false;
MediaWiki\restoreWarnings();
- if ( !$sock ) {
+
+ $invokedWithSuccess = true;
+ if ( $sock ) {
+ $special = SpecialPageFactory::getPage( 'RunJobs' );
+ $url = $special->getPageTitle()->getCanonicalURL( $query );
+ $req = (
+ "POST $url HTTP/1.1\r\n" .
+ "Host: {$info['host']}\r\n" .
+ "Connection: Close\r\n" .
+ "Content-Length: 0\r\n\r\n"
+ );
+
+ $runJobsLogger->info( "Running $n job(s) via '$url'" );
+ // Send a cron API request to be performed in the background.
+ // Give up if this takes too long to send (which should be rare).
+ stream_set_timeout( $sock, 2 );
+ $bytes = fwrite( $sock, $req );
+ if ( $bytes !== strlen( $req ) ) {
+ $invokedWithSuccess = false;
+ $runJobsLogger->error( "Failed to start cron API (socket write error)" );
+ } else {
+ // Do not wait for the response (the script should handle client aborts).
+ // Make sure that we don't close before that script reaches ignore_user_abort().
+ $start = microtime( true );
+ $status = fgets( $sock );
+ $sec = microtime( true ) - $start;
+ if ( !preg_match( '#^HTTP/\d\.\d 202 #', $status ) ) {
+ $invokedWithSuccess = false;
+ $runJobsLogger->error( "Failed to start cron API: received '$status' ($sec)" );
+ }
+ }
+ fclose( $sock );
+ } else {
+ $invokedWithSuccess = false;
$runJobsLogger->error( "Failed to start cron API (socket error $errno): $errstr" );
- // Fall back to running the job here while the user waits
- $runner = new JobRunner( $runJobsLogger );
- $runner->run( [ 'maxJobs' => $n ] );
- return;
}
- $special = SpecialPageFactory::getPage( 'RunJobs' );
- $url = $special->getPageTitle()->getCanonicalURL( $query );
- $req = (
- "POST $url HTTP/1.1\r\n" .
- "Host: {$info['host']}\r\n" .
- "Connection: Close\r\n" .
- "Content-Length: 0\r\n\r\n"
- );
+ // Fall back to running the job(s) while the user waits if needed
+ if ( !$invokedWithSuccess ) {
+ $runJobsLogger->warning( "Jobs switched to blocking; Special:RunJobs disabled" );
- $runJobsLogger->info( "Running $n job(s) via '$url'" );
- // Send a cron API request to be performed in the background.
- // Give up if this takes too long to send (which should be rare).
- stream_set_timeout( $sock, 2 );
- $bytes = fwrite( $sock, $req );
- if ( $bytes !== strlen( $req ) ) {
- $runJobsLogger->error( "Failed to start cron API (socket write error)" );
- } else {
- // Do not wait for the response (the script should handle client aborts).
- // Make sure that we don't close before that script reaches ignore_user_abort().
- $status = fgets( $sock );
- if ( !preg_match( '#^HTTP/\d\.\d 202 #', $status ) ) {
- $runJobsLogger->error( "Failed to start cron API: received '$status'" );
- }
+ $runner = new JobRunner( $runJobsLogger );
+ $runner->run( [ 'maxJobs' => $n ] );
}
- fclose( $sock );
}
}