/**
 * Commit the primary transaction round and run pre-send updates for this request.
 *
 * @see MediaWiki::preOutputCommit()
 * @param callable|null $postCommitWork Optional work to run while DB positions
 *   are synced across datacenters [default: null]
 * @since 1.26
 */
public function doPreOutputCommit( callable $postCommitWork = null ) {
	self::preOutputCommit( $this->context, $postCommitWork );
}
/**
* the user can receive a response (in case commit fails)
*
* @param IContextSource $context
+ * @param callable $postCommitWork [default: null]
* @since 1.27
*/
- public static function preOutputCommit( IContextSource $context ) {
+ public static function preOutputCommit(
+ IContextSource $context, callable $postCommitWork = null
+ ) {
// Either all DBs should commit or none
ignore_user_abort( true );
$config = $context->getConfig();
-
+ $request = $context->getRequest();
+ $output = $context->getOutput();
$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
+
// Commit all changes
$lbFactory->commitMasterChanges(
__METHOD__,
// Abort if any transaction was too big
[ 'maxWriteDuration' => $config->get( 'MaxUserDBWriteDuration' ) ]
);
+ wfDebug( __METHOD__ . ': primary transaction round committed' );
+ // Run updates that need to block the user or affect output (this is the last chance)
DeferredUpdates::doUpdates( 'enqueue', DeferredUpdates::PRESEND );
wfDebug( __METHOD__ . ': pre-send deferred updates completed' );
- // Record ChronologyProtector positions
- $lbFactory->shutdown();
- wfDebug( __METHOD__ . ': all transactions committed' );
+ // Decide when clients block on ChronologyProtector DB position writes
+ if (
+ $request->wasPosted() &&
+ $output->getRedirect() &&
+ $lbFactory->hasOrMadeRecentMasterChanges( INF ) &&
+ self::isWikiClusterURL( $output->getRedirect(), $context )
+ ) {
+ // OutputPage::output() will be fast; $postCommitWork will not be useful for
+ // masking the latency of syncing DB positions across all datacenters synchronously.
+ // Instead, make use of the RTT of the client following redirects to mask it.
+ $flags = $lbFactory::SHUTDOWN_CHRONPROT_ASYNC;
+ // Client's next request should see 1+ positions with this DBMasterPos::asOf() time
+ $safeUrl = $lbFactory->appendPreShutdownTimeAsQuery(
+ $output->getRedirect(),
+ microtime( true )
+ );
+ $output->redirect( $safeUrl );
+ } else {
+ // OutputPage::output() is fairly slow; run it in $postCommitWork to mask
+ // the latency of syncing DB positions across all datacenters synchronously
+ $flags = $lbFactory::SHUTDOWN_CHRONPROT_SYNC;
+ }
+ // Record ChronologyProtector positions for DBs affected in this request at this point
+ $lbFactory->shutdown( $flags, $postCommitWork );
+ wfDebug( __METHOD__ . ': LBFactory shutdown completed' );
// Set a cookie to tell all CDN edge nodes to "stick" the user to the DC that handles this
// POST request (e.g. the "master" data center). Also have the user briefly bypass CDN so
// ChronologyProtector works for cacheable URLs.
- $request = $context->getRequest();
if ( $request->wasPosted() && $lbFactory->hasOrMadeRecentMasterChanges() ) {
$expires = time() + $config->get( 'DataCenterUpdateStickTTL' );
$options = [ 'prefix' => '' ];
// also intimately related to the value of $wgCdnReboundPurgeDelay.
if ( $lbFactory->laggedReplicaUsed() ) {
$maxAge = $config->get( 'CdnMaxageLagged' );
- $context->getOutput()->lowerCdnMaxage( $maxAge );
+ $output->lowerCdnMaxage( $maxAge );
$request->response()->header( "X-Database-Lagged: true" );
wfDebugLog( 'replication', "Lagged DB used; CDN cache TTL limited to $maxAge seconds" );
}
// Avoid long-term cache pollution due to message cache rebuild timeouts (T133069)
if ( MessageCache::singleton()->isDisabled() ) {
$maxAge = $config->get( 'CdnMaxageSubstitute' );
- $context->getOutput()->lowerCdnMaxage( $maxAge );
+ $output->lowerCdnMaxage( $maxAge );
$request->response()->header( "X-Response-Substitute: true" );
}
}
/**
 * Check whether a URL points at a host/port belonging to this wiki farm.
 *
 * The candidate hosts are the local wiki's $wgCanonicalServer plus any
 * entries in $wgLocalVirtualHosts; only the "host" and "port" URL
 * components are compared (scheme, path, and query are ignored).
 *
 * Declared static because it is invoked via self:: from the static
 * preOutputCommit() method; a non-static method cannot be called that way.
 *
 * @param string $url Absolute URL to test
 * @param IContextSource $context Source of the site configuration
 * @return bool Whether $url is to something on this wiki farm
 */
private static function isWikiClusterURL( $url, IContextSource $context ) {
	// Only the authority portion of a URL matters for cluster membership
	static $relevantKeys = [ 'host' => true, 'port' => true ];

	$infoCandidate = wfParseUrl( $url );
	if ( $infoCandidate === false ) {
		// An unparsable URL cannot be trusted as local
		return false;
	}

	$infoCandidate = array_intersect_key( $infoCandidate, $relevantKeys );
	$clusterHosts = array_merge(
		// Local wiki host (the most common case)
		[ $context->getConfig()->get( 'CanonicalServer' ) ],
		// Any local/remote wiki virtual hosts for this wiki farm
		$context->getConfig()->get( 'LocalVirtualHosts' )
	);

	foreach ( $clusterHosts as $clusterHost ) {
		$parsedHost = wfParseUrl( $clusterHost );
		if ( !$parsedHost ) {
			// Skip malformed configuration entries rather than failing hard
			continue;
		}
		$infoHost = array_intersect_key( $parsedHost, $relevantKeys );
		if ( $infoCandidate === $infoHost ) {
			return true;
		}
	}

	return false;
}
+
/**
* This function does work that can be done *after* the
* user gets the HTTP response so they don't block on it
// Show visible profiling data if enabled (which cannot be post-send)
Profiler::instance()->logDataPageOutputOnly();
- $that = $this;
- $callback = function () use ( $that, $mode ) {
+ $callback = function () use ( $mode ) {
try {
- $that->restInPeace( $mode );
+ $this->restInPeace( $mode );
} catch ( Exception $e ) {
MWExceptionHandler::handleException( $e );
}
fastcgi_finish_request();
} else {
// Either all DB and deferred updates should happen or none.
- // The later should not be cancelled due to client disconnect.
+ // The latter should not be cancelled due to client disconnect.
ignore_user_abort( true );
}
private function main() {
global $wgTitle;
+ $output = $this->context->getOutput();
$request = $this->context->getRequest();
// Send Ajax requests to the Ajax dispatcher.
$dispatcher = new AjaxDispatcher( $this->config );
$dispatcher->performAction( $this->context->getUser() );
+
return;
}
// Setup dummy Title, otherwise OutputPage::redirect will fail
$title = Title::newFromText( 'REDIR', NS_MAIN );
$this->context->setTitle( $title );
- $output = $this->context->getOutput();
// Since we only do this redir to change proto, always send a vary header
$output->addVaryHeader( 'X-Forwarded-Proto' );
$output->redirect( $redirUrl );
$output->output();
+
return;
}
}
if ( $cache->isCacheGood( /* Assume up to date */ ) ) {
// Check incoming headers to see if client has this cached
$timestamp = $cache->cacheTimestamp();
- if ( !$this->context->getOutput()->checkLastModified( $timestamp ) ) {
+ if ( !$output->checkLastModified( $timestamp ) ) {
$cache->loadFromFileCache( $this->context );
}
// Do any stats increment/watchlist stuff
// Assume we're viewing the latest revision (this should always be the case with file cache)
$this->context->getWikiPage()->doViewUpdates( $this->context->getUser() );
// Tell OutputPage that output is taken care of
- $this->context->getOutput()->disable();
+ $output->disable();
+
return;
}
}
// Actually do the work of the request and build up any output
$this->performRequest();
+ // GUI-ify and stash the page output in MediaWiki::doPreOutputCommit() while
+ // ChronologyProtector synchronizes DB positions on replicas across all datacenters.
+ $buffer = null;
+ $outputWork = function () use ( $output, &$buffer ) {
+ if ( $buffer === null ) {
+ $buffer = $output->output( true );
+ }
+
+ return $buffer;
+ };
+
// Now commit any transactions, so that unreported errors after
// output() don't roll back the whole DB transaction and so that
// we avoid having both success and error text in the response
- $this->doPreOutputCommit();
+ $this->doPreOutputCommit( $outputWork );
- // Output everything!
- $this->context->getOutput()->output();
+ // Now send the actual output
+ print $outputWork();
}
/**