X-Git-Url: http://git.heureux-cyclage.org/?a=blobdiff_plain;f=includes%2FMediaWiki.php;h=0bd183f1d06c246d2da94e01799d84e5e527a4b0;hb=3635881b44a3b1069b1f44218fe6269cb2dfd3df;hp=52eca31c8d936941d387ea1e370d8d3963c84285;hpb=e73e04c239cc1ef4aecf7e11d9f0cb723df2af99;p=lhc%2Fweb%2Fwiklou.git

diff --git a/includes/MediaWiki.php b/includes/MediaWiki.php
index 52eca31c8d..0bd183f1d0 100644
--- a/includes/MediaWiki.php
+++ b/includes/MediaWiki.php
@@ -535,10 +535,11 @@ class MediaWiki {
 
 	/**
 	 * @see MediaWiki::preOutputCommit()
+	 * @param callable $postCommitWork [default: null]
 	 * @since 1.26
 	 */
-	public function doPreOutputCommit() {
-		self::preOutputCommit( $this->context );
+	public function doPreOutputCommit( callable $postCommitWork = null ) {
+		self::preOutputCommit( $this->context, $postCommitWork );
 	}
 
 	/**
@@ -546,33 +547,61 @@ class MediaWiki {
 	 * the user can receive a response (in case commit fails)
 	 *
 	 * @param IContextSource $context
+	 * @param callable $postCommitWork [default: null]
 	 * @since 1.27
 	 */
-	public static function preOutputCommit( IContextSource $context ) {
+	public static function preOutputCommit(
+		IContextSource $context, callable $postCommitWork = null
+	) {
 		// Either all DBs should commit or none
 		ignore_user_abort( true );
 
 		$config = $context->getConfig();
-
+		$request = $context->getRequest();
+		$output = $context->getOutput();
 		$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
+
 		// Commit all changes
 		$lbFactory->commitMasterChanges(
 			__METHOD__,
 			// Abort if any transaction was too big
 			[ 'maxWriteDuration' => $config->get( 'MaxUserDBWriteDuration' ) ]
 		);
+		wfDebug( __METHOD__ . ': primary transaction round committed' );
+
 		// Run updates that need to block the user or affect output (this is the last chance)
 		DeferredUpdates::doUpdates( 'enqueue', DeferredUpdates::PRESEND );
 		wfDebug( __METHOD__ . ': pre-send deferred updates completed' );
 
-		// Record ChronologyProtector positions
-		$lbFactory->shutdown();
-		wfDebug( __METHOD__ . ': all transactions committed' );
+		// Decide when clients block on ChronologyProtector DB position writes
+		if (
+			$request->wasPosted() &&
+			$output->getRedirect() &&
+			$lbFactory->hasOrMadeRecentMasterChanges( INF ) &&
+			self::isWikiClusterURL( $output->getRedirect(), $context )
+		) {
+			// OutputPage::output() will be fast; $postCommitWork will not be useful for
+			// masking the latency of syncing DB positions across all datacenters synchronously.
+			// Instead, make use of the RTT of the client following the redirect.
+			$flags = $lbFactory::SHUTDOWN_CHRONPROT_ASYNC;
+			// Client's next request should see 1+ positions with this DBMasterPos::asOf() time
+			$safeUrl = $lbFactory->appendPreShutdownTimeAsQuery(
+				$output->getRedirect(),
+				microtime( true )
+			);
+			$output->redirect( $safeUrl );
+		} else {
+			// OutputPage::output() is fairly slow; run it in $postCommitWork to mask
+			// the latency of syncing DB positions across all datacenters synchronously
+			$flags = $lbFactory::SHUTDOWN_CHRONPROT_SYNC;
+		}
+		// Record ChronologyProtector positions for DBs affected in this request at this point
+		$lbFactory->shutdown( $flags, $postCommitWork );
+		wfDebug( __METHOD__ . ': LBFactory shutdown completed' );
 
 		// Set a cookie to tell all CDN edge nodes to "stick" the user to the DC that handles this
 		// POST request (e.g. the "master" data center). Also have the user briefly bypass CDN so
 		// ChronologyProtector works for cacheable URLs.
-		$request = $context->getRequest();
 		if ( $request->wasPosted() && $lbFactory->hasOrMadeRecentMasterChanges() ) {
			$expires = time() + $config->get( 'DataCenterUpdateStickTTL' );
 			$options = [ 'prefix' => '' ];
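The async branch above leans on LBFactory::appendPreShutdownTimeAsQuery() to tag the redirect target with the pre-shutdown timestamp, so that the client's follow-up GET can wait for ChronologyProtector positions at least that recent. The sketch below only illustrates the general shape of that URL rewrite; the helper and the query parameter name are placeholders, not the actual LBFactory implementation.

<?php
// Illustrative sketch only: the real logic lives in
// LBFactory::appendPreShutdownTimeAsQuery(), and the actual query parameter
// name is defined there, not here.
function appendPreShutdownTimeSketch( $redirectUrl, $requestTime ) {
	$separator = strpos( $redirectUrl, '?' ) === false ? '?' : '&';

	// The timestamp from microtime( true ) marks the DBMasterPos::asOf() time
	// that the next request's ChronologyProtector check should wait for.
	return $redirectUrl . $separator . 'cpPosTime=' . rawurlencode( (string)$requestTime );
}

echo appendPreShutdownTimeSketch( 'https://en.example.org/wiki/Foo', microtime( true ) );
// e.g. https://en.example.org/wiki/Foo?cpPosTime=1490000000.1234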
@@ -584,7 +613,7 @@ class MediaWiki {
 		// also intimately related to the value of $wgCdnReboundPurgeDelay.
 		if ( $lbFactory->laggedReplicaUsed() ) {
 			$maxAge = $config->get( 'CdnMaxageLagged' );
-			$context->getOutput()->lowerCdnMaxage( $maxAge );
+			$output->lowerCdnMaxage( $maxAge );
 			$request->response()->header( "X-Database-Lagged: true" );
 			wfDebugLog( 'replication', "Lagged DB used; CDN cache TTL limited to $maxAge seconds" );
 		}
@@ -592,11 +621,46 @@ class MediaWiki {
 		// Avoid long-term cache pollution due to message cache rebuild timeouts (T133069)
 		if ( MessageCache::singleton()->isDisabled() ) {
 			$maxAge = $config->get( 'CdnMaxageSubstitute' );
-			$context->getOutput()->lowerCdnMaxage( $maxAge );
+			$output->lowerCdnMaxage( $maxAge );
 			$request->response()->header( "X-Response-Substitute: true" );
 		}
 	}
 
+	/**
+	 * @param string $url
+	 * @param IContextSource $context
+	 * @return bool Whether $url is to something on this wiki farm
+	 */
+	private static function isWikiClusterURL( $url, IContextSource $context ) {
+		static $relevantKeys = [ 'host' => true, 'port' => true ];
+
+		$infoCandidate = wfParseUrl( $url );
+		if ( $infoCandidate === false ) {
+			return false;
+		}
+
+		$infoCandidate = array_intersect_key( $infoCandidate, $relevantKeys );
+		$clusterHosts = array_merge(
+			// Local wiki host (the most common case)
+			[ $context->getConfig()->get( 'CanonicalServer' ) ],
+			// Any local/remote wiki virtual hosts for this wiki farm
+			$context->getConfig()->get( 'LocalVirtualHosts' )
+		);
+
+		foreach ( $clusterHosts as $clusterHost ) {
+			$parseUrl = wfParseUrl( $clusterHost );
+			if ( !$parseUrl ) {
+				continue;
+			}
+			$infoHost = array_intersect_key( $parseUrl, $relevantKeys );
+			if ( $infoCandidate === $infoHost ) {
+				return true;
+			}
+		}
+
+		return false;
+	}
+
 	/**
 	 * This function does work that can be done *after* the
 	 * user gets the HTTP response so they don't block on it
@@ -614,10 +678,9 @@
 		// Show visible profiling data if enabled (which cannot be post-send)
 		Profiler::instance()->logDataPageOutputOnly();
 
-		$that = $this;
-		$callback = function () use ( $that, $mode ) {
+		$callback = function () use ( $mode ) {
 			try {
-				$that->restInPeace( $mode );
+				$this->restInPeace( $mode );
 			} catch ( Exception $e ) {
 				MWExceptionHandler::handleException( $e );
 			}
@@ -632,7 +695,7 @@
 			fastcgi_finish_request();
 		} else {
 			// Either all DB and deferred updates should happen or none.
-			// The later should not be cancelled due to client disconnect.
+			// The latter should not be cancelled due to client disconnect.
 			ignore_user_abort( true );
 		}
 
@@ -643,6 +706,7 @@
 	private function main() {
 		global $wgTitle;
 
+		$output = $this->context->getOutput();
 		$request = $this->context->getRequest();
 
 		// Send Ajax requests to the Ajax dispatcher.
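The host/port comparison performed by the new isWikiClusterURL() helper above can be illustrated standalone. The snippet below is only an illustration: the server name is hypothetical, it assumes MediaWiki's wfParseUrl() (from GlobalFunctions.php) is loaded, and LocalVirtualHosts entries are checked the same way as CanonicalServer.

<?php
// Hypothetical stand-in for the wiki's $wgCanonicalServer setting.
$canonicalServer = 'https://en.example.org';
$relevantKeys = [ 'host' => true, 'port' => true ];

// Candidate redirect target, as returned by OutputPage::getRedirect().
$candidate = wfParseUrl( 'https://en.example.org/wiki/Main_Page?action=history' );
$candidate = array_intersect_key( $candidate, $relevantKeys );
// => [ 'host' => 'en.example.org' ] (no explicit port, so 'port' is simply absent)

$farmHost = array_intersect_key( wfParseUrl( $canonicalServer ), $relevantKeys );
// => [ 'host' => 'en.example.org' ]

var_dump( $candidate === $farmHost ); // bool(true): the redirect stays on this wiki farm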
@@ -656,6 +720,7 @@
 
 			$dispatcher = new AjaxDispatcher( $this->config );
 			$dispatcher->performAction( $this->context->getUser() );
+
 			return;
 		}
 
@@ -717,11 +782,11 @@
 				// Setup dummy Title, otherwise OutputPage::redirect will fail
 				$title = Title::newFromText( 'REDIR', NS_MAIN );
 				$this->context->setTitle( $title );
-				$output = $this->context->getOutput();
 				// Since we only do this redir to change proto, always send a vary header
 				$output->addVaryHeader( 'X-Forwarded-Proto' );
 				$output->redirect( $redirUrl );
 				$output->output();
+
 				return;
 			}
 		}
@@ -733,14 +798,15 @@
 			if ( $cache->isCacheGood( /* Assume up to date */ ) ) {
 				// Check incoming headers to see if client has this cached
 				$timestamp = $cache->cacheTimestamp();
-				if ( !$this->context->getOutput()->checkLastModified( $timestamp ) ) {
+				if ( !$output->checkLastModified( $timestamp ) ) {
					$cache->loadFromFileCache( $this->context );
 				}
 				// Do any stats increment/watchlist stuff
 				// Assume we're viewing the latest revision (this should always be the case with file cache)
 				$this->context->getWikiPage()->doViewUpdates( $this->context->getUser() );
 				// Tell OutputPage that output is taken care of
-				$this->context->getOutput()->disable();
+				$output->disable();
+
 				return;
 			}
 		}
@@ -749,13 +815,24 @@
 		// Actually do the work of the request and build up any output
 		$this->performRequest();
 
+		// GUI-ify and stash the page output in MediaWiki::doPreOutputCommit() while
+		// ChronologyProtector synchronizes DB positions or slaves across all datacenters.
+		$buffer = null;
+		$outputWork = function () use ( $output, &$buffer ) {
+			if ( $buffer === null ) {
+				$buffer = $output->output( true );
+			}
+
+			return $buffer;
+		};
+
 		// Now commit any transactions, so that unreported errors after
 		// output() don't roll back the whole DB transaction and so that
 		// we avoid having both success and error text in the response
-		$this->doPreOutputCommit();
+		$this->doPreOutputCommit( $outputWork );
 
-		// Output everything!
-		$this->context->getOutput()->output();
+		// Now send the actual output
+		print $outputWork();
 	}
 
 	/**
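The $outputWork closure introduced above is a memoized run-once callback: LBFactory::shutdown() may invoke it early via $postCommitWork to hide the latency of a synchronous ChronologyProtector position write behind the page render, and the final print $outputWork(); call then reuses the buffered markup instead of rendering twice. A minimal standalone sketch of that pattern follows, with renderPage() as a made-up stand-in for OutputPage::output( true ).

<?php
// Stand-in for the expensive OutputPage::output( true ) call.
function renderPage() {
	return "<html>rendered exactly once</html>";
}

$buffer = null;
$outputWork = function () use ( &$buffer ) {
	if ( $buffer === null ) {
		$buffer = renderPage(); // only the first call does the work
	}

	return $buffer;
};

$early = $outputWork(); // e.g. invoked from inside LBFactory::shutdown()
print $outputWork();    // the later call just returns the cached buffer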