*/
public function run() {
try {
+ // Tag this request's DB connections with the requesting user's name so
+ // SHOW PROCESSLIST output is easy to attribute (see setDBProfilingAgent)
+ $this->setDBProfilingAgent();
try {
$this->main();
} catch ( ErrorPageError $e ) {
$e->report(); // display the GUI error
}
} catch ( Exception $e ) {
+ $context = $this->context;
+ $action = $context->getRequest()->getVal( 'action', 'view' );
+ // During a DB outage, fall back to the HTML file cache for plain read
+ // requests ('view'/'history') on titles that can have cached copies
+ if (
+ $e instanceof DBConnectionError &&
+ $context->hasTitle() &&
+ $context->getTitle()->canExist() &&
+ in_array( $action, [ 'view', 'history' ], true ) &&
+ HTMLFileCache::useFileCache( $this->context, HTMLFileCache::MODE_OUTAGE )
+ ) {
+ // Try to use any (even stale) file during outages...
+ $cache = new HTMLFileCache( $context->getTitle(), 'view' );
+ if ( $cache->isCached() ) {
+ $cache->loadFromFileCache( $context, HTMLFileCache::MODE_OUTAGE );
+ // Append the rendered exception after the cached page output,
+ // then stop; no further shutdown handling runs on this path
+ print MWExceptionRenderer::getHTML( $e );
+ exit;
+ }
+
+ }
+
MWExceptionHandler::handleException( $e );
}
$this->doPostOutputShutdown( 'normal' );
}
+ /**
+  * Add a user-name comment to this request's DB connections so that
+  * SHOW PROCESSLIST output can be attributed to a user.
+  *
+  * Names longer than 15 characters are truncated with a '...' suffix
+  * to keep the agent string short.
+  */
+ private function setDBProfilingAgent() {
+ $services = MediaWikiServices::getInstance();
+ // Add a comment for easy SHOW PROCESSLIST interpretation
+ $name = $this->context->getUser()->getName();
+ $services->getDBLoadBalancerFactory()->setAgentName(
+ mb_strlen( $name ) > 15 ? mb_substr( $name, 0, 15 ) . '...' : $name
+ );
+ }
+
/**
* @see MediaWiki::preOutputCommit()
* @param callable $postCommitWork [default: null]
wfDebug( __METHOD__ . ': pre-send deferred updates completed' );
// Decide when clients block on ChronologyProtector DB position writes
- if (
+ $urlDomainDistance = (
$request->wasPosted() &&
$output->getRedirect() &&
- $lbFactory->hasOrMadeRecentMasterChanges( INF ) &&
- self::isWikiClusterURL( $output->getRedirect(), $context )
- ) {
+ $lbFactory->hasOrMadeRecentMasterChanges( INF )
+ ) ? self::getUrlDomainDistance( $output->getRedirect(), $context ) : false;
+
+ if ( $urlDomainDistance === 'local' || $urlDomainDistance === 'remote' ) {
// OutputPage::output() will be fast; $postCommitWork will not be useful for
// masking the latency of syncing DB positions accross all datacenters synchronously.
// Instead, make use of the RTT time of the client follow redirects.
$flags = $lbFactory::SHUTDOWN_CHRONPROT_ASYNC;
+ $cpPosTime = microtime( true );
// Client's next request should see 1+ positions with this DBMasterPos::asOf() time
- $safeUrl = $lbFactory->appendPreShutdownTimeAsQuery(
- $output->getRedirect(),
- microtime( true )
- );
- $output->redirect( $safeUrl );
+ if ( $urlDomainDistance === 'local' ) {
+ // Client will stay on this domain, so set an unobtrusive cookie
+ $expires = time() + ChronologyProtector::POSITION_TTL;
+ $options = [ 'prefix' => '' ];
+ $request->response()->setCookie( 'cpPosTime', $cpPosTime, $expires, $options );
+ } else {
+ // Cookies may not work across wiki domains, so use a URL parameter
+ $safeUrl = $lbFactory->appendPreShutdownTimeAsQuery(
+ $output->getRedirect(),
+ $cpPosTime
+ );
+ $output->redirect( $safeUrl );
+ }
} else {
// OutputPage::output() is fairly slow; run it in $postCommitWork to mask
// the latency of syncing DB positions accross all datacenters synchronously
$flags = $lbFactory::SHUTDOWN_CHRONPROT_SYNC;
+ if ( $lbFactory->hasOrMadeRecentMasterChanges( INF ) ) {
+ $cpPosTime = microtime( true );
+ // Set a cookie in case the DB position store cannot sync across datacenters.
+ // This will at least cover the common case of the user staying on the domain.
+ $expires = time() + ChronologyProtector::POSITION_TTL;
+ $options = [ 'prefix' => '' ];
+ $request->response()->setCookie( 'cpPosTime', $cpPosTime, $expires, $options );
+ }
}
// Record ChronologyProtector positions for DBs affected in this request at this point
$lbFactory->shutdown( $flags, $postCommitWork );
/**
* @param string $url
* @param IContextSource $context
- * @return bool Whether $url is to something on this wiki farm
+ * @return string|bool Either "local" or "remote" if in the farm, false otherwise
*/
- private function isWikiClusterURL( $url, IContextSource $context ) {
+ private function getUrlDomainDistance( $url, IContextSource $context ) {
static $relevantKeys = [ 'host' => true, 'port' => true ];
$infoCandidate = wfParseUrl( $url );
$context->getConfig()->get( 'LocalVirtualHosts' )
);
- foreach ( $clusterHosts as $clusterHost ) {
+ foreach ( $clusterHosts as $i => $clusterHost ) {
$parseUrl = wfParseUrl( $clusterHost );
if ( !$parseUrl ) {
continue;
}
$infoHost = array_intersect_key( $parseUrl, $relevantKeys );
if ( $infoCandidate === $infoHost ) {
- return true;
+ return ( $i === 0 ) ? 'local' : 'remote';
}
}
}
}
- if ( $this->config->get( 'UseFileCache' ) && $title->getNamespace() >= 0 ) {
- if ( HTMLFileCache::useFileCache( $this->context ) ) {
- // Try low-level file cache hit
- $cache = new HTMLFileCache( $title, $action );
- if ( $cache->isCacheGood( /* Assume up to date */ ) ) {
- // Check incoming headers to see if client has this cached
- $timestamp = $cache->cacheTimestamp();
- if ( !$output->checkLastModified( $timestamp ) ) {
- $cache->loadFromFileCache( $this->context );
- }
- // Do any stats increment/watchlist stuff
- // Assume we're viewing the latest revision (this should always be the case with file cache)
- $this->context->getWikiPage()->doViewUpdates( $this->context->getUser() );
- // Tell OutputPage that output is taken care of
- $output->disable();
-
- return;
+ if ( $title->canExist() && HTMLFileCache::useFileCache( $this->context ) ) {
+ // Try low-level file cache hit
+ $cache = new HTMLFileCache( $title, $action );
+ if ( $cache->isCacheGood( /* Assume up to date */ ) ) {
+ // Check incoming headers to see if client has this cached
+ $timestamp = $cache->cacheTimestamp();
+ if ( !$output->checkLastModified( $timestamp ) ) {
+ $cache->loadFromFileCache( $this->context );
}
+ // Do any stats increment/watchlist stuff, assuming user is viewing the
+ // latest revision (which should always be the case for file cache)
+ $this->context->getWikiPage()->doViewUpdates( $this->context->getUser() );
+ // Tell OutputPage that output is taken care of
+ $output->disable();
+
+ return;
}
}