Avoid warnings when unable to parse the URL given to us
diff --git a/includes/MediaWiki.php b/includes/MediaWiki.php
index 77ac76a..0bd183f 100644
--- a/includes/MediaWiki.php
+++ b/includes/MediaWiki.php
@@ -535,10 +535,11 @@ class MediaWiki {
 
        /**
         * @see MediaWiki::preOutputCommit()
+        * @param callable $postCommitWork [default: null]
         * @since 1.26
         */
-       public function doPreOutputCommit() {
-               self::preOutputCommit( $this->context );
+       public function doPreOutputCommit( callable $postCommitWork = null ) {
+               self::preOutputCommit( $this->context, $postCommitWork );
        }
 
        /**
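[Annotation: a usage sketch for the new parameter. The caller hands in a memoized closure that renders the page at most once; shutdown() may invoke it early to mask replication-sync latency, and the caller prints the result afterward either way. This mirrors the main() hunk further down; $mediaWiki and $output are stand-ins for a MediaWiki instance and its OutputPage, not code from this change.]

<?php
// Hypothetical caller of the new signature (see the main() hunk below for the
// real one). The closure renders the page at most once, whenever it is first
// invoked, and returns the same buffer on every later call.
$buffer = null;
$outputWork = function () use ( $output, &$buffer ) {
	if ( $buffer === null ) {
		$buffer = $output->output( true ); // render to a string, do not print
	}
	return $buffer;
};
$mediaWiki->doPreOutputCommit( $outputWork ); // shutdown() may call $outputWork
print $outputWork(); // cheap if already rendered during shutdown()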
@@ -546,44 +547,73 @@ class MediaWiki {
         * the user can receive a response (in case commit fails)
         *
         * @param IContextSource $context
+        * @param callable $postCommitWork [default: null]
         * @since 1.27
         */
-       public static function preOutputCommit( IContextSource $context ) {
+       public static function preOutputCommit(
+               IContextSource $context, callable $postCommitWork = null
+       ) {
                // Either all DBs should commit or none
                ignore_user_abort( true );
 
                $config = $context->getConfig();
+               $request = $context->getRequest();
+               $output = $context->getOutput();
+               $lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
 
-               $factory = wfGetLBFactory();
                // Commit all changes
-               $factory->commitMasterChanges(
+               $lbFactory->commitMasterChanges(
                        __METHOD__,
                        // Abort if any transaction was too big
                        [ 'maxWriteDuration' => $config->get( 'MaxUserDBWriteDuration' ) ]
                );
-               // Record ChronologyProtector positions
-               $factory->shutdown();
-               wfDebug( __METHOD__ . ': all transactions committed' );
+               wfDebug( __METHOD__ . ': primary transaction round committed' );
 
+               // Run updates that need to block the user or affect output (this is the last chance)
                DeferredUpdates::doUpdates( 'enqueue', DeferredUpdates::PRESEND );
                wfDebug( __METHOD__ . ': pre-send deferred updates completed' );
 
+               // Decide when clients block on ChronologyProtector DB position writes
+               if (
+                       $request->wasPosted() &&
+                       $output->getRedirect() &&
+                       $lbFactory->hasOrMadeRecentMasterChanges( INF ) &&
+                       self::isWikiClusterURL( $output->getRedirect(), $context )
+               ) {
+                       // OutputPage::output() will be fast; $postCommitWork will not be useful for
+                       // masking the latency of syncing DB positions across all datacenters synchronously.
+                       // Instead, make use of the client's RTT as it follows the redirect.
+                       $flags = $lbFactory::SHUTDOWN_CHRONPROT_ASYNC;
+                       // Client's next request should see 1+ positions with this DBMasterPos::asOf() time
+                       $safeUrl = $lbFactory->appendPreShutdownTimeAsQuery(
+                               $output->getRedirect(),
+                               microtime( true )
+                       );
+                       $output->redirect( $safeUrl );
+               } else {
+                       // OutputPage::output() is fairly slow; run it in $postCommitWork to mask
+                       // the latency of syncing DB positions across all datacenters synchronously
+                       $flags = $lbFactory::SHUTDOWN_CHRONPROT_SYNC;
+               }
+               // Record ChronologyProtector positions for DBs affected in this request at this point
+               $lbFactory->shutdown( $flags, $postCommitWork );
+               wfDebug( __METHOD__ . ': LBFactory shutdown completed' );
+
                // Set a cookie to tell all CDN edge nodes to "stick" the user to the DC that handles this
                // POST request (e.g. the "master" data center). Also have the user briefly bypass CDN so
                // ChronologyProtector works for cacheable URLs.
-               $request = $context->getRequest();
-               if ( $request->wasPosted() && $factory->hasOrMadeRecentMasterChanges() ) {
+               if ( $request->wasPosted() && $lbFactory->hasOrMadeRecentMasterChanges() ) {
                        $expires = time() + $config->get( 'DataCenterUpdateStickTTL' );
                        $options = [ 'prefix' => '' ];
                        $request->response()->setCookie( 'UseDC', 'master', $expires, $options );
                        $request->response()->setCookie( 'UseCDNCache', 'false', $expires, $options );
                }
 
-               // Avoid letting a few seconds of slave lag cause a month of stale data. This logic is
+               // Avoid letting a few seconds of replica DB lag cause a month of stale data. This logic is
                // also intimately related to the value of $wgCdnReboundPurgeDelay.
-               if ( $factory->laggedSlaveUsed() ) {
+               if ( $lbFactory->laggedReplicaUsed() ) {
                        $maxAge = $config->get( 'CdnMaxageLagged' );
-                       $context->getOutput()->lowerCdnMaxage( $maxAge );
+                       $output->lowerCdnMaxage( $maxAge );
                        $request->response()->header( "X-Database-Lagged: true" );
                        wfDebugLog( 'replication', "Lagged DB used; CDN cache TTL limited to $maxAge seconds" );
                }
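[Annotation: to illustrate the async branch above, the redirect URL is tagged with the pre-shutdown timestamp so that the client's next request can wait for replica DBs to reach positions written before that time. A minimal standalone sketch follows; the query parameter name 'cpPosTime' is an assumption for illustration, and this is not LBFactory's actual implementation.]

<?php
// Sketch of the URL-tagging idea behind appendPreShutdownTimeAsQuery().
// The parameter name 'cpPosTime' is assumed here, not taken from this change.
function appendPreShutdownTime( $url, $time ) {
	$sep = ( strpos( $url, '?' ) === false ) ? '?' : '&';
	return $url . $sep . 'cpPosTime=' . rawurlencode( sprintf( '%.6F', $time ) );
}

echo appendPreShutdownTime( 'https://example.org/wiki/Foo', microtime( true ) );
// e.g. https://example.org/wiki/Foo?cpPosTime=1467229841.312700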
@@ -591,11 +621,46 @@ class MediaWiki {
                // Avoid long-term cache pollution due to message cache rebuild timeouts (T133069)
                if ( MessageCache::singleton()->isDisabled() ) {
                        $maxAge = $config->get( 'CdnMaxageSubstitute' );
-                       $context->getOutput()->lowerCdnMaxage( $maxAge );
+                       $output->lowerCdnMaxage( $maxAge );
                        $request->response()->header( "X-Response-Substitute: true" );
                }
        }
 
+       /**
+        * @param string $url
+        * @param IContextSource $context
+        * @return bool Whether $url is to something on this wiki farm
+        */
+       private static function isWikiClusterURL( $url, IContextSource $context ) {
+               static $relevantKeys = [ 'host' => true, 'port' => true ];
+
+               $infoCandidate = wfParseUrl( $url );
+               if ( $infoCandidate === false ) {
+                       return false;
+               }
+
+               $infoCandidate = array_intersect_key( $infoCandidate, $relevantKeys );
+               $clusterHosts = array_merge(
+                       // Local wiki host (the most common case)
+                       [ $context->getConfig()->get( 'CanonicalServer' ) ],
+                       // Any local/remote wiki virtual hosts for this wiki farm
+                       $context->getConfig()->get( 'LocalVirtualHosts' )
+               );
+
+               foreach ( $clusterHosts as $clusterHost ) {
+                       $parseUrl = wfParseUrl( $clusterHost );
+                       if ( !$parseUrl ) {
+                               continue;
+                       }
+                       $infoHost = array_intersect_key( $parseUrl, $relevantKeys );
+                       if ( $infoCandidate === $infoHost ) {
+                               return true;
+                       }
+               }
+
+               return false;
+       }
+
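[Annotation: a standalone demo of the comparison isWikiClusterURL() performs, using PHP's parse_url() in place of MediaWiki's wfParseUrl() (which additionally handles protocol-relative URLs). Only the host and port components participate in the match; the hostnames below are placeholders.]

<?php
// Does $candidateUrl point at the same host:port as any cluster URL?
function sameHostAndPort( $candidateUrl, array $clusterUrls ) {
	$keys = [ 'host' => true, 'port' => true ];
	$candidate = parse_url( $candidateUrl );
	if ( $candidate === false ) {
		return false; // an unparseable URL cannot be trusted as local
	}
	$candidate = array_intersect_key( $candidate, $keys );
	foreach ( $clusterUrls as $clusterUrl ) {
		$info = parse_url( $clusterUrl );
		if ( $info && $candidate === array_intersect_key( $info, $keys ) ) {
			return true;
		}
	}
	return false;
}

var_dump( sameHostAndPort(
	'https://en.example.org/w/index.php?title=Foo',
	[ 'https://en.example.org' ]
) ); // bool(true)
var_dump( sameHostAndPort(
	'https://evil.example.com/',
	[ 'https://en.example.org' ]
) ); // bool(false)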
        /**
         * This function does work that can be done *after* the
         * user gets the HTTP response so they don't block on it
@@ -613,10 +678,9 @@ class MediaWiki {
                // Show visible profiling data if enabled (which cannot be post-send)
                Profiler::instance()->logDataPageOutputOnly();
 
-               $that = $this;
-               $callback = function () use ( $that, $mode ) {
+               $callback = function () use ( $mode ) {
                        try {
-                               $that->restInPeace( $mode );
+                               $this->restInPeace( $mode );
                        } catch ( Exception $e ) {
                                MWExceptionHandler::handleException( $e );
                        }
@@ -631,7 +695,7 @@ class MediaWiki {
                                fastcgi_finish_request();
                        } else {
                                // Either all DB and deferred updates should happen or none.
-                               // The later should not be cancelled due to client disconnect.
+                               // The latter should not be cancelled due to client disconnect.
                                ignore_user_abort( true );
                        }
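[Annotation: the generic shape of the post-send pattern used here, as a sketch. Flush the response to the client first if the SAPI supports it (PHP-FPM/FastCGI); otherwise at least guarantee that a client disconnect cannot abort the remaining DB and deferred-update work.]

<?php
// Post-send work: respond first if possible, then finish the bookkeeping.
$postSend = function () {
	// ... jobs, deferred updates, logging ...
};

if ( function_exists( 'fastcgi_finish_request' ) ) {
	fastcgi_finish_request(); // client receives the response now
} else {
	ignore_user_abort( true ); // all-or-nothing: don't die mid-commit
}
$postSend();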
 
@@ -642,6 +706,7 @@ class MediaWiki {
        private function main() {
                global $wgTitle;
 
+               $output = $this->context->getOutput();
                $request = $this->context->getRequest();
 
                // Send Ajax requests to the Ajax dispatcher.
@@ -655,6 +720,7 @@ class MediaWiki {
 
                        $dispatcher = new AjaxDispatcher( $this->config );
                        $dispatcher->performAction( $this->context->getUser() );
+
                        return;
                }
 
@@ -716,11 +782,11 @@ class MediaWiki {
                                // Setup dummy Title, otherwise OutputPage::redirect will fail
                                $title = Title::newFromText( 'REDIR', NS_MAIN );
                                $this->context->setTitle( $title );
-                               $output = $this->context->getOutput();
                                // Since we only do this redir to change proto, always send a vary header
                                $output->addVaryHeader( 'X-Forwarded-Proto' );
                                $output->redirect( $redirUrl );
                                $output->output();
+
                                return;
                        }
                }
@@ -732,14 +798,15 @@ class MediaWiki {
                                if ( $cache->isCacheGood( /* Assume up to date */ ) ) {
                                        // Check incoming headers to see if client has this cached
                                        $timestamp = $cache->cacheTimestamp();
-                                       if ( !$this->context->getOutput()->checkLastModified( $timestamp ) ) {
+                                       if ( !$output->checkLastModified( $timestamp ) ) {
                                                $cache->loadFromFileCache( $this->context );
                                        }
                                        // Do any stats increment/watchlist stuff
                                        // Assume we're viewing the latest revision (this should always be the case with file cache)
                                        $this->context->getWikiPage()->doViewUpdates( $this->context->getUser() );
                                        // Tell OutputPage that output is taken care of
-                                       $this->context->getOutput()->disable();
+                                       $output->disable();
+
                                        return;
                                }
                        }
@@ -748,13 +815,24 @@ class MediaWiki {
                // Actually do the work of the request and build up any output
                $this->performRequest();
 
+               // GUI-ify and stash the page output in MediaWiki::doPreOutputCommit() while
+               // ChronologyProtector synchronizes the DB replication positions across all datacenters.
+               $buffer = null;
+               $outputWork = function () use ( $output, &$buffer ) {
+                       if ( $buffer === null ) {
+                               $buffer = $output->output( true );
+                       }
+
+                       return $buffer;
+               };
+
                // Now commit any transactions, so that unreported errors after
                // output() don't roll back the whole DB transaction and so that
                // we avoid having both success and error text in the response
-               $this->doPreOutputCommit();
+               $this->doPreOutputCommit( $outputWork );
 
-               // Output everything!
-               $this->context->getOutput()->output();
+               // Now send the actual output
+               print $outputWork();
        }
 
        /**
@@ -762,9 +840,9 @@ class MediaWiki {
         * @param string $mode Use 'fast' to always skip job running
         */
        public function restInPeace( $mode = 'fast' ) {
-               $factory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
+               $lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
                // Assure deferred updates are not in the main transaction
-               $factory->commitMasterChanges( __METHOD__ );
+               $lbFactory->commitMasterChanges( __METHOD__ );
 
                // Loosen DB query expectations since the HTTP client is unblocked
                $trxProfiler = Profiler::instance()->getTransactionProfiler();
@@ -790,8 +868,8 @@ class MediaWiki {
                wfLogProfilingData();
 
                // Commit and close up!
-               $factory->commitMasterChanges( __METHOD__ );
-               $factory->shutdown( LBFactory::SHUTDOWN_NO_CHRONPROT );
+               $lbFactory->commitMasterChanges( __METHOD__ );
+               $lbFactory->shutdown( LBFactory::SHUTDOWN_NO_CHRONPROT );
 
                wfDebug( "Request ended normally\n" );
        }
@@ -821,16 +899,18 @@ class MediaWiki {
 
                $runJobsLogger = LoggerFactory::getInstance( 'runJobs' );
 
+               // Fall back to running the job(s) while the user waits if needed
                if ( !$this->config->get( 'RunJobsAsync' ) ) {
-                       // Fall back to running the job here while the user waits
                        $runner = new JobRunner( $runJobsLogger );
-                       $runner->run( [ 'maxJobs'  => $n ] );
+                       $runner->run( [ 'maxJobs' => $n ] );
                        return;
                }
 
+               // Do not send request if there are probably no jobs
                try {
-                       if ( !JobQueueGroup::singleton()->queuesHaveJobs( JobQueueGroup::TYPE_DEFAULT ) ) {
-                               return; // do not send request if there are probably no jobs
+                       $group = JobQueueGroup::singleton();
+                       if ( !$group->queuesHaveJobs( JobQueueGroup::TYPE_DEFAULT ) ) {
+                               return;
                        }
                } catch ( JobQueueError $e ) {
                        MWExceptionHandler::logException( $e );
@@ -844,8 +924,7 @@ class MediaWiki {
 
                $errno = $errstr = null;
                $info = wfParseUrl( $this->config->get( 'CanonicalServer' ) );
-               MediaWiki\suppressWarnings();
-               $host = $info['host'];
+               $host = $info ? $info['host'] : null;
                $port = 80;
                if ( isset( $info['scheme'] ) && $info['scheme'] == 'https' ) {
                        $host = "tls://" . $host;
@@ -854,48 +933,60 @@ class MediaWiki {
                if ( isset( $info['port'] ) ) {
                        $port = $info['port'];
                }
-               $sock = fsockopen(
+
+               MediaWiki\suppressWarnings();
+               $sock = $host ? fsockopen(
                        $host,
                        $port,
                        $errno,
                        $errstr,
-                       // If it takes more than 100ms to connect to ourselves there
-                       // is a problem elsewhere.
-                       0.1
-               );
+                       // If it takes more than 100ms to connect to ourselves there is a problem...
+                       0.100
+               ) : false;
                MediaWiki\restoreWarnings();
-               if ( !$sock ) {
+
+               $invokedWithSuccess = true;
+               if ( $sock ) {
+                       $special = SpecialPageFactory::getPage( 'RunJobs' );
+                       $url = $special->getPageTitle()->getCanonicalURL( $query );
+                       $req = (
+                               "POST $url HTTP/1.1\r\n" .
+                               "Host: {$info['host']}\r\n" .
+                               "Connection: Close\r\n" .
+                               "Content-Length: 0\r\n\r\n"
+                       );
+
+                       $runJobsLogger->info( "Running $n job(s) via '$url'" );
+                       // Send a cron API request to be performed in the background.
+                       // Give up if this takes too long to send (which should be rare).
+                       stream_set_timeout( $sock, 2 );
+                       $bytes = fwrite( $sock, $req );
+                       if ( $bytes !== strlen( $req ) ) {
+                               $invokedWithSuccess = false;
+                               $runJobsLogger->error( "Failed to start cron API (socket write error)" );
+                       } else {
+                               // Do not wait for the response (the script should handle client aborts).
+                               // Make sure that we don't close before that script reaches ignore_user_abort().
+                               $start = microtime( true );
+                               $status = fgets( $sock );
+                               $sec = microtime( true ) - $start;
+                               if ( !preg_match( '#^HTTP/\d\.\d 202 #', $status ) ) {
+                                       $invokedWithSuccess = false;
+                                       $runJobsLogger->error( "Failed to start cron API: received '$status' ($sec seconds)" );
+                               }
+                       }
+                       fclose( $sock );
+               } else {
+                       $invokedWithSuccess = false;
                        $runJobsLogger->error( "Failed to start cron API (socket error $errno): $errstr" );
-                       // Fall back to running the job here while the user waits
-                       $runner = new JobRunner( $runJobsLogger );
-                       $runner->run( [ 'maxJobs'  => $n ] );
-                       return;
                }
 
-               $special = SpecialPageFactory::getPage( 'RunJobs' );
-               $url = $special->getPageTitle()->getCanonicalURL( $query );
-               $req = (
-                       "POST $url HTTP/1.1\r\n" .
-                       "Host: {$info['host']}\r\n" .
-                       "Connection: Close\r\n" .
-                       "Content-Length: 0\r\n\r\n"
-               );
+               // Fall back to running the job(s) while the user waits if needed
+               if ( !$invokedWithSuccess ) {
+                       $runJobsLogger->warning( "Jobs switched to blocking; Special:RunJobs disabled" );
 
-               $runJobsLogger->info( "Running $n job(s) via '$url'" );
-               // Send a cron API request to be performed in the background.
-               // Give up if this takes too long to send (which should be rare).
-               stream_set_timeout( $sock, 2 );
-               $bytes = fwrite( $sock, $req );
-               if ( $bytes !== strlen( $req ) ) {
-                       $runJobsLogger->error( "Failed to start cron API (socket write error)" );
-               } else {
-                       // Do not wait for the response (the script should handle client aborts).
-                       // Make sure that we don't close before that script reaches ignore_user_abort().
-                       $status = fgets( $sock );
-                       if ( !preg_match( '#^HTTP/\d\.\d 202 #', $status ) ) {
-                               $runJobsLogger->error( "Failed to start cron API: received '$status'" );
-                       }
+                       $runner = new JobRunner( $runJobsLogger );
+                       $runner->run( [ 'maxJobs' => $n ] );
                }
-               fclose( $sock );
        }
 }
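[Annotation: a standalone sketch of the fire-and-forget trigger used in triggerJobs(): connect, POST with an empty body, read only the status line to confirm the remote handler got far enough (HTTP 202, i.e. past ignore_user_abort()), and never wait for the response body. The host and path below are placeholders, not MediaWiki configuration.]

<?php
$host = 'localhost';
$path = '/wiki/Special:RunJobs';
$errno = $errstr = null;
$ok = false;

$sock = @fsockopen( $host, 80, $errno, $errstr, 0.100 ); // 100ms connect limit
if ( $sock ) {
	$req = "POST $path HTTP/1.1\r\n" .
		"Host: $host\r\n" .
		"Connection: Close\r\n" .
		"Content-Length: 0\r\n\r\n";
	stream_set_timeout( $sock, 2 ); // also bound the write and the status read
	if ( fwrite( $sock, $req ) === strlen( $req ) ) {
		$status = fgets( $sock ); // e.g. "HTTP/1.1 202 Accepted\r\n"
		$ok = $status && preg_match( '#^HTTP/\d\.\d 202 #', $status );
	}
	fclose( $sock );
}

echo $ok ? "triggered\n" : "falling back to in-process run\n";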