Make adaptiveTTL() less strict about $mtime type
diff --git a/includes/MediaWiki.php b/includes/MediaWiki.php
index 2a00900..77a1969 100644
--- a/includes/MediaWiki.php
+++ b/includes/MediaWiki.php
@@ -286,16 +286,6 @@ class MediaWiki {
                                // may still be a wikipage redirect to another article or URL.
                                $article = $this->initializeArticle();
                                if ( is_object( $article ) ) {
-                                       $url = $request->getFullRequestURL(); // requested URL
-                                       if (
-                                               $request->getMethod() === 'GET' &&
-                                               $url === $article->getTitle()->getCanonicalURL() &&
-                                               $article->checkTouched() &&
-                                               $output->checkLastModified( $article->getTouched() )
-                                       ) {
-                                               wfDebug( __METHOD__ . ": done 304\n" );
-                                               return;
-                                       }
                                        $this->performAction( $article, $requestTitle );
                                } elseif ( is_string( $article ) ) {
                                        $output->redirect( $article );
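
The block removed above answered canonical GET requests with "304 Not
Modified" when the page had not been touched since the client's cached
copy (the real check lives in OutputPage::checkLastModified()). As a
minimal standalone sketch of that conditional-GET pattern, using a
hypothetical helper name rather than the MediaWiki API:

    // Reply 304 if the client's cached copy is still fresh (hypothetical helper).
    function maybeSend304( $ifModifiedSinceHeader, $pageTouchedUnixTime ) {
        $since = strtotime( $ifModifiedSinceHeader ); // RFC 1123 date, or false
        if ( $since !== false && $since >= $pageTouchedUnixTime ) {
            http_response_code( 304 ); // no body; the client reuses its cache
            return true; // caller should skip rendering entirely
        }
        return false;
    }
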
@@ -571,13 +561,14 @@ class MediaWiki {
                        // Abort if any transaction was too big
                        [ 'maxWriteDuration' => $config->get( 'MaxUserDBWriteDuration' ) ]
                );
-               // Record ChronologyProtector positions
-               $factory->shutdown();
-               wfDebug( __METHOD__ . ': all transactions committed' );
 
                DeferredUpdates::doUpdates( 'enqueue', DeferredUpdates::PRESEND );
                wfDebug( __METHOD__ . ': pre-send deferred updates completed' );
 
+               // Record ChronologyProtector positions
+               $factory->shutdown();
+               wfDebug( __METHOD__ . ': all transactions committed' );
+
                // Set a cookie to tell all CDN edge nodes to "stick" the user to the DC that handles this
                // POST request (e.g. the "master" data center). Also have the user briefly bypass CDN so
                // ChronologyProtector works for cacheable URLs.
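
Moving $factory->shutdown() below the PRESEND deferred updates matters for
ordering: those updates may still write to the database, and shutdown() is
what records the ChronologyProtector positions that the client's next
request must "see". A toy sketch of position recording under that
assumption (hypothetical function, not the real ChronologyProtector API):

    // Record master positions only after ALL writes, including deferred
    // updates, have committed; recording earlier would let the next request
    // read a replica that lacks the deferred writes.
    function recordChronology( BagOStuff $store, $clientId, array $masterPositions ) {
        $store->set( "chronology-positions:$clientId", $masterPositions, 60 );
    }
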
@@ -589,9 +580,9 @@ class MediaWiki {
                        $request->response()->setCookie( 'UseCDNCache', 'false', $expires, $options );
                }
 
-               // Avoid letting a few seconds of slave lag cause a month of stale data. This logic is
+               // Avoid letting a few seconds of replica DB lag cause a month of stale data. This logic is
                // also intimately related to the value of $wgCdnReboundPurgeDelay.
-               if ( $factory->laggedSlaveUsed() ) {
+               if ( $factory->laggedReplicaUsed() ) {
                        $maxAge = $config->get( 'CdnMaxageLagged' );
                        $context->getOutput()->lowerCdnMaxage( $maxAge );
                        $request->response()->header( "X-Database-Lagged: true" );
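
Lowering the CDN max-age trades hit rate for freshness: if a lagged
replica was read, the rendered page may already be stale, so it should
only be cached briefly (CdnMaxageLagged, around 30 seconds by default)
rather than for the normal long TTL. Sketched as plain header output with
illustrative values; the real logic goes through OutputPage::lowerCdnMaxage():

    // Cap the shared-cache TTL when lagged replica reads were used.
    $sMaxAge = $laggedReplicaUsed ? 31 : 2678400; // ~30 seconds vs. one month
    header( "Cache-Control: s-maxage=$sMaxAge, must-revalidate, max-age=0" );
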
@@ -831,16 +822,18 @@ class MediaWiki {
 
                $runJobsLogger = LoggerFactory::getInstance( 'runJobs' );
 
+               // Fall back to running the job(s) while the user waits if needed
                if ( !$this->config->get( 'RunJobsAsync' ) ) {
-                       // Fall back to running the job here while the user waits
                        $runner = new JobRunner( $runJobsLogger );
-                       $runner->run( [ 'maxJobs'  => $n ] );
+                       $runner->run( [ 'maxJobs' => $n ] );
                        return;
                }
 
+               // Do not send request if there are probably no jobs
                try {
-                       if ( !JobQueueGroup::singleton()->queuesHaveJobs( JobQueueGroup::TYPE_DEFAULT ) ) {
-                               return; // do not send request if there are probably no jobs
+                       $group = JobQueueGroup::singleton();
+                       if ( !$group->queuesHaveJobs( JobQueueGroup::TYPE_DEFAULT ) ) {
+                               return;
                        }
                } catch ( JobQueueError $e ) {
                        MWExceptionHandler::logException( $e );
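
The "probably" in the relocated comment is deliberate: queuesHaveJobs()
may consult a cached aggregate rather than polling every queue, so a stale
answer is possible but harmless; a false "no jobs" merely defers the work
to a later request, and a JobQueueError is logged while execution falls
through to sending the request anyway. Usage sketch of the pre-flight
check as restructured above:

    // Cheap pre-flight: skip the HTTP round trip when the default queues
    // look empty (a cached, possibly stale, answer is acceptable here).
    $group = JobQueueGroup::singleton();
    $probablyHasJobs = $group->queuesHaveJobs( JobQueueGroup::TYPE_DEFAULT );
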
@@ -854,8 +847,7 @@ class MediaWiki {
 
                $errno = $errstr = null;
                $info = wfParseUrl( $this->config->get( 'CanonicalServer' ) );
-               MediaWiki\suppressWarnings();
-               $host = $info['host'];
+               $host = $info ? $info['host'] : null;
                $port = 80;
                if ( isset( $info['scheme'] ) && $info['scheme'] == 'https' ) {
                        $host = "tls://" . $host;
@@ -864,48 +856,60 @@ class MediaWiki {
                if ( isset( $info['port'] ) ) {
                        $port = $info['port'];
                }
-               $sock = fsockopen(
+
+               MediaWiki\suppressWarnings();
+               $sock = $host ? fsockopen(
                        $host,
                        $port,
                        $errno,
                        $errstr,
-                       // If it takes more than 100ms to connect to ourselves there
-                       // is a problem elsewhere.
-                       0.1
-               );
+                       // If it takes more than 100ms to connect to ourselves there is a problem...
+                       0.100
+               ) : false;
                MediaWiki\restoreWarnings();
-               if ( !$sock ) {
+
+               $invokedWithSuccess = true;
+               if ( $sock ) {
+                       $special = SpecialPageFactory::getPage( 'RunJobs' );
+                       $url = $special->getPageTitle()->getCanonicalURL( $query );
+                       $req = (
+                               "POST $url HTTP/1.1\r\n" .
+                               "Host: {$info['host']}\r\n" .
+                               "Connection: Close\r\n" .
+                               "Content-Length: 0\r\n\r\n"
+                       );
+
+                       $runJobsLogger->info( "Running $n job(s) via '$url'" );
+                       // Send a cron API request to be performed in the background.
+                       // Give up if this takes too long to send (which should be rare).
+                       stream_set_timeout( $sock, 2 );
+                       $bytes = fwrite( $sock, $req );
+                       if ( $bytes !== strlen( $req ) ) {
+                               $invokedWithSuccess = false;
+                               $runJobsLogger->error( "Failed to start cron API (socket write error)" );
+                       } else {
+                               // Do not wait for the response (the script should handle client aborts).
+                               // Make sure that we don't close before that script reaches ignore_user_abort().
+                               $start = microtime( true );
+                               $status = fgets( $sock );
+                               $sec = microtime( true ) - $start;
+                               if ( !preg_match( '#^HTTP/\d\.\d 202 #', $status ) ) {
+                                       $invokedWithSuccess = false;
+                                       $runJobsLogger->error( "Failed to start cron API: received '$status' ($sec)" );
+                               }
+                       }
+                       fclose( $sock );
+               } else {
+                       $invokedWithSuccess = false;
                        $runJobsLogger->error( "Failed to start cron API (socket error $errno): $errstr" );
-                       // Fall back to running the job here while the user waits
-                       $runner = new JobRunner( $runJobsLogger );
-                       $runner->run( [ 'maxJobs'  => $n ] );
-                       return;
                }
 
-               $special = SpecialPageFactory::getPage( 'RunJobs' );
-               $url = $special->getPageTitle()->getCanonicalURL( $query );
-               $req = (
-                       "POST $url HTTP/1.1\r\n" .
-                       "Host: {$info['host']}\r\n" .
-                       "Connection: Close\r\n" .
-                       "Content-Length: 0\r\n\r\n"
-               );
+               // Fall back to running the job(s) while the user waits if needed
+               if ( !$invokedWithSuccess ) {
+                       $runJobsLogger->warning( "Jobs switched to blocking; Special:RunJobs disabled" );
 
-               $runJobsLogger->info( "Running $n job(s) via '$url'" );
-               // Send a cron API request to be performed in the background.
-               // Give up if this takes too long to send (which should be rare).
-               stream_set_timeout( $sock, 2 );
-               $bytes = fwrite( $sock, $req );
-               if ( $bytes !== strlen( $req ) ) {
-                       $runJobsLogger->error( "Failed to start cron API (socket write error)" );
-               } else {
-                       // Do not wait for the response (the script should handle client aborts).
-                       // Make sure that we don't close before that script reaches ignore_user_abort().
-                       $status = fgets( $sock );
-                       if ( !preg_match( '#^HTTP/\d\.\d 202 #', $status ) ) {
-                               $runJobsLogger->error( "Failed to start cron API: received '$status'" );
-                       }
+                       $runner = new JobRunner( $runJobsLogger );
+                       $runner->run( [ 'maxJobs' => $n ] );
                }
-               fclose( $sock );
        }
 }
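
Taken together, the restructured triggerJobs() is a fire-and-forget POST
with a handshake: connect fast (100 ms), write the request, read only the
HTTP status line to confirm that the job-running endpoint answered 202 and
reached ignore_user_abort(), and fall back to a blocking in-process
JobRunner on any failure. A condensed standalone sketch of the socket
pattern, with a hypothetical host and path:

    $sock = @fsockopen( 'tls://example.org', 443, $errno, $errstr, 0.100 );
    $ok = false;
    if ( $sock ) {
        $req = "POST /rpc/RunJobs HTTP/1.1\r\nHost: example.org\r\n" .
            "Connection: Close\r\nContent-Length: 0\r\n\r\n";
        stream_set_timeout( $sock, 2 ); // bound both the write and the status read
        if ( fwrite( $sock, $req ) === strlen( $req ) ) {
            // Read just the status line; do not wait for the response body.
            $ok = (bool)preg_match( '#^HTTP/\d\.\d 202 #', (string)fgets( $sock ) );
        }
        fclose( $sock );
    }
    // if ( !$ok ) { ...run the jobs synchronously instead... }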