$id = $title->getArticleID();
$dbr =& wfGetDB( DB_SLAVE );
- $links = $dbr->tableName( 'links' );
- $page = $dbr->tableName( 'page' );
-
$res = $dbr->select( array( 'links', 'page' ),
array( 'page_namespace', 'page_title' ),
array(
if ( $dbr->numRows( $res ) <= $this->mMaxTitles ) {
while ( $BL = $dbr->fetchObject ( $res ) )
{
- $tobj = Title::makeTitle( $BL->page_namespace, $BL->page_title ) ;
+ $tobj = Title::makeTitle( $BL->page_namespace, $BL->page_title ) ;
$blurlArr[] = $tobj->getInternalURL();
}
}
}
/* Purges a list of Squids defined in $wgSquidServers.
- $urlArr should contain the full URLs to purge as values
+ $urlArr should contain the full URLs to purge as values
(example: $urlArr[] = 'http://my.host/something')
XXX report broken Squids per mail or log */
$fname = 'SquidUpdate::purge';
wfProfileIn( $fname );
-
+
$maxsocketspersquid = 8; // socket cap per Squid
$urlspersocket = 400; // 400 seems to be a good tradeoff, opening a socket takes a while
- $firsturl = $urlArr[0];
+ $firsturl = SquidUpdate::expand( $urlArr[0] );
unset($urlArr[0]);
$urlArr = array_values($urlArr);
$sockspersq = max(ceil(count($urlArr) / $urlspersocket ),1);
@list($server, $port) = explode(':', $wgSquidServers[$ss]);
if(!isset($port)) $port = 80;
#$this->debug("Opening socket to $server:$port");
+ $error = $errstr = false;
$socket = @fsockopen($server, $port, $error, $errstr, 3);
#$this->debug("\n");
if (!$socket) {
#$this->debug("...");
$res = @fread($socket,512);
#$this->debug("\n");
- /* Squid only returns http headers with 200 or 404 status,
+ /* Squid only returns http headers with 200 or 404 status,
if there's more returned something's wrong */
if (strlen($res) > 250) {
fclose($socket);
@stream_set_blocking($socket,false);
$sockets[] = $socket;
}
- }
+ }
} else {
/* open the remaining sockets for this server */
list($server, $port) = explode(':', $wgSquidServers[$ss]);
if(!isset($port)) $port = 80;
- $sockets[] = @fsockopen($server, $port, $error, $errstr, 2);
- @stream_set_blocking($sockets[$s],false);
+ $sockets[$so+1] = @fsockopen($server, $port, $error, $errstr, 2);
+ @stream_set_blocking($sockets[$so+1],false);
}
$so++;
}
}
}
$urindex = $r + $urlspersocket * ($s - $sockspersq * floor($s / $sockspersq));
- $msg = 'PURGE ' . $urlArr[$urindex] . " HTTP/1.0\r\n".
+ $url = SquidUpdate::expand( $urlArr[$urindex] );
+ $msg = 'PURGE ' . $url . " HTTP/1.0\r\n".
"Connection: Keep-Alive\r\n\r\n";
#$this->debug($msg);
@fputs($sockets[$s],$msg);
$wgHTCPMulticastTTL );
foreach ( $urlArr as $url ) {
+ $url = SquidUpdate::expand( $url );
+
// Construct a minimal HTCP request diagram
// as per RFC 2756
// Opcode 'CLR', no response desired, no auth
wfDebug( $text );
}
}
+
+ /**
+  * Expand local URLs to fully-qualified URLs using the internal protocol
+  * and host defined in $wgInternalServer. Input that's already fully-
+  * qualified will be passed through unchanged.
+  *
+  * This is used to generate purge URLs that may be either local to the
+  * main wiki or include a non-native host, such as images hosted on a
+  * second internal server.
+  *
+  * Client functions should not need to call this.
+  *
+  * @param string $url Server-relative path (leading '/') or full URL
+  * @return string Fully-qualified URL
+  */
+ static function expand( $url ) {
+ 	global $wgInternalServer;
+ 	// Square-bracket offset: the curly-brace form $url{0} was
+ 	// deprecated in PHP 7.4 and removed entirely in PHP 8.0.
+ 	// Strict comparisons are safe here: both operands are strings.
+ 	if( $url !== '' && $url[0] === '/' ) {
+ 		return $wgInternalServer . $url;
+ 	}
+ 	return $url;
+ }
}
?>