/* WHERE */ [ 'cl_from' => $this->next ],
__METHOD__ . '-1'
);
- foreach ( $res as $o ) {
- $k = $o->cl_to;
+ foreach ( $res as $row ) {
+ $k = $row->cl_to;
# Update parent tree
- if ( !isset( $this->parents[$o->cl_from] ) ) {
- $this->parents[$o->cl_from] = [];
+ if ( !isset( $this->parents[$row->cl_from] ) ) {
+ $this->parents[$row->cl_from] = [];
}
- $this->parents[$o->cl_from][$k] = $o;
+ $this->parents[$row->cl_from][$k] = $row;
# Ignore those we already have
if ( in_array( $k, $this->deadend ) ) {
/* WHERE */ [ 'page_namespace' => NS_CATEGORY, 'page_title' => $layer ],
__METHOD__ . '-2'
);
- foreach ( $res as $o ) {
- $id = $o->page_id;
- $name = $o->page_title;
+ foreach ( $res as $row ) {
+ $id = $row->page_id;
+ $name = $row->page_title;
$this->name2id[$name] = $id;
$this->next[] = $id;
unset( $layer[$name] );
$wgEnableWANCacheReaper = false;
/**
- * Main object stash type. This should be a fast storage system for storing
- * lightweight data like hit counters and user activity. Sites with multiple
- * data-centers should have this use a store that replicates all writes. The
- * store should have enough consistency for CAS operations to be usable.
- * Reads outside of those needed for merge() may be eventually consistent.
+ * The object store type of the main stash.
+ *
+ * This store should be a very fast storage system optimized for holding lightweight data
+ * like incrementable hit counters and current user activity. The store should replicate the
+ * dataset among all data-centers. Any add(), merge(), lock(), and unlock() operations should
+ * maintain "best effort" linearizability; as long as connectivity is strong, latency is low,
+ * and there is no eviction pressure prompted by low free space, those operations should be
+ * linearizable. In terms of PACELC (https://en.wikipedia.org/wiki/PACELC_theorem), the store
+ * should act as a PA/EL distributed system for these operations. One optimization for these
+ * operations is to route them to a "primary" data-center (e.g. one that serves HTTP POST) for
+ * synchronous execution and then replicate to the others asynchronously. This means that at
+ * least calls to these operations during HTTP POST requests would quickly return.
+ *
+ * All other operations, such as get(), set(), delete(), changeTTL(), incr(), and decr(),
+ * should be synchronous in the local data-center, replicating asynchronously to the others.
+ * This behavior can be overridden by the use of the WRITE_SYNC and READ_LATEST flags.
+ *
+ * The store should *preferably* have eventual consistency to handle network partitions.
+ *
+ * Modules that rely on the stash should be prepared for:
+ * - add(), merge(), lock(), and unlock() to be slower than other write operations,
+ * at least in "secondary" data-centers (e.g. one that only serves HTTP GET/HEAD)
+ * - Other write operations to have race conditions across data-centers
+ * - Read operations to have race conditions across data-centers
+ * - Consistency to be either eventual (with Last-Write-Wins) or just "best effort"
+ *
+ * In general, this means avoiding updates during idempotent HTTP requests (GET/HEAD) and
+ * avoiding assumptions of true linearizability (e.g. accepting anomalies). Modules that need
+ * these kind of guarantees should use other storage mediums.
*
* The options are:
* - db: Store cache objects in the DB
* Returns an HTML link element in a string styled as a button
* (when $wgUseMediaWikiUIEverywhere is enabled).
*
- * @param string $contents The raw HTML contents of the element: *not*
- * escaped!
+ * @param string $text The text of the element. Will be escaped (not raw HTML)
* @param array $attrs Associative array of attributes, e.g., [
* 'href' => 'https://www.mediawiki.org/' ]. See expandAttributes() for
* further documentation.
* @see https://tools.wmflabs.org/styleguide/desktop/index.html for guidance on available modifiers
* @return string Raw HTML
*/
- public static function linkButton( $contents, array $attrs, array $modifiers = [] ) {
+ public static function linkButton( $text, array $attrs, array $modifiers = [] ) {
return self::element( 'a',
self::buttonAttributes( $attrs, $modifiers ),
- $contents
+ $text
);
}
$this->server,
$this->user,
$this->password,
- $this->getDBname(),
- $this->dbSchema(),
+ $this->currentDomain->getDatabase(),
+ $this->currentDomain->getSchema(),
$this->tablePrefix()
);
$this->lastPing = microtime( true );
$this->server,
$this->user,
$this->password,
- $this->getDBname(),
- $this->dbSchema(),
+ $this->currentDomain->getDatabase(),
+ $this->currentDomain->getSchema(),
$this->tablePrefix()
);
$this->lastPing = microtime( true );
/**
* Get the cache object for the main stash.
*
- * Stash objects are BagOStuff instances suitable for storing light
- * weight data that is not canonically stored elsewhere (such as RDBMS).
- * Stashes should be configured to propagate changes to all data-centers.
- *
- * Callers should be prepared for:
- * - a) Writes to be slower in non-"primary" (e.g. HTTP GET/HEAD only) DCs
- * - b) Reads to be eventually consistent, e.g. for get()/getMulti()
- * In general, this means avoiding updates on idempotent HTTP requests and
- * avoiding an assumption of perfect serializability (or accepting anomalies).
- * Reads may be eventually consistent or data might rollback as nodes flap.
- * Callers can use BagOStuff:READ_LATEST to see the latest available data.
- *
* @return BagOStuff
* @since 1.26
* @deprecated Since 1.28 Use MediaWikiServices::getInstance()->getMainObjectStash()
/**
* Rebuild pass 1: Insert `recentchanges` entries for page revisions.
+ *
+ * @param ILBFactory $lbFactory
*/
private function rebuildRecentChangesTablePass1( ILBFactory $lbFactory ) {
$dbw = $this->getDB( DB_MASTER );
/**
* Rebuild pass 2: Enhance entries for page revisions with references to the previous revision
* (rc_last_oldid, rc_new etc.) and size differences (rc_old_len, rc_new_len).
+ *
+ * @param ILBFactory $lbFactory
*/
private function rebuildRecentChangesTablePass2( ILBFactory $lbFactory ) {
$dbw = $this->getDB( DB_MASTER );
$lastOldId = 0;
$lastSize = null;
$updated = 0;
- foreach ( $res as $obj ) {
+ foreach ( $res as $row ) {
$new = 0;
- if ( $obj->rc_cur_id != $lastCurId ) {
+ if ( $row->rc_cur_id != $lastCurId ) {
# Switch! Look up the previous last edit, if any
- $lastCurId = intval( $obj->rc_cur_id );
- $emit = $obj->rc_timestamp;
+ $lastCurId = intval( $row->rc_cur_id );
+ $emit = $row->rc_timestamp;
- $row = $dbw->selectRow(
+ $revRow = $dbw->selectRow(
'revision',
[ 'rev_id', 'rev_len' ],
[ 'rev_page' => $lastCurId, "rev_timestamp < " . $dbw->addQuotes( $emit ) ],
__METHOD__,
[ 'ORDER BY' => 'rev_timestamp DESC' ]
);
- if ( $row ) {
- $lastOldId = intval( $row->rev_id );
+ if ( $revRow ) {
+ $lastOldId = intval( $revRow->rev_id );
# Grab the last text size if available
- $lastSize = !is_null( $row->rev_len ) ? intval( $row->rev_len ) : null;
+ $lastSize = !is_null( $revRow->rev_len ) ? intval( $revRow->rev_len ) : null;
} else {
# No previous edit
$lastOldId = 0;
$size = (int)$dbw->selectField(
'revision',
'rev_len',
- [ 'rev_id' => $obj->rc_this_oldid ],
+ [ 'rev_id' => $row->rc_this_oldid ],
__METHOD__
);
],
[
'rc_cur_id' => $lastCurId,
- 'rc_this_oldid' => $obj->rc_this_oldid,
- 'rc_timestamp' => $obj->rc_timestamp // index usage
+ 'rc_this_oldid' => $row->rc_this_oldid,
+ 'rc_timestamp' => $row->rc_timestamp // index usage
],
__METHOD__
);
- $lastOldId = intval( $obj->rc_this_oldid );
+ $lastOldId = intval( $row->rc_this_oldid );
$lastSize = $size;
if ( ( ++$updated % $this->getBatchSize() ) == 0 ) {
/**
* Rebuild pass 3: Insert `recentchanges` entries for action logs.
+ *
+ * @param ILBFactory $lbFactory
*/
private function rebuildRecentChangesTablePass3( ILBFactory $lbFactory ) {
global $wgLogRestrictions, $wgFilterLogTypes;
/**
* Rebuild pass 4: Mark bot and autopatrolled entries.
+ *
+ * @param ILBFactory $lbFactory
*/
private function rebuildRecentChangesTablePass4( ILBFactory $lbFactory ) {
global $wgUseRCPatrol, $wgMiserMode;
);
$botusers = [];
- foreach ( $res as $obj ) {
- $botusers[] = User::newFromRow( $obj );
+ foreach ( $res as $row ) {
+ $botusers[] = User::newFromRow( $row );
}
# Fill in the rc_bot field
[ 'user_groups' => [ 'JOIN', 'user_id = ug_user' ] ] + $userQuery['joins']
);
- foreach ( $res as $obj ) {
- $patrolusers[] = User::newFromRow( $obj );
+ foreach ( $res as $row ) {
+ $patrolusers[] = User::newFromRow( $row );
}
# Fill in the rc_patrolled field
}
/**
- * Rebuild pass 5: Delete duplicate entries where we generate both a page revision and a log entry
- * for a single action (upload only, at the moment, but potentially also move, protect, ...).
+ * Rebuild pass 5: Delete duplicate entries where we generate both a page revision and a log
+ * entry for a single action (upload only, at the moment, but potentially move, protect, ...).
+ *
+ * @param ILBFactory $lbFactory
*/
private function rebuildRecentChangesTablePass5( ILBFactory $lbFactory ) {
$dbw = wfGetDB( DB_MASTER );
);
$updates = 0;
- foreach ( $res as $obj ) {
- $rev_id = $obj->ls_value;
- $log_id = $obj->ls_log_id;
+ foreach ( $res as $row ) {
+ $rev_id = $row->ls_value;
+ $log_id = $row->ls_log_id;
// Mark the logging row as having an associated rev id
$dbw->update(
$hashes = [];
$maxSize = 0;
- foreach ( $res as $boRow ) {
- $extDB = $this->getDB( $boRow->bo_cluster );
+ foreach ( $res as $row ) {
+ $extDB = $this->getDB( $row->bo_cluster );
$blobRow = $extDB->selectRow(
'blobs',
'*',
- [ 'blob_id' => $boRow->bo_blob_id ],
+ [ 'blob_id' => $row->bo_blob_id ],
__METHOD__
);