* @param array $ipChain List of IPs (strings), usually retrieved from the
* X-Forwarded-For header of the request
* @param bool $isAnon Exclude anonymous-only blocks if false
- * @param bool $fromMaster Whether to query the master or slave database
+ * @param bool $fromMaster Whether to query the master or replica DB
* @return array Array of Blocks
* @since 1.22
*/
/** @var string "AND" or "OR" */
protected $mode;
- /** @var IDatabase Read-DB slave */
+ /** @var IDatabase Read-DB replica DB */
protected $dbr;
/**
* - DBO_COMPRESS -- uses internal compression in database connections,
* if available
*
- * - max lag: (optional) Maximum replication lag before a slave will taken out of rotation
+ * - max lag: (optional) Maximum replication lag before a replica DB goes out of rotation
* - is static: (optional) Set to true if the dataset is static and no replication is used.
* - cliMode: (optional) Connection handles will not assume that requests are short-lived
* nor that INSERT..SELECT can be rewritten into a buffered SELECT and INSERT.
* perhaps in some command-line scripts).
*
* The first server listed in this array (with key 0) will be the master. The
- * rest of the servers will be slaves. To prevent writes to your slaves due to
+ * rest of the servers will be replica DBs. To prevent writes to your replica DBs due to
* accidental misconfiguration or MediaWiki bugs, set read_only=1 on all your
- * slaves in my.cnf. You can set read_only mode at runtime using:
+ * replica DBs in my.cnf. You can set read_only mode at runtime using:
*
* @code
* SET @@read_only=1;
* @endcode
*
- * Since the effect of writing to a slave is so damaging and difficult to clean
+ * Since the effect of writing to a replica DB is so damaging and difficult to clean
* up, we at Wikimedia set read_only=1 in my.cnf on all our DB servers, even
* our masters, and then set read_only=0 on masters at runtime.
*/
$wgSquidMaxage = 18000;
/**
- * Cache timeout for the CDN when DB slave lag is high
+ * Cache timeout for the CDN when replica DB lag is high
* @see $wgSquidMaxage
* @since 1.27
*/
* If set, any SquidPurge call on a URL or URLs will send a second purge no less than
* this many seconds later via the job queue. This requires delayed job support.
* This should be safely higher than the 'max lag' value in $wgLBFactoryConf, so that
- * slave lag does not cause page to be stuck in stales states in CDN.
+ * replica DB lag does not cause pages to be stuck in stale states in CDN.
*
* This also fixes race conditions in two-tiered CDN setups (e.g. cdn2 => cdn1 => MediaWiki).
* If a purge for a URL reaches cdn2 before cdn1 and a request reaches cdn2 for that URL,
$wgJobBackoffThrottling = [];
/**
- * Make job runners commit changes for slave-lag prone jobs one job at a time.
- * This is useful if there are many job workers that race on slave lag checks.
+ * Make job runners commit changes for replica DB-lag prone jobs one job at a time.
+ * This is useful if there are many job workers that race on replica DB lag checks.
* If set, jobs taking this many seconds of DB write time have serialized commits.
*
* Note that affected jobs may have worse lock contention. Also, if they affect
$wgAPIMaxUncachedDiffs = 1;
/**
- * Maximum amount of DB lag on a majority of DB slaves to tolerate
+ * Maximum amount of DB lag on a majority of replica DBs to tolerate
* before forcing bots to retry any write requests via API errors.
* This should be lower than the 'max lag' value in $wgLBFactoryConf.
*/
* Valid database indexes
* Operation-based indexes
*/
-define( 'DB_SLAVE', -1 ); # Read from the slave (or only server)
+define( 'DB_SLAVE', -1 ); # Read from the replica DB (or only server)
define( 'DB_MASTER', -2 ); # Write to master (or only server)
/**@}*/
* Check if the site is in read-only mode and return the message if so
*
* This checks wfConfiguredReadOnlyReason() and the main load balancer
- * for slave lag. This may result in DB_SLAVE connection being made.
+ * for replica DB lag. This may result in DB_SLAVE connection being made.
*
* @return string|bool String when in read-only mode; false otherwise
*/
}
/**
- * Waits for the slaves to catch up to the master position
+ * Waits for the replica DBs to catch up to the master position
*
* Use this when updating very large numbers of rows, as in maintenance scripts,
- * to avoid causing too much lag. Of course, this is a no-op if there are no slaves.
+ * to avoid causing too much lag. Of course, this is a no-op if there are no replica DBs.
*
* By default this waits on the main DB cluster of the current wiki.
* If $cluster is set to "*" it will wait on all DB clusters, including
$request->response()->setCookie( 'UseCDNCache', 'false', $expires, $options );
}
- // Avoid letting a few seconds of slave lag cause a month of stale data. This logic is
+ // Avoid letting a few seconds of replica DB lag cause a month of stale data. This logic is
// also intimately related to the value of $wgCdnReboundPurgeDelay.
if ( $factory->laggedSlaveUsed() ) {
$maxAge = $config->get( 'CdnMaxageLagged' );
*/
class MergeHistory {
- /** @const int Maximum number of revisions that can be merged at once (avoid too much slave lag) */
+ /** @const int Maximum number of revisions that can be merged at once */
const REVISION_LIMIT = 5000;
/** @var Title Page from which history will be merged */
}
/**
- * Show a warning about slave lag
+ * Show a warning about replica DB lag
*
* If the lag is higher than $wgSlaveLagCritical seconds,
* then the warning is a bit more obvious. If the lag is
* Given a set of conditions, fetch a revision
*
 * This method is used when a revision ID is qualified and
- * will incorporate some basic slave/master fallback logic
+ * will incorporate some basic replica DB/master fallback logic
*
* @param array $conditions
* @param int $flags (optional)
}
if ( !$row ) {
- // Text data is immutable; check slaves first.
+ // Text data is immutable; check replica DBs first.
$dbr = wfGetDB( DB_SLAVE );
$row = $dbr->selectRow( 'text',
[ 'old_text', 'old_flags' ],
__METHOD__ );
}
- // Fallback to the master in case of slave lag. Also use FOR UPDATE if it was
+ // Fallback to the master in case of replica DB lag. Also use FOR UPDATE if it was
// used to fetch this revision to avoid missing the row due to REPEATABLE-READ.
$forUpdate = ( $this->mQueryFlags & self::READ_LOCKING == self::READ_LOCKING );
if ( !$row && ( $forUpdate || wfGetLB()->getServerCount() > 1 ) ) {
wfDebugLog( 'Revision', "No blob for text row '$textId' (revision {$this->getId()})." );
}
- # No negative caching -- negative hits on text rows may be due to corrupted slave servers
+ # No negative caching -- negative hits on text rows may be due to corrupted replica DB servers
if ( $wgRevisionCacheExpiry && $text !== false ) {
$processCache->set( $key, $text );
$cache->set( $key, $text, $wgRevisionCacheExpiry );
static function loadAndLazyInit() {
global $wgMiserMode;
- wfDebug( __METHOD__ . ": reading site_stats from slave\n" );
+ wfDebug( __METHOD__ . ": reading site_stats from replica DB\n" );
$row = self::doLoad( wfGetDB( DB_SLAVE ) );
if ( !self::isSane( $row ) ) {
// Might have just been initialized during this request? Underflow?
- wfDebug( __METHOD__ . ": site_stats damaged or missing on slave\n" );
+ wfDebug( __METHOD__ . ": site_stats damaged or missing on replica DB\n" );
$row = self::doLoad( wfGetDB( DB_MASTER ) );
}
* @param string $action Action that permission needs to be checked for
* @param User $user User to check
* @param string $rigor One of (quick,full,secure)
- * - quick : does cheap permission checks from slaves (usable for GUI creation)
- * - full : does cheap and expensive checks possibly from a slave
+ * - quick : does cheap permission checks from replica DBs (usable for GUI creation)
+ * - full : does cheap and expensive checks possibly from a replica DB
* - secure : does cheap and expensive checks, using the master as needed
* @param array $ignoreErrors Array of Strings Set this to a list of message keys
* whose corresponding errors may be ignored.
* @param string $action Action that permission needs to be checked for
* @param User $user User to check
* @param string $rigor One of (quick,full,secure)
- * - quick : does cheap permission checks from slaves (usable for GUI creation)
- * - full : does cheap and expensive checks possibly from a slave
+ * - quick : does cheap permission checks from replica DBs (usable for GUI creation)
+ * - full : does cheap and expensive checks possibly from a replica DB
* - secure : does cheap and expensive checks, using the master as needed
* @param bool $short Set this to true to stop after the first permission error.
* @return array Array of arrays of the arguments to wfMessage to explain permissions problems.
}
/**
- * Gets a default slave database connection object
+ * Gets a default replica DB connection object
* @return DatabaseBase
*/
protected function getDB() {
* @param array $params
* @param bool|string $load Whether load the object's state from the database:
* - false: don't load (if the pageid is given, it will still be loaded)
- * - 'fromdb': load from a slave database
+ * - 'fromdb': load from a replica DB
* - 'fromdbmaster': load from the master database
* @return WikiPage
*/
}
}
- // If a majority of slaves are too lagged then disallow writes
+ // If a majority of replica DBs are too lagged then disallow writes
$slaveCount = wfGetLB()->getServerCount() - 1;
if ( $numLagged >= ceil( $slaveCount / 2 ) ) {
$laggedServers = implode( ', ', $laggedServers );
$this->fld_patrolled = isset( $prop['patrolled'] );
$this->fld_tags = isset( $prop['tags'] );
- // Most of this code will use the 'contributions' group DB, which can map to slaves
+ // Most of this code will use the 'contributions' group DB, which can map to replica DBs
 // with extra user-based indexes or partitioning by user. The additional metadata
- // queries should use a regular slave since the lookup pattern is not all by user.
- $dbSecondary = $this->getDB(); // any random slave
+ // queries should use a regular replica DB since the lookup pattern is not all by user.
+ $dbSecondary = $this->getDB(); // any random replica DB
// TODO: if the query is going only against the revision table, should this be done?
$this->selectNamedDB( 'contributions', DB_SLAVE, 'contributions' );
$username = $user->getName();
- // Try the local user from the slave DB
+ // Try the local user from the replica DB
$localId = User::idFromName( $username );
$flags = User::READ_NORMAL;
}
/**
- * Get the slave connection to the database
+ * Get the replica DB connection to the database
* When non existing, will initialize the connection.
* @return DatabaseBase
*/
public function updateMessage( $key ) {
$moduleNames = $this->getResourceLoader()->getModulesByMessage( $key );
foreach ( $moduleNames as $moduleName ) {
- // Uses a holdoff to account for database slave lag (for MessageCache)
+ // Uses a holdoff to account for database replica DB lag (for MessageCache)
$this->wanCache->touchCheckKey( $this->wanCache->makeKey( __CLASS__, $moduleName ) );
}
}
$where[] = 'global cache is expired';
$staleCache = $cache;
} elseif ( $hashVolatile ) {
- # DB results are slave lag prone until the holdoff TTL passes.
+ # DB results are replica DB lag prone until the holdoff TTL passes.
# By then, updates should be reflected in loadFromDBWithLock().
 # One thread regenerates the cache while others use old values.
$where[] = 'global cache is expired/volatile';
}
// Mark this cache as definitely "latest" (non-volatile) so
- // load() calls do try to refresh the cache with slave data
+ // load() calls do not try to refresh the cache with replica DB data
$this->mCache[$code]['LATEST'] = time();
// Update caches if the lock was acquired
/**
* T109700 - Default bot flag to true when there is no corresponding RC entry
* This means all changes caused by parser functions & Lua on reparse are marked as bot
- * Also in the case no RC entry could be found due to slave lag
+ * Also in the case no RC entry could be found due to replica DB lag
*/
$bot = 1;
$lastRevId = 0;
// Might as well look for rcids and so on.
if ( !$rc_id ) {
- // Info might be out of date, somewhat fractionally, on slave.
+ // Info might be out of date, somewhat fractionally, on replica DB.
// LogEntry/LogPage and WikiPage match rev/log/rc timestamps,
// so use that relation to avoid full table scans.
if ( $log_id ) {
);
}
} elseif ( !$log_id && !$rev_id ) {
- // Info might be out of date, somewhat fractionally, on slave.
+ // Info might be out of date, somewhat fractionally, on replica DB.
$log_id = $dbw->selectField(
'recentchanges',
'rc_logid',
$tagsToAdd = array_diff( $tagsToAdd, $tagsToRemove );
// Update the summary row.
- // $prevTags can be out of date on slaves, especially when addTags is called consecutively,
+ // $prevTags can be out of date on replica DBs, especially when addTags is called consecutively,
// causing loss of tags added recently in tag_summary table.
$prevTags = $dbw->selectField( 'tag_summary', 'ts_tags', $tsConds, __METHOD__ );
$prevTags = $prevTags ? $prevTags : '';
* though certain objects may assume READ_LATEST for common use case or legacy reasons.
*
* There are four types of reads:
- * - READ_NORMAL : Potentially cached read of data (e.g. from a slave or stale replica)
+ * - READ_NORMAL : Potentially cached read of data (e.g. from a replica DB or stale replica)
* - READ_LATEST : Up-to-date read as of transaction start (e.g. from master or a quorum read)
* - READ_LOCKING : Up-to-date read as of now, that locks (shared) the records
* - READ_EXCLUSIVE : Up-to-date read as of now, that locks (exclusive) the records
* All record locks persist for the duration of the transaction.
*
* A special constant READ_LATEST_IMMUTABLE can be used for fetching append-only data. Such
- * data is either (a) on a slave and up-to-date or (b) not yet there, but on the master/quorum.
- * Because the data is append-only, it can never be stale on a slave if present.
+ * data is either (a) on a replica DB and up-to-date or (b) not yet there, but on the master/quorum.
+ * Because the data is append-only, it can never be stale on a replica DB if present.
*
* Callers should use READ_NORMAL (or pass in no flags) unless the read determines a write.
* In theory, such cases may require READ_LOCKING, though to avoid contention, READ_LATEST is
*/
interface IDBAccessObject {
/** Constants for object loading bitfield flags (higher => higher QoS) */
- /** @var integer Read from a slave/non-quorum */
+ /** @var integer Read from a replica DB/non-quorum */
const READ_NORMAL = 0;
/** @var integer Read from the master/quorum */
const READ_LATEST = 1;
/** @var integer Read from the master/quorum and lock out other writers and locking readers */
const READ_EXCLUSIVE = 7; // READ_LOCKING (3) and "FOR UPDATE" (4)
- /** @var integer Read from a slave/non-quorum immutable data, using the master/quorum on miss */
+ /** @var integer Read from a replica DB or without a quorum, using the master/quorum on miss */
const READ_LATEST_IMMUTABLE = 8;
// Convenience constant for tracking how data was loaded (higher => higher QoS)
}
/**
- * Get a slave database connection for the specified cluster
+ * Get a replica DB connection for the specified cluster
*
* @param string $cluster Cluster name
* @return IDatabase
}
/**
- * Helper function for self::batchFetchBlobs for merging master/slave results
+ * Helper function for self::batchFetchBlobs for merging master/replica DB results
* @param array &$ret Current self::batchFetchBlobs return value
* @param array &$ids Map from blob_id to requested itemIDs
* @param mixed $res DB result from Database::select
}
/**
- * Get a connection to the slave DB
+ * Get a connection to the replica DB
* @return DatabaseBase
*/
function getSlaveDB() {
// Update article count statistics (T42009)
// The normal counting logic in WikiPage->doEditUpdates() is designed for
// one-revision-at-a-time editing, not bulk imports. In this situation it
- // suffers from issues of slave lag. We let WikiPage handle the total page
+ // suffers from issues of replica DB lag. We let WikiPage handle the total page
// and revision count, and we implement our own custom logic for the
// article (content page) count.
$page = WikiPage::factory( $title );
* and tree storage backends (SQL, CDB, and plain PHP arrays).
*
* All information is loaded on creation when called by $this->fetch( $prefix ).
- * All work is done on slave, because this should *never* change (except during
+ * All work is done on the replica DB, because this should *never* change (except during
* schema updates etc, which aren't wiki-related)
*
* @since 1.28
}
/**
- * Wait for any slaves or backup servers to catch up.
+ * Wait for any replica DBs or backup servers to catch up.
*
* This does nothing for certain queue classes.
*
}
// Build the full list of job rows to insert
$rows = array_merge( $rowList, array_values( $rowSet ) );
- // Insert the job rows in chunks to avoid slave lag...
+ // Insert the job rows in chunks to avoid replica DB lag...
foreach ( array_chunk( $rows, 50 ) as $rowBatch ) {
$dbw->insert( 'job', $rowBatch, $method );
}
}
/**
- * Wait for any slaves or backup queue servers to catch up.
+ * Wait for any replica DBs or backup queue servers to catch up.
*
* This does nothing for certain queue classes.
*
protected $logger;
const MAX_ALLOWED_LAG = 3; // abort if more than this much DB lag is present
- const LAG_CHECK_PERIOD = 1.0; // check slave lag this many seconds
+ const LAG_CHECK_PERIOD = 1.0; // check replica DB lag this many seconds
const ERROR_BACKOFF_TTL = 1; // seconds to back off a queue due to errors
/**
$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
$lbFactory->commitAll( __METHOD__ );
- // Catch huge single updates that lead to slave lag
+ // Catch huge single updates that lead to replica DB lag
$trxProfiler = Profiler::instance()->getTransactionProfiler();
$trxProfiler->setLogger( LoggerFactory::getInstance( 'DBPerformance' ) );
$trxProfiler->setExpectations( $wgTrxProfilerLimits['JobRunner'], __METHOD__ );
$jobsPopped = 0;
$timeMsTotal = 0;
$startTime = microtime( true ); // time since jobs started running
- $lastCheckTime = 1; // timestamp of last slave check
+ $lastCheckTime = 1; // timestamp of last replica DB check
do {
// Sync the persistent backoffs with concurrent runners
$backoffs = $this->syncBackoffDeltas( $backoffs, $backoffDeltas, $wait );
break;
}
- // Don't let any of the main DB slaves get backed up.
+ // Don't let any of the main DB replica DBs get backed up.
// This only waits for so long before exiting and letting
// other wikis in the farm (on different masters) get a chance.
$timePassed = microtime( true ) - $lastCheckTime;
}
$lastCheckTime = microtime( true );
}
- // Don't let any queue slaves/backups fall behind
+ // Don't let any queue replica DBs/backups fall behind
if ( $jobsPopped > 0 && ( $jobsPopped % 100 ) == 0 ) {
$group->waitForBackups();
}
// Commit all outstanding connections that are in a transaction
// to get a fresh repeatable read snapshot on every connection.
- // Note that jobs are still responsible for handling slave lag.
+ // Note that jobs are still responsible for handling replica DB lag.
$lbFactory->flushReplicaSnapshots( __METHOD__ );
// Clear out title cache data from prior snapshots
LinkCache::singleton()->clear();
/**
* Issue a commit on all masters who are currently in a transaction and have
* made changes to the database. It also supports sometimes waiting for the
- * local wiki's slaves to catch up. See the documentation for
+ * local wiki's replica DBs to catch up. See the documentation for
* $wgJobSerialCommitThreshold for more.
*
* @param Job $job
$dbwSerial = false;
}
} else {
- // There are no slaves or writes are all to foreign DB (we don't handle that)
+ // There are no replica DBs or writes are all to foreign DB (we don't handle that)
$dbwSerial = false;
}
// This will trigger a rollback in the main loop
throw new DBError( $dbwSerial, "Timed out waiting on commit queue." );
}
- // Wait for the slave DBs to catch up
+ // Wait for the replica DBs to catch up
$pos = $lb->getMasterPos();
if ( $pos ) {
$lb->waitForAll( $pos );
}
$dbr = wfGetDB( DB_SLAVE, [ 'recentchanges' ] );
- // Wait till the slave is caught up so that jobs for this page see each others' changes
+ // Wait till the replica DB is caught up so that jobs for this page see each others' changes
if ( !wfGetLB()->safeWaitForMasterPos( $dbr ) ) {
- $this->setLastError( "Timed out while waiting for slave to catch up" );
+ $this->setLastError( "Timed out while waiting for replica DB to catch up" );
return false;
}
// Clear any stale REPEATABLE-READ snapshot
);
if ( $rcIds ) {
$dbw->delete( 'recentchanges', [ 'rc_id' => $rcIds ], __METHOD__ );
- // There might be more, so try waiting for slaves
+ // There might be more, so try waiting for replica DBs
try {
$factory->commitAndWaitForReplication(
__METHOD__, $ticket, [ 'timeout' => 3 ]
const PARSE_THRESHOLD_SEC = 1.0;
/** @var integer Lag safety margin when comparing root job times to last-refresh times */
const CLOCK_FUDGE = 10;
- /** @var integer How many seconds to wait for slaves to catch up */
+ /** @var integer How many seconds to wait for replica DBs to catch up */
const LAG_WAIT_TIMEOUT = 15;
function __construct( Title $title, array $params ) {
// Job to update all (or a range of) backlink pages for a page
if ( !empty( $this->params['recursive'] ) ) {
- // When the base job branches, wait for the slaves to catch up to the master.
+ // When the base job branches, wait for the replica DBs to catch up to the master.
// From then on, we know that any template changes at the time the base job was
// enqueued will be reflected in backlink page parses when the leaf jobs run.
if ( !isset( $params['range'] ) ) {
$skewedTimestamp = $this->params['rootJobTimestamp'];
if ( $opportunistic ) {
- // Neither clock skew nor DB snapshot/slave lag matter much for such
+ // Neither clock skew nor DB snapshot/replica DB lag matter much for such
// updates; focus on reusing the (often recently updated) cache
} else {
// For transclusion updates, the template changes must be reflected
/**
* A cache class that directs writes to one set of servers and reads to
- * another. This assumes that the servers used for reads are setup to slave
+ * another. This assumes that the servers used for reads are set up to replicate
* those that writes go to. This can easily be used with redis for example.
*
* In the WAN scenario (e.g. multi-datacenter case), this is useful when
 * - writeFactory : ObjectFactory::getObjectFromSpec array yielding BagOStuff.
* This object will be used for writes (e.g. the master DB).
 * - readFactory : ObjectFactory::getObjectFromSpec array yielding BagOStuff.
- * This object will be used for reads (e.g. a slave DB).
+ * This object will be used for reads (e.g. a replica DB).
*
* @param array $params
* @throws InvalidArgumentException
* @param integer $ttl Seconds to live. Special values are:
* - WANObjectCache::TTL_INDEFINITE: Cache forever
* @param array $opts Options map:
- * - lag : Seconds of slave lag. Typically, this is either the slave lag
- * before the data was read or, if applicable, the slave lag before
+ * - lag : Seconds of replica DB lag. Typically, this is either the replica DB lag
+ * before the data was read or, if applicable, the replica DB lag before
* the snapshot-isolated transaction the data was read from started.
* Default: 0 seconds
* - since : UNIX timestamp of the data in $value. Typically, this is either
* Keys using it via get(), getMulti(), or getWithSetCallback() will
* be invalidated. It is treated as being HOLDOFF_TTL seconds in the future
* by those methods to avoid race conditions where dependent keys get updated
- * with stale values (e.g. from a DB slave).
+ * with stale values (e.g. from a replica DB).
*
* This is typically useful for keys with hardcoded names or in some cases
* dynamically generated names where a low number of combinations exist.
* // Function that derives the new key value
* function ( $oldValue, &$ttl, array &$setOpts ) {
* $dbr = wfGetDB( DB_SLAVE );
- * // Account for any snapshot/slave lag
+ * // Account for any snapshot/replica DB lag
* $setOpts += Database::getCacheSetOptions( $dbr );
*
* return $dbr->selectRow( ... );
* // Function that derives the new key value
* function ( $oldValue, &$ttl, array &$setOpts ) {
* $dbr = wfGetDB( DB_SLAVE );
- * // Account for any snapshot/slave lag
+ * // Account for any snapshot/replica DB lag
* $setOpts += Database::getCacheSetOptions( $dbr );
*
* return CatConfig::newFromRow( $dbr->selectRow( ... ) );
* function ( $oldValue, &$ttl, array &$setOpts ) {
* // Determine new value from the DB
* $dbr = wfGetDB( DB_SLAVE );
- * // Account for any snapshot/slave lag
+ * // Account for any snapshot/replica DB lag
* $setOpts += Database::getCacheSetOptions( $dbr );
*
* return CatState::newFromResults( $dbr->select( ... ) );
* // Function that derives the new key value
* function ( $oldValue, &$ttl, array &$setOpts ) {
* $dbr = wfGetDB( DB_SLAVE );
- * // Account for any snapshot/slave lag
+ * // Account for any snapshot/replica DB lag
* $setOpts += Database::getCacheSetOptions( $dbr );
*
* // Start off with the last cached list
* - pcTTL: Process cache the value in this PHP instance for this many seconds. This avoids
* network I/O when a key is read several times. This will not cache when the callback
* returns false, however. Note that any purges will not be seen while process cached;
- * since the callback should use slave DBs and they may be lagged or have snapshot
+ * since the callback should use replica DBs and they may be lagged or have snapshot
* isolation anyway, this should not typically matter.
* Default: WANObjectCache::TTL_UNCACHEABLE.
* - version: Integer version number. This allows for callers to make breaking changes to
* Purpose: Ephemeral global storage.
* Stored centrally within the primary data-center.
* Changes are applied there first and replicated to other DCs (best-effort).
- * To retrieve the latest value (e.g. not from a slave), use BagOStuff::READ_LATEST.
+ * To retrieve the latest value (e.g. not from a replica DB), use BagOStuff::READ_LATEST.
* This store may be subject to LRU style evictions.
*
* - ObjectCache::getInstance( $cacheType )
try {
if ( $this->getMasterLinkStatus( $conn ) === 'down' ) {
// If the master cannot be reached, fail-over to the next server.
- // If masters are in data-center A, and slaves in data-center B,
+ // If masters are in data-center A, and replica DBs in data-center B,
 // this helps avoid the case where fail-over happens in A but not
// to the corresponding server in B (e.g. read/write mismatch).
continue;
}
/**
- * Check the master link status of a Redis server that is configured as a slave.
+ * Check the master link status of a Redis server that is configured as a replica DB.
* @param RedisConnRef $conn
* @return string|null Master link status (either 'up' or 'down'), or null
- * if the server is not a slave.
+ * if the server is not a replica DB.
*/
protected function getMasterLinkStatus( RedisConnRef $conn ) {
$info = $conn->info();
* required to hold the largest shard index. Data will be
* distributed across all tables by key hash. This is for
* MySQL bugs 61735 and 61736.
- * - slaveOnly: Whether to only use slave DBs and avoid triggering
+ * - slaveOnly: Whether to only use replica DBs and avoid triggering
* garbage collection logic of expired items. This only
* makes sense if the primary DB is used and only if get()
* calls will be used. This is used by ReplicatedBagOStuff.
- * - syncTimeout: Max seconds to wait for slaves to catch up for WRITE_SYNC.
+ * - syncTimeout: Max seconds to wait for replica DBs to catch up for WRITE_SYNC.
*
* @param array $params
*/
?: MediaWikiServices::getInstance()->getDBLoadBalancer();
if ( $lb->getServerCount() <= 1 ) {
- return true; // no slaves
+ return true; // no replica DBs
}
- // Main LB is used; wait for any slaves to catch up
+ // Main LB is used; wait for any replica DBs to catch up
$masterPos = $lb->getMasterPos();
$loop = new WaitConditionLoop(
if ( !$rc ) {
// Don't cache: This can be hit if the page gets accessed very fast after
- // its creation / latest upload or in case we have high slave lag. In case
+ // its creation / latest upload or in case we have high replica DB lag. In case
// the revision is too old, we will already return above.
return false;
}
*
* @param int $id Article ID to load
* @param string|int $from One of the following values:
- * - "fromdb" or WikiPage::READ_NORMAL to select from a slave database
+ * - "fromdb" or WikiPage::READ_NORMAL to select from a replica DB
* - "fromdbmaster" or WikiPage::READ_LATEST to select from the master database
*
* @return WikiPage|null
* @since 1.20
* @param object $row Database row containing at least fields returned by selectFields().
* @param string|int $from Source of $data:
- * - "fromdb" or WikiPage::READ_NORMAL: from a slave DB
+ * - "fromdb" or WikiPage::READ_NORMAL: from a replica DB
* - "fromdbmaster" or WikiPage::READ_LATEST: from the master DB
* - "forupdate" or WikiPage::READ_LOCKING: from the master DB using SELECT FOR UPDATE
* @return WikiPage
*
* @param object|string|int $from One of the following:
* - A DB query result object.
- * - "fromdb" or WikiPage::READ_NORMAL to get from a slave DB.
+ * - "fromdb" or WikiPage::READ_NORMAL to get from a replica DB.
* - "fromdbmaster" or WikiPage::READ_LATEST to get from the master DB.
* - "forupdate" or WikiPage::READ_LOCKING to get from the master DB
* using SELECT FOR UPDATE.
$data = $this->pageDataFromTitle( wfGetDB( $index ), $this->mTitle, $opts );
}
} else {
- // No idea from where the caller got this data, assume slave database.
+ // No idea from where the caller got this data, assume replica DB.
$data = $from;
$from = self::READ_NORMAL;
}
* @since 1.20
* @param object|bool $data DB row containing fields returned by selectFields() or false
* @param string|int $from One of the following:
- * - "fromdb" or WikiPage::READ_NORMAL if the data comes from a slave DB
+ * - "fromdb" or WikiPage::READ_NORMAL if the data comes from a replica DB
* - "fromdbmaster" or WikiPage::READ_LATEST if the data comes from the master DB
* - "forupdate" or WikiPage::READ_LOCKING if the data comes from
* the master DB using SELECT FOR UPDATE
*/
public function getOldestRevision() {
- // Try using the slave database first, then try the master
+ // Try using the replica DB first, then try the master
$continue = 2;
$db = wfGetDB( DB_SLAVE );
$revSelectFields = Revision::selectFields();
$flags = Revision::READ_LOCKING;
} elseif ( $this->mDataLoadedFrom == self::READ_LATEST ) {
// Bug T93976: if page_latest was loaded from the master, fetch the
- // revision from there as well, as it may not exist yet on a slave DB.
+ // revision from there as well, as it may not exist yet on a replica DB.
// Also, this keeps the queries in the same REPEATABLE-READ snapshot.
$flags = Revision::READ_LATEST;
} else {
// We get here if vary-revision is set. This means that this page references
// itself (such as via self-transclusion). In this case, we need to make sure
// that any such self-references refer to the newly-saved revision, and not
- // to the previous one, which could otherwise happen due to slave lag.
+ // to the previous one, which could otherwise happen due to replica DB lag.
$oldCallback = $edit->popts->getCurrentRevisionCallback();
$edit->popts->setCurrentRevisionCallback(
function ( Title $title, $parser = false ) use ( $revision, &$oldCallback ) {
if ( $title->getNamespace() == NS_CATEGORY ) {
// Load the Category object, which will schedule a job to create
- // the category table row if necessary. Checking a slave is ok
+ // the category table row if necessary. Checking a replica DB is ok
// here, in the worst case it'll run an unnecessary recount job on
// a category that probably doesn't have many members.
Category::newFromTitle( $title )->getID();
}
$this->mIsBackwards = ( $this->mRequest->getVal( 'dir' ) == 'prev' );
- # Let the subclass set the DB here; otherwise use a slave DB for the current wiki
+ # Let the subclass set the DB here; otherwise use a replica DB for the current wiki
$this->mDb = $this->mDb ?: wfGetDB( DB_SLAVE );
$index = $this->getIndexField(); // column to sort on
/**
* Get the Database object used in getTitleInfo().
*
- * Defaults to the local slave DB. Subclasses may want to override this to return a foreign
+ * Defaults to the local replica DB. Subclasses may want to override this to return a foreign
* database object, or null if getTitleInfo() shouldn't access the database.
*
* NOTE: This ONLY works for getTitleInfo() and isKnownEmpty(), NOT FOR ANYTHING ELSE.
if ( !$pager->getNumRows() ) {
$out->addWikiMsg( 'nocontribs', $target );
} else {
- # Show a message about slave lag, if applicable
+ # Show a message about replica DB lag, if applicable
$lag = wfGetLB()->safeGetLag( $pager->getDatabase() );
if ( $lag > 0 ) {
$out->showLagWarning( $lag );
return;
}
- # Show a message about slave lag, if applicable
+ # Show a message about replica DB lag, if applicable
$lag = wfGetLB()->safeGetLag( $pager->getDatabase() );
if ( $lag > 0 ) {
$out->showLagWarning( $lag );
// continuing with this, as the user is just going to end up getting sent
// somewhere else. Additionally, if we keep going here, we end up
// populating the memcache of tag data (see ChangeTags::listDefinedTags)
- // with out-of-date data from the slave, because the slave hasn't caught
+ // with out-of-date data from the replica DB, because the replica DB hasn't caught
// up to the fact that a new tag has been created as part of an implicit,
// as yet uncommitted transaction on master.
if ( $out->getRedirect() !== '' ) {
$user = $this->getUser();
$output = $this->getOutput();
- # Show a message about slave lag, if applicable
+ # Show a message about replica DB lag, if applicable
$lag = wfGetLB()->safeGetLag( $dbr );
if ( $lag > 0 ) {
$output->showLagWarning( $lag );
$month = isset( $options['month'] ) ? $options['month'] : false;
$this->getDateCond( $year, $month );
- // Most of this code will use the 'contributions' group DB, which can map to slaves
+ // Most of this code will use the 'contributions' group DB, which can map to replica DBs
	// with extra user-based indexes or partitioning by user. The additional metadata
- // queries should use a regular slave since the lookup pattern is not all by user.
- $this->mDbSecondary = wfGetDB( DB_SLAVE ); // any random slave
+ // queries should use a regular replica DB since the lookup pattern is not all by user.
+ $this->mDbSecondary = wfGetDB( DB_SLAVE ); // any random replica DB
$this->mDb = wfGetDB( DB_SLAVE, 'contributions' );
}
/**
* Get blocking information
- * @param bool $bFromSlave Whether to check the slave database first.
- * To improve performance, non-critical checks are done against slaves.
+ * @param bool $bFromSlave Whether to check the replica DB first.
+ * To improve performance, non-critical checks are done against replica DBs.
* Check when actually saving should be done against master.
*/
private function getBlockedStatus( $bFromSlave = true ) {
/**
* Check if user is blocked
*
- * @param bool $bFromSlave Whether to check the slave database instead of
+ * @param bool $bFromSlave Whether to check the replica DB instead of
* the master. Hacked from false due to horrible probs on site.
* @return bool True if blocked, false otherwise
*/
/**
* Get the block affecting the user, or null if the user is not blocked
*
- * @param bool $bFromSlave Whether to check the slave database instead of the master
+ * @param bool $bFromSlave Whether to check the replica DB instead of the master
* @return Block|null
*/
public function getBlock( $bFromSlave = true ) {
* Check if user is blocked from editing a particular article
*
* @param Title $title Title to check
- * @param bool $bFromSlave Whether to check the slave database instead of the master
+ * @param bool $bFromSlave Whether to check the replica DB instead of the master
* @return bool
*/
public function isBlockedFrom( $title, $bFromSlave = false ) {
// Only update the timestamp if the page is being watched.
// The query to find out if it is watched is cached both in memcached and per-invocation,
- // and when it does have to be executed, it can be on a slave
+ // and when it does have to be executed, it can be on a replica DB
// If this is the user's newtalk page, we always update the timestamp
$force = '';
if ( $title->getNamespace() == NS_USER_TALK && $title->getText() == $this->getName() ) {
// Get a new user_touched that is higher than the old one.
// This will be used for a CAS check as a last-resort safety
- // check against race conditions and slave lag.
+ // check against race conditions and replica DB lag.
$newTouched = $this->newTouchedTimestamp();
$dbw = wfGetDB( DB_MASTER );
// Now here's a goddamn hack...
$dbr = wfGetDB( DB_SLAVE );
if ( $dbr !== $dbw ) {
- // If we actually have a slave server, the count is
+ // If we actually have a replica DB server, the count is
// at least one behind because the current transaction
// has not been committed and replicated.
$this->mEditCount = $this->initEditCount( 1 );
* @return int Number of edits
*/
protected function initEditCount( $add = 0 ) {
- // Pull from a slave to be less cruel to servers
+ // Pull from a replica DB to be less cruel to servers
// Accuracy isn't the point anyway here
$dbr = wfGetDB( DB_SLAVE );
$count = (int)$dbr->selectField(
* Get a new instance of this user that was loaded from the master via a locking read
*
* Use this instead of the main context User when updating that user. This avoids races
- * where that user was loaded from a slave or even the master but without proper locks.
+ * where that user was loaded from a replica DB or even the master but without proper locks.
*
* @return User|null Returns null if the user was not found in the DB
* @since 1.27
* method of batch updating rows in a database. To use create a class
* implementing the RowUpdateGenerator interface and configure the
* BatchRowIterator and BatchRowWriter for access to the correct table.
- * The components will handle reading, writing, and waiting for slaves
+ * The components will handle reading, writing, and waiting for replica DBs
* while the generator implementation handles generating update arrays
* for singular rows.
*