From 16266edff3337c893a1b5bc42e0bd006a828cde3 Mon Sep 17 00:00:00 2001 From: Aaron Schulz Date: Mon, 5 Sep 2016 13:21:26 -0700 Subject: [PATCH] Change "slave" => "replica DB" in /includes Change-Id: Icb716219c9335ff8fa447b1733d04b71d9712bf9 --- includes/Block.php | 2 +- includes/CategoryFinder.php | 2 +- includes/DefaultSettings.php | 18 ++++++++--------- includes/Defines.php | 2 +- includes/GlobalFunctions.php | 6 +++--- includes/MediaWiki.php | 2 +- includes/MergeHistory.php | 2 +- includes/OutputPage.php | 2 +- includes/Revision.php | 8 ++++---- includes/SiteStats.php | 4 ++-- includes/Title.php | 8 ++++---- includes/api/ApiBase.php | 4 ++-- includes/api/ApiMain.php | 2 +- includes/api/ApiQueryUserContributions.php | 6 +++--- includes/auth/AuthManager.php | 2 +- includes/cache/BacklinkCache.php | 2 +- includes/cache/MessageBlobStore.php | 2 +- includes/cache/MessageCache.php | 4 ++-- includes/changes/CategoryMembershipChange.php | 2 +- includes/changetags/ChangeTags.php | 6 +++--- includes/dao/IDBAccessObject.php | 10 +++++----- includes/externalstore/ExternalStoreDB.php | 4 ++-- includes/filerepo/LocalRepo.php | 2 +- includes/import/WikiImporter.php | 2 +- includes/interwiki/ClassicInterwikiLookup.php | 2 +- includes/jobqueue/JobQueue.php | 2 +- includes/jobqueue/JobQueueDB.php | 2 +- includes/jobqueue/JobQueueGroup.php | 2 +- includes/jobqueue/JobRunner.php | 18 ++++++++--------- .../jobs/CategoryMembershipChangeJob.php | 4 ++-- .../jobqueue/jobs/RecentChangesUpdateJob.php | 2 +- includes/jobqueue/jobs/RefreshLinksJob.php | 6 +++--- .../libs/objectcache/ReplicatedBagOStuff.php | 4 ++-- includes/libs/objectcache/WANObjectCache.php | 16 +++++++-------- includes/objectcache/ObjectCache.php | 2 +- includes/objectcache/RedisBagOStuff.php | 6 +++--- includes/objectcache/SqlBagOStuff.php | 8 ++++---- includes/page/Article.php | 2 +- includes/page/WikiPage.php | 18 ++++++++--------- includes/pager/IndexPager.php | 2 +- .../ResourceLoaderWikiModule.php | 2 +- 
includes/specials/SpecialContributions.php | 2 +- .../specials/SpecialDeletedContributions.php | 2 +- includes/specials/SpecialTags.php | 2 +- includes/specials/SpecialWatchlist.php | 2 +- includes/specials/pagers/ContribsPager.php | 6 +++--- includes/user/User.php | 20 +++++++++---------- includes/utils/BatchRowUpdate.php | 2 +- 48 files changed, 119 insertions(+), 119 deletions(-) diff --git a/includes/Block.php b/includes/Block.php index 79b31bbf58..99db22e7e2 100644 --- a/includes/Block.php +++ b/includes/Block.php @@ -1110,7 +1110,7 @@ class Block { * @param array $ipChain List of IPs (strings), usually retrieved from the * X-Forwarded-For header of the request * @param bool $isAnon Exclude anonymous-only blocks if false - * @param bool $fromMaster Whether to query the master or slave database + * @param bool $fromMaster Whether to query the master or replica DB * @return array Array of Blocks * @since 1.22 */ diff --git a/includes/CategoryFinder.php b/includes/CategoryFinder.php index 3d5e6c58ab..b6c63c70af 100644 --- a/includes/CategoryFinder.php +++ b/includes/CategoryFinder.php @@ -64,7 +64,7 @@ class CategoryFinder { /** @var string "AND" or "OR" */ protected $mode; - /** @var IDatabase Read-DB slave */ + /** @var IDatabase Read-DB replica DB */ protected $dbr; /** diff --git a/includes/DefaultSettings.php b/includes/DefaultSettings.php index a990525def..1ffc3bdf72 100644 --- a/includes/DefaultSettings.php +++ b/includes/DefaultSettings.php @@ -1913,7 +1913,7 @@ $wgSharedSchema = false; * - DBO_COMPRESS -- uses internal compression in database connections, * if available * - * - max lag: (optional) Maximum replication lag before a slave will taken out of rotation + * - max lag: (optional) Maximum replication lag before a replica DB goes out of rotation * - is static: (optional) Set to true if the dataset is static and no replication is used. 
* - cliMode: (optional) Connection handles will not assume that requests are short-lived * nor that INSERT..SELECT can be rewritten into a buffered SELECT and INSERT. @@ -1927,15 +1927,15 @@ $wgSharedSchema = false; * perhaps in some command-line scripts). * * The first server listed in this array (with key 0) will be the master. The - * rest of the servers will be slaves. To prevent writes to your slaves due to + * rest of the servers will be replica DBs. To prevent writes to your replica DBs due to * accidental misconfiguration or MediaWiki bugs, set read_only=1 on all your - * slaves in my.cnf. You can set read_only mode at runtime using: + * replica DBs in my.cnf. You can set read_only mode at runtime using: * * @code * SET @@read_only=1; * @endcode * - * Since the effect of writing to a slave is so damaging and difficult to clean + * Since the effect of writing to a replica DB is so damaging and difficult to clean * up, we at Wikimedia set read_only=1 in my.cnf on all our DB servers, even * our masters, and then set read_only=0 on masters at runtime. */ @@ -2660,7 +2660,7 @@ $wgInternalServer = false; $wgSquidMaxage = 18000; /** - * Cache timeout for the CDN when DB slave lag is high + * Cache timeout for the CDN when DB replica DB lag is high * @see $wgSquidMaxage * @since 1.27 */ @@ -2670,7 +2670,7 @@ $wgCdnMaxageLagged = 30; * If set, any SquidPurge call on a URL or URLs will send a second purge no less than * this many seconds later via the job queue. This requires delayed job support. * This should be safely higher than the 'max lag' value in $wgLBFactoryConf, so that - * slave lag does not cause page to be stuck in stales states in CDN. + * replica DB lag does not cause page to be stuck in stales states in CDN. * * This also fixes race conditions in two-tiered CDN setups (e.g. cdn2 => cdn1 => MediaWiki). 
* If a purge for a URL reaches cdn2 before cdn1 and a request reaches cdn2 for that URL, @@ -7219,8 +7219,8 @@ $wgJobTypesExcludedFromDefaultQueue = [ 'AssembleUploadChunks', 'PublishStashedF $wgJobBackoffThrottling = []; /** - * Make job runners commit changes for slave-lag prone jobs one job at a time. - * This is useful if there are many job workers that race on slave lag checks. + * Make job runners commit changes for replica DB-lag prone jobs one job at a time. + * This is useful if there are many job workers that race on replica DB lag checks. * If set, jobs taking this many seconds of DB write time have serialized commits. * * Note that affected jobs may have worse lock contention. Also, if they affect @@ -7842,7 +7842,7 @@ $wgAPIMaxResultSize = 8388608; $wgAPIMaxUncachedDiffs = 1; /** - * Maximum amount of DB lag on a majority of DB slaves to tolerate + * Maximum amount of DB lag on a majority of DB replica DBs to tolerate * before forcing bots to retry any write requests via API errors. * This should be lower than the 'max lag' value in $wgLBFactoryConf. */ diff --git a/includes/Defines.php b/includes/Defines.php index fe5083e1be..f145483b6f 100644 --- a/includes/Defines.php +++ b/includes/Defines.php @@ -43,7 +43,7 @@ define( 'DBO_COMPRESS', 512 ); * Valid database indexes * Operation-based indexes */ -define( 'DB_SLAVE', -1 ); # Read from the slave (or only server) +define( 'DB_SLAVE', -1 ); # Read from the replica DB (or only server) define( 'DB_MASTER', -2 ); # Write to master (or only server) /**@}*/ diff --git a/includes/GlobalFunctions.php b/includes/GlobalFunctions.php index b421f96caa..aa6766baea 100644 --- a/includes/GlobalFunctions.php +++ b/includes/GlobalFunctions.php @@ -1278,7 +1278,7 @@ function wfReadOnly() { * Check if the site is in read-only mode and return the message if so * * This checks wfConfiguredReadOnlyReason() and the main load balancer - * for slave lag. This may result in DB_SLAVE connection being made. 
+ * for replica DB lag. This may result in DB_SLAVE connection being made. * * @return string|bool String when in read-only mode; false otherwise */ @@ -3279,10 +3279,10 @@ function wfGetNull() { } /** - * Waits for the slaves to catch up to the master position + * Waits for the replica DBs to catch up to the master position * * Use this when updating very large numbers of rows, as in maintenance scripts, - * to avoid causing too much lag. Of course, this is a no-op if there are no slaves. + * to avoid causing too much lag. Of course, this is a no-op if there are no replica DBs. * * By default this waits on the main DB cluster of the current wiki. * If $cluster is set to "*" it will wait on all DB clusters, including diff --git a/includes/MediaWiki.php b/includes/MediaWiki.php index 1add24a600..e67a9b53d6 100644 --- a/includes/MediaWiki.php +++ b/includes/MediaWiki.php @@ -580,7 +580,7 @@ class MediaWiki { $request->response()->setCookie( 'UseCDNCache', 'false', $expires, $options ); } - // Avoid letting a few seconds of slave lag cause a month of stale data. This logic is + // Avoid letting a few seconds of replica DB lag cause a month of stale data. This logic is // also intimately related to the value of $wgCdnReboundPurgeDelay. 
if ( $factory->laggedSlaveUsed() ) { $maxAge = $config->get( 'CdnMaxageLagged' ); diff --git a/includes/MergeHistory.php b/includes/MergeHistory.php index 441fe9e2c0..dd1fd37051 100644 --- a/includes/MergeHistory.php +++ b/includes/MergeHistory.php @@ -33,7 +33,7 @@ */ class MergeHistory { - /** @const int Maximum number of revisions that can be merged at once (avoid too much slave lag) */ + /** @const int Maximum number of revisions that can be merged at once */ const REVISION_LIMIT = 5000; /** @var Title Page from which history will be merged */ diff --git a/includes/OutputPage.php b/includes/OutputPage.php index 5aaa47454d..f3b3f8890e 100644 --- a/includes/OutputPage.php +++ b/includes/OutputPage.php @@ -2535,7 +2535,7 @@ class OutputPage extends ContextSource { } /** - * Show a warning about slave lag + * Show a warning about replica DB lag * * If the lag is higher than $wgSlaveLagCritical seconds, * then the warning is a bit more obvious. If the lag is diff --git a/includes/Revision.php b/includes/Revision.php index 36e27bd339..39fd1d1bcf 100644 --- a/includes/Revision.php +++ b/includes/Revision.php @@ -309,7 +309,7 @@ class Revision implements IDBAccessObject { * Given a set of conditions, fetch a revision * * This method is used then a revision ID is qualified and - * will incorporate some basic slave/master fallback logic + * will incorporate some basic replica DB/master fallback logic * * @param array $conditions * @param int $flags (optional) @@ -1609,7 +1609,7 @@ class Revision implements IDBAccessObject { } if ( !$row ) { - // Text data is immutable; check slaves first. + // Text data is immutable; check replica DBs first. $dbr = wfGetDB( DB_SLAVE ); $row = $dbr->selectRow( 'text', [ 'old_text', 'old_flags' ], @@ -1617,7 +1617,7 @@ class Revision implements IDBAccessObject { __METHOD__ ); } - // Fallback to the master in case of slave lag. Also use FOR UPDATE if it was + // Fallback to the master in case of replica DB lag. 
Also use FOR UPDATE if it was // used to fetch this revision to avoid missing the row due to REPEATABLE-READ. $forUpdate = ( $this->mQueryFlags & self::READ_LOCKING == self::READ_LOCKING ); if ( !$row && ( $forUpdate || wfGetLB()->getServerCount() > 1 ) ) { @@ -1638,7 +1638,7 @@ class Revision implements IDBAccessObject { wfDebugLog( 'Revision', "No blob for text row '$textId' (revision {$this->getId()})." ); } - # No negative caching -- negative hits on text rows may be due to corrupted slave servers + # No negative caching -- negative hits on text rows may be due to corrupted replica DB servers if ( $wgRevisionCacheExpiry && $text !== false ) { $processCache->set( $key, $text ); $cache->set( $key, $text, $wgRevisionCacheExpiry ); diff --git a/includes/SiteStats.php b/includes/SiteStats.php index 604ab93f7f..9507864e41 100644 --- a/includes/SiteStats.php +++ b/includes/SiteStats.php @@ -71,12 +71,12 @@ class SiteStats { static function loadAndLazyInit() { global $wgMiserMode; - wfDebug( __METHOD__ . ": reading site_stats from slave\n" ); + wfDebug( __METHOD__ . ": reading site_stats from replica DB\n" ); $row = self::doLoad( wfGetDB( DB_SLAVE ) ); if ( !self::isSane( $row ) ) { // Might have just been initialized during this request? Underflow? - wfDebug( __METHOD__ . ": site_stats damaged or missing on slave\n" ); + wfDebug( __METHOD__ . 
": site_stats damaged or missing on replica DB\n" ); $row = self::doLoad( wfGetDB( DB_MASTER ) ); } diff --git a/includes/Title.php b/includes/Title.php index 2021e0ab9c..d95a379d25 100644 --- a/includes/Title.php +++ b/includes/Title.php @@ -1886,8 +1886,8 @@ class Title implements LinkTarget { * @param string $action Action that permission needs to be checked for * @param User $user User to check * @param string $rigor One of (quick,full,secure) - * - quick : does cheap permission checks from slaves (usable for GUI creation) - * - full : does cheap and expensive checks possibly from a slave + * - quick : does cheap permission checks from replica DBs (usable for GUI creation) + * - full : does cheap and expensive checks possibly from a replica DB * - secure : does cheap and expensive checks, using the master as needed * @param array $ignoreErrors Array of Strings Set this to a list of message keys * whose corresponding errors may be ignored. @@ -2416,8 +2416,8 @@ class Title implements LinkTarget { * @param string $action Action that permission needs to be checked for * @param User $user User to check * @param string $rigor One of (quick,full,secure) - * - quick : does cheap permission checks from slaves (usable for GUI creation) - * - full : does cheap and expensive checks possibly from a slave + * - quick : does cheap permission checks from replica DBs (usable for GUI creation) + * - full : does cheap and expensive checks possibly from a replica DB * - secure : does cheap and expensive checks, using the master as needed * @param bool $short Set this to true to stop after the first permission error. * @return array Array of arrays of the arguments to wfMessage to explain permissions problems. 
diff --git a/includes/api/ApiBase.php b/includes/api/ApiBase.php index 55d243086c..4ce0ab5743 100644 --- a/includes/api/ApiBase.php +++ b/includes/api/ApiBase.php @@ -599,7 +599,7 @@ abstract class ApiBase extends ContextSource { } /** - * Gets a default slave database connection object + * Gets a default replica DB connection object * @return DatabaseBase */ protected function getDB() { @@ -826,7 +826,7 @@ abstract class ApiBase extends ContextSource { * @param array $params * @param bool|string $load Whether load the object's state from the database: * - false: don't load (if the pageid is given, it will still be loaded) - * - 'fromdb': load from a slave database + * - 'fromdb': load from a replica DB * - 'fromdbmaster': load from the master database * @return WikiPage */ diff --git a/includes/api/ApiMain.php b/includes/api/ApiMain.php index 22b079dee1..2ce51202fe 100644 --- a/includes/api/ApiMain.php +++ b/includes/api/ApiMain.php @@ -1322,7 +1322,7 @@ class ApiMain extends ApiBase { } } - // If a majority of slaves are too lagged then disallow writes + // If a majority of replica DBs are too lagged then disallow writes $slaveCount = wfGetLB()->getServerCount() - 1; if ( $numLagged >= ceil( $slaveCount / 2 ) ) { $laggedServers = implode( ', ', $laggedServers ); diff --git a/includes/api/ApiQueryUserContributions.php b/includes/api/ApiQueryUserContributions.php index 51e192325e..35723e06fd 100644 --- a/includes/api/ApiQueryUserContributions.php +++ b/includes/api/ApiQueryUserContributions.php @@ -57,10 +57,10 @@ class ApiQueryContributions extends ApiQueryBase { $this->fld_patrolled = isset( $prop['patrolled'] ); $this->fld_tags = isset( $prop['tags'] ); - // Most of this code will use the 'contributions' group DB, which can map to slaves + // Most of this code will use the 'contributions' group DB, which can map to replica DBs // with extra user based indexes or partioning by user. 
The additional metadata - // queries should use a regular slave since the lookup pattern is not all by user. - $dbSecondary = $this->getDB(); // any random slave + // queries should use a regular replica DB since the lookup pattern is not all by user. + $dbSecondary = $this->getDB(); // any random replica DB // TODO: if the query is going only against the revision table, should this be done? $this->selectNamedDB( 'contributions', DB_SLAVE, 'contributions' ); diff --git a/includes/auth/AuthManager.php b/includes/auth/AuthManager.php index a5c86be555..992e70f118 100644 --- a/includes/auth/AuthManager.php +++ b/includes/auth/AuthManager.php @@ -1540,7 +1540,7 @@ class AuthManager implements LoggerAwareInterface { $username = $user->getName(); - // Try the local user from the slave DB + // Try the local user from the replica DB $localId = User::idFromName( $username ); $flags = User::READ_NORMAL; diff --git a/includes/cache/BacklinkCache.php b/includes/cache/BacklinkCache.php index 8c2a19eb67..a51681910b 100644 --- a/includes/cache/BacklinkCache.php +++ b/includes/cache/BacklinkCache.php @@ -137,7 +137,7 @@ class BacklinkCache { } /** - * Get the slave connection to the database + * Get the replica DB connection to the database * When non existing, will initialize the connection. 
* @return DatabaseBase */ diff --git a/includes/cache/MessageBlobStore.php b/includes/cache/MessageBlobStore.php index 279898c7f8..f1fe54213d 100644 --- a/includes/cache/MessageBlobStore.php +++ b/includes/cache/MessageBlobStore.php @@ -179,7 +179,7 @@ class MessageBlobStore implements LoggerAwareInterface { public function updateMessage( $key ) { $moduleNames = $this->getResourceLoader()->getModulesByMessage( $key ); foreach ( $moduleNames as $moduleName ) { - // Uses a holdoff to account for database slave lag (for MessageCache) + // Uses a holdoff to account for database replica DB lag (for MessageCache) $this->wanCache->touchCheckKey( $this->wanCache->makeKey( __CLASS__, $moduleName ) ); } } diff --git a/includes/cache/MessageCache.php b/includes/cache/MessageCache.php index 62fab5fc50..e52670203b 100644 --- a/includes/cache/MessageCache.php +++ b/includes/cache/MessageCache.php @@ -302,7 +302,7 @@ class MessageCache { $where[] = 'global cache is expired'; $staleCache = $cache; } elseif ( $hashVolatile ) { - # DB results are slave lag prone until the holdoff TTL passes. + # DB results are replica DB lag prone until the holdoff TTL passes. # By then, updates should be reflected in loadFromDBWithLock(). # One thread renerates the cache while others use old values. 
$where[] = 'global cache is expired/volatile'; @@ -564,7 +564,7 @@ class MessageCache { } // Mark this cache as definitely "latest" (non-volatile) so - // load() calls do try to refresh the cache with slave data + // load() calls do try to refresh the cache with replica DB data $this->mCache[$code]['LATEST'] = time(); // Update caches if the lock was acquired diff --git a/includes/changes/CategoryMembershipChange.php b/includes/changes/CategoryMembershipChange.php index 64d8139eb9..515ab0561a 100644 --- a/includes/changes/CategoryMembershipChange.php +++ b/includes/changes/CategoryMembershipChange.php @@ -164,7 +164,7 @@ class CategoryMembershipChange { /** * T109700 - Default bot flag to true when there is no corresponding RC entry * This means all changes caused by parser functions & Lua on reparse are marked as bot - * Also in the case no RC entry could be found due to slave lag + * Also in the case no RC entry could be found due to replica DB lag */ $bot = 1; $lastRevId = 0; diff --git a/includes/changetags/ChangeTags.php b/includes/changetags/ChangeTags.php index a2945afb61..4b5c6a2423 100644 --- a/includes/changetags/ChangeTags.php +++ b/includes/changetags/ChangeTags.php @@ -174,7 +174,7 @@ class ChangeTags { // Might as well look for rcids and so on. if ( !$rc_id ) { - // Info might be out of date, somewhat fractionally, on slave. + // Info might be out of date, somewhat fractionally, on replica DB. // LogEntry/LogPage and WikiPage match rev/log/rc timestamps, // so use that relation to avoid full table scans. if ( $log_id ) { @@ -201,7 +201,7 @@ class ChangeTags { ); } } elseif ( !$log_id && !$rev_id ) { - // Info might be out of date, somewhat fractionally, on slave. + // Info might be out of date, somewhat fractionally, on replica DB. $log_id = $dbw->selectField( 'recentchanges', 'rc_logid', @@ -313,7 +313,7 @@ class ChangeTags { $tagsToAdd = array_diff( $tagsToAdd, $tagsToRemove ); // Update the summary row. 
- // $prevTags can be out of date on slaves, especially when addTags is called consecutively, + // $prevTags can be out of date on replica DBs, especially when addTags is called consecutively, // causing loss of tags added recently in tag_summary table. $prevTags = $dbw->selectField( 'tag_summary', 'ts_tags', $tsConds, __METHOD__ ); $prevTags = $prevTags ? $prevTags : ''; diff --git a/includes/dao/IDBAccessObject.php b/includes/dao/IDBAccessObject.php index c24962bebd..5acf3ae371 100644 --- a/includes/dao/IDBAccessObject.php +++ b/includes/dao/IDBAccessObject.php @@ -29,15 +29,15 @@ * though certain objects may assume READ_LATEST for common use case or legacy reasons. * * There are four types of reads: - * - READ_NORMAL : Potentially cached read of data (e.g. from a slave or stale replica) + * - READ_NORMAL : Potentially cached read of data (e.g. from a replica DB or stale replica) * - READ_LATEST : Up-to-date read as of transaction start (e.g. from master or a quorum read) * - READ_LOCKING : Up-to-date read as of now, that locks (shared) the records * - READ_EXCLUSIVE : Up-to-date read as of now, that locks (exclusive) the records * All record locks persist for the duration of the transaction. * * A special constant READ_LATEST_IMMUTABLE can be used for fetching append-only data. Such - * data is either (a) on a slave and up-to-date or (b) not yet there, but on the master/quorum. - * Because the data is append-only, it can never be stale on a slave if present. + * data is either (a) on a replica DB and up-to-date or (b) not yet there, but on the master/quorum. + * Because the data is append-only, it can never be stale on a replica DB if present. * * Callers should use READ_NORMAL (or pass in no flags) unless the read determines a write. 
* In theory, such cases may require READ_LOCKING, though to avoid contention, READ_LATEST is @@ -54,7 +54,7 @@ */ interface IDBAccessObject { /** Constants for object loading bitfield flags (higher => higher QoS) */ - /** @var integer Read from a slave/non-quorum */ + /** @var integer Read from a replica DB/non-quorum */ const READ_NORMAL = 0; /** @var integer Read from the master/quorum */ const READ_LATEST = 1; @@ -63,7 +63,7 @@ interface IDBAccessObject { /** @var integer Read from the master/quorum and lock out other writers and locking readers */ const READ_EXCLUSIVE = 7; // READ_LOCKING (3) and "FOR UPDATE" (4) - /** @var integer Read from a slave/non-quorum immutable data, using the master/quorum on miss */ + /** @var integer Read from a replica DB or without a quorum, using the master/quorum on miss */ const READ_LATEST_IMMUTABLE = 8; // Convenience constant for tracking how data was loaded (higher => higher QoS) diff --git a/includes/externalstore/ExternalStoreDB.php b/includes/externalstore/ExternalStoreDB.php index b45457720e..161a7885d7 100644 --- a/includes/externalstore/ExternalStoreDB.php +++ b/includes/externalstore/ExternalStoreDB.php @@ -112,7 +112,7 @@ class ExternalStoreDB extends ExternalStoreMedium { } /** - * Get a slave database connection for the specified cluster + * Get a replica DB connection for the specified cluster * * @param string $cluster Cluster name * @return IDatabase @@ -264,7 +264,7 @@ class ExternalStoreDB extends ExternalStoreMedium { } /** - * Helper function for self::batchFetchBlobs for merging master/slave results + * Helper function for self::batchFetchBlobs for merging master/replica DB results * @param array &$ret Current self::batchFetchBlobs return value * @param array &$ids Map from blob_id to requested itemIDs * @param mixed $res DB result from Database::select diff --git a/includes/filerepo/LocalRepo.php b/includes/filerepo/LocalRepo.php index eaec15129c..b108e00b41 100644 --- a/includes/filerepo/LocalRepo.php 
+++ b/includes/filerepo/LocalRepo.php @@ -453,7 +453,7 @@ class LocalRepo extends FileRepo { } /** - * Get a connection to the slave DB + * Get a connection to the replica DB * @return DatabaseBase */ function getSlaveDB() { diff --git a/includes/import/WikiImporter.php b/includes/import/WikiImporter.php index 406667e14d..d1a9bc503d 100644 --- a/includes/import/WikiImporter.php +++ b/includes/import/WikiImporter.php @@ -377,7 +377,7 @@ class WikiImporter { // Update article count statistics (T42009) // The normal counting logic in WikiPage->doEditUpdates() is designed for // one-revision-at-a-time editing, not bulk imports. In this situation it - // suffers from issues of slave lag. We let WikiPage handle the total page + // suffers from issues of replica DB lag. We let WikiPage handle the total page // and revision count, and we implement our own custom logic for the // article (content page) count. $page = WikiPage::factory( $title ); diff --git a/includes/interwiki/ClassicInterwikiLookup.php b/includes/interwiki/ClassicInterwikiLookup.php index 6ac165ab82..f7205f4fca 100644 --- a/includes/interwiki/ClassicInterwikiLookup.php +++ b/includes/interwiki/ClassicInterwikiLookup.php @@ -37,7 +37,7 @@ use WANObjectCache; * and tree storage backends (SQL, CDB, and plain PHP arrays). * * All information is loaded on creation when called by $this->fetch( $prefix ). - * All work is done on slave, because this should *never* change (except during + * All work is done on replica DB, because this should *never* change (except during * schema updates etc, which aren't wiki-related) * * @since 1.28 diff --git a/includes/jobqueue/JobQueue.php b/includes/jobqueue/JobQueue.php index d64be3c409..020a684728 100644 --- a/includes/jobqueue/JobQueue.php +++ b/includes/jobqueue/JobQueue.php @@ -553,7 +553,7 @@ abstract class JobQueue { } /** - * Wait for any slaves or backup servers to catch up. + * Wait for any replica DBs or backup servers to catch up. 
* * This does nothing for certain queue classes. * diff --git a/includes/jobqueue/JobQueueDB.php b/includes/jobqueue/JobQueueDB.php index 3a1da13645..d981d13a40 100644 --- a/includes/jobqueue/JobQueueDB.php +++ b/includes/jobqueue/JobQueueDB.php @@ -236,7 +236,7 @@ class JobQueueDB extends JobQueue { } // Build the full list of job rows to insert $rows = array_merge( $rowList, array_values( $rowSet ) ); - // Insert the job rows in chunks to avoid slave lag... + // Insert the job rows in chunks to avoid replica DB lag... foreach ( array_chunk( $rows, 50 ) as $rowBatch ) { $dbw->insert( 'job', $rowBatch, $method ); } diff --git a/includes/jobqueue/JobQueueGroup.php b/includes/jobqueue/JobQueueGroup.php index a4b32411cf..8d575625d9 100644 --- a/includes/jobqueue/JobQueueGroup.php +++ b/includes/jobqueue/JobQueueGroup.php @@ -255,7 +255,7 @@ class JobQueueGroup { } /** - * Wait for any slaves or backup queue servers to catch up. + * Wait for any replica DBs or backup queue servers to catch up. * * This does nothing for certain queue classes. 
* diff --git a/includes/jobqueue/JobRunner.php b/includes/jobqueue/JobRunner.php index 112696b5e3..134ba9d706 100644 --- a/includes/jobqueue/JobRunner.php +++ b/includes/jobqueue/JobRunner.php @@ -42,7 +42,7 @@ class JobRunner implements LoggerAwareInterface { protected $logger; const MAX_ALLOWED_LAG = 3; // abort if more than this much DB lag is present - const LAG_CHECK_PERIOD = 1.0; // check slave lag this many seconds + const LAG_CHECK_PERIOD = 1.0; // check replica DB lag this many seconds const ERROR_BACKOFF_TTL = 1; // seconds to back off a queue due to errors /** @@ -126,7 +126,7 @@ class JobRunner implements LoggerAwareInterface { $lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory(); $lbFactory->commitAll( __METHOD__ ); - // Catch huge single updates that lead to slave lag + // Catch huge single updates that lead to replica DB lag $trxProfiler = Profiler::instance()->getTransactionProfiler(); $trxProfiler->setLogger( LoggerFactory::getInstance( 'DBPerformance' ) ); $trxProfiler->setExpectations( $wgTrxProfilerLimits['JobRunner'], __METHOD__ ); @@ -141,7 +141,7 @@ class JobRunner implements LoggerAwareInterface { $jobsPopped = 0; $timeMsTotal = 0; $startTime = microtime( true ); // time since jobs started running - $lastCheckTime = 1; // timestamp of last slave check + $lastCheckTime = 1; // timestamp of last replica DB check do { // Sync the persistent backoffs with concurrent runners $backoffs = $this->syncBackoffDeltas( $backoffs, $backoffDeltas, $wait ); @@ -210,7 +210,7 @@ class JobRunner implements LoggerAwareInterface { break; } - // Don't let any of the main DB slaves get backed up. + // Don't let any of the main DB replica DBs get backed up. // This only waits for so long before exiting and letting // other wikis in the farm (on different masters) get a chance. 
$timePassed = microtime( true ) - $lastCheckTime; @@ -226,7 +226,7 @@ class JobRunner implements LoggerAwareInterface { } $lastCheckTime = microtime( true ); } - // Don't let any queue slaves/backups fall behind + // Don't let any queue replica DBs/backups fall behind if ( $jobsPopped > 0 && ( $jobsPopped % 100 ) == 0 ) { $group->waitForBackups(); } @@ -288,7 +288,7 @@ class JobRunner implements LoggerAwareInterface { // Commit all outstanding connections that are in a transaction // to get a fresh repeatable read snapshot on every connection. - // Note that jobs are still responsible for handling slave lag. + // Note that jobs are still responsible for handling replica DB lag. $lbFactory->flushReplicaSnapshots( __METHOD__ ); // Clear out title cache data from prior snapshots LinkCache::singleton()->clear(); @@ -490,7 +490,7 @@ class JobRunner implements LoggerAwareInterface { /** * Issue a commit on all masters who are currently in a transaction and have * made changes to the database. It also supports sometimes waiting for the - * local wiki's slaves to catch up. See the documentation for + * local wiki's replica DBs to catch up. See the documentation for * $wgJobSerialCommitThreshold for more. * * @param Job $job @@ -513,7 +513,7 @@ class JobRunner implements LoggerAwareInterface { $dbwSerial = false; } } else { - // There are no slaves or writes are all to foreign DB (we don't handle that) + // There are no replica DBs or writes are all to foreign DB (we don't handle that) $dbwSerial = false; } @@ -532,7 +532,7 @@ class JobRunner implements LoggerAwareInterface { // This will trigger a rollback in the main loop throw new DBError( $dbwSerial, "Timed out waiting on commit queue." 
); } - // Wait for the slave DBs to catch up + // Wait for the replica DBs to catch up $pos = $lb->getMasterPos(); if ( $pos ) { $lb->waitForAll( $pos ); diff --git a/includes/jobqueue/jobs/CategoryMembershipChangeJob.php b/includes/jobqueue/jobs/CategoryMembershipChangeJob.php index b561021b29..94e7248e60 100644 --- a/includes/jobqueue/jobs/CategoryMembershipChangeJob.php +++ b/includes/jobqueue/jobs/CategoryMembershipChangeJob.php @@ -58,9 +58,9 @@ class CategoryMembershipChangeJob extends Job { } $dbr = wfGetDB( DB_SLAVE, [ 'recentchanges' ] ); - // Wait till the slave is caught up so that jobs for this page see each others' changes + // Wait till the replica DB is caught up so that jobs for this page see each others' changes if ( !wfGetLB()->safeWaitForMasterPos( $dbr ) ) { - $this->setLastError( "Timed out while waiting for slave to catch up" ); + $this->setLastError( "Timed out while waiting for replica DB to catch up" ); return false; } // Clear any stale REPEATABLE-READ snapshot diff --git a/includes/jobqueue/jobs/RecentChangesUpdateJob.php b/includes/jobqueue/jobs/RecentChangesUpdateJob.php index 2fd3899f9f..809fb637f9 100644 --- a/includes/jobqueue/jobs/RecentChangesUpdateJob.php +++ b/includes/jobqueue/jobs/RecentChangesUpdateJob.php @@ -93,7 +93,7 @@ class RecentChangesUpdateJob extends Job { ); if ( $rcIds ) { $dbw->delete( 'recentchanges', [ 'rc_id' => $rcIds ], __METHOD__ ); - // There might be more, so try waiting for slaves + // There might be more, so try waiting for replica DBs try { $factory->commitAndWaitForReplication( __METHOD__, $ticket, [ 'timeout' => 3 ] diff --git a/includes/jobqueue/jobs/RefreshLinksJob.php b/includes/jobqueue/jobs/RefreshLinksJob.php index 9cdb1617d1..b0dcd57442 100644 --- a/includes/jobqueue/jobs/RefreshLinksJob.php +++ b/includes/jobqueue/jobs/RefreshLinksJob.php @@ -40,7 +40,7 @@ class RefreshLinksJob extends Job { const PARSE_THRESHOLD_SEC = 1.0; /** @var integer Lag safety margin when comparing root job times to 
last-refresh times */ const CLOCK_FUDGE = 10; - /** @var integer How many seconds to wait for slaves to catch up */ + /** @var integer How many seconds to wait for replica DBs to catch up */ const LAG_WAIT_TIMEOUT = 15; function __construct( Title $title, array $params ) { @@ -83,7 +83,7 @@ class RefreshLinksJob extends Job { // Job to update all (or a range of) backlink pages for a page if ( !empty( $this->params['recursive'] ) ) { - // When the base job branches, wait for the slaves to catch up to the master. + // When the base job branches, wait for the replica DBs to catch up to the master. // From then on, we know that any template changes at the time the base job was // enqueued will be reflected in backlink page parses when the leaf jobs run. if ( !isset( $params['range'] ) ) { @@ -182,7 +182,7 @@ class RefreshLinksJob extends Job { $skewedTimestamp = $this->params['rootJobTimestamp']; if ( $opportunistic ) { - // Neither clock skew nor DB snapshot/slave lag matter much for such + // Neither clock skew nor DB snapshot/replica DB lag matter much for such // updates; focus on reusing the (often recently updated) cache } else { // For transclusion updates, the template changes must be reflected diff --git a/includes/libs/objectcache/ReplicatedBagOStuff.php b/includes/libs/objectcache/ReplicatedBagOStuff.php index f2ba9de032..ad811c71a8 100644 --- a/includes/libs/objectcache/ReplicatedBagOStuff.php +++ b/includes/libs/objectcache/ReplicatedBagOStuff.php @@ -22,7 +22,7 @@ /** * A cache class that directs writes to one set of servers and reads to - * another. This assumes that the servers used for reads are setup to slave + * another. This assumes that the servers used for reads are set up to replicate * those that writes go to. This can easily be used with redis for example. * * In the WAN scenario (e.g.
multi-datacenter case), this is useful when @@ -42,7 +42,7 @@ class ReplicatedBagOStuff extends BagOStuff { * - writeFactory : ObjectFactory::getObjectFromSpec array yeilding BagOStuff. * This object will be used for writes (e.g. the master DB). * - readFactory : ObjectFactory::getObjectFromSpec array yeilding BagOStuff. - * This object will be used for reads (e.g. a slave DB). + * This object will be used for reads (e.g. a replica DB). * * @param array $params * @throws InvalidArgumentException diff --git a/includes/libs/objectcache/WANObjectCache.php b/includes/libs/objectcache/WANObjectCache.php index 0d7da91683..daf963f302 100644 --- a/includes/libs/objectcache/WANObjectCache.php +++ b/includes/libs/objectcache/WANObjectCache.php @@ -377,8 +377,8 @@ class WANObjectCache implements IExpiringStore, LoggerAwareInterface { * @param integer $ttl Seconds to live. Special values are: * - WANObjectCache::TTL_INDEFINITE: Cache forever * @param array $opts Options map: - * - lag : Seconds of slave lag. Typically, this is either the slave lag - * before the data was read or, if applicable, the slave lag before + * - lag : Seconds of replica DB lag. Typically, this is either the replica DB lag + * before the data was read or, if applicable, the replica DB lag before * the snapshot-isolated transaction the data was read from started. * Default: 0 seconds * - since : UNIX timestamp of the data in $value. Typically, this is either @@ -566,7 +566,7 @@ class WANObjectCache implements IExpiringStore, LoggerAwareInterface { * Keys using it via get(), getMulti(), or getWithSetCallback() will * be invalidated. It is treated as being HOLDOFF_TTL seconds in the future * by those methods to avoid race conditions where dependent keys get updated - * with stale values (e.g. from a DB slave). + * with stale values (e.g. from a replica DB). * * This is typically useful for keys with hardcoded names or in some cases * dynamically generated names where a low number of combinations exist.
@@ -661,7 +661,7 @@ class WANObjectCache implements IExpiringStore, LoggerAwareInterface { * // Function that derives the new key value * function ( $oldValue, &$ttl, array &$setOpts ) { * $dbr = wfGetDB( DB_SLAVE ); - * // Account for any snapshot/slave lag + * // Account for any snapshot/replica DB lag * $setOpts += Database::getCacheSetOptions( $dbr ); * * return $dbr->selectRow( ... ); @@ -679,7 +679,7 @@ class WANObjectCache implements IExpiringStore, LoggerAwareInterface { * // Function that derives the new key value * function ( $oldValue, &$ttl, array &$setOpts ) { * $dbr = wfGetDB( DB_SLAVE ); - * // Account for any snapshot/slave lag + * // Account for any snapshot/replica DB lag * $setOpts += Database::getCacheSetOptions( $dbr ); * * return CatConfig::newFromRow( $dbr->selectRow( ... ) ); @@ -706,7 +706,7 @@ class WANObjectCache implements IExpiringStore, LoggerAwareInterface { * function ( $oldValue, &$ttl, array &$setOpts ) { * // Determine new value from the DB * $dbr = wfGetDB( DB_SLAVE ); - * // Account for any snapshot/slave lag + * // Account for any snapshot/replica DB lag * $setOpts += Database::getCacheSetOptions( $dbr ); * * return CatState::newFromResults( $dbr->select( ... ) ); @@ -733,7 +733,7 @@ class WANObjectCache implements IExpiringStore, LoggerAwareInterface { * // Function that derives the new key value * function ( $oldValue, &$ttl, array &$setOpts ) { * $dbr = wfGetDB( DB_SLAVE ); - * // Account for any snapshot/slave lag + * // Account for any snapshot/replica DB lag * $setOpts += Database::getCacheSetOptions( $dbr ); * * // Start off with the last cached list @@ -784,7 +784,7 @@ class WANObjectCache implements IExpiringStore, LoggerAwareInterface { * - pcTTL: Process cache the value in this PHP instance for this many seconds. This avoids * network I/O when a key is read several times. This will not cache when the callback * returns false, however. 
Note that any purges will not be seen while process cached; - * since the callback should use slave DBs and they may be lagged or have snapshot + * since the callback should use replica DBs and they may be lagged or have snapshot * isolation anyway, this should not typically matter. * Default: WANObjectCache::TTL_UNCACHEABLE. * - version: Integer version number. This allows for callers to make breaking changes to diff --git a/includes/objectcache/ObjectCache.php b/includes/objectcache/ObjectCache.php index bcdf62f59d..9ff39a083c 100644 --- a/includes/objectcache/ObjectCache.php +++ b/includes/objectcache/ObjectCache.php @@ -65,7 +65,7 @@ use MediaWiki\Services\ServiceDisabledException; * Purpose: Ephemeral global storage. * Stored centrally within the primary data-center. * Changes are applied there first and replicated to other DCs (best-effort). - * To retrieve the latest value (e.g. not from a slave), use BagOStuff::READ_LATEST. + * To retrieve the latest value (e.g. not from a replica DB), use BagOStuff::READ_LATEST. * This store may be subject to LRU style evictions. * * - ObjectCache::getInstance( $cacheType ) diff --git a/includes/objectcache/RedisBagOStuff.php b/includes/objectcache/RedisBagOStuff.php index c3e0c96b75..f9d201f489 100644 --- a/includes/objectcache/RedisBagOStuff.php +++ b/includes/objectcache/RedisBagOStuff.php @@ -363,7 +363,7 @@ class RedisBagOStuff extends BagOStuff { try { if ( $this->getMasterLinkStatus( $conn ) === 'down' ) { // If the master cannot be reached, fail-over to the next server. - // If masters are in data-center A, and slaves in data-center B, + // If masters are in data-center A, and replica DBs in data-center B, // this helps avoid the case were fail-over happens in A but not // to the corresponding server in B (e.g. read/write mismatch). continue; @@ -384,10 +384,10 @@ class RedisBagOStuff extends BagOStuff { } /** - * Check the master link status of a Redis server that is configured as a slave. 
+ * Check the master link status of a Redis server that is configured as a replica DB. * @param RedisConnRef $conn * @return string|null Master link status (either 'up' or 'down'), or null - * if the server is not a slave. + * if the server is not a replica DB. */ protected function getMasterLinkStatus( RedisConnRef $conn ) { $info = $conn->info(); diff --git a/includes/objectcache/SqlBagOStuff.php b/includes/objectcache/SqlBagOStuff.php index 0abe64c933..48bbc490fc 100644 --- a/includes/objectcache/SqlBagOStuff.php +++ b/includes/objectcache/SqlBagOStuff.php @@ -85,11 +85,11 @@ class SqlBagOStuff extends BagOStuff { * required to hold the largest shard index. Data will be * distributed across all tables by key hash. This is for * MySQL bugs 61735 and 61736. - * - slaveOnly: Whether to only use slave DBs and avoid triggering + * - slaveOnly: Whether to only use replica DBs and avoid triggering * garbage collection logic of expired items. This only * makes sense if the primary DB is used and only if get() * calls will be used. This is used by ReplicatedBagOStuff. - * - syncTimeout: Max seconds to wait for slaves to catch up for WRITE_SYNC. + * - syncTimeout: Max seconds to wait for replica DBs to catch up for WRITE_SYNC. 
* * @param array $params */ @@ -807,10 +807,10 @@ class SqlBagOStuff extends BagOStuff { ?: MediaWikiServices::getInstance()->getDBLoadBalancer(); if ( $lb->getServerCount() <= 1 ) { - return true; // no slaves + return true; // no replica DBs } - // Main LB is used; wait for any slaves to catch up + // Main LB is used; wait for any replica DBs to catch up $masterPos = $lb->getMasterPos(); $loop = new WaitConditionLoop( diff --git a/includes/page/Article.php b/includes/page/Article.php index e299f7eece..c36b5e85c3 100644 --- a/includes/page/Article.php +++ b/includes/page/Article.php @@ -1151,7 +1151,7 @@ class Article implements Page { if ( !$rc ) { // Don't cache: This can be hit if the page gets accessed very fast after - // its creation / latest upload or in case we have high slave lag. In case + // its creation / latest upload or in case we have high replica DB lag. In case // the revision is too old, we will already return above. return false; } diff --git a/includes/page/WikiPage.php b/includes/page/WikiPage.php index e5ce924b95..4dfbb7a4c2 100644 --- a/includes/page/WikiPage.php +++ b/includes/page/WikiPage.php @@ -134,7 +134,7 @@ class WikiPage implements Page, IDBAccessObject { * * @param int $id Article ID to load * @param string|int $from One of the following values: - * - "fromdb" or WikiPage::READ_NORMAL to select from a slave database + * - "fromdb" or WikiPage::READ_NORMAL to select from a replica DB * - "fromdbmaster" or WikiPage::READ_LATEST to select from the master database * * @return WikiPage|null @@ -161,7 +161,7 @@ class WikiPage implements Page, IDBAccessObject { * @since 1.20 * @param object $row Database row containing at least fields returned by selectFields(). 
* @param string|int $from Source of $data: - * - "fromdb" or WikiPage::READ_NORMAL: from a slave DB + * - "fromdb" or WikiPage::READ_NORMAL: from a replica DB * - "fromdbmaster" or WikiPage::READ_LATEST: from the master DB * - "forupdate" or WikiPage::READ_LOCKING: from the master DB using SELECT FOR UPDATE * @return WikiPage @@ -346,7 +346,7 @@ class WikiPage implements Page, IDBAccessObject { * * @param object|string|int $from One of the following: * - A DB query result object. - * - "fromdb" or WikiPage::READ_NORMAL to get from a slave DB. + * - "fromdb" or WikiPage::READ_NORMAL to get from a replica DB. * - "fromdbmaster" or WikiPage::READ_LATEST to get from the master DB. * - "forupdate" or WikiPage::READ_LOCKING to get from the master DB * using SELECT FOR UPDATE. @@ -374,7 +374,7 @@ class WikiPage implements Page, IDBAccessObject { $data = $this->pageDataFromTitle( wfGetDB( $index ), $this->mTitle, $opts ); } } else { - // No idea from where the caller got this data, assume slave database. + // No idea from where the caller got this data, assume replica DB. 
$data = $from; $from = self::READ_NORMAL; } @@ -388,7 +388,7 @@ class WikiPage implements Page, IDBAccessObject { * @since 1.20 * @param object|bool $data DB row containing fields returned by selectFields() or false * @param string|int $from One of the following: - * - "fromdb" or WikiPage::READ_NORMAL if the data comes from a slave DB + * - "fromdb" or WikiPage::READ_NORMAL if the data comes from a replica DB * - "fromdbmaster" or WikiPage::READ_LATEST if the data comes from the master DB * - "forupdate" or WikiPage::READ_LOCKING if the data comes from * the master DB using SELECT FOR UPDATE @@ -552,7 +552,7 @@ class WikiPage implements Page, IDBAccessObject { */ public function getOldestRevision() { - // Try using the slave database first, then try the master + // Try using the replica DB first, then try the master $continue = 2; $db = wfGetDB( DB_SLAVE ); $revSelectFields = Revision::selectFields(); @@ -609,7 +609,7 @@ class WikiPage implements Page, IDBAccessObject { $flags = Revision::READ_LOCKING; } elseif ( $this->mDataLoadedFrom == self::READ_LATEST ) { // Bug T93976: if page_latest was loaded from the master, fetch the - // revision from there as well, as it may not exist yet on a slave DB. + // revision from there as well, as it may not exist yet on a replica DB. // Also, this keeps the queries in the same REPEATABLE-READ snapshot. $flags = Revision::READ_LATEST; } else { @@ -2110,7 +2110,7 @@ class WikiPage implements Page, IDBAccessObject { // We get here if vary-revision is set. This means that this page references // itself (such as via self-transclusion). In this case, we need to make sure // that any such self-references refer to the newly-saved revision, and not - // to the previous one, which could otherwise happen due to slave lag. + // to the previous one, which could otherwise happen due to replica DB lag. 
$oldCallback = $edit->popts->getCurrentRevisionCallback(); $edit->popts->setCurrentRevisionCallback( function ( Title $title, $parser = false ) use ( $revision, &$oldCallback ) { @@ -3323,7 +3323,7 @@ class WikiPage implements Page, IDBAccessObject { if ( $title->getNamespace() == NS_CATEGORY ) { // Load the Category object, which will schedule a job to create - // the category table row if necessary. Checking a slave is ok + // the category table row if necessary. Checking a replica DB is ok // here, in the worst case it'll run an unnecessary recount job on // a category that probably doesn't have many members. Category::newFromTitle( $title )->getID(); diff --git a/includes/pager/IndexPager.php b/includes/pager/IndexPager.php index a96ca87316..183e4f21b5 100644 --- a/includes/pager/IndexPager.php +++ b/includes/pager/IndexPager.php @@ -145,7 +145,7 @@ abstract class IndexPager extends ContextSource implements Pager { } $this->mIsBackwards = ( $this->mRequest->getVal( 'dir' ) == 'prev' ); - # Let the subclass set the DB here; otherwise use a slave DB for the current wiki + # Let the subclass set the DB here; otherwise use a replica DB for the current wiki $this->mDb = $this->mDb ?: wfGetDB( DB_SLAVE ); $index = $this->getIndexField(); // column to sort on diff --git a/includes/resourceloader/ResourceLoaderWikiModule.php b/includes/resourceloader/ResourceLoaderWikiModule.php index 82051b17cd..dc74351a82 100644 --- a/includes/resourceloader/ResourceLoaderWikiModule.php +++ b/includes/resourceloader/ResourceLoaderWikiModule.php @@ -130,7 +130,7 @@ class ResourceLoaderWikiModule extends ResourceLoaderModule { /** * Get the Database object used in getTitleInfo(). * - * Defaults to the local slave DB. Subclasses may want to override this to return a foreign + * Defaults to the local replica DB. Subclasses may want to override this to return a foreign * database object, or null if getTitleInfo() shouldn't access the database. 
* * NOTE: This ONLY works for getTitleInfo() and isKnownEmpty(), NOT FOR ANYTHING ELSE. diff --git a/includes/specials/SpecialContributions.php b/includes/specials/SpecialContributions.php index 6aeb2c38ef..68289a770c 100644 --- a/includes/specials/SpecialContributions.php +++ b/includes/specials/SpecialContributions.php @@ -203,7 +203,7 @@ class SpecialContributions extends IncludableSpecialPage { if ( !$pager->getNumRows() ) { $out->addWikiMsg( 'nocontribs', $target ); } else { - # Show a message about slave lag, if applicable + # Show a message about replica DB lag, if applicable $lag = wfGetLB()->safeGetLag( $pager->getDatabase() ); if ( $lag > 0 ) { $out->showLagWarning( $lag ); diff --git a/includes/specials/SpecialDeletedContributions.php b/includes/specials/SpecialDeletedContributions.php index 8e168b2e6e..d625f82f77 100644 --- a/includes/specials/SpecialDeletedContributions.php +++ b/includes/specials/SpecialDeletedContributions.php @@ -98,7 +98,7 @@ class DeletedContributionsPage extends SpecialPage { return; } - # Show a message about slave lag, if applicable + # Show a message about replica DB lag, if applicable $lag = wfGetLB()->safeGetLag( $pager->getDatabase() ); if ( $lag > 0 ) { $out->showLagWarning( $lag ); diff --git a/includes/specials/SpecialTags.php b/includes/specials/SpecialTags.php index 47bed62642..5e5ed254bb 100644 --- a/includes/specials/SpecialTags.php +++ b/includes/specials/SpecialTags.php @@ -110,7 +110,7 @@ class SpecialTags extends SpecialPage { // continuing with this, as the user is just going to end up getting sent // somewhere else. Additionally, if we keep going here, we end up // populating the memcache of tag data (see ChangeTags::listDefinedTags) - // with out-of-date data from the slave, because the slave hasn't caught + // with out-of-date data from the replica DB, because the replica DB hasn't caught // up to the fact that a new tag has been created as part of an implicit, // as yet uncommitted transaction on master. 
if ( $out->getRedirect() !== '' ) { diff --git a/includes/specials/SpecialWatchlist.php b/includes/specials/SpecialWatchlist.php index 17d77ba2b8..854ca658e6 100644 --- a/includes/specials/SpecialWatchlist.php +++ b/includes/specials/SpecialWatchlist.php @@ -343,7 +343,7 @@ class SpecialWatchlist extends ChangesListSpecialPage { $user = $this->getUser(); $output = $this->getOutput(); - # Show a message about slave lag, if applicable + # Show a message about replica DB lag, if applicable $lag = wfGetLB()->safeGetLag( $dbr ); if ( $lag > 0 ) { $output->showLagWarning( $lag ); diff --git a/includes/specials/pagers/ContribsPager.php b/includes/specials/pagers/ContribsPager.php index f8eba9a985..28895a0f32 100644 --- a/includes/specials/pagers/ContribsPager.php +++ b/includes/specials/pagers/ContribsPager.php @@ -70,10 +70,10 @@ class ContribsPager extends ReverseChronologicalPager { $month = isset( $options['month'] ) ? $options['month'] : false; $this->getDateCond( $year, $month ); - // Most of this code will use the 'contributions' group DB, which can map to slaves + // Most of this code will use the 'contributions' group DB, which can map to replica DBs // with extra user based indexes or partioning by user. The additional metadata - // queries should use a regular slave since the lookup pattern is not all by user. - $this->mDbSecondary = wfGetDB( DB_SLAVE ); // any random slave + // queries should use a regular replica DB since the lookup pattern is not all by user. + $this->mDbSecondary = wfGetDB( DB_SLAVE ); // any random replica DB $this->mDb = wfGetDB( DB_SLAVE, 'contributions' ); } diff --git a/includes/user/User.php b/includes/user/User.php index 4ec8d54784..8b92e020d4 100644 --- a/includes/user/User.php +++ b/includes/user/User.php @@ -1570,8 +1570,8 @@ class User implements IDBAccessObject { /** * Get blocking information - * @param bool $bFromSlave Whether to check the slave database first. 
- * To improve performance, non-critical checks are done against slaves. + * @param bool $bFromSlave Whether to check the replica DB first. + * To improve performance, non-critical checks are done against replica DBs. * Check when actually saving should be done against master. */ private function getBlockedStatus( $bFromSlave = true ) { @@ -1922,7 +1922,7 @@ class User implements IDBAccessObject { /** * Check if user is blocked * - * @param bool $bFromSlave Whether to check the slave database instead of + * @param bool $bFromSlave Whether to check the replica DB instead of * the master. Hacked from false due to horrible probs on site. * @return bool True if blocked, false otherwise */ @@ -1933,7 +1933,7 @@ class User implements IDBAccessObject { /** * Get the block affecting the user, or null if the user is not blocked * - * @param bool $bFromSlave Whether to check the slave database instead of the master + * @param bool $bFromSlave Whether to check the replica DB instead of the master * @return Block|null */ public function getBlock( $bFromSlave = true ) { @@ -1945,7 +1945,7 @@ class User implements IDBAccessObject { * Check if user is blocked from editing a particular article * * @param Title $title Title to check - * @param bool $bFromSlave Whether to check the slave database instead of the master + * @param bool $bFromSlave Whether to check the replica DB instead of the master * @return bool */ public function isBlockedFrom( $title, $bFromSlave = false ) { @@ -3595,7 +3595,7 @@ class User implements IDBAccessObject { // Only update the timestamp if the page is being watched. 
// The query to find out if it is watched is cached both in memcached and per-invocation, - // and when it does have to be executed, it can be on a slave + // and when it does have to be executed, it can be on a replica DB // If this is the user's newtalk page, we always update the timestamp $force = ''; if ( $title->getNamespace() == NS_USER_TALK && $title->getText() == $this->getName() ) { @@ -3818,7 +3818,7 @@ class User implements IDBAccessObject { // Get a new user_touched that is higher than the old one. // This will be used for a CAS check as a last-resort safety - // check against race conditions and slave lag. + // check against race conditions and replica DB lag. $newTouched = $this->newTouchedTimestamp(); $dbw = wfGetDB( DB_MASTER ); @@ -4917,7 +4917,7 @@ class User implements IDBAccessObject { // Now here's a goddamn hack... $dbr = wfGetDB( DB_SLAVE ); if ( $dbr !== $dbw ) { - // If we actually have a slave server, the count is + // If we actually have a replica DB server, the count is // at least one behind because the current transaction // has not been committed and replicated. $this->mEditCount = $this->initEditCount( 1 ); @@ -4947,7 +4947,7 @@ class User implements IDBAccessObject { * @return int Number of edits */ protected function initEditCount( $add = 0 ) { - // Pull from a slave to be less cruel to servers + // Pull from a replica DB to be less cruel to servers // Accuracy isn't the point anyway here $dbr = wfGetDB( DB_SLAVE ); $count = (int)$dbr->selectField( @@ -5317,7 +5317,7 @@ class User implements IDBAccessObject { * Get a new instance of this user that was loaded from the master via a locking read * * Use this instead of the main context User when updating that user. This avoids races - * where that user was loaded from a slave or even the master but without proper locks. + * where that user was loaded from a replica DB or even the master but without proper locks. 
* * @return User|null Returns null if the user was not found in the DB * @since 1.27 diff --git a/includes/utils/BatchRowUpdate.php b/includes/utils/BatchRowUpdate.php index 549ad41ba2..1e7eda8b63 100644 --- a/includes/utils/BatchRowUpdate.php +++ b/includes/utils/BatchRowUpdate.php @@ -4,7 +4,7 @@ * method of batch updating rows in a database. To use create a class * implementing the RowUpdateGenerator interface and configure the * BatchRowIterator and BatchRowWriter for access to the correct table. - * The components will handle reading, writing, and waiting for slaves + * The components will handle reading, writing, and waiting for replica DBs * while the generator implementation handles generating update arrays * for singular rows. * -- 2.20.1