// @note: higher than IDBAccessObject constants
const LOAD_ALL = 16; // integer; load all the lazy fields too (like metadata)
+ const ATOMIC_SECTION_LOCK = 'LocalFile::lockingTransaction';
+
/**
* Create a LocalFile from a title
* Do not call this except from inside a repo class.
# Do some cache purges after final commit so that:
# a) Changes are more likely to be seen post-purge
# b) They won't cause rollback of the log publish/update above
- $that = $this;
- $dbw->onTransactionIdle( function () use (
- $that, $reupload, $wikiPage, $newPageContent, $comment, $user, $logEntry, $logId, $descId, $tags
- ) {
- # Update memcache after the commit
- $that->invalidateCache();
-
- $updateLogPage = false;
- if ( $newPageContent ) {
- # New file page; create the description page.
- # There's already a log entry, so don't make a second RC entry
- # CDN and file cache for the description page are purged by doEditContent.
- $status = $wikiPage->doEditContent(
- $newPageContent,
- $comment,
- EDIT_NEW | EDIT_SUPPRESS_RC,
- false,
- $user
- );
-
- if ( isset( $status->value['revision'] ) ) {
- // Associate new page revision id
- $logEntry->setAssociatedRevId( $status->value['revision']->getId() );
- }
- // This relies on the resetArticleID() call in WikiPage::insertOn(),
- // which is triggered on $descTitle by doEditContent() above.
- if ( isset( $status->value['revision'] ) ) {
- /** @var $rev Revision */
- $rev = $status->value['revision'];
- $updateLogPage = $rev->getPage();
- }
- } else {
- # Existing file page: invalidate description page cache
- $wikiPage->getTitle()->invalidateCache();
- $wikiPage->getTitle()->purgeSquid();
- # Allow the new file version to be patrolled from the page footer
- Article::purgePatrolFooterCache( $descId );
- }
-
- # Update associated rev id. This should be done by $logEntry->insert() earlier,
- # but setAssociatedRevId() wasn't called at that point yet...
- $logParams = $logEntry->getParameters();
- $logParams['associated_rev_id'] = $logEntry->getAssociatedRevId();
- $update = [ 'log_params' => LogEntryBase::makeParamBlob( $logParams ) ];
- if ( $updateLogPage ) {
- # Also log page, in case where we just created it above
- $update['log_page'] = $updateLogPage;
- }
- $that->getRepo()->getMasterDB()->update(
- 'logging',
- $update,
- [ 'log_id' => $logId ],
- __METHOD__
- );
- $that->getRepo()->getMasterDB()->insert(
- 'log_search',
- [
- 'ls_field' => 'associated_rev_id',
- 'ls_value' => $logEntry->getAssociatedRevId(),
- 'ls_log_id' => $logId,
- ],
- __METHOD__
- );
+ DeferredUpdates::addUpdate(
+ new AutoCommitUpdate(
+ $dbw,
+ __METHOD__,
+ function () use (
+ $reupload, $wikiPage, $newPageContent, $comment, $user,
+ $logEntry, $logId, $descId, $tags
+ ) {
+ # Update memcache after the commit
+ $this->invalidateCache();
+
+ $updateLogPage = false;
+ if ( $newPageContent ) {
+ # New file page; create the description page.
+ # There's already a log entry, so don't make a second RC entry
+ # CDN and file cache for the description page are purged by doEditContent.
+ $status = $wikiPage->doEditContent(
+ $newPageContent,
+ $comment,
+ EDIT_NEW | EDIT_SUPPRESS_RC,
+ false,
+ $user
+ );
+
+ if ( isset( $status->value['revision'] ) ) {
+ // Associate new page revision id
+ $logEntry->setAssociatedRevId( $status->value['revision']->getId() );
+ }
+ // This relies on the resetArticleID() call in WikiPage::insertOn(),
+ // which is triggered on $descTitle by doEditContent() above.
+ if ( isset( $status->value['revision'] ) ) {
+ /** @var $rev Revision */
+ $rev = $status->value['revision'];
+ $updateLogPage = $rev->getPage();
+ }
+ } else {
+ # Existing file page: invalidate description page cache
+ $wikiPage->getTitle()->invalidateCache();
+ $wikiPage->getTitle()->purgeSquid();
+ # Allow the new file version to be patrolled from the page footer
+ Article::purgePatrolFooterCache( $descId );
+ }
- # Add change tags, if any
- if ( $tags ) {
- $logEntry->setTags( $tags );
- }
+ # Update associated rev id. This should be done by $logEntry->insert() earlier,
+ # but setAssociatedRevId() wasn't called at that point yet...
+ $logParams = $logEntry->getParameters();
+ $logParams['associated_rev_id'] = $logEntry->getAssociatedRevId();
+ $update = [ 'log_params' => LogEntryBase::makeParamBlob( $logParams ) ];
+ if ( $updateLogPage ) {
+ # Also log page, in case where we just created it above
+ $update['log_page'] = $updateLogPage;
+ }
+ $this->getRepo()->getMasterDB()->update(
+ 'logging',
+ $update,
+ [ 'log_id' => $logId ],
+ __METHOD__
+ );
+ $this->getRepo()->getMasterDB()->insert(
+ 'log_search',
+ [
+ 'ls_field' => 'associated_rev_id',
+ 'ls_value' => $logEntry->getAssociatedRevId(),
+ 'ls_log_id' => $logId,
+ ],
+ __METHOD__
+ );
+
+ # Add change tags, if any
+ if ( $tags ) {
+ $logEntry->setTags( $tags );
+ }
- # Uploads can be patrolled
- $logEntry->setIsPatrollable( true );
+ # Uploads can be patrolled
+ $logEntry->setIsPatrollable( true );
- # Now that the log entry is up-to-date, make an RC entry.
- $logEntry->publish( $logId );
+ # Now that the log entry is up-to-date, make an RC entry.
+ $logEntry->publish( $logId );
- # Run hook for other updates (typically more cache purging)
- Hooks::run( 'FileUpload', [ $that, $reupload, !$newPageContent ] );
+ # Run hook for other updates (typically more cache purging)
+ Hooks::run( 'FileUpload', [ $this, $reupload, !$newPageContent ] );
- if ( $reupload ) {
- # Delete old thumbnails
- $that->purgeThumbnails();
- # Remove the old file from the CDN cache
- DeferredUpdates::addUpdate(
- new CdnCacheUpdate( [ $that->getUrl() ] ),
- DeferredUpdates::PRESEND
- );
- } else {
- # Update backlink pages pointing to this title if created
- LinksUpdate::queueRecursiveJobsForTable( $that->getTitle(), 'imagelinks' );
- }
- } );
+ if ( $reupload ) {
+ # Delete old thumbnails
+ $this->purgeThumbnails();
+ # Remove the old file from the CDN cache
+ DeferredUpdates::addUpdate(
+ new CdnCacheUpdate( [ $this->getUrl() ] ),
+ DeferredUpdates::PRESEND
+ );
+ } else {
+ # Update backlink pages pointing to this title if created
+ LinksUpdate::queueRecursiveJobsForTable( $this->getTitle(), 'imagelinks' );
+ }
+ }
+ )
+ );
if ( !$reupload ) {
# This is a new file, so update the image count
// Purge the source and target files...
$oldTitleFile = wfLocalFile( $this->title );
$newTitleFile = wfLocalFile( $target );
- // Hack: the lock()/unlock() pair is nested in a transaction so the locking is not
- // tied to BEGIN/COMMIT. To avoid slow purges in the transaction, move them outside.
- $this->getRepo()->getMasterDB()->onTransactionIdle(
- function () use ( $oldTitleFile, $newTitleFile, $archiveNames ) {
- $oldTitleFile->purgeEverything();
- foreach ( $archiveNames as $archiveName ) {
- $oldTitleFile->purgeOldThumbnails( $archiveName );
+ // To avoid slow purges in the transaction, move them outside...
+ DeferredUpdates::addUpdate(
+ new AutoCommitUpdate(
+ $this->getRepo()->getMasterDB(),
+ __METHOD__,
+ function () use ( $oldTitleFile, $newTitleFile, $archiveNames ) {
+ $oldTitleFile->purgeEverything();
+ foreach ( $archiveNames as $archiveName ) {
+ $oldTitleFile->purgeOldThumbnails( $archiveName );
+ }
+ $newTitleFile->purgeEverything();
}
- $newTitleFile->purgeEverything();
- }
+ ),
+ DeferredUpdates::PRESEND
);
if ( $status->isOK() ) {
$this->lock(); // begin
$batch->addCurrent();
- # Get old version relative paths
+ // Get old version relative paths
$archiveNames = $batch->addOlds();
$status = $batch->execute();
$this->unlock(); // done
DeferredUpdates::addUpdate( SiteStatsUpdate::factory( [ 'images' => -1 ] ) );
}
- // Hack: the lock()/unlock() pair is nested in a transaction so the locking is not
- // tied to BEGIN/COMMIT. To avoid slow purges in the transaction, move them outside.
- $that = $this;
- $this->getRepo()->getMasterDB()->onTransactionIdle(
- function () use ( $that, $archiveNames ) {
- $that->purgeEverything();
- foreach ( $archiveNames as $archiveName ) {
- $that->purgeOldThumbnails( $archiveName );
+ // To avoid slow purges in the transaction, move them outside...
+ DeferredUpdates::addUpdate(
+ new AutoCommitUpdate(
+ $this->getRepo()->getMasterDB(),
+ __METHOD__,
+ function () use ( $archiveNames ) {
+ $this->purgeEverything();
+ foreach ( $archiveNames as $archiveName ) {
+ $this->purgeOldThumbnails( $archiveName );
+ }
}
- }
+ ),
+ DeferredUpdates::PRESEND
);
// Purge the CDN
}
/**
- * Start a transaction and lock the image for update
- * Increments a reference counter if the lock is already held
+ * Start an atomic DB section and lock the image for update
+ * or increment a reference counter if the lock is already held
+ *
* @throws LocalFileLockError Throws an error if the lock was not acquired
* @return bool Whether the file lock owns/spawned the DB transaction
*/
function lock() {
if ( !$this->locked ) {
$logger = LoggerFactory::getInstance( 'LocalFile' );
+
$dbw = $this->repo->getMasterDB();
- if ( !$dbw->trxLevel() ) {
- $dbw->begin( __METHOD__ );
- $this->lockedOwnTrx = true;
- }
+ $makesTransaction = !$dbw->trxLevel();
+ $dbw->startAtomic( self::ATOMIC_SECTION_LOCK );
// Bug 54736: use simple lock to handle when the file does not exist.
// SELECT FOR UPDATE prevents changes, not other SELECTs with FOR UPDATE.
// Also, that would cause contention on INSERT of similarly named rows.
$lockPaths = [ $this->getPath() ]; // represents all versions of the file
$status = $backend->lockFiles( $lockPaths, LockManager::LOCK_EX, 10 );
if ( !$status->isGood() ) {
- if ( $this->lockedOwnTrx ) {
- $dbw->rollback( __METHOD__ );
- }
+ $dbw->endAtomic( self::ATOMIC_SECTION_LOCK );
$logger->warning( "Failed to lock '{file}'", [ 'file' => $this->name ] );
throw new LocalFileLockError( $status );
$logger->error( "Failed to unlock '{file}'", [ 'file' => $this->name ] );
}
} );
+ // Callers might care if the SELECT snapshot is safely fresh
+ $this->lockedOwnTrx = $makesTransaction;
}
$this->locked++;
}
/**
- * Decrement the lock reference count. If the reference count is reduced to zero, commits
- * the transaction and thereby releases the image lock.
+ * Decrement the lock reference count and end the atomic section if it reaches zero
+ *
+ * The commit and lock release will happen when no atomic sections are active, which
+ * may happen immediately or at some point after calling this.
*/
function unlock() {
if ( $this->locked ) {
--$this->locked;
- if ( !$this->locked && $this->lockedOwnTrx ) {
+ if ( !$this->locked ) {
$dbw = $this->repo->getMasterDB();
- $dbw->commit( __METHOD__ );
+ $dbw->endAtomic( self::ATOMIC_SECTION_LOCK );
$this->lockedOwnTrx = false;
}
}
}
- /**
- * Roll back the DB transaction and mark the image unlocked
- */
- function unlockAndRollback() {
- $this->locked = false;
- $dbw = $this->repo->getMasterDB();
- $dbw->rollback( __METHOD__ );
- $this->lockedOwnTrx = false;
- }
-
/**
* @return Status
*/
}
}
- // Lock the filearchive rows so that the files don't get deleted by a cleanup operation
- // We acquire this lock by running the inserts now, before the file operations.
- // This potentially has poor lock contention characteristics -- an alternative
- // scheme would be to insert stub filearchive entries with no fa_name and commit
- // them in a separate transaction, then run the file ops, then update the fa_name fields.
- $this->doDBInserts();
-
if ( !$repo->hasSha1Storage() ) {
// Removes non-existent file from the batch, so we don't get errors.
// This also handles files in the 'deleted' zone deleted via revision deletion.
// Execute the file deletion batch
$status = $this->file->repo->deleteBatch( $this->deletionBatch );
-
if ( !$status->isGood() ) {
$this->status->merge( $status );
}
}
if ( !$this->status->isOK() ) {
- // Critical file deletion error
- // Roll back inserts, release lock and abort
- // TODO: delete the defunct filearchive rows if we are using a non-transactional DB
- $this->file->unlockAndRollback();
+ // Critical file deletion error; abort
+ $this->file->unlock();
return $this->status;
}
+ // Copy the image/oldimage rows to filearchive
+ $this->doDBInserts();
// Delete image/oldimage rows
$this->doDBDeletes();
public function execute() {
$repo = $this->file->repo;
$status = $repo->newGood();
+ $destFile = wfLocalFile( $this->target );
+
+ $this->file->lock(); // begin
+ $destFile->lock(); // quickly fail if destination is not available
$triplets = $this->getMoveTriplets();
$checkStatus = $this->removeNonexistentFiles( $triplets );
if ( !$checkStatus->isGood() ) {
- $status->merge( $checkStatus );
+ $destFile->unlock();
+ $this->file->unlock();
+ $status->merge( $checkStatus ); // couldn't talk to file backend
return $status;
}
$triplets = $checkStatus->value;
- $destFile = wfLocalFile( $this->target );
- $this->file->lock(); // begin
- $destFile->lock(); // quickly fail if destination is not available
- // Rename the file versions metadata in the DB.
- // This implicitly locks the destination file, which avoids race conditions.
- // If we moved the files from A -> C before DB updates, another process could
- // move files from B -> C at this point, causing storeBatch() to fail and thus
- // cleanupTarget() to trigger. It would delete the C files and cause data loss.
- $statusDb = $this->doDBUpdates();
+ // Verify the file versions metadata in the DB.
+ $statusDb = $this->verifyDBUpdates();
if ( !$statusDb->isGood() ) {
$destFile->unlock();
- $this->file->unlockAndRollback();
+ $this->file->unlock();
$statusDb->ok = false;
return $statusDb;
}
- wfDebugLog( 'imagemove', "Renamed {$this->file->getName()} in database: " .
- "{$statusDb->successCount} successes, {$statusDb->failCount} failures" );
if ( !$repo->hasSha1Storage() ) {
// Copy the files into their new location.
// Delete any files copied over (while the destination is still locked)
$this->cleanupTarget( $triplets );
$destFile->unlock();
- $this->file->unlockAndRollback(); // unlocks the destination
+ $this->file->unlock();
wfDebugLog( 'imagemove', "Error in moving files: "
. $statusMove->getWikiText( false, false, 'en' ) );
$statusMove->ok = false;
$status->merge( $statusMove );
}
+ // Rename the file versions metadata in the DB.
+ $this->doDBUpdates();
+
+ wfDebugLog( 'imagemove', "Renamed {$this->file->getName()} in database: " .
+ "{$statusDb->successCount} successes, {$statusDb->failCount} failures" );
+
$destFile->unlock();
$this->file->unlock(); // done
}
/**
- * Do the database updates and return a new FileRepoStatus indicating how
- * many rows where updated.
+ * Acquire FOR UPDATE row locks and verify that the expected image/oldimage
+ * rows exist, returning a FileRepoStatus indicating how many rows would be renamed.
*
* @return FileRepoStatus
*/
- protected function doDBUpdates() {
+ protected function verifyDBUpdates() {
$repo = $this->file->repo;
$status = $repo->newGood();
$dbw = $this->db;
- // Update current image
- $dbw->update(
+ $hasCurrent = $dbw->selectField(
'image',
- [ 'img_name' => $this->newName ],
+ '1',
[ 'img_name' => $this->oldName ],
- __METHOD__
+ __METHOD__,
+ [ 'FOR UPDATE' ]
+ );
+ $oldRowCount = $dbw->selectField(
+ 'oldimage',
+ 'COUNT(*)',
+ [ 'oi_name' => $this->oldName ],
+ __METHOD__,
+ [ 'FOR UPDATE' ]
);
- if ( $dbw->affectedRows() ) {
+ if ( $hasCurrent ) {
$status->successCount++;
} else {
$status->failCount++;
- $status->fatal( 'imageinvalidfilename' );
-
- return $status;
}
+ $status->successCount += $oldRowCount;
+ // Bug 34934: oldCount is based on files that actually exist.
+ // There may be more DB rows than such files, in which case $affected
+ // can be greater than $total. We use max() to avoid negatives here.
+ $status->failCount += max( 0, $this->oldCount - $oldRowCount );
+ if ( $status->failCount ) {
+ $status->error( 'imageinvalidfilename' );
+ }
+
+ return $status;
+ }
+
+ /**
+ * Do the database updates: rename the image/oldimage rows to the new name.
+ * The row counts were already checked by verifyDBUpdates().
+ */
+ protected function doDBUpdates() {
+ 	$dbw = $this->db;
+
+ 	// Update current image
+ 	$dbw->update(
+ 		'image',
+ 		[ 'img_name' => $this->newName ],
+ 		[ 'img_name' => $this->oldName ],
+ 		__METHOD__
+ 	);
 	// Update old images
+ 	// SET list mirrors the image-table update above; without it, __METHOD__
+ 	// lands in the $conds position and the rows are never renamed.
 	$dbw->update(
 		'oldimage',
+ 		[ 'oi_name' => $this->newName ],
 		[ 'oi_name' => $this->oldName ],
 		__METHOD__
 	);
-
- $affected = $dbw->affectedRows();
- $total = $this->oldCount;
- $status->successCount += $affected;
- // Bug 34934: $total is based on files that actually exist.
- // There may be more DB rows than such files, in which case $affected
- // can be greater than $total. We use max() to avoid negatives here.
- $status->failCount += max( 0, $total - $affected );
- if ( $status->failCount ) {
- $status->error( 'imageinvalidfilename' );
- }
-
- return $status;
 }
/**