Merge "CologneBlue rewrite: rework quickbar() once again"
includes/filebackend/FileOpBatch.php
<?php
/**
 * Helper class for representing batch file operations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 * http://www.gnu.org/copyleft/gpl.html
 *
 * @file
 * @ingroup FileBackend
 * @author Aaron Schulz
 */

/**
 * Helper class for representing batch file operations.
 * Do not use this class from places outside FileBackend.
 *
 * Methods should avoid throwing exceptions at all costs.
 *
 * @ingroup FileBackend
 * @since 1.20
 */
class FileOpBatch {
	/* Timeout related parameters */
	const MAX_BATCH_SIZE = 1000; // integer

	/**
	 * Attempt to perform a series of file operations.
	 * Callers are responsible for handling file locking.
	 *
	 * $opts is an array of options, including:
	 *   - force        : Errors that would normally cause a rollback do not.
	 *                    The remaining operations are still attempted if any fail.
	 *   - allowStale   : Don't require the latest available data.
	 *                    This can increase performance for non-critical writes.
	 *                    This has no effect unless the 'force' flag is set.
	 *   - nonJournaled : Don't log this operation batch in the file journal.
	 *   - concurrency  : Try to do this many operations in parallel when possible.
	 *
	 * The resulting Status will be "OK" unless:
	 *   - a) unexpected operation errors occurred (network partitions, disk full...)
	 *   - b) significant operation errors occurred and 'force' was not set
	 *
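	 * As a rough usage sketch (the variable names below are illustrative, not part of
	 * this API; callers outside FileBackend normally go through
	 * FileBackend::doOperations() instead):
	 * @code
	 *   // $ops: list of FileOp objects already built by the owning backend
	 *   // $journal: the FileJournal to record changes in
	 *   $status = FileOpBatch::attempt(
	 *       $ops,
	 *       array( 'force' => false, 'nonJournaled' => false, 'concurrency' => 4 ),
	 *       $journal
	 *   );
	 *   if ( !$status->isOK() ) {
	 *       // Per-operation results are in $status->success; see the notes above.
	 *   }
	 * @endcode
	 *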
	 * @param array $performOps List of FileOp operations
	 * @param array $opts Batch operation options
	 * @param FileJournal $journal Journal to log operations to
	 * @return Status
	 */
	public static function attempt( array $performOps, array $opts, FileJournal $journal ) {
		wfProfileIn( __METHOD__ );
		$status = Status::newGood();

		$n = count( $performOps );
		if ( $n > self::MAX_BATCH_SIZE ) {
			$status->fatal( 'backend-fail-batchsize', $n, self::MAX_BATCH_SIZE );
			wfProfileOut( __METHOD__ );
			return $status;
		}

		$batchId = $journal->getTimestampedUUID();
		$allowStale = !empty( $opts['allowStale'] );
		$ignoreErrors = !empty( $opts['force'] );
		$journaled = empty( $opts['nonJournaled'] );
		$maxConcurrency = isset( $opts['concurrency'] ) ? $opts['concurrency'] : 1;

		$entries = array(); // file journal entry list
		$predicates = FileOp::newPredicates(); // account for previous ops in prechecks
		$curBatch = array(); // concurrent FileOp sub-batch accumulation
		$curBatchDeps = FileOp::newDependencies(); // paths used in FileOp sub-batch
		$pPerformOps = array(); // ordered list of concurrent FileOp sub-batches
		$lastBackend = null; // last op backend name
		// Do pre-checks for each operation; abort on failure...
		foreach ( $performOps as $index => $fileOp ) {
			$backendName = $fileOp->getBackend()->getName();
			$fileOp->setBatchId( $batchId ); // transaction ID
			$fileOp->allowStaleReads( $allowStale ); // consistency level
			// Decide if this op can be done concurrently within this sub-batch
			// or if a new concurrent sub-batch must be started after this one...
			if ( $fileOp->dependsOn( $curBatchDeps )
				|| count( $curBatch ) >= $maxConcurrency
				|| ( $backendName !== $lastBackend && count( $curBatch ) )
			) {
				$pPerformOps[] = $curBatch; // push this batch
				$curBatch = array(); // start a new sub-batch
				$curBatchDeps = FileOp::newDependencies();
			}
			$lastBackend = $backendName;
			$curBatch[$index] = $fileOp; // keep index
			// Update list of affected paths in this batch
			$curBatchDeps = $fileOp->applyDependencies( $curBatchDeps );
			// Simulate performing the operation...
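			// (Keep a copy of the pre-op predicates so that getJournalEntries()
			// below can diff the before/after state.)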
			$oldPredicates = $predicates;
			$subStatus = $fileOp->precheck( $predicates ); // updates $predicates
			$status->merge( $subStatus );
			if ( $subStatus->isOK() ) {
				if ( $journaled ) { // journal log entries
					$entries = array_merge( $entries,
						$fileOp->getJournalEntries( $oldPredicates, $predicates ) );
				}
			} else { // operation failed?
				$status->success[$index] = false;
				++$status->failCount;
				if ( !$ignoreErrors ) {
					wfProfileOut( __METHOD__ );
					return $status; // abort
				}
			}
		}
		// Push the last sub-batch
		if ( count( $curBatch ) ) {
			$pPerformOps[] = $curBatch;
		}

		// Log the operations in the file journal...
		if ( count( $entries ) ) {
			$subStatus = $journal->logChangeBatch( $entries, $batchId );
			if ( !$subStatus->isOK() ) {
				wfProfileOut( __METHOD__ );
				return $subStatus; // abort
			}
		}

		if ( $ignoreErrors ) { // treat precheck() fatals as mere warnings
			$status->setResult( true, $status->value );
		}

		// Attempt each operation (in parallel if allowed and possible)...
		self::runParallelBatches( $pPerformOps, $status );

		wfProfileOut( __METHOD__ );
		return $status;
	}

	/**
	 * Attempt a list of file operation sub-batches in series.
	 *
	 * The operations *in* each sub-batch will be done in parallel.
	 * The caller is responsible for making sure the operations
	 * within any given sub-batch do not depend on each other.
	 * This will abort remaining ops on failure.
	 *
	 * @param array $pPerformOps List of concurrent FileOp sub-batches
	 * @param Status $status
	 * @return Status
	 */
	protected static function runParallelBatches( array $pPerformOps, Status $status ) {
		$aborted = false; // set to true on unexpected errors
		foreach ( $pPerformOps as $performOpsBatch ) {
			if ( $aborted ) { // check batch op abort flag...
				// We can't continue (even with $ignoreErrors) as $predicates is wrong.
				// Log the remaining ops as failed for recovery...
				foreach ( $performOpsBatch as $i => $fileOp ) {
					$performOpsBatch[$i]->logFailure( 'attempt_aborted' );
				}
				continue;
			}
			$statuses = array();
			$opHandles = array();
			// Get the backend; all sub-batch ops belong to a single backend
			$backend = reset( $performOpsBatch )->getBackend();
			// Get the operation handles or actually do it if there is just one.
			// If attemptAsync() returns a Status, it was either due to an error
			// or the backend does not support async ops and did it synchronously.
			foreach ( $performOpsBatch as $i => $fileOp ) {
				if ( !$fileOp->failed() ) { // failed => already has Status
					// If the batch is just one operation, it's faster to avoid
					// pipelining as that can involve creating new TCP connections.
					$subStatus = ( count( $performOpsBatch ) > 1 )
						? $fileOp->attemptAsync()
						: $fileOp->attempt();
					if ( $subStatus->value instanceof FileBackendStoreOpHandle ) {
						$opHandles[$i] = $subStatus->value; // deferred
					} else {
						$statuses[$i] = $subStatus; // done already
					}
				}
			}
			// Try to do all the operations concurrently...
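			// (Array union '+' keeps the synchronous results already in $statuses and
			// only adds entries for the deferred op handles; the index sets are disjoint.)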
			$statuses = $statuses + $backend->executeOpHandlesInternal( $opHandles );
			// Marshal and merge all the responses (blocking)...
			foreach ( $performOpsBatch as $i => $fileOp ) {
				if ( !$fileOp->failed() ) { // failed => already has Status
					$subStatus = $statuses[$i];
					$status->merge( $subStatus );
					if ( $subStatus->isOK() ) {
						$status->success[$i] = true;
						++$status->successCount;
					} else {
						$status->success[$i] = false;
						++$status->failCount;
						$aborted = true; // set abort flag; we can't continue
					}
				}
			}
		}
		return $status;
	}
}