<?php
/**
 * Compress the text of a wiki.
 *
 * Usage:
 *
 * Non-wikimedia
 * php compressOld.php [options...]
 *
 * Wikimedia
 * php compressOld.php <database> [options...]
 *
 * Options are:
 *  -t <type>           set compression type to either:
 *                          gzip: compress revisions independently
 *                          concat: concatenate revisions and compress in chunks (default)
 *  -c <chunk-size>     maximum number of revisions in a concat chunk
 *  -b <begin-date>     earliest date to check for uncompressed revisions
 *  -e <end-date>       latest revision date to compress
 *  -s <startid>        the id to start from (referring to the text table for
 *                      type gzip, and to the page table for type concat)
 *  -n <endid>          the page_id to stop at (only when using concat compression type)
 *  --extdb <cluster>   store specified revisions in an external cluster (untested)
 *
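 * For example, to concatenate in chunks of 20 revisions between two dates
 * (values are illustrative; dates are 14-digit MediaWiki timestamps):
 *
 *     php compressOld.php -t concat -c 20 -b 20100101000000 -e 20110101000000
 *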
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 * http://www.gnu.org/copyleft/gpl.html
 *
 * @file
 * @ingroup Maintenance ExternalStorage
 */

require_once __DIR__ . '/../Maintenance.php';

/**
 * Maintenance script that compresses the text of a wiki.
 *
 * @ingroup Maintenance ExternalStorage
 */
class CompressOld extends Maintenance {
	/**
	 * Load style: fetch the text of each revision with an individual SELECT
	 * (LS_INDIVIDUAL), or in bulk via a join against the text table
	 * (LS_CHUNKED).
	 */
	const LS_INDIVIDUAL = 0;
	const LS_CHUNKED = 1;

	public function __construct() {
		parent::__construct();
		$this->mDescription = 'Compress the text of a wiki';
		$this->addOption( 'type', 'Set compression type to either: gzip|concat', false, true, 't' );
		$this->addOption(
			'chunksize',
			'Maximum number of revisions in a concat chunk',
			false,
			true,
			'c'
		);
		$this->addOption(
			'begin-date',
			'Earliest date to check for uncompressed revisions',
			false,
			true,
			'b'
		);
		$this->addOption( 'end-date', 'Latest revision date to compress', false, true, 'e' );
		$this->addOption(
			'startid',
			'The id to start from (gzip -> text table, concat -> page table)',
			false,
			true,
			's'
		);
		$this->addOption(
			'extdb',
			'Store specified revisions in an external cluster (untested)',
			false,
			true
		);
		$this->addOption(
			'endid',
			'The page_id to stop at (only when using concat compression type)',
			false,
			true,
			'n'
		);
	}

	public function execute() {
		global $wgDBname;
		if ( !function_exists( "gzdeflate" ) ) {
			$this->error( "You must enable zlib support in PHP to compress old revisions!\n" .
				"Please see http://www.php.net/manual/en/ref.zlib.php\n", true );
		}

		$type = $this->getOption( 'type', 'concat' );
		$chunkSize = $this->getOption( 'chunksize', 20 );
		$startId = $this->getOption( 'startid', 0 );
		$beginDate = $this->getOption( 'begin-date', '' );
		$endDate = $this->getOption( 'end-date', '' );
		$extDB = $this->getOption( 'extdb', '' );
		$endId = $this->getOption( 'endid', false );

		if ( $type != 'concat' && $type != 'gzip' ) {
			$this->error( "Type \"{$type}\" not supported" );
		}

		if ( $extDB != '' ) {
			$this->output( "Compressing database {$wgDBname} to external cluster {$extDB}\n"
				. str_repeat( '-', 76 ) . "\n\n" );
		} else {
			$this->output( "Compressing database {$wgDBname}\n"
				. str_repeat( '-', 76 ) . "\n\n" );
		}

		$success = true;
		if ( $type == 'concat' ) {
			$success = $this->compressWithConcat( $startId, $chunkSize, $beginDate,
				$endDate, $extDB, $endId );
		} else {
			$this->compressOldPages( $startId, $extDB );
		}

		if ( $success ) {
			$this->output( "Done.\n" );
		}
	}

	/**
	 * Compress text rows individually with gzdeflate(), in batches of 50,
	 * starting from a given old_id.
	 *
	 * @param int $start old_id to start from
	 * @param string $extdb Name of an external storage cluster, or '' for local storage
	 */
	private function compressOldPages( $start = 0, $extdb = '' ) {
		$chunksize = 50;
		$this->output( "Starting from old_id $start...\n" );
		$dbw = wfGetDB( DB_MASTER );
		do {
			$res = $dbw->select(
				'text',
				array( 'old_id', 'old_flags', 'old_text' ),
				"old_id>=$start",
				__METHOD__,
				array( 'ORDER BY' => 'old_id', 'LIMIT' => $chunksize, 'FOR UPDATE' )
			);

			if ( $res->numRows() == 0 ) {
				break;
			}

			$last = $start;

			foreach ( $res as $row ) {
				# print "  {$row->old_id} - {$row->old_namespace}:{$row->old_title}\n";
				$this->compressPage( $row, $extdb );
				$last = $row->old_id;
			}

			$start = $last + 1; # Deletion may leave long empty stretches
			$this->output( "$start...\n" );
		} while ( true );
	}

	/**
	 * Compress the text in a single text row, unless it is already
	 * compressed, and optionally move it to external storage.
	 *
	 * @param stdClass $row
	 * @param string $extdb
	 * @return bool Whether the row was changed
	 */
	private function compressPage( $row, $extdb ) {
		if ( false !== strpos( $row->old_flags, 'gzip' )
			|| false !== strpos( $row->old_flags, 'object' )
		) {
			#print "Already compressed row {$row->old_id}\n";
			return false;
		}
		$dbw = wfGetDB( DB_MASTER );
		$flags = $row->old_flags ? "{$row->old_flags},gzip" : "gzip";
		$compress = gzdeflate( $row->old_text );
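		# gzdeflate() emits a raw DEFLATE stream with no gzip header; the 'gzip'
		# flag added above tells Revision::getRevisionText() to reverse this with
		# gzinflate() on read. An illustrative round trip (not part of this script):
		#   assert( gzinflate( gzdeflate( $text ) ) === $text );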

		# Store in external storage if required
		if ( $extdb !== '' ) {
			$storeObj = new ExternalStoreDB;
			$compress = $storeObj->store( $extdb, $compress );
			if ( $compress === false ) {
				$this->error( "Unable to store object" );

				return false;
			}
		}

		# Update text row
		$dbw->update( 'text',
			array( /* SET */
				'old_flags' => $flags,
				'old_text' => $compress
			), array( /* WHERE */
				'old_id' => $row->old_id
			), __METHOD__,
			array( 'LIMIT' => 1 )
		);

		return true;
	}

	/**
	 * Compress the revisions of each page in chunks: concatenate the texts of
	 * a chunk into one gzipped blob, and replace the other text rows with
	 * small stub objects pointing back at it.
	 *
	 * @param int $startId
	 * @param int $maxChunkSize
	 * @param string $beginDate
	 * @param string $endDate
	 * @param string $extdb
	 * @param bool|int $maxPageId
	 * @return bool
	 */
	private function compressWithConcat( $startId, $maxChunkSize, $beginDate,
		$endDate, $extdb = "", $maxPageId = false
	) {
		$loadStyle = self::LS_CHUNKED;

		$dbr = wfGetDB( DB_SLAVE );
		$dbw = wfGetDB( DB_MASTER );

		# Set up external storage
		if ( $extdb != '' ) {
			$storeObj = new ExternalStoreDB;
		}

		# Get all articles by page_id
		if ( !$maxPageId ) {
			$maxPageId = $dbr->selectField( 'page', 'max(page_id)', '', __METHOD__ );
		}
		$this->output( "Starting from $startId of $maxPageId\n" );
		$pageConds = array();

		/*
		if ( $exclude_ns0 ) {
			print "Excluding main namespace\n";
			$pageConds[] = 'page_namespace<>0';
		}
		if ( $queryExtra ) {
			$pageConds[] = $queryExtra;
		}
		*/

		# For each article, get a list of revisions which fit the criteria

		# No recompression, use a condition on old_flags
		# Don't compress object type entities, because that might produce data loss when
		# overwriting bulk storage concat rows. Don't compress external references, because
		# the script doesn't yet delete rows from external storage.
		$conds = array(
			'old_flags NOT ' . $dbr->buildLike( $dbr->anyString(), 'object', $dbr->anyString() )
			. ' AND old_flags NOT '
			. $dbr->buildLike( $dbr->anyString(), 'external', $dbr->anyString() )
		);
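		# On a stock MySQL backend, the condition built above amounts to:
		#   old_flags NOT LIKE '%object%' AND old_flags NOT LIKE '%external%'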

		if ( $beginDate ) {
			if ( !preg_match( '/^\d{14}$/', $beginDate ) ) {
				$this->error( "Invalid begin date \"$beginDate\"\n" );

				return false;
			}
			$conds[] = "rev_timestamp>'" . $beginDate . "'";
		}
		if ( $endDate ) {
			if ( !preg_match( '/^\d{14}$/', $endDate ) ) {
				$this->error( "Invalid end date \"$endDate\"\n" );

				return false;
			}
			$conds[] = "rev_timestamp<'" . $endDate . "'";
		}
		if ( $loadStyle == self::LS_CHUNKED ) {
			$tables = array( 'revision', 'text' );
			$fields = array( 'rev_id', 'rev_text_id', 'old_flags', 'old_text' );
			$conds[] = 'rev_text_id=old_id';
			$revLoadOptions = 'FOR UPDATE';
		} else {
			$tables = array( 'revision' );
			$fields = array( 'rev_id', 'rev_text_id' );
			$revLoadOptions = array();
		}

		# Don't work with current revisions
		# Don't lock the page table for update either -- TS 2006-04-04
		#$tables[] = 'page';
		#$conds[] = 'page_id=rev_page AND rev_id != page_latest';

		for ( $pageId = $startId; $pageId <= $maxPageId; $pageId++ ) {
			wfWaitForSlaves();

			# Wake up
			$dbr->ping();

			# Get the page row
			$pageRes = $dbr->select( 'page',
				array( 'page_id', 'page_namespace', 'page_title', 'page_latest' ),
				$pageConds + array( 'page_id' => $pageId ), __METHOD__ );
			if ( $pageRes->numRows() == 0 ) {
				continue;
			}
			$pageRow = $dbr->fetchObject( $pageRes );

			# Display progress
			$titleObj = Title::makeTitle( $pageRow->page_namespace, $pageRow->page_title );
			$this->output( "$pageId\t" . $titleObj->getPrefixedDBkey() . " " );

			# Load revisions
			$revRes = $dbw->select( $tables, $fields,
				array_merge( array(
					'rev_page' => $pageRow->page_id,
					# Don't operate on the current revision
					# Use < instead of <> in case the current revision has changed
					# since the page select, which wasn't locking
					'rev_id < ' . $pageRow->page_latest
				), $conds ),
				__METHOD__,
				$revLoadOptions
			);
			$revs = array();
			foreach ( $revRes as $revRow ) {
				$revs[] = $revRow;
			}

			if ( count( $revs ) < 2 ) {
				# No revisions matching, no further processing
				$this->output( "\n" );
				continue;
			}

			# For each chunk
			$i = 0;
			while ( $i < count( $revs ) ) {
				if ( $i < count( $revs ) - $maxChunkSize ) {
					$thisChunkSize = $maxChunkSize;
				} else {
					$thisChunkSize = count( $revs ) - $i;
				}

				$chunk = new ConcatenatedGzipHistoryBlob();
				$stubs = array();
				$dbw->begin( __METHOD__ );
				$usedChunk = false;
				$primaryOldid = $revs[$i]->rev_text_id;

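				# Layout of a finished chunk (when stored locally): the first
				# revision's text row (the "primary" row, $primaryOldid) receives
				# the serialized ConcatenatedGzipHistoryBlob holding every text in
				# the chunk; each other row receives a small serialized
				# HistoryBlobStub whose location is the primary old_id and whose
				# hash selects its item inside the blob. Sketch:
				#
				#   old_id = $primaryOldid -> ConcatenatedGzipHistoryBlob  (flags: object,utf-8)
				#   other old_ids          -> HistoryBlobStub( $primaryOldid, hash )
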
				// @codingStandardsIgnoreStart Ignore avoid function calls in a FOR loop test part warning
				# Get the text of each revision and add it to the object
				for ( $j = 0; $j < $thisChunkSize && $chunk->isHappy(); $j++ ) {
					// @codingStandardsIgnoreEnd
					$oldid = $revs[$i + $j]->rev_text_id;

					# Get text
					if ( $loadStyle == self::LS_INDIVIDUAL ) {
						$textRow = $dbw->selectRow( 'text',
							array( 'old_flags', 'old_text' ),
							array( 'old_id' => $oldid ),
							__METHOD__,
							'FOR UPDATE'
						);
						$text = Revision::getRevisionText( $textRow );
					} else {
						$text = Revision::getRevisionText( $revs[$i + $j] );
					}

					if ( $text === false ) {
						$this->error( "\nError, unable to get text in old_id $oldid" );
						#$dbw->delete( 'old', array( 'old_id' => $oldid ) );
					}

					if ( $extdb == "" && $j == 0 ) {
						$chunk->setText( $text );
						$this->output( '.' );
					} else {
						# Don't make a stub if it's going to be longer than the article
						# Stubs are typically about 100 bytes
						if ( strlen( $text ) < 120 ) {
							$stub = false;
							$this->output( 'x' );
						} else {
							$stub = new HistoryBlobStub( $chunk->addItem( $text ) );
							$stub->setLocation( $primaryOldid );
							$stub->setReferrer( $oldid );
							$this->output( '.' );
							$usedChunk = true;
						}
						$stubs[$j] = $stub;
					}
				}
				$thisChunkSize = $j;

				# If we couldn't actually use any stubs because the revision texts
				# were too short, do nothing
				if ( $usedChunk ) {
					if ( $extdb != "" ) {
						# Move blob objects to External Storage
						$stored = $storeObj->store( $extdb, serialize( $chunk ) );
						if ( $stored === false ) {
							$this->error( "Unable to store object" );

							return false;
						}
						# Store External Storage URLs instead of Stub placeholders
						foreach ( $stubs as $stub ) {
							if ( $stub === false ) {
								continue;
							}
							# $stored should provide base path to a BLOB
							$url = $stored . "/" . $stub->getHash();
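							# $stored is the URL returned by ExternalStoreDB::store(),
							# of the form "DB://cluster/id" (names illustrative), so
							# the final reference looks like "DB://cluster1/12345/<hash>"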
							$dbw->update( 'text',
								array( /* SET */
									'old_text' => $url,
									'old_flags' => 'external,utf-8',
								), array( /* WHERE */
									'old_id' => $stub->getReferrer(),
								)
							);
						}
					} else {
						# Store the main object locally
						$dbw->update( 'text',
							array( /* SET */
								'old_text' => serialize( $chunk ),
								'old_flags' => 'object,utf-8',
							), array( /* WHERE */
								'old_id' => $primaryOldid
							)
						);

						# Store the stub objects
						for ( $j = 1; $j < $thisChunkSize; $j++ ) {
							# Skip revisions left uncompressed (no stub), and never
							# overwrite the primary row
							if ( $stubs[$j] !== false && $revs[$i + $j]->rev_text_id != $primaryOldid ) {
								$dbw->update( 'text',
									array( /* SET */
										'old_text' => serialize( $stubs[$j] ),
										'old_flags' => 'object,utf-8',
									), array( /* WHERE */
										'old_id' => $revs[$i + $j]->rev_text_id
									)
								);
							}
						}
					}
				}
				# Done, next
				$this->output( "/" );
				$dbw->commit( __METHOD__ );
				$i += $thisChunkSize;
				wfWaitForSlaves();
			}
			$this->output( "\n" );
		}

		return true;
	}
}

$maintClass = 'CompressOld';
require_once RUN_MAINTENANCE_IF_MAIN;