<?php

/**
 * Class to invalidate the HTML cache of all the pages linking to a given title.
 * Small numbers of links will be done immediately; large numbers are pushed onto
 * the job queue.
 *
 * This class is designed to work efficiently with small numbers of links, and
 * to work reasonably well with up to ~10^5 links. Above ~10^6 links, the memory
 * and time requirements of loading all backlinked IDs in doUpdate() might become
 * prohibitive. The requirements measured at Wikimedia are approximately:
 *
 *   memory: 48 bytes per row
 *   time: 16us per row for the query plus processing
 *
 * The reason this query is done is to support partitioning of the job
 * by backlinked ID. The memory issue could be alleviated by doing this query in
 * batches, but of course LIMIT with an offset is inefficient on the DB side.
 *
 * The class is nevertheless a vast improvement on the previous method of using
 * File::getLinksTo() and Title::touchArray(), which used about 2KB of memory per
 * link.
 *
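 * A minimal usage sketch (constructing the update directly; in core this is
 * normally reached through a deferred update, e.g. from Title::touchLinks()):
 *
 * @code
 * $update = new HTMLCacheUpdate( $title, 'templatelinks' );
 * $update->doUpdate(); // few backlinks: invalidate now; many: queue jobs
 * @endcode
 *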
 * @ingroup Cache
 */
class HTMLCacheUpdate implements DeferrableUpdate {
	/**
	 * @var Title
	 */
	public $mTitle;

	/**
	 * @var BacklinkCache
	 */
	public $mCache;

	public $mTable, $mPrefix, $mStart, $mEnd;
	public $mRowsPerJob, $mRowsPerQuery;

	/**
	 * @param $titleTo Title: the title that the backlinks point to
	 * @param $table String: the backlink table to use, e.g. 'templatelinks'
	 * @param $start Mixed: page_id to start at, or false for an open range
	 * @param $end Mixed: page_id to end at, or false for an open range
	 */
	function __construct( $titleTo, $table, $start = false, $end = false ) {
		global $wgUpdateRowsPerJob, $wgUpdateRowsPerQuery;

		$this->mTitle = $titleTo;
		$this->mTable = $table;
		$this->mStart = $start;
		$this->mEnd = $end;
		$this->mRowsPerJob = $wgUpdateRowsPerJob;
		$this->mRowsPerQuery = $wgUpdateRowsPerQuery;
		$this->mCache = $this->mTitle->getBacklinkCache();
	}

	public function doUpdate() {
		if ( $this->mStart || $this->mEnd ) {
			$this->doPartialUpdate();
			return;
		}

		# Get an estimate of the number of rows from the BacklinkCache
		$numRows = $this->mCache->getNumLinks( $this->mTable );
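		# A link set that fits within two jobs' worth of rows is handled
		# inline; anything larger is partitioned onto the job queue.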
		if ( $numRows > $this->mRowsPerJob * 2 ) {
			# Do fast cached partition
			$this->insertJobs();
		} else {
			# Get the links from the DB
			$titleArray = $this->mCache->getLinks( $this->mTable );
			# Check if the row count estimate was correct
			if ( $titleArray->count() > $this->mRowsPerJob * 2 ) {
				# Not correct, do accurate partition
				wfDebug( __METHOD__ . ": row count estimate was incorrect, repartitioning\n" );
				$this->insertJobsFromTitles( $titleArray );
			} else {
				$this->invalidateTitles( $titleArray );
			}
		}
	}

	/**
	 * Update some of the backlinks, defined by a page_id range
	 */
	protected function doPartialUpdate() {
		$titleArray = $this->mCache->getLinks( $this->mTable, $this->mStart, $this->mEnd );
		if ( $titleArray->count() <= $this->mRowsPerJob * 2 ) {
			# This partition is small enough, do the update
			$this->invalidateTitles( $titleArray );
		} else {
			# Partitioning was excessively inaccurate. Divide the job further.
			# This can occur when a large number of links are added in a short
			# period of time, say by updating a heavily-used template.
			$this->insertJobsFromTitles( $titleArray );
		}
	}

	/**
	 * Partition the current range given by $this->mStart and $this->mEnd,
	 * using a pre-calculated title array which gives the links in that range.
	 * Queue the resulting jobs.
	 *
	 * @param $titleArray TitleArray|array: iterator of Title objects in the range
	 */
	protected function insertJobsFromTitles( $titleArray ) {
		# We make subpartitions in the sense that the start of the first job
		# will be the start of the parent partition, and the end of the last
		# job will be the end of the parent partition.
		$jobs = array();
		$start = $this->mStart; # start of the current job
		$numTitles = 0;
		foreach ( $titleArray as $title ) {
			$id = $title->getArticleID();
			# $numTitles is now the number of titles in the current job,
			# not including the current ID
			if ( $numTitles >= $this->mRowsPerJob ) {
				# Add a job up to but not including the current ID
				$params = array(
					'table' => $this->mTable,
					'start' => $start,
					'end' => $id - 1
				);
				$jobs[] = new HTMLCacheUpdateJob( $this->mTitle, $params );
				$start = $id;
				$numTitles = 0;
			}
			$numTitles++;
		}
		# Last job
		$params = array(
			'table' => $this->mTable,
			'start' => $start,
			'end' => $this->mEnd
		);
		$jobs[] = new HTMLCacheUpdateJob( $this->mTitle, $params );
		wfDebug( __METHOD__ . ": repartitioning into " . count( $jobs ) . " jobs\n" );

		if ( count( $jobs ) < 2 ) {
			# I don't think this is possible at present, but handling this case
			# makes the code a bit more robust against future code updates and
			# avoids a potential infinite loop of repartitioning
			wfDebug( __METHOD__ . ": repartitioning failed!\n" );
			$this->invalidateTitles( $titleArray );
			return;
		}

		Job::batchInsert( $jobs );
	}

	/**
	 * Partition the backlinks into batches using the BacklinkCache and
	 * queue a job for each batch.
	 */
	protected function insertJobs() {
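		# BacklinkCache::partition() splits the backlinks on this table into
		# ( start, end ) page_id ranges of at most mRowsPerJob rows each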
		$batches = $this->mCache->partition( $this->mTable, $this->mRowsPerJob );
		if ( !$batches ) {
			return;
		}
		$jobs = array();
		foreach ( $batches as $batch ) {
			$params = array(
				'table' => $this->mTable,
				'start' => $batch[0],
				'end' => $batch[1],
			);
			$jobs[] = new HTMLCacheUpdateJob( $this->mTitle, $params );
		}
		Job::batchInsert( $jobs );
	}

	/**
	 * Invalidate an array (or iterator) of Title objects, right now
	 *
	 * @param $titleArray TitleArray|array: the Title objects to invalidate
	 */
	protected function invalidateTitles( $titleArray ) {
		global $wgUseFileCache, $wgUseSquid;

		$dbw = wfGetDB( DB_MASTER );
		$timestamp = $dbw->timestamp();

		# Get all IDs in this query into an array
		$ids = array();
		foreach ( $titleArray as $title ) {
			$ids[] = $title->getArticleID();
		}

		if ( !$ids ) {
			return;
		}

		# Update page_touched
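		# Write in chunks of $wgUpdateRowsPerQuery rows so that each
		# individual UPDATE query stays small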
		$batches = array_chunk( $ids, $this->mRowsPerQuery );
		foreach ( $batches as $batch ) {
			$dbw->update( 'page',
				array( 'page_touched' => $timestamp ),
				array( 'page_id' => $batch ),
				__METHOD__
			);
		}

		# Update squid
		if ( $wgUseSquid ) {
			$u = SquidUpdate::newFromTitles( $titleArray );
			$u->doUpdate();
		}

		# Update file cache
		if ( $wgUseFileCache ) {
			foreach ( $titleArray as $title ) {
				HTMLFileCache::clearFileCache( $title );
			}
		}
	}
}

/**
 * Job wrapper for HTMLCacheUpdate. Gets run whenever a related
 * job gets called from the queue.
 *
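 * Job parameters take the form produced by HTMLCacheUpdate::insertJobs();
 * for example (illustrative values only):
 *
 * @code
 * array( 'table' => 'templatelinks', 'start' => 100, 'end' => 500 )
 * @endcode
 *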
 * @ingroup JobQueue
 */
class HTMLCacheUpdateJob extends Job {
	public $table, $start, $end;

	/**
	 * Construct a job
	 * @param $title Title: the title linked to
	 * @param $params Array: job parameters (table, start and end page_ids)
	 * @param $id Integer: job id
	 */
	function __construct( $title, $params, $id = 0 ) {
		parent::__construct( 'htmlCacheUpdate', $title, $params, $id );
		$this->table = $params['table'];
		$this->start = $params['start'];
		$this->end = $params['end'];
	}

	public function run() {
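		# Re-create the update restricted to this job's page_id range and run it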
		$update = new HTMLCacheUpdate( $this->title, $this->table, $this->start, $this->end );
		$update->doUpdate();
		return true;
	}
}