Merge "Revert "Don't check namespace in SpecialWantedtemplates""
includes/deferred/SearchUpdate.php
<?php
/**
 * Search index updater
 *
 * See deferred.txt
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 * http://www.gnu.org/copyleft/gpl.html
 *
 * @file
 * @ingroup Search
 */

/**
 * Database independent search index updater
 *
 * @ingroup Search
 */
class SearchUpdate implements DeferrableUpdate {
	/** @var int Page id being updated */
	private $id = 0;

	/** @var Title Title we're updating */
	private $title;

	/** @var Content|bool Content of the page (not text) */
	private $content;

	/** @var WikiPage */
	private $page;

	/**
	 * Constructor
	 *
	 * @param int $id Page id to update
	 * @param Title|string $title Title of page to update
	 * @param Content|string|bool $c Content of the page to update. Default: false.
	 *   If a Content object, text will be extracted from it. String is for back-compat.
	 *   Passing false tells the backend to just update the title, not the content.
	 */
	public function __construct( $id, $title, $c = false ) {
		if ( is_string( $title ) ) {
			$nt = Title::newFromText( $title );
		} else {
			$nt = $title;
		}

		if ( $nt ) {
			$this->id = $id;
			// is_string() check is back-compat for ApprovedRevs
			if ( is_string( $c ) ) {
				$this->content = new TextContent( $c );
			} else {
				$this->content = $c ?: false;
			}
			$this->title = $nt;
		} else {
			wfDebug( "SearchUpdate object created with invalid title '$title'\n" );
		}
	}

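	// Illustrative usage sketch (an assumption, not part of the original file):
	// queueing a full reindex for an edited page, and a title-only update.
	// $page, $content and $pageId are assumed to come from the caller's context.
	//
	//     $update = new SearchUpdate( $page->getId(), $page->getTitle(), $content );
	//     DeferredUpdates::addUpdate( $update );
	//
	//     // Title-only update, e.g. after a rename; content stays false:
	//     DeferredUpdates::addUpdate( new SearchUpdate( $pageId, 'New page title' ) );
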
	/**
	 * Perform the actual update for the entry
	 */
	public function doUpdate() {
		global $wgDisableSearchUpdate;

		if ( $wgDisableSearchUpdate || !$this->id ) {
			return;
		}

		foreach ( SearchEngine::getSearchTypes() as $type ) {
			$search = SearchEngine::create( $type );
			if ( !$search->supports( 'search-update' ) ) {
				continue;
			}

			$normalTitle = $this->getNormalizedTitle( $search );

			if ( $this->getLatestPage() === null ) {
				$search->delete( $this->id, $normalTitle );
				continue;
			} elseif ( $this->content === false ) {
				$search->updateTitle( $this->id, $normalTitle );
				continue;
			}

			$text = $search->getTextFromContent( $this->title, $this->content );
			if ( !$search->textAlreadyUpdatedForIndex() ) {
				$text = self::updateText( $text );
			}

			# Perform the actual update
			$search->update( $this->id, $normalTitle, $search->normalizeText( $text ) );
		}
	}

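	// Illustrative sketch (hypothetical, not part of the original file) of the
	// backend surface doUpdate() drives: a SearchEngine subclass that opts in
	// to 'search-update' and receives the delete / title-only / full-update calls.
	//
	//     class HypotheticalSearchBackend extends SearchEngine {
	//         public function supports( $feature ) {
	//             return $feature === 'search-update';
	//         }
	//         public function delete( $id, $title ) { /* remove the index entry */ }
	//         public function updateTitle( $id, $title ) { /* reindex the title only */ }
	//         public function update( $id, $title, $text ) { /* reindex title and text */ }
	//     }
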
	/**
	 * Clean text for indexing. Only really suitable for indexing in databases.
	 * If you're using a real search engine, you'll probably want to override
	 * this behavior and do something nicer with the original wikitext.
	 * @param string $text
	 * @return string
	 */
	public static function updateText( $text ) {
		global $wgContLang;

		# Language-specific strip/conversion
		$text = $wgContLang->normalizeForSearch( $text );
		$lc = SearchEngine::legalSearchChars() . '&#;';

		$text = preg_replace( "/<\\/?\\s*[A-Za-z][^>]*?>/",
			' ', $wgContLang->lc( " " . $text . " " ) ); # Strip HTML markup
		$text = preg_replace( "/(^|\\n)==\\s*([^\\n]+)\\s*==(\\s)/sD",
			"\\1\\2 \\2 \\2\\3", $text ); # Emphasize headings

		# Strip external URLs
		$uc = "A-Za-z0-9_\\/:.,~%\\-+&;#?!=()@\\x80-\\xFF";
		$protos = "http|https|ftp|mailto|news|gopher";
		$pat = "/(^|[^\\[])({$protos}):[{$uc}]+([^{$uc}]|$)/";
		$text = preg_replace( $pat, "\\1 \\3", $text );

		$p1 = "/([^\\[])\\[({$protos}):[{$uc}]+]/";
		$p2 = "/([^\\[])\\[({$protos}):[{$uc}]+\\s+([^\\]]+)]/";
		$text = preg_replace( $p1, "\\1 ", $text );
		$text = preg_replace( $p2, "\\1 \\3 ", $text );

		# Internal image links
		$pat2 = "/\\[\\[image:([{$uc}]+)\\.(gif|png|jpg|jpeg)([^{$uc}])/i";
		$text = preg_replace( $pat2, " \\1 \\3", $text );

		$text = preg_replace( "/([^{$lc}])([{$lc}]+)]]([a-z]+)/",
			"\\1\\2 \\2\\3", $text ); # Handle [[game]]s

		# Strip all remaining non-search characters
		$text = preg_replace( "/[^{$lc}]+/", " ", $text );

		/**
		 * Handle 's, s'
		 *
		 * $text = preg_replace( "/([{$lc}]+)'s /", "\\1 \\1's ", $text );
		 * $text = preg_replace( "/([{$lc}]+)s' /", "\\1s ", $text );
		 *
		 * These tail-anchored regexps are insanely slow. The worst case comes
		 * when Japanese or Chinese text (i.e. no word spacing) is written on
		 * a wiki configured for Western UTF-8 mode. The Unicode characters are
		 * expanded to hex codes and the "words" are very long paragraph-length
		 * monstrosities. On a large page the above regexps may take over 20
		 * seconds *each* on a 1GHz-level processor.
		 *
		 * The following are reversed versions which are consistently fast
		 * (about 3 milliseconds on a 1GHz-level processor).
		 */
		$text = strrev( preg_replace( "/ s'([{$lc}]+)/", " s'\\1 \\1", strrev( $text ) ) );
		$text = strrev( preg_replace( "/ 's([{$lc}]+)/", " s\\1", strrev( $text ) ) );

		# Strip wiki '' and '''
		$text = preg_replace( "/''[']*/", " ", $text );

		return $text;
	}

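	// Rough illustration (the exact output is an assumption and depends on the
	// content language): updateText() lower-cases the text, strips HTML tags and
	// external URLs, repeats heading text to weight it higher, and collapses
	// everything else down to legal search characters.
	//
	//     $clean = SearchUpdate::updateText(
	//         "Intro.\n== Summary ==\nSee <b>the</b> [http://example.com docs]."
	//     );
	//     // Heading words appear multiple times; markup and URLs are gone.
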
	/**
	 * Get the WikiPage for this SearchUpdate's $id using WikiPage::READ_LATEST,
	 * reusing the same WikiPage object when there are multiple SearchEngine types.
	 *
	 * Returns null if the page has been deleted or is not found.
	 *
	 * @return WikiPage|null
	 */
	private function getLatestPage() {
		if ( !isset( $this->page ) ) {
			$this->page = WikiPage::newFromID( $this->id, WikiPage::READ_LATEST );
		}

		return $this->page;
	}

	/**
	 * Get a normalized string representation of a title suitable for
	 * including in a search index
	 *
	 * @param SearchEngine $search
	 * @return string A stripped-down title string ready for the search index
	 */
	private function getNormalizedTitle( SearchEngine $search ) {
		global $wgContLang;

		$ns = $this->title->getNamespace();
		$title = $this->title->getText();

		$lc = $search->legalSearchChars() . '&#;';
		$t = $wgContLang->normalizeForSearch( $title );
		$t = preg_replace( "/[^{$lc}]+/", ' ', $t );
		$t = $wgContLang->lc( $t );

		# Handle 's, s'
		$t = preg_replace( "/([{$lc}]+)'s( |$)/", "\\1 \\1's ", $t );
		$t = preg_replace( "/([{$lc}]+)s'( |$)/", "\\1s ", $t );

		$t = preg_replace( "/\\s+/", ' ', $t );

		if ( $ns == NS_FILE ) {
			$t = preg_replace( "/ (png|gif|jpg|jpeg|ogg)$/", "", $t );
		}

		return $search->normalizeText( trim( $t ) );
	}
}
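
// End-to-end sketch (illustrative assumption, not part of the original file):
// a maintenance-style loop that reindexes a set of page IDs synchronously.
// $pageIds is a hypothetical input list.
//
//     foreach ( $pageIds as $pageId ) {
//         $page = WikiPage::newFromID( $pageId );
//         if ( !$page ) {
//             continue;
//         }
//         $update = new SearchUpdate( $pageId, $page->getTitle(), $page->getContent() );
//         $update->doUpdate();
//     }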