Merge "Made LinksUpdate::updateLinksTimestamp() use a more correct timestamp"
[lhc/web/wiklou.git] / includes / deferred / SearchUpdate.php
1 <?php
2 /**
3 * Search index updater
4 *
5 * See deferred.txt
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
20 * http://www.gnu.org/copyleft/gpl.html
21 *
22 * @file
23 * @ingroup Search
24 */
25
/**
 * Database independent search index updater
 *
 * Deferred update that pushes a page's title (and optionally its content)
 * into every search backend that supports index updates.
 *
 * @ingroup Search
 */
class SearchUpdate implements DeferrableUpdate {
	/** @var int Page id being updated */
	private $id = 0;

	/** @var Title Title we're updating */
	private $title;

	/** @var Content|false Content of the page (not text); false means title-only update */
	private $content;

	/**
	 * Constructor
	 *
	 * @param int $id Page id to update
	 * @param Title|string $title Title of page to update
	 * @param Content|string|bool $c Content of the page to update. Default: false.
	 *   If a Content object, text will be gotten from it. String is for back-compat.
	 *   Passing false tells the backend to just update the title, not the content
	 */
	public function __construct( $id, $title, $c = false ) {
		if ( is_string( $title ) ) {
			$nt = Title::newFromText( $title );
		} else {
			$nt = $title;
		}

		if ( $nt ) {
			$this->id = $id;
			// is_string() check is back-compat for ApprovedRevs
			if ( is_string( $c ) ) {
				$this->content = new TextContent( $c );
			} else {
				// Normalize any falsy value (null, empty) to false, i.e. title-only update
				$this->content = $c ?: false;
			}
			$this->title = $nt;
		} else {
			// Invalid title: leave $this->id at 0 so doUpdate() becomes a no-op
			wfDebug( "SearchUpdate object created with invalid title '$title'\n" );
		}
	}

	/**
	 * Perform actual update for the entry
	 *
	 * No-op when $wgDisableSearchUpdate is set or when the constructor was
	 * given an invalid title (id stays 0). Dispatches to every configured
	 * search backend that supports the 'search-update' feature.
	 */
	public function doUpdate() {
		global $wgDisableSearchUpdate;

		if ( $wgDisableSearchUpdate || !$this->id ) {
			return;
		}

		wfProfileIn( __METHOD__ );

		// READ_LATEST: index the newest revision, not a lagged-replica snapshot
		$page = WikiPage::newFromId( $this->id, WikiPage::READ_LATEST );
		$indexTitle = Title::indexTitle( $this->title->getNamespace(), $this->title->getText() );

		foreach ( SearchEngine::getSearchTypes() as $type ) {
			$search = SearchEngine::create( $type );
			if ( !$search->supports( 'search-update' ) ) {
				continue;
			}

			$normalTitle = $search->normalizeText( $indexTitle );

			if ( $page === null ) {
				// Page row no longer exists (presumably deleted) — drop it from the index
				$search->delete( $this->id, $normalTitle );
				continue;
			} elseif ( $this->content === false ) {
				// Title-only update requested (no content was supplied)
				$search->updateTitle( $this->id, $normalTitle );
				continue;
			}

			$text = $search->getTextFromContent( $this->title, $this->content );
			if ( !$search->textAlreadyUpdatedForIndex() ) {
				// Backend wants the generic database-oriented text cleanup
				$text = self::updateText( $text );
			}

			# Perform the actual update
			$search->update( $this->id, $normalTitle, $search->normalizeText( $text ) );
		}

		wfProfileOut( __METHOD__ );
	}

	/**
	 * Clean text for indexing. Only really suitable for indexing in databases.
	 * If you're using a real search engine, you'll probably want to override
	 * this behavior and do something nicer with the original wikitext.
	 *
	 * Strips HTML markup, external URLs, image links and remaining wiki
	 * syntax, lowercases via the content language, and expands headings and
	 * plural/possessive word forms so they match the bare word.
	 *
	 * @param string $text Raw page text (wikitext/HTML mix)
	 * @return string Whitespace-separated, lowercased index text
	 */
	public static function updateText( $text ) {
		global $wgContLang;

		# Language-specific strip/conversion
		$text = $wgContLang->normalizeForSearch( $text );
		// Characters allowed to survive into the index; '&#;' kept so HTML
		// entities (e.g. &nbsp;) remain intact for the entity-aware regexes below
		$lc = SearchEngine::legalSearchChars() . '&#;';

		wfProfileIn( __METHOD__ . '-regexps' );
		$text = preg_replace( "/<\\/?\\s*[A-Za-z][^>]*?>/",
			' ', $wgContLang->lc( " " . $text . " " ) ); # Strip HTML markup
		$text = preg_replace( "/(^|\\n)==\\s*([^\\n]+)\\s*==(\\s)/sD",
			"\\1\\2 \\2 \\2\\3", $text ); # Emphasize headings

		# Strip external URLs
		# $uc is the byte class considered part of a URL (\x80-\xFF covers
		# UTF-8 continuation bytes); $protos the recognized URL schemes
		$uc = "A-Za-z0-9_\\/:.,~%\\-+&;#?!=()@\\x80-\\xFF";
		$protos = "http|https|ftp|mailto|news|gopher";
		$pat = "/(^|[^\\[])({$protos}):[{$uc}]+([^{$uc}]|$)/";
		$text = preg_replace( $pat, "\\1 \\3", $text );

		# Bracketed external links: bare form [proto:url] and captioned form
		# [proto:url caption] — keep only the caption text
		$p1 = "/([^\\[])\\[({$protos}):[{$uc}]+]/";
		$p2 = "/([^\\[])\\[({$protos}):[{$uc}]+\\s+([^\\]]+)]/";
		$text = preg_replace( $p1, "\\1 ", $text );
		$text = preg_replace( $p2, "\\1 \\3 ", $text );

		# Internal image links
		$pat2 = "/\\[\\[image:([{$uc}]+)\\.(gif|png|jpg|jpeg)([^{$uc}])/i";
		$text = preg_replace( $pat2, " \\1 \\3", $text );

		$text = preg_replace( "/([^{$lc}])([{$lc}]+)]]([a-z]+)/",
			"\\1\\2 \\2\\3", $text ); # Handle [[game]]s

		# Strip all remaining non-search characters
		$text = preg_replace( "/[^{$lc}]+/", " ", $text );

		# Handle 's, s'
		#
		# $text = preg_replace( "/([{$lc}]+)'s /", "\\1 \\1's ", $text );
		# $text = preg_replace( "/([{$lc}]+)s' /", "\\1s ", $text );
		#
		# These tail-anchored regexps are insanely slow. The worst case comes
		# when Japanese or Chinese text (ie, no word spacing) is written on
		# a wiki configured for Western UTF-8 mode. The Unicode characters are
		# expanded to hex codes and the "words" are very long paragraph-length
		# monstrosities. On a large page the above regexps may take over 20
		# seconds *each* on a 1GHz-level processor.
		#
		# Following are reversed versions which are consistently fast
		# (about 3 milliseconds on 1GHz-level processor).
		#
		$text = strrev( preg_replace( "/ s'([{$lc}]+)/", " s'\\1 \\1", strrev( $text ) ) );
		$text = strrev( preg_replace( "/ 's([{$lc}]+)/", " s\\1", strrev( $text ) ) );

		# Strip wiki '' and '''
		$text = preg_replace( "/''[']*/", " ", $text );
		wfProfileOut( __METHOD__ . '-regexps' );

		return $text;
	}
}