<?php
# Copyright (C) 2003 Brion Vibber <brion@pobox.com>
# http://www.mediawiki.org/
-#
+#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
+# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
-#
+#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
-#
+#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
-# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# http://www.gnu.org/copyleft/gpl.html
/**
*
- * @package MediaWiki
- * @subpackage SpecialPage
+ * @addtogroup SpecialPage
*/
-/** */
-require_once( 'Revision.php' );
+function wfExportGetPagesFromCategory( $title ) {
+ global $wgContLang;
-/**
- *
- */
-function wfSpecialExport( $page = '' ) {
- global $wgOut, $wgLang, $wgRequest;
-
- if( $wgRequest->getVal( 'action' ) == 'submit') {
- $page = $wgRequest->getText( 'pages' );
- $curonly = $wgRequest->getCheck( 'curonly' );
- } else {
- # Pre-check the 'current version only' box in the UI
- $curonly = true;
- }
-
- if( $page != '' ) {
- $wgOut->disable();
- header( "Content-type: application/xml; charset=utf-8" );
- $pages = explode( "\n", $page );
-
- $db =& wfGetDB( DB_SLAVE );
- $history = $curonly ? MW_EXPORT_CURRENT : MW_EXPORT_FULL;
- $exporter = new WikiExporter( $db, $history );
- $exporter->openStream();
- $exporter->pagesByName( $pages );
- $exporter->closeStream();
- return;
+ $name = $title->getDBkey();
+
+ $dbr = wfGetDB( DB_SLAVE );
+
+ list( $page, $categorylinks ) = $dbr->tableNamesN( 'page', 'categorylinks' );
+ $sql = "SELECT page_namespace, page_title FROM $page " .
+ "JOIN $categorylinks ON cl_from = page_id " .
+ "WHERE cl_to = " . $dbr->addQuotes( $name );
+
+ $pages = array();
+ $res = $dbr->query( $sql, 'wfExportGetPagesFromCategory' );
+ while ( $row = $dbr->fetchObject( $res ) ) {
+ $n = $row->page_title;
+ if ($row->page_namespace) {
+ $ns = $wgContLang->getNsText( $row->page_namespace );
+ $n = $ns . ':' . $n;
+ }
+
+ $pages[] = $n;
}
-
- $wgOut->addWikiText( wfMsg( "exporttext" ) );
- $titleObj = Title::makeTitle( NS_SPECIAL, "Export" );
- $action = $titleObj->escapeLocalURL( 'action=submit' );
- $wgOut->addHTML( "
-<form method='post' action=\"$action\">
-<input type='hidden' name='action' value='submit' />
-<textarea name='pages' cols='40' rows='10'></textarea><br />
-<label><input type='checkbox' name='curonly' value='true' checked='checked' />
-" . wfMsg( "exportcuronly" ) . "</label><br />
-<input type='submit' />
-</form>
-" );
+ $dbr->freeResult($res);
+
+ return $pages;
}
-define( 'MW_EXPORT_FULL', 0 );
-define( 'MW_EXPORT_CURRENT', 1 );
+/**
+ * Expand a list of pages to include templates used in those pages.
+ * @param $inputPages array, list of titles to look up
+ * @param $pageSet array, associative array indexed by titles for output
+ * @return array associative array indexed by titles
+ */
+function wfExportGetTemplates( $inputPages, $pageSet ) {
+ return wfExportGetLinks( $inputPages, $pageSet,
+ 'templatelinks',
+ array( 'tl_namespace AS namespace', 'tl_title AS title' ),
+ array( 'page_id=tl_from' ) );
+}
-define( 'MW_EXPORT_BUFFER', 0 );
-define( 'MW_EXPORT_STREAM', 1 );
+/**
+ * Expand a list of pages to include images used in those pages.
+ * @param $inputPages array, list of titles to look up
+ * @param $pageSet array, associative array indexed by titles for output
+ * @return array associative array indexed by titles
+ */
+function wfExportGetImages( $inputPages, $pageSet ) {
+ return wfExportGetLinks( $inputPages, $pageSet,
+ 'imagelinks',
+ array( NS_IMAGE . ' AS namespace', 'il_to AS title' ),
+ array( 'page_id=il_from' ) );
+}
/**
- * @package MediaWiki
- * @subpackage SpecialPage
+ * Expand a list of pages to include items used in those pages.
+ * @private
*/
-class WikiExporter {
- var $pageCallback = null;
- var $revCallback = null;
-
- /**
- * If using MW_EXPORT_STREAM to stream a large amount of data,
- * provide a database connection which is not managed by
- * LoadBalancer to read from: some history blob types will
- * make additional queries to pull source data while the
- * main query is still running.
- *
- * @param Database $db
- * @param int $history one of MW_EXPORT_FULL or MW_EXPORT_CURRENT
- * @param int $buffer one of MW_EXPORT_BUFFER or MW_EXPORT_STREAM
- */
- function WikiExporter( &$db, $history = MW_EXPORT_CURRENT,
- $buffer = MW_EXPORT_BUFFER ) {
- $this->db =& $db;
- $this->history = $history;
- $this->buffer = $buffer;
- }
-
- /**
- * Set a callback to be called after each page in the output
- * stream is closed. The callback will be passed a database row
- * object with the last revision output.
- *
- * A set callback can be removed by passing null here.
- *
- * @param mixed $callback
- */
- function setPageCallback( $callback ) {
- $this->pageCallback = $callback;
- }
-
- /**
- * Set a callback to be called after each revision in the output
- * stream is closed. The callback will be passed a database row
- * object with the revision data.
- *
- * A set callback can be removed by passing null here.
- *
- * @param mixed $callback
- */
- function setRevCallback( $callback ) {
- $this->revCallback = $callback;
- }
-
- /**
- * Opens the XML output stream's root <mediawiki> element.
- * This does not include an xml directive, so is safe to include
- * as a subelement in a larger XML stream. Namespace and XML Schema
- * references are included.
- *
- * To capture the stream to a string, use PHP's output buffering
- * functions. Output will be encoded in UTF-8.
- */
- function openStream() {
- global $wgContLanguageCode;
- print wfElement( 'mediawiki', array(
- 'xmlns' => 'http://www.mediawiki.org/xml/export-0.1/',
- 'xmlns:xsi' => 'http://www.w3.org/2001/XMLSchema-instance',
- 'xsi:schemaLocation' => 'http://www.mediawiki.org/xml/export-0.1/ ' .
- 'http://www.mediawiki.org/xml/export-0.1.xsd',
- 'version' => '0.1',
- 'xml:lang' => $wgContLanguageCode ),
- null ) . "\n";
- }
-
- /**
- * Closes the output stream with the closing root element.
- * Call when finished dumping things.
- */
- function closeStream() {
- print "</mediawiki>\n";
- }
-
- /**
- * Dumps a series of page and revision records for all pages
- * in the database, either including complete history or only
- * the most recent version.
- *
- *
- * @param Database $db
- */
- function allPages() {
- return $this->dumpFrom( '' );
- }
-
- /**
- * @param Title $title
- */
- function pageByTitle( $title ) {
- return $this->dumpFrom(
- 'page_namespace=' . $title->getNamespace() .
- ' AND page_title=' . $this->db->addQuotes( $title->getDbKey() ) );
- }
-
- function pageByName( $name ) {
- $title = Title::newFromText( $name );
- if( is_null( $title ) ) {
- return WikiError( "Can't export invalid title" );
- } else {
- return $this->pageByTitle( $title );
+function wfExportGetLinks( $inputPages, $pageSet, $table, $fields, $join ) {
+ $dbr = wfGetDB( DB_SLAVE );
+ foreach( $inputPages as $page ) {
+ $title = Title::newFromText( $page );
+ if( $title ) {
+ $pageSet[$title->getPrefixedText()] = true;
+ /// @fixme May or may not be more efficient to batch these
+ /// by namespace when given multiple input pages.
+ $result = $dbr->select(
+ array( 'page', $table ),
+ $fields,
+ array_merge( $join,
+ array(
+ 'page_namespace' => $title->getNamespace(),
+ 'page_title' => $title->getDbKey() ) ),
+ __METHOD__ );
+ foreach( $result as $row ) {
+ $template = Title::makeTitle( $row->namespace, $row->title );
+ $pageSet[$template->getPrefixedText()] = true;
+ }
}
}
-
- function pagesByName( $names ) {
- foreach( $names as $name ) {
- $this->pageByName( $name );
+ return $pageSet;
+}
+
+/**
+ * Callback function to remove empty strings from the pages array.
+ */
+function wfFilterPage( $page ) {
+ return $page !== '' && $page !== null;
+}
+
+/**
+ *
+ */
+function wfSpecialExport( $page = '' ) {
+ global $wgOut, $wgRequest, $wgSitename, $wgExportAllowListContributors;
+ global $wgExportAllowHistory, $wgExportMaxHistory;
+
+ $curonly = true;
+ $doexport = false;
+
+ if ( $wgRequest->getCheck( 'addcat' ) ) {
+ $page = $wgRequest->getText( 'pages' );
+ $catname = $wgRequest->getText( 'catname' );
+
+ if ( $catname !== '' && $catname !== NULL && $catname !== false ) {
+ $t = Title::makeTitleSafe( NS_CATEGORY, $catname );
+ if ( $t ) {
+ /**
+ * @fixme This can lead to hitting memory limit for very large
+ * categories. Ideally we would do the lookup synchronously
+ * during the export in a single query.
+ */
+ $catpages = wfExportGetPagesFromCategory( $t );
+ if ( $catpages ) $page .= "\n" . implode( "\n", $catpages );
+ }
}
}
-
-
- // -------------------- private implementation below --------------------
-
- function dumpFrom( $cond = '' ) {
- $fname = 'WikiExporter::dumpFrom';
- wfProfileIn( $fname );
-
- $page = $this->db->tableName( 'page' );
- $revision = $this->db->tableName( 'revision' );
- $text = $this->db->tableName( 'text' );
-
- if( $this->history == MW_EXPORT_FULL ) {
- $join = 'page_id=rev_page';
- } elseif( $this->history == MW_EXPORT_CURRENT ) {
- $join = 'page_id=rev_page AND page_latest=rev_id';
+ else if( $wgRequest->wasPosted() && $page == '' ) {
+ $page = $wgRequest->getText( 'pages' );
+ $curonly = $wgRequest->getCheck( 'curonly' );
+ $rawOffset = $wgRequest->getVal( 'offset' );
+ if( $rawOffset ) {
+ $offset = wfTimestamp( TS_MW, $rawOffset );
} else {
- wfProfileOut( $fname );
- return new WikiError( "$fname given invalid history dump type." );
- }
- $where = ( $cond == '' ) ? '' : "$cond AND";
-
- if( $this->buffer == MW_EXPORT_STREAM ) {
- $prev = $this->db->bufferResults( false );
+ $offset = null;
}
- $result = $this->db->query(
- "SELECT * FROM
- $page FORCE INDEX (PRIMARY),
- $revision FORCE INDEX(page_timestamp),
- $text
- WHERE $where $join AND rev_text_id=old_id
- ORDER BY page_id", $fname );
- $wrapper = $this->db->resultObject( $result );
- $this->outputStream( $wrapper );
-
- if( $this->buffer == MW_EXPORT_STREAM ) {
- $this->db->bufferResults( $prev );
- }
-
- wfProfileOut( $fname );
- }
-
- /**
- * Runs through a query result set dumping page and revision records.
- * The result set should be sorted/grouped by page to avoid duplicate
- * page records in the output.
- *
- * The result set will be freed once complete. Should be safe for
- * streaming (non-buffered) queries, as long as it was made on a
- * separate database connection not managed by LoadBalancer; some
- * blob storage types will make queries to pull source data.
- *
- * @param ResultWrapper $resultset
- * @access private
- */
- function outputStream( $resultset ) {
- $last = null;
- while( $row = $resultset->fetchObject() ) {
- if( is_null( $last ) ||
- $last->page_namespace != $row->page_namespace ||
- $last->page_title != $row->page_title ) {
- if( isset( $last ) ) {
- $this->closePage( $last );
- }
- $this->openPage( $row );
- $last = $row;
+ $limit = $wgRequest->getInt( 'limit' );
+ $dir = $wgRequest->getVal( 'dir' );
+ $history = array(
+ 'dir' => 'asc',
+ 'offset' => false,
+ 'limit' => $wgExportMaxHistory,
+ );
+ $historyCheck = $wgRequest->getCheck( 'history' );
+ if ( $curonly ) {
+ $history = WikiExporter::CURRENT;
+ } elseif ( !$historyCheck ) {
+ if ( $limit > 0 && $limit < $wgExportMaxHistory ) {
+ $history['limit'] = $limit;
+ }
+ if ( !is_null( $offset ) ) {
+ $history['offset'] = $offset;
+ }
+ if ( strtolower( $dir ) == 'desc' ) {
+ $history['dir'] = 'desc';
}
- $this->dumpRev( $row );
- }
- if( isset( $last ) ) {
- $this->closePage( $last );
}
- $resultset->free();
- }
-
- /**
- * Opens a <page> section on the output stream, with data
- * from the given database row.
- *
- * @param object $row
- * @access private
- */
- function openPage( $row ) {
- print "<page>\n";
- $title = Title::makeTitle( $row->page_namespace, $row->page_title );
- print ' ' . wfElementClean( 'title', array(), $title->getPrefixedText() ) . "\n";
- print ' ' . wfElement( 'id', array(), $row->page_id ) . "\n";
- if( '' != $row->page_restrictions ) {
- print ' ' . wfElement( 'restrictions', array(),
- $row->page_restrictions ) . "\n";
+
+ if( $page != '' ) $doexport = true;
+ } else {
+ // Default to current-only for GET requests
+ $page = $wgRequest->getText( 'pages', $page );
+ $historyCheck = $wgRequest->getCheck( 'history' );
+ if( $historyCheck ) {
+ $history = WikiExporter::FULL;
+ } else {
+ $history = WikiExporter::CURRENT;
}
+
+ if( $page != '' ) $doexport = true;
}
-
- /**
- * Closes a <page> section on the output stream.
- * If a per-page callback has been set, it will be called
- * and passed the last database row used for this page.
- *
- * @param object $row
- * @access private
- */
- function closePage( $row ) {
- print "</page>\n";
- if( isset( $this->pageCallback ) ) {
- call_user_func( $this->pageCallback, $row );
- }
+
+ if( !$wgExportAllowHistory ) {
+ // Override
+ $history = WikiExporter::CURRENT;
}
-
- /**
- * Dumps a <revision> section on the output stream, with
- * data filled in from the given database row.
- *
- * @param object $row
- * @access private
- */
- function dumpRev( $row ) {
- $fname = 'WikiExporter::dumpRev';
- wfProfileIn( $fname );
-
- print " <revision>\n";
- print " " . wfElement( 'id', null, $row->rev_id ) . "\n";
-
- $ts = wfTimestamp2ISO8601( $row->rev_timestamp );
- print " " . wfElement( 'timestamp', null, $ts ) . "\n";
-
- print " <contributor>";
- if( $row->rev_user ) {
- print wfElementClean( 'username', null, $row->rev_user_text );
- print wfElement( 'id', null, $row->rev_user );
- } else {
- print wfElementClean( 'ip', null, $row->rev_user_text );
+
+ $list_authors = $wgRequest->getCheck( 'listauthors' );
+ if ( !$curonly || !$wgExportAllowListContributors ) $list_authors = false ;
+
+ if ( $doexport ) {
+ $wgOut->disable();
+
+ // Cancel output buffering and gzipping if set
+ // This should provide safer streaming for pages with history
+ wfResetOutputBuffers();
+ header( "Content-type: application/xml; charset=utf-8" );
+ if( $wgRequest->getCheck( 'wpDownload' ) ) {
+ // Provide a sane filename suggestion
+ $filename = urlencode( $wgSitename . '-' . wfTimestampNow() . '.xml' );
+ $wgRequest->response()->header( "Content-disposition: attachment;filename={$filename}" );
}
- print "</contributor>\n";
-
- if( $row->rev_minor_edit ) {
- print " <minor/>\n";
+
+ /* Split up the input and look up linked pages */
+ $inputPages = array_filter( explode( "\n", $page ), 'wfFilterPage' );
+ $pageSet = array_flip( $inputPages );
+
+ if( $wgRequest->getCheck( 'templates' ) ) {
+ $pageSet = wfExportGetTemplates( $inputPages, $pageSet );
}
- if( $row->rev_comment != '' ) {
- print " " . wfElementClean( 'comment', null, $row->rev_comment ) . "\n";
+
+ /*
+ // Enable this when we can do something useful exporting/importing image information. :)
+ if( $wgRequest->getCheck( 'images' ) ) {
+ $pageSet = wfExportGetImages( $inputPages, $pageSet );
}
-
- $text = Revision::getRevisionText( $row );
- print " " . wfElementClean( 'text', array(), $text ) . "\n";
- print " </revision>\n";
-
- wfProfileOut( $fname );
-
- if( isset( $this->revCallback ) ) {
- call_user_func( $this->revCallback, $row );
+ */
+
+ $pages = array_keys( $pageSet );
+
+ /* Ok, let's get to it... */
+
+ $db = wfGetDB( DB_SLAVE );
+ $exporter = new WikiExporter( $db, $history );
+ $exporter->list_authors = $list_authors ;
+ $exporter->openStream();
+
+ foreach( $pages as $page ) {
+ /*
+ if( $wgExportMaxHistory && !$curonly ) {
+ $title = Title::newFromText( $page );
+ if( $title ) {
+ $count = Revision::countByTitle( $db, $title );
+ if( $count > $wgExportMaxHistory ) {
+ wfDebug( __FUNCTION__ .
+ ": Skipped $page, $count revisions too big\n" );
+ continue;
+ }
+ }
+ }*/
+
+ #Bug 8824: Only export pages the user can read
+ $title = Title::newFromText( $page );
+ if( is_null( $title ) ) continue; #TODO: perhaps output an <error> tag or something.
+ if( !$title->userCanRead() ) continue; #TODO: perhaps output an <error> tag or something.
+
+ $exporter->pageByTitle( $title );
}
+
+ $exporter->closeStream();
+ return;
}
-}
+ $self = SpecialPage::getTitleFor( 'Export' );
+ $wgOut->addHtml( wfMsgExt( 'exporttext', 'parse' ) );
-function wfTimestamp2ISO8601( $ts ) {
- #2003-08-05T18:30:02Z
- return preg_replace( '/^(....)(..)(..)(..)(..)(..)$/', '$1-$2-$3T$4:$5:$6Z', $ts );
-}
+ $form = Xml::openElement( 'form', array( 'method' => 'post',
+ 'action' => $self->getLocalUrl( 'action=submit' ) ) );
-function xmlsafe( $string ) {
- $fname = 'xmlsafe';
- wfProfileIn( $fname );
-
- /**
- * The page may contain old data which has not been properly normalized.
- * Invalid UTF-8 sequences or forbidden control characters will make our
- * XML output invalid, so be sure to strip them out.
- */
- $string = UtfNormal::cleanUp( $string );
-
- $string = htmlspecialchars( $string );
- wfProfileOut( $fname );
- return $string;
-}
+ $form .= Xml::inputLabel( wfMsg( 'export-addcattext' ) , 'catname', 'catname', 40 ) . ' ';
+ $form .= Xml::submitButton( wfMsg( 'export-addcat' ), array( 'name' => 'addcat' ) ) . '<br />';
+
+ $form .= Xml::openElement( 'textarea', array( 'name' => 'pages', 'cols' => 40, 'rows' => 10 ) );
+ $form .= htmlspecialchars( $page );
+ $form .= Xml::closeElement( 'textarea' );
+ $form .= '<br />';
+
+ if( $wgExportAllowHistory ) {
+ $form .= Xml::checkLabel( wfMsg( 'exportcuronly' ), 'curonly', 'curonly', true ) . '<br />';
+ } else {
+ $wgOut->addHtml( wfMsgExt( 'exportnohistory', 'parse' ) );
+ }
+ $form .= Xml::checkLabel( wfMsg( 'export-templates' ), 'templates', 'wpExportTemplates', false ) . '<br />';
+ // Enable this when we can do something useful exporting/importing image information. :)
+ //$form .= Xml::checkLabel( wfMsg( 'export-images' ), 'images', 'wpExportImages', false ) . '<br />';
+ $form .= Xml::checkLabel( wfMsg( 'export-download' ), 'wpDownload', 'wpDownload', true ) . '<br />';
-?>
+ $form .= Xml::submitButton( wfMsg( 'export-submit' ) );
+ $form .= Xml::closeElement( 'form' );
+ $wgOut->addHtml( $form );
+}