<ruleset name="MediaWiki">
<rule ref="./vendor/mediawiki/mediawiki-codesniffer/MediaWiki">
<exclude name="Generic.ControlStructures.InlineControlStructure" />
+ <exclude name="MediaWiki.Commenting.FunctionAnnotations.UnrecognizedAnnotation" />
<exclude name="MediaWiki.Commenting.FunctionComment.MissingDocumentationProtected" />
<exclude name="MediaWiki.Commenting.FunctionComment.MissingDocumentationPublic" />
<exclude name="MediaWiki.Commenting.FunctionComment.MissingParamTag" />
<exclude name="MediaWiki.Commenting.IllegalSingleLineComment.IllegalSingleLineCommentStart" />
<exclude name="MediaWiki.Commenting.IllegalSingleLineComment.IllegalSingleLineCommentEnd" />
<exclude name="MediaWiki.ControlStructures.AssignmentInControlStructures.AssignmentInControlStructures" />
+ <exclude name="MediaWiki.Commenting.MissingCovers.MissingCovers" />
<exclude name="MediaWiki.NamingConventions.LowerCamelFunctionsName.FunctionName" />
- <exclude name="MediaWiki.WhiteSpace.SpaceBeforeSingleLineComment.NewLineComment" />
- <exclude name="MediaWiki.WhiteSpace.SpaceBeforeSingleLineComment.SingleSpaceBeforeSingleLineComment" />
<exclude name="MediaWiki.Usage.DbrQueryUsage.DbrQueryFound" />
<exclude name="MediaWiki.Usage.ExtendClassUsage.FunctionVarUsage" />
+ <exclude name="MediaWiki.Usage.ForbiddenFunctions.passthru" />
<exclude name="MediaWiki.VariableAnalysis.ForbiddenGlobalVariables.ForbiddenGlobal$wgTitle" />
+ <exclude name="MediaWiki.WhiteSpace.SpaceBeforeSingleLineComment.NewLineComment" />
+ <exclude name="MediaWiki.WhiteSpace.SpaceBeforeSingleLineComment.SingleSpaceBeforeSingleLineComment" />
<exclude name="Squiz.Scope.MethodScope.Missing" />
<exclude name="Squiz.Scope.MemberVarScope.Missing" />
- <exclude name="MediaWiki.Commenting.MissingCovers.MissingCovers" />
</rule>
<rule ref="MediaWiki.NamingConventions.PrefixedGlobalFunctions">
<properties>
<property name="ignoreList" type="array" value="$IP,$messageMemc" />
</properties>
</rule>
+ <rule ref="MediaWiki.NamingConventions.ValidGlobalName.allowedPrefix">
+ <exclude-pattern>profileinfo\.php</exclude-pattern>
+ <exclude-pattern>maintenance/language/checkLanguage\.inc</exclude-pattern>
+ <exclude-pattern>maintenance/doMaintenance\.php</exclude-pattern>
+ <exclude-pattern>maintenance/mergeMessageFileList\.php</exclude-pattern>
+ <exclude-pattern>maintenance/commandLine\.inc</exclude-pattern>
+ </rule>
<rule ref="Generic.Files.LineLength">
<exclude-pattern>*/languages/messages/Messages*\.php</exclude-pattern>
</rule>
MediaWiki is developed and tested mainly on Unix/Linux platforms, but should
work on Windows as well.
-If your PHP is configured as a CGI plug-in rather than an Apache module you may
-experience problems, as this configuration is not well tested.
-
Support for rendering mathematical formulas requires installing the Math extension,
see https://www.mediawiki.org/wiki/Special:MyLanguage/Extension:Math
* The archive table's ar_rev_id field is now unique.
* Special:BotPasswords now requires reauthentication.
* (T194414) The default watchlist view time has been increased from 3 to 7 days.
+* The right to edit sitewide JavaScript (e.g. MediaWiki:Common.js), CSS or JSON
+ was separated from 'editinterface' and is available under
+ 'editsitejs'/'editsitecss'/'editsitejson'. Having 'editinterface' is still
+ necessary to edit such pages.
+* A new user group, 'interface-admin', is added for controlling access to
+ sitewide CSS/JS (and editing other users' CSS/JS). No other group has
+ 'editsitecss', 'editusercss', 'editsitejs' or 'edituserjs' by default.
+* A new grant group, 'editsiteconfig', is added for granting the above rights.
=== New features in 1.32 ===
* (T112474) Generalized the ResourceLoader mechanism for overriding modules
* Added new 'OutputPageAfterGetHeadLinksArray' hook, allowing extensions to
modify the return value of OutputPage#getHeadLinksArray in order to add,
remove or otherwise alter the elements to be output in the page <head>.
+* (T28934) The 'HistoryPageToolLinks' hook allows extensions to append
+ additional links to the subtitle of a history page.
=== External library changes in 1.32 ===
* …
* Updated wikimedia/scoped-callback from 1.0.0 to 2.0.0.
** ScopedCallback objects can no longer be serialized.
* Updated wikimedia/wrappedstring from 2.3.0 to 3.0.1.
+* Updated mediawiki/mediawiki-codesniffer from v20.0.0 to v21.0.0.
+* Updated composer/spdx-licenses from 1.3.0 to 1.4.0.
==== New external libraries ====
* Added wikimedia/xmp-reader 0.5.1
Set $wgShowExceptionDetails and/or $wgShowHostnames instead.
* The $wgShowDBErrorBacktrace global is deprecated and nonfunctional.
Set $wgShowExceptionDetails instead.
+* Public access to the DifferenceEngine properties mOldid, mNewid, mOldPage,
+ mNewPage, mOldContent, mNewContent, mRevisionsLoaded, mTextLoaded and
+ mCacheHit is deprecated. Use getOldid() / getNewid() for the first two,
+ do your own lookup for page/content. mNewRev / mOldRev remains public.
+* The $wgExternalDiffEngine value 'wikidiff2' is deprecated. To use wikidiff2
+ just enable the PHP extension, and it will be autodetected.
=== Other changes in 1.32 ===
* (T198811) The following tables have had their UNIQUE indexes turned into
'DumpStringOutput' => __DIR__ . '/includes/export/DumpStringOutput.php',
'DumpUploads' => __DIR__ . '/maintenance/dumpUploads.php',
'DuplicateJob' => __DIR__ . '/includes/jobqueue/jobs/DuplicateJob.php',
+ 'EasyDeflate' => __DIR__ . '/includes/libs/EasyDeflate.php',
'EditAction' => __DIR__ . '/includes/actions/EditAction.php',
'EditCLI' => __DIR__ . '/maintenance/edit.php',
'EditPage' => __DIR__ . '/includes/EditPage.php',
},
"require-dev": {
"cache/integration-tests": "0.16.0",
- "composer/spdx-licenses": "1.3.0",
+ "composer/spdx-licenses": "1.4.0",
"hamcrest/hamcrest-php": "^2.0",
"jakub-onderka/php-parallel-lint": "0.9.2",
- "jetbrains/phpstorm-stubs": "dev-master#1b9906084d6635456fcf3f3a01f0d7d5b99a578a",
+ "jetbrains/phpstorm-stubs": "dev-master#38ff1a581b297f7901e961b8c923862ea80c3b96",
"justinrainbow/json-schema": "~5.2",
- "mediawiki/mediawiki-codesniffer": "20.0.0",
+ "mediawiki/mediawiki-codesniffer": "21.0.0",
"monolog/monolog": "~1.22.1",
"nikic/php-parser": "3.1.3",
"seld/jsonlint": "1.7.1",
$prevRev: Revision object, next in line in page history, or null
$user: Current user object
+'HistoryPageToolLinks': Add one or more links to revision history page subtitle.
+$context: IContextSource (object)
+$linkRenderer: LinkRenderer instance
+&$links: Array of HTML strings
+
'HTMLFileCache::useFileCache': Override whether a page should be cached in file
cache.
$context: An IContextSource object with information about the request being
* just that it was. If you want to change this, you can set $wgImgAuthDetails to 'true'
* in localsettings.php and it will give the user the reason why access was denied.
*
- * Your server needs to support PATH_INFO; CGI-based configurations usually don't.
+ * Your server needs to support REQUEST_URI or PATH_INFO; CGI-based
+ * configurations sometimes don't.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
'PasswordCannotMatchUsername' => true,
'PasswordCannotBePopular' => 25,
],
+ 'interface-admin' => [
+ 'MinimalPasswordLength' => 8,
+ 'MinimumPasswordLengthToLogin' => 1,
+ 'PasswordCannotMatchUsername' => true,
+ 'PasswordCannotBePopular' => 25,
+ ],
'bot' => [
'MinimalPasswordLength' => 8,
'MinimumPasswordLengthToLogin' => 1,
$wgGroupPermissions['sysop']['deletedtext'] = true;
$wgGroupPermissions['sysop']['undelete'] = true;
$wgGroupPermissions['sysop']['editinterface'] = true;
-$wgGroupPermissions['sysop']['editusercss'] = true;
+$wgGroupPermissions['sysop']['editsitejson'] = true;
$wgGroupPermissions['sysop']['edituserjson'] = true;
-$wgGroupPermissions['sysop']['edituserjs'] = true;
$wgGroupPermissions['sysop']['import'] = true;
$wgGroupPermissions['sysop']['importupload'] = true;
$wgGroupPermissions['sysop']['move'] = true;
$wgGroupPermissions['sysop']['managechangetags'] = true;
$wgGroupPermissions['sysop']['deletechangetags'] = true;
+$wgGroupPermissions['interface-admin']['editinterface'] = true;
+$wgGroupPermissions['interface-admin']['editsitecss'] = true;
+$wgGroupPermissions['interface-admin']['editsitejson'] = true;
+$wgGroupPermissions['interface-admin']['editsitejs'] = true;
+$wgGroupPermissions['interface-admin']['editusercss'] = true;
+$wgGroupPermissions['interface-admin']['edituserjson'] = true;
+$wgGroupPermissions['interface-admin']['edituserjs'] = true;
+
// Permission to change users' group assignments
$wgGroupPermissions['bureaucrat']['userrights'] = true;
$wgGroupPermissions['bureaucrat']['noratelimit'] = true;
$wgGrantPermissions['editinterface'] = $wgGrantPermissions['editpage'];
$wgGrantPermissions['editinterface']['editinterface'] = true;
-$wgGrantPermissions['editinterface']['editusercss'] = true;
$wgGrantPermissions['editinterface']['edituserjson'] = true;
-$wgGrantPermissions['editinterface']['edituserjs'] = true;
+$wgGrantPermissions['editinterface']['editsitejson'] = true;
+
+$wgGrantPermissions['editsiteconfig'] = $wgGrantPermissions['editinterface'];
+$wgGrantPermissions['editsiteconfig']['editusercss'] = true;
+$wgGrantPermissions['editsiteconfig']['edituserjs'] = true;
+$wgGrantPermissions['editsiteconfig']['editsitecss'] = true;
+$wgGrantPermissions['editsiteconfig']['editsitejs'] = true;
$wgGrantPermissions['createeditmovepage'] = $wgGrantPermissions['editpage'];
$wgGrantPermissions['createeditmovepage']['createpage'] = true;
'editmyoptions' => 'customization',
'editinterface' => 'administration',
+ 'editsiteconfig' => 'administration',
'rollback' => 'administration',
'blockusers' => 'administration',
'delete' => 'administration',
* Returns the revision that was current at the time editing was initiated on the client,
* even if the edit was based on an old revision.
*
- * @warning: this method is very poorly named. If the user opened the form with ?oldid=X,
+ * @warning this method is very poorly named. If the user opened the form with ?oldid=X,
* one might think of X as the "base revision", which is NOT what this returns,
* see oldid for that. One might further assume that this corresponds to the $baseRevId
* parameter of WikiPage::doEditContent, which is not the case either.
// Give grep a chance to find the usages:
// grant-blockusers, grant-createeditmovepage, grant-delete,
// grant-editinterface, grant-editmycssjs, grant-editmywatchlist,
- // grant-editpage, grant-editprotected, grant-highvolume,
- // grant-oversight, grant-patrol, grant-protect, grant-rollback,
- // grant-sendemail, grant-uploadeditmovefile, grant-uploadfile,
- // grant-basic, grant-viewdeleted, grant-viewmywatchlist,
- // grant-createaccount
+ // grant-editsiteconfig, grant-editpage, grant-editprotected,
+ // grant-highvolume, grant-oversight, grant-patrol, grant-protect,
+ // grant-rollback, grant-sendemail, grant-uploadeditmovefile,
+ // grant-uploadfile, grant-basic, grant-viewdeleted,
+ // grant-viewmywatchlist, grant-createaccount
$msg = wfMessage( "grant-$grant" );
if ( $lang !== null ) {
if ( is_string( $lang ) ) {
/**
* Set the title of the revision
*
- * @deprecated: since 1.31, this is now a noop. Pass the Title to the constructor instead.
+ * @deprecated since 1.31, this is now a noop. Pass the Title to the constructor instead.
*
* @param Title $title
*/
* used to determine the content model to use. If no title is know, CONTENT_MODEL_WIKITEXT
* is used as a last resort.
*
- * @todo: drop this, with MCR, there no longer is a single model associated with a revision.
+ * @todo drop this, with MCR, there no longer is a single model associated with a revision.
*
* @return string The content model id associated with this revision,
* see the CONTENT_MODEL_XXX constants.
* If no content format was stored in the database, the default format for this
* revision's content model is returned.
*
- * @todo: drop this, the format is irrelevant to the revision!
+ * @todo drop this, the format is irrelevant to the revision!
*
* @return string The content format id associated with this revision,
* see the CONTENT_FORMAT_XXX constants.
/**
* Returns a list of status messages of the given type (or all if false)
*
- * @note: this handles RawMessage poorly
+ * @note this handles RawMessage poorly
*
* @param string|bool $type
* @return array
*
* @see docs/pageupdater.txt for more information on when this method can and should be called.
*
- * @note: Calling this method more than once with the same $slotsUpdate
+ * @note Calling this method more than once with the same $slotsUpdate
* has no effect. Calling this method multiple times with different content will cause
* an exception.
*
- * @note: Calling this method after prepareUpdate() has been called will cause an exception.
+ * @note Calling this method after prepareUpdate() has been called will cause an exception.
*
* @param User $user The user to act as context for pre-save transformation (PST).
* Type hint should be reduced to UserIdentity at some point.
/**
* Whether the edit created, or should create, a new revision (that is, it's not a null-edit).
*
- * @warning: at present, "null-revisions" that do not change content but do have a revision
+ * @warning at present, "null-revisions" that do not change content but do have a revision
* record would return false after prepareContent(), but true after prepareUpdate()!
* This should probably be fixed.
*
*
* @see docs/pageupdater.txt for more information on when this method can and should be called.
*
- * @note: Calling this method more than once with the same revision has no effect.
+ * @note Calling this method more than once with the same revision has no effect.
* $options are only used for the first call. Calling this method multiple times with
* different revisions will cause an exception.
*
- * @note: If grabCurrentRevision() (or prepareContent()) has been called before
+ * @note If grabCurrentRevision() (or prepareContent()) has been called before
* calling this method, $revision->getParentRevision() has to refer to the revision that
* was the current revision at the time grabCurrentRevision() was called.
*
/**
* Computes the total nominal size of the revision's slots, in bogo-bytes.
*
- * @warn This is potentially expensive! It may cause all slot's content to be loaded
+ * @warning This is potentially expensive! It may cause all slot's content to be loaded
* and deserialized.
*
* @return int
* is that slot's hash. For consistency, the combined hash of an empty set of slots
* is the hash of the empty string.
*
- * @warn This is potentially expensive! It may cause all slot's content to be loaded
+ * @warning This is potentially expensive! It may cause all slot's content to be loaded
* and deserialized, then re-serialized and hashed.
*
* @return string
* Such revisions can for instance identify page rename
* operations and other such meta-modifications.
*
- * @note: This method grabs a FOR UPDATE lock on the relevant row of the page table,
+ * @note This method grabs a FOR UPDATE lock on the relevant row of the page table,
* to prevent a new revision from being inserted before the null revision has been written
* to the database.
*
public static function newFromTextThrow( $text, $defaultNamespace = NS_MAIN ) {
if ( is_object( $text ) ) {
throw new MWException( '$text must be a string, given an object' );
+ } elseif ( $text === null ) {
+ // Legacy code relies on MalformedTitleException being thrown in this case
+ // (happens when URL with no title in it is parsed). TODO fix
+ throw new MalformedTitleException( 'title-invalid-empty' );
}
$titleCache = self::getTitleCache();
*/
public function isSiteConfigPage() {
return (
- NS_MEDIAWIKI == $this->mNamespace
- && (
- $this->hasContentModel( CONTENT_MODEL_CSS )
- || $this->hasContentModel( CONTENT_MODEL_JSON )
- || $this->hasContentModel( CONTENT_MODEL_JAVASCRIPT )
- )
+ $this->isSiteCssConfigPage()
+ || $this->isSiteJsonConfigPage()
+ || $this->isSiteJsConfigPage()
);
}
*/
public function isUserConfigPage() {
return (
- NS_USER == $this->mNamespace
- && $this->isSubpage()
- && (
- $this->hasContentModel( CONTENT_MODEL_CSS )
- || $this->hasContentModel( CONTENT_MODEL_JSON )
- || $this->hasContentModel( CONTENT_MODEL_JAVASCRIPT )
- )
+ $this->isUserCssConfigPage()
+ || $this->isUserJsonConfigPage()
+ || $this->isUserJsConfigPage()
);
}
return $this->isUserJsConfigPage();
}
+ /**
+ * Is this a sitewide CSS "config" page?
+ *
+ * @return bool
+ * @since 1.32
+ */
+ public function isSiteCssConfigPage() {
+ return (
+ NS_MEDIAWIKI == $this->mNamespace
+ && (
+ $this->hasContentModel( CONTENT_MODEL_CSS )
+ // paranoia - a MediaWiki: namespace page with mismatching extension and content
+ // model is probably by mistake and might get handled incorrectly (see e.g. T112937)
+ || substr( $this->getDBkey(), -4 ) === '.css'
+ )
+ );
+ }
+
+ /**
+ * Is this a sitewide JSON "config" page?
+ *
+ * @return bool
+ * @since 1.32
+ */
+ public function isSiteJsonConfigPage() {
+ return (
+ NS_MEDIAWIKI == $this->mNamespace
+ && (
+ $this->hasContentModel( CONTENT_MODEL_JSON )
+ // paranoia - a MediaWiki: namespace page with mismatching extension and content
+ // model is probably by mistake and might get handled incorrectly (see e.g. T112937)
+ || substr( $this->getDBkey(), -5 ) === '.json'
+ )
+ );
+ }
+
+ /**
+ * Is this a sitewide JS "config" page?
+ *
+ * @return bool
+ * @since 1.31
+ */
+ public function isSiteJsConfigPage() {
+ return (
+ NS_MEDIAWIKI == $this->mNamespace
+ && (
+ $this->hasContentModel( CONTENT_MODEL_JAVASCRIPT )
+ // paranoia - a MediaWiki: namespace page with mismatching extension and content
+ // model is probably by mistake and might get handled incorrectly (see e.g. T112937)
+ || substr( $this->getDBkey(), -3 ) === '.js'
+ )
+ );
+ }
+
/**
* Is this a talk page of some sort?
*
return $errors;
}
+ /**
+ * Check sitewide CSS/JSON/JS permissions
+ *
+ * @param string $action The action to check
+ * @param User $user User to check
+ * @param array $errors List of current errors
+ * @param string $rigor Same format as Title::getUserPermissionsErrors()
+ * @param bool $short Short circuit on first error
+ *
+ * @return array List of errors
+ */
+ private function checkSiteConfigPermissions( $action, $user, $errors, $rigor, $short ) {
+ if ( $action != 'patrol' ) {
+ // Sitewide CSS/JSON/JS changes, like all NS_MEDIAWIKI changes, also require the
+ // editinterface right. That's implemented as a restriction so no check needed here.
+ if ( $this->isSiteCssConfigPage() && !$user->isAllowed( 'editsitecss' ) ) {
+ $errors[] = [ 'sitecssprotected', $action ];
+ } elseif ( $this->isSiteJsonConfigPage() && !$user->isAllowed( 'editsitejson' ) ) {
+ $errors[] = [ 'sitejsonprotected', $action ];
+ } elseif ( $this->isSiteJsConfigPage() && !$user->isAllowed( 'editsitejs' ) ) {
+ $errors[] = [ 'sitejsprotected', $action ];
+ }
+ }
+
+ return $errors;
+ }
+
/**
* Check CSS/JSON/JS sub-page permissions
*
'checkReadPermissions',
'checkUserBlock', // for wgBlockDisablesLogin
];
- # Don't call checkSpecialsAndNSPermissions or checkUserConfigPermissions
- # here as it will lead to duplicate error messages. This is okay to do
- # since anywhere that checks for create will also check for edit, and
- # those checks are called for edit.
+ # Don't call checkSpecialsAndNSPermissions, checkSiteConfigPermissions
+ # or checkUserConfigPermissions here as it will lead to duplicate
+ # error messages. This is okay to do since anywhere that checks for
+ # create will also check for edit, and those checks are called for edit.
} elseif ( $action == 'create' ) {
$checks = [
'checkQuickPermissions',
'checkQuickPermissions',
'checkPermissionHooks',
'checkSpecialsAndNSPermissions',
+ 'checkSiteConfigPermissions',
'checkUserConfigPermissions',
'checkPageRestrictions',
'checkCascadingSourcesRestrictions',
protected function getDescription() {
// Creation of a subtitle link pointing to [[Special:Log]]
- return MediaWikiServices::getInstance()->getLinkRenderer()->makeKnownLink(
+ $linkRenderer = MediaWikiServices::getInstance()->getLinkRenderer();
+ $subtitle = $linkRenderer->makeKnownLink(
SpecialPage::getTitleFor( 'Log' ),
$this->msg( 'viewpagelogs' )->text(),
[],
[ 'page' => $this->getTitle()->getPrefixedText() ]
);
+
+ $links = [];
+ // Allow extensions to add more links
+ Hooks::run( 'HistoryPageToolLinks', [ $this->getContext(), $linkRenderer, &$links ] );
+ if ( $links ) {
+ $subtitle .= ''
+ . $this->msg( 'word-separator' )->escaped()
+ . $this->msg( 'parentheses' )
+ ->rawParams( $this->getLanguage()->pipeList( $links ) )
+ ->escaped();
+ }
+ return $subtitle;
}
/**
/**
* Update link tables with outgoing links from an updated article
*
- * @note: this is managed by DeferredUpdates::execute(). Do not run this in a transaction.
+ * @note this is managed by DeferredUpdates::execute(). Do not run this in a transaction.
*/
public function doUpdate() {
if ( $this->ticket ) {
* @ingroup DifferenceEngine
*/
class DifferenceEngine extends ContextSource {
+
+ use DeprecationHelper;
+
/**
* Constant to indicate diff cache compatibility.
* Bump this when changing the diff formatting in a way that
const DIFF_VERSION = '1.12';
/** @var int Revision ID or 0 for current */
- public $mOldid;
+ protected $mOldid;
/** @var int|string Revision ID or null for current or an alias such as 'next' */
- public $mNewid;
+ protected $mNewid;
private $mOldTags;
private $mNewTags;
/** @var Content|null */
- public $mOldContent;
+ protected $mOldContent;
/** @var Content|null */
- public $mNewContent;
+ protected $mNewContent;
/** @var Language */
protected $mDiffLang;
/** @var Title */
- public $mOldPage;
+ protected $mOldPage;
/** @var Title */
- public $mNewPage;
+ protected $mNewPage;
/** @var Revision|null */
public $mOldRev;
private $mRevisionsIdsLoaded = false;
/** @var bool Have the revisions been loaded */
- public $mRevisionsLoaded = false;
+ protected $mRevisionsLoaded = false;
/** @var int How many text blobs have been loaded, 0, 1 or 2? */
- public $mTextLoaded = 0;
+ protected $mTextLoaded = 0;
/**
* Was the content overridden via setContent()?
protected $isContentOverridden = false;
/** @var bool Was the diff fetched from cache? */
- public $mCacheHit = false;
+ protected $mCacheHit = false;
/**
* Set this to true to add debug info to the HTML output.
public function __construct( $context = null, $old = 0, $new = 0, $rcid = 0,
$refreshCache = false, $unhide = false
) {
+ $this->deprecatePublicProperty( 'mOldid', '1.32', __CLASS__ );
+ $this->deprecatePublicProperty( 'mNewid', '1.32', __CLASS__ );
+ $this->deprecatePublicProperty( 'mOldPage', '1.32', __CLASS__ );
+ $this->deprecatePublicProperty( 'mNewPage', '1.32', __CLASS__ );
+ $this->deprecatePublicProperty( 'mOldContent', '1.32', __CLASS__ );
+ $this->deprecatePublicProperty( 'mNewContent', '1.32', __CLASS__ );
+ $this->deprecatePublicProperty( 'mRevisionsLoaded', '1.32', __CLASS__ );
+ $this->deprecatePublicProperty( 'mTextLoaded', '1.32', __CLASS__ );
+ $this->deprecatePublicProperty( 'mCacheHit', '1.32', __CLASS__ );
+
if ( $context instanceof IContextSource ) {
$this->setContext( $context );
}
wfDeprecated( "\$wgExternalDiffEngine = '{$wgExternalDiffEngine}'", '1.27' );
$wgExternalDiffEngine = false;
} elseif ( $wgExternalDiffEngine == 'wikidiff2' ) {
- // Same as above, but with no deprecation warnings
+ wfDeprecated( "\$wgExternalDiffEngine = '{$wgExternalDiffEngine}'", '1.32' );
$wgExternalDiffEngine = false;
} elseif ( !is_string( $wgExternalDiffEngine ) && $wgExternalDiffEngine !== false ) {
// And prevent people from shooting themselves in the foot...
$dropdownInputAttribs['classes'] = [ $this->mClass ];
}
+ $disabled = false;
+ if ( isset( $this->mParams[ 'disabled' ] ) && $this->mParams[ 'disabled' ] ) {
+ $disabled = true;
+ }
+
return $this->getInputWidget( [
'id' => $this->mID,
+ 'disabled' => $disabled,
'textinput' => $textAttribs,
'dropdowninput' => $dropdownInputAttribs,
'or' => false,
$textAttribs['placeholder'] = $this->mPlaceholder;
}
+ $disabled = false;
+ if ( isset( $this->mParams[ 'disabled' ] ) && $this->mParams[ 'disabled' ] ) {
+ $disabled = true;
+ }
+
return $this->getInputWidget( [
'id' => $this->mID,
+ 'disabled' => $disabled,
'textinput' => $textAttribs,
'dropdowninput' => $dropdownAttribs,
'or' => true,
}
/**
- * Create the first user account, grant it sysop and bureaucrat rights
+ * Create the first user account, grant it sysop, bureaucrat and interface-admin rights
*
* @return Status
*/
$user->addGroup( 'sysop' );
$user->addGroup( 'bureaucrat' );
+ $user->addGroup( 'interface-admin' );
if ( $this->getVar( '_AdminEmail' ) ) {
$user->setEmail( $this->getVar( '_AdminEmail' ) );
}
[ 'migrateComments' ],
// 1.31
- [ 'addTable', 'slots', 'patch-slots.sql' ],
- [ 'addField', 'slots', 'slot_origin', 'patch-slot-origin.sql' ],
- [ 'addTable', 'content', 'patch-content.sql' ],
[ 'addTable', 'slot_roles', 'patch-slot_roles.sql' ],
[ 'addTable', 'content_models', 'patch-content_models.sql' ],
+ [ 'addTable', 'content', 'patch-content.sql' ],
+ [ 'addTable', 'slots', 'patch-slots.sql' ],
+ [ 'addField', 'slots', 'slot_origin', 'patch-slot-origin.sql' ],
[ 'migrateArchiveText' ],
[ 'addTable', 'actor', 'patch-actor-table.sql' ],
[ 'migrateActors' ],
$status = $upp->checkUserPasswordForGroups(
$user,
$pwd,
- [ 'bureaucrat', 'sysop' ] // per Installer::createSysop()
+ [ 'bureaucrat', 'sysop', 'interface-admin' ] // per Installer::createSysop()
);
$valid = $status->isGood() ? true : $status->getMessage();
} else {
--- /dev/null
+<?php
+/**
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+/**
+ * Server-side helper for the easy-deflate library
+ *
+ * @since 1.32
+ */
+class EasyDeflate {
+
+ /**
+ * Whether the content is deflated
+ *
+ * @param string $data
+ *
+ * @return bool
+ */
+ public static function isDeflated( $data ) {
+ return substr( $data, 0, 11 ) === 'rawdeflate,';
+ }
+
+ /**
+ * For content that has been compressed with deflate in the client,
+ * try to uncompress it with inflate.
+ *
+ * If data is not prefixed with 'rawdeflate,' it will be returned unmodified.
+ *
+ * Data can be compressed in the client using the 'easy-deflate.deflate'
+ * module:
+ *
+ * @code
+ * mw.loader.using( 'easy-deflate.deflate' ).then( function () {
+ * var deflated = EasyDeflate.deflate( myContent );
+ * } );
+ * @endcode
+ *
+ * @param string $data Deflated data
+ * @return StatusValue Inflated data will be set as the value
+ * @throws InvalidArgumentException If the data wasn't deflated
+ */
+ public static function inflate( $data ) {
+ if ( !self::isDeflated( $data ) ) {
+ throw new InvalidArgumentException( 'Data does not begin with deflated prefix' );
+ }
+ $deflated = base64_decode( substr( $data, 11 ), true );
+ if ( $deflated === false ) {
+ return StatusValue::newFatal( 'easydeflate-invaliddeflate' );
+ }
+ Wikimedia\suppressWarnings();
+ $inflated = gzinflate( $deflated );
+ Wikimedia\restoreWarnings();
+ if ( $inflated === false ) {
+ return StatusValue::newFatal( 'easydeflate-invaliddeflate' );
+ }
+ return StatusValue::newGood( $inflated );
+ }
+}
* Note that type parameters and accept extension like the "level" parameter
* are not supported, weights are derived from "q" values only.
*
- * @todo: If additional type parameters are present, ignore them cleanly.
+ * @todo If additional type parameters are present, ignore them cleanly.
* At present, they often confuse the result.
*
* See HTTP/1.1 section 14 for details.
}
/**
- * @TODO change this code to work in one batch
+ * @todo change this code to work in one batch
* @param string $lockSrv
* @param array $pathsByType
* @return StatusValue
final protected function normalizePathsByType( array $pathsByType ) {
$res = [];
foreach ( $pathsByType as $type => $paths ) {
+ foreach ( $paths as $path ) {
+ if ( (string)$path === '' ) {
+ throw new InvalidArgumentException( __METHOD__ . ": got empty path." );
+ }
+ }
$res[$this->lockTypeMap[$type]] = array_unique( $paths );
}
abstract protected function doGet( $key, $flags = 0 );
/**
- * @note: This method is only needed if merge() uses mergeViaCas()
+ * @note This method is only needed if merge() uses mergeViaCas()
*
* @param string $key
* @param mixed &$casToken
const STATUS_TRX_NONE = 3;
/**
- * @note: exceptions for missing libraries/drivers should be thrown in initConnection()
+ * @note exceptions for missing libraries/drivers should be thrown in initConnection()
* @param array $params Parameters passed from Database::factory()
*/
protected function __construct( array $params ) {
/**
* Basic database interface for live and lazy-loaded relation database handles
*
- * @note: IDatabase and DBConnRef should be updated to reflect any changes
+ * @note IDatabase and DBConnRef should be updated to reflect any changes
* @ingroup Database
*/
interface IDatabase {
*
* This is useful for combining cooperative locks and DB transactions.
*
- * @note: do not assume that *other* IDatabase instances will be AUTOCOMMIT mode
+ * @note do not assume that *other* IDatabase instances will be AUTOCOMMIT mode
*
* The callback takes the following arguments:
* - How the transaction ended (IDatabase::TRIGGER_COMMIT or IDatabase::TRIGGER_ROLLBACK)
*
* Updates will execute in the order they were enqueued.
*
- * @note: do not assume that *other* IDatabase instances will be AUTOCOMMIT mode
+ * @note do not assume that *other* IDatabase instances will be AUTOCOMMIT mode
*
* The callback takes the following arguments:
* - How the transaction ended (IDatabase::TRIGGER_COMMIT or IDatabase::TRIGGER_IDLE)
* - The failures are from contention solvable via onTransactionPreCommitOrIdle()
* - The failures are deadlocks; the RDBMs usually discard the whole transaction
*
- * @note: callers must use additional measures for situations involving two or more
+ * @note callers must use additional measures for situations involving two or more
* (peer) transactions (e.g. updating two database servers at once). The transaction
* and savepoint logic of this method only applies to this specific IDatabase instance.
*
* corresponding startAtomic() implicitly started a transaction, that
* transaction is rolled back.
*
- * @note: callers must use additional measures for situations involving two or more
+ * @note callers must use additional measures for situations involving two or more
* (peer) transactions (e.g. updating two database servers at once). The transaction
* and savepoint logic of startAtomic() are bound to specific IDatabase instances.
*
* - oldid|revision: the revision ID
* @param OutputPage $output
*
- * @note: Instead of an output page, a WebResponse could be sufficient, but
+ * @note Instead of an output page, a WebResponse could be sufficient, but
* redirect logic is currently implemented in OutputPage.
*
* @throws HttpError
'class' => 'HTMLUserTextField',
'label-message' => 'specialloguserlabel',
'name' => 'user',
+ 'default' => $user,
];
}
);
// DifferenceEngine directly fetched the revision:
- $this->mRevIdFetched = $de->mNewid;
+ $this->mRevIdFetched = $de->getNewid();
$de->showDiffPage( $diffOnly );
// Run view updates for the newer revision being diffed (and shown
/**
* Get the user language used by the parser for this page and split the parser cache.
*
- * @warning: Calling this causes the parser cache to be fragmented by user language!
+ * @warning Calling this causes the parser cache to be fragmented by user language!
* To avoid cache fragmentation, output should not depend on the user language.
* Use Parser::getFunctionLang() or Parser::getTargetLanguage() instead!
*
/**
* Same as getUserLangObj() but returns a string instead.
*
- * @warning: Calling this causes the parser cache to be fragmented by user language!
+ * @warning Calling this causes the parser cache to be fragmented by user language!
* To avoid cache fragmentation, output should not depend on the user language.
* Use Parser::getFunctionLang() or Parser::getTargetLanguage() instead!
*
/**
* @since 1.27
*
- * @todo: inject some kind of connection manager that is aware of the target wiki,
+ * @todo inject some kind of connection manager that is aware of the target wiki,
* instead of injecting a LoadBalancer.
*
* @param LoadBalancer $dbLoadBalancer
'editmyuserjs',
'editmywatchlist',
'editsemiprotected',
+ 'editsitecss',
+ 'editsitejson',
+ 'editsitejs',
'editusercss',
'edituserjson',
'edituserjs',
}
/**
- * @todo: move to MW-specific factory class and inject temp dir
+ * @todo move to MW-specific factory class and inject temp dir
* @return UIDGenerator
*/
protected static function singleton() {
$config
);
+ if ( isset( $config['disabled'] ) && $config['disabled'] == true ) {
+ $config['textinput']['disabled'] = true;
+ $config['dropdowninput']['disabled'] = true;
+ }
+
parent::__construct( $config );
// Properties
"customcssprotected": "You do not have permission to edit this CSS page because it contains another user's personal settings.",
"customjsonprotected": "You do not have permission to edit this JSON page because it contains another user's personal settings.",
"customjsprotected": "You do not have permission to edit this JavaScript page because it contains another user's personal settings.",
+ "sitecssprotected": "You do not have permission to edit this CSS page because it may affect all visitors.",
+ "sitejsonprotected": "You do not have permission to edit this JSON page because it may affect all visitors.",
+ "sitejsprotected": "You do not have permission to edit this JavaScript page because it may affect all visitors.",
"mycustomcssprotected": "You do not have permission to edit this CSS page.",
"mycustomjsonprotected": "You do not have permission to edit this JSON page.",
"mycustomjsprotected": "You do not have permission to edit this JavaScript page.",
"group-autoconfirmed": "Autoconfirmed users",
"group-bot": "Bots",
"group-sysop": "Administrators",
+ "group-interface-admin": "Interface administrators",
"group-bureaucrat": "Bureaucrats",
"group-suppress": "Suppressors",
"group-all": "(all)",
"group-autoconfirmed-member": "{{GENDER:$1|autoconfirmed user}}",
"group-bot-member": "{{GENDER:$1|bot}}",
"group-sysop-member": "{{GENDER:$1|administrator}}",
+ "group-interface-admin-member": "{{GENDER:$1|interface administrator}}",
"group-bureaucrat-member": "{{GENDER:$1|bureaucrat}}",
"group-suppress-member": "{{GENDER:$1|suppressor}}",
"grouppage-user": "{{ns:project}}:Users",
"grouppage-autoconfirmed": "{{ns:project}}:Autoconfirmed users",
"grouppage-bot": "{{ns:project}}:Bots",
"grouppage-sysop": "{{ns:project}}:Administrators",
+ "grouppage-interface-admin": "{{ns:project}}:Interface administrators",
"grouppage-bureaucrat": "{{ns:project}}:Bureaucrats",
"grouppage-suppress": "{{ns:project}}:Suppress",
"right-read": "Read pages",
"right-editusercss": "Edit other users' CSS files",
"right-edituserjson": "Edit other users' JSON files",
"right-edituserjs": "Edit other users' JavaScript files",
+ "right-editsitecss": "Edit sitewide CSS",
+ "right-editsitejson": "Edit sitewide JSON",
+ "right-editsitejs": "Edit sitewide JavaScript",
"right-editmyusercss": "Edit your own user CSS files",
"right-editmyuserjson": "Edit your own user JSON files",
"right-editmyuserjs": "Edit your own user JavaScript files",
"grant-createaccount": "Create accounts",
"grant-createeditmovepage": "Create, edit, and move pages",
"grant-delete": "Delete pages, revisions, and log entries",
- "grant-editinterface": "Edit the MediaWiki namespace and user CSS/JSON/JavaScript",
+ "grant-editinterface": "Edit the MediaWiki namespace and sitewide/user JSON",
"grant-editmycssjs": "Edit your user CSS/JSON/JavaScript",
"grant-editmyoptions": "Edit your user preferences",
"grant-editmywatchlist": "Edit your watchlist",
+ "grant-editsiteconfig": "Edit sitewide and user CSS/JS",
"grant-editpage": "Edit existing pages",
"grant-editprotected": "Edit protected pages",
"grant-highvolume": "High-volume editing",
"uploadstash-zero-length": "File is zero length.",
"invalid-chunk-offset": "Invalid chunk offset",
"img-auth-accessdenied": "Access denied",
- "img-auth-nopathinfo": "Missing PATH_INFO.\nYour server is not set up to pass this information.\nIt may be CGI-based and cannot support img_auth.\nSee https://www.mediawiki.org/wiki/Special:MyLanguage/Manual:Image_Authorization.",
+ "img-auth-nopathinfo": "Missing path information.\nYour server must be set up to pass the REQUEST_URI and/or PATH_INFO variables.\nIf it is, try enabling $wgUsePathInfo.\nSee https://www.mediawiki.org/wiki/Special:MyLanguage/Manual:Image_Authorization.",
"img-auth-notindir": "Requested path is not in the configured upload directory.",
"img-auth-badtitle": "Unable to construct a valid title from \"$1\".",
"img-auth-nologinnWL": "You are not logged in and \"$1\" is not in the whitelist.",
"passwordpolicies-policy-passwordcannotmatchusername": "Password cannot be the same as username",
"passwordpolicies-policy-passwordcannotmatchblacklist": "Password cannot match specifically blacklisted passwords",
"passwordpolicies-policy-maximalpasswordlength": "Password must be less than $1 {{PLURAL:$1|character|characters}} long",
- "passwordpolicies-policy-passwordcannotbepopular": "Password cannot be {{PLURAL:$1|the popular password|in the list of $1 popular passwords}}"
+ "passwordpolicies-policy-passwordcannotbepopular": "Password cannot be {{PLURAL:$1|the popular password|in the list of $1 popular passwords}}",
+ "easydeflate-invaliddeflate": "Content provided is not properly deflated"
}
"customcssprotected": "Used as error message. Parameters:\n* $1 - (Unused) the action the user attempted to perform",
"customjsonprotected": "Used as error message. Parameters:\n* $1 - (Unused) the action the user attempted to perform",
"customjsprotected": "Used as error message. Parameters:\n* $1 - (Unused) the action the user attempted to perform",
+ "sitecssprotected": "Used as error message. Parameters:\n* $1 - (Unused) the action the user attempted to perform",
+ "sitejsonprotected": "Used as error message. Parameters:\n* $1 - (Unused) the action the user attempted to perform",
+ "sitejsprotected": "Used as error message. Parameters:\n* $1 - (Unused) the action the user attempted to perform",
"mycustomcssprotected": "Used as error message. Parameters:\n* $1 - (Unused) the action the user attempted to perform",
"mycustomjsonprotected": "Used as error message. Parameters:\n* $1 - (Unused) the action the user attempted to perform",
"mycustomjsprotected": "Used as error message. Parameters:\n* $1 - (Unused) the action the user attempted to perform",
"group-autoconfirmed": "{{doc-group|autoconfirmed}}\nOn Wikimedia sites autoconfirmed users are users which are older than 4 days. After those 4 days, they have more rights.",
"group-bot": "{{doc-group|bot}}\n{{Identical|Bot}}",
"group-sysop": "{{doc-group|sysop}}\n{{Identical|Administrator}}",
+ "group-interface-admin": "{{doc-group|interface-admin}}",
"group-bureaucrat": "{{doc-group|bureaucrat}}",
"group-suppress": "{{doc-group|suppress}}\nThis is an optional (disabled by default) user group, meant for the suppression feature in [[mw:Flow|Flow]]. It is not to be confused with the Oversighters group, which also has access to the [[mw:RevisionDelete|RevisionDelete]] feature, to change the visibility of revisions through [[Special:RevisionDelete]].\n\n{{Identical|Suppress}}",
"group-all": "The name of the user group that contains all users, including anonymous users\n\n{{Identical|All}}",
"group-autoconfirmed-member": "{{doc-group|autoconfirmed|member}}",
"group-bot-member": "{{doc-group|bot|member}}",
"group-sysop-member": "{{doc-group|sysop|member}}\n{{Identical|Administrator}}",
+ "group-interface-admin-member": "{{doc-group|interface-admin|member}}",
"group-bureaucrat-member": "{{doc-group|bureaucrat|member}}",
"group-suppress-member": "{{doc-group|suppress|member}}\nThis is a member of the optional (disabled by default) user group, meant for the [[mw:RevisionDelete|RevisionDelete]] feature, to change the visibility of revisions through [[Special:RevisionDelete]].\n\n{{Identical|Suppress}}",
"grouppage-user": "{{doc-group|user|page}}\n{{Identical|User}}",
"grouppage-autoconfirmed": "{{doc-group|autoconfirmed|page}}",
"grouppage-bot": "{{doc-group|bot|page}}\n{{Identical|Bot}}",
"grouppage-sysop": "{{doc-group|sysop|page}}",
+ "grouppage-interface-admin": "{{doc-group|interface-admin|page}}",
"grouppage-bureaucrat": "{{doc-group|bureaucrat|page}}",
"grouppage-suppress": "{{doc-group|suppress|page}}\n{{Identical|Suppress}}",
"right-read": "{{doc-right|read}}\nBasic right to read any page.",
"right-editusercss": "{{doc-right|editusercss}}\nSee also:\n* {{msg-mw|Right-editmyusercss}}",
"right-edituserjson": "{{doc-right|edituserjson}}\nSee also:\n* {{msg-mw|Right-editmyuserjson}}",
"right-edituserjs": "{{doc-right|edituserjs}}\nSee also:\n* {{msg-mw|Right-editmyuserjs}}",
+ "right-editsitecss": "{{doc-right|editsitecss}}",
+ "right-editsitejson": "{{doc-right|editsitejson}}",
+ "right-editsitejs": "{{doc-right|editsitejs}}",
"right-editmyusercss": "{{doc-right|editmyusercss}}\nSee also:\n* {{msg-mw|Right-editusercss}}",
"right-editmyuserjson": "{{doc-right|editmyuserjson}}\nSee also:\n* {{msg-mw|Right-edituserjson}}",
"right-editmyuserjs": "{{doc-right|editmyuserjs}}\nSee also:\n* {{msg-mw|Right-edituserjs}}",
"right-applychangetags": "{{doc-right|applychangetags}}",
"right-changetags": "{{doc-right|changetags}}",
"right-deletechangetags": "{{doc-right|deletechangetags}}",
- "grant-generic": "Used if the grant name is not defined. Parameters:\n* $1 - grant name\n\nDefined grants (grant name refers: blockusers, createeditmovepage, ...):\n* {{msg-mw|grant-checkuser}}\n* {{msg-mw|grant-blockusers}}\n* {{msg-mw|grant-createaccount}}\n* {{msg-mw|grant-createeditmovepage}}\n* {{msg-mw|grant-delete}}\n* {{msg-mw|grant-editinterface}}\n* {{msg-mw|grant-editmycssjs}}\n* {{msg-mw|grant-editmyoptions}}\n* {{msg-mw|grant-editmywatchlist}}\n* {{msg-mw|grant-editpage}}\n* {{msg-mw|grant-editprotected}}\n* {{msg-mw|grant-highvolume}}\n* {{msg-mw|grant-oversight}}\n* {{msg-mw|grant-patrol}}\n* {{msg-mw|grant-privateinfo}}\n* {{msg-mw|grant-protect}}\n* {{msg-mw|grant-rollback}}\n* {{msg-mw|grant-sendemail}}\n* {{msg-mw|grant-uploadeditmovefile}}\n* {{msg-mw|grant-uploadfile}}\n* {{msg-mw|grant-basic}}\n* {{msg-mw|grant-viewdeleted}}\n* {{msg-mw|grant-viewmywatchlist}}",
+ "grant-generic": "Used if the grant name is not defined. Parameters:\n* $1 - grant name\n\nDefined grants (grant name refers: blockusers, createeditmovepage, ...):\n* {{msg-mw|grant-checkuser}}\n* {{msg-mw|grant-blockusers}}\n* {{msg-mw|grant-createaccount}}\n* {{msg-mw|grant-createeditmovepage}}\n* {{msg-mw|grant-delete}}\n* {{msg-mw|grant-editinterface}}\n* {{msg-mw|grant-editmycssjs}}\n* {{msg-mw|grant-editmyoptions}}\n* {{msg-mw|grant-editmywatchlist}}\n* {{msg-mw|grant-editsiteconfig}}\n* {{msg-mw|grant-editpage}}\n* {{msg-mw|grant-editprotected}}\n* {{msg-mw|grant-highvolume}}\n* {{msg-mw|grant-oversight}}\n* {{msg-mw|grant-patrol}}\n* {{msg-mw|grant-privateinfo}}\n* {{msg-mw|grant-protect}}\n* {{msg-mw|grant-rollback}}\n* {{msg-mw|grant-sendemail}}\n* {{msg-mw|grant-uploadeditmovefile}}\n* {{msg-mw|grant-uploadfile}}\n* {{msg-mw|grant-basic}}\n* {{msg-mw|grant-viewdeleted}}\n* {{msg-mw|grant-viewmywatchlist}}",
"grant-group-page-interaction": "{{Related|Grant-group}}",
"grant-group-file-interaction": "{{Related|Grant-group}}",
"grant-group-watchlist-interaction": "{{Related|Grant-group}}",
"grant-createaccount": "Name for grant \"createaccount\".\n{{Related|Grant}}\n{{Identical|Create account}}",
"grant-createeditmovepage": "Name for grant \"createeditmovepage\".\n{{Related|Grant}}",
"grant-delete": "Name for grant \"delete\".\n{{Related|Grant}}",
- "grant-editinterface": "Name for grant \"editinterface\".\n\n\"JS\" stands for \"JavaScript\".\n{{Related|Grant}}",
+ "grant-editinterface": "Name for grant \"editinterface\".\n{{Related|Grant}}",
"grant-editmycssjs": "Name for grant \"editmycssjs\".\n\n\"JS\" stands for \"JavaScript\".\n{{Related|Grant}}",
"grant-editmyoptions": "Name for grant \"editmyoptions\".\n{{Related|Grant}}",
"grant-editmywatchlist": "Name for grant \"editmywatchlist\".\n{{Related|Grant}}\n{{Identical|Edit your watchlist}}",
+ "grant-editsiteconfig": "Name for grant \"editsiteconfig\".\n{{Related|Grant}}",
"grant-editpage": "Name for grant \"editpage\".\n{{Related|Grant}}",
"grant-editprotected": "Name for grant \"editprotected\".\n{{Related|Grant}}",
"grant-highvolume": "Name for grant \"highvolume\".\n{{Related|Grant}}",
"passwordpolicies-policy-passwordcannotmatchusername": "Password policy that enforces that the password of the account cannot be the same as the username",
"passwordpolicies-policy-passwordcannotmatchblacklist": "Password policy that enforces that passwords are not on a list of blacklisted passwords (often previously used during MediaWiki automated testing)",
"passwordpolicies-policy-maximalpasswordlength": "Password policy that enforces a maximum number of characters a password must be. $1 - maximum number of characters that a password can be",
- "passwordpolicies-policy-passwordcannotbepopular": "Password policy that enforces that a password is not in a list of $1 number of \"popular\" passwords. $1 - number of popular passwords the password will be checked against"
+ "passwordpolicies-policy-passwordcannotbepopular": "Password policy that enforces that a password is not in a list of $1 number of \"popular\" passwords. $1 - number of popular passwords the password will be checked against",
+ "easydeflate-invaliddeflate": "Error message if the content passed to easydeflate was not deflated (compressed) properly"
}
* @ingroup Maintenance
*/
class CreateAndPromote extends Maintenance {
- private static $permitRoles = [ 'sysop', 'bureaucrat', 'bot' ];
+ private static $permitRoles = [ 'sysop', 'bureaucrat', 'interface-admin', 'bot' ];
public function __construct() {
parent::__construct();
-- T167246. Add an `actor` table and various columns (and temporary tables) to reference it.
CREATE TABLE /*_*/actor (
- actor_id bigint unsigned NOT NULL CONSTRAINT PK_actor PRIMARY KEY IDENTITY(0,1),
- actor_user int unsigned,
+ actor_id bigint NOT NULL CONSTRAINT PK_actor PRIMARY KEY IDENTITY(0,1),
+ actor_user int,
actor_name nvarchar(255) NOT NULL
);
CREATE UNIQUE INDEX /*i*/actor_user ON /*_*/actor (actor_user);
INSERT INTO /*_*/actor (actor_name) VALUES ('##Anonymous##');
CREATE TABLE /*_*/revision_actor_temp (
- revactor_rev int unsigned NOT NULL CONSTRAINT FK_revactor_rev FOREIGN KEY REFERENCES /*_*/revision(rev_id) ON DELETE CASCADE,
- revactor_actor bigint unsigned NOT NULL,
+ revactor_rev int NOT NULL CONSTRAINT FK_revactor_rev FOREIGN KEY REFERENCES /*_*/revision(rev_id) ON DELETE CASCADE,
+ revactor_actor bigint NOT NULL,
revactor_timestamp varchar(14) NOT NULL CONSTRAINT DF_revactor_timestamp DEFAULT '',
- revactor_page int unsigned NOT NULL,
+ revactor_page int NOT NULL,
CONSTRAINT PK_revision_actor_temp PRIMARY KEY (revactor_rev, revactor_actor)
);
CREATE UNIQUE INDEX /*i*/revactor_rev ON /*_*/revision_actor_temp (revactor_rev);
CREATE INDEX /*i*/page_actor_timestamp ON /*_*/revision_actor_temp (revactor_page,revactor_actor,revactor_timestamp);
ALTER TABLE /*_*/archive ADD CONSTRAINT DF_ar_user_text DEFAULT '' FOR ar_user_text;
-ALTER TABLE /*_*/archive ADD ar_actor bigint unsigned NOT NULL CONSTRAINT DF_ar_actor DEFAULT 0;
+ALTER TABLE /*_*/archive ADD ar_actor bigint NOT NULL CONSTRAINT DF_ar_actor DEFAULT 0;
CREATE INDEX /*i*/ar_actor_timestamp ON /*_*/archive (ar_actor,ar_timestamp);
-ALTER TABLE /*_*/ipblocks ADD ipb_by_actor bigint unsigned NOT NULL CONSTRAINT DF_ipb_by_actor DEFAULT 0;
+ALTER TABLE /*_*/ipblocks ADD ipb_by_actor bigint NOT NULL CONSTRAINT DF_ipb_by_actor DEFAULT 0;
ALTER TABLE /*_*/image ADD CONSTRAINT DF_img_user_text DEFAULT '' FOR img_user_text;
-ALTER TABLE /*_*/image ADD img_actor bigint unsigned NOT NULL CONSTRAINT DF_img_actor DEFAULT 0;
+ALTER TABLE /*_*/image ADD img_actor bigint NOT NULL CONSTRAINT DF_img_actor DEFAULT 0;
CREATE INDEX /*i*/img_actor_timestamp ON /*_*/image (img_actor, img_timestamp);
ALTER TABLE /*_*/oldimage ADD CONSTRAINT DF_oi_user_text DEFAULT '' FOR oi_user_text;
-ALTER TABLE /*_*/oldimage ADD oi_actor bigint unsigned NOT NULL CONSTRAINT DF_oi_actor DEFAULT 0;
+ALTER TABLE /*_*/oldimage ADD oi_actor bigint NOT NULL CONSTRAINT DF_oi_actor DEFAULT 0;
CREATE INDEX /*i*/oi_actor_timestamp ON /*_*/oldimage (oi_actor,oi_timestamp);
ALTER TABLE /*_*/filearchive ADD CONSTRAINT DF_fa_user_text DEFAULT '' FOR fa_user_text;
-ALTER TABLE /*_*/filearchive ADD fa_actor bigint unsigned NOT NULL CONSTRAINT DF_fa_actor DEFAULT 0;
+ALTER TABLE /*_*/filearchive ADD fa_actor bigint NOT NULL CONSTRAINT DF_fa_actor DEFAULT 0;
CREATE INDEX /*i*/fa_actor_timestamp ON /*_*/filearchive (fa_actor,fa_timestamp);
ALTER TABLE /*_*/recentchanges ADD CONSTRAINT DF_rc_user_text DEFAULT '' FOR rc_user_text;
-ALTER TABLE /*_*/recentchanges ADD rc_actor bigint unsigned NOT NULL CONSTRAINT DF_rc_actor DEFAULT 0;
+ALTER TABLE /*_*/recentchanges ADD rc_actor bigint NOT NULL CONSTRAINT DF_rc_actor DEFAULT 0;
CREATE INDEX /*i*/rc_ns_actor ON /*_*/recentchanges (rc_namespace, rc_actor);
CREATE INDEX /*i*/rc_actor ON /*_*/recentchanges (rc_actor, rc_timestamp);
-ALTER TABLE /*_*/logging ADD log_actor bigint unsigned NOT NULL CONSTRAINT DF_log_actor DEFAULT 0;
+ALTER TABLE /*_*/logging ADD log_actor bigint NOT NULL CONSTRAINT DF_log_actor DEFAULT 0;
CREATE INDEX /*i*/actor_time ON /*_*/logging (log_actor, log_timestamp);
CREATE INDEX /*i*/log_actor_type_time ON /*_*/logging (log_actor, log_type, log_timestamp);
-- T166732. Add a `comment` table and various columns (and temporary tables) to reference it.
CREATE TABLE /*_*/comment (
- comment_id bigint unsigned NOT NULL PRIMARY KEY IDENTITY(0,1),
+ comment_id bigint NOT NULL PRIMARY KEY IDENTITY(0,1),
comment_hash INT NOT NULL,
comment_text nvarchar(max) NOT NULL,
comment_data nvarchar(max)
CREATE TABLE /*_*/revision_comment_temp (
revcomment_rev INT NOT NULL CONSTRAINT FK_revcomment_rev FOREIGN KEY REFERENCES /*_*/revision(rev_id) ON DELETE CASCADE,
- revcomment_comment_id bigint unsigned NOT NULL CONSTRAINT FK_revcomment_comment_id FOREIGN KEY REFERENCES /*_*/comment(comment_id),
+ revcomment_comment_id bigint NOT NULL CONSTRAINT FK_revcomment_comment_id FOREIGN KEY REFERENCES /*_*/comment(comment_id),
CONSTRAINT PK_revision_comment_temp PRIMARY KEY (revcomment_rev, revcomment_comment_id)
);
CREATE UNIQUE INDEX /*i*/revcomment_rev ON /*_*/revision_comment_temp (revcomment_rev);
CREATE TABLE /*_*/image_comment_temp (
- imgcomment_name nvarchar(255) NOT NULL CONSTRAINT FK_imgcomment_name FOREIGN KEY REFERENCES /*_*/image(imgcomment_name) ON DELETE CASCADE,
- imgcomment_description_id bigint unsigned NOT NULL CONSTRAINT FK_imgcomment_description_id FOREIGN KEY REFERENCES /*_*/comment(comment_id),
+ imgcomment_name nvarchar(255) NOT NULL CONSTRAINT FK_imgcomment_name FOREIGN KEY REFERENCES /*_*/image(img_name) ON DELETE CASCADE,
+ imgcomment_description_id bigint NOT NULL CONSTRAINT FK_imgcomment_description_id FOREIGN KEY REFERENCES /*_*/comment(comment_id),
CONSTRAINT PK_image_comment_temp PRIMARY KEY (imgcomment_name, imgcomment_description_id)
);
CREATE UNIQUE INDEX /*i*/imgcomment_name ON /*_*/image_comment_temp (imgcomment_name);
ALTER TABLE /*_*/revision ADD CONSTRAINT DF_rev_comment DEFAULT '' FOR rev_comment;
ALTER TABLE /*_*/archive ADD CONSTRAINT DF_ar_comment DEFAULT '' FOR ar_comment;
-ALTER TABLE /*_*/archive ADD ar_comment_id bigint unsigned NOT NULL CONSTRAINT DF_ar_comment_id DEFAULT 0 CONSTRAINT FK_ar_comment_id FOREIGN KEY REFERENCES /*_*/comment(comment_id);
+ALTER TABLE /*_*/archive ADD ar_comment_id bigint NOT NULL CONSTRAINT DF_ar_comment_id DEFAULT 0 CONSTRAINT FK_ar_comment_id FOREIGN KEY REFERENCES /*_*/comment(comment_id);
ALTER TABLE /*_*/ipblocks ADD CONSTRAINT DF_ipb_reason DEFAULT '' FOR ipb_reason;
-ALTER TABLE /*_*/ipblocks ADD ipb_reason_id bigint unsigned NOT NULL CONSTRAINT DF_ipb_reason_id DEFAULT 0 CONSTRAINT FK_ipb_reason_id FOREIGN KEY REFERENCES /*_*/comment(comment_id);
+ALTER TABLE /*_*/ipblocks ADD ipb_reason_id bigint NOT NULL CONSTRAINT DF_ipb_reason_id DEFAULT 0 CONSTRAINT FK_ipb_reason_id FOREIGN KEY REFERENCES /*_*/comment(comment_id);
ALTER TABLE /*_*/image ADD CONSTRAINT DF_img_description DEFAULT '' FOR img_description;
ALTER TABLE /*_*/oldimage ADD CONSTRAINT DF_oi_description DEFAULT '' FOR oi_description;
-ALTER TABLE /*_*/oldimage ADD oi_description_id bigint unsigned NOT NULL CONSTRAINT DF_oi_description_id DEFAULT 0 CONSTRAINT FK_oi_description_id FOREIGN KEY REFERENCES /*_*/comment(comment_id);
+ALTER TABLE /*_*/oldimage ADD oi_description_id bigint NOT NULL CONSTRAINT DF_oi_description_id DEFAULT 0 CONSTRAINT FK_oi_description_id FOREIGN KEY REFERENCES /*_*/comment(comment_id);
ALTER TABLE /*_*/filearchive ADD CONSTRAINT DF_fa_deleted_reason DEFAULT '' FOR fa_deleted_reason;
-ALTER TABLE /*_*/filearchive ADD fa_deleted_reason_id bigint unsigned NOT NULL CONSTRAINT DF_fa_deleted_reason_id DEFAULT 0 CONSTRAINT FK_fa_deleted_reason_id FOREIGN KEY REFERENCES /*_*/comment(comment_id);
+ALTER TABLE /*_*/filearchive ADD fa_deleted_reason_id bigint NOT NULL CONSTRAINT DF_fa_deleted_reason_id DEFAULT 0 CONSTRAINT FK_fa_deleted_reason_id FOREIGN KEY REFERENCES /*_*/comment(comment_id);
ALTER TABLE /*_*/filearchive ADD CONSTRAINT DF_fa_description DEFAULT '' FOR fa_description;
-ALTER TABLE /*_*/filearchive ADD fa_description_id bigint unsigned NOT NULL CONSTRAINT DF_fa_description_id DEFAULT 0 CONSTRAINT FK_fa_description_id FOREIGN KEY REFERENCES /*_*/comment(comment_id);
+ALTER TABLE /*_*/filearchive ADD fa_description_id bigint NOT NULL CONSTRAINT DF_fa_description_id DEFAULT 0 CONSTRAINT FK_fa_description_id FOREIGN KEY REFERENCES /*_*/comment(comment_id);
-ALTER TABLE /*_*/recentchanges ADD rc_comment_id bigint unsigned NOT NULL CONSTRAINT DF_rc_comment_id DEFAULT 0 CONSTRAINT FK_rc_comment_id FOREIGN KEY REFERENCES /*_*/comment(comment_id);
+ALTER TABLE /*_*/recentchanges ADD rc_comment_id bigint NOT NULL CONSTRAINT DF_rc_comment_id DEFAULT 0 CONSTRAINT FK_rc_comment_id FOREIGN KEY REFERENCES /*_*/comment(comment_id);
-ALTER TABLE /*_*/logging ADD log_comment_id bigint unsigned NOT NULL CONSTRAINT DF_log_comment_id DEFAULT 0 CONSTRAINT FK_log_comment_id FOREIGN KEY REFERENCES /*_*/comment(comment_id);
+ALTER TABLE /*_*/logging ADD log_comment_id bigint NOT NULL CONSTRAINT DF_log_comment_id DEFAULT 0 CONSTRAINT FK_log_comment_id FOREIGN KEY REFERENCES /*_*/comment(comment_id);
ALTER TABLE /*_*/protected_titles ADD CONSTRAINT DF_pt_reason DEFAULT '' FOR pt_reason;
-ALTER TABLE /*_*/protected_titles ADD pt_reason_id bigint unsigned NOT NULL CONSTRAINT DF_pt_reason_id DEFAULT 0 CONSTRAINT FK_pt_reason_id FOREIGN KEY REFERENCES /*_*/comment(comment_id);
+ALTER TABLE /*_*/protected_titles ADD pt_reason_id bigint NOT NULL CONSTRAINT DF_pt_reason_id DEFAULT 0 CONSTRAINT FK_pt_reason_id FOREIGN KEY REFERENCES /*_*/comment(comment_id);
CREATE TABLE /*_*/content (
-- ID of the content object
- content_id bigint unsigned NOT NULL CONSTRAINT PK_content PRIMARY KEY IDENTITY,
+ content_id bigint NOT NULL CONSTRAINT PK_content PRIMARY KEY IDENTITY,
-- Nominal size of the content object (not necessarily of the serialized blob)
- content_size int unsigned NOT NULL,
+ content_size int NOT NULL,
-- Nominal hash of the content object (not necessarily of the serialized blob)
content_sha1 varchar(32) NOT NULL,
-- reference to model_id
- content_model smallint unsigned NOT NULL CONSTRAINT FK_content_content_models FOREIGN KEY REFERENCES /*_*/content_models(model_id),
+ content_model smallint NOT NULL CONSTRAINT FK_content_content_models FOREIGN KEY REFERENCES /*_*/content_models(model_id),
-- URL-like address of the content blob
content_address nvarchar(255) NOT NULL
-);
\ No newline at end of file
+);
CREATE TABLE /*_*/slots (
-- reference to rev_id
- slot_revision_id bigint unsigned NOT NULL,
+ slot_revision_id bigint NOT NULL,
-- reference to role_id
- slot_role_id smallint unsigned NOT NULL CONSTRAINT FK_slots_slot_role FOREIGN KEY REFERENCES slot_roles(role_id),
+ slot_role_id smallint NOT NULL CONSTRAINT FK_slots_slot_role FOREIGN KEY REFERENCES slot_roles(role_id),
-- reference to content_id
- slot_content_id bigint unsigned NOT NULL CONSTRAINT FK_slots_content_id FOREIGN KEY REFERENCES content(content_id),
+ slot_content_id bigint NOT NULL CONSTRAINT FK_slots_content_id FOREIGN KEY REFERENCES content(content_id),
-- The revision ID of the revision that originated the slot's content.
-- To find revisions that changed slots, look for slot_origin = slot_revision_id.
- slot_origin bigint unsigned NOT NULL,
+ slot_origin bigint NOT NULL,
CONSTRAINT PK_slots PRIMARY KEY (slot_revision_id, slot_role_id)
);
-- can refer to the user table directly.
--
CREATE TABLE /*_*/actor (
- actor_id bigint unsigned NOT NULL CONSTRAINT PK_actor PRIMARY KEY IDENTITY(0,1),
- actor_user int unsigned,
+ actor_id bigint NOT NULL CONSTRAINT PK_actor PRIMARY KEY IDENTITY(0,1),
+ actor_user int,
actor_name nvarchar(255) NOT NULL
);
CREATE UNIQUE INDEX /*i*/actor_user ON /*_*/actor (actor_user);
-- the same comment_text and comment_data.
--
CREATE TABLE /*_*/comment (
- comment_id bigint unsigned NOT NULL PRIMARY KEY IDENTITY(0,1),
+ comment_id bigint NOT NULL PRIMARY KEY IDENTITY(0,1),
comment_hash INT NOT NULL,
comment_text nvarchar(max) NOT NULL,
comment_data nvarchar(max)
--
CREATE TABLE /*_*/revision_comment_temp (
revcomment_rev INT NOT NULL CONSTRAINT FK_revcomment_rev FOREIGN KEY REFERENCES /*_*/revision(rev_id) ON DELETE CASCADE,
- revcomment_comment_id bigint unsigned NOT NULL CONSTRAINT FK_revcomment_comment_id FOREIGN KEY REFERENCES /*_*/comment(comment_id),
+ revcomment_comment_id bigint NOT NULL CONSTRAINT FK_revcomment_comment_id FOREIGN KEY REFERENCES /*_*/comment(comment_id),
CONSTRAINT PK_revision_comment_temp PRIMARY KEY (revcomment_rev, revcomment_comment_id)
);
CREATE UNIQUE INDEX /*i*/revcomment_rev ON /*_*/revision_comment_temp (revcomment_rev);
CREATE TABLE /*_*/revision_actor_temp (
- revactor_rev int unsigned NOT NULL CONSTRAINT FK_revactor_rev FOREIGN KEY REFERENCES /*_*/revision(rev_id) ON DELETE CASCADE,
- revactor_actor bigint unsigned NOT NULL,
+ revactor_rev int NOT NULL CONSTRAINT FK_revactor_rev FOREIGN KEY REFERENCES /*_*/revision(rev_id) ON DELETE CASCADE,
+ revactor_actor bigint NOT NULL,
revactor_timestamp varchar(14) NOT NULL CONSTRAINT DF_revactor_timestamp DEFAULT '',
- revactor_page int unsigned NOT NULL,
+ revactor_page int NOT NULL,
CONSTRAINT PK_revision_actor_temp PRIMARY KEY (revactor_rev, revactor_actor)
);
CREATE UNIQUE INDEX /*i*/revactor_rev ON /*_*/revision_actor_temp (revactor_rev);
ar_namespace SMALLINT NOT NULL DEFAULT 0,
ar_title NVARCHAR(255) NOT NULL DEFAULT '',
ar_comment NVARCHAR(255) NOT NULL CONSTRAINT DF_ar_comment DEFAULT '',
- ar_comment_id bigint unsigned NOT NULL CONSTRAINT DF_ar_comment_id DEFAULT 0 CONSTRAINT FK_ar_comment_id FOREIGN KEY REFERENCES /*_*/comment(comment_id),
+ ar_comment_id bigint NOT NULL CONSTRAINT DF_ar_comment_id DEFAULT 0 CONSTRAINT FK_ar_comment_id FOREIGN KEY REFERENCES /*_*/comment(comment_id),
ar_user INT CONSTRAINT ar_user__user_id__fk FOREIGN KEY REFERENCES /*_*/mwuser(user_id),
ar_user_text NVARCHAR(255) NOT NULL CONSTRAINT DF_ar_user_text DEFAULT '',
- ar_actor bigint unsigned NOT NULL CONSTRAINT DF_ar_actor DEFAULT 0,
+ ar_actor bigint NOT NULL CONSTRAINT DF_ar_actor DEFAULT 0,
ar_timestamp varchar(14) NOT NULL default '',
ar_minor_edit BIT NOT NULL DEFAULT 0,
ar_rev_id INT NOT NULL, -- NOT a FK, the row gets deleted from revision and moved here
--
--- Slots represent an n:m relation between revisions and content objects.
--- A content object can have a specific "role" in one or more revisions.
--- Each revision can have multiple content objects, each having a different role.
+-- Normalization table for role names
--
-CREATE TABLE /*_*/slots (
-
- -- reference to rev_id
- slot_revision_id bigint unsigned NOT NULL,
-
- -- reference to role_id
- slot_role_id smallint unsigned NOT NULL CONSTRAINT FK_slots_slot_role FOREIGN KEY REFERENCES slot_roles(role_id),
-
- -- reference to content_id
- slot_content_id bigint unsigned NOT NULL CONSTRAINT FK_slots_content_id FOREIGN KEY REFERENCES content(content_id),
+CREATE TABLE /*_*/slot_roles (
+ role_id smallint NOT NULL CONSTRAINT PK_slot_roles PRIMARY KEY IDENTITY,
+ role_name nvarchar(64) NOT NULL
+);
- -- The revision ID of the revision that originated the slot's content.
- -- To find revisions that changed slots, look for slot_origin = slot_revision_id.
- slot_origin bigint NOT NULL,
+-- Index for looking up the internal ID for a name
+CREATE UNIQUE INDEX /*i*/role_name ON /*_*/slot_roles (role_name);
- CONSTRAINT PK_slots PRIMARY KEY (slot_revision_id, slot_role_id)
+--
+-- Normalization table for content model names
+--
+CREATE TABLE /*_*/content_models (
+ model_id smallint NOT NULL CONSTRAINT PK_content_models PRIMARY KEY IDENTITY,
+ model_name nvarchar(64) NOT NULL
);
--- Index for finding revisions that modified a specific slot
-CREATE INDEX /*i*/slot_revision_origin_role ON /*_*/slots (slot_revision_id, slot_origin, slot_role_id);
+-- Index for looking up the internal ID for a name
+CREATE UNIQUE INDEX /*i*/model_name ON /*_*/content_models (model_name);
--
-- The content table represents content objects. It's primary purpose is to provide the necessary
CREATE TABLE /*_*/content (
-- ID of the content object
- content_id bigint unsigned NOT NULL CONSTRAINT PK_content PRIMARY KEY IDENTITY,
+ content_id bigint NOT NULL CONSTRAINT PK_content PRIMARY KEY IDENTITY,
-- Nominal size of the content object (not necessarily of the serialized blob)
- content_size int unsigned NOT NULL,
+ content_size int NOT NULL,
-- Nominal hash of the content object (not necessarily of the serialized blob)
content_sha1 varchar(32) NOT NULL,
-- reference to model_id
- content_model smallint unsigned NOT NULL CONSTRAINT FK_content_content_models FOREIGN KEY REFERENCES /*_*/content_models(model_id),
+ content_model smallint NOT NULL CONSTRAINT FK_content_content_models FOREIGN KEY REFERENCES /*_*/content_models(model_id),
-- URL-like address of the content blob
content_address nvarchar(255) NOT NULL
);
--
--- Normalization table for role names
+-- Slots represent an n:m relation between revisions and content objects.
+-- A content object can have a specific "role" in one or more revisions.
+-- Each revision can have multiple content objects, each having a different role.
--
-CREATE TABLE /*_*/slot_roles (
- role_id smallint NOT NULL CONSTRAINT PK_slot_roles PRIMARY KEY IDENTITY,
- role_name nvarchar(64) NOT NULL
-);
+CREATE TABLE /*_*/slots (
--- Index for looking of the internal ID of for a name
-CREATE UNIQUE INDEX /*i*/role_name ON /*_*/slot_roles (role_name);
+ -- reference to rev_id
+ slot_revision_id bigint NOT NULL,
---
--- Normalization table for content model names
---
-CREATE TABLE /*_*/content_models (
- model_id smallint NOT NULL CONSTRAINT PK_content_models PRIMARY KEY IDENTITY,
- model_name nvarchar(64) NOT NULL
+ -- reference to role_id
+ slot_role_id smallint NOT NULL CONSTRAINT FK_slots_slot_role FOREIGN KEY REFERENCES /*_*/slot_roles(role_id),
+
+ -- reference to content_id
+ slot_content_id bigint NOT NULL CONSTRAINT FK_slots_content_id FOREIGN KEY REFERENCES /*_*/content(content_id),
+
+ -- The revision ID of the revision that originated the slot's content.
+ -- To find revisions that changed slots, look for slot_origin = slot_revision_id.
+ slot_origin bigint NOT NULL,
+
+ CONSTRAINT PK_slots PRIMARY KEY (slot_revision_id, slot_role_id)
);
--- Index for looking of the internal ID of for a name
-CREATE UNIQUE INDEX /*i*/model_name ON /*_*/content_models (model_name);
+-- Index for finding revisions that modified a specific slot
+CREATE INDEX /*i*/slot_revision_origin_role ON /*_*/slots (slot_revision_id, slot_origin, slot_role_id);
--
ipb_by int REFERENCES /*_*/mwuser(user_id) ON DELETE CASCADE,
-- Actor ID who made the block.
- ipb_by_actor bigint unsigned NOT NULL CONSTRAINT DF_ipb_by_actor DEFAULT 0,
+ ipb_by_actor bigint NOT NULL CONSTRAINT DF_ipb_by_actor DEFAULT 0,
-- User name of blocker
ipb_by_text nvarchar(255) NOT NULL default '',
-- Key to comment_id. Text comment made by blocker.
-- ("DEFAULT 0" is temporary, signaling that ipb_reason should be used)
- ipb_reason_id bigint unsigned NOT NULL CONSTRAINT DF_ipb_reason_id DEFAULT 0 CONSTRAINT FK_ipb_reason_id FOREIGN KEY REFERENCES /*_*/comment(comment_id),
+ ipb_reason_id bigint NOT NULL CONSTRAINT DF_ipb_reason_id DEFAULT 0 CONSTRAINT FK_ipb_reason_id FOREIGN KEY REFERENCES /*_*/comment(comment_id),
-- Creation (or refresh) date in standard YMDHMS form.
-- IP blocks expire automatically.
-- user_id and user_name of uploader.
img_user int REFERENCES /*_*/mwuser(user_id) ON DELETE SET NULL,
img_user_text nvarchar(255) NOT NULL CONSTRAINT DF_img_user_text DEFAULT '',
- img_actor bigint unsigned NOT NULL CONSTRAINT DF_img_actor DEFAULT 0,
+ img_actor bigint NOT NULL CONSTRAINT DF_img_actor DEFAULT 0,
-- Time of the upload.
img_timestamp nvarchar(14) NOT NULL default '',
-- will be merged back into image in the future.
--
CREATE TABLE /*_*/image_comment_temp (
- imgcomment_name nvarchar(255) NOT NULL CONSTRAINT FK_imgcomment_name FOREIGN KEY REFERENCES /*_*/image(imgcomment_name) ON DELETE CASCADE,
- imgcomment_description_id bigint unsigned NOT NULL CONSTRAINT FK_imgcomment_description_id FOREIGN KEY REFERENCES /*_*/comment(comment_id),
+ imgcomment_name nvarchar(255) NOT NULL CONSTRAINT FK_imgcomment_name FOREIGN KEY REFERENCES /*_*/image(img_name) ON DELETE CASCADE,
+ imgcomment_description_id bigint NOT NULL CONSTRAINT FK_imgcomment_description_id FOREIGN KEY REFERENCES /*_*/comment(comment_id),
CONSTRAINT PK_image_comment_temp PRIMARY KEY (imgcomment_name, imgcomment_description_id)
);
CREATE UNIQUE INDEX /*i*/imgcomment_name ON /*_*/image_comment_temp (imgcomment_name);
oi_height int NOT NULL default 0,
oi_bits int NOT NULL default 0,
oi_description nvarchar(255) NOT NULL CONSTRAINT DF_oi_description DEFAULT '',
- oi_description_id bigint unsigned NOT NULL CONSTRAINT DF_oi_description_id DEFAULT 0 CONSTRAINT FK_oi_description_id FOREIGN KEY REFERENCES /*_*/comment(comment_id),
+ oi_description_id bigint NOT NULL CONSTRAINT DF_oi_description_id DEFAULT 0 CONSTRAINT FK_oi_description_id FOREIGN KEY REFERENCES /*_*/comment(comment_id),
oi_user int REFERENCES /*_*/mwuser(user_id),
oi_user_text nvarchar(255) NOT NULL CONSTRAINT DF_oi_user_text DEFAULT '',
- oi_actor bigint unsigned NOT NULL CONSTRAINT DF_oi_actor DEFAULT 0,
+ oi_actor bigint NOT NULL CONSTRAINT DF_oi_actor DEFAULT 0,
oi_timestamp varchar(14) NOT NULL default '',
oi_metadata varbinary(max) NOT NULL,
fa_deleted_user int,
fa_deleted_timestamp varchar(14) default '',
fa_deleted_reason nvarchar(max) CONSTRAINT DF_fa_deleted_reason DEFAULT '',
- fa_deleted_reason_id bigint unsigned NOT NULL CONSTRAINT DF_fa_deleted_reason_id DEFAULT 0 CONSTRAINT FK_fa_deleted_reason_id FOREIGN KEY REFERENCES /*_*/comment(comment_id),
+ fa_deleted_reason_id bigint NOT NULL CONSTRAINT DF_fa_deleted_reason_id DEFAULT 0 CONSTRAINT FK_fa_deleted_reason_id FOREIGN KEY REFERENCES /*_*/comment(comment_id),
-- Duped fields from image
fa_size int default 0,
fa_major_mime varchar(16) not null default 'unknown',
fa_minor_mime nvarchar(100) default 'unknown',
fa_description nvarchar(255) CONSTRAINT DF_fa_description DEFAULT '',
- fa_description_id bigint unsigned NOT NULL CONSTRAINT DF_fa_description DEFAULT 0 CONSTRAINT FK_fa_description FOREIGN KEY REFERENCES /*_*/comment(comment_id),
+ fa_description_id bigint NOT NULL CONSTRAINT DF_fa_description_id DEFAULT 0 CONSTRAINT FK_fa_description_id FOREIGN KEY REFERENCES /*_*/comment(comment_id),
fa_user int default 0 REFERENCES /*_*/mwuser(user_id) ON DELETE SET NULL,
fa_user_text nvarchar(255) CONSTRAINT DF_fa_user_text DEFAULT '',
- fa_actor bigint unsigned NOT NULL CONSTRAINT DF_fa_actor DEFAULT 0,
+ fa_actor bigint NOT NULL CONSTRAINT DF_fa_actor DEFAULT 0,
fa_timestamp varchar(14) default '',
-- Visibility of deleted revisions, bitfield
-- As in revision
rc_user int NOT NULL default 0 CONSTRAINT rc_user__user_id__fk FOREIGN KEY REFERENCES /*_*/mwuser(user_id),
rc_user_text nvarchar(255) NOT NULL CONSTRAINT DF_rc_user_text DEFAULT '',
- rc_actor bigint unsigned NOT NULL CONSTRAINT DF_rc_actor DEFAULT 0,
+ rc_actor bigint NOT NULL CONSTRAINT DF_rc_actor DEFAULT 0,
-- When pages are renamed, their RC entries do _not_ change.
rc_namespace int NOT NULL default 0,
-- as in revision...
rc_comment nvarchar(255) NOT NULL default '',
- rc_comment_id bigint unsigned NOT NULL CONSTRAINT DF_rc_comment_id DEFAULT 0 CONSTRAINT FK_rc_comment_id FOREIGN KEY REFERENCES /*_*/comment(comment_id),
+ rc_comment_id bigint NOT NULL CONSTRAINT DF_rc_comment_id DEFAULT 0 CONSTRAINT FK_rc_comment_id FOREIGN KEY REFERENCES /*_*/comment(comment_id),
rc_minor bit NOT NULL default 0,
-- Edits by user accounts with the 'bot' rights key are
log_user_text nvarchar(255) NOT NULL default '',
-- The actor who performed this action
- log_actor bigint unsigned NOT NULL CONSTRAINT DF_log_actor DEFAULT 0,
+ log_actor bigint NOT NULL CONSTRAINT DF_log_actor DEFAULT 0,
-- Key to the page affected. Where a user is the target,
-- this will point to the user page.
-- Key to comment_id. Comment summarizing the change.
-- ("DEFAULT 0" is temporary, signaling that log_comment should be used)
- log_comment_id bigint unsigned NOT NULL CONSTRAINT DF_log_comment_id DEFAULT 0 CONSTRAINT FK_log_comment_id FOREIGN KEY REFERENCES /*_*/comment(comment_id),
+ log_comment_id bigint NOT NULL CONSTRAINT DF_log_comment_id DEFAULT 0 CONSTRAINT FK_log_comment_id FOREIGN KEY REFERENCES /*_*/comment(comment_id),
-- miscellaneous parameters:
-- LF separated list (old system) or serialized PHP array (new system)
pt_title nvarchar(255) NOT NULL,
pt_user int REFERENCES /*_*/mwuser(user_id) ON DELETE SET NULL,
pt_reason nvarchar(255) CONSTRAINT DF_pt_reason DEFAULT '',
- pt_reason_id bigint unsigned NOT NULL CONSTRAINT DF_pt_reason_id DEFAULT 0 CONSTRAINT FK_pt_reason_id FOREIGN KEY REFERENCES /*_*/comment(comment_id),
+ pt_reason_id bigint NOT NULL CONSTRAINT DF_pt_reason_id DEFAULT 0 CONSTRAINT FK_pt_reason_id FOREIGN KEY REFERENCES /*_*/comment(comment_id),
pt_timestamp varchar(14) NOT NULL,
pt_expiry varchar(14) NOT NULL,
pt_create_perm nvarchar(60) NOT NULL,
CREATE INDEX &mw_prefix.actor_timestamp ON &mw_prefix.revision_actor_temp (revactor_actor,revactor_timestamp);
CREATE INDEX &mw_prefix.page_actor_timestamp ON &mw_prefix.revision_actor_temp (revactor_page,revactor_actor,revactor_timestamp);
-ALTER TABLE &mw_prefix.archive ALTER COLUMN ar_user_text VARCHAR2(255) NULL;
-ALTER TABLE &mw_prefix.archive ADD COLUMN ar_actor NUMBER DEFAULT 0 NOT NULL;
+ALTER TABLE &mw_prefix.archive MODIFY ( ar_user_text NULL );
+ALTER TABLE &mw_prefix.archive ADD ( ar_actor NUMBER DEFAULT 0 NOT NULL );
CREATE INDEX &mw_prefix.ar_actor_timestamp ON &mw_prefix.archive (ar_actor,ar_timestamp);
-ALTER TABLE &mw_prefix.ipblocks ADD COLUMN ipb_by_actor NUMBER DEFUALT 0 NOT NULL;
+ALTER TABLE &mw_prefix.ipblocks ADD ( ipb_by_actor NUMBER DEFAULT 0 NOT NULL );
-ALTER TABLE &mw_prefix.image ALTER COLUMN img_user_text VARCHAR2(255) NULL;
-ALTER TABLE &mw_prefix.image ADD COLUMN img_actor NUMBER DEFAULT 0 NOT NULL;
+ALTER TABLE &mw_prefix.image MODIFY ( img_user_text NULL );
+ALTER TABLE &mw_prefix.image ADD ( img_actor NUMBER DEFAULT 0 NOT NULL );
CREATE INDEX &mw_prefix.img_actor_timestamp ON &mw_prefix.image (img_actor, img_timestamp);
-ALTER TABLE &mw_prefix.oldimage ALTER COLUMN oi_user_text VARCHAR2(255) NULL;
-ALTER TABLE &mw_prefix.oldimage ADD COLUMN oi_actor NUMBER DEFAULT 0 NOT NULL;
+ALTER TABLE &mw_prefix.oldimage MODIFY ( oi_user_text NULL );
+ALTER TABLE &mw_prefix.oldimage ADD ( oi_actor NUMBER DEFAULT 0 NOT NULL );
CREATE INDEX &mw_prefix.oi_actor_timestamp ON &mw_prefix.oldimage (oi_actor,oi_timestamp);
-ALTER TABLE &mw_prefix.filearchive ALTER COLUMN fa_user_text VARCHAR2(255) NULL;
-ALTER TABLE &mw_prefix.filearchive ADD COLUMN fa_actor NUMBER DEFAULT 0 NOT NULL;
+ALTER TABLE &mw_prefix.filearchive MODIFY ( fa_user_text NULL );
+ALTER TABLE &mw_prefix.filearchive ADD ( fa_actor NUMBER DEFAULT 0 NOT NULL );
CREATE INDEX &mw_prefix.fa_actor_timestamp ON &mw_prefix.filearchive (fa_actor,fa_timestamp);
-ALTER TABLE &mw_prefix.recentchanges ALTER COLUMN rc_user_text VARCHAR2(255) NULL;
-ALTER TABLE &mw_prefix.recentchanges ADD COLUMN rc_actor NUMBER DEFAULT 0 NOT NULL;
+ALTER TABLE &mw_prefix.recentchanges MODIFY ( rc_user_text NULL );
+ALTER TABLE &mw_prefix.recentchanges ADD ( rc_actor NUMBER DEFAULT 0 NOT NULL );
CREATE INDEX &mw_prefix.rc_ns_actor ON &mw_prefix.recentchanges (rc_namespace, rc_actor);
CREATE INDEX &mw_prefix.rc_actor ON &mw_prefix.recentchanges (rc_actor, rc_timestamp);
-ALTER TABLE &mw_prefix.logging ADD COLUMN log_actor NUMBER DEFAULT 0 NOT NULL;
+ALTER TABLE &mw_prefix.logging ADD ( log_actor NUMBER DEFAULT 0 NOT NULL );
CREATE INDEX &mw_prefix.actor_time ON &mw_prefix.logging (log_actor, log_timestamp);
CREATE INDEX &mw_prefix.log_actor_type_time ON &mw_prefix.logging (log_actor, log_type, log_timestamp);
CREATE UNIQUE INDEX &mw_prefix.imgcomment_name ON &mw_prefix.image_comment_temp (imgcomment_name);
-ALTER TABLE &mw_prefix.archive ADD COLUMN ar_comment_id NUMBER DEFAULT 0 NOT NULL;
+ALTER TABLE &mw_prefix.archive ADD ( ar_comment_id NUMBER DEFAULT 0 NOT NULL );
ALTER TABLE &mw_prefix.archive ADD CONSTRAINT &mw_prefix.archive_fk2 FOREIGN KEY (ar_comment_id) REFERENCES &mw_prefix."COMMENT"(comment_id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
-ALTER TABLE &mw_prefix.ipblocks ALTER COLUMN ipb_reason VARCHAR2(255) NULL;
-ALTER TABLE &mw_prefix.ipblocks ADD COLUMN ipb_reason_id NUMBER DEFAULT 0 NOT NULL;
+ALTER TABLE &mw_prefix.ipblocks MODIFY ( ipb_reason NULL );
+ALTER TABLE &mw_prefix.ipblocks ADD ( ipb_reason_id NUMBER DEFAULT 0 NOT NULL );
ALTER TABLE &mw_prefix.ipblocks ADD CONSTRAINT &mw_prefix.ipblocks_fk3 FOREIGN KEY (ipb_reason_id) REFERENCES &mw_prefix."COMMENT"(comment_id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
-ALTER TABLE &mw_prefix.oldimage ADD COLUMN oi_description_id NUMBER DEFAULT 0 NOT NULL;
+ALTER TABLE &mw_prefix.oldimage ADD ( oi_description_id NUMBER DEFAULT 0 NOT NULL );
ALTER TABLE &mw_prefix.oldimage ADD CONSTRAINT &mw_prefix.oldimage_fk3 FOREIGN KEY (oi_description_id) REFERENCES &mw_prefix."COMMENT"(comment_id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
-ALTER TABLE &mw_prefix.filearchive ADD COLUMN fa_deleted_reason_id NUMBER DEFAULT 0 NOT NULL;
-ALTER TABLE &mw_prefix.filearchive ADD COLUMN fa_description_id NUMBER DEFAULT 0 NOT NULL;
+ALTER TABLE &mw_prefix.filearchive ADD ( fa_deleted_reason_id NUMBER DEFAULT 0 NOT NULL );
+ALTER TABLE &mw_prefix.filearchive ADD ( fa_description_id NUMBER DEFAULT 0 NOT NULL );
ALTER TABLE &mw_prefix.filearchive ADD CONSTRAINT &mw_prefix.filearchive_fk3 FOREIGN KEY (fa_deleted_reason_id) REFERENCES &mw_prefix."COMMENT"(comment_id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
ALTER TABLE &mw_prefix.filearchive ADD CONSTRAINT &mw_prefix.filearchive_fk4 FOREIGN KEY (fa_description_id) REFERENCES &mw_prefix."COMMENT"(comment_id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
-ALTER TABLE &mw_prefix.recentchanges ADD COLUMN rc_comment_id NUMBER DEFAULT 0 NOT NULL;
+ALTER TABLE &mw_prefix.recentchanges ADD ( rc_comment_id NUMBER DEFAULT 0 NOT NULL );
ALTER TABLE &mw_prefix.recentchanges ADD CONSTRAINT &mw_prefix.recentchanges_fk3 FOREIGN KEY (rc_comment_id) REFERENCES &mw_prefix."COMMENT"(comment_id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
-ALTER TABLE &mw_prefix.logging ADD COLUMN log_comment_id NUMBER DEFAULT 0 NOT NULL;
+ALTER TABLE &mw_prefix.logging ADD ( log_comment_id NUMBER DEFAULT 0 NOT NULL );
ALTER TABLE &mw_prefix.logging ADD CONSTRAINT &mw_prefix.logging_fk2 FOREIGN KEY (log_comment_id) REFERENCES &mw_prefix."COMMENT"(comment_id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
-ALTER TABLE &mw_prefix.protected_titles ADD COLUMN pt_reason_id NUMBER DEFAULT 0 NOT NULL;
+ALTER TABLE &mw_prefix.protected_titles ADD ( pt_reason_id NUMBER DEFAULT 0 NOT NULL );
ALTER TABLE &mw_prefix.protected_titles ADD CONSTRAINT &mw_prefix.protected_titles_fk1 FOREIGN KEY (pt_reason_id) REFERENCES &mw_prefix."COMMENT"(comment_id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
ipb_user NUMBER DEFAULT 0 NOT NULL,
ipb_by NUMBER DEFAULT 0 NOT NULL,
ipb_by_text VARCHAR2(255) NULL,
- ipb_by_actor NUMBER DEFUALT 0 NOT NULL,
+ ipb_by_actor NUMBER DEFAULT 0 NOT NULL,
ipb_reason VARCHAR2(255) NULL,
ipb_reason_id NUMBER DEFAULT 0 NOT NULL,
ipb_timestamp TIMESTAMP(6) WITH TIME ZONE NOT NULL,
'raw' => true,
],
+ /* EasyDeflate */
+
+ 'easy-deflate.core' => [
+ 'scripts' => [ 'resources/lib/easy-deflate/easydeflate.js' ],
+ 'targets' => [ 'desktop', 'mobile' ],
+ ],
+
+ 'easy-deflate.deflate' => [
+ 'scripts' => [ 'resources/lib/easy-deflate/deflate.js' ],
+ 'dependencies' => [ 'easy-deflate.core' ],
+ 'targets' => [ 'desktop', 'mobile' ],
+ ],
+
+ 'easy-deflate.inflate' => [
+ 'scripts' => [ 'resources/lib/easy-deflate/inflate.js' ],
+ 'dependencies' => [ 'easy-deflate.core' ],
+ 'targets' => [ 'desktop', 'mobile' ],
+ ],
+
/* OOjs */
'oojs' => [
'scripts' => [
--- /dev/null
+Modified version of Easy-Deflate https://github.com/Jacob-Christian-Munch-Andersen/Easy-Deflate
+
+This version: https://github.com/edg2s/Easy-Deflate
+
+* Added semi-colons to easydeflate.js so it can be minified
+* Namespaced functions inside global EasyDeflate object
+* Base64 lib replaced with one with detailed license info
+
+Modifications by Ed Sanders, Public Domain.
+
+Easy-Deflate
+============
+
+Library for compressing and decompressing strings in JavaScript. It features full Unicode support and is compatible with most browsers.
+
+Use:
+====
+Copy the script inclusion from demo.html.<br>
+Call EasyDeflate.deflate(foo) in order to compress a string.<br>
+Call EasyDeflate.inflate(bar) in order to decompress a string compressed in this manner.<br>
+Both functions return a string, or null in case of illegal input.
+
+The compression works by first UTF-8 encoding the input, then compressing it to a raw deflate stream. The stream is then base64 encoded, and finally the identifier "rawdeflate," is prepended.
+
+Credits:
+========
+Gildas Lormeau made the JavaScript conversion of a Deflate utility: https://github.com/gildas-lormeau/zip.js<br>
+Jacob Christian Munch-Andersen made this package in order to make simple use easier and compatible with older browsers.
+
+The following shims are included:<br>
+es5-shim by Kristopher Michael Kowal https://github.com/kriskowal/es5-shim<br>
+JSON 3 by Kit Cambridge http://bestiejs.github.com/json3/<br>
+Typed arrays light shim by Jacob Christian Munch-Andersen https://github.com/Jacob-Christian-Munch-Andersen/Typed-arrays-light-shim<br>
+<s>base64 by Yaffle https://gist.github.com/1284012</s>
+
+License:
+========
+The main packages come with a BSD license; each of the shims has a permissive license, except for base64, which includes no license text.
--- /dev/null
+/*
+ Copyright (c) 2013 Gildas Lormeau. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the distribution.
+
+ 3. The names of the authors may not be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
+ INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
+ INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+ OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This program is based on JZlib 1.0.2 ymnk, JCraft,Inc.
+ * JZlib is based on zlib-1.1.3, so all credit should go to the authors
+ * Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu)
+ * and contributors of zlib.
+ */
+
+(function(obj) {
+
+ // Global
+
+ var MAX_BITS = 15;
+ var D_CODES = 30;
+ var BL_CODES = 19;
+
+ var LENGTH_CODES = 29;
+ var LITERALS = 256;
+ var L_CODES = (LITERALS + 1 + LENGTH_CODES);
+ var HEAP_SIZE = (2 * L_CODES + 1);
+
+ var END_BLOCK = 256;
+
+ // Bit length codes must not exceed MAX_BL_BITS bits
+ var MAX_BL_BITS = 7;
+
+ // repeat previous bit length 3-6 times (2 bits of repeat count)
+ var REP_3_6 = 16;
+
+ // repeat a zero length 3-10 times (3 bits of repeat count)
+ var REPZ_3_10 = 17;
+
+ // repeat a zero length 11-138 times (7 bits of repeat count)
+ var REPZ_11_138 = 18;
+
+ // The lengths of the bit length codes are sent in order of decreasing
+ // probability, to avoid transmitting the lengths for unused bit
+ // length codes.
+
+ var Buf_size = 8 * 2;
+
+ // JZlib version : "1.0.2"
+ var Z_DEFAULT_COMPRESSION = -1;
+
+ // compression strategy
+ var Z_FILTERED = 1;
+ var Z_HUFFMAN_ONLY = 2;
+ var Z_DEFAULT_STRATEGY = 0;
+
+ var Z_NO_FLUSH = 0;
+ var Z_PARTIAL_FLUSH = 1;
+ var Z_FULL_FLUSH = 3;
+ var Z_FINISH = 4;
+
+ var Z_OK = 0;
+ var Z_STREAM_END = 1;
+ var Z_NEED_DICT = 2;
+ var Z_STREAM_ERROR = -2;
+ var Z_DATA_ERROR = -3;
+ var Z_BUF_ERROR = -5;
+
+ // Tree
+
+ // see definition of array dist_code below
+ var _dist_code = [ 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 0, 0, 16, 17, 18, 18, 19, 19,
+ 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+ 27, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 29,
+ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
+ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29 ];
+
+ function Tree() {
+ var that = this;
+
+ // dyn_tree; // the dynamic tree
+ // max_code; // largest code with non zero frequency
+ // stat_desc; // the corresponding static tree
+
+ // Compute the optimal bit lengths for a tree and update the total bit
+ // length
+ // for the current block.
+ // IN assertion: the fields freq and dad are set, heap[heap_max] and
+ // above are the tree nodes sorted by increasing frequency.
+ // OUT assertions: the field len is set to the optimal bit length, the
+ // array bl_count contains the frequencies for each bit length.
+ // The length opt_len is updated; static_len is also updated if stree is
+ // not null.
+ function gen_bitlen(s) {
+ var tree = that.dyn_tree;
+ var stree = that.stat_desc.static_tree;
+ var extra = that.stat_desc.extra_bits;
+ var base = that.stat_desc.extra_base;
+ var max_length = that.stat_desc.max_length;
+ var h; // heap index
+ var n, m; // iterate over the tree elements
+ var bits; // bit length
+ var xbits; // extra bits
+ var f; // frequency
+ var overflow = 0; // number of elements with bit length too large
+
+ for (bits = 0; bits <= MAX_BITS; bits++)
+ s.bl_count[bits] = 0;
+
+ // In a first pass, compute the optimal bit lengths (which may
+ // overflow in the case of the bit length tree).
+ tree[s.heap[s.heap_max] * 2 + 1] = 0; // root of the heap
+
+ for (h = s.heap_max + 1; h < HEAP_SIZE; h++) {
+ n = s.heap[h];
+ bits = tree[tree[n * 2 + 1] * 2 + 1] + 1;
+ if (bits > max_length) {
+ bits = max_length;
+ overflow++;
+ }
+ tree[n * 2 + 1] = bits;
+ // We overwrite tree[n*2+1] which is no longer needed
+
+ if (n > that.max_code)
+ continue; // not a leaf node
+
+ s.bl_count[bits]++;
+ xbits = 0;
+ if (n >= base)
+ xbits = extra[n - base];
+ f = tree[n * 2];
+ s.opt_len += f * (bits + xbits);
+ if (stree)
+ s.static_len += f * (stree[n * 2 + 1] + xbits);
+ }
+ if (overflow === 0)
+ return;
+
+ // This happens for example on obj2 and pic of the Calgary corpus
+ // Find the first bit length which could increase:
+ do {
+ bits = max_length - 1;
+ while (s.bl_count[bits] === 0)
+ bits--;
+ s.bl_count[bits]--; // move one leaf down the tree
+ s.bl_count[bits + 1] += 2; // move one overflow item as its brother
+ s.bl_count[max_length]--;
+ // The brother of the overflow item also moves one step up,
+ // but this does not affect bl_count[max_length]
+ overflow -= 2;
+ } while (overflow > 0);
+
+ for (bits = max_length; bits !== 0; bits--) {
+ n = s.bl_count[bits];
+ while (n !== 0) {
+ m = s.heap[--h];
+ if (m > that.max_code)
+ continue;
+ if (tree[m * 2 + 1] != bits) {
+ s.opt_len += (bits - tree[m * 2 + 1]) * tree[m * 2];
+ tree[m * 2 + 1] = bits;
+ }
+ n--;
+ }
+ }
+ }
+
+ // Reverse the first len bits of a code, using straightforward code (a
+ // faster
+ // method would use a table)
+ // IN assertion: 1 <= len <= 15
+ function bi_reverse(code, // the value to invert
+ len // its bit length
+ ) {
+ var res = 0;
+ do {
+ res |= code & 1;
+ code >>>= 1;
+ res <<= 1;
+ } while (--len > 0);
+ return res >>> 1;
+ }
+
+ // Generate the codes for a given tree and bit counts (which need not be
+ // optimal).
+ // IN assertion: the array bl_count contains the bit length statistics for
+ // the given tree and the field len is set for all tree elements.
+ // OUT assertion: the field code is set for all tree elements of non
+ // zero code length.
+ function gen_codes(tree, // the tree to decorate
+ max_code, // largest code with non zero frequency
+ bl_count // number of codes at each bit length
+ ) {
+ var next_code = []; // next code value for each
+ // bit length
+ var code = 0; // running code value
+ var bits; // bit index
+ var n; // code index
+ var len;
+
+ // The distribution counts are first used to generate the code values
+ // without bit reversal.
+ for (bits = 1; bits <= MAX_BITS; bits++) {
+ next_code[bits] = code = ((code + bl_count[bits - 1]) << 1);
+ }
+
+ // Check that the bit counts in bl_count are consistent. The last code
+ // must be all ones.
+ // Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1,
+ // "inconsistent bit counts");
+ // Tracev((stderr,"\ngen_codes: max_code %d ", max_code));
+
+ for (n = 0; n <= max_code; n++) {
+ len = tree[n * 2 + 1];
+ if (len === 0)
+ continue;
+ // Now reverse the bits
+ tree[n * 2] = bi_reverse(next_code[len]++, len);
+ }
+ }
+
+ // Construct one Huffman tree and assigns the code bit strings and lengths.
+ // Update the total bit length for the current block.
+ // IN assertion: the field freq is set for all tree elements.
+ // OUT assertions: the fields len and code are set to the optimal bit length
+ // and corresponding code. The length opt_len is updated; static_len is
+ // also updated if stree is not null. The field max_code is set.
+ that.build_tree = function(s) {
+ var tree = that.dyn_tree;
+ var stree = that.stat_desc.static_tree;
+ var elems = that.stat_desc.elems;
+ var n, m; // iterate over heap elements
+ var max_code = -1; // largest code with non zero frequency
+ var node; // new node being created
+
+ // Construct the initial heap, with least frequent element in
+ // heap[1]. The sons of heap[n] are heap[2*n] and heap[2*n+1].
+ // heap[0] is not used.
+ s.heap_len = 0;
+ s.heap_max = HEAP_SIZE;
+
+ for (n = 0; n < elems; n++) {
+ if (tree[n * 2] !== 0) {
+ s.heap[++s.heap_len] = max_code = n;
+ s.depth[n] = 0;
+ } else {
+ tree[n * 2 + 1] = 0;
+ }
+ }
+
+ // The pkzip format requires that at least one distance code exists,
+ // and that at least one bit should be sent even if there is only one
+ // possible code. So to avoid special checks later on we force at least
+ // two codes of non zero frequency.
+ while (s.heap_len < 2) {
+ node = s.heap[++s.heap_len] = max_code < 2 ? ++max_code : 0;
+ tree[node * 2] = 1;
+ s.depth[node] = 0;
+ s.opt_len--;
+ if (stree)
+ s.static_len -= stree[node * 2 + 1];
+ // node is 0 or 1 so it does not have extra bits
+ }
+ that.max_code = max_code;
+
+ // The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree,
+ // establish sub-heaps of increasing lengths:
+
+ for (n = Math.floor(s.heap_len / 2); n >= 1; n--)
+ s.pqdownheap(tree, n);
+
+ // Construct the Huffman tree by repeatedly combining the least two
+ // frequent nodes.
+
+ node = elems; // next internal node of the tree
+ do {
+ // n = node of least frequency
+ n = s.heap[1];
+ s.heap[1] = s.heap[s.heap_len--];
+ s.pqdownheap(tree, 1);
+ m = s.heap[1]; // m = node of next least frequency
+
+ s.heap[--s.heap_max] = n; // keep the nodes sorted by frequency
+ s.heap[--s.heap_max] = m;
+
+ // Create a new node father of n and m
+ tree[node * 2] = (tree[n * 2] + tree[m * 2]);
+ s.depth[node] = Math.max(s.depth[n], s.depth[m]) + 1;
+ tree[n * 2 + 1] = tree[m * 2 + 1] = node;
+
+ // and insert the new node in the heap
+ s.heap[1] = node++;
+ s.pqdownheap(tree, 1);
+ } while (s.heap_len >= 2);
+
+ s.heap[--s.heap_max] = s.heap[1];
+
+ // At this point, the fields freq and dad are set. We can now
+ // generate the bit lengths.
+
+ gen_bitlen(s);
+
+ // The field len is now set, we can generate the bit codes
+ gen_codes(tree, that.max_code, s.bl_count);
+ };
+
+ }
+
+ Tree._length_code = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16,
+ 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 18, 18, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19, 19, 19, 19, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+ 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28 ];
+
+ Tree.base_length = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 0 ];
+
+ Tree.base_dist = [ 0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192, 256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192, 12288, 16384,
+ 24576 ];
+
+ // Mapping from a distance to a distance code. dist is the distance - 1 and
+ // must not have side effects. _dist_code[256] and _dist_code[257] are never
+ // used.
+ Tree.d_code = function(dist) {
+ return ((dist) < 256 ? _dist_code[dist] : _dist_code[256 + ((dist) >>> 7)]);
+ };
+
+ // extra bits for each length code
+ Tree.extra_lbits = [ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0 ];
+
+ // extra bits for each distance code
+ Tree.extra_dbits = [ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13 ];
+
+ // extra bits for each bit length code
+ Tree.extra_blbits = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 7 ];
+
+ Tree.bl_order = [ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 ];
+
+ // StaticTree
+ 
+ // Descriptor for one of DEFLATE's fixed (static) Huffman trees.
+ // static_tree: flat array of (code, bits) pairs, or null (see send_code,
+ // which reads tree[c*2] as the code and tree[c*2+1] as its bit length)
+ // extra_bits: table of extra bits to send for each code
+ // extra_base: base index into extra_bits
+ // elems: number of elements in the tree
+ // max_length: maximum bit length for the codes
+ function StaticTree(static_tree, extra_bits, extra_base, elems, max_length) {
+ var that = this;
+ that.static_tree = static_tree;
+ that.extra_bits = extra_bits;
+ that.extra_base = extra_base;
+ that.elems = elems;
+ that.max_length = max_length;
+ }
+ 
+ // Fixed literal/length tree as (code, length-in-bits) pairs.
+ StaticTree.static_ltree = [ 12, 8, 140, 8, 76, 8, 204, 8, 44, 8, 172, 8, 108, 8, 236, 8, 28, 8, 156, 8, 92, 8, 220, 8, 60, 8, 188, 8, 124, 8, 252, 8, 2, 8,
+ 130, 8, 66, 8, 194, 8, 34, 8, 162, 8, 98, 8, 226, 8, 18, 8, 146, 8, 82, 8, 210, 8, 50, 8, 178, 8, 114, 8, 242, 8, 10, 8, 138, 8, 74, 8, 202, 8, 42,
+ 8, 170, 8, 106, 8, 234, 8, 26, 8, 154, 8, 90, 8, 218, 8, 58, 8, 186, 8, 122, 8, 250, 8, 6, 8, 134, 8, 70, 8, 198, 8, 38, 8, 166, 8, 102, 8, 230, 8,
+ 22, 8, 150, 8, 86, 8, 214, 8, 54, 8, 182, 8, 118, 8, 246, 8, 14, 8, 142, 8, 78, 8, 206, 8, 46, 8, 174, 8, 110, 8, 238, 8, 30, 8, 158, 8, 94, 8,
+ 222, 8, 62, 8, 190, 8, 126, 8, 254, 8, 1, 8, 129, 8, 65, 8, 193, 8, 33, 8, 161, 8, 97, 8, 225, 8, 17, 8, 145, 8, 81, 8, 209, 8, 49, 8, 177, 8, 113,
+ 8, 241, 8, 9, 8, 137, 8, 73, 8, 201, 8, 41, 8, 169, 8, 105, 8, 233, 8, 25, 8, 153, 8, 89, 8, 217, 8, 57, 8, 185, 8, 121, 8, 249, 8, 5, 8, 133, 8,
+ 69, 8, 197, 8, 37, 8, 165, 8, 101, 8, 229, 8, 21, 8, 149, 8, 85, 8, 213, 8, 53, 8, 181, 8, 117, 8, 245, 8, 13, 8, 141, 8, 77, 8, 205, 8, 45, 8,
+ 173, 8, 109, 8, 237, 8, 29, 8, 157, 8, 93, 8, 221, 8, 61, 8, 189, 8, 125, 8, 253, 8, 19, 9, 275, 9, 147, 9, 403, 9, 83, 9, 339, 9, 211, 9, 467, 9,
+ 51, 9, 307, 9, 179, 9, 435, 9, 115, 9, 371, 9, 243, 9, 499, 9, 11, 9, 267, 9, 139, 9, 395, 9, 75, 9, 331, 9, 203, 9, 459, 9, 43, 9, 299, 9, 171, 9,
+ 427, 9, 107, 9, 363, 9, 235, 9, 491, 9, 27, 9, 283, 9, 155, 9, 411, 9, 91, 9, 347, 9, 219, 9, 475, 9, 59, 9, 315, 9, 187, 9, 443, 9, 123, 9, 379,
+ 9, 251, 9, 507, 9, 7, 9, 263, 9, 135, 9, 391, 9, 71, 9, 327, 9, 199, 9, 455, 9, 39, 9, 295, 9, 167, 9, 423, 9, 103, 9, 359, 9, 231, 9, 487, 9, 23,
+ 9, 279, 9, 151, 9, 407, 9, 87, 9, 343, 9, 215, 9, 471, 9, 55, 9, 311, 9, 183, 9, 439, 9, 119, 9, 375, 9, 247, 9, 503, 9, 15, 9, 271, 9, 143, 9,
+ 399, 9, 79, 9, 335, 9, 207, 9, 463, 9, 47, 9, 303, 9, 175, 9, 431, 9, 111, 9, 367, 9, 239, 9, 495, 9, 31, 9, 287, 9, 159, 9, 415, 9, 95, 9, 351, 9,
+ 223, 9, 479, 9, 63, 9, 319, 9, 191, 9, 447, 9, 127, 9, 383, 9, 255, 9, 511, 9, 0, 7, 64, 7, 32, 7, 96, 7, 16, 7, 80, 7, 48, 7, 112, 7, 8, 7, 72, 7,
+ 40, 7, 104, 7, 24, 7, 88, 7, 56, 7, 120, 7, 4, 7, 68, 7, 36, 7, 100, 7, 20, 7, 84, 7, 52, 7, 116, 7, 3, 8, 131, 8, 67, 8, 195, 8, 35, 8, 163, 8,
+ 99, 8, 227, 8 ];
+ 
+ // Fixed distance tree as (code, length-in-bits) pairs; all codes are 5 bits.
+ StaticTree.static_dtree = [ 0, 5, 16, 5, 8, 5, 24, 5, 4, 5, 20, 5, 12, 5, 28, 5, 2, 5, 18, 5, 10, 5, 26, 5, 6, 5, 22, 5, 14, 5, 30, 5, 1, 5, 17, 5, 9, 5,
+ 25, 5, 5, 5, 21, 5, 13, 5, 29, 5, 3, 5, 19, 5, 11, 5, 27, 5, 7, 5, 23, 5 ];
+ 
+ // Descriptor for the fixed literal/length tree.
+ StaticTree.static_l_desc = new StaticTree(StaticTree.static_ltree, Tree.extra_lbits, LITERALS + 1, L_CODES, MAX_BITS);
+ 
+ // Descriptor for the fixed distance tree.
+ StaticTree.static_d_desc = new StaticTree(StaticTree.static_dtree, Tree.extra_dbits, 0, D_CODES, MAX_BITS);
+ 
+ // Descriptor for the bit-length tree (no fixed codes, hence null).
+ StaticTree.static_bl_desc = new StaticTree(null, Tree.extra_blbits, 0, BL_CODES, MAX_BL_BITS);
+
+ // Deflate
+ 
+ // Memory-level bounds (names follow zlib's MAX_MEM_LEVEL / DEF_MEM_LEVEL).
+ var MAX_MEM_LEVEL = 9;
+ var DEF_MEM_LEVEL = 8;
+ 
+ // Per-compression-level tuning parameters; instances are stored in
+ // config_table and looked up by compression level (see lm_init).
+ // good_length: reduce lazy search above this match length
+ // max_lazy: do not perform lazy search above this match length
+ // nice_length: quit search above this match length
+ // max_chain: maximum hash-chain length to search
+ // func: which deflate function to use (STORED / FAST / SLOW)
+ function Config(good_length, max_lazy, nice_length, max_chain, func) {
+ var that = this;
+ that.good_length = good_length;
+ that.max_lazy = max_lazy;
+ that.nice_length = nice_length;
+ that.max_chain = max_chain;
+ that.func = func;
+ }
+ 
+ var STORED = 0;
+ var FAST = 1;
+ var SLOW = 2;
+ // config_table[level] gives the tuning parameters for levels 0..9.
+ var config_table = [ new Config(0, 0, 0, 0, STORED), new Config(4, 4, 8, 4, FAST), new Config(4, 5, 16, 8, FAST), new Config(4, 6, 32, 32, FAST),
+ new Config(4, 4, 16, 16, SLOW), new Config(8, 16, 32, 32, SLOW), new Config(8, 16, 128, 128, SLOW), new Config(8, 32, 128, 256, SLOW),
+ new Config(32, 128, 258, 1024, SLOW), new Config(32, 258, 258, 4096, SLOW) ];
+ 
+ // Error message strings; presumably indexed relative to the zlib error
+ // code noted on each entry — TODO confirm against the lookup site.
+ var z_errmsg = [ "need dictionary", // Z_NEED_DICT 2
+ "stream end", // Z_STREAM_END 1
+ "", // Z_OK 0
+ "", // Z_ERRNO (-1)
+ "stream error", // Z_STREAM_ERROR (-2)
+ "data error", // Z_DATA_ERROR (-3)
+ "", // Z_MEM_ERROR (-4)
+ "buffer error", // Z_BUF_ERROR (-5)
+ "",// Z_VERSION_ERROR (-6)
+ "" ];
+ 
+ // block not completed, need more input or more output
+ var NeedMore = 0;
+ 
+ // block flush performed
+ var BlockDone = 1;
+ 
+ // finish started, need only more output at next deflate
+ var FinishStarted = 2;
+ 
+ // finish done, accept no more input or output
+ var FinishDone = 3;
+ 
+ // preset dictionary flag in zlib header
+ var PRESET_DICT = 0x20;
+ 
+ var INIT_STATE = 42;
+ var BUSY_STATE = 113;
+ var FINISH_STATE = 666;
+ 
+ // The deflate compression method
+ var Z_DEFLATED = 8;
+ 
+ // Block types emitted in the 3-bit block header.
+ var STORED_BLOCK = 0;
+ var STATIC_TREES = 1;
+ var DYN_TREES = 2;
+ 
+ // Minimum and maximum match lengths.
+ var MIN_MATCH = 3;
+ var MAX_MATCH = 258;
+ // Minimum amount of lookahead, except at the end of the input file.
+ var MIN_LOOKAHEAD = (MAX_MATCH + MIN_MATCH + 1);
+ 
+ // Compares the nodes n and m of `tree` (frequencies live at tree[x*2]):
+ // the lower frequency wins; on a tie, the smaller depth[] wins.
+ // Used by pqdownheap to maintain the heap ordering.
+ function smaller(tree, n, m, depth) {
+ var tn2 = tree[n * 2];
+ var tm2 = tree[m * 2];
+ return (tn2 < tm2 || (tn2 == tm2 && depth[n] <= depth[m]));
+ }
+
+ // Deflate compression state. Appears to be a port of zlib's deflate.c /
+ // trees.c; this constructor declares the per-stream state, and the rest of
+ // the definition continues beyond this hunk.
+ function Deflate() {
+ 
+ var that = this;
+ var strm; // pointer back to this zlib stream
+ var status; // as the name implies
+ // pending_buf; // output still pending
+ var pending_buf_size; // size of pending_buf
+ // pending_out; // next pending byte to output to the stream
+ // pending; // nb of bytes in the pending buffer
+ var method; // STORED (for zip only) or DEFLATED
+ var last_flush; // value of flush param for previous deflate call
+ 
+ var w_size; // LZ77 window size (32K by default)
+ var w_bits; // log2(w_size) (8..16)
+ var w_mask; // w_size - 1
+ 
+ var window;
+ // Sliding window. Input bytes are read into the second half of the window,
+ // and move to the first half later to keep a dictionary of at least wSize
+ // bytes. With this organization, matches are limited to a distance of
+ // wSize-MAX_MATCH bytes, but this ensures that IO is always
+ // performed with a length multiple of the block size. Also, it limits
+ // the window size to 64K, which is quite useful on MSDOS.
+ // To do: use the user input buffer as sliding window.
+ 
+ var window_size;
+ // Actual size of window: 2*wSize, except when the user input buffer
+ // is directly used as sliding window.
+ 
+ var prev;
+ // Link to older string with same hash index. To limit the size of this
+ // array to 64K, this link is maintained only for the last 32K strings.
+ // An index in this array is thus a window index modulo 32K.
+ 
+ var head; // Heads of the hash chains or NIL.
+ 
+ var ins_h; // hash index of string to be inserted
+ var hash_size; // number of elements in hash table
+ var hash_bits; // log2(hash_size)
+ var hash_mask; // hash_size-1
+ 
+ // Number of bits by which ins_h must be shifted at each input
+ // step. It must be such that after MIN_MATCH steps, the oldest
+ // byte no longer takes part in the hash key, that is:
+ // hash_shift * MIN_MATCH >= hash_bits
+ var hash_shift;
+ 
+ // Window position at the beginning of the current output block. Gets
+ // negative when the window is moved backwards.
+ 
+ var block_start;
+ 
+ var match_length; // length of best match
+ var prev_match; // previous match
+ var match_available; // set if previous match exists
+ var strstart; // start of string to insert
+ var match_start; // start of matching string
+ var lookahead; // number of valid bytes ahead in window
+ 
+ // Length of the best match at previous step. Matches not greater than this
+ // are discarded. This is used in the lazy match evaluation.
+ var prev_length;
+ 
+ // To speed up deflation, hash chains are never searched beyond this
+ // length. A higher limit improves compression ratio but degrades the speed.
+ var max_chain_length;
+ 
+ // Attempt to find a better match only when the current match is strictly
+ // smaller than this value. This mechanism is used only for compression
+ // levels >= 4.
+ var max_lazy_match;
+ 
+ // Insert new strings in the hash table only if the match length is not
+ // greater than this length. This saves time but degrades compression.
+ // max_insert_length is used only for compression levels <= 3.
+ 
+ var level; // compression level (1..9)
+ var strategy; // favor or force Huffman coding
+ 
+ // Use a faster search when the previous match is longer than this
+ var good_match;
+ 
+ // Stop searching when current match exceeds this
+ var nice_match;
+ 
+ var dyn_ltree; // literal and length tree
+ var dyn_dtree; // distance tree
+ var bl_tree; // Huffman tree for bit lengths
+ 
+ var l_desc = new Tree(); // desc for literal tree
+ var d_desc = new Tree(); // desc for distance tree
+ var bl_desc = new Tree(); // desc for bit length tree
+ 
+ // that.heap_len; // number of elements in the heap
+ // that.heap_max; // element of largest frequency
+ // The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.
+ // The same heap array is used to build all trees.
+ 
+ // Depth of each subtree used as tie breaker for trees of equal frequency
+ that.depth = [];
+ 
+ var l_buf; // index for literals or lengths
+ 
+ // Size of match buffer for literals/lengths. There are 4 reasons for
+ // limiting lit_bufsize to 64K:
+ // - frequencies can be kept in 16 bit counters
+ // - if compression is not successful for the first block, all input
+ // data is still in the window so we can still emit a stored block even
+ // when input comes from standard input. (This can also be done for
+ // all blocks if lit_bufsize is not greater than 32K.)
+ // - if compression is not successful for a file smaller than 64K, we can
+ // even emit a stored file instead of a stored block (saving 5 bytes).
+ // This is applicable only for zip (not gzip or zlib).
+ // - creating new Huffman trees less frequently may not provide fast
+ // adaptation to changes in the input data statistics. (Take for
+ // example a binary file with poorly compressible code followed by
+ // a highly compressible string table.) Smaller buffer sizes give
+ // fast adaptation but have of course the overhead of transmitting
+ // trees more frequently.
+ // - I can't count above 4
+ var lit_bufsize;
+ 
+ var last_lit; // running index in l_buf
+ 
+ // Buffer for distances. To simplify the code, d_buf and l_buf have
+ // the same number of elements. To use different lengths, an extra flag
+ // array would be necessary.
+ 
+ var d_buf; // index into pending_buf
+ 
+ // that.opt_len; // bit length of current block with optimal trees
+ // that.static_len; // bit length of current block with static trees
+ var matches; // number of string matches in current block
+ var last_eob_len; // bit length of EOB code for last block
+ 
+ // Output buffer. bits are inserted starting at the bottom (least
+ // significant bits).
+ var bi_buf;
+ 
+ // Number of valid bits in bi_buf. All bits above the last valid bit
+ // are always zero.
+ var bi_valid;
+ 
+ // number of codes at each bit length for an optimal tree
+ that.bl_count = [];
+ 
+ // heap used to build the Huffman trees
+ that.heap = [];
+ 
+ dyn_ltree = [];
+ dyn_dtree = [];
+ bl_tree = [];
+
+ // Initialize the LZ77 matcher state: clear the hash head table and load
+ // the tuning parameters for the current compression level.
+ function lm_init() {
+ var i;
+ window_size = 2 * w_size;
+ 
+ // Clear the hash chain heads.
+ head[hash_size - 1] = 0;
+ for (i = 0; i < hash_size - 1; i++) {
+ head[i] = 0;
+ }
+ 
+ // Set the default configuration parameters:
+ max_lazy_match = config_table[level].max_lazy;
+ good_match = config_table[level].good_length;
+ nice_match = config_table[level].nice_length;
+ max_chain_length = config_table[level].max_chain;
+ 
+ strstart = 0;
+ block_start = 0;
+ lookahead = 0;
+ match_length = prev_length = MIN_MATCH - 1;
+ match_available = 0;
+ ins_h = 0;
+ }
+ 
+ // Reset the dynamic tree frequencies and per-block counters for a new block.
+ function init_block() {
+ var i;
+ // Initialize the trees.
+ for (i = 0; i < L_CODES; i++)
+ dyn_ltree[i * 2] = 0;
+ for (i = 0; i < D_CODES; i++)
+ dyn_dtree[i * 2] = 0;
+ for (i = 0; i < BL_CODES; i++)
+ bl_tree[i * 2] = 0;
+ 
+ dyn_ltree[END_BLOCK * 2] = 1;
+ that.opt_len = that.static_len = 0;
+ last_lit = matches = 0;
+ }
+ 
+ // Initialize the tree data structures for a new zlib stream.
+ function tr_init() {
+ 
+ l_desc.dyn_tree = dyn_ltree;
+ l_desc.stat_desc = StaticTree.static_l_desc;
+ 
+ d_desc.dyn_tree = dyn_dtree;
+ d_desc.stat_desc = StaticTree.static_d_desc;
+ 
+ bl_desc.dyn_tree = bl_tree;
+ bl_desc.stat_desc = StaticTree.static_bl_desc;
+ 
+ bi_buf = 0;
+ bi_valid = 0;
+ last_eob_len = 8; // enough lookahead for inflate
+ 
+ // Initialize the first block of the first file:
+ init_block();
+ }
+
+ // Restore the heap property by moving down the tree starting at node k,
+ // exchanging a node with the smallest of its two sons if necessary,
+ // stopping when the heap property is re-established (each father smaller
+ // than its two sons). Node ordering is defined by smaller() above:
+ // frequency first, depth as tie-breaker.
+ that.pqdownheap = function(tree, // the tree to restore
+ k // node to move down
+ ) {
+ var heap = that.heap;
+ var v = heap[k];
+ var j = k << 1; // left son of k
+ while (j <= that.heap_len) {
+ // Set j to the smallest of the two sons:
+ if (j < that.heap_len && smaller(tree, heap[j + 1], heap[j], that.depth)) {
+ j++;
+ }
+ // Exit if v is smaller than both sons
+ if (smaller(tree, v, heap[j], that.depth))
+ break;
+ 
+ // Exchange v with the smallest son
+ heap[k] = heap[j];
+ k = j;
+ // And continue down the tree, setting j to the left son of k
+ j <<= 1;
+ }
+ heap[k] = v;
+ };
+
+ // Scan a literal or distance tree to determine the frequencies of the codes
+ // in the bit length tree. Runs of equal lengths are counted so that the
+ // repeat codes REP_3_6, REPZ_3_10 and REPZ_11_138 can be used later by
+ // send_tree.
+ function scan_tree(tree,// the tree to be scanned
+ max_code // and its largest code of non zero frequency
+ ) {
+ var n; // iterates over all tree elements
+ var prevlen = -1; // last emitted length
+ var curlen; // length of current code
+ var nextlen = tree[0 * 2 + 1]; // length of next code
+ var count = 0; // repeat count of the current code
+ var max_count = 7; // max repeat count
+ var min_count = 4; // min repeat count
+ 
+ if (nextlen === 0) {
+ max_count = 138;
+ min_count = 3;
+ }
+ tree[(max_code + 1) * 2 + 1] = 0xffff; // guard
+ 
+ for (n = 0; n <= max_code; n++) {
+ curlen = nextlen;
+ nextlen = tree[(n + 1) * 2 + 1];
+ if (++count < max_count && curlen == nextlen) {
+ continue;
+ } else if (count < min_count) {
+ bl_tree[curlen * 2] += count;
+ } else if (curlen !== 0) {
+ if (curlen != prevlen)
+ bl_tree[curlen * 2]++;
+ bl_tree[REP_3_6 * 2]++;
+ } else if (count <= 10) {
+ bl_tree[REPZ_3_10 * 2]++;
+ } else {
+ bl_tree[REPZ_11_138 * 2]++;
+ }
+ count = 0;
+ prevlen = curlen;
+ if (nextlen === 0) {
+ max_count = 138;
+ min_count = 3;
+ } else if (curlen == nextlen) {
+ max_count = 6;
+ min_count = 3;
+ } else {
+ max_count = 7;
+ min_count = 4;
+ }
+ }
+ }
+ 
+ // Construct the Huffman tree for the bit lengths and return the index in
+ // bl_order of the last bit length code to send.
+ function build_bl_tree() {
+ var max_blindex; // index of last bit length code of non zero freq
+ 
+ // Determine the bit length frequencies for literal and distance trees
+ scan_tree(dyn_ltree, l_desc.max_code);
+ scan_tree(dyn_dtree, d_desc.max_code);
+ 
+ // Build the bit length tree:
+ bl_desc.build_tree(that);
+ // opt_len now includes the length of the tree representations, except
+ // the lengths of the bit lengths codes and the 5+5+4 bits for the
+ // counts.
+ 
+ // Determine the number of bit length codes to send. The pkzip format
+ // requires that at least 4 bit length codes be sent. (appnote.txt says
+ // 3 but the actual value used is 4.)
+ for (max_blindex = BL_CODES - 1; max_blindex >= 3; max_blindex--) {
+ if (bl_tree[Tree.bl_order[max_blindex] * 2 + 1] !== 0)
+ break;
+ }
+ // Update opt_len to include the bit length tree and counts
+ that.opt_len += 3 * (max_blindex + 1) + 5 + 5 + 4;
+ 
+ return max_blindex;
+ }
+
+ // Output a byte on the stream.
+ // IN assertion: there is enough room in pending_buf.
+ function put_byte(p) {
+ that.pending_buf[that.pending++] = p;
+ }
+ 
+ // Output a 16-bit value, least significant byte first.
+ function put_short(w) {
+ put_byte(w & 0xff);
+ put_byte((w >>> 8) & 0xff);
+ }
+ 
+ // Output a 16-bit value, most significant byte first.
+ function putShortMSB(b) {
+ put_byte((b >> 8) & 0xff);
+ put_byte((b & 0xff) & 0xff);
+ }
+ 
+ // Insert `length` bits of `value` into the 16-bit bit buffer bi_buf
+ // (LSB-first), flushing a full 16-bit word to pending_buf when the buffer
+ // would overflow.
+ function send_bits(value, length) {
+ var val, len = length;
+ if (bi_valid > Buf_size - len) {
+ val = value;
+ // bi_buf |= (val << bi_valid);
+ bi_buf |= ((val << bi_valid) & 0xffff);
+ put_short(bi_buf);
+ bi_buf = val >>> (Buf_size - bi_valid);
+ bi_valid += len - Buf_size;
+ } else {
+ // bi_buf |= (value) << bi_valid;
+ bi_buf |= (((value) << bi_valid) & 0xffff);
+ bi_valid += len;
+ }
+ }
+ 
+ // Send the code for symbol c from `tree`, where tree[c*2] is the code and
+ // tree[c*2 + 1] is its length in bits.
+ function send_code(c, tree) {
+ var c2 = c * 2;
+ send_bits(tree[c2] & 0xffff, tree[c2 + 1] & 0xffff);
+ }
+
+ // Send a literal or distance tree in compressed form, using the codes in
+ // bl_tree. Mirrors the run-length logic of scan_tree, but emits the codes
+ // instead of counting them.
+ function send_tree(tree,// the tree to be sent
+ max_code // and its largest code of non zero frequency
+ ) {
+ var n; // iterates over all tree elements
+ var prevlen = -1; // last emitted length
+ var curlen; // length of current code
+ var nextlen = tree[0 * 2 + 1]; // length of next code
+ var count = 0; // repeat count of the current code
+ var max_count = 7; // max repeat count
+ var min_count = 4; // min repeat count
+ 
+ if (nextlen === 0) {
+ max_count = 138;
+ min_count = 3;
+ }
+ 
+ for (n = 0; n <= max_code; n++) {
+ curlen = nextlen;
+ nextlen = tree[(n + 1) * 2 + 1];
+ if (++count < max_count && curlen == nextlen) {
+ continue;
+ } else if (count < min_count) {
+ do {
+ send_code(curlen, bl_tree);
+ } while (--count !== 0);
+ } else if (curlen !== 0) {
+ if (curlen != prevlen) {
+ send_code(curlen, bl_tree);
+ count--;
+ }
+ send_code(REP_3_6, bl_tree);
+ send_bits(count - 3, 2);
+ } else if (count <= 10) {
+ send_code(REPZ_3_10, bl_tree);
+ send_bits(count - 3, 3);
+ } else {
+ send_code(REPZ_11_138, bl_tree);
+ send_bits(count - 11, 7);
+ }
+ count = 0;
+ prevlen = curlen;
+ if (nextlen === 0) {
+ max_count = 138;
+ min_count = 3;
+ } else if (curlen == nextlen) {
+ max_count = 6;
+ min_count = 3;
+ } else {
+ max_count = 7;
+ min_count = 4;
+ }
+ }
+ }
+ 
+ // Send the header for a block using dynamic Huffman trees: the counts, the
+ // lengths of the bit length codes, the literal tree and the distance tree.
+ // IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
+ function send_all_trees(lcodes, dcodes, blcodes) {
+ var rank; // index in bl_order
+ 
+ send_bits(lcodes - 257, 5); // not +255 as stated in appnote.txt
+ send_bits(dcodes - 1, 5);
+ send_bits(blcodes - 4, 4); // not -3 as stated in appnote.txt
+ for (rank = 0; rank < blcodes; rank++) {
+ send_bits(bl_tree[Tree.bl_order[rank] * 2 + 1], 3);
+ }
+ send_tree(dyn_ltree, lcodes - 1); // literal tree
+ send_tree(dyn_dtree, dcodes - 1); // distance tree
+ }
+
+ // Flush the bit buffer, keeping at most 7 bits in it.
+ function bi_flush() {
+ if (bi_valid == 16) {
+ put_short(bi_buf);
+ bi_buf = 0;
+ bi_valid = 0;
+ } else if (bi_valid >= 8) {
+ put_byte(bi_buf & 0xff);
+ bi_buf >>>= 8;
+ bi_valid -= 8;
+ }
+ }
+ 
+ // Send one empty static block to give enough lookahead for inflate.
+ // This takes 10 bits, of which 7 may remain in the bit buffer.
+ // The current inflate code requires 9 bits of lookahead. If the
+ // last two codes for the previous block (real code plus EOB) were coded
+ // on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode
+ // the last real code. In this case we send two empty static blocks instead
+ // of one. (There are no problems if the previous block is stored or fixed.)
+ // To simplify the code, we assume the worst case of last real code encoded
+ // on one bit only.
+ function _tr_align() {
+ send_bits(STATIC_TREES << 1, 3);
+ send_code(END_BLOCK, StaticTree.static_ltree);
+ 
+ bi_flush();
+ 
+ // Of the 10 bits for the empty block, we have already sent
+ // (10 - bi_valid) bits. The lookahead for the last real code (before
+ // the EOB of the previous block) was thus at least one plus the length
+ // of the EOB plus what we have just sent of the empty static block.
+ if (1 + last_eob_len + 10 - bi_valid < 9) {
+ send_bits(STATIC_TREES << 1, 3);
+ send_code(END_BLOCK, StaticTree.static_ltree);
+ bi_flush();
+ }
+ last_eob_len = 7;
+ }
+
+ // Save the match info and tally the frequency counts. Return true if
+ // the current block must be flushed. The distance is stored big-endian
+ // into pending_buf at d_buf, the literal/length byte at l_buf
+ // (compress_block reads them back with the same layout).
+ function _tr_tally(dist, // distance of matched string
+ lc // match length-MIN_MATCH or unmatched char (if dist==0)
+ ) {
+ var out_length, in_length, dcode;
+ that.pending_buf[d_buf + last_lit * 2] = (dist >>> 8) & 0xff;
+ that.pending_buf[d_buf + last_lit * 2 + 1] = dist & 0xff;
+ 
+ that.pending_buf[l_buf + last_lit] = lc & 0xff;
+ last_lit++;
+ 
+ if (dist === 0) {
+ // lc is the unmatched char
+ dyn_ltree[lc * 2]++;
+ } else {
+ matches++;
+ // Here, lc is the match length - MIN_MATCH
+ dist--; // dist = match distance - 1
+ dyn_ltree[(Tree._length_code[lc] + LITERALS + 1) * 2]++;
+ dyn_dtree[Tree.d_code(dist) * 2]++;
+ }
+ 
+ // Every 0x2000 symbols (levels > 2), estimate whether flushing early
+ // would pay off.
+ if ((last_lit & 0x1fff) === 0 && level > 2) {
+ // Compute an upper bound for the compressed length
+ out_length = last_lit * 8;
+ in_length = strstart - block_start;
+ for (dcode = 0; dcode < D_CODES; dcode++) {
+ out_length += dyn_dtree[dcode * 2] * (5 + Tree.extra_dbits[dcode]);
+ }
+ out_length >>>= 3;
+ if ((matches < Math.floor(last_lit / 2)) && out_length < Math.floor(in_length / 2))
+ return true;
+ }
+ 
+ return (last_lit == lit_bufsize - 1);
+ // We avoid equality with lit_bufsize because of wraparound at 64K
+ // on 16 bit machines and because stored blocks are restricted to
+ // 64K-1 bytes.
+ }
+
+ // Send the block data compressed using the given Huffman trees, replaying
+ // the (distance, literal/length) pairs recorded by _tr_tally.
+ function compress_block(ltree, dtree) {
+ var dist; // distance of matched string
+ var lc; // match length or unmatched char (if dist === 0)
+ var lx = 0; // running index in l_buf
+ var code; // the code to send
+ var extra; // number of extra bits to send
+ 
+ if (last_lit !== 0) {
+ do {
+ dist = ((that.pending_buf[d_buf + lx * 2] << 8) & 0xff00) | (that.pending_buf[d_buf + lx * 2 + 1] & 0xff);
+ lc = (that.pending_buf[l_buf + lx]) & 0xff;
+ lx++;
+ 
+ if (dist === 0) {
+ send_code(lc, ltree); // send a literal byte
+ } else {
+ // Here, lc is the match length - MIN_MATCH
+ code = Tree._length_code[lc];
+ 
+ send_code(code + LITERALS + 1, ltree); // send the length code
+ extra = Tree.extra_lbits[code];
+ if (extra !== 0) {
+ lc -= Tree.base_length[code];
+ send_bits(lc, extra); // send the extra length bits
+ }
+ dist--; // dist is now the match distance - 1
+ code = Tree.d_code(dist);
+ 
+ send_code(code, dtree); // send the distance code
+ extra = Tree.extra_dbits[code];
+ if (extra !== 0) {
+ dist -= Tree.base_dist[code];
+ send_bits(dist, extra); // send the extra distance bits
+ }
+ } // literal or match pair ?
+ 
+ // Check that the overlay between pending_buf and d_buf+l_buf is
+ // ok:
+ } while (lx < last_lit);
+ }
+ 
+ send_code(END_BLOCK, ltree);
+ last_eob_len = ltree[END_BLOCK * 2 + 1];
+ }
+ 
+ // Flush the bit buffer and align the output on a byte boundary
+ function bi_windup() {
+ if (bi_valid > 8) {
+ put_short(bi_buf);
+ } else if (bi_valid > 0) {
+ put_byte(bi_buf & 0xff);
+ }
+ bi_buf = 0;
+ bi_valid = 0;
+ }
+
+ // Copy a stored block, storing first the length and its
+ // one's complement if requested.
+ function copy_block(buf, // the input data (a window offset)
+ len, // its length
+ header // true if block header must be written
+ ) {
+ bi_windup(); // align on byte boundary
+ last_eob_len = 8; // enough lookahead for inflate
+ 
+ if (header) {
+ put_short(len);
+ put_short(~len);
+ }
+ 
+ // Copy len bytes of the window straight into the pending output.
+ that.pending_buf.set(window.subarray(buf, buf + len), that.pending);
+ that.pending += len;
+ }
+ 
+ // Send a stored block: 3-bit block header followed by the raw bytes.
+ function _tr_stored_block(buf, // input block
+ stored_len, // length of input block
+ eof // true if this is the last block for a file
+ ) {
+ send_bits((STORED_BLOCK << 1) + (eof ? 1 : 0), 3); // send block type
+ copy_block(buf, stored_len, true); // with header
+ }
+
+ // Determine the best encoding for the current block: dynamic trees, static
+ // trees or store, and output the encoded block to the zip file.
+ function _tr_flush_block(buf, // input block, or NULL if too old
+ stored_len, // length of input block
+ eof // true if this is the last block for a file
+ ) {
+ var opt_lenb, static_lenb;// opt_len and static_len in bytes
+ var max_blindex = 0; // index of last bit length code of non zero freq
+ 
+ // Build the Huffman trees unless a stored block is forced
+ if (level > 0) {
+ // Construct the literal and distance trees
+ l_desc.build_tree(that);
+ 
+ d_desc.build_tree(that);
+ 
+ // At this point, opt_len and static_len are the total bit lengths of
+ // the compressed block data, excluding the tree representations.
+ 
+ // Build the bit length tree for the above two trees, and get the
+ // index in bl_order of the last bit length code to send.
+ max_blindex = build_bl_tree();
+ 
+ // Determine the best encoding. Compute first the block length in
+ // bytes.
+ opt_lenb = (that.opt_len + 3 + 7) >>> 3;
+ static_lenb = (that.static_len + 3 + 7) >>> 3;
+ 
+ if (static_lenb <= opt_lenb)
+ opt_lenb = static_lenb;
+ } else {
+ opt_lenb = static_lenb = stored_len + 5; // force a stored block
+ }
+ 
+ if ((stored_len + 4 <= opt_lenb) && buf != -1) {
+ // 4: two words for the lengths
+ // The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.
+ // Otherwise we can't have processed more than WSIZE input bytes since
+ // the last block flush, because compression would have been
+ // successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
+ // transform a block into a stored block.
+ _tr_stored_block(buf, stored_len, eof);
+ } else if (static_lenb == opt_lenb) {
+ send_bits((STATIC_TREES << 1) + (eof ? 1 : 0), 3);
+ compress_block(StaticTree.static_ltree, StaticTree.static_dtree);
+ } else {
+ send_bits((DYN_TREES << 1) + (eof ? 1 : 0), 3);
+ send_all_trees(l_desc.max_code + 1, d_desc.max_code + 1, max_blindex + 1);
+ compress_block(dyn_ltree, dyn_dtree);
+ }
+ 
+ // The above check is made mod 2^32, for files larger than 512 MB
+ // and uLong implemented on 32 bits.
+ 
+ init_block();
+ 
+ if (eof) {
+ bi_windup();
+ }
+ }
+ 
+ // Flush the current block (everything from block_start up to strstart)
+ // and push the pending output to the stream.
+ function flush_block_only(eof) {
+ _tr_flush_block(block_start >= 0 ? block_start : -1, strstart - block_start, eof);
+ block_start = strstart;
+ strm.flush_pending();
+ }
+
+ // Fill the window when the lookahead becomes insufficient.
+ // Updates strstart and lookahead.
+ //
+ // IN assertion: lookahead < MIN_LOOKAHEAD
+ // OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
+ // At least one byte has been read, or avail_in === 0; reads are
+ // performed for at least two bytes (required for the zip translate_eol
+ // option -- not supported here).
+ function fill_window() {
+ var n, m;
+ var p;
+ var more; // Amount of free space at the end of the window.
+ 
+ do {
+ more = (window_size - lookahead - strstart);
+ 
+ // Deal with !@#$% 64K limit:
+ if (more === 0 && strstart === 0 && lookahead === 0) {
+ more = w_size;
+ } else if (more == -1) {
+ // Very unlikely, but possible on 16 bit machine if strstart == 0
+ // and lookahead == 1 (input done one byte at a time)
+ more--;
+ 
+ // If the window is almost full and there is insufficient lookahead,
+ // move the upper half to the lower one to make room in the
+ // upper half.
+ } else if (strstart >= w_size + w_size - MIN_LOOKAHEAD) {
+ window.set(window.subarray(w_size, w_size + w_size), 0);
+ 
+ match_start -= w_size;
+ strstart -= w_size; // we now have strstart >= MAX_DIST
+ block_start -= w_size;
+ 
+ // Slide the hash table (could be avoided with 32 bit values
+ // at the expense of memory usage). We slide even when level == 0
+ // to keep the hash table consistent if we switch back to level > 0
+ // later. (Using level 0 permanently is not an optimal usage of
+ // zlib, so we don't care about this pathological case.)
+ 
+ n = hash_size;
+ p = n;
+ do {
+ m = (head[--p] & 0xffff);
+ head[p] = (m >= w_size ? m - w_size : 0);
+ } while (--n !== 0);
+ 
+ n = w_size;
+ p = n;
+ do {
+ m = (prev[--p] & 0xffff);
+ prev[p] = (m >= w_size ? m - w_size : 0);
+ // If n is not on any hash chain, prev[n] is garbage but
+ // its value will never be used.
+ } while (--n !== 0);
+ more += w_size;
+ }
+ 
+ if (strm.avail_in === 0)
+ return;
+ 
+ // If there was no sliding:
+ // strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
+ // more == window_size - lookahead - strstart
+ // => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
+ // => more >= window_size - 2*WSIZE + 2
+ // In the BIG_MEM or MMAP case (not yet supported),
+ // window_size == input_size + MIN_LOOKAHEAD &&
+ // strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
+ // Otherwise, window_size == 2*WSIZE so more >= 2.
+ // If there was sliding, more >= WSIZE. So in all cases, more >= 2.
+ 
+ n = strm.read_buf(window, strstart + lookahead, more);
+ lookahead += n;
+ 
+ // Initialize the hash value now that we have some input:
+ if (lookahead >= MIN_MATCH) {
+ ins_h = window[strstart] & 0xff;
+ ins_h = (((ins_h) << hash_shift) ^ (window[strstart + 1] & 0xff)) & hash_mask;
+ }
+ // If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
+ // but this is not important since only literal bytes will be
+ // emitted.
+ } while (lookahead < MIN_LOOKAHEAD && strm.avail_in !== 0);
+ }
+
+ // Copy without compression as much as possible from the input stream,
+ // return the current block state.
+ // This function does not insert new strings in the dictionary since
+ // uncompressible data is probably not useful. This function is used
+ // only for the level=0 compression option.
+ // NOTE: this function should be optimized to avoid extra copying from
+ // window to pending_buf.
+ function deflate_stored(flush) {
+ // Stored blocks are limited to 0xffff bytes, pending_buf is limited
+ // to pending_buf_size, and each stored block has a 5 byte header:
+ 
+ var max_block_size = 0xffff;
+ var max_start;
+ 
+ if (max_block_size > pending_buf_size - 5) {
+ max_block_size = pending_buf_size - 5;
+ }
+ 
+ // Copy as much as possible from input to output:
+ while (true) {
+ // Fill the window as much as possible:
+ if (lookahead <= 1) {
+ fill_window();
+ if (lookahead === 0 && flush == Z_NO_FLUSH)
+ return NeedMore;
+ if (lookahead === 0)
+ break; // flush the current block
+ }
+ 
+ strstart += lookahead;
+ lookahead = 0;
+ 
+ // Emit a stored block if pending_buf will be full:
+ max_start = block_start + max_block_size;
+ if (strstart === 0 || strstart >= max_start) {
+ // strstart === 0 is possible when wraparound on 16-bit machine
+ lookahead = (strstart - max_start);
+ strstart = max_start;
+ 
+ flush_block_only(false);
+ if (strm.avail_out === 0)
+ return NeedMore;
+ 
+ }
+ 
+ // Flush if we may have to slide, otherwise block_start may become
+ // negative and the data will be gone:
+ if (strstart - block_start >= w_size - MIN_LOOKAHEAD) {
+ flush_block_only(false);
+ if (strm.avail_out === 0)
+ return NeedMore;
+ }
+ }
+ 
+ flush_block_only(flush == Z_FINISH);
+ if (strm.avail_out === 0)
+ return (flush == Z_FINISH) ? FinishStarted : NeedMore;
+ 
+ return flush == Z_FINISH ? FinishDone : BlockDone;
+ }
+
+ // Find the longest match starting at strstart for the hash chain headed by
+ // cur_match. Returns the match length (capped at lookahead) and sets
+ // match_start as a side effect when a better match is found.
+ function longest_match(cur_match) {
+ var chain_length = max_chain_length; // max hash chain length
+ var scan = strstart; // current string
+ var match; // matched string
+ var len; // length of current match
+ var best_len = prev_length; // best match length so far
+ var limit = strstart > (w_size - MIN_LOOKAHEAD) ? strstart - (w_size - MIN_LOOKAHEAD) : 0;
+ var _nice_match = nice_match;
+ 
+ // Stop when cur_match becomes <= limit. To simplify the code,
+ // we prevent matches with the string of window index 0.
+ 
+ var wmask = w_mask;
+ 
+ var strend = strstart + MAX_MATCH;
+ var scan_end1 = window[scan + best_len - 1];
+ var scan_end = window[scan + best_len];
+ 
+ // The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple
+ // of 16. It is easy to get rid of this optimization if necessary.
+ 
+ // Do not waste too much time if we already have a good match:
+ if (prev_length >= good_match) {
+ chain_length >>= 2;
+ }
+ 
+ // Do not look for matches beyond the end of the input. This is
+ // necessary to make deflate deterministic.
+ if (_nice_match > lookahead)
+ _nice_match = lookahead;
+ 
+ do {
+ match = cur_match;
+ 
+ // Skip to next match if the match length cannot increase
+ // or if the match length is less than 2:
+ if (window[match + best_len] != scan_end || window[match + best_len - 1] != scan_end1 || window[match] != window[scan]
+ || window[++match] != window[scan + 1])
+ continue;
+ 
+ // The check at best_len-1 can be removed because it will be made
+ // again later. (This heuristic is not always a win.)
+ // It is not necessary to compare scan[2] and match[2] since they
+ // are always equal when the other bytes match, given that
+ // the hash keys are equal and that HASH_BITS >= 8.
+ scan += 2;
+ match++;
+ 
+ // We check for insufficient lookahead only every 8th comparison;
+ // the 256th check will be made at strstart+258.
+ do {
+ } while (window[++scan] == window[++match] && window[++scan] == window[++match] && window[++scan] == window[++match]
+ && window[++scan] == window[++match] && window[++scan] == window[++match] && window[++scan] == window[++match]
+ && window[++scan] == window[++match] && window[++scan] == window[++match] && scan < strend);
+ 
+ len = MAX_MATCH - (strend - scan);
+ scan = strend - MAX_MATCH;
+ 
+ if (len > best_len) {
+ match_start = cur_match;
+ best_len = len;
+ if (len >= _nice_match)
+ break;
+ scan_end1 = window[scan + best_len - 1];
+ scan_end = window[scan + best_len];
+ }
+ 
+ } while ((cur_match = (prev[cur_match & wmask] & 0xffff)) > limit && --chain_length !== 0);
+ 
+ if (best_len <= lookahead)
+ return best_len;
+ return lookahead;
+ }
+
+		// Compress as much as possible from the input stream, return the current
+		// block state.
+		// This function does not perform lazy evaluation of matches and inserts
+		// new strings in the dictionary only for unmatched strings or for short
+		// matches. It is used only for the fast compression options.
+		// Param flush: one of the Z_* flush modes. Returns a block-state code
+		// (NeedMore / BlockDone / FinishStarted / FinishDone) consumed by deflate().
+		// Reads and updates closure state: lookahead, strstart, ins_h, the
+		// head/prev hash chains, match_length and match_start.
+		function deflate_fast(flush) {
+			// short hash_head = 0; // head of the hash chain
+			var hash_head = 0; // head of the hash chain
+			var bflush; // set if current block must be flushed
+
+			while (true) {
+				// Make sure that we always have enough lookahead, except
+				// at the end of the input file. We need MAX_MATCH bytes
+				// for the next match, plus MIN_MATCH bytes to insert the
+				// string following the next match.
+				if (lookahead < MIN_LOOKAHEAD) {
+					fill_window();
+					if (lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
+						return NeedMore;
+					}
+					if (lookahead === 0)
+						break; // flush the current block
+				}
+
+				// Insert the string window[strstart .. strstart+2] in the
+				// dictionary, and set hash_head to the head of the hash chain:
+				if (lookahead >= MIN_MATCH) {
+					ins_h = (((ins_h) << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
+
+					// prev[strstart&w_mask]=hash_head=head[ins_h];
+					hash_head = (head[ins_h] & 0xffff);
+					prev[strstart & w_mask] = head[ins_h];
+					head[ins_h] = strstart;
+				}
+
+				// Find the longest match, discarding those <= prev_length.
+				// At this point we have always match_length < MIN_MATCH
+
+				if (hash_head !== 0 && ((strstart - hash_head) & 0xffff) <= w_size - MIN_LOOKAHEAD) {
+					// To simplify the code, we prevent matches with the string
+					// of window index 0 (in particular we have to avoid a match
+					// of the string with itself at the start of the input file).
+					if (strategy != Z_HUFFMAN_ONLY) {
+						match_length = longest_match(hash_head);
+					}
+					// longest_match() sets match_start
+				}
+				if (match_length >= MIN_MATCH) {
+					// check_match(strstart, match_start, match_length);
+
+					bflush = _tr_tally(strstart - match_start, match_length - MIN_MATCH);
+
+					lookahead -= match_length;
+
+					// Insert new strings in the hash table only if the match length
+					// is not too large. This saves time but degrades compression.
+					if (match_length <= max_lazy_match && lookahead >= MIN_MATCH) {
+						match_length--; // string at strstart already in hash table
+						do {
+							strstart++;
+
+							ins_h = ((ins_h << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
+							// prev[strstart&w_mask]=hash_head=head[ins_h];
+							hash_head = (head[ins_h] & 0xffff);
+							prev[strstart & w_mask] = head[ins_h];
+							head[ins_h] = strstart;
+
+							// strstart never exceeds WSIZE-MAX_MATCH, so there are
+							// always MIN_MATCH bytes ahead.
+						} while (--match_length !== 0);
+						strstart++;
+					} else {
+						strstart += match_length;
+						match_length = 0;
+						ins_h = window[strstart] & 0xff;
+
+						ins_h = (((ins_h) << hash_shift) ^ (window[strstart + 1] & 0xff)) & hash_mask;
+						// If lookahead < MIN_MATCH, ins_h is garbage, but it does
+						// not
+						// matter since it will be recomputed at next deflate call.
+					}
+				} else {
+					// No match, output a literal byte
+
+					bflush = _tr_tally(0, window[strstart] & 0xff);
+					lookahead--;
+					strstart++;
+				}
+				if (bflush) {
+
+					flush_block_only(false);
+					if (strm.avail_out === 0)
+						return NeedMore;
+				}
+			}
+
+			flush_block_only(flush == Z_FINISH);
+			if (strm.avail_out === 0) {
+				if (flush == Z_FINISH)
+					return FinishStarted;
+				else
+					return NeedMore;
+			}
+			return flush == Z_FINISH ? FinishDone : BlockDone;
+		}
+
+		// Same as above, but achieves better compression. We use a lazy
+		// evaluation for matches: a match is finally adopted only if there is
+		// no better match at the next window position.
+		// Param flush: Z_* flush mode. Returns the same block-state codes as
+		// deflate_fast(). Additionally tracks prev_length/prev_match/
+		// match_available to defer emitting a match by one position.
+		function deflate_slow(flush) {
+			// short hash_head = 0; // head of hash chain
+			var hash_head = 0; // head of hash chain
+			var bflush; // set if current block must be flushed
+			var max_insert;
+
+			// Process the input block.
+			while (true) {
+				// Make sure that we always have enough lookahead, except
+				// at the end of the input file. We need MAX_MATCH bytes
+				// for the next match, plus MIN_MATCH bytes to insert the
+				// string following the next match.
+
+				if (lookahead < MIN_LOOKAHEAD) {
+					fill_window();
+					if (lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
+						return NeedMore;
+					}
+					if (lookahead === 0)
+						break; // flush the current block
+				}
+
+				// Insert the string window[strstart .. strstart+2] in the
+				// dictionary, and set hash_head to the head of the hash chain:
+
+				if (lookahead >= MIN_MATCH) {
+					ins_h = (((ins_h) << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
+					// prev[strstart&w_mask]=hash_head=head[ins_h];
+					hash_head = (head[ins_h] & 0xffff);
+					prev[strstart & w_mask] = head[ins_h];
+					head[ins_h] = strstart;
+				}
+
+				// Find the longest match, discarding those <= prev_length.
+				prev_length = match_length;
+				prev_match = match_start;
+				match_length = MIN_MATCH - 1;
+
+				if (hash_head !== 0 && prev_length < max_lazy_match && ((strstart - hash_head) & 0xffff) <= w_size - MIN_LOOKAHEAD) {
+					// To simplify the code, we prevent matches with the string
+					// of window index 0 (in particular we have to avoid a match
+					// of the string with itself at the start of the input file).
+
+					if (strategy != Z_HUFFMAN_ONLY) {
+						match_length = longest_match(hash_head);
+					}
+					// longest_match() sets match_start
+
+					if (match_length <= 5 && (strategy == Z_FILTERED || (match_length == MIN_MATCH && strstart - match_start > 4096))) {
+
+						// If prev_match is also MIN_MATCH, match_start is garbage
+						// but we will ignore the current match anyway.
+						match_length = MIN_MATCH - 1;
+					}
+				}
+
+				// If there was a match at the previous step and the current
+				// match is not better, output the previous match:
+				if (prev_length >= MIN_MATCH && match_length <= prev_length) {
+					max_insert = strstart + lookahead - MIN_MATCH;
+					// Do not insert strings in hash table beyond this.
+
+					// check_match(strstart-1, prev_match, prev_length);
+
+					bflush = _tr_tally(strstart - 1 - prev_match, prev_length - MIN_MATCH);
+
+					// Insert in hash table all strings up to the end of the match.
+					// strstart-1 and strstart are already inserted. If there is not
+					// enough lookahead, the last two strings are not inserted in
+					// the hash table.
+					lookahead -= prev_length - 1;
+					prev_length -= 2;
+					do {
+						if (++strstart <= max_insert) {
+							ins_h = (((ins_h) << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
+							// prev[strstart&w_mask]=hash_head=head[ins_h];
+							hash_head = (head[ins_h] & 0xffff);
+							prev[strstart & w_mask] = head[ins_h];
+							head[ins_h] = strstart;
+						}
+					} while (--prev_length !== 0);
+					match_available = 0;
+					match_length = MIN_MATCH - 1;
+					strstart++;
+
+					if (bflush) {
+						flush_block_only(false);
+						if (strm.avail_out === 0)
+							return NeedMore;
+					}
+				} else if (match_available !== 0) {
+
+					// If there was no match at the previous position, output a
+					// single literal. If there was a match but the current match
+					// is longer, truncate the previous match to a single literal.
+
+					bflush = _tr_tally(0, window[strstart - 1] & 0xff);
+
+					if (bflush) {
+						flush_block_only(false);
+					}
+					strstart++;
+					lookahead--;
+					if (strm.avail_out === 0)
+						return NeedMore;
+				} else {
+					// There is no previous match to compare with, wait for
+					// the next step to decide.
+
+					match_available = 1;
+					strstart++;
+					lookahead--;
+				}
+			}
+
+			// Emit the pending literal left over from the lazy one-step delay.
+			if (match_available !== 0) {
+				bflush = _tr_tally(0, window[strstart - 1] & 0xff);
+				match_available = 0;
+			}
+			flush_block_only(flush == Z_FINISH);
+
+			if (strm.avail_out === 0) {
+				if (flush == Z_FINISH)
+					return FinishStarted;
+				else
+					return NeedMore;
+			}
+
+			return flush == Z_FINISH ? FinishDone : BlockDone;
+		}
+
+		// Reset the passed stream's counters and this Deflate instance's
+		// pending-output/trees/longest-match state so the same instance can
+		// compress a fresh stream. Always returns Z_OK.
+		// NOTE(review): the parameter intentionally shadows the closure-level
+		// strm; only the passed stream's totals and msg are touched here.
+		function deflateReset(strm) {
+			strm.total_in = strm.total_out = 0;
+			strm.msg = null; //
+
+			that.pending = 0;
+			that.pending_out = 0;
+
+			status = BUSY_STATE;
+
+			last_flush = Z_NO_FLUSH;
+
+			tr_init();
+			lm_init();
+			return Z_OK;
+		}
+
+		// Initialize the deflate state and allocate the window, hash chains and
+		// pending buffer. Parameters after strm are optional: _method defaults
+		// to Z_DEFLATED, memLevel to DEF_MEM_LEVEL, _strategy to
+		// Z_DEFAULT_STRATEGY, and Z_DEFAULT_COMPRESSION maps to level 6.
+		// bits is the window-size exponent (must be 9..15). Returns the result
+		// of deflateReset (Z_OK), or Z_STREAM_ERROR on invalid arguments.
+		that.deflateInit = function(strm, _level, bits, _method, memLevel, _strategy) {
+			if (!_method)
+				_method = Z_DEFLATED;
+			if (!memLevel)
+				memLevel = DEF_MEM_LEVEL;
+			if (!_strategy)
+				_strategy = Z_DEFAULT_STRATEGY;
+
+			// byte[] my_version=ZLIB_VERSION;
+
+			//
+			// if (!version || version[0] != my_version[0]
+			// || stream_size != sizeof(z_stream)) {
+			// return Z_VERSION_ERROR;
+			// }
+
+			strm.msg = null;
+
+			if (_level == Z_DEFAULT_COMPRESSION)
+				_level = 6;
+
+			if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || _method != Z_DEFLATED || bits < 9 || bits > 15 || _level < 0 || _level > 9 || _strategy < 0
+					|| _strategy > Z_HUFFMAN_ONLY) {
+				return Z_STREAM_ERROR;
+			}
+
+			strm.dstate = that;
+
+			w_bits = bits;
+			w_size = 1 << w_bits;
+			w_mask = w_size - 1;
+
+			hash_bits = memLevel + 7;
+			hash_size = 1 << hash_bits;
+			hash_mask = hash_size - 1;
+			hash_shift = Math.floor((hash_bits + MIN_MATCH - 1) / MIN_MATCH);
+
+			window = new Uint8Array(w_size * 2);
+			prev = [];
+			head = [];
+
+			lit_bufsize = 1 << (memLevel + 6); // 16K elements by default
+
+			// We overlay pending_buf and d_buf+l_buf. This works since the average
+			// output size for (length,distance) codes is <= 24 bits.
+			that.pending_buf = new Uint8Array(lit_bufsize * 4);
+			pending_buf_size = lit_bufsize * 4;
+
+			d_buf = Math.floor(lit_bufsize / 2);
+			l_buf = (1 + 2) * lit_bufsize;
+
+			level = _level;
+
+			strategy = _strategy;
+			method = _method & 0xff;
+
+			return deflateReset(strm);
+		};
+
+		// Release the buffers held by the deflate state. Returns Z_STREAM_ERROR
+		// when called in an unexpected state, Z_DATA_ERROR when the stream was
+		// freed mid-compression (BUSY_STATE), otherwise Z_OK.
+		that.deflateEnd = function() {
+			if (status != INIT_STATE && status != BUSY_STATE && status != FINISH_STATE) {
+				return Z_STREAM_ERROR;
+			}
+			// Deallocate in reverse order of allocations:
+			that.pending_buf = null;
+			head = null;
+			prev = null;
+			window = null;
+			// free
+			that.dstate = null;
+			return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK;
+		};
+
+		// Change compression level and strategy on the fly. When the configured
+		// compression function changes and input was already consumed, the
+		// current block is flushed first with Z_PARTIAL_FLUSH. Returns Z_OK,
+		// the error from that flush, or Z_STREAM_ERROR on invalid arguments.
+		that.deflateParams = function(strm, _level, _strategy) {
+			var err = Z_OK;
+
+			if (_level == Z_DEFAULT_COMPRESSION) {
+				_level = 6;
+			}
+			if (_level < 0 || _level > 9 || _strategy < 0 || _strategy > Z_HUFFMAN_ONLY) {
+				return Z_STREAM_ERROR;
+			}
+
+			if (config_table[level].func != config_table[_level].func && strm.total_in !== 0) {
+				// Flush the last buffer:
+				err = strm.deflate(Z_PARTIAL_FLUSH);
+			}
+
+			if (level != _level) {
+				level = _level;
+				max_lazy_match = config_table[level].max_lazy;
+				good_match = config_table[level].good_length;
+				nice_match = config_table[level].nice_length;
+				max_chain_length = config_table[level].max_chain;
+			}
+			strategy = _strategy;
+			return err;
+		};
+
+		// Preload the sliding window with a compression dictionary. Only valid
+		// immediately after deflateInit (status == INIT_STATE). If the
+		// dictionary exceeds the usable window, only its tail is kept. All
+		// dictionary strings (except the last two bytes) are inserted into the
+		// hash chains so subsequent input can match against them. Returns Z_OK
+		// or Z_STREAM_ERROR.
+		that.deflateSetDictionary = function(strm, dictionary, dictLength) {
+			var length = dictLength;
+			var n, index = 0;
+
+			if (!dictionary || status != INIT_STATE)
+				return Z_STREAM_ERROR;
+
+			if (length < MIN_MATCH)
+				return Z_OK;
+			if (length > w_size - MIN_LOOKAHEAD) {
+				length = w_size - MIN_LOOKAHEAD;
+				index = dictLength - length; // use the tail of the dictionary
+			}
+			window.set(dictionary.subarray(index, index + length), 0);
+
+			strstart = length;
+			block_start = length;
+
+			// Insert all strings in the hash table (except for the last two bytes).
+			// s->lookahead stays null, so s->ins_h will be recomputed at the next
+			// call of fill_window.
+
+			ins_h = window[0] & 0xff;
+			ins_h = (((ins_h) << hash_shift) ^ (window[1] & 0xff)) & hash_mask;
+
+			for (n = 0; n <= length - MIN_MATCH; n++) {
+				ins_h = (((ins_h) << hash_shift) ^ (window[(n) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
+				prev[n & w_mask] = head[ins_h];
+				head[ins_h] = n;
+			}
+			return Z_OK;
+		};
+
+		// Main entry point: compress as much input as possible into
+		// _strm.next_out, honoring the requested flush mode. Writes the zlib
+		// header on the first call (INIT_STATE), flushes pending output, then
+		// dispatches to the configured deflate_stored/deflate_fast/deflate_slow
+		// routine. Returns Z_OK, Z_STREAM_END (stream finished with Z_FINISH),
+		// Z_STREAM_ERROR, or Z_BUF_ERROR.
+		that.deflate = function(_strm, flush) {
+			var i, header, level_flags, old_flush, bstate;
+
+			if (flush > Z_FINISH || flush < 0) {
+				return Z_STREAM_ERROR;
+			}
+
+			if (!_strm.next_out || (!_strm.next_in && _strm.avail_in !== 0) || (status == FINISH_STATE && flush != Z_FINISH)) {
+				_strm.msg = z_errmsg[Z_NEED_DICT - (Z_STREAM_ERROR)];
+				return Z_STREAM_ERROR;
+			}
+			if (_strm.avail_out === 0) {
+				_strm.msg = z_errmsg[Z_NEED_DICT - (Z_BUF_ERROR)];
+				return Z_BUF_ERROR;
+			}
+
+			strm = _strm; // just in case
+			old_flush = last_flush;
+			last_flush = flush;
+
+			// Write the zlib header
+			if (status == INIT_STATE) {
+				header = (Z_DEFLATED + ((w_bits - 8) << 4)) << 8;
+				level_flags = ((level - 1) & 0xff) >> 1;
+
+				if (level_flags > 3)
+					level_flags = 3;
+				header |= (level_flags << 6);
+				if (strstart !== 0)
+					header |= PRESET_DICT;
+				header += 31 - (header % 31);
+
+				status = BUSY_STATE;
+				putShortMSB(header);
+			}
+
+			// Flush as much pending output as possible
+			if (that.pending !== 0) {
+				strm.flush_pending();
+				if (strm.avail_out === 0) {
+					// console.log(" avail_out==0");
+					// Since avail_out is 0, deflate will be called again with
+					// more output space, but possibly with both pending and
+					// avail_in equal to zero. There won't be anything to do,
+					// but this is not an error situation so make sure we
+					// return OK instead of BUF_ERROR at next call of deflate:
+					last_flush = -1;
+					return Z_OK;
+				}
+
+				// Make sure there is something to do and avoid duplicate
+				// consecutive
+				// flushes. For repeated and useless calls with Z_FINISH, we keep
+				// returning Z_STREAM_END instead of Z_BUFF_ERROR.
+			} else if (strm.avail_in === 0 && flush <= old_flush && flush != Z_FINISH) {
+				strm.msg = z_errmsg[Z_NEED_DICT - (Z_BUF_ERROR)];
+				return Z_BUF_ERROR;
+			}
+
+			// User must not provide more input after the first FINISH:
+			if (status == FINISH_STATE && strm.avail_in !== 0) {
+				_strm.msg = z_errmsg[Z_NEED_DICT - (Z_BUF_ERROR)];
+				return Z_BUF_ERROR;
+			}
+
+			// Start a new block or continue the current one.
+			if (strm.avail_in !== 0 || lookahead !== 0 || (flush != Z_NO_FLUSH && status != FINISH_STATE)) {
+				bstate = -1;
+				switch (config_table[level].func) {
+				case STORED:
+					bstate = deflate_stored(flush);
+					break;
+				case FAST:
+					bstate = deflate_fast(flush);
+					break;
+				case SLOW:
+					bstate = deflate_slow(flush);
+					break;
+				default:
+				}
+
+				if (bstate == FinishStarted || bstate == FinishDone) {
+					status = FINISH_STATE;
+				}
+				if (bstate == NeedMore || bstate == FinishStarted) {
+					if (strm.avail_out === 0) {
+						last_flush = -1; // avoid BUF_ERROR next call, see above
+					}
+					return Z_OK;
+					// If flush != Z_NO_FLUSH && avail_out === 0, the next call
+					// of deflate should use the same flush parameter to make sure
+					// that the flush is complete. So we don't have to output an
+					// empty block here, this will be done at next call. This also
+					// ensures that for a very small output buffer, we emit at most
+					// one empty block.
+				}
+
+				if (bstate == BlockDone) {
+					if (flush == Z_PARTIAL_FLUSH) {
+						_tr_align();
+					} else { // FULL_FLUSH or SYNC_FLUSH
+						_tr_stored_block(0, 0, false);
+						// For a full flush, this empty block will be recognized
+						// as a special marker by inflate_sync().
+						if (flush == Z_FULL_FLUSH) {
+							// state.head[s.hash_size-1]=0;
+							for (i = 0; i < hash_size/*-1*/; i++)
+								// forget history
+								head[i] = 0;
+						}
+					}
+					strm.flush_pending();
+					if (strm.avail_out === 0) {
+						last_flush = -1; // avoid BUF_ERROR at next call, see above
+						return Z_OK;
+					}
+				}
+			}
+
+			if (flush != Z_FINISH)
+				return Z_OK;
+			return Z_STREAM_END;
+		};
+ }
+
+	// ZStream
+
+	// Plain stream-state holder shared with the deflate code: input/output
+	// buffer indices and running byte counters. next_in / next_out / msg /
+	// dstate are attached later by callers (see the commented fields below).
+	function ZStream() {
+		var that = this;
+		that.next_in_index = 0;
+		that.next_out_index = 0;
+		// that.next_in; // next input byte
+		that.avail_in = 0; // number of bytes available at next_in
+		that.total_in = 0; // total nb of input bytes read so far
+		// that.next_out; // next output byte should be put there
+		that.avail_out = 0; // remaining free space at next_out
+		that.total_out = 0; // total nb of bytes output so far
+		// that.msg;
+		// that.dstate;
+	}
+
+	// Stream methods: thin delegations to the attached Deflate state (dstate)
+	// plus the two buffer helpers (read_buf / flush_pending) used by deflate.
+	ZStream.prototype = {
+		// Create and initialize the Deflate state for this stream;
+		// bits defaults to MAX_BITS.
+		deflateInit : function(level, bits) {
+			var that = this;
+			that.dstate = new Deflate();
+			if (!bits)
+				bits = MAX_BITS;
+			return that.dstate.deflateInit(that, level, bits);
+		},
+
+		deflate : function(flush) {
+			var that = this;
+			if (!that.dstate) {
+				return Z_STREAM_ERROR;
+			}
+			return that.dstate.deflate(that, flush);
+		},
+
+		// Release the deflate state; returns its final status code.
+		deflateEnd : function() {
+			var that = this;
+			if (!that.dstate)
+				return Z_STREAM_ERROR;
+			var ret = that.dstate.deflateEnd();
+			that.dstate = null;
+			return ret;
+		},
+
+		deflateParams : function(level, strategy) {
+			var that = this;
+			if (!that.dstate)
+				return Z_STREAM_ERROR;
+			return that.dstate.deflateParams(that, level, strategy);
+		},
+
+		deflateSetDictionary : function(dictionary, dictLength) {
+			var that = this;
+			if (!that.dstate)
+				return Z_STREAM_ERROR;
+			return that.dstate.deflateSetDictionary(that, dictionary, dictLength);
+		},
+
+		// Read a new buffer from the current input stream, update the
+		// total number of bytes read. All deflate() input goes through
+		// this function so some applications may wish to modify it to avoid
+		// allocating a large strm->next_in buffer and copying from it.
+		// (See also flush_pending()).
+		read_buf : function(buf, start, size) {
+			var that = this;
+			var len = that.avail_in;
+			if (len > size)
+				len = size;
+			if (len === 0)
+				return 0;
+			that.avail_in -= len;
+			buf.set(that.next_in.subarray(that.next_in_index, that.next_in_index + len), start);
+			that.next_in_index += len;
+			that.total_in += len;
+			return len;
+		},
+
+		// Flush as much pending output as possible. All deflate() output goes
+		// through this function so some applications may wish to modify it
+		// to avoid allocating a large strm->next_out buffer and copying into it.
+		// (See also read_buf()).
+		flush_pending : function() {
+			var that = this;
+			var len = that.dstate.pending;
+
+			if (len > that.avail_out)
+				len = that.avail_out;
+			if (len === 0)
+				return;
+
+			// if (that.dstate.pending_buf.length <= that.dstate.pending_out || that.next_out.length <= that.next_out_index
+			// || that.dstate.pending_buf.length < (that.dstate.pending_out + len) || that.next_out.length < (that.next_out_index +
+			// len)) {
+			// console.log(that.dstate.pending_buf.length + ", " + that.dstate.pending_out + ", " + that.next_out.length + ", " +
+			// that.next_out_index + ", " + len);
+			// console.log("avail_out=" + that.avail_out);
+			// }
+
+			that.next_out.set(that.dstate.pending_buf.subarray(that.dstate.pending_out, that.dstate.pending_out + len), that.next_out_index);
+
+			that.next_out_index += len;
+			that.dstate.pending_out += len;
+			that.total_out += len;
+			that.avail_out -= len;
+			that.dstate.pending -= len;
+			if (that.dstate.pending === 0) {
+				that.dstate.pending_out = 0;
+			}
+		}
+	};
+
+	// Deflater
+
+	// Public wrapper around ZStream/Deflate. append() feeds a Uint8Array
+	// through deflate in 512-byte output chunks and returns the compressed
+	// bytes produced so far; flush() finishes the stream and returns the tail.
+	// Param level: compression level; defaults to Z_DEFAULT_COMPRESSION.
+	function Deflater(level) {
+		var that = this;
+		var z = new ZStream();
+		var bufsize = 512;
+		var flush = Z_NO_FLUSH;
+		var buf = new Uint8Array(bufsize);
+
+		if (typeof level == "undefined")
+			level = Z_DEFAULT_COMPRESSION;
+		z.deflateInit(level);
+		z.next_out = buf;
+
+		// Compress data (Uint8Array). onprogress, when given, is called with
+		// the number of input bytes consumed so far. Returns a Uint8Array of
+		// the output produced by this call (undefined for empty input).
+		that.append = function(data, onprogress) {
+			var err, buffers = [], lastIndex = 0, bufferIndex = 0, bufferSize = 0, array;
+			if (!data.length)
+				return;
+			z.next_in_index = 0;
+			z.next_in = data;
+			z.avail_in = data.length;
+			do {
+				z.next_out_index = 0;
+				z.avail_out = bufsize;
+				err = z.deflate(flush);
+				if (err != Z_OK)
+					throw "deflating: " + z.msg;
+				if (z.next_out_index)
+					if (z.next_out_index == bufsize)
+						buffers.push(new Uint8Array(buf));
+					else
+						buffers.push(new Uint8Array(buf.subarray(0, z.next_out_index)));
+				bufferSize += z.next_out_index;
+				if (onprogress && z.next_in_index > 0 && z.next_in_index != lastIndex) {
+					onprogress(z.next_in_index);
+					lastIndex = z.next_in_index;
+				}
+			} while (z.avail_in > 0 || z.avail_out === 0);
+			array = new Uint8Array(bufferSize);
+			buffers.forEach(function(chunk) {
+				array.set(chunk, bufferIndex);
+				bufferIndex += chunk.length;
+			});
+			return array;
+		};
+		// Finish the stream (Z_FINISH), free the deflate state, and return the
+		// remaining compressed bytes as a Uint8Array.
+		that.flush = function() {
+			var err, buffers = [], bufferIndex = 0, bufferSize = 0, array;
+			do {
+				z.next_out_index = 0;
+				z.avail_out = bufsize;
+				err = z.deflate(Z_FINISH);
+				if (err != Z_STREAM_END && err != Z_OK)
+					throw "deflating: " + z.msg;
+				if (bufsize - z.avail_out > 0)
+					buffers.push(new Uint8Array(buf.subarray(0, z.next_out_index)));
+				bufferSize += z.next_out_index;
+			} while (z.avail_in > 0 || z.avail_out === 0);
+			z.deflateEnd();
+			array = new Uint8Array(bufferSize);
+			buffers.forEach(function(chunk) {
+				array.set(chunk, bufferIndex);
+				bufferIndex += chunk.length;
+			});
+			return array;
+		};
+	}
+
+	var deflater;
+
+	// Export: when obj carries a zip namespace, attach Deflater to it.
+	// Otherwise obj is treated as a Worker global scope: instantiate a
+	// default Deflater and answer init/append/flush messages, reporting
+	// progress back through postMessage.
+	if (obj.zip)
+		obj.zip.Deflater = Deflater;
+	else {
+		deflater = new Deflater();
+		obj.addEventListener("message", function(event) {
+			var message = event.data;
+			if (message.init) {
+				deflater = new Deflater(message.level);
+				obj.postMessage({
+					oninit : true
+				});
+			}
+			if (message.append)
+				obj.postMessage({
+					onappend : true,
+					data : deflater.append(message.data, function(current) {
+						obj.postMessage({
+							progress : true,
+							current : current
+						});
+					})
+				});
+			if (message.flush)
+				obj.postMessage({
+					onflush : true,
+					data : deflater.flush()
+				});
+		}, false);
+	}
+
+})(EasyDeflate);
--- /dev/null
+/**\r
+Copyright (c) 2013, Specialisterne.\r
+http://specialisterne.com/dk/\r
+All rights reserved.\r
+Authors:\r
+Jacob Christian Munch-Andersen\r
+\r
+Redistribution and use in source and binary forms, with or without\r
+modification, are permitted provided that the following conditions are met:\r
+\r
+1. Redistributions of source code must retain the above copyright notice, this\r
+ list of conditions and the following disclaimer.\r
+2. Redistributions in binary form must reproduce the above copyright notice,\r
+ this list of conditions and the following disclaimer in the documentation\r
+ and/or other materials provided with the distribution.\r
+\r
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND\r
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\r
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\r
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\r
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\r
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\r
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\r
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\r
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\r
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
+**/\r
+// For information and latest version see: https://github.com/Jacob-Christian-Munch-Andersen/Easy-Deflate\r
+(function(){\r
+\r
+var zip={};\r
+// Encode a JS (UTF-16) string as UTF-8 bytes in a Uint8Array.\r
+// Surrogate pairs are combined into single 4-byte sequences. Returns null\r
+// on malformed input: an unpaired or out-of-order surrogate, or a high\r
+// surrogate at the very end of the string.\r
+function UTF8encode(str){\r
+	var out=[];\r
+	var a;\r
+	var c,c2;\r
+	for(a=0;a<str.length;a++){\r
+		c=str.charCodeAt(a);\r
+		if(c<128){\r
+			out.push(c);\r
+		}\r
+		else if(c<2048){\r
+			out.push((c >> 6)+192);\r
+			out.push((c & 63)+128);\r
+		}\r
+		else if(c<65536){\r
+			if(c>=0xD800 && c<0xDC00){\r
+				// High surrogate: must be followed by a low surrogate.\r
+				a++;\r
+				if(a>=str.length){\r
+					return null;\r
+				}\r
+				c2=str.charCodeAt(a);\r
+				if(c2>=0xDC00 && c2<0xE000){\r
+					c=65536+(c-0xD800)*1024+c2-0xDC00;\r
+					out.push((c >> 18)+240);\r
+					out.push(((c >> 12) & 63)+128);\r
+					out.push(((c >> 6) & 63)+128);\r
+					out.push((c & 63)+128);\r
+				}\r
+				else{\r
+					return null;\r
+				}\r
+			}\r
+			else if(c>=0xDC00 && c<0xE000){\r
+				// Lone low surrogate: invalid.\r
+				return null;\r
+			}\r
+			else{\r
+				out.push((c >> 12)+224);\r
+				out.push(((c >> 6) & 63)+128);\r
+				out.push((c & 63)+128);\r
+			}\r
+		}\r
+		else{\r
+			return null;\r
+		}\r
+	}\r
+	return new Uint8Array(out);\r
+}\r
+// Decode an array of UTF-8 byte arrays (as produced by the inflater in\r
+// chunks) into a single JS string; code points above 0xFFFF become\r
+// surrogate pairs. Returns null on any invalid sequence: a bad lead or\r
+// continuation byte, an overlong encoding (value below minvalue for its\r
+// length), a code point above 0x10FFFF, or a sequence truncated at the\r
+// end of the input.\r
+function UTF8decodeA(arrarr){\r
+	var result="";\r
+	var intermediate;\r
+	var minvalue;\r
+	var missing=0;\r
+	var a,b;\r
+	var arr;\r
+	var c;\r
+	var lower,upper;\r
+	for(a=0;a<arrarr.length;a++){\r
+		arr=arrarr[a];\r
+		for(b=0;b<arr.length;b++){\r
+			c=arr[b];\r
+			if(missing){\r
+				// Expecting a continuation byte (10xxxxxx).\r
+				if(c>127 && c<192){\r
+					intermediate=intermediate*64+c-128;\r
+					missing--;\r
+					if(!missing){\r
+						if(intermediate>=minvalue){\r
+							if(intermediate>=65536){\r
+								if(intermediate>0x10FFFF){\r
+									return null;\r
+								}\r
+								upper=(intermediate-65536)>>10;\r
+								lower=intermediate%1024;\r
+								result+=String.fromCharCode(upper+0xD800,lower+0xDC00);\r
+							}\r
+							else{\r
+								result+=String.fromCharCode(intermediate);\r
+							}\r
+						}\r
+						else{\r
+							return null;\r
+						}\r
+					}\r
+				}\r
+				else{\r
+					return null;\r
+				}\r
+			}\r
+			else if(c<128){\r
+				result+=String.fromCharCode(c);\r
+			}\r
+			else if(c>191 && c<248){\r
+				// Lead byte: set expected length and the minimum code point\r
+				// for that length (rejects overlong encodings).\r
+				if(c<224){\r
+					intermediate=c-192;\r
+					minvalue=128;\r
+					missing=1;\r
+				}\r
+				else if(c<240){\r
+					intermediate=c-224;\r
+					minvalue=2048;\r
+					missing=2;\r
+				}\r
+				else{\r
+					intermediate=c-240;\r
+					minvalue=65536;\r
+					missing=3;\r
+				}\r
+			}\r
+			else{\r
+				return null;\r
+			}\r
+		}\r
+	}\r
+	if(missing){\r
+		return null;\r
+	}\r
+	return result;\r
+}\r
+// Compress str with raw deflate at level 9 and return it as a base64\r
+// string prefixed with "rawdeflate,". Returns null if the string cannot\r
+// be UTF-8 encoded. Input is processed in ~50000-character slices to\r
+// bound per-call memory use.\r
+function deflate(str){\r
+	var a,c;\r
+	var readlen=50000;\r
+	var resulta=[];\r
+	var results="";\r
+	var b,d;\r
+	var zipper=new zip.Deflater(9);\r
+	for(a=0;a<str.length;a+=readlen){\r
+		d=UTF8encode(str.substr(a,readlen));\r
+		if(d===null){ //This error may be due to a 4 byte character being split, retry with a string that is 1 longer to fix it.\r
+			d=UTF8encode(str.substr(a,readlen+1));\r
+			a+=1;\r
+			if(d===null){\r
+				return null;\r
+			}\r
+		}\r
+		b=zipper.append(d);\r
+		if(b.length!==0){\r
+			resulta.push(b);\r
+		}\r
+	}\r
+	b=zipper.flush();\r
+	if(b.length!==0){\r
+		resulta.push(b);\r
+	}\r
+	// Convert the collected byte chunks to a binary string for btoa().\r
+	for(a=0;a<resulta.length;a++){\r
+		for(c=0;c<resulta[a].length;c++){\r
+			results+=String.fromCharCode(resulta[a][c]);\r
+		}\r
+	}\r
+	return "rawdeflate,"+btoa(results);\r
+}\r
+// Decompress a "rawdeflate,"-prefixed base64 string (as produced by\r
+// deflate() above) back into a JS string. Returns null on any failure:\r
+// wrong prefix, invalid base64, corrupt deflate data, or invalid UTF-8.\r
+function inflate(dfl){\r
+	var unzipper=new zip.Inflater();\r
+	var resulta=[];\r
+	var dfls;\r
+	var a,c;\r
+	var b,d;\r
+	if(dfl.slice(0,11)!="rawdeflate,"){\r
+		return null;\r
+	}\r
+	try{\r
+		dfls=atob(dfl.slice(11));\r
+	}\r
+	catch(e){\r
+		return null;\r
+	}\r
+	try{\r
+		// Feed the inflater in 50000-byte slices; errors surface as throws.\r
+		for(a=0;a<dfls.length;a+=50000){\r
+			b=new Uint8Array(Math.min(50000,dfls.length-a));\r
+			for(c=0;c<b.length;c++){\r
+				b[c]=dfls.charCodeAt(c+a);\r
+			}\r
+			d=unzipper.append(b);\r
+			if(d.length){\r
+				resulta.push(d);\r
+			}\r
+		}\r
+		return UTF8decodeA(resulta);\r
+	}\r
+	catch(e){\r
+		return null;\r
+	}\r
+}\r
+\r
+window.EasyDeflate = {\r
+ 'zip': zip,\r
+ 'inflate': inflate,\r
+ 'deflate': deflate\r
+};\r
+\r
+})();
\ No newline at end of file
--- /dev/null
+/*
+ Copyright (c) 2013 Gildas Lormeau. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the distribution.
+
+ 3. The names of the authors may not be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
+ INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
+ INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+ OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This program is based on JZlib 1.0.2 ymnk, JCraft,Inc.
+ * JZlib is based on zlib-1.1.3, so all credit should go to the authors
+ * Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu)
+ * and contributors of zlib.
+ */
+
+(function(obj) {
+
+ // Global
+ var MAX_BITS = 15;
+
+ var Z_OK = 0;
+ var Z_STREAM_END = 1;
+ var Z_NEED_DICT = 2;
+ var Z_STREAM_ERROR = -2;
+ var Z_DATA_ERROR = -3;
+ var Z_MEM_ERROR = -4;
+ var Z_BUF_ERROR = -5;
+
+ var inflate_mask = [ 0x00000000, 0x00000001, 0x00000003, 0x00000007, 0x0000000f, 0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff, 0x000001ff, 0x000003ff,
+ 0x000007ff, 0x00000fff, 0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff ];
+
+ var MANY = 1440;
+
+ // JZlib version : "1.0.2"
+ var Z_NO_FLUSH = 0;
+ var Z_FINISH = 4;
+
+ // InfTree
+ var fixed_bl = 9;
+ var fixed_bd = 5;
+
+ var fixed_tl = [ 96, 7, 256, 0, 8, 80, 0, 8, 16, 84, 8, 115, 82, 7, 31, 0, 8, 112, 0, 8, 48, 0, 9, 192, 80, 7, 10, 0, 8, 96, 0, 8, 32, 0, 9, 160, 0, 8, 0,
+ 0, 8, 128, 0, 8, 64, 0, 9, 224, 80, 7, 6, 0, 8, 88, 0, 8, 24, 0, 9, 144, 83, 7, 59, 0, 8, 120, 0, 8, 56, 0, 9, 208, 81, 7, 17, 0, 8, 104, 0, 8, 40,
+ 0, 9, 176, 0, 8, 8, 0, 8, 136, 0, 8, 72, 0, 9, 240, 80, 7, 4, 0, 8, 84, 0, 8, 20, 85, 8, 227, 83, 7, 43, 0, 8, 116, 0, 8, 52, 0, 9, 200, 81, 7, 13,
+ 0, 8, 100, 0, 8, 36, 0, 9, 168, 0, 8, 4, 0, 8, 132, 0, 8, 68, 0, 9, 232, 80, 7, 8, 0, 8, 92, 0, 8, 28, 0, 9, 152, 84, 7, 83, 0, 8, 124, 0, 8, 60,
+ 0, 9, 216, 82, 7, 23, 0, 8, 108, 0, 8, 44, 0, 9, 184, 0, 8, 12, 0, 8, 140, 0, 8, 76, 0, 9, 248, 80, 7, 3, 0, 8, 82, 0, 8, 18, 85, 8, 163, 83, 7,
+ 35, 0, 8, 114, 0, 8, 50, 0, 9, 196, 81, 7, 11, 0, 8, 98, 0, 8, 34, 0, 9, 164, 0, 8, 2, 0, 8, 130, 0, 8, 66, 0, 9, 228, 80, 7, 7, 0, 8, 90, 0, 8,
+ 26, 0, 9, 148, 84, 7, 67, 0, 8, 122, 0, 8, 58, 0, 9, 212, 82, 7, 19, 0, 8, 106, 0, 8, 42, 0, 9, 180, 0, 8, 10, 0, 8, 138, 0, 8, 74, 0, 9, 244, 80,
+ 7, 5, 0, 8, 86, 0, 8, 22, 192, 8, 0, 83, 7, 51, 0, 8, 118, 0, 8, 54, 0, 9, 204, 81, 7, 15, 0, 8, 102, 0, 8, 38, 0, 9, 172, 0, 8, 6, 0, 8, 134, 0,
+ 8, 70, 0, 9, 236, 80, 7, 9, 0, 8, 94, 0, 8, 30, 0, 9, 156, 84, 7, 99, 0, 8, 126, 0, 8, 62, 0, 9, 220, 82, 7, 27, 0, 8, 110, 0, 8, 46, 0, 9, 188, 0,
+ 8, 14, 0, 8, 142, 0, 8, 78, 0, 9, 252, 96, 7, 256, 0, 8, 81, 0, 8, 17, 85, 8, 131, 82, 7, 31, 0, 8, 113, 0, 8, 49, 0, 9, 194, 80, 7, 10, 0, 8, 97,
+ 0, 8, 33, 0, 9, 162, 0, 8, 1, 0, 8, 129, 0, 8, 65, 0, 9, 226, 80, 7, 6, 0, 8, 89, 0, 8, 25, 0, 9, 146, 83, 7, 59, 0, 8, 121, 0, 8, 57, 0, 9, 210,
+ 81, 7, 17, 0, 8, 105, 0, 8, 41, 0, 9, 178, 0, 8, 9, 0, 8, 137, 0, 8, 73, 0, 9, 242, 80, 7, 4, 0, 8, 85, 0, 8, 21, 80, 8, 258, 83, 7, 43, 0, 8, 117,
+ 0, 8, 53, 0, 9, 202, 81, 7, 13, 0, 8, 101, 0, 8, 37, 0, 9, 170, 0, 8, 5, 0, 8, 133, 0, 8, 69, 0, 9, 234, 80, 7, 8, 0, 8, 93, 0, 8, 29, 0, 9, 154,
+ 84, 7, 83, 0, 8, 125, 0, 8, 61, 0, 9, 218, 82, 7, 23, 0, 8, 109, 0, 8, 45, 0, 9, 186, 0, 8, 13, 0, 8, 141, 0, 8, 77, 0, 9, 250, 80, 7, 3, 0, 8, 83,
+ 0, 8, 19, 85, 8, 195, 83, 7, 35, 0, 8, 115, 0, 8, 51, 0, 9, 198, 81, 7, 11, 0, 8, 99, 0, 8, 35, 0, 9, 166, 0, 8, 3, 0, 8, 131, 0, 8, 67, 0, 9, 230,
+ 80, 7, 7, 0, 8, 91, 0, 8, 27, 0, 9, 150, 84, 7, 67, 0, 8, 123, 0, 8, 59, 0, 9, 214, 82, 7, 19, 0, 8, 107, 0, 8, 43, 0, 9, 182, 0, 8, 11, 0, 8, 139,
+ 0, 8, 75, 0, 9, 246, 80, 7, 5, 0, 8, 87, 0, 8, 23, 192, 8, 0, 83, 7, 51, 0, 8, 119, 0, 8, 55, 0, 9, 206, 81, 7, 15, 0, 8, 103, 0, 8, 39, 0, 9, 174,
+ 0, 8, 7, 0, 8, 135, 0, 8, 71, 0, 9, 238, 80, 7, 9, 0, 8, 95, 0, 8, 31, 0, 9, 158, 84, 7, 99, 0, 8, 127, 0, 8, 63, 0, 9, 222, 82, 7, 27, 0, 8, 111,
+ 0, 8, 47, 0, 9, 190, 0, 8, 15, 0, 8, 143, 0, 8, 79, 0, 9, 254, 96, 7, 256, 0, 8, 80, 0, 8, 16, 84, 8, 115, 82, 7, 31, 0, 8, 112, 0, 8, 48, 0, 9,
+ 193, 80, 7, 10, 0, 8, 96, 0, 8, 32, 0, 9, 161, 0, 8, 0, 0, 8, 128, 0, 8, 64, 0, 9, 225, 80, 7, 6, 0, 8, 88, 0, 8, 24, 0, 9, 145, 83, 7, 59, 0, 8,
+ 120, 0, 8, 56, 0, 9, 209, 81, 7, 17, 0, 8, 104, 0, 8, 40, 0, 9, 177, 0, 8, 8, 0, 8, 136, 0, 8, 72, 0, 9, 241, 80, 7, 4, 0, 8, 84, 0, 8, 20, 85, 8,
+ 227, 83, 7, 43, 0, 8, 116, 0, 8, 52, 0, 9, 201, 81, 7, 13, 0, 8, 100, 0, 8, 36, 0, 9, 169, 0, 8, 4, 0, 8, 132, 0, 8, 68, 0, 9, 233, 80, 7, 8, 0, 8,
+ 92, 0, 8, 28, 0, 9, 153, 84, 7, 83, 0, 8, 124, 0, 8, 60, 0, 9, 217, 82, 7, 23, 0, 8, 108, 0, 8, 44, 0, 9, 185, 0, 8, 12, 0, 8, 140, 0, 8, 76, 0, 9,
+ 249, 80, 7, 3, 0, 8, 82, 0, 8, 18, 85, 8, 163, 83, 7, 35, 0, 8, 114, 0, 8, 50, 0, 9, 197, 81, 7, 11, 0, 8, 98, 0, 8, 34, 0, 9, 165, 0, 8, 2, 0, 8,
+ 130, 0, 8, 66, 0, 9, 229, 80, 7, 7, 0, 8, 90, 0, 8, 26, 0, 9, 149, 84, 7, 67, 0, 8, 122, 0, 8, 58, 0, 9, 213, 82, 7, 19, 0, 8, 106, 0, 8, 42, 0, 9,
+ 181, 0, 8, 10, 0, 8, 138, 0, 8, 74, 0, 9, 245, 80, 7, 5, 0, 8, 86, 0, 8, 22, 192, 8, 0, 83, 7, 51, 0, 8, 118, 0, 8, 54, 0, 9, 205, 81, 7, 15, 0, 8,
+ 102, 0, 8, 38, 0, 9, 173, 0, 8, 6, 0, 8, 134, 0, 8, 70, 0, 9, 237, 80, 7, 9, 0, 8, 94, 0, 8, 30, 0, 9, 157, 84, 7, 99, 0, 8, 126, 0, 8, 62, 0, 9,
+ 221, 82, 7, 27, 0, 8, 110, 0, 8, 46, 0, 9, 189, 0, 8, 14, 0, 8, 142, 0, 8, 78, 0, 9, 253, 96, 7, 256, 0, 8, 81, 0, 8, 17, 85, 8, 131, 82, 7, 31, 0,
+ 8, 113, 0, 8, 49, 0, 9, 195, 80, 7, 10, 0, 8, 97, 0, 8, 33, 0, 9, 163, 0, 8, 1, 0, 8, 129, 0, 8, 65, 0, 9, 227, 80, 7, 6, 0, 8, 89, 0, 8, 25, 0, 9,
+ 147, 83, 7, 59, 0, 8, 121, 0, 8, 57, 0, 9, 211, 81, 7, 17, 0, 8, 105, 0, 8, 41, 0, 9, 179, 0, 8, 9, 0, 8, 137, 0, 8, 73, 0, 9, 243, 80, 7, 4, 0, 8,
+ 85, 0, 8, 21, 80, 8, 258, 83, 7, 43, 0, 8, 117, 0, 8, 53, 0, 9, 203, 81, 7, 13, 0, 8, 101, 0, 8, 37, 0, 9, 171, 0, 8, 5, 0, 8, 133, 0, 8, 69, 0, 9,
+ 235, 80, 7, 8, 0, 8, 93, 0, 8, 29, 0, 9, 155, 84, 7, 83, 0, 8, 125, 0, 8, 61, 0, 9, 219, 82, 7, 23, 0, 8, 109, 0, 8, 45, 0, 9, 187, 0, 8, 13, 0, 8,
+ 141, 0, 8, 77, 0, 9, 251, 80, 7, 3, 0, 8, 83, 0, 8, 19, 85, 8, 195, 83, 7, 35, 0, 8, 115, 0, 8, 51, 0, 9, 199, 81, 7, 11, 0, 8, 99, 0, 8, 35, 0, 9,
+ 167, 0, 8, 3, 0, 8, 131, 0, 8, 67, 0, 9, 231, 80, 7, 7, 0, 8, 91, 0, 8, 27, 0, 9, 151, 84, 7, 67, 0, 8, 123, 0, 8, 59, 0, 9, 215, 82, 7, 19, 0, 8,
+ 107, 0, 8, 43, 0, 9, 183, 0, 8, 11, 0, 8, 139, 0, 8, 75, 0, 9, 247, 80, 7, 5, 0, 8, 87, 0, 8, 23, 192, 8, 0, 83, 7, 51, 0, 8, 119, 0, 8, 55, 0, 9,
+ 207, 81, 7, 15, 0, 8, 103, 0, 8, 39, 0, 9, 175, 0, 8, 7, 0, 8, 135, 0, 8, 71, 0, 9, 239, 80, 7, 9, 0, 8, 95, 0, 8, 31, 0, 9, 159, 84, 7, 99, 0, 8,
+ 127, 0, 8, 63, 0, 9, 223, 82, 7, 27, 0, 8, 111, 0, 8, 47, 0, 9, 191, 0, 8, 15, 0, 8, 143, 0, 8, 79, 0, 9, 255 ];
+ // Fixed-Huffman distance tree, flattened as triples [operation, bits, value]
+ // (tables are indexed with index*3 elsewhere); returned via inflate_trees_fixed.
+ var fixed_td = [ 80, 5, 1, 87, 5, 257, 83, 5, 17, 91, 5, 4097, 81, 5, 5, 89, 5, 1025, 85, 5, 65, 93, 5, 16385, 80, 5, 3, 88, 5, 513, 84, 5, 33, 92, 5,
+ 8193, 82, 5, 9, 90, 5, 2049, 86, 5, 129, 192, 5, 24577, 80, 5, 2, 87, 5, 385, 83, 5, 25, 91, 5, 6145, 81, 5, 7, 89, 5, 1537, 85, 5, 97, 93, 5,
+ 24577, 80, 5, 4, 88, 5, 769, 84, 5, 49, 92, 5, 12289, 82, 5, 13, 90, 5, 3073, 86, 5, 193, 192, 5, 24577 ];
+
+ // Tables for deflate from PKZIP's appnote.txt.
+ // These feed huft_build as the d (base values) / e (extra bits) arguments
+ // when building the literal/length and distance trees.
+ var cplens = [ // Copy lengths for literal codes 257..285
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0 ];
+
+ // see note #13 above about 258
+ var cplext = [ // Extra bits for literal codes 257..285
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 112, 112 // 112==invalid
+ ];
+
+ var cpdist = [ // Copy offsets for distance codes 0..29
+ 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577 ];
+
+ var cpdext = [ // Extra bits for distance codes
+ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13 ];
+
+ // If BMAX needs to be larger than 16, then h and x[] should be uLong.
+ var BMAX = 15; // maximum bit length of any code
+
+ // Builder for inflate Huffman decoding tables. Instances expose
+ // inflate_trees_bits (19-entry bit-length tree for dynamic blocks) and
+ // inflate_trees_dynamic (literal/length + distance trees); both delegate
+ // to huft_build below. Work arrays are lazily allocated by initWorkArea
+ // and reused across calls.
+ function InfTree() {
+ var that = this;
+
+ var hn; // hufts used in space
+ var v; // work area for huft_build
+ var c; // bit length count table
+ var r; // table entry for structure assignment
+ var u; // table stack
+ var x; // bit offsets, then code stack
+
+ // NOTE: the hn and v parameters below intentionally shadow the closure
+ // variables of the same name; callers pass the closure arrays through.
+ function huft_build(b, // code lengths in bits (all assumed <=
+ // BMAX)
+ bindex, n, // number of codes (assumed <= 288)
+ s, // number of simple-valued codes (0..s-1)
+ d, // list of base values for non-simple codes
+ e, // list of extra bits for non-simple codes
+ t, // result: starting table
+ m, // maximum lookup bits, returns actual
+ hp,// space for trees
+ hn,// hufts used in space
+ v // working area: values in order of bit length
+ ) {
+ // Given a list of code lengths and a maximum table size, make a set of
+ // tables to decode that set of codes. Return Z_OK on success,
+ // Z_BUF_ERROR
+ // if the given code set is incomplete (the tables are still built in
+ // this
+ // case), Z_DATA_ERROR if the input is invalid (an over-subscribed set
+ // of
+ // lengths), or Z_MEM_ERROR if not enough memory.
+
+ var a; // counter for codes of length k
+ var f; // i repeats in table every f entries
+ var g; // maximum code length
+ var h; // table level
+ var i; // counter, current code
+ var j; // counter
+ var k; // number of bits in current code
+ var l; // bits per table (returned in m)
+ var mask; // (1 << w) - 1, to avoid cc -O bug on HP
+ var p; // pointer into c[], b[], or v[]
+ var q; // points to current table
+ var w; // bits before this table == (l * h)
+ var xp; // pointer into x
+ var y; // number of dummy codes added
+ var z; // number of entries in current table
+
+ // Generate counts for each bit length
+
+ p = 0;
+ i = n;
+ do {
+ c[b[bindex + p]]++;
+ p++;
+ i--; // assume all entries <= BMAX
+ } while (i !== 0);
+
+ if (c[0] == n) { // null input--all zero length codes
+ t[0] = -1;
+ m[0] = 0;
+ return Z_OK;
+ }
+
+ // Find minimum and maximum length, bound *m by those
+ l = m[0];
+ for (j = 1; j <= BMAX; j++)
+ if (c[j] !== 0)
+ break;
+ k = j; // minimum code length
+ if (l < j) {
+ l = j;
+ }
+ for (i = BMAX; i !== 0; i--) {
+ if (c[i] !== 0)
+ break;
+ }
+ g = i; // maximum code length
+ if (l > i) {
+ l = i;
+ }
+ m[0] = l;
+
+ // Adjust last length count to fill out codes, if needed
+ for (y = 1 << j; j < i; j++, y <<= 1) {
+ if ((y -= c[j]) < 0) {
+ return Z_DATA_ERROR;
+ }
+ }
+ if ((y -= c[i]) < 0) {
+ return Z_DATA_ERROR;
+ }
+ c[i] += y;
+
+ // Generate starting offsets into the value table for each length
+ x[1] = j = 0;
+ p = 1;
+ xp = 2;
+ while (--i !== 0) { // note that i == g from above
+ x[xp] = (j += c[p]);
+ xp++;
+ p++;
+ }
+
+ // Make a table of values in order of bit lengths
+ i = 0;
+ p = 0;
+ do {
+ if ((j = b[bindex + p]) !== 0) {
+ v[x[j]++] = i;
+ }
+ p++;
+ } while (++i < n);
+ n = x[g]; // set n to length of v
+
+ // Generate the Huffman codes and for each, make the table entries
+ x[0] = i = 0; // first Huffman code is zero
+ p = 0; // grab values in bit order
+ h = -1; // no tables yet--level -1
+ w = -l; // bits decoded == (l * h)
+ u[0] = 0; // just to keep compilers happy
+ q = 0; // ditto
+ z = 0; // ditto
+
+ // go through the bit lengths (k already is bits in shortest code)
+ for (; k <= g; k++) {
+ a = c[k];
+ while (a-- !== 0) {
+ // here i is the Huffman code of length k bits for value *p
+ // make tables up to required level
+ while (k > w + l) {
+ h++;
+ w += l; // previous table always l bits
+ // compute minimum size table less than or equal to l bits
+ z = g - w;
+ z = (z > l) ? l : z; // table size upper limit
+ if ((f = 1 << (j = k - w)) > a + 1) { // try a k-w bit table
+ // too few codes for
+ // k-w bit table
+ f -= a + 1; // deduct codes from patterns left
+ xp = k;
+ if (j < z) {
+ while (++j < z) { // try smaller tables up to z bits
+ if ((f <<= 1) <= c[++xp])
+ break; // enough codes to use up j bits
+ f -= c[xp]; // else deduct codes from patterns
+ }
+ }
+ }
+ z = 1 << j; // table entries for j-bit table
+
+ // allocate new table
+ if (hn[0] + z > MANY) { // (note: doesn't matter for fixed)
+ return Z_DATA_ERROR; // overflow of MANY
+ }
+ u[h] = q = /* hp+ */hn[0]; // DEBUG
+ hn[0] += z;
+
+ // connect to last table, if there is one
+ if (h !== 0) {
+ x[h] = i; // save pattern for backing up
+ r[0] = /* (byte) */j; // bits in this table
+ r[1] = /* (byte) */l; // bits to dump before this table
+ j = i >>> (w - l);
+ r[2] = /* (int) */(q - u[h - 1] - j); // offset to this table
+ hp.set(r, (u[h - 1] + j) * 3);
+ // to
+ // last
+ // table
+ } else {
+ t[0] = q; // first table is returned result
+ }
+ }
+
+ // set up table entry in r
+ r[1] = /* (byte) */(k - w);
+ if (p >= n) {
+ r[0] = 128 + 64; // out of values--invalid code
+ } else if (v[p] < s) {
+ r[0] = /* (byte) */(v[p] < 256 ? 0 : 32 + 64); // 256 is
+ // end-of-block
+ r[2] = v[p++]; // simple code is just the value
+ } else {
+ r[0] = /* (byte) */(e[v[p] - s] + 16 + 64); // non-simple--look
+ // up in lists
+ r[2] = d[v[p++] - s];
+ }
+
+ // fill code-like entries with r
+ f = 1 << (k - w);
+ for (j = i >>> w; j < z; j += f) {
+ hp.set(r, (q + j) * 3);
+ }
+
+ // backwards increment the k-bit code i
+ for (j = 1 << (k - 1); (i & j) !== 0; j >>>= 1) {
+ i ^= j;
+ }
+ i ^= j;
+
+ // backup over finished tables
+ mask = (1 << w) - 1; // needed on HP, cc -O bug
+ while ((i & mask) != x[h]) {
+ h--; // don't need to update q
+ w -= l;
+ mask = (1 << w) - 1;
+ }
+ }
+ }
+ // Return Z_BUF_ERROR if we were given an incomplete table
+ return y !== 0 && g != 1 ? Z_BUF_ERROR : Z_OK;
+ }
+
+ // Allocate (on first use) and zero the shared work arrays used by
+ // huft_build; vsize is the number of code values the v array must hold.
+ function initWorkArea(vsize) {
+ var i;
+ if (!hn) {
+ hn = []; // []; //new Array(1);
+ v = []; // new Array(vsize);
+ c = new Int32Array(BMAX + 1); // new Array(BMAX + 1);
+ r = []; // new Array(3);
+ u = new Int32Array(BMAX); // new Array(BMAX);
+ x = new Int32Array(BMAX + 1); // new Array(BMAX + 1);
+ }
+ if (v.length < vsize) {
+ v = []; // new Array(vsize);
+ }
+ for (i = 0; i < vsize; i++) {
+ v[i] = 0;
+ }
+ for (i = 0; i < BMAX + 1; i++) {
+ c[i] = 0;
+ }
+ for (i = 0; i < 3; i++) {
+ r[i] = 0;
+ }
+ // for(int i=0; i<BMAX; i++){u[i]=0;}
+ // c is all zeros at this point, so these .set calls zero u and x.
+ u.set(c.subarray(0, BMAX), 0);
+ // for(int i=0; i<BMAX+1; i++){x[i]=0;}
+ x.set(c.subarray(0, BMAX + 1), 0);
+ }
+
+ // Build the 19-entry tree used to decode the code-length codes of a
+ // dynamic block. Returns Z_OK or Z_DATA_ERROR (setting z.msg).
+ that.inflate_trees_bits = function(c, // 19 code lengths
+ bb, // bits tree desired/actual depth
+ tb, // bits tree result
+ hp, // space for trees
+ z // for messages
+ ) {
+ var result;
+ initWorkArea(19);
+ hn[0] = 0;
+ result = huft_build(c, 0, 19, 19, null, null, tb, bb, hp, hn, v);
+
+ if (result == Z_DATA_ERROR) {
+ z.msg = "oversubscribed dynamic bit lengths tree";
+ } else if (result == Z_BUF_ERROR || bb[0] === 0) {
+ z.msg = "incomplete dynamic bit lengths tree";
+ result = Z_DATA_ERROR;
+ }
+ return result;
+ };
+
+ // Build the literal/length and distance trees for a dynamic block from
+ // the decoded code lengths in c. Returns Z_OK or Z_DATA_ERROR
+ // (setting z.msg); on error the partially-built trees must not be used.
+ that.inflate_trees_dynamic = function(nl, // number of literal/length codes
+ nd, // number of distance codes
+ c, // that many (total) code lengths
+ bl, // literal desired/actual bit depth
+ bd, // distance desired/actual bit depth
+ tl, // literal/length tree result
+ td, // distance tree result
+ hp, // space for trees
+ z // for messages
+ ) {
+ var result;
+
+ // build literal/length tree
+ initWorkArea(288);
+ hn[0] = 0;
+ result = huft_build(c, 0, nl, 257, cplens, cplext, tl, bl, hp, hn, v);
+ if (result != Z_OK || bl[0] === 0) {
+ if (result == Z_DATA_ERROR) {
+ z.msg = "oversubscribed literal/length tree";
+ } else if (result != Z_MEM_ERROR) {
+ z.msg = "incomplete literal/length tree";
+ result = Z_DATA_ERROR;
+ }
+ return result;
+ }
+
+ // build distance tree
+ initWorkArea(288);
+ result = huft_build(c, nl, nd, 0, cpdist, cpdext, td, bd, hp, hn, v);
+
+ if (result != Z_OK || (bd[0] === 0 && nl > 257)) {
+ if (result == Z_DATA_ERROR) {
+ z.msg = "oversubscribed distance tree";
+ } else if (result == Z_BUF_ERROR) {
+ z.msg = "incomplete distance tree";
+ result = Z_DATA_ERROR;
+ } else if (result != Z_MEM_ERROR) {
+ z.msg = "empty distance tree with lengths";
+ result = Z_DATA_ERROR;
+ }
+ return result;
+ }
+
+ return Z_OK;
+ };
+
+ }
+
+ // Hand out the precomputed tables for fixed-Huffman blocks: depths and
+ // trees are the module-level fixed_bl/fixed_bd/fixed_tl/fixed_td
+ // constants (no building needed). Always returns Z_OK.
+ InfTree.inflate_trees_fixed = function(bl, // literal desired/actual bit depth
+ bd, // distance desired/actual bit depth
+ tl,// literal/length tree result
+ td// distance tree result
+ ) {
+ bl[0] = fixed_bl;
+ bd[0] = fixed_bd;
+ tl[0] = fixed_tl;
+ td[0] = fixed_td;
+ return Z_OK;
+ };
+
+ // InfCodes
+
+ // Modes for the InfCodes state machine (see that.proc). Prefix legend:
+ // "i:"=waiting for input,
+ // "o:"=waiting for output space,
+ // "x:"=waiting for nothing
+ var START = 0; // x: set up for LEN
+ var LEN = 1; // i: get length/literal/eob next
+ var LENEXT = 2; // i: getting length extra (have base)
+ var DIST = 3; // i: get distance next
+ var DISTEXT = 4;// i: getting distance extra
+ var COPY = 5; // o: copying bytes in window, waiting
+ // for space
+ var LIT = 6; // o: got literal, waiting for output
+ // space
+ var WASH = 7; // o: got eob, possibly still output
+ // waiting
+ var END = 8; // x: got eob and all data flushed
+ var BADCODE = 9;// x: got error
+
+ // Decoder for the compressed data inside a fixed or dynamic block.
+ // init() installs the Huffman trees; proc() is the mode-driven decode
+ // loop, delegating to inflate_fast when enough input and window space
+ // are available to skip the per-bit state machine.
+ function InfCodes() {
+ var that = this;
+
+ var mode; // current inflate_codes mode
+
+ // mode dependent information
+ var len = 0;
+
+ var tree; // pointer into tree
+ var tree_index = 0;
+ var need = 0; // bits needed
+
+ var lit = 0;
+
+ // if EXT or COPY, where and how much
+ var get = 0; // bits to get for extra
+ var dist = 0; // distance back to copy from
+
+ var lbits = 0; // ltree bits decoded per branch
+ var dbits = 0; // dtree bits decoded per branch
+ var ltree; // literal/length/eob tree
+ var ltree_index = 0; // literal/length/eob tree
+ var dtree; // distance tree
+ var dtree_index = 0; // distance tree
+
+ // Called with number of bytes left to write in window at least 258
+ // (the maximum string length) and number of input bytes available
+ // at least ten. The ten bytes are six bytes for the longest length/
+ // distance pair plus four bytes for overloading the bit buffer.
+
+ function inflate_fast(bl, bd, tl, tl_index, td, td_index, s, z) {
+ var t; // temporary pointer
+ var tp; // temporary pointer
+ var tp_index; // temporary pointer
+ var e; // extra bits or operation
+ var b; // bit buffer
+ var k; // bits in bit buffer
+ var p; // input data pointer
+ var n; // bytes available there
+ var q; // output window write pointer
+ var m; // bytes to end of window or read pointer
+ var ml; // mask for literal/length tree
+ var md; // mask for distance tree
+ var c; // bytes to copy
+ var d; // distance back to copy from
+ var r; // copy source pointer
+
+ var tp_index_t_3; // (tp_index+t)*3
+
+ // load input, output, bit values
+ p = z.next_in_index;
+ n = z.avail_in;
+ b = s.bitb;
+ k = s.bitk;
+ q = s.write;
+ m = q < s.read ? s.read - q - 1 : s.end - q;
+
+ // initialize masks
+ ml = inflate_mask[bl];
+ md = inflate_mask[bd];
+
+ // do until not enough input or output space for fast loop
+ do { // assume called with m >= 258 && n >= 10
+ // get literal/length code
+ while (k < (20)) { // max bits for literal/length code
+ n--;
+ b |= (z.read_byte(p++) & 0xff) << k;
+ k += 8;
+ }
+
+ t = b & ml;
+ tp = tl;
+ tp_index = tl_index;
+ tp_index_t_3 = (tp_index + t) * 3;
+ if ((e = tp[tp_index_t_3]) === 0) {
+ b >>= (tp[tp_index_t_3 + 1]);
+ k -= (tp[tp_index_t_3 + 1]);
+
+ s.window[q++] = /* (byte) */tp[tp_index_t_3 + 2];
+ m--;
+ continue;
+ }
+ do {
+
+ b >>= (tp[tp_index_t_3 + 1]);
+ k -= (tp[tp_index_t_3 + 1]);
+
+ if ((e & 16) !== 0) {
+ e &= 15;
+ c = tp[tp_index_t_3 + 2] + (/* (int) */b & inflate_mask[e]);
+
+ b >>= e;
+ k -= e;
+
+ // decode distance base of block to copy
+ while (k < (15)) { // max bits for distance code
+ n--;
+ b |= (z.read_byte(p++) & 0xff) << k;
+ k += 8;
+ }
+
+ t = b & md;
+ tp = td;
+ tp_index = td_index;
+ tp_index_t_3 = (tp_index + t) * 3;
+ e = tp[tp_index_t_3];
+
+ do {
+
+ b >>= (tp[tp_index_t_3 + 1]);
+ k -= (tp[tp_index_t_3 + 1]);
+
+ if ((e & 16) !== 0) {
+ // get extra bits to add to distance base
+ e &= 15;
+ while (k < (e)) { // get extra bits (up to 13)
+ n--;
+ b |= (z.read_byte(p++) & 0xff) << k;
+ k += 8;
+ }
+
+ d = tp[tp_index_t_3 + 2] + (b & inflate_mask[e]);
+
+ b >>= (e);
+ k -= (e);
+
+ // do the copy
+ m -= c;
+ if (q >= d) { // offset before dest
+ // just copy
+ r = q - d;
+ if (q - r > 0 && 2 > (q - r)) {
+ s.window[q++] = s.window[r++]; // minimum
+ // count is
+ // three,
+ s.window[q++] = s.window[r++]; // so unroll
+ // loop a
+ // little
+ c -= 2;
+ } else {
+ s.window.set(s.window.subarray(r, r + 2), q);
+ q += 2;
+ r += 2;
+ c -= 2;
+ }
+ } else { // else offset after destination
+ r = q - d;
+ do {
+ r += s.end; // force pointer in window
+ } while (r < 0); // covers invalid distances
+ e = s.end - r;
+ if (c > e) { // if source crosses,
+ c -= e; // wrapped copy
+ if (q - r > 0 && e > (q - r)) {
+ do {
+ s.window[q++] = s.window[r++];
+ } while (--e !== 0);
+ } else {
+ s.window.set(s.window.subarray(r, r + e), q);
+ q += e;
+ r += e;
+ e = 0;
+ }
+ r = 0; // copy rest from start of window
+ }
+
+ }
+
+ // copy all or what's left
+ if (q - r > 0 && c > (q - r)) {
+ do {
+ s.window[q++] = s.window[r++];
+ } while (--c !== 0);
+ } else {
+ s.window.set(s.window.subarray(r, r + c), q);
+ q += c;
+ r += c;
+ c = 0;
+ }
+ break;
+ } else if ((e & 64) === 0) {
+ t += tp[tp_index_t_3 + 2];
+ t += (b & inflate_mask[e]);
+ tp_index_t_3 = (tp_index + t) * 3;
+ e = tp[tp_index_t_3];
+ } else {
+ z.msg = "invalid distance code";
+
+ c = z.avail_in - n;
+ c = (k >> 3) < c ? k >> 3 : c;
+ n += c;
+ p -= c;
+ k -= c << 3;
+
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+
+ return Z_DATA_ERROR;
+ }
+ } while (true);
+ break;
+ }
+
+ if ((e & 64) === 0) {
+ t += tp[tp_index_t_3 + 2];
+ t += (b & inflate_mask[e]);
+ tp_index_t_3 = (tp_index + t) * 3;
+ if ((e = tp[tp_index_t_3]) === 0) {
+
+ b >>= (tp[tp_index_t_3 + 1]);
+ k -= (tp[tp_index_t_3 + 1]);
+
+ s.window[q++] = /* (byte) */tp[tp_index_t_3 + 2];
+ m--;
+ break;
+ }
+ } else if ((e & 32) !== 0) {
+
+ c = z.avail_in - n;
+ c = (k >> 3) < c ? k >> 3 : c;
+ n += c;
+ p -= c;
+ k -= c << 3;
+
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+
+ return Z_STREAM_END;
+ } else {
+ z.msg = "invalid literal/length code";
+
+ c = z.avail_in - n;
+ c = (k >> 3) < c ? k >> 3 : c;
+ n += c;
+ p -= c;
+ k -= c << 3;
+
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+
+ return Z_DATA_ERROR;
+ }
+ } while (true);
+ } while (m >= 258 && n >= 10);
+
+ // not enough input or output--restore pointers and return
+ c = z.avail_in - n;
+ c = (k >> 3) < c ? k >> 3 : c;
+ n += c;
+ p -= c;
+ k -= c << 3;
+
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+
+ return Z_OK;
+ }
+
+ // Install the Huffman trees (and their root bit depths) for this block
+ // and reset the state machine to START.
+ that.init = function(bl, bd, tl, tl_index, td, td_index) {
+ mode = START;
+ lbits = /* (byte) */bl;
+ dbits = /* (byte) */bd;
+ ltree = tl;
+ ltree_index = tl_index;
+ dtree = td;
+ dtree_index = td_index;
+ tree = null;
+ };
+
+ // Main decode loop: consumes input from z, writes to the sliding
+ // window in s, and returns a Z_* status via s.inflate_flush. Several
+ // switch cases below intentionally fall through to the next state.
+ that.proc = function(s, z, r) {
+ var j; // temporary storage
+ var tindex; // temporary pointer
+ var e; // extra bits or operation
+ var b = 0; // bit buffer
+ var k = 0; // bits in bit buffer
+ var p = 0; // input data pointer
+ var n; // bytes available there
+ var q; // output window write pointer
+ var m; // bytes to end of window or read pointer
+ var f; // pointer to copy strings from
+
+ // copy input/output information to locals (UPDATE macro restores)
+ p = z.next_in_index;
+ n = z.avail_in;
+ b = s.bitb;
+ k = s.bitk;
+ q = s.write;
+ m = q < s.read ? s.read - q - 1 : s.end - q;
+
+ // process input and output based on current state
+ while (true) {
+ switch (mode) {
+ // waiting for "i:"=input, "o:"=output, "x:"=nothing
+ case START: // x: set up for LEN
+ if (m >= 258 && n >= 10) {
+
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+ r = inflate_fast(lbits, dbits, ltree, ltree_index, dtree, dtree_index, s, z);
+
+ p = z.next_in_index;
+ n = z.avail_in;
+ b = s.bitb;
+ k = s.bitk;
+ q = s.write;
+ m = q < s.read ? s.read - q - 1 : s.end - q;
+
+ if (r != Z_OK) {
+ mode = r == Z_STREAM_END ? WASH : BADCODE;
+ break;
+ }
+ }
+ need = lbits;
+ tree = ltree;
+ tree_index = ltree_index;
+
+ mode = LEN;
+ // falls through to LEN
+ case LEN: // i: get length/literal/eob next
+ j = need;
+
+ while (k < (j)) {
+ if (n !== 0)
+ r = Z_OK;
+ else {
+
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+ return s.inflate_flush(z, r);
+ }
+ n--;
+ b |= (z.read_byte(p++) & 0xff) << k;
+ k += 8;
+ }
+
+ tindex = (tree_index + (b & inflate_mask[j])) * 3;
+
+ b >>>= (tree[tindex + 1]);
+ k -= (tree[tindex + 1]);
+
+ e = tree[tindex];
+
+ if (e === 0) { // literal
+ lit = tree[tindex + 2];
+ mode = LIT;
+ break;
+ }
+ if ((e & 16) !== 0) { // length
+ get = e & 15;
+ len = tree[tindex + 2];
+ mode = LENEXT;
+ break;
+ }
+ if ((e & 64) === 0) { // next table
+ need = e;
+ tree_index = tindex / 3 + tree[tindex + 2];
+ break;
+ }
+ if ((e & 32) !== 0) { // end of block
+ mode = WASH;
+ break;
+ }
+ mode = BADCODE; // invalid code
+ z.msg = "invalid literal/length code";
+ r = Z_DATA_ERROR;
+
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+ return s.inflate_flush(z, r);
+
+ case LENEXT: // i: getting length extra (have base)
+ j = get;
+
+ while (k < (j)) {
+ if (n !== 0)
+ r = Z_OK;
+ else {
+
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+ return s.inflate_flush(z, r);
+ }
+ n--;
+ b |= (z.read_byte(p++) & 0xff) << k;
+ k += 8;
+ }
+
+ len += (b & inflate_mask[j]);
+
+ b >>= j;
+ k -= j;
+
+ need = dbits;
+ tree = dtree;
+ tree_index = dtree_index;
+ mode = DIST;
+ // falls through to DIST
+ case DIST: // i: get distance next
+ j = need;
+
+ while (k < (j)) {
+ if (n !== 0)
+ r = Z_OK;
+ else {
+
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+ return s.inflate_flush(z, r);
+ }
+ n--;
+ b |= (z.read_byte(p++) & 0xff) << k;
+ k += 8;
+ }
+
+ tindex = (tree_index + (b & inflate_mask[j])) * 3;
+
+ b >>= tree[tindex + 1];
+ k -= tree[tindex + 1];
+
+ e = (tree[tindex]);
+ if ((e & 16) !== 0) { // distance
+ get = e & 15;
+ dist = tree[tindex + 2];
+ mode = DISTEXT;
+ break;
+ }
+ if ((e & 64) === 0) { // next table
+ need = e;
+ tree_index = tindex / 3 + tree[tindex + 2];
+ break;
+ }
+ mode = BADCODE; // invalid code
+ z.msg = "invalid distance code";
+ r = Z_DATA_ERROR;
+
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+ return s.inflate_flush(z, r);
+
+ case DISTEXT: // i: getting distance extra
+ j = get;
+
+ while (k < (j)) {
+ if (n !== 0)
+ r = Z_OK;
+ else {
+
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+ return s.inflate_flush(z, r);
+ }
+ n--;
+ b |= (z.read_byte(p++) & 0xff) << k;
+ k += 8;
+ }
+
+ dist += (b & inflate_mask[j]);
+
+ b >>= j;
+ k -= j;
+
+ mode = COPY;
+ // falls through to COPY
+ case COPY: // o: copying bytes in window, waiting for space
+ f = q - dist;
+ while (f < 0) { // modulo window size-"while" instead
+ f += s.end; // of "if" handles invalid distances
+ }
+ while (len !== 0) {
+
+ if (m === 0) {
+ if (q == s.end && s.read !== 0) {
+ q = 0;
+ m = q < s.read ? s.read - q - 1 : s.end - q;
+ }
+ if (m === 0) {
+ s.write = q;
+ r = s.inflate_flush(z, r);
+ q = s.write;
+ m = q < s.read ? s.read - q - 1 : s.end - q;
+
+ if (q == s.end && s.read !== 0) {
+ q = 0;
+ m = q < s.read ? s.read - q - 1 : s.end - q;
+ }
+
+ if (m === 0) {
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+ return s.inflate_flush(z, r);
+ }
+ }
+ }
+
+ s.window[q++] = s.window[f++];
+ m--;
+
+ if (f == s.end)
+ f = 0;
+ len--;
+ }
+ mode = START;
+ break;
+ case LIT: // o: got literal, waiting for output space
+ if (m === 0) {
+ if (q == s.end && s.read !== 0) {
+ q = 0;
+ m = q < s.read ? s.read - q - 1 : s.end - q;
+ }
+ if (m === 0) {
+ s.write = q;
+ r = s.inflate_flush(z, r);
+ q = s.write;
+ m = q < s.read ? s.read - q - 1 : s.end - q;
+
+ if (q == s.end && s.read !== 0) {
+ q = 0;
+ m = q < s.read ? s.read - q - 1 : s.end - q;
+ }
+ if (m === 0) {
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+ return s.inflate_flush(z, r);
+ }
+ }
+ }
+ r = Z_OK;
+
+ s.window[q++] = /* (byte) */lit;
+ m--;
+
+ mode = START;
+ break;
+ case WASH: // o: got eob, possibly more output
+ if (k > 7) { // return unused byte, if any
+ k -= 8;
+ n++;
+ p--; // can always return one
+ }
+
+ s.write = q;
+ r = s.inflate_flush(z, r);
+ q = s.write;
+ m = q < s.read ? s.read - q - 1 : s.end - q;
+
+ if (s.read != s.write) {
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+ return s.inflate_flush(z, r);
+ }
+ mode = END;
+ // falls through to END
+ case END:
+ r = Z_STREAM_END;
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+ return s.inflate_flush(z, r);
+
+ case BADCODE: // x: got error
+
+ r = Z_DATA_ERROR;
+
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+ return s.inflate_flush(z, r);
+
+ default:
+ r = Z_STREAM_ERROR;
+
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+ return s.inflate_flush(z, r);
+ }
+ }
+ };
+
+ that.free = function() {
+ // ZFREE(z, c);
+ };
+
+ }
+
+ // InfBlocks
+
+ // Table for deflate from PKZIP's appnote.txt.
+ var border = [ // Order of the bit length code lengths
+ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 ];
+
+ // Modes for the InfBlocks state machine.
+ var TYPE = 0; // get type bits (3, including end bit)
+ var LENS = 1; // get lengths for stored
+ var STORED = 2;// processing stored block
+ var TABLE = 3; // get table lengths
+ var BTREE = 4; // get bit lengths tree for a dynamic
+ // block
+ var DTREE = 5; // get length, distance trees for a
+ // dynamic block
+ var CODES = 6; // processing fixed or dynamic block
+ var DRY = 7; // output remaining window bytes
+ var DONELOCKS = 8; // finished last block, done
+ var BADBLOCKS = 9; // got a data error--stuck here
+
+ function InfBlocks(z, w) {
+ var that = this;
+
+ var mode = TYPE; // current inflate_block mode
+
+ var left = 0; // if STORED, bytes left to copy
+
+ var table = 0; // table lengths (14 bits)
+ var index = 0; // index into blens (or border)
+ var blens; // bit lengths of codes
+ var bb = [ 0 ]; // bit length tree depth
+ var tb = [ 0 ]; // bit length decoding tree
+
+ var codes = new InfCodes(); // if CODES, current state
+
+ var last = 0; // true if this block is the last block
+
+ var hufts = new Int32Array(MANY * 3); // single malloc for tree space
+ var check = 0; // check on output
+ var inftree = new InfTree();
+
+ that.bitk = 0; // bits in bit buffer
+ that.bitb = 0; // bit buffer
+ that.window = new Uint8Array(w); // sliding window
+ that.end = w; // one byte after sliding window
+ that.read = 0; // window read pointer
+ that.write = 0; // window write pointer
+
+ that.reset = function(z, c) {
+ if (c)
+ c[0] = check;
+ // if (mode == BTREE || mode == DTREE) {
+ // }
+ if (mode == CODES) {
+ codes.free(z);
+ }
+ mode = TYPE;
+ that.bitk = 0;
+ that.bitb = 0;
+ that.read = that.write = 0;
+ };
+
+ that.reset(z, null);
+
+ // copy as much as possible from the sliding window to the output area
+ that.inflate_flush = function(z, r) {
+ var n;
+ var p;
+ var q;
+
+ // local copies of source and destination pointers
+ p = z.next_out_index;
+ q = that.read;
+
+ // compute number of bytes to copy as far as end of window
+ n = /* (int) */((q <= that.write ? that.write : that.end) - q);
+ if (n > z.avail_out)
+ n = z.avail_out;
+ if (n !== 0 && r == Z_BUF_ERROR)
+ r = Z_OK;
+
+ // update counters
+ z.avail_out -= n;
+ z.total_out += n;
+
+ // copy as far as end of window
+ z.next_out.set(that.window.subarray(q, q + n), p);
+ p += n;
+ q += n;
+
+ // see if more to copy at beginning of window
+ if (q == that.end) {
+ // wrap pointers
+ q = 0;
+ if (that.write == that.end)
+ that.write = 0;
+
+ // compute bytes to copy
+ n = that.write - q;
+ if (n > z.avail_out)
+ n = z.avail_out;
+ if (n !== 0 && r == Z_BUF_ERROR)
+ r = Z_OK;
+
+ // update counters
+ z.avail_out -= n;
+ z.total_out += n;
+
+ // copy
+ z.next_out.set(that.window.subarray(q, q + n), p);
+ p += n;
+ q += n;
+ }
+
+ // update pointers
+ z.next_out_index = p;
+ that.read = q;
+
+ // done
+ return r;
+ };
+
+ that.proc = function(z, r) {
+ var t; // temporary storage
+ var b; // bit buffer
+ var k; // bits in bit buffer
+ var p; // input data pointer
+ var n; // bytes available there
+ var q; // output window write pointer
+ var m; // bytes to end of window or read pointer
+
+ var i;
+
+ // copy input/output information to locals (UPDATE macro restores)
+ // {
+ p = z.next_in_index;
+ n = z.avail_in;
+ b = that.bitb;
+ k = that.bitk;
+ // }
+ // {
+ q = that.write;
+ m = /* (int) */(q < that.read ? that.read - q - 1 : that.end - q);
+ // }
+
+ // process input based on current state
+ // DEBUG dtree
+ while (true) {
+ switch (mode) {
+ case TYPE:
+
+ while (k < (3)) {
+ if (n !== 0) {
+ r = Z_OK;
+ } else {
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ }
+ n--;
+ b |= (z.read_byte(p++) & 0xff) << k;
+ k += 8;
+ }
+ t = /* (int) */(b & 7);
+ last = t & 1;
+
+ switch (t >>> 1) {
+ case 0: // stored
+ // {
+ b >>>= (3);
+ k -= (3);
+ // }
+ t = k & 7; // go to byte boundary
+
+ // {
+ b >>>= (t);
+ k -= (t);
+ // }
+ mode = LENS; // get length of stored block
+ break;
+ case 1: // fixed
+ // {
+ var bl = []; // new Array(1);
+ var bd = []; // new Array(1);
+ var tl = [ [] ]; // new Array(1);
+ var td = [ [] ]; // new Array(1);
+
+ InfTree.inflate_trees_fixed(bl, bd, tl, td);
+ codes.init(bl[0], bd[0], tl[0], 0, td[0], 0);
+ // }
+
+ // {
+ b >>>= (3);
+ k -= (3);
+ // }
+
+ mode = CODES;
+ break;
+ case 2: // dynamic
+
+ // {
+ b >>>= (3);
+ k -= (3);
+ // }
+
+ mode = TABLE;
+ break;
+ case 3: // illegal
+
+ // {
+ b >>>= (3);
+ k -= (3);
+ // }
+ mode = BADBLOCKS;
+ z.msg = "invalid block type";
+ r = Z_DATA_ERROR;
+
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ }
+ break;
+ case LENS:
+
+ while (k < (32)) {
+ if (n !== 0) {
+ r = Z_OK;
+ } else {
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ }
+ n--;
+ b |= (z.read_byte(p++) & 0xff) << k;
+ k += 8;
+ }
+
+ if ((((~b) >>> 16) & 0xffff) != (b & 0xffff)) {
+ mode = BADBLOCKS;
+ z.msg = "invalid stored block lengths";
+ r = Z_DATA_ERROR;
+
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ }
+ left = (b & 0xffff);
+ b = k = 0; // dump bits
+ mode = left !== 0 ? STORED : (last !== 0 ? DRY : TYPE);
+ break;
+ case STORED:
+ if (n === 0) {
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ }
+
+ if (m === 0) {
+ if (q == that.end && that.read !== 0) {
+ q = 0;
+ m = /* (int) */(q < that.read ? that.read - q - 1 : that.end - q);
+ }
+ if (m === 0) {
+ that.write = q;
+ r = that.inflate_flush(z, r);
+ q = that.write;
+ m = /* (int) */(q < that.read ? that.read - q - 1 : that.end - q);
+ if (q == that.end && that.read !== 0) {
+ q = 0;
+ m = /* (int) */(q < that.read ? that.read - q - 1 : that.end - q);
+ }
+ if (m === 0) {
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ }
+ }
+ }
+ r = Z_OK;
+
+ t = left;
+ if (t > n)
+ t = n;
+ if (t > m)
+ t = m;
+ that.window.set(z.read_buf(p, t), q);
+ p += t;
+ n -= t;
+ q += t;
+ m -= t;
+ if ((left -= t) !== 0)
+ break;
+ mode = last !== 0 ? DRY : TYPE;
+ break;
+ case TABLE:
+
+ while (k < (14)) {
+ if (n !== 0) {
+ r = Z_OK;
+ } else {
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ }
+
+ n--;
+ b |= (z.read_byte(p++) & 0xff) << k;
+ k += 8;
+ }
+
+ table = t = (b & 0x3fff);
+ if ((t & 0x1f) > 29 || ((t >> 5) & 0x1f) > 29) {
+ mode = BADBLOCKS;
+ z.msg = "too many length or distance symbols";
+ r = Z_DATA_ERROR;
+
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ }
+ t = 258 + (t & 0x1f) + ((t >> 5) & 0x1f);
+ if (!blens || blens.length < t) {
+ blens = []; // new Array(t);
+ } else {
+ for (i = 0; i < t; i++) {
+ blens[i] = 0;
+ }
+ }
+
+ // {
+ b >>>= (14);
+ k -= (14);
+ // }
+
+ index = 0;
+ mode = BTREE;
+ case BTREE:
+ while (index < 4 + (table >>> 10)) {
+ while (k < (3)) {
+ if (n !== 0) {
+ r = Z_OK;
+ } else {
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ }
+ n--;
+ b |= (z.read_byte(p++) & 0xff) << k;
+ k += 8;
+ }
+
+ blens[border[index++]] = b & 7;
+
+ // {
+ b >>>= (3);
+ k -= (3);
+ // }
+ }
+
+ while (index < 19) {
+ blens[border[index++]] = 0;
+ }
+
+ bb[0] = 7;
+ t = inftree.inflate_trees_bits(blens, bb, tb, hufts, z);
+ if (t != Z_OK) {
+ r = t;
+ if (r == Z_DATA_ERROR) {
+ blens = null;
+ mode = BADBLOCKS;
+ }
+
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ }
+
+ index = 0;
+ mode = DTREE;
+ case DTREE:
+ while (true) {
+ t = table;
+ if (!(index < 258 + (t & 0x1f) + ((t >> 5) & 0x1f))) {
+ break;
+ }
+
+ var j, c;
+
+ t = bb[0];
+
+ while (k < (t)) {
+ if (n !== 0) {
+ r = Z_OK;
+ } else {
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ }
+ n--;
+ b |= (z.read_byte(p++) & 0xff) << k;
+ k += 8;
+ }
+
+ // if (tb[0] == -1) {
+ // System.err.println("null...");
+ // }
+
+ t = hufts[(tb[0] + (b & inflate_mask[t])) * 3 + 1];
+ c = hufts[(tb[0] + (b & inflate_mask[t])) * 3 + 2];
+
+ if (c < 16) {
+ b >>>= (t);
+ k -= (t);
+ blens[index++] = c;
+ } else { // c == 16..18
+ i = c == 18 ? 7 : c - 14;
+ j = c == 18 ? 11 : 3;
+
+ while (k < (t + i)) {
+ if (n !== 0) {
+ r = Z_OK;
+ } else {
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ }
+ n--;
+ b |= (z.read_byte(p++) & 0xff) << k;
+ k += 8;
+ }
+
+ b >>>= (t);
+ k -= (t);
+
+ j += (b & inflate_mask[i]);
+
+ b >>>= (i);
+ k -= (i);
+
+ i = index;
+ t = table;
+ if (i + j > 258 + (t & 0x1f) + ((t >> 5) & 0x1f) || (c == 16 && i < 1)) {
+ blens = null;
+ mode = BADBLOCKS;
+ z.msg = "invalid bit length repeat";
+ r = Z_DATA_ERROR;
+
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ }
+
+ c = c == 16 ? blens[i - 1] : 0;
+ do {
+ blens[i++] = c;
+ } while (--j !== 0);
+ index = i;
+ }
+ }
+
+ tb[0] = -1;
+ // {
+ var bl_ = []; // new Array(1);
+ var bd_ = []; // new Array(1);
+ var tl_ = []; // new Array(1);
+ var td_ = []; // new Array(1);
+ bl_[0] = 9; // must be <= 9 for lookahead assumptions
+ bd_[0] = 6; // must be <= 9 for lookahead assumptions
+
+ t = table;
+ t = inftree.inflate_trees_dynamic(257 + (t & 0x1f), 1 + ((t >> 5) & 0x1f), blens, bl_, bd_, tl_, td_, hufts, z);
+
+ if (t != Z_OK) {
+ if (t == Z_DATA_ERROR) {
+ blens = null;
+ mode = BADBLOCKS;
+ }
+ r = t;
+
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ }
+ codes.init(bl_[0], bd_[0], hufts, tl_[0], hufts, td_[0]);
+ // }
+ mode = CODES;
+ case CODES:
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+
+ if ((r = codes.proc(that, z, r)) != Z_STREAM_END) {
+ return that.inflate_flush(z, r);
+ }
+ r = Z_OK;
+ codes.free(z);
+
+ p = z.next_in_index;
+ n = z.avail_in;
+ b = that.bitb;
+ k = that.bitk;
+ q = that.write;
+ m = /* (int) */(q < that.read ? that.read - q - 1 : that.end - q);
+
+ if (last === 0) {
+ mode = TYPE;
+ break;
+ }
+ mode = DRY;
+ case DRY:
+ that.write = q;
+ r = that.inflate_flush(z, r);
+ q = that.write;
+ m = /* (int) */(q < that.read ? that.read - q - 1 : that.end - q);
+ if (that.read != that.write) {
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ }
+ mode = DONELOCKS;
+ case DONELOCKS:
+ r = Z_STREAM_END;
+
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ case BADBLOCKS:
+ r = Z_DATA_ERROR;
+
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+
+ default:
+ r = Z_STREAM_ERROR;
+
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ }
+ }
+ };
+
+ that.free = function(z) {
+ that.reset(z, null);
+ that.window = null;
+ hufts = null;
+ // ZFREE(z, s);
+ };
+
+ that.set_dictionary = function(d, start, n) {
+ that.window.set(d.subarray(start, start + n), 0);
+ that.read = that.write = n;
+ };
+
+ // Returns true if inflate is currently at the end of a block generated
+ // by Z_SYNC_FLUSH or Z_FULL_FLUSH.
+ that.sync_point = function() {
+ return mode == LENS ? 1 : 0;
+ };
+
+ }
+
+ // Inflate
+
+ // preset dictionary flag in zlib header
+ var PRESET_DICT = 0x20;
+
+ var Z_DEFLATED = 8;
+
+ var METHOD = 0; // waiting for method byte
+ var FLAG = 1; // waiting for flag byte
+ var DICT4 = 2; // four dictionary check bytes to go
+ var DICT3 = 3; // three dictionary check bytes to go
+ var DICT2 = 4; // two dictionary check bytes to go
+ var DICT1 = 5; // one dictionary check byte to go
+ var DICT0 = 6; // waiting for inflateSetDictionary
+ var BLOCKS = 7; // decompressing blocks
+ var DONE = 12; // finished check, done
+ var BAD = 13; // got an error--stay here
+
+ var mark = [ 0, 0, 0xff, 0xff ];
+
+ function Inflate() {
+ var that = this;
+
+ that.mode = 0; // current inflate mode
+
+ // mode dependent information
+ that.method = 0; // if FLAGS, method byte
+
+ // if CHECK, check values to compare
+ that.was = [ 0 ]; // new Array(1); // computed check value
+ that.need = 0; // stream check value
+
+ // if BAD, inflateSync's marker bytes count
+ that.marker = 0;
+
+ // mode independent information
+ that.wbits = 0; // log2(window size) (8..15, defaults to 15)
+
+ // this.blocks; // current inflate_blocks state
+
+ function inflateReset(z) {
+ if (!z || !z.istate)
+ return Z_STREAM_ERROR;
+
+ z.total_in = z.total_out = 0;
+ z.msg = null;
+ z.istate.mode = BLOCKS;
+ z.istate.blocks.reset(z, null);
+ return Z_OK;
+ }
+
+ that.inflateEnd = function(z) {
+ if (that.blocks)
+ that.blocks.free(z);
+ that.blocks = null;
+ // ZFREE(z, z->state);
+ return Z_OK;
+ };
+
+ that.inflateInit = function(z, w) {
+ z.msg = null;
+ that.blocks = null;
+
+ // set window size
+ if (w < 8 || w > 15) {
+ that.inflateEnd(z);
+ return Z_STREAM_ERROR;
+ }
+ that.wbits = w;
+
+ z.istate.blocks = new InfBlocks(z, 1 << w);
+
+ // reset state
+ inflateReset(z);
+ return Z_OK;
+ };
+
+ that.inflate = function(z, f) {
+ var r;
+ var b;
+
+ if (!z || !z.istate || !z.next_in)
+ return Z_STREAM_ERROR;
+ f = f == Z_FINISH ? Z_BUF_ERROR : Z_OK;
+ r = Z_BUF_ERROR;
+ while (true) {
+ // System.out.println("mode: "+z.istate.mode);
+ switch (z.istate.mode) {
+ case METHOD:
+
+ if (z.avail_in === 0)
+ return r;
+ r = f;
+
+ z.avail_in--;
+ z.total_in++;
+ if (((z.istate.method = z.read_byte(z.next_in_index++)) & 0xf) != Z_DEFLATED) {
+ z.istate.mode = BAD;
+ z.msg = "unknown compression method";
+ z.istate.marker = 5; // can't try inflateSync
+ break;
+ }
+ if ((z.istate.method >> 4) + 8 > z.istate.wbits) {
+ z.istate.mode = BAD;
+ z.msg = "invalid window size";
+ z.istate.marker = 5; // can't try inflateSync
+ break;
+ }
+ z.istate.mode = FLAG;
+ case FLAG:
+
+ if (z.avail_in === 0)
+ return r;
+ r = f;
+
+ z.avail_in--;
+ z.total_in++;
+ b = (z.read_byte(z.next_in_index++)) & 0xff;
+
+ if ((((z.istate.method << 8) + b) % 31) !== 0) {
+ z.istate.mode = BAD;
+ z.msg = "incorrect header check";
+ z.istate.marker = 5; // can't try inflateSync
+ break;
+ }
+
+ if ((b & PRESET_DICT) === 0) {
+ z.istate.mode = BLOCKS;
+ break;
+ }
+ z.istate.mode = DICT4;
+ case DICT4:
+
+ if (z.avail_in === 0)
+ return r;
+ r = f;
+
+ z.avail_in--;
+ z.total_in++;
+ z.istate.need = ((z.read_byte(z.next_in_index++) & 0xff) << 24) & 0xff000000;
+ z.istate.mode = DICT3;
+ case DICT3:
+
+ if (z.avail_in === 0)
+ return r;
+ r = f;
+
+ z.avail_in--;
+ z.total_in++;
+ z.istate.need += ((z.read_byte(z.next_in_index++) & 0xff) << 16) & 0xff0000;
+ z.istate.mode = DICT2;
+ case DICT2:
+
+ if (z.avail_in === 0)
+ return r;
+ r = f;
+
+ z.avail_in--;
+ z.total_in++;
+ z.istate.need += ((z.read_byte(z.next_in_index++) & 0xff) << 8) & 0xff00;
+ z.istate.mode = DICT1;
+ case DICT1:
+
+ if (z.avail_in === 0)
+ return r;
+ r = f;
+
+ z.avail_in--;
+ z.total_in++;
+ z.istate.need += (z.read_byte(z.next_in_index++) & 0xff);
+ z.istate.mode = DICT0;
+ return Z_NEED_DICT;
+ case DICT0:
+ z.istate.mode = BAD;
+ z.msg = "need dictionary";
+ z.istate.marker = 0; // can try inflateSync
+ return Z_STREAM_ERROR;
+ case BLOCKS:
+
+ r = z.istate.blocks.proc(z, r);
+ if (r == Z_DATA_ERROR) {
+ z.istate.mode = BAD;
+ z.istate.marker = 0; // can try inflateSync
+ break;
+ }
+ if (r == Z_OK) {
+ r = f;
+ }
+ if (r != Z_STREAM_END) {
+ return r;
+ }
+ r = f;
+ z.istate.blocks.reset(z, z.istate.was);
+ z.istate.mode = DONE;
+ case DONE:
+ return Z_STREAM_END;
+ case BAD:
+ return Z_DATA_ERROR;
+ default:
+ return Z_STREAM_ERROR;
+ }
+ }
+ };
+
+ that.inflateSetDictionary = function(z, dictionary, dictLength) {
+ var index = 0;
+ var length = dictLength;
+ if (!z || !z.istate || z.istate.mode != DICT0)
+ return Z_STREAM_ERROR;
+
+ if (length >= (1 << z.istate.wbits)) {
+ length = (1 << z.istate.wbits) - 1;
+ index = dictLength - length;
+ }
+ z.istate.blocks.set_dictionary(dictionary, index, length);
+ z.istate.mode = BLOCKS;
+ return Z_OK;
+ };
+
+ that.inflateSync = function(z) {
+ var n; // number of bytes to look at
+ var p; // pointer to bytes
+ var m; // number of marker bytes found in a row
+ var r, w; // temporaries to save total_in and total_out
+
+ // set up
+ if (!z || !z.istate)
+ return Z_STREAM_ERROR;
+ if (z.istate.mode != BAD) {
+ z.istate.mode = BAD;
+ z.istate.marker = 0;
+ }
+ if ((n = z.avail_in) === 0)
+ return Z_BUF_ERROR;
+ p = z.next_in_index;
+ m = z.istate.marker;
+
+ // search
+ while (n !== 0 && m < 4) {
+ if (z.read_byte(p) == mark[m]) {
+ m++;
+ } else if (z.read_byte(p) !== 0) {
+ m = 0;
+ } else {
+ m = 4 - m;
+ }
+ p++;
+ n--;
+ }
+
+ // restore
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ z.avail_in = n;
+ z.istate.marker = m;
+
+ // return no joy or set up to restart on a new block
+ if (m != 4) {
+ return Z_DATA_ERROR;
+ }
+ r = z.total_in;
+ w = z.total_out;
+ inflateReset(z);
+ z.total_in = r;
+ z.total_out = w;
+ z.istate.mode = BLOCKS;
+ return Z_OK;
+ };
+
+ // Returns true if inflate is currently at the end of a block generated
+ // by Z_SYNC_FLUSH or Z_FULL_FLUSH. This function is used by one PPP
+ // implementation to provide an additional safety check. PPP uses
+ // Z_SYNC_FLUSH
+ // but removes the length bytes of the resulting empty stored block. When
+ // decompressing, PPP checks that at the end of input packet, inflate is
+ // waiting for these length bytes.
+ that.inflateSyncPoint = function(z) {
+ if (!z || !z.istate || !z.istate.blocks)
+ return Z_STREAM_ERROR;
+ return z.istate.blocks.sync_point();
+ };
+ }
+
+ // ZStream
+
+ function ZStream() {
+ }
+
+ ZStream.prototype = {
+ inflateInit : function(bits) {
+ var that = this;
+ that.istate = new Inflate();
+ if (!bits)
+ bits = MAX_BITS;
+ return that.istate.inflateInit(that, bits);
+ },
+
+ inflate : function(f) {
+ var that = this;
+ if (!that.istate)
+ return Z_STREAM_ERROR;
+ return that.istate.inflate(that, f);
+ },
+
+ inflateEnd : function() {
+ var that = this;
+ if (!that.istate)
+ return Z_STREAM_ERROR;
+ var ret = that.istate.inflateEnd(that);
+ that.istate = null;
+ return ret;
+ },
+
+ inflateSync : function() {
+ var that = this;
+ if (!that.istate)
+ return Z_STREAM_ERROR;
+ return that.istate.inflateSync(that);
+ },
+ inflateSetDictionary : function(dictionary, dictLength) {
+ var that = this;
+ if (!that.istate)
+ return Z_STREAM_ERROR;
+ return that.istate.inflateSetDictionary(that, dictionary, dictLength);
+ },
+ read_byte : function(start) {
+ var that = this;
+ return that.next_in.subarray(start, start + 1)[0];
+ },
+ read_buf : function(start, size) {
+ var that = this;
+ return that.next_in.subarray(start, start + size);
+ }
+ };
+
+ // Inflater
+
+ function Inflater() {
+ var that = this;
+ var z = new ZStream();
+ var bufsize = 512;
+ var flush = Z_NO_FLUSH;
+ var buf = new Uint8Array(bufsize);
+ var nomoreinput = false;
+
+ z.inflateInit();
+ z.next_out = buf;
+
+ that.append = function(data, onprogress) {
+ var err, buffers = [], lastIndex = 0, bufferIndex = 0, bufferSize = 0, array;
+ if (data.length === 0)
+ return;
+ z.next_in_index = 0;
+ z.next_in = data;
+ z.avail_in = data.length;
+ do {
+ z.next_out_index = 0;
+ z.avail_out = bufsize;
+ if ((z.avail_in === 0) && (!nomoreinput)) { // input exhausted: record that no more input is coming
+ z.next_in_index = 0;
+ nomoreinput = true;
+ }
+ err = z.inflate(flush);
+ if (nomoreinput && (err == Z_BUF_ERROR))
+ return -1;
+ if (err != Z_OK && err != Z_STREAM_END)
+ throw "inflating: " + z.msg;
+ if ((nomoreinput || err == Z_STREAM_END) && (z.avail_in == data.length))
+ return -1;
+ if (z.next_out_index)
+ if (z.next_out_index == bufsize)
+ buffers.push(new Uint8Array(buf));
+ else
+ buffers.push(new Uint8Array(buf.subarray(0, z.next_out_index)));
+ bufferSize += z.next_out_index;
+ if (onprogress && z.next_in_index > 0 && z.next_in_index != lastIndex) {
+ onprogress(z.next_in_index);
+ lastIndex = z.next_in_index;
+ }
+ } while (z.avail_in > 0 || z.avail_out === 0);
+ array = new Uint8Array(bufferSize);
+ buffers.forEach(function(chunk) {
+ array.set(chunk, bufferIndex);
+ bufferIndex += chunk.length;
+ });
+ return array;
+ };
+ that.flush = function() {
+ z.inflateEnd();
+ };
+ }
+
+ var inflater;
+
+ if (obj.zip)
+ obj.zip.Inflater = Inflater;
+ else {
+ inflater = new Inflater();
+ obj.addEventListener("message", function(event) {
+ var message = event.data;
+
+ if (message.append)
+ obj.postMessage({
+ onappend : true,
+ data : inflater.append(message.data, function(current) {
+ obj.postMessage({
+ progress : true,
+ current : current
+ });
+ })
+ });
+ if (message.flush) {
+ inflater.flush();
+ obj.postMessage({
+ onflush : true
+ });
+ }
+ }, false);
+ }
+
+})(EasyDeflate);
* For best performance, call this once only for all tests. However, it can
* be called at the start of each test if more isolation is desired.
*
- * @todo: This is basically an unrefactored copy of
+ * @todo This is basically an unrefactored copy of
* MediaWikiTestCase::setupAllTestDBs. They should be factored out somehow.
*
* Do not call this function from a MediaWikiTestCase subclass, since
parent::setUp();
+ $this->setContentLang( $wgContLang );
+
$this->setMwGlobals( [
'wgExtraNamespaces' => $wgExtraNamespaces,
'wgNamespaceContentModels' => $wgNamespaceContentModels,
'wgContentHandlers' => $wgContentHandlers,
- 'wgContLang' => $wgContLang,
] );
$wgExtraNamespaces[12312] = 'Dummy';
--- /dev/null
+<?php
+/**
+ * @group GlobalFunctions
+ * @covers ::wfExpandUrl
+ */
+class WfExpandUrlTest extends MediaWikiTestCase {
+ /**
+ * @dataProvider provideExpandableUrls
+ */
+ public function testWfExpandUrl( $fullUrl, $shortUrl, $defaultProto,
+ $server, $canServer, $httpsMode, $httpsPort, $message
+ ) {
+ // Fake $wgServer, $wgCanonicalServer and $wgRequest->getProtocol()
+ // fake edit to fake globals
+ $this->setMwGlobals( [
+ 'wgServer' => $server,
+ 'wgCanonicalServer' => $canServer,
+ 'wgRequest' => new FauxRequest( [], false, null, $httpsMode ? 'https' : 'http' ),
+ 'wgHttpsPort' => $httpsPort
+ ] );
+
+ $this->assertEquals( $fullUrl, wfExpandUrl( $shortUrl, $defaultProto ), $message );
+ }
+
+ /**
+ * Provider of URL examples for testing wfExpandUrl()
+ *
+ * @return array
+ */
+ public static function provideExpandableUrls() {
+ $modes = [ 'http', 'https' ];
+ $servers = [
+ 'http' => 'http://example.com',
+ 'https' => 'https://example.com',
+ 'protocol-relative' => '//example.com'
+ ];
+ $defaultProtos = [
+ 'http' => PROTO_HTTP,
+ 'https' => PROTO_HTTPS,
+ 'protocol-relative' => PROTO_RELATIVE,
+ 'current' => PROTO_CURRENT,
+ 'canonical' => PROTO_CANONICAL
+ ];
+
+ $retval = [];
+ foreach ( $modes as $mode ) {
+ $httpsMode = $mode == 'https';
+ foreach ( $servers as $serverDesc => $server ) {
+ foreach ( $modes as $canServerMode ) {
+ $canServer = "$canServerMode://example2.com";
+ foreach ( $defaultProtos as $protoDesc => $defaultProto ) {
+ $retval[] = [
+ 'http://example.com', 'http://example.com',
+ $defaultProto, $server, $canServer, $httpsMode, 443,
+ "Testing fully qualified http URLs (no need to expand) "
+ . "(defaultProto: $protoDesc , wgServer: $server, "
+ . "wgCanonicalServer: $canServer, current request protocol: $mode )"
+ ];
+ $retval[] = [
+ 'https://example.com', 'https://example.com',
+ $defaultProto, $server, $canServer, $httpsMode, 443,
+ "Testing fully qualified https URLs (no need to expand) "
+ . "(defaultProto: $protoDesc , wgServer: $server, "
+ . "wgCanonicalServer: $canServer, current request protocol: $mode )"
+ ];
+ # Would be nice to support this, see fixme on wfExpandUrl()
+ $retval[] = [
+ "wiki/FooBar", 'wiki/FooBar',
+ $defaultProto, $server, $canServer, $httpsMode, 443,
+ "Test non-expandable relative URLs (defaultProto: $protoDesc, "
+ . "wgServer: $server, wgCanonicalServer: $canServer, "
+ . "current request protocol: $mode )"
+ ];
+
+ // Determine expected protocol
+ if ( $protoDesc == 'protocol-relative' ) {
+ $p = '';
+ } elseif ( $protoDesc == 'current' ) {
+ $p = "$mode:";
+ } elseif ( $protoDesc == 'canonical' ) {
+ $p = "$canServerMode:";
+ } else {
+ $p = $protoDesc . ':';
+ }
+ // Determine expected server name
+ if ( $protoDesc == 'canonical' ) {
+ $srv = $canServer;
+ } elseif ( $serverDesc == 'protocol-relative' ) {
+ $srv = $p . $server;
+ } else {
+ $srv = $server;
+ }
+
+ $retval[] = [
+ "$p//wikipedia.org", '//wikipedia.org',
+ $defaultProto, $server, $canServer, $httpsMode, 443,
+ "Test protocol-relative URL (defaultProto: $protoDesc, "
+ . "wgServer: $server, wgCanonicalServer: $canServer, "
+ . "current request protocol: $mode )"
+ ];
+ $retval[] = [
+ "$srv/wiki/FooBar",
+ '/wiki/FooBar',
+ $defaultProto,
+ $server,
+ $canServer,
+ $httpsMode,
+ 443,
+ "Testing expanding URL beginning with / (defaultProto: $protoDesc, "
+ . "wgServer: $server, wgCanonicalServer: $canServer, "
+ . "current request protocol: $mode )"
+ ];
+ }
+ }
+ }
+ }
+
+ // Don't add HTTPS port to foreign URLs
+ $retval[] = [
+ 'https://foreign.example.com/foo',
+ 'https://foreign.example.com/foo',
+ PROTO_HTTPS,
+ '//wiki.example.com',
+ 'http://wiki.example.com',
+ 'https',
+ 111,
+ "Don't add HTTPS port to foreign URLs"
+ ];
+ $retval[] = [
+ 'https://foreign.example.com:222/foo',
+ 'https://foreign.example.com:222/foo',
+ PROTO_HTTPS,
+ '//wiki.example.com',
+ 'http://wiki.example.com',
+ 'https',
+ 111,
+ "Don't overwrite HTTPS port of foreign URLs"
+ ];
+ // Do add HTTPS port to local URLs
+ $retval[] = [
+ 'https://wiki.example.com:111/foo',
+ '/foo',
+ PROTO_HTTPS,
+ '//wiki.example.com',
+ 'http://wiki.example.com',
+ 'https',
+ 111,
+ "Do add HTTPS port to protocol-relative URLs"
+ ];
+
+ return $retval;
+ }
+}
+++ /dev/null
-<?php
-/**
- * @group GlobalFunctions
- * @covers ::wfExpandUrl
- */
-class WfExpandUrlTest extends MediaWikiTestCase {
- /**
- * @dataProvider provideExpandableUrls
- */
- public function testWfExpandUrl( $fullUrl, $shortUrl, $defaultProto,
- $server, $canServer, $httpsMode, $httpsPort, $message
- ) {
- // Fake $wgServer, $wgCanonicalServer and $wgRequest->getProtocol()
- $this->setMwGlobals( [
- 'wgServer' => $server,
- 'wgCanonicalServer' => $canServer,
- 'wgRequest' => new FauxRequest( [], false, null, $httpsMode ? 'https' : 'http' ),
- 'wgHttpsPort' => $httpsPort
- ] );
-
- $this->assertEquals( $fullUrl, wfExpandUrl( $shortUrl, $defaultProto ), $message );
- }
-
- /**
- * Provider of URL examples for testing wfExpandUrl()
- *
- * @return array
- */
- public static function provideExpandableUrls() {
- $modes = [ 'http', 'https' ];
- $servers = [
- 'http' => 'http://example.com',
- 'https' => 'https://example.com',
- 'protocol-relative' => '//example.com'
- ];
- $defaultProtos = [
- 'http' => PROTO_HTTP,
- 'https' => PROTO_HTTPS,
- 'protocol-relative' => PROTO_RELATIVE,
- 'current' => PROTO_CURRENT,
- 'canonical' => PROTO_CANONICAL
- ];
-
- $retval = [];
- foreach ( $modes as $mode ) {
- $httpsMode = $mode == 'https';
- foreach ( $servers as $serverDesc => $server ) {
- foreach ( $modes as $canServerMode ) {
- $canServer = "$canServerMode://example2.com";
- foreach ( $defaultProtos as $protoDesc => $defaultProto ) {
- $retval[] = [
- 'http://example.com', 'http://example.com',
- $defaultProto, $server, $canServer, $httpsMode, 443,
- "Testing fully qualified http URLs (no need to expand) "
- . "(defaultProto: $protoDesc , wgServer: $server, "
- . "wgCanonicalServer: $canServer, current request protocol: $mode )"
- ];
- $retval[] = [
- 'https://example.com', 'https://example.com',
- $defaultProto, $server, $canServer, $httpsMode, 443,
- "Testing fully qualified https URLs (no need to expand) "
- . "(defaultProto: $protoDesc , wgServer: $server, "
- . "wgCanonicalServer: $canServer, current request protocol: $mode )"
- ];
- # Would be nice to support this, see fixme on wfExpandUrl()
- $retval[] = [
- "wiki/FooBar", 'wiki/FooBar',
- $defaultProto, $server, $canServer, $httpsMode, 443,
- "Test non-expandable relative URLs (defaultProto: $protoDesc, "
- . "wgServer: $server, wgCanonicalServer: $canServer, "
- . "current request protocol: $mode )"
- ];
-
- // Determine expected protocol
- if ( $protoDesc == 'protocol-relative' ) {
- $p = '';
- } elseif ( $protoDesc == 'current' ) {
- $p = "$mode:";
- } elseif ( $protoDesc == 'canonical' ) {
- $p = "$canServerMode:";
- } else {
- $p = $protoDesc . ':';
- }
- // Determine expected server name
- if ( $protoDesc == 'canonical' ) {
- $srv = $canServer;
- } elseif ( $serverDesc == 'protocol-relative' ) {
- $srv = $p . $server;
- } else {
- $srv = $server;
- }
-
- $retval[] = [
- "$p//wikipedia.org", '//wikipedia.org',
- $defaultProto, $server, $canServer, $httpsMode, 443,
- "Test protocol-relative URL (defaultProto: $protoDesc, "
- . "wgServer: $server, wgCanonicalServer: $canServer, "
- . "current request protocol: $mode )"
- ];
- $retval[] = [
- "$srv/wiki/FooBar",
- '/wiki/FooBar',
- $defaultProto,
- $server,
- $canServer,
- $httpsMode,
- 443,
- "Testing expanding URL beginning with / (defaultProto: $protoDesc, "
- . "wgServer: $server, wgCanonicalServer: $canServer, "
- . "current request protocol: $mode )"
- ];
- }
- }
- }
- }
-
- // Don't add HTTPS port to foreign URLs
- $retval[] = [
- 'https://foreign.example.com/foo',
- 'https://foreign.example.com/foo',
- PROTO_HTTPS,
- '//wiki.example.com',
- 'http://wiki.example.com',
- 'https',
- 111,
- "Don't add HTTPS port to foreign URLs"
- ];
- $retval[] = [
- 'https://foreign.example.com:222/foo',
- 'https://foreign.example.com:222/foo',
- PROTO_HTTPS,
- '//wiki.example.com',
- 'http://wiki.example.com',
- 'https',
- 111,
- "Don't overwrite HTTPS port of foreign URLs"
- ];
- // Do add HTTPS port to local URLs
- $retval[] = [
- 'https://wiki.example.com:111/foo',
- '/foo',
- PROTO_HTTPS,
- '//wiki.example.com',
- 'http://wiki.example.com',
- 'https',
- 111,
- "Do add HTTPS port to protocol-relative URLs"
- ];
-
- return $retval;
- }
-}
// This sets the globals and will restore them automatically
// after each test.
+ $this->setContentLang( 'en' );
$this->setMwGlobals( [
- 'wgContLang' => Language::factory( 'en' ),
- 'wgLanguageCode' => 'en',
'wgCapitalLinks' => true,
] );
}
parent::setUp();
+ $this->setContentLang( $wgContLang );
+
$this->setMwGlobals( [
'wgExtraNamespaces' => $wgExtraNamespaces,
'wgNamespaceContentModels' => $wgNamespaceContentModels,
'wgContentHandlers' => $wgContentHandlers,
- 'wgContLang' => $wgContLang,
] );
$wgExtraNamespaces[12312] = 'Dummy';
$context = \RequestContext::getMain();
$reset = new ScopedCallback( [ $context, 'setLanguage' ], [ $context->getLanguage() ] );
$context->setLanguage( 'de' );
- $this->setMwGlobals( 'wgContLang', \Language::factory( 'zh' ) );
+ $this->setContentLang( 'zh' );
$user = \User::newFromName( self::usernameForCreation() );
$user->addToDatabase();
$this->assertSame( 'de', $user->getOption( 'language' ) );
$this->assertSame( 'zh', $user->getOption( 'variant' ) );
- $this->setMwGlobals( 'wgContLang', \Language::factory( 'fr' ) );
+ $this->setContentLang( 'fr' );
$user = \User::newFromName( self::usernameForCreation() );
$user->addToDatabase();
* page.
*/
public function testGetAutosummary() {
- $this->setMwGlobals( 'wgContLang', Language::factory( 'en' ) );
+ $this->setContentLang( 'en' );
$content = new DummyContentHandlerForTesting( CONTENT_MODEL_WIKITEXT );
$title = Title::newFromText( 'Help:Test' );
private function doTestLockCalls() {
$backendName = $this->backendClass();
+ $base = $this->backend->getContainerStoragePath( 'test' );
$paths = [
- "test1.txt",
- "test2.txt",
- "test3.txt",
- "subdir1",
- "subdir1", // duplicate
- "subdir1/test1.txt",
- "subdir1/test2.txt",
- "subdir2",
- "subdir2", // duplicate
- "subdir2/test3.txt",
- "subdir2/test4.txt",
- "subdir2/subdir",
- "subdir2/subdir/test1.txt",
- "subdir2/subdir/test2.txt",
- "subdir2/subdir/test3.txt",
- "subdir2/subdir/test4.txt",
- "subdir2/subdir/test5.txt",
- "subdir2/subdir/sub",
- "subdir2/subdir/sub/test0.txt",
- "subdir2/subdir/sub/120-px-file.txt",
+ "$base/test1.txt",
+ "$base/test2.txt",
+ "$base/test3.txt",
+ "$base/subdir1",
+ "$base/subdir1", // duplicate
+ "$base/subdir1/test1.txt",
+ "$base/subdir1/test2.txt",
+ "$base/subdir2",
+ "$base/subdir2", // duplicate
+ "$base/subdir2/test3.txt",
+ "$base/subdir2/test4.txt",
+ "$base/subdir2/subdir",
+ "$base/subdir2/subdir/test1.txt",
+ "$base/subdir2/subdir/test2.txt",
+ "$base/subdir2/subdir/test3.txt",
+ "$base/subdir2/subdir/test4.txt",
+ "$base/subdir2/subdir/test5.txt",
+ "$base/subdir2/subdir/sub",
+ "$base/subdir2/subdir/sub/test0.txt",
+ "$base/subdir2/subdir/sub/120-px-file.txt",
];
for ( $i = 0; $i < 25; $i++ ) {
--- /dev/null
+<?php
+/**
+ * Copyright (C) 2018 Kunal Mehta <legoktm@member.fsf.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+/**
+ * @covers EasyDeflate
+ */
+class EasyDeflateTest extends PHPUnit\Framework\TestCase {
+
+ public function provideIsDeflated() {
+ return [
+ [ 'rawdeflate,S8vPT0osAgA=', true ],
+ [ 'abcdefghijklmnopqrstuvwxyz', false ],
+ ];
+ }
+
+ /**
+ * @dataProvider provideIsDeflated
+ */
+ public function testIsDeflated( $data, $expected ) {
+ $actual = EasyDeflate::isDeflated( $data );
+ $this->assertSame( $expected, $actual );
+ }
+
+ public function provideInflate() {
+ return [
+ [ 'rawdeflate,S8vPT0osAgA=', true, 'foobar' ],
+ // Fails base64_decode
+ [ 'rawdeflate,🌻', false, 'easydeflate-invaliddeflate' ],
+ // Fails gzinflate
+ [ 'rawdeflate,S8vPT0dfdAgB=', false, 'easydeflate-invaliddeflate' ],
+ ];
+ }
+
+ /**
+ * @dataProvider provideInflate
+ */
+ public function testInflate( $data, $ok, $value ) {
+ $actual = EasyDeflate::inflate( $data );
+ if ( $ok ) {
+ $this->assertTrue( $actual->isOK() );
+ $this->assertSame( $value, $actual->getValue() );
+ } else {
+ $this->assertFalse( $actual->isOK() );
+ $this->assertTrue( $actual->hasMessage( $value ) );
+ }
+ }
+}
parent::setUp();
$contLang = Language::factory( 'en' );
- $this->setMwGlobals( [
- 'wgLanguageCode' => 'en',
- 'wgContLang' => $contLang,
- ] );
+ $this->setContentLang( $contLang );
$this->testParser = new Parser();
$this->testParser->Options( ParserOptions::newFromUserAndLang( new User, $contLang ) );
$wgLang = Language::factory( 'fr' );
$wgContLang = Language::factory( 'qqx' );
+ $this->setContentLang( $wgContLang );
$this->setMwGlobals( [
'wgUser' => $wgUser,
'wgLang' => $wgLang,
- 'wgContLang' => $wgContLang,
] );
$user = $this->getMutableTestUser()->getUser();
* @covers SpecialPageFactory::resolveAlias
*/
public function testResolveAlias() {
- $this->setMwGlobals( 'wgContLang', Language::factory( 'de' ) );
+ $this->setContentLang( 'de' );
SpecialPageFactory::resetList();
list( $name, $param ) = SpecialPageFactory::resolveAlias( 'Spezialseiten/Foo' );
* @covers SpecialPageFactory::getLocalNameFor
*/
public function testGetLocalNameFor() {
- $this->setMwGlobals( 'wgContLang', Language::factory( 'de' ) );
+ $this->setContentLang( 'de' );
SpecialPageFactory::resetList();
$name = SpecialPageFactory::getLocalNameFor( 'Specialpages', 'Foo' );
* @covers SpecialPageFactory::getTitleForAlias
*/
public function testGetTitleForAlias() {
- $this->setMwGlobals( 'wgContLang', Language::factory( 'de' ) );
+ $this->setContentLang( 'de' );
SpecialPageFactory::resetList();
$title = SpecialPageFactory::getTitleForAlias( 'Specialpages/Foo' );
global $wgContLang;
$lang = clone $wgContLang;
$lang->mExtendedSpecialPageAliases = $aliasesList;
- $this->setMwGlobals( 'wgContLang', $lang );
+ $this->setContentLang( $lang );
$this->setMwGlobals( 'wgSpecialPages',
array_combine( array_keys( $aliasesList ), array_keys( $aliasesList ) )
);
protected function setUp() {
parent::setUp();
+ $this->setContentLang( 'en' );
$this->setMwGlobals( [
'wgScript' => '/index.php',
- 'wgContLang' => Language::factory( 'en' )
] );
}
protected function setUp() {
parent::setUp();
+ $this->setContentLang( 'en' );
$this->setMwGlobals( [
- 'wgLanguageCode' => 'en',
- 'wgContLang' => Language::factory( 'en' ),
'wgExtraNamespaces' => [ 100 => 'Portal' ],
] );
}
protected function setUp() {
parent::setUp();
- $this->setMwGlobals( [
- 'wgLanguageCode' => 'en',
- 'wgContLang' => Language::factory( 'en' ),
- ] );
+ $this->setContentLang( 'en' );
}
public function basicProvider() {
protected function setUp() {
parent::setUp();
+ $this->setContentLang( 'en' );
$this->setMwGlobals( [
- 'wgLanguageCode' => 'en',
- 'wgContLang' => Language::factory( 'en' ),
'wgNamespacesWithSubpages' => [ 0 => false, 2 => true ],
] );
}
];
}
- /**
- * Test, if for all rights a right- message exist,
- * which is used on Special:ListGroupRights as help text
- * Extensions and core
- *
- * @coversNothing
- */
- public function testAllRightsWithMessage() {
- // Getting all user rights, for core: User::$mCoreRights, for extensions: $wgAvailableRights
- $allRights = User::getAllRights();
- $allMessageKeys = Language::getMessageKeysFor( 'en' );
-
- $rightsWithMessage = [];
- foreach ( $allMessageKeys as $message ) {
- // === 0: must be at beginning of string (position 0)
- if ( strpos( $message, 'right-' ) === 0 ) {
- $rightsWithMessage[] = substr( $message, strlen( 'right-' ) );
- }
- }
-
- sort( $allRights );
- sort( $rightsWithMessage );
-
- $this->assertEquals(
- $allRights,
- $rightsWithMessage,
- 'Each user rights (core/extensions) has a corresponding right- message.'
- );
- }
-
/**
* Test User::editCount
* @group medium
protected function setUp() {
parent::setUp();
+ $this->setContentLang( 'tg' );
+
$this->setMwGlobals( [
- 'wgContLang' => Language::factory( 'tg' ),
- 'wgLanguageCode' => 'tg',
'wgDefaultLanguageVariant' => false,
'wgRequest' => new FauxRequest( [] ),
'wgUser' => new User,
namespace MediaWiki\Tests\Maintenance;
use DumpBackup;
-use Language;
use Title;
use WikiExporter;
use WikiPage;
function addDBData() {
// be sure, titles created here using english namespace names
- $this->setMwGlobals( [
- 'wgLanguageCode' => 'en',
- 'wgContLang' => Language::factory( 'en' ),
- ] );
+ $this->setContentLang( 'en' );
$this->tablesUsed[] = 'page';
$this->tablesUsed[] = 'revision';
'https://www.mediawiki.org/wiki/Manual:User_rights#Adding_new_rights'
);
}
+
+ /**
+	 * Test that a right-<right> message exists for every user right
+	 * (core and extensions); these messages are used as help text
+	 * on Special:ListGroupRights.
+ *
+ * @coversNothing
+ */
+ public function testAllRightsWithMessage() {
+		// Get all user rights: core rights from User::$mCoreRights, extension rights from $wgAvailableRights
+ $allRights = User::getAllRights();
+ $allMessageKeys = Language::getMessageKeysFor( 'en' );
+
+ $rightsWithMessage = [];
+ foreach ( $allMessageKeys as $message ) {
+ // === 0: must be at beginning of string (position 0)
+ if ( strpos( $message, 'right-' ) === 0 ) {
+ $rightsWithMessage[] = substr( $message, strlen( 'right-' ) );
+ }
+ }
+
+ $missing = array_diff(
+ $allRights,
+ $rightsWithMessage
+ );
+
+ $this->assertEquals(
+ [],
+ $missing,
+ 'Each user rights (core/extensions) has a corresponding right- message.'
+ );
+ }
}
/** @var ScopedCallback */
private $ptTeardownScope;
+ private $oldTablePrefix = '';
+
/**
* @defgroup filtering_constants Filtering constants
*
$type = $db->getType();
$prefix = $type === 'oracle' ?
MediaWikiTestCase::ORA_DB_PREFIX : MediaWikiTestCase::DB_PREFIX;
+ $this->oldTablePrefix = $db->tablePrefix();
MediaWikiTestCase::setupTestDB( $db, $prefix );
+ CloneDatabase::changePrefix( $prefix );
$teardown = $this->ptRunner->setDatabase( $db );
$teardown = $this->ptRunner->setupUploads( $teardown );
$this->ptTeardownScope = $teardown;
if ( $this->ptTeardownScope ) {
ScopedCallback::consume( $this->ptTeardownScope );
}
+ CloneDatabase::changePrefix( $this->oldTablePrefix );
}
/**