diff --git a/devServer.js b/devServer.js deleted file mode 100644 index a023e57..0000000 --- a/devServer.js +++ /dev/null @@ -1,26 +0,0 @@ -var path = require('path'); -var express = require('express'); -var webpack = require('webpack'); -var config = require('./webpack.config.dev'); - -var app = express(); -var compiler = webpack(config); - -app.use(require('webpack-dev-middleware')(compiler, { - noInfo: true, - publicPath: config[0].output.publicPath -})); - -// app.use(require('webpack-hot-middleware')(compiler)); - -app.use('/', express.static('./')); - -var port = 7355 -app.listen(port, 'localhost', function(err) { - if (err) { - console.log(err); - return; - } - - console.log('Listening at http://localhost:' + port); -}); \ No newline at end of file diff --git a/dist/tesseract.js b/dist/tesseract.js index ffd3eee..e777724 100644 --- a/dist/tesseract.js +++ b/dist/tesseract.js @@ -1 +1,251 @@ -!function(e,t){"object"==typeof exports&&"object"==typeof module?module.exports=t():"function"==typeof define&&define.amd?define([],t):"object"==typeof exports?exports.Tesseract=t():e.Tesseract=t()}(this,function(){return function(e){function t(n){if(r[n])return r[n].exports;var o=r[n]={exports:{},id:n,loaded:!1};return e[n].call(o.exports,o,o.exports,t),o.loaded=!0,o.exports}var r={};return t.m=e,t.c=r,t.p="",t(0)}([function(e,t){"use strict";function r(e,t){return a||(a=o(g.coreUrl,g.workerUrl,g.langUrl)),a.recognize(e,t)}function n(e){return a||(a=o(g.coreUrl,g.workerUrl,g.langUrl)),a.detect(e)}function o(){function e(e,t){var r=s++;u[r]={};var n=0;return Object.getOwnPropertyNames(t).filter(function(e){return"function"==typeof t[e]}).forEach(function(o){n++,t[o](function(a){t[o]=a,0==--n&&i.postMessage({jobId:r,action:e,args:t})})}),0==n&&i.postMessage({jobId:r,action:e,args:t}),{then:function(e){return u[r].result=e,this},error:function(e){return u[r].error=e,this},progress:function(e){return u[r].progress=e,this}}}function t(e){if(e.match&&e.match(/^https?:\/\//))return function(r){var n=new Image;n.src=e,n.onload=function(){return r(t(n))}};if("string"==typeof e&&(e=document.querySelector(e)),e.getContext)e=e.getContext("2d");else if("IMG"==e.tagName||"VIDEO"==e.tagName){var r=document.createElement("canvas");r.width=e.naturalWidth||e.videoWidth,r.height=e.naturalHeight||e.videoHeight;var n=r.getContext("2d");n.drawImage(e,0,0),e=n}return e.getImageData&&(e=e.getImageData(0,0,e.canvas.width,e.canvas.height)),e}var r=arguments.length>0&&void 0!==arguments[0]?arguments[0]:g.coreUrl,n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:g.workerUrl,o=arguments.length>2&&void 0!==arguments[2]?arguments[2]:g.langUrl,a=new Blob(["importScripts('"+r+"');\n\t\t importScripts('"+n+"');"]),i=new Worker(window.URL.createObjectURL(a)),c=!1,s=0,u={};return i.onmessage=function(e){var t=e.data,r=t.jobId,n=t.progress,o=t.error,a=t.result,i=u[r];n&&i.progress&&i.progress(n),o&&i.error&&i.error(o),a&&i.result&&i.result(a)},e("init",{mem:100663296,langUrl:o}),{detect:function(r){return e("detect",{image:t(r)})},recognize:function(r){var n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"eng";return"string"==typeof n?n={lang:n}:n.lang=n.lang||"eng",c||["chi_sim","chi_tra","jpn"].indexOf(n.lang)==-1||(e("init",{mem:167772160,langUrl:o}),c=!0),e("recognize",{options:n,image:t(r)})}}}var 
a,i="https://cdn.rawgit.com/naptha/tesseract.js-core/master/index.js",c="https://cdn.rawgit.com/naptha/tesseract.js/8b915dc/dist/tesseract.worker.js",s="https://cdn.rawgit.com/naptha/tessdata/gh-pages/3.02/",g={coreUrl:i,workerUrl:c,langUrl:s,recognize:r,detect:n,createWorker:o};e.exports=g}])}); \ No newline at end of file +(function(f){if(typeof exports==="object"&&typeof module!=="undefined"){module.exports=f()}else if(typeof define==="function"&&define.amd){define([],f)}else{var g;if(typeof window!=="undefined"){g=window}else if(typeof global!=="undefined"){g=global}else if(typeof self!=="undefined"){g=self}else{g=this}g.Tesseract = f()}})(function(){var define,module,exports;return (function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module '"+o+"'");throw f.code="MODULE_NOT_FOUND",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o 0) { + this._queue[0](); + } + } + }, { + key: '_recv', + value: function _recv(packet) { + if (this._currentJob.id === packet.jobId) { + this._currentJob._handle(packet); + } else { + console.warn('Job ID ' + packet.jobId + ' not known.'); + } + } + }]); + + return TesseractWorker; +}(); + +var jobCounter = 0; + +var TesseractJob = function () { + function TesseractJob(instance) { + _classCallCheck(this, TesseractJob); + + this.id = 'Job-' + ++jobCounter + '-' + Math.random().toString(16).slice(3, 8); + + this._instance = instance; + this._resolve = []; + this._reject = []; + this._progress = []; + } + + _createClass(TesseractJob, [{ + key: 'then', + value: function then(resolve, reject) { + if (this._resolve.push) { + this._resolve.push(resolve); + } else { + resolve(this._resolve); + } + + if (reject) this.catch(reject); + return this; + } + }, { + key: 'catch', + value: function _catch(reject) { + if (this._reject.push) { + this._reject.push(reject); + } else { + reject(this._reject); + } + return this; + } + }, { + key: 'progress', + value: function progress(fn) { + this._progress.push(fn); + return this; + } + }, { + key: '_send', + value: function _send(action, payload) { + adapter.sendPacket(this._instance, { + jobId: this.id, + action: action, + payload: payload + }); + } + }, { + key: '_handle', + value: function _handle(packet) { + var data = packet.data; + if (packet.status === 'resolve') { + if (this._resolve.length === 0) console.debug(data); + this._resolve.forEach(function (fn) { + var ret = fn(data); + if (ret && typeof ret.then == 'function') { + console.warn('TesseractJob instances do not chain like ES6 Promises. 
To convert it into a real promise, use Promise.resolve.'); + } + }); + this._resolve = data; + this._instance._dequeue(); + } else if (packet.status === 'reject') { + if (this._reject.length === 0) console.error(data); + this._reject.forEach(function (fn) { + return fn(data); + }); + this._reject = data; + this._instance._dequeue(); + } else if (packet.status === 'progress') { + this._progress.forEach(function (fn) { + return fn(data); + }); + } else { + console.warn('Message type unknown', packet.status); + } + } + }]); + + return TesseractJob; +}(); + +var DefaultTesseract = createWorker(adapter.defaultOptions); +DefaultTesseract.createWorker = createWorker; + +module.exports = DefaultTesseract; + +},{"./node/index.js":1}]},{},[2])(2) +}); \ No newline at end of file diff --git a/dist/worker.js b/dist/worker.js new file mode 100644 index 0000000..1d27bde --- /dev/null +++ b/dist/worker.js @@ -0,0 +1,12293 @@ +(function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module '"+o+"'");throw f.code="MODULE_NOT_FOUND",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o 0) { + throw new Error('Invalid string. Length must be a multiple of 4') + } + + // the number of equal signs (place holders) + // if there are two placeholders, than the two characters before it + // represent one byte + // if there is only one, then the three characters before it represent 2 bytes + // this is just a cheap hack to not do indexOf twice + return b64[len - 2] === '=' ? 2 : b64[len - 1] === '=' ? 1 : 0 +} + +function byteLength (b64) { + // base64 is 4/3 + up to two characters of the original data + return b64.length * 3 / 4 - placeHoldersCount(b64) +} + +function toByteArray (b64) { + var i, j, l, tmp, placeHolders, arr + var len = b64.length + placeHolders = placeHoldersCount(b64) + + arr = new Arr(len * 3 / 4 - placeHolders) + + // if there are placeholders, only get up to the last complete 4 chars + l = placeHolders > 0 ? 
len - 4 : len + + var L = 0 + + for (i = 0, j = 0; i < l; i += 4, j += 3) { + tmp = (revLookup[b64.charCodeAt(i)] << 18) | (revLookup[b64.charCodeAt(i + 1)] << 12) | (revLookup[b64.charCodeAt(i + 2)] << 6) | revLookup[b64.charCodeAt(i + 3)] + arr[L++] = (tmp >> 16) & 0xFF + arr[L++] = (tmp >> 8) & 0xFF + arr[L++] = tmp & 0xFF + } + + if (placeHolders === 2) { + tmp = (revLookup[b64.charCodeAt(i)] << 2) | (revLookup[b64.charCodeAt(i + 1)] >> 4) + arr[L++] = tmp & 0xFF + } else if (placeHolders === 1) { + tmp = (revLookup[b64.charCodeAt(i)] << 10) | (revLookup[b64.charCodeAt(i + 1)] << 4) | (revLookup[b64.charCodeAt(i + 2)] >> 2) + arr[L++] = (tmp >> 8) & 0xFF + arr[L++] = tmp & 0xFF + } + + return arr +} + +function tripletToBase64 (num) { + return lookup[num >> 18 & 0x3F] + lookup[num >> 12 & 0x3F] + lookup[num >> 6 & 0x3F] + lookup[num & 0x3F] +} + +function encodeChunk (uint8, start, end) { + var tmp + var output = [] + for (var i = start; i < end; i += 3) { + tmp = (uint8[i] << 16) + (uint8[i + 1] << 8) + (uint8[i + 2]) + output.push(tripletToBase64(tmp)) + } + return output.join('') +} + +function fromByteArray (uint8) { + var tmp + var len = uint8.length + var extraBytes = len % 3 // if we have 1 byte left, pad 2 bytes + var output = '' + var parts = [] + var maxChunkLength = 16383 // must be multiple of 3 + + // go through the array every three bytes, we'll deal with trailing stuff later + for (var i = 0, len2 = len - extraBytes; i < len2; i += maxChunkLength) { + parts.push(encodeChunk(uint8, i, (i + maxChunkLength) > len2 ? len2 : (i + maxChunkLength))) + } + + // pad the end with zeros, but make sure to not forget the extra bytes + if (extraBytes === 1) { + tmp = uint8[len - 1] + output += lookup[tmp >> 2] + output += lookup[(tmp << 4) & 0x3F] + output += '==' + } else if (extraBytes === 2) { + tmp = (uint8[len - 2] << 8) + (uint8[len - 1]) + output += lookup[tmp >> 10] + output += lookup[(tmp >> 4) & 0x3F] + output += lookup[(tmp << 2) & 0x3F] + output += '=' + } + + parts.push(output) + + return parts.join('') +} + +},{}],6:[function(require,module,exports){ +(function (global){ +/*! + * The buffer module from node.js, for the browser. + * + * @author Feross Aboukhadijeh + * @license MIT + */ +/* eslint-disable no-proto */ + +'use strict' + +var base64 = require('base64-js') +var ieee754 = require('ieee754') +var isArray = require('isarray') + +exports.Buffer = Buffer +exports.SlowBuffer = SlowBuffer +exports.INSPECT_MAX_BYTES = 50 + +/** + * If `Buffer.TYPED_ARRAY_SUPPORT`: + * === true Use Uint8Array implementation (fastest) + * === false Use Object implementation (most compatible, even IE6) + * + * Browsers that support typed arrays are IE 10+, Firefox 4+, Chrome 7+, Safari 5.1+, + * Opera 11.6+, iOS 4.2+. + * + * Due to various browser bugs, sometimes the Object implementation will be used even + * when the browser supports typed arrays. + * + * Note: + * + * - Firefox 4-29 lacks support for adding new properties to `Uint8Array` instances, + * See: https://bugzilla.mozilla.org/show_bug.cgi?id=695438. + * + * - Chrome 9-10 is missing the `TypedArray.prototype.subarray` function. + * + * - IE10 has a broken `TypedArray.prototype.subarray` function which returns arrays of + * incorrect length in some situations. + + * We detect these buggy browsers and set `Buffer.TYPED_ARRAY_SUPPORT` to `false` so they + * get the Object implementation, which is slower but behaves correctly. + */ +Buffer.TYPED_ARRAY_SUPPORT = global.TYPED_ARRAY_SUPPORT !== undefined + ? 
global.TYPED_ARRAY_SUPPORT + : typedArraySupport() + +/* + * Export kMaxLength after typed array support is determined. + */ +exports.kMaxLength = kMaxLength() + +function typedArraySupport () { + try { + var arr = new Uint8Array(1) + arr.__proto__ = {__proto__: Uint8Array.prototype, foo: function () { return 42 }} + return arr.foo() === 42 && // typed array instances can be augmented + typeof arr.subarray === 'function' && // chrome 9-10 lack `subarray` + arr.subarray(1, 1).byteLength === 0 // ie10 has broken `subarray` + } catch (e) { + return false + } +} + +function kMaxLength () { + return Buffer.TYPED_ARRAY_SUPPORT + ? 0x7fffffff + : 0x3fffffff +} + +function createBuffer (that, length) { + if (kMaxLength() < length) { + throw new RangeError('Invalid typed array length') + } + if (Buffer.TYPED_ARRAY_SUPPORT) { + // Return an augmented `Uint8Array` instance, for best performance + that = new Uint8Array(length) + that.__proto__ = Buffer.prototype + } else { + // Fallback: Return an object instance of the Buffer class + if (that === null) { + that = new Buffer(length) + } + that.length = length + } + + return that +} + +/** + * The Buffer constructor returns instances of `Uint8Array` that have their + * prototype changed to `Buffer.prototype`. Furthermore, `Buffer` is a subclass of + * `Uint8Array`, so the returned instances will have all the node `Buffer` methods + * and the `Uint8Array` methods. Square bracket notation works as expected -- it + * returns a single octet. + * + * The `Uint8Array` prototype remains unmodified. + */ + +function Buffer (arg, encodingOrOffset, length) { + if (!Buffer.TYPED_ARRAY_SUPPORT && !(this instanceof Buffer)) { + return new Buffer(arg, encodingOrOffset, length) + } + + // Common case. + if (typeof arg === 'number') { + if (typeof encodingOrOffset === 'string') { + throw new Error( + 'If encoding is specified then the first argument must be a string' + ) + } + return allocUnsafe(this, arg) + } + return from(this, arg, encodingOrOffset, length) +} + +Buffer.poolSize = 8192 // not used by this implementation + +// TODO: Legacy, not needed anymore. Remove in next major version. +Buffer._augment = function (arr) { + arr.__proto__ = Buffer.prototype + return arr +} + +function from (that, value, encodingOrOffset, length) { + if (typeof value === 'number') { + throw new TypeError('"value" argument must not be a number') + } + + if (typeof ArrayBuffer !== 'undefined' && value instanceof ArrayBuffer) { + return fromArrayBuffer(that, value, encodingOrOffset, length) + } + + if (typeof value === 'string') { + return fromString(that, value, encodingOrOffset) + } + + return fromObject(that, value) +} + +/** + * Functionally equivalent to Buffer(arg, encoding) but throws a TypeError + * if value is a number. + * Buffer.from(str[, encoding]) + * Buffer.from(array) + * Buffer.from(buffer) + * Buffer.from(arrayBuffer[, byteOffset[, length]]) + **/ +Buffer.from = function (value, encodingOrOffset, length) { + return from(null, value, encodingOrOffset, length) +} + +if (Buffer.TYPED_ARRAY_SUPPORT) { + Buffer.prototype.__proto__ = Uint8Array.prototype + Buffer.__proto__ = Uint8Array + if (typeof Symbol !== 'undefined' && Symbol.species && + Buffer[Symbol.species] === Buffer) { + // Fix subarray() in ES2016. 
See: https://github.com/feross/buffer/pull/97 + Object.defineProperty(Buffer, Symbol.species, { + value: null, + configurable: true + }) + } +} + +function assertSize (size) { + if (typeof size !== 'number') { + throw new TypeError('"size" argument must be a number') + } else if (size < 0) { + throw new RangeError('"size" argument must not be negative') + } +} + +function alloc (that, size, fill, encoding) { + assertSize(size) + if (size <= 0) { + return createBuffer(that, size) + } + if (fill !== undefined) { + // Only pay attention to encoding if it's a string. This + // prevents accidentally sending in a number that would + // be interpretted as a start offset. + return typeof encoding === 'string' + ? createBuffer(that, size).fill(fill, encoding) + : createBuffer(that, size).fill(fill) + } + return createBuffer(that, size) +} + +/** + * Creates a new filled Buffer instance. + * alloc(size[, fill[, encoding]]) + **/ +Buffer.alloc = function (size, fill, encoding) { + return alloc(null, size, fill, encoding) +} + +function allocUnsafe (that, size) { + assertSize(size) + that = createBuffer(that, size < 0 ? 0 : checked(size) | 0) + if (!Buffer.TYPED_ARRAY_SUPPORT) { + for (var i = 0; i < size; ++i) { + that[i] = 0 + } + } + return that +} + +/** + * Equivalent to Buffer(num), by default creates a non-zero-filled Buffer instance. + * */ +Buffer.allocUnsafe = function (size) { + return allocUnsafe(null, size) +} +/** + * Equivalent to SlowBuffer(num), by default creates a non-zero-filled Buffer instance. + */ +Buffer.allocUnsafeSlow = function (size) { + return allocUnsafe(null, size) +} + +function fromString (that, string, encoding) { + if (typeof encoding !== 'string' || encoding === '') { + encoding = 'utf8' + } + + if (!Buffer.isEncoding(encoding)) { + throw new TypeError('"encoding" must be a valid string encoding') + } + + var length = byteLength(string, encoding) | 0 + that = createBuffer(that, length) + + var actual = that.write(string, encoding) + + if (actual !== length) { + // Writing a hex string, for example, that contains invalid characters will + // cause everything after the first invalid character to be ignored. (e.g. + // 'abxxcd' will be treated as 'ab') + that = that.slice(0, actual) + } + + return that +} + +function fromArrayLike (that, array) { + var length = array.length < 0 ? 
0 : checked(array.length) | 0 + that = createBuffer(that, length) + for (var i = 0; i < length; i += 1) { + that[i] = array[i] & 255 + } + return that +} + +function fromArrayBuffer (that, array, byteOffset, length) { + array.byteLength // this throws if `array` is not a valid ArrayBuffer + + if (byteOffset < 0 || array.byteLength < byteOffset) { + throw new RangeError('\'offset\' is out of bounds') + } + + if (array.byteLength < byteOffset + (length || 0)) { + throw new RangeError('\'length\' is out of bounds') + } + + if (byteOffset === undefined && length === undefined) { + array = new Uint8Array(array) + } else if (length === undefined) { + array = new Uint8Array(array, byteOffset) + } else { + array = new Uint8Array(array, byteOffset, length) + } + + if (Buffer.TYPED_ARRAY_SUPPORT) { + // Return an augmented `Uint8Array` instance, for best performance + that = array + that.__proto__ = Buffer.prototype + } else { + // Fallback: Return an object instance of the Buffer class + that = fromArrayLike(that, array) + } + return that +} + +function fromObject (that, obj) { + if (Buffer.isBuffer(obj)) { + var len = checked(obj.length) | 0 + that = createBuffer(that, len) + + if (that.length === 0) { + return that + } + + obj.copy(that, 0, 0, len) + return that + } + + if (obj) { + if ((typeof ArrayBuffer !== 'undefined' && + obj.buffer instanceof ArrayBuffer) || 'length' in obj) { + if (typeof obj.length !== 'number' || isnan(obj.length)) { + return createBuffer(that, 0) + } + return fromArrayLike(that, obj) + } + + if (obj.type === 'Buffer' && isArray(obj.data)) { + return fromArrayLike(that, obj.data) + } + } + + throw new TypeError('First argument must be a string, Buffer, ArrayBuffer, Array, or array-like object.') +} + +function checked (length) { + // Note: cannot use `length < kMaxLength()` here because that fails when + // length is NaN (which is otherwise coerced to zero.) 
+ if (length >= kMaxLength()) { + throw new RangeError('Attempt to allocate Buffer larger than maximum ' + + 'size: 0x' + kMaxLength().toString(16) + ' bytes') + } + return length | 0 +} + +function SlowBuffer (length) { + if (+length != length) { // eslint-disable-line eqeqeq + length = 0 + } + return Buffer.alloc(+length) +} + +Buffer.isBuffer = function isBuffer (b) { + return !!(b != null && b._isBuffer) +} + +Buffer.compare = function compare (a, b) { + if (!Buffer.isBuffer(a) || !Buffer.isBuffer(b)) { + throw new TypeError('Arguments must be Buffers') + } + + if (a === b) return 0 + + var x = a.length + var y = b.length + + for (var i = 0, len = Math.min(x, y); i < len; ++i) { + if (a[i] !== b[i]) { + x = a[i] + y = b[i] + break + } + } + + if (x < y) return -1 + if (y < x) return 1 + return 0 +} + +Buffer.isEncoding = function isEncoding (encoding) { + switch (String(encoding).toLowerCase()) { + case 'hex': + case 'utf8': + case 'utf-8': + case 'ascii': + case 'latin1': + case 'binary': + case 'base64': + case 'ucs2': + case 'ucs-2': + case 'utf16le': + case 'utf-16le': + return true + default: + return false + } +} + +Buffer.concat = function concat (list, length) { + if (!isArray(list)) { + throw new TypeError('"list" argument must be an Array of Buffers') + } + + if (list.length === 0) { + return Buffer.alloc(0) + } + + var i + if (length === undefined) { + length = 0 + for (i = 0; i < list.length; ++i) { + length += list[i].length + } + } + + var buffer = Buffer.allocUnsafe(length) + var pos = 0 + for (i = 0; i < list.length; ++i) { + var buf = list[i] + if (!Buffer.isBuffer(buf)) { + throw new TypeError('"list" argument must be an Array of Buffers') + } + buf.copy(buffer, pos) + pos += buf.length + } + return buffer +} + +function byteLength (string, encoding) { + if (Buffer.isBuffer(string)) { + return string.length + } + if (typeof ArrayBuffer !== 'undefined' && typeof ArrayBuffer.isView === 'function' && + (ArrayBuffer.isView(string) || string instanceof ArrayBuffer)) { + return string.byteLength + } + if (typeof string !== 'string') { + string = '' + string + } + + var len = string.length + if (len === 0) return 0 + + // Use a for loop to avoid recursion + var loweredCase = false + for (;;) { + switch (encoding) { + case 'ascii': + case 'latin1': + case 'binary': + return len + case 'utf8': + case 'utf-8': + case undefined: + return utf8ToBytes(string).length + case 'ucs2': + case 'ucs-2': + case 'utf16le': + case 'utf-16le': + return len * 2 + case 'hex': + return len >>> 1 + case 'base64': + return base64ToBytes(string).length + default: + if (loweredCase) return utf8ToBytes(string).length // assume utf8 + encoding = ('' + encoding).toLowerCase() + loweredCase = true + } + } +} +Buffer.byteLength = byteLength + +function slowToString (encoding, start, end) { + var loweredCase = false + + // No need to verify that "this.length <= MAX_UINT32" since it's a read-only + // property of a typed array. + + // This behaves neither like String nor Uint8Array in that we set start/end + // to their upper/lower bounds if the value passed is out of range. + // undefined is handled specially as per ECMA-262 6th Edition, + // Section 13.3.3.7 Runtime Semantics: KeyedBindingInitialization. + if (start === undefined || start < 0) { + start = 0 + } + // Return early if start > this.length. Done here to prevent potential uint32 + // coercion fail below. 
+ if (start > this.length) { + return '' + } + + if (end === undefined || end > this.length) { + end = this.length + } + + if (end <= 0) { + return '' + } + + // Force coersion to uint32. This will also coerce falsey/NaN values to 0. + end >>>= 0 + start >>>= 0 + + if (end <= start) { + return '' + } + + if (!encoding) encoding = 'utf8' + + while (true) { + switch (encoding) { + case 'hex': + return hexSlice(this, start, end) + + case 'utf8': + case 'utf-8': + return utf8Slice(this, start, end) + + case 'ascii': + return asciiSlice(this, start, end) + + case 'latin1': + case 'binary': + return latin1Slice(this, start, end) + + case 'base64': + return base64Slice(this, start, end) + + case 'ucs2': + case 'ucs-2': + case 'utf16le': + case 'utf-16le': + return utf16leSlice(this, start, end) + + default: + if (loweredCase) throw new TypeError('Unknown encoding: ' + encoding) + encoding = (encoding + '').toLowerCase() + loweredCase = true + } + } +} + +// The property is used by `Buffer.isBuffer` and `is-buffer` (in Safari 5-7) to detect +// Buffer instances. +Buffer.prototype._isBuffer = true + +function swap (b, n, m) { + var i = b[n] + b[n] = b[m] + b[m] = i +} + +Buffer.prototype.swap16 = function swap16 () { + var len = this.length + if (len % 2 !== 0) { + throw new RangeError('Buffer size must be a multiple of 16-bits') + } + for (var i = 0; i < len; i += 2) { + swap(this, i, i + 1) + } + return this +} + +Buffer.prototype.swap32 = function swap32 () { + var len = this.length + if (len % 4 !== 0) { + throw new RangeError('Buffer size must be a multiple of 32-bits') + } + for (var i = 0; i < len; i += 4) { + swap(this, i, i + 3) + swap(this, i + 1, i + 2) + } + return this +} + +Buffer.prototype.swap64 = function swap64 () { + var len = this.length + if (len % 8 !== 0) { + throw new RangeError('Buffer size must be a multiple of 64-bits') + } + for (var i = 0; i < len; i += 8) { + swap(this, i, i + 7) + swap(this, i + 1, i + 6) + swap(this, i + 2, i + 5) + swap(this, i + 3, i + 4) + } + return this +} + +Buffer.prototype.toString = function toString () { + var length = this.length | 0 + if (length === 0) return '' + if (arguments.length === 0) return utf8Slice(this, 0, length) + return slowToString.apply(this, arguments) +} + +Buffer.prototype.equals = function equals (b) { + if (!Buffer.isBuffer(b)) throw new TypeError('Argument must be a Buffer') + if (this === b) return true + return Buffer.compare(this, b) === 0 +} + +Buffer.prototype.inspect = function inspect () { + var str = '' + var max = exports.INSPECT_MAX_BYTES + if (this.length > 0) { + str = this.toString('hex', 0, max).match(/.{2}/g).join(' ') + if (this.length > max) str += ' ... ' + } + return '' +} + +Buffer.prototype.compare = function compare (target, start, end, thisStart, thisEnd) { + if (!Buffer.isBuffer(target)) { + throw new TypeError('Argument must be a Buffer') + } + + if (start === undefined) { + start = 0 + } + if (end === undefined) { + end = target ? 
target.length : 0 + } + if (thisStart === undefined) { + thisStart = 0 + } + if (thisEnd === undefined) { + thisEnd = this.length + } + + if (start < 0 || end > target.length || thisStart < 0 || thisEnd > this.length) { + throw new RangeError('out of range index') + } + + if (thisStart >= thisEnd && start >= end) { + return 0 + } + if (thisStart >= thisEnd) { + return -1 + } + if (start >= end) { + return 1 + } + + start >>>= 0 + end >>>= 0 + thisStart >>>= 0 + thisEnd >>>= 0 + + if (this === target) return 0 + + var x = thisEnd - thisStart + var y = end - start + var len = Math.min(x, y) + + var thisCopy = this.slice(thisStart, thisEnd) + var targetCopy = target.slice(start, end) + + for (var i = 0; i < len; ++i) { + if (thisCopy[i] !== targetCopy[i]) { + x = thisCopy[i] + y = targetCopy[i] + break + } + } + + if (x < y) return -1 + if (y < x) return 1 + return 0 +} + +// Finds either the first index of `val` in `buffer` at offset >= `byteOffset`, +// OR the last index of `val` in `buffer` at offset <= `byteOffset`. +// +// Arguments: +// - buffer - a Buffer to search +// - val - a string, Buffer, or number +// - byteOffset - an index into `buffer`; will be clamped to an int32 +// - encoding - an optional encoding, relevant is val is a string +// - dir - true for indexOf, false for lastIndexOf +function bidirectionalIndexOf (buffer, val, byteOffset, encoding, dir) { + // Empty buffer means no match + if (buffer.length === 0) return -1 + + // Normalize byteOffset + if (typeof byteOffset === 'string') { + encoding = byteOffset + byteOffset = 0 + } else if (byteOffset > 0x7fffffff) { + byteOffset = 0x7fffffff + } else if (byteOffset < -0x80000000) { + byteOffset = -0x80000000 + } + byteOffset = +byteOffset // Coerce to Number. + if (isNaN(byteOffset)) { + // byteOffset: it it's undefined, null, NaN, "foo", etc, search whole buffer + byteOffset = dir ? 
0 : (buffer.length - 1) + } + + // Normalize byteOffset: negative offsets start from the end of the buffer + if (byteOffset < 0) byteOffset = buffer.length + byteOffset + if (byteOffset >= buffer.length) { + if (dir) return -1 + else byteOffset = buffer.length - 1 + } else if (byteOffset < 0) { + if (dir) byteOffset = 0 + else return -1 + } + + // Normalize val + if (typeof val === 'string') { + val = Buffer.from(val, encoding) + } + + // Finally, search either indexOf (if dir is true) or lastIndexOf + if (Buffer.isBuffer(val)) { + // Special case: looking for empty string/buffer always fails + if (val.length === 0) { + return -1 + } + return arrayIndexOf(buffer, val, byteOffset, encoding, dir) + } else if (typeof val === 'number') { + val = val & 0xFF // Search for a byte value [0-255] + if (Buffer.TYPED_ARRAY_SUPPORT && + typeof Uint8Array.prototype.indexOf === 'function') { + if (dir) { + return Uint8Array.prototype.indexOf.call(buffer, val, byteOffset) + } else { + return Uint8Array.prototype.lastIndexOf.call(buffer, val, byteOffset) + } + } + return arrayIndexOf(buffer, [ val ], byteOffset, encoding, dir) + } + + throw new TypeError('val must be string, number or Buffer') +} + +function arrayIndexOf (arr, val, byteOffset, encoding, dir) { + var indexSize = 1 + var arrLength = arr.length + var valLength = val.length + + if (encoding !== undefined) { + encoding = String(encoding).toLowerCase() + if (encoding === 'ucs2' || encoding === 'ucs-2' || + encoding === 'utf16le' || encoding === 'utf-16le') { + if (arr.length < 2 || val.length < 2) { + return -1 + } + indexSize = 2 + arrLength /= 2 + valLength /= 2 + byteOffset /= 2 + } + } + + function read (buf, i) { + if (indexSize === 1) { + return buf[i] + } else { + return buf.readUInt16BE(i * indexSize) + } + } + + var i + if (dir) { + var foundIndex = -1 + for (i = byteOffset; i < arrLength; i++) { + if (read(arr, i) === read(val, foundIndex === -1 ? 
0 : i - foundIndex)) { + if (foundIndex === -1) foundIndex = i + if (i - foundIndex + 1 === valLength) return foundIndex * indexSize + } else { + if (foundIndex !== -1) i -= i - foundIndex + foundIndex = -1 + } + } + } else { + if (byteOffset + valLength > arrLength) byteOffset = arrLength - valLength + for (i = byteOffset; i >= 0; i--) { + var found = true + for (var j = 0; j < valLength; j++) { + if (read(arr, i + j) !== read(val, j)) { + found = false + break + } + } + if (found) return i + } + } + + return -1 +} + +Buffer.prototype.includes = function includes (val, byteOffset, encoding) { + return this.indexOf(val, byteOffset, encoding) !== -1 +} + +Buffer.prototype.indexOf = function indexOf (val, byteOffset, encoding) { + return bidirectionalIndexOf(this, val, byteOffset, encoding, true) +} + +Buffer.prototype.lastIndexOf = function lastIndexOf (val, byteOffset, encoding) { + return bidirectionalIndexOf(this, val, byteOffset, encoding, false) +} + +function hexWrite (buf, string, offset, length) { + offset = Number(offset) || 0 + var remaining = buf.length - offset + if (!length) { + length = remaining + } else { + length = Number(length) + if (length > remaining) { + length = remaining + } + } + + // must be an even number of digits + var strLen = string.length + if (strLen % 2 !== 0) throw new TypeError('Invalid hex string') + + if (length > strLen / 2) { + length = strLen / 2 + } + for (var i = 0; i < length; ++i) { + var parsed = parseInt(string.substr(i * 2, 2), 16) + if (isNaN(parsed)) return i + buf[offset + i] = parsed + } + return i +} + +function utf8Write (buf, string, offset, length) { + return blitBuffer(utf8ToBytes(string, buf.length - offset), buf, offset, length) +} + +function asciiWrite (buf, string, offset, length) { + return blitBuffer(asciiToBytes(string), buf, offset, length) +} + +function latin1Write (buf, string, offset, length) { + return asciiWrite(buf, string, offset, length) +} + +function base64Write (buf, string, offset, length) { + return blitBuffer(base64ToBytes(string), buf, offset, length) +} + +function ucs2Write (buf, string, offset, length) { + return blitBuffer(utf16leToBytes(string, buf.length - offset), buf, offset, length) +} + +Buffer.prototype.write = function write (string, offset, length, encoding) { + // Buffer#write(string) + if (offset === undefined) { + encoding = 'utf8' + length = this.length + offset = 0 + // Buffer#write(string, encoding) + } else if (length === undefined && typeof offset === 'string') { + encoding = offset + length = this.length + offset = 0 + // Buffer#write(string, offset[, length][, encoding]) + } else if (isFinite(offset)) { + offset = offset | 0 + if (isFinite(length)) { + length = length | 0 + if (encoding === undefined) encoding = 'utf8' + } else { + encoding = length + length = undefined + } + // legacy write(string, encoding, offset, length) - remove in v0.13 + } else { + throw new Error( + 'Buffer.write(string, encoding, offset[, length]) is no longer supported' + ) + } + + var remaining = this.length - offset + if (length === undefined || length > remaining) length = remaining + + if ((string.length > 0 && (length < 0 || offset < 0)) || offset > this.length) { + throw new RangeError('Attempt to write outside buffer bounds') + } + + if (!encoding) encoding = 'utf8' + + var loweredCase = false + for (;;) { + switch (encoding) { + case 'hex': + return hexWrite(this, string, offset, length) + + case 'utf8': + case 'utf-8': + return utf8Write(this, string, offset, length) + + case 'ascii': + return 
asciiWrite(this, string, offset, length) + + case 'latin1': + case 'binary': + return latin1Write(this, string, offset, length) + + case 'base64': + // Warning: maxLength not taken into account in base64Write + return base64Write(this, string, offset, length) + + case 'ucs2': + case 'ucs-2': + case 'utf16le': + case 'utf-16le': + return ucs2Write(this, string, offset, length) + + default: + if (loweredCase) throw new TypeError('Unknown encoding: ' + encoding) + encoding = ('' + encoding).toLowerCase() + loweredCase = true + } + } +} + +Buffer.prototype.toJSON = function toJSON () { + return { + type: 'Buffer', + data: Array.prototype.slice.call(this._arr || this, 0) + } +} + +function base64Slice (buf, start, end) { + if (start === 0 && end === buf.length) { + return base64.fromByteArray(buf) + } else { + return base64.fromByteArray(buf.slice(start, end)) + } +} + +function utf8Slice (buf, start, end) { + end = Math.min(buf.length, end) + var res = [] + + var i = start + while (i < end) { + var firstByte = buf[i] + var codePoint = null + var bytesPerSequence = (firstByte > 0xEF) ? 4 + : (firstByte > 0xDF) ? 3 + : (firstByte > 0xBF) ? 2 + : 1 + + if (i + bytesPerSequence <= end) { + var secondByte, thirdByte, fourthByte, tempCodePoint + + switch (bytesPerSequence) { + case 1: + if (firstByte < 0x80) { + codePoint = firstByte + } + break + case 2: + secondByte = buf[i + 1] + if ((secondByte & 0xC0) === 0x80) { + tempCodePoint = (firstByte & 0x1F) << 0x6 | (secondByte & 0x3F) + if (tempCodePoint > 0x7F) { + codePoint = tempCodePoint + } + } + break + case 3: + secondByte = buf[i + 1] + thirdByte = buf[i + 2] + if ((secondByte & 0xC0) === 0x80 && (thirdByte & 0xC0) === 0x80) { + tempCodePoint = (firstByte & 0xF) << 0xC | (secondByte & 0x3F) << 0x6 | (thirdByte & 0x3F) + if (tempCodePoint > 0x7FF && (tempCodePoint < 0xD800 || tempCodePoint > 0xDFFF)) { + codePoint = tempCodePoint + } + } + break + case 4: + secondByte = buf[i + 1] + thirdByte = buf[i + 2] + fourthByte = buf[i + 3] + if ((secondByte & 0xC0) === 0x80 && (thirdByte & 0xC0) === 0x80 && (fourthByte & 0xC0) === 0x80) { + tempCodePoint = (firstByte & 0xF) << 0x12 | (secondByte & 0x3F) << 0xC | (thirdByte & 0x3F) << 0x6 | (fourthByte & 0x3F) + if (tempCodePoint > 0xFFFF && tempCodePoint < 0x110000) { + codePoint = tempCodePoint + } + } + } + } + + if (codePoint === null) { + // we did not generate a valid codePoint so insert a + // replacement char (U+FFFD) and advance only 1 byte + codePoint = 0xFFFD + bytesPerSequence = 1 + } else if (codePoint > 0xFFFF) { + // encode to utf16 (surrogate pair dance) + codePoint -= 0x10000 + res.push(codePoint >>> 10 & 0x3FF | 0xD800) + codePoint = 0xDC00 | codePoint & 0x3FF + } + + res.push(codePoint) + i += bytesPerSequence + } + + return decodeCodePointsArray(res) +} + +// Based on http://stackoverflow.com/a/22747272/680742, the browser with +// the lowest limit is Chrome, with 0x10000 args. +// We go 1 magnitude less, for safety +var MAX_ARGUMENTS_LENGTH = 0x1000 + +function decodeCodePointsArray (codePoints) { + var len = codePoints.length + if (len <= MAX_ARGUMENTS_LENGTH) { + return String.fromCharCode.apply(String, codePoints) // avoid extra slice() + } + + // Decode in chunks to avoid "call stack size exceeded". 
+ var res = '' + var i = 0 + while (i < len) { + res += String.fromCharCode.apply( + String, + codePoints.slice(i, i += MAX_ARGUMENTS_LENGTH) + ) + } + return res +} + +function asciiSlice (buf, start, end) { + var ret = '' + end = Math.min(buf.length, end) + + for (var i = start; i < end; ++i) { + ret += String.fromCharCode(buf[i] & 0x7F) + } + return ret +} + +function latin1Slice (buf, start, end) { + var ret = '' + end = Math.min(buf.length, end) + + for (var i = start; i < end; ++i) { + ret += String.fromCharCode(buf[i]) + } + return ret +} + +function hexSlice (buf, start, end) { + var len = buf.length + + if (!start || start < 0) start = 0 + if (!end || end < 0 || end > len) end = len + + var out = '' + for (var i = start; i < end; ++i) { + out += toHex(buf[i]) + } + return out +} + +function utf16leSlice (buf, start, end) { + var bytes = buf.slice(start, end) + var res = '' + for (var i = 0; i < bytes.length; i += 2) { + res += String.fromCharCode(bytes[i] + bytes[i + 1] * 256) + } + return res +} + +Buffer.prototype.slice = function slice (start, end) { + var len = this.length + start = ~~start + end = end === undefined ? len : ~~end + + if (start < 0) { + start += len + if (start < 0) start = 0 + } else if (start > len) { + start = len + } + + if (end < 0) { + end += len + if (end < 0) end = 0 + } else if (end > len) { + end = len + } + + if (end < start) end = start + + var newBuf + if (Buffer.TYPED_ARRAY_SUPPORT) { + newBuf = this.subarray(start, end) + newBuf.__proto__ = Buffer.prototype + } else { + var sliceLen = end - start + newBuf = new Buffer(sliceLen, undefined) + for (var i = 0; i < sliceLen; ++i) { + newBuf[i] = this[i + start] + } + } + + return newBuf +} + +/* + * Need to make sure that buffer isn't trying to write out of bounds. 
+ */ +function checkOffset (offset, ext, length) { + if ((offset % 1) !== 0 || offset < 0) throw new RangeError('offset is not uint') + if (offset + ext > length) throw new RangeError('Trying to access beyond buffer length') +} + +Buffer.prototype.readUIntLE = function readUIntLE (offset, byteLength, noAssert) { + offset = offset | 0 + byteLength = byteLength | 0 + if (!noAssert) checkOffset(offset, byteLength, this.length) + + var val = this[offset] + var mul = 1 + var i = 0 + while (++i < byteLength && (mul *= 0x100)) { + val += this[offset + i] * mul + } + + return val +} + +Buffer.prototype.readUIntBE = function readUIntBE (offset, byteLength, noAssert) { + offset = offset | 0 + byteLength = byteLength | 0 + if (!noAssert) { + checkOffset(offset, byteLength, this.length) + } + + var val = this[offset + --byteLength] + var mul = 1 + while (byteLength > 0 && (mul *= 0x100)) { + val += this[offset + --byteLength] * mul + } + + return val +} + +Buffer.prototype.readUInt8 = function readUInt8 (offset, noAssert) { + if (!noAssert) checkOffset(offset, 1, this.length) + return this[offset] +} + +Buffer.prototype.readUInt16LE = function readUInt16LE (offset, noAssert) { + if (!noAssert) checkOffset(offset, 2, this.length) + return this[offset] | (this[offset + 1] << 8) +} + +Buffer.prototype.readUInt16BE = function readUInt16BE (offset, noAssert) { + if (!noAssert) checkOffset(offset, 2, this.length) + return (this[offset] << 8) | this[offset + 1] +} + +Buffer.prototype.readUInt32LE = function readUInt32LE (offset, noAssert) { + if (!noAssert) checkOffset(offset, 4, this.length) + + return ((this[offset]) | + (this[offset + 1] << 8) | + (this[offset + 2] << 16)) + + (this[offset + 3] * 0x1000000) +} + +Buffer.prototype.readUInt32BE = function readUInt32BE (offset, noAssert) { + if (!noAssert) checkOffset(offset, 4, this.length) + + return (this[offset] * 0x1000000) + + ((this[offset + 1] << 16) | + (this[offset + 2] << 8) | + this[offset + 3]) +} + +Buffer.prototype.readIntLE = function readIntLE (offset, byteLength, noAssert) { + offset = offset | 0 + byteLength = byteLength | 0 + if (!noAssert) checkOffset(offset, byteLength, this.length) + + var val = this[offset] + var mul = 1 + var i = 0 + while (++i < byteLength && (mul *= 0x100)) { + val += this[offset + i] * mul + } + mul *= 0x80 + + if (val >= mul) val -= Math.pow(2, 8 * byteLength) + + return val +} + +Buffer.prototype.readIntBE = function readIntBE (offset, byteLength, noAssert) { + offset = offset | 0 + byteLength = byteLength | 0 + if (!noAssert) checkOffset(offset, byteLength, this.length) + + var i = byteLength + var mul = 1 + var val = this[offset + --i] + while (i > 0 && (mul *= 0x100)) { + val += this[offset + --i] * mul + } + mul *= 0x80 + + if (val >= mul) val -= Math.pow(2, 8 * byteLength) + + return val +} + +Buffer.prototype.readInt8 = function readInt8 (offset, noAssert) { + if (!noAssert) checkOffset(offset, 1, this.length) + if (!(this[offset] & 0x80)) return (this[offset]) + return ((0xff - this[offset] + 1) * -1) +} + +Buffer.prototype.readInt16LE = function readInt16LE (offset, noAssert) { + if (!noAssert) checkOffset(offset, 2, this.length) + var val = this[offset] | (this[offset + 1] << 8) + return (val & 0x8000) ? val | 0xFFFF0000 : val +} + +Buffer.prototype.readInt16BE = function readInt16BE (offset, noAssert) { + if (!noAssert) checkOffset(offset, 2, this.length) + var val = this[offset + 1] | (this[offset] << 8) + return (val & 0x8000) ? 
val | 0xFFFF0000 : val +} + +Buffer.prototype.readInt32LE = function readInt32LE (offset, noAssert) { + if (!noAssert) checkOffset(offset, 4, this.length) + + return (this[offset]) | + (this[offset + 1] << 8) | + (this[offset + 2] << 16) | + (this[offset + 3] << 24) +} + +Buffer.prototype.readInt32BE = function readInt32BE (offset, noAssert) { + if (!noAssert) checkOffset(offset, 4, this.length) + + return (this[offset] << 24) | + (this[offset + 1] << 16) | + (this[offset + 2] << 8) | + (this[offset + 3]) +} + +Buffer.prototype.readFloatLE = function readFloatLE (offset, noAssert) { + if (!noAssert) checkOffset(offset, 4, this.length) + return ieee754.read(this, offset, true, 23, 4) +} + +Buffer.prototype.readFloatBE = function readFloatBE (offset, noAssert) { + if (!noAssert) checkOffset(offset, 4, this.length) + return ieee754.read(this, offset, false, 23, 4) +} + +Buffer.prototype.readDoubleLE = function readDoubleLE (offset, noAssert) { + if (!noAssert) checkOffset(offset, 8, this.length) + return ieee754.read(this, offset, true, 52, 8) +} + +Buffer.prototype.readDoubleBE = function readDoubleBE (offset, noAssert) { + if (!noAssert) checkOffset(offset, 8, this.length) + return ieee754.read(this, offset, false, 52, 8) +} + +function checkInt (buf, value, offset, ext, max, min) { + if (!Buffer.isBuffer(buf)) throw new TypeError('"buffer" argument must be a Buffer instance') + if (value > max || value < min) throw new RangeError('"value" argument is out of bounds') + if (offset + ext > buf.length) throw new RangeError('Index out of range') +} + +Buffer.prototype.writeUIntLE = function writeUIntLE (value, offset, byteLength, noAssert) { + value = +value + offset = offset | 0 + byteLength = byteLength | 0 + if (!noAssert) { + var maxBytes = Math.pow(2, 8 * byteLength) - 1 + checkInt(this, value, offset, byteLength, maxBytes, 0) + } + + var mul = 1 + var i = 0 + this[offset] = value & 0xFF + while (++i < byteLength && (mul *= 0x100)) { + this[offset + i] = (value / mul) & 0xFF + } + + return offset + byteLength +} + +Buffer.prototype.writeUIntBE = function writeUIntBE (value, offset, byteLength, noAssert) { + value = +value + offset = offset | 0 + byteLength = byteLength | 0 + if (!noAssert) { + var maxBytes = Math.pow(2, 8 * byteLength) - 1 + checkInt(this, value, offset, byteLength, maxBytes, 0) + } + + var i = byteLength - 1 + var mul = 1 + this[offset + i] = value & 0xFF + while (--i >= 0 && (mul *= 0x100)) { + this[offset + i] = (value / mul) & 0xFF + } + + return offset + byteLength +} + +Buffer.prototype.writeUInt8 = function writeUInt8 (value, offset, noAssert) { + value = +value + offset = offset | 0 + if (!noAssert) checkInt(this, value, offset, 1, 0xff, 0) + if (!Buffer.TYPED_ARRAY_SUPPORT) value = Math.floor(value) + this[offset] = (value & 0xff) + return offset + 1 +} + +function objectWriteUInt16 (buf, value, offset, littleEndian) { + if (value < 0) value = 0xffff + value + 1 + for (var i = 0, j = Math.min(buf.length - offset, 2); i < j; ++i) { + buf[offset + i] = (value & (0xff << (8 * (littleEndian ? i : 1 - i)))) >>> + (littleEndian ? 
i : 1 - i) * 8 + } +} + +Buffer.prototype.writeUInt16LE = function writeUInt16LE (value, offset, noAssert) { + value = +value + offset = offset | 0 + if (!noAssert) checkInt(this, value, offset, 2, 0xffff, 0) + if (Buffer.TYPED_ARRAY_SUPPORT) { + this[offset] = (value & 0xff) + this[offset + 1] = (value >>> 8) + } else { + objectWriteUInt16(this, value, offset, true) + } + return offset + 2 +} + +Buffer.prototype.writeUInt16BE = function writeUInt16BE (value, offset, noAssert) { + value = +value + offset = offset | 0 + if (!noAssert) checkInt(this, value, offset, 2, 0xffff, 0) + if (Buffer.TYPED_ARRAY_SUPPORT) { + this[offset] = (value >>> 8) + this[offset + 1] = (value & 0xff) + } else { + objectWriteUInt16(this, value, offset, false) + } + return offset + 2 +} + +function objectWriteUInt32 (buf, value, offset, littleEndian) { + if (value < 0) value = 0xffffffff + value + 1 + for (var i = 0, j = Math.min(buf.length - offset, 4); i < j; ++i) { + buf[offset + i] = (value >>> (littleEndian ? i : 3 - i) * 8) & 0xff + } +} + +Buffer.prototype.writeUInt32LE = function writeUInt32LE (value, offset, noAssert) { + value = +value + offset = offset | 0 + if (!noAssert) checkInt(this, value, offset, 4, 0xffffffff, 0) + if (Buffer.TYPED_ARRAY_SUPPORT) { + this[offset + 3] = (value >>> 24) + this[offset + 2] = (value >>> 16) + this[offset + 1] = (value >>> 8) + this[offset] = (value & 0xff) + } else { + objectWriteUInt32(this, value, offset, true) + } + return offset + 4 +} + +Buffer.prototype.writeUInt32BE = function writeUInt32BE (value, offset, noAssert) { + value = +value + offset = offset | 0 + if (!noAssert) checkInt(this, value, offset, 4, 0xffffffff, 0) + if (Buffer.TYPED_ARRAY_SUPPORT) { + this[offset] = (value >>> 24) + this[offset + 1] = (value >>> 16) + this[offset + 2] = (value >>> 8) + this[offset + 3] = (value & 0xff) + } else { + objectWriteUInt32(this, value, offset, false) + } + return offset + 4 +} + +Buffer.prototype.writeIntLE = function writeIntLE (value, offset, byteLength, noAssert) { + value = +value + offset = offset | 0 + if (!noAssert) { + var limit = Math.pow(2, 8 * byteLength - 1) + + checkInt(this, value, offset, byteLength, limit - 1, -limit) + } + + var i = 0 + var mul = 1 + var sub = 0 + this[offset] = value & 0xFF + while (++i < byteLength && (mul *= 0x100)) { + if (value < 0 && sub === 0 && this[offset + i - 1] !== 0) { + sub = 1 + } + this[offset + i] = ((value / mul) >> 0) - sub & 0xFF + } + + return offset + byteLength +} + +Buffer.prototype.writeIntBE = function writeIntBE (value, offset, byteLength, noAssert) { + value = +value + offset = offset | 0 + if (!noAssert) { + var limit = Math.pow(2, 8 * byteLength - 1) + + checkInt(this, value, offset, byteLength, limit - 1, -limit) + } + + var i = byteLength - 1 + var mul = 1 + var sub = 0 + this[offset + i] = value & 0xFF + while (--i >= 0 && (mul *= 0x100)) { + if (value < 0 && sub === 0 && this[offset + i + 1] !== 0) { + sub = 1 + } + this[offset + i] = ((value / mul) >> 0) - sub & 0xFF + } + + return offset + byteLength +} + +Buffer.prototype.writeInt8 = function writeInt8 (value, offset, noAssert) { + value = +value + offset = offset | 0 + if (!noAssert) checkInt(this, value, offset, 1, 0x7f, -0x80) + if (!Buffer.TYPED_ARRAY_SUPPORT) value = Math.floor(value) + if (value < 0) value = 0xff + value + 1 + this[offset] = (value & 0xff) + return offset + 1 +} + +Buffer.prototype.writeInt16LE = function writeInt16LE (value, offset, noAssert) { + value = +value + offset = offset | 0 + if (!noAssert) checkInt(this, 
value, offset, 2, 0x7fff, -0x8000) + if (Buffer.TYPED_ARRAY_SUPPORT) { + this[offset] = (value & 0xff) + this[offset + 1] = (value >>> 8) + } else { + objectWriteUInt16(this, value, offset, true) + } + return offset + 2 +} + +Buffer.prototype.writeInt16BE = function writeInt16BE (value, offset, noAssert) { + value = +value + offset = offset | 0 + if (!noAssert) checkInt(this, value, offset, 2, 0x7fff, -0x8000) + if (Buffer.TYPED_ARRAY_SUPPORT) { + this[offset] = (value >>> 8) + this[offset + 1] = (value & 0xff) + } else { + objectWriteUInt16(this, value, offset, false) + } + return offset + 2 +} + +Buffer.prototype.writeInt32LE = function writeInt32LE (value, offset, noAssert) { + value = +value + offset = offset | 0 + if (!noAssert) checkInt(this, value, offset, 4, 0x7fffffff, -0x80000000) + if (Buffer.TYPED_ARRAY_SUPPORT) { + this[offset] = (value & 0xff) + this[offset + 1] = (value >>> 8) + this[offset + 2] = (value >>> 16) + this[offset + 3] = (value >>> 24) + } else { + objectWriteUInt32(this, value, offset, true) + } + return offset + 4 +} + +Buffer.prototype.writeInt32BE = function writeInt32BE (value, offset, noAssert) { + value = +value + offset = offset | 0 + if (!noAssert) checkInt(this, value, offset, 4, 0x7fffffff, -0x80000000) + if (value < 0) value = 0xffffffff + value + 1 + if (Buffer.TYPED_ARRAY_SUPPORT) { + this[offset] = (value >>> 24) + this[offset + 1] = (value >>> 16) + this[offset + 2] = (value >>> 8) + this[offset + 3] = (value & 0xff) + } else { + objectWriteUInt32(this, value, offset, false) + } + return offset + 4 +} + +function checkIEEE754 (buf, value, offset, ext, max, min) { + if (offset + ext > buf.length) throw new RangeError('Index out of range') + if (offset < 0) throw new RangeError('Index out of range') +} + +function writeFloat (buf, value, offset, littleEndian, noAssert) { + if (!noAssert) { + checkIEEE754(buf, value, offset, 4, 3.4028234663852886e+38, -3.4028234663852886e+38) + } + ieee754.write(buf, value, offset, littleEndian, 23, 4) + return offset + 4 +} + +Buffer.prototype.writeFloatLE = function writeFloatLE (value, offset, noAssert) { + return writeFloat(this, value, offset, true, noAssert) +} + +Buffer.prototype.writeFloatBE = function writeFloatBE (value, offset, noAssert) { + return writeFloat(this, value, offset, false, noAssert) +} + +function writeDouble (buf, value, offset, littleEndian, noAssert) { + if (!noAssert) { + checkIEEE754(buf, value, offset, 8, 1.7976931348623157E+308, -1.7976931348623157E+308) + } + ieee754.write(buf, value, offset, littleEndian, 52, 8) + return offset + 8 +} + +Buffer.prototype.writeDoubleLE = function writeDoubleLE (value, offset, noAssert) { + return writeDouble(this, value, offset, true, noAssert) +} + +Buffer.prototype.writeDoubleBE = function writeDoubleBE (value, offset, noAssert) { + return writeDouble(this, value, offset, false, noAssert) +} + +// copy(targetBuffer, targetStart=0, sourceStart=0, sourceEnd=buffer.length) +Buffer.prototype.copy = function copy (target, targetStart, start, end) { + if (!start) start = 0 + if (!end && end !== 0) end = this.length + if (targetStart >= target.length) targetStart = target.length + if (!targetStart) targetStart = 0 + if (end > 0 && end < start) end = start + + // Copy 0 bytes; we're done + if (end === start) return 0 + if (target.length === 0 || this.length === 0) return 0 + + // Fatal error conditions + if (targetStart < 0) { + throw new RangeError('targetStart out of bounds') + } + if (start < 0 || start >= this.length) throw new RangeError('sourceStart 
out of bounds') + if (end < 0) throw new RangeError('sourceEnd out of bounds') + + // Are we oob? + if (end > this.length) end = this.length + if (target.length - targetStart < end - start) { + end = target.length - targetStart + start + } + + var len = end - start + var i + + if (this === target && start < targetStart && targetStart < end) { + // descending copy from end + for (i = len - 1; i >= 0; --i) { + target[i + targetStart] = this[i + start] + } + } else if (len < 1000 || !Buffer.TYPED_ARRAY_SUPPORT) { + // ascending copy from start + for (i = 0; i < len; ++i) { + target[i + targetStart] = this[i + start] + } + } else { + Uint8Array.prototype.set.call( + target, + this.subarray(start, start + len), + targetStart + ) + } + + return len +} + +// Usage: +// buffer.fill(number[, offset[, end]]) +// buffer.fill(buffer[, offset[, end]]) +// buffer.fill(string[, offset[, end]][, encoding]) +Buffer.prototype.fill = function fill (val, start, end, encoding) { + // Handle string cases: + if (typeof val === 'string') { + if (typeof start === 'string') { + encoding = start + start = 0 + end = this.length + } else if (typeof end === 'string') { + encoding = end + end = this.length + } + if (val.length === 1) { + var code = val.charCodeAt(0) + if (code < 256) { + val = code + } + } + if (encoding !== undefined && typeof encoding !== 'string') { + throw new TypeError('encoding must be a string') + } + if (typeof encoding === 'string' && !Buffer.isEncoding(encoding)) { + throw new TypeError('Unknown encoding: ' + encoding) + } + } else if (typeof val === 'number') { + val = val & 255 + } + + // Invalid ranges are not set to a default, so can range check early. + if (start < 0 || this.length < start || this.length < end) { + throw new RangeError('Out of range index') + } + + if (end <= start) { + return this + } + + start = start >>> 0 + end = end === undefined ? this.length : end >>> 0 + + if (!val) val = 0 + + var i + if (typeof val === 'number') { + for (i = start; i < end; ++i) { + this[i] = val + } + } else { + var bytes = Buffer.isBuffer(val) + ? 
val + : utf8ToBytes(new Buffer(val, encoding).toString()) + var len = bytes.length + for (i = 0; i < end - start; ++i) { + this[i + start] = bytes[i % len] + } + } + + return this +} + +// HELPER FUNCTIONS +// ================ + +var INVALID_BASE64_RE = /[^+\/0-9A-Za-z-_]/g + +function base64clean (str) { + // Node strips out invalid characters like \n and \t from the string, base64-js does not + str = stringtrim(str).replace(INVALID_BASE64_RE, '') + // Node converts strings with length < 2 to '' + if (str.length < 2) return '' + // Node allows for non-padded base64 strings (missing trailing ===), base64-js does not + while (str.length % 4 !== 0) { + str = str + '=' + } + return str +} + +function stringtrim (str) { + if (str.trim) return str.trim() + return str.replace(/^\s+|\s+$/g, '') +} + +function toHex (n) { + if (n < 16) return '0' + n.toString(16) + return n.toString(16) +} + +function utf8ToBytes (string, units) { + units = units || Infinity + var codePoint + var length = string.length + var leadSurrogate = null + var bytes = [] + + for (var i = 0; i < length; ++i) { + codePoint = string.charCodeAt(i) + + // is surrogate component + if (codePoint > 0xD7FF && codePoint < 0xE000) { + // last char was a lead + if (!leadSurrogate) { + // no lead yet + if (codePoint > 0xDBFF) { + // unexpected trail + if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD) + continue + } else if (i + 1 === length) { + // unpaired lead + if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD) + continue + } + + // valid lead + leadSurrogate = codePoint + + continue + } + + // 2 leads in a row + if (codePoint < 0xDC00) { + if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD) + leadSurrogate = codePoint + continue + } + + // valid surrogate pair + codePoint = (leadSurrogate - 0xD800 << 10 | codePoint - 0xDC00) + 0x10000 + } else if (leadSurrogate) { + // valid bmp char, but last char was a lead + if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD) + } + + leadSurrogate = null + + // encode utf8 + if (codePoint < 0x80) { + if ((units -= 1) < 0) break + bytes.push(codePoint) + } else if (codePoint < 0x800) { + if ((units -= 2) < 0) break + bytes.push( + codePoint >> 0x6 | 0xC0, + codePoint & 0x3F | 0x80 + ) + } else if (codePoint < 0x10000) { + if ((units -= 3) < 0) break + bytes.push( + codePoint >> 0xC | 0xE0, + codePoint >> 0x6 & 0x3F | 0x80, + codePoint & 0x3F | 0x80 + ) + } else if (codePoint < 0x110000) { + if ((units -= 4) < 0) break + bytes.push( + codePoint >> 0x12 | 0xF0, + codePoint >> 0xC & 0x3F | 0x80, + codePoint >> 0x6 & 0x3F | 0x80, + codePoint & 0x3F | 0x80 + ) + } else { + throw new Error('Invalid code point') + } + } + + return bytes +} + +function asciiToBytes (str) { + var byteArray = [] + for (var i = 0; i < str.length; ++i) { + // Node's code seems to be doing this and not & 0x7F.. 
+ byteArray.push(str.charCodeAt(i) & 0xFF) + } + return byteArray +} + +function utf16leToBytes (str, units) { + var c, hi, lo + var byteArray = [] + for (var i = 0; i < str.length; ++i) { + if ((units -= 2) < 0) break + + c = str.charCodeAt(i) + hi = c >> 8 + lo = c % 256 + byteArray.push(lo) + byteArray.push(hi) + } + + return byteArray +} + +function base64ToBytes (str) { + return base64.toByteArray(base64clean(str)) +} + +function blitBuffer (src, dst, offset, length) { + for (var i = 0; i < length; ++i) { + if ((i + offset >= dst.length) || (i >= src.length)) break + dst[i + offset] = src[i] + } + return i +} + +function isnan (val) { + return val !== val // eslint-disable-line no-self-compare +} + +}).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {}) +},{"base64-js":5,"ieee754":8,"isarray":10}],7:[function(require,module,exports){ +/*global window:false, self:false, define:false, module:false */ + +/** + * @license IDBWrapper - A cross-browser wrapper for IndexedDB + * Version 1.7.1 + * Copyright (c) 2011 - 2016 Jens Arps + * http://jensarps.de/ + * + * Licensed under the MIT (X11) license + */ + +(function (name, definition, global) { + + 'use strict'; + + if (typeof define === 'function') { + define(definition); + } else if (typeof module !== 'undefined' && module.exports) { + module.exports = definition(); + } else { + global[name] = definition(); + } +})('IDBStore', function () { + + 'use strict'; + + var defaultErrorHandler = function (error) { + throw error; + }; + var defaultSuccessHandler = function () { + }; + + var defaults = { + storeName: 'Store', + storePrefix: 'IDBWrapper-', + dbVersion: 1, + keyPath: 'id', + autoIncrement: true, + onStoreReady: function () { + }, + onError: defaultErrorHandler, + indexes: [], + implementationPreference: [ + 'indexedDB', + 'webkitIndexedDB', + 'mozIndexedDB', + 'shimIndexedDB' + ] + }; + + /** + * + * The IDBStore constructor + * + * @constructor + * @name IDBStore + * @version 1.7.1 + * + * @param {Object} [kwArgs] An options object used to configure the store and + * set callbacks + * @param {String} [kwArgs.storeName='Store'] The name of the store + * @param {String} [kwArgs.storePrefix='IDBWrapper-'] A prefix that is + * internally used to construct the name of the database, which will be + * kwArgs.storePrefix + kwArgs.storeName + * @param {Number} [kwArgs.dbVersion=1] The version of the store + * @param {String} [kwArgs.keyPath='id'] The key path to use. If you want to + * setup IDBWrapper to work with out-of-line keys, you need to set this to + * `null` + * @param {Boolean} [kwArgs.autoIncrement=true] If set to true, IDBStore will + * automatically make sure a unique keyPath value is present on each object + * that is stored. + * @param {Function} [kwArgs.onStoreReady] A callback to be called when the + * store is ready to be used. + * @param {Function} [kwArgs.onError=throw] A callback to be called when an + * error occurred during instantiation of the store. + * @param {Array} [kwArgs.indexes=[]] An array of indexData objects + * defining the indexes to use with the store. For every index to be used + * one indexData object needs to be passed in the array. 
+ * An indexData object is defined as follows: + * @param {Object} [kwArgs.indexes.indexData] An object defining the index to + * use + * @param {String} kwArgs.indexes.indexData.name The name of the index + * @param {String} [kwArgs.indexes.indexData.keyPath] The key path of the index + * @param {Boolean} [kwArgs.indexes.indexData.unique] Whether the index is unique + * @param {Boolean} [kwArgs.indexes.indexData.multiEntry] Whether the index is multi entry + * @param {Array} [kwArgs.implementationPreference=['indexedDB','webkitIndexedDB','mozIndexedDB','shimIndexedDB']] An array of strings naming implementations to be used, in order or preference + * @param {Function} [onStoreReady] A callback to be called when the store + * is ready to be used. + * @example + // create a store for customers with an additional index over the + // `lastname` property. + var myCustomerStore = new IDBStore({ + dbVersion: 1, + storeName: 'customer-index', + keyPath: 'customerid', + autoIncrement: true, + onStoreReady: populateTable, + indexes: [ + { name: 'lastname', keyPath: 'lastname', unique: false, multiEntry: false } + ] + }); + * @example + // create a generic store + var myCustomerStore = new IDBStore({ + storeName: 'my-data-store', + onStoreReady: function(){ + // start working with the store. + } + }); + */ + var IDBStore = function (kwArgs, onStoreReady) { + + if (typeof onStoreReady == 'undefined' && typeof kwArgs == 'function') { + onStoreReady = kwArgs; + } + if (Object.prototype.toString.call(kwArgs) != '[object Object]') { + kwArgs = {}; + } + + for (var key in defaults) { + this[key] = typeof kwArgs[key] != 'undefined' ? kwArgs[key] : defaults[key]; + } + + this.dbName = this.storePrefix + this.storeName; + this.dbVersion = parseInt(this.dbVersion, 10) || 1; + + onStoreReady && (this.onStoreReady = onStoreReady); + + var env = typeof window == 'object' ? 
window : self; + var availableImplementations = this.implementationPreference.filter(function (implName) { + return implName in env; + }); + this.implementation = availableImplementations[0]; + this.idb = env[this.implementation]; + this.keyRange = env.IDBKeyRange || env.webkitIDBKeyRange || env.mozIDBKeyRange; + + this.consts = { + 'READ_ONLY': 'readonly', + 'READ_WRITE': 'readwrite', + 'VERSION_CHANGE': 'versionchange', + 'NEXT': 'next', + 'NEXT_NO_DUPLICATE': 'nextunique', + 'PREV': 'prev', + 'PREV_NO_DUPLICATE': 'prevunique' + }; + + this.openDB(); + }; + + /** @lends IDBStore.prototype */ + var proto = { + + /** + * A pointer to the IDBStore ctor + * + * @private + * @type {Function} + * @constructs + */ + constructor: IDBStore, + + /** + * The version of IDBStore + * + * @type {String} + */ + version: '1.7.1', + + /** + * A reference to the IndexedDB object + * + * @type {IDBDatabase} + */ + db: null, + + /** + * The full name of the IndexedDB used by IDBStore, composed of + * this.storePrefix + this.storeName + * + * @type {String} + */ + dbName: null, + + /** + * The version of the IndexedDB used by IDBStore + * + * @type {Number} + */ + dbVersion: null, + + /** + * A reference to the objectStore used by IDBStore + * + * @type {IDBObjectStore} + */ + store: null, + + /** + * The store name + * + * @type {String} + */ + storeName: null, + + /** + * The prefix to prepend to the store name + * + * @type {String} + */ + storePrefix: null, + + /** + * The key path + * + * @type {String} + */ + keyPath: null, + + /** + * Whether IDBStore uses autoIncrement + * + * @type {Boolean} + */ + autoIncrement: null, + + /** + * The indexes used by IDBStore + * + * @type {Array} + */ + indexes: null, + + /** + * The implementations to try to use, in order of preference + * + * @type {Array} + */ + implementationPreference: null, + + /** + * The actual implementation being used + * + * @type {String} + */ + implementation: '', + + /** + * The callback to be called when the store is ready to be used + * + * @type {Function} + */ + onStoreReady: null, + + /** + * The callback to be called if an error occurred during instantiation + * of the store + * + * @type {Function} + */ + onError: null, + + /** + * The internal insertID counter + * + * @type {Number} + * @private + */ + _insertIdCount: 0, + + /** + * Opens an IndexedDB; called by the constructor. + * + * Will check if versions match and compare provided index configuration + * with existing ones, and update indexes if necessary. + * + * Will call this.onStoreReady() if everything went well and the store + * is ready to use, and this.onError() if something went wrong.
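+     * Note that mismatching index configurations only trigger onError() here; indexes are actually created, updated or deleted in the onupgradeneeded handler once the version number is bumped.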
+ * + * @private + * + */ + openDB: function () { + + var openRequest = this.idb.open(this.dbName, this.dbVersion); + var preventSuccessCallback = false; + + openRequest.onerror = function (errorEvent) { + + if (hasVersionError(errorEvent)) { + this.onError(new Error('The version number provided is lower than the existing one.')); + } else { + var error; + + if (errorEvent.target.error) { + error = errorEvent.target.error; + } else { + var errorMessage = 'IndexedDB unknown error occurred when opening DB ' + this.dbName + ' version ' + this.dbVersion; + if ('errorCode' in errorEvent.target) { + errorMessage += ' with error code ' + errorEvent.target.errorCode; + } + error = new Error(errorMessage); + } + + this.onError(error); + } + }.bind(this); + + openRequest.onsuccess = function (event) { + + if (preventSuccessCallback) { + return; + } + + if (this.db) { + this.onStoreReady(); + return; + } + + this.db = event.target.result; + + if (typeof this.db.version == 'string') { + this.onError(new Error('The IndexedDB implementation in this browser is outdated. Please upgrade your browser.')); + return; + } + + if (!this.db.objectStoreNames.contains(this.storeName)) { + // We should never ever get here. + // Lets notify the user anyway. + this.onError(new Error('Object store couldn\'t be created.')); + return; + } + + var emptyTransaction = this.db.transaction([this.storeName], this.consts.READ_ONLY); + this.store = emptyTransaction.objectStore(this.storeName); + + // check indexes + var existingIndexes = Array.prototype.slice.call(this.getIndexList()); + this.indexes.forEach(function (indexData) { + var indexName = indexData.name; + + if (!indexName) { + preventSuccessCallback = true; + this.onError(new Error('Cannot create index: No index name given.')); + return; + } + + this.normalizeIndexData(indexData); + + if (this.hasIndex(indexName)) { + // check if it complies + var actualIndex = this.store.index(indexName); + var complies = this.indexComplies(actualIndex, indexData); + if (!complies) { + preventSuccessCallback = true; + this.onError(new Error('Cannot modify index "' + indexName + '" for current version. Please bump version number to ' + ( this.dbVersion + 1 ) + '.')); + } + + existingIndexes.splice(existingIndexes.indexOf(indexName), 1); + } else { + preventSuccessCallback = true; + this.onError(new Error('Cannot create new index "' + indexName + '" for current version. Please bump version number to ' + ( this.dbVersion + 1 ) + '.')); + } + + }, this); + + if (existingIndexes.length) { + preventSuccessCallback = true; + this.onError(new Error('Cannot delete index(es) "' + existingIndexes.toString() + '" for current version. 
Please bump version number to ' + ( this.dbVersion + 1 ) + '.')); + } + + preventSuccessCallback || this.onStoreReady(); + }.bind(this); + + openRequest.onupgradeneeded = function (/* IDBVersionChangeEvent */ event) { + + this.db = event.target.result; + + if (this.db.objectStoreNames.contains(this.storeName)) { + this.store = event.target.transaction.objectStore(this.storeName); + } else { + var optionalParameters = {autoIncrement: this.autoIncrement}; + if (this.keyPath !== null) { + optionalParameters.keyPath = this.keyPath; + } + this.store = this.db.createObjectStore(this.storeName, optionalParameters); + } + + var existingIndexes = Array.prototype.slice.call(this.getIndexList()); + this.indexes.forEach(function (indexData) { + var indexName = indexData.name; + + if (!indexName) { + preventSuccessCallback = true; + this.onError(new Error('Cannot create index: No index name given.')); + } + + this.normalizeIndexData(indexData); + + if (this.hasIndex(indexName)) { + // check if it complies + var actualIndex = this.store.index(indexName); + var complies = this.indexComplies(actualIndex, indexData); + if (!complies) { + // index differs, need to delete and re-create + this.store.deleteIndex(indexName); + this.store.createIndex(indexName, indexData.keyPath, { + unique: indexData.unique, + multiEntry: indexData.multiEntry + }); + } + + existingIndexes.splice(existingIndexes.indexOf(indexName), 1); + } else { + this.store.createIndex(indexName, indexData.keyPath, { + unique: indexData.unique, + multiEntry: indexData.multiEntry + }); + } + + }, this); + + if (existingIndexes.length) { + existingIndexes.forEach(function (_indexName) { + this.store.deleteIndex(_indexName); + }, this); + } + + }.bind(this); + }, + + /** + * Deletes the database used for this store if the IDB implementations + * provides that functionality. + * + * @param {Function} [onSuccess] A callback that is called if deletion + * was successful. + * @param {Function} [onError] A callback that is called if deletion + * failed. + */ + deleteDatabase: function (onSuccess, onError) { + if (this.idb.deleteDatabase) { + this.db.close(); + var deleteRequest = this.idb.deleteDatabase(this.dbName); + deleteRequest.onsuccess = onSuccess; + deleteRequest.onerror = onError; + } else { + onError(new Error('Browser does not support IndexedDB deleteDatabase!')); + } + }, + + /********************* + * data manipulation * + *********************/ + + /** + * Puts an object into the store. If an entry with the given id exists, + * it will be overwritten. This method has a different signature for inline + * keys and out-of-line keys; please see the examples below. + * + * @param {*} [key] The key to store. This is only needed if IDBWrapper + * is set to use out-of-line keys. For inline keys - the default scenario - + * this can be omitted. + * @param {Object} value The data object to store. + * @param {Function} [onSuccess] A callback that is called if insertion + * was successful. + * @param {Function} [onError] A callback that is called if insertion + * failed. + * @returns {IDBTransaction} The transaction used for this operation. + * @example + // Storing an object, using inline keys (the default scenario): + var myCustomer = { + customerid: 2346223, + lastname: 'Doe', + firstname: 'John' + }; + myCustomerStore.put(myCustomer, mySuccessHandler, myErrorHandler); + // Note that passing success- and error-handlers is optional. 
+ * @example + // Storing an object, using out-of-line keys: + var myCustomer = { + lastname: 'Doe', + firstname: 'John' + }; + myCustomerStore.put(2346223, myCustomer, mySuccessHandler, myErrorHandler); + // Note that passing success- and error-handlers is optional. + */ + put: function (key, value, onSuccess, onError) { + if (this.keyPath !== null) { + onError = onSuccess; + onSuccess = value; + value = key; + } + onError || (onError = defaultErrorHandler); + onSuccess || (onSuccess = defaultSuccessHandler); + + var hasSuccess = false, + result = null, + putRequest; + + var putTransaction = this.db.transaction([this.storeName], this.consts.READ_WRITE); + putTransaction.oncomplete = function () { + var callback = hasSuccess ? onSuccess : onError; + callback(result); + }; + putTransaction.onabort = onError; + putTransaction.onerror = onError; + + if (this.keyPath !== null) { // in-line keys + this._addIdPropertyIfNeeded(value); + putRequest = putTransaction.objectStore(this.storeName).put(value); + } else { // out-of-line keys + putRequest = putTransaction.objectStore(this.storeName).put(value, key); + } + putRequest.onsuccess = function (event) { + hasSuccess = true; + result = event.target.result; + }; + putRequest.onerror = onError; + + return putTransaction; + }, + + /** + * Retrieves an object from the store. If no entry exists with the given id, + * the success handler will be called with null as first and only argument. + * + * @param {*} key The id of the object to fetch. + * @param {Function} [onSuccess] A callback that is called if fetching + * was successful. Will receive the object as only argument. + * @param {Function} [onError] A callback that will be called if an error + * occurred during the operation. + * @returns {IDBTransaction} The transaction used for this operation. + */ + get: function (key, onSuccess, onError) { + onError || (onError = defaultErrorHandler); + onSuccess || (onSuccess = defaultSuccessHandler); + + var hasSuccess = false, + result = null; + + var getTransaction = this.db.transaction([this.storeName], this.consts.READ_ONLY); + getTransaction.oncomplete = function () { + var callback = hasSuccess ? onSuccess : onError; + callback(result); + }; + getTransaction.onabort = onError; + getTransaction.onerror = onError; + var getRequest = getTransaction.objectStore(this.storeName).get(key); + getRequest.onsuccess = function (event) { + hasSuccess = true; + result = event.target.result; + }; + getRequest.onerror = onError; + + return getTransaction; + }, + + /** + * Removes an object from the store. + * + * @param {*} key The id of the object to remove. + * @param {Function} [onSuccess] A callback that is called if the removal + * was successful. + * @param {Function} [onError] A callback that will be called if an error + * occurred during the operation. + * @returns {IDBTransaction} The transaction used for this operation. + */ + remove: function (key, onSuccess, onError) { + onError || (onError = defaultErrorHandler); + onSuccess || (onSuccess = defaultSuccessHandler); + + var hasSuccess = false, + result = null; + + var removeTransaction = this.db.transaction([this.storeName], this.consts.READ_WRITE); + removeTransaction.oncomplete = function () { + var callback = hasSuccess ? 
onSuccess : onError; + callback(result); + }; + removeTransaction.onabort = onError; + removeTransaction.onerror = onError; + + var deleteRequest = removeTransaction.objectStore(this.storeName)['delete'](key); + deleteRequest.onsuccess = function (event) { + hasSuccess = true; + result = event.target.result; + }; + deleteRequest.onerror = onError; + + return removeTransaction; + }, + + /** + * Runs a batch of put and/or remove operations on the store. + * + * @param {Array} dataArray An array of objects containing the operation to run + * and the data object (for put operations). + * @param {Function} [onSuccess] A callback that is called if all operations + * were successful. + * @param {Function} [onError] A callback that is called if an error + * occurred during one of the operations. + * @returns {IDBTransaction} The transaction used for this operation. + */ + batch: function (dataArray, onSuccess, onError) { + onError || (onError = defaultErrorHandler); + onSuccess || (onSuccess = defaultSuccessHandler); + + if (Object.prototype.toString.call(dataArray) != '[object Array]') { + onError(new Error('dataArray argument must be of type Array.')); + } else if (dataArray.length === 0) { + return onSuccess(true); + } + + var count = dataArray.length; + var called = false; + var hasSuccess = false; + + var batchTransaction = this.db.transaction([this.storeName], this.consts.READ_WRITE); + batchTransaction.oncomplete = function () { + var callback = hasSuccess ? onSuccess : onError; + callback(hasSuccess); + }; + batchTransaction.onabort = onError; + batchTransaction.onerror = onError; + + + var onItemSuccess = function () { + count--; + if (count === 0 && !called) { + called = true; + hasSuccess = true; + } + }; + + dataArray.forEach(function (operation) { + var type = operation.type; + var key = operation.key; + var value = operation.value; + + var onItemError = function (err) { + batchTransaction.abort(); + if (!called) { + called = true; + onError(err, type, key); + } + }; + + if (type == 'remove') { + var deleteRequest = batchTransaction.objectStore(this.storeName)['delete'](key); + deleteRequest.onsuccess = onItemSuccess; + deleteRequest.onerror = onItemError; + } else if (type == 'put') { + var putRequest; + if (this.keyPath !== null) { // in-line keys + this._addIdPropertyIfNeeded(value); + putRequest = batchTransaction.objectStore(this.storeName).put(value); + } else { // out-of-line keys + putRequest = batchTransaction.objectStore(this.storeName).put(value, key); + } + putRequest.onsuccess = onItemSuccess; + putRequest.onerror = onItemError; + } + }, this); + + return batchTransaction; + }, + + /** + * Takes an array of objects and stores them in a single transaction. + * + * @param {Array} dataArray An array of objects to store + * @param {Function} [onSuccess] A callback that is called if all operations + * were successful. + * @param {Function} [onError] A callback that is called if an error + * occurred during one of the operations. + * @returns {IDBTransaction} The transaction used for this operation. + */ + putBatch: function (dataArray, onSuccess, onError) { + var batchData = dataArray.map(function (item) { + return {type: 'put', value: item}; + }); + + return this.batch(batchData, onSuccess, onError); + }, + + /** + * Like putBatch, takes an array of objects and stores them in a single + * transaction, but allows processing of the result values. 
Returns the + * processed records containing the key for newly created records to the + * onSuccess callback instead of only returning true or false for success. + * In addition, added the option for the caller to specify a key field that + * should be set to the newly created key. + * + * @param {Array} dataArray An array of objects to store + * @param {Object} [options] An object containing optional options + * @param {String} [options.keyField=this.keyPath] Specifies a field in the record to update + * with the auto-incrementing key. Defaults to the store's keyPath. + * @param {Function} [onSuccess] A callback that is called if all operations + * were successful. + * @param {Function} [onError] A callback that is called if an error + * occurred during one of the operations. + * @returns {IDBTransaction} The transaction used for this operation. + * + */ + upsertBatch: function (dataArray, options, onSuccess, onError) { + // handle `dataArray, onSuccess, onError` signature + if (typeof options == 'function') { + onError = onSuccess; + onSuccess = options; + options = {}; + } + + onError || (onError = defaultErrorHandler); + onSuccess || (onSuccess = defaultSuccessHandler); + options || (options = {}); + + if (Object.prototype.toString.call(dataArray) != '[object Array]') { + onError(new Error('dataArray argument must be of type Array.')); + } + + var keyField = options.keyField || this.keyPath; + var count = dataArray.length; + var called = false; + var hasSuccess = false; + var index = 0; // assume success callbacks are executed in order + + var batchTransaction = this.db.transaction([this.storeName], this.consts.READ_WRITE); + batchTransaction.oncomplete = function () { + if (hasSuccess) { + onSuccess(dataArray); + } else { + onError(false); + } + }; + batchTransaction.onabort = onError; + batchTransaction.onerror = onError; + + var onItemSuccess = function (event) { + var record = dataArray[index++]; + record[keyField] = event.target.result; + + count--; + if (count === 0 && !called) { + called = true; + hasSuccess = true; + } + }; + + dataArray.forEach(function (record) { + var key = record.key; + + var onItemError = function (err) { + batchTransaction.abort(); + if (!called) { + called = true; + onError(err); + } + }; + + var putRequest; + if (this.keyPath !== null) { // in-line keys + this._addIdPropertyIfNeeded(record); + putRequest = batchTransaction.objectStore(this.storeName).put(record); + } else { // out-of-line keys + putRequest = batchTransaction.objectStore(this.storeName).put(record, key); + } + putRequest.onsuccess = onItemSuccess; + putRequest.onerror = onItemError; + }, this); + + return batchTransaction; + }, + + /** + * Takes an array of keys and removes matching objects in a single + * transaction. + * + * @param {Array} keyArray An array of keys to remove + * @param {Function} [onSuccess] A callback that is called if all operations + * were successful. + * @param {Function} [onError] A callback that is called if an error + * occurred during one of the operations. + * @returns {IDBTransaction} The transaction used for this operation.
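+   * @example
+  // Hypothetical sketch: remove three entries from the customer store created above
+  // in a single transaction (handlers are optional, as with put()):
+  myCustomerStore.removeBatch([2346223, 2346224, 2346225], mySuccessHandler, myErrorHandler);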
+ */ + removeBatch: function (keyArray, onSuccess, onError) { + var batchData = keyArray.map(function (key) { + return {type: 'remove', key: key}; + }); + + return this.batch(batchData, onSuccess, onError); + }, + + /** + * Takes an array of keys and fetches matching objects + * + * @param {Array} keyArray An array of keys identifying the objects to fetch + * @param {Function} [onSuccess] A callback that is called if all operations + * were successful. + * @param {Function} [onError] A callback that is called if an error + * occurred during one of the operations. + * @param {String} [arrayType='sparse'] The type of array to pass to the + * success handler. May be one of 'sparse', 'dense' or 'skip'. Defaults to + * 'sparse'. This parameter specifies how to handle the situation if a get + * operation did not throw an error, but there was no matching object in + * the database. In most cases, 'sparse' provides the most desired + * behavior. See the examples for details. + * @returns {IDBTransaction} The transaction used for this operation. + * @example + // given that there are two objects in the database with the keypath + // values 1 and 2, and the call looks like this: + myStore.getBatch([1, 5, 2], onError, function (data) { … }, arrayType); + + // this is what the `data` array will be like: + + // arrayType == 'sparse': + // data is a sparse array containing two entries and having a length of 3: + [Object, 2: Object] + 0: Object + 2: Object + length: 3 + // calling forEach on data will result in the callback being called two + // times, with the index parameter matching the index of the key in the + // keyArray. + + // arrayType == 'dense': + // data is a dense array containing three entries and having a length of 3, + // where data[1] is of type undefined: + [Object, undefined, Object] + 0: Object + 1: undefined + 2: Object + length: 3 + // calling forEach on data will result in the callback being called three + // times, with the index parameter matching the index of the key in the + // keyArray, but the second call will have undefined as first argument. + + // arrayType == 'skip': + // data is a dense array containing two entries and having a length of 2: + [Object, Object] + 0: Object + 1: Object + length: 2 + // calling forEach on data will result in the callback being called two + // times, with the index parameter not matching the index of the key in the + // keyArray. + */ + getBatch: function (keyArray, onSuccess, onError, arrayType) { + onError || (onError = defaultErrorHandler); + onSuccess || (onSuccess = defaultSuccessHandler); + arrayType || (arrayType = 'sparse'); + + if (Object.prototype.toString.call(keyArray) != '[object Array]') { + onError(new Error('keyArray argument must be of type Array.')); + } else if (keyArray.length === 0) { + return onSuccess([]); + } + + var data = []; + var count = keyArray.length; + var called = false; + var hasSuccess = false; + var result = null; + + var batchTransaction = this.db.transaction([this.storeName], this.consts.READ_ONLY); + batchTransaction.oncomplete = function () { + var callback = hasSuccess ? 
onSuccess : onError; + callback(result); + }; + batchTransaction.onabort = onError; + batchTransaction.onerror = onError; + + var onItemSuccess = function (event) { + if (event.target.result || arrayType == 'dense') { + data.push(event.target.result); + } else if (arrayType == 'sparse') { + data.length++; + } + count--; + if (count === 0) { + called = true; + hasSuccess = true; + result = data; + } + }; + + keyArray.forEach(function (key) { + + var onItemError = function (err) { + called = true; + result = err; + onError(err); + batchTransaction.abort(); + }; + + var getRequest = batchTransaction.objectStore(this.storeName).get(key); + getRequest.onsuccess = onItemSuccess; + getRequest.onerror = onItemError; + + }, this); + + return batchTransaction; + }, + + /** + * Fetches all entries in the store. + * + * @param {Function} [onSuccess] A callback that is called if the operation + * was successful. Will receive an array of objects. + * @param {Function} [onError] A callback that will be called if an error + * occurred during the operation. + * @returns {IDBTransaction} The transaction used for this operation. + */ + getAll: function (onSuccess, onError) { + onError || (onError = defaultErrorHandler); + onSuccess || (onSuccess = defaultSuccessHandler); + var getAllTransaction = this.db.transaction([this.storeName], this.consts.READ_ONLY); + var store = getAllTransaction.objectStore(this.storeName); + if (store.getAll) { + this._getAllNative(getAllTransaction, store, onSuccess, onError); + } else { + this._getAllCursor(getAllTransaction, store, onSuccess, onError); + } + + return getAllTransaction; + }, + + /** + * Implements getAll for IDB implementations that have a non-standard + * getAll() method. + * + * @param {IDBTransaction} getAllTransaction An open READ transaction. + * @param {IDBObjectStore} store A reference to the store. + * @param {Function} onSuccess A callback that will be called if the + * operation was successful. + * @param {Function} onError A callback that will be called if an + * error occurred during the operation. + * @private + */ + _getAllNative: function (getAllTransaction, store, onSuccess, onError) { + var hasSuccess = false, + result = null; + + getAllTransaction.oncomplete = function () { + var callback = hasSuccess ? onSuccess : onError; + callback(result); + }; + getAllTransaction.onabort = onError; + getAllTransaction.onerror = onError; + + var getAllRequest = store.getAll(); + getAllRequest.onsuccess = function (event) { + hasSuccess = true; + result = event.target.result; + }; + getAllRequest.onerror = onError; + }, + + /** + * Implements getAll for IDB implementations that do not have a getAll() + * method. + * + * @param {IDBTransaction} getAllTransaction An open READ transaction. + * @param {IDBObjectStore} store A reference to the store. + * @param {Function} onSuccess A callback that will be called if the + * operation was successful. + * @param {Function} onError A callback that will be called if an + * error occurred during the operation. + * @private + */ + _getAllCursor: function (getAllTransaction, store, onSuccess, onError) { + var all = [], + hasSuccess = false, + result = null; + + getAllTransaction.oncomplete = function () { + var callback = hasSuccess ? 
onSuccess : onError; + callback(result); + }; + getAllTransaction.onabort = onError; + getAllTransaction.onerror = onError; + + var cursorRequest = store.openCursor(); + cursorRequest.onsuccess = function (event) { + var cursor = event.target.result; + if (cursor) { + all.push(cursor.value); + cursor['continue'](); + } + else { + hasSuccess = true; + result = all; + } + }; + cursorRequest.onerror = onError; + }, + + /** + * Clears the store, i.e. deletes all entries in the store. + * + * @param {Function} [onSuccess] A callback that will be called if the + * operation was successful. + * @param {Function} [onError] A callback that will be called if an + * error occurred during the operation. + * @returns {IDBTransaction} The transaction used for this operation. + */ + clear: function (onSuccess, onError) { + onError || (onError = defaultErrorHandler); + onSuccess || (onSuccess = defaultSuccessHandler); + + var hasSuccess = false, + result = null; + + var clearTransaction = this.db.transaction([this.storeName], this.consts.READ_WRITE); + clearTransaction.oncomplete = function () { + var callback = hasSuccess ? onSuccess : onError; + callback(result); + }; + clearTransaction.onabort = onError; + clearTransaction.onerror = onError; + + var clearRequest = clearTransaction.objectStore(this.storeName).clear(); + clearRequest.onsuccess = function (event) { + hasSuccess = true; + result = event.target.result; + }; + clearRequest.onerror = onError; + + return clearTransaction; + }, + + /** + * Checks if an id property needs to be present on an object and adds one if + * necessary. + * + * @param {Object} dataObj The data object that is about to be stored + * @private + */ + _addIdPropertyIfNeeded: function (dataObj) { + if (typeof dataObj[this.keyPath] == 'undefined') { + dataObj[this.keyPath] = this._insertIdCount++ + Date.now(); + } + }, + + /************ + * indexing * + ************/ + + /** + * Returns a DOMStringList of index names of the store. + * + * @return {DOMStringList} The list of index names + */ + getIndexList: function () { + return this.store.indexNames; + }, + + /** + * Checks if an index with the given name exists in the store. + * + * @param {String} indexName The name of the index to look for + * @return {Boolean} Whether the store contains an index with the given name + */ + hasIndex: function (indexName) { + return this.store.indexNames.contains(indexName); + }, + + /** + * Normalizes an object containing index data and assures that all + * properties are set. + * + * @param {Object} indexData The index data object to normalize + * @param {String} indexData.name The name of the index + * @param {String} [indexData.keyPath] The key path of the index + * @param {Boolean} [indexData.unique] Whether the index is unique + * @param {Boolean} [indexData.multiEntry] Whether the index is multi entry + */ + normalizeIndexData: function (indexData) { + indexData.keyPath = indexData.keyPath || indexData.name; + indexData.unique = !!indexData.unique; + indexData.multiEntry = !!indexData.multiEntry; + }, + + /** + * Checks if an actual index complies with an expected index.
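+     * (This check is used by openDB() to decide whether the stored index definition still matches the supplied configuration, or whether the version number has to be bumped.)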
+ * + * @param {IDBIndex} actual The actual index found in the store + * @param {Object} expected An Object describing an expected index + * @return {Boolean} Whether both index definitions are identical + */ + indexComplies: function (actual, expected) { + var complies = ['keyPath', 'unique', 'multiEntry'].every(function (key) { + // IE10 returns undefined for no multiEntry + if (key == 'multiEntry' && actual[key] === undefined && expected[key] === false) { + return true; + } + // Compound keys + if (key == 'keyPath' && Object.prototype.toString.call(expected[key]) == '[object Array]') { + var exp = expected.keyPath; + var act = actual.keyPath; + + // IE10 can't handle keyPath sequences and stores them as a string. + // The index will be unusable there, but let's still return true if + // the keyPath sequence matches. + if (typeof act == 'string') { + return exp.toString() == act; + } + + // Chrome/Opera stores keyPath squences as DOMStringList, Firefox + // as Array + if (!(typeof act.contains == 'function' || typeof act.indexOf == 'function')) { + return false; + } + + if (act.length !== exp.length) { + return false; + } + + for (var i = 0, m = exp.length; i < m; i++) { + if (!( (act.contains && act.contains(exp[i])) || act.indexOf(exp[i] !== -1) )) { + return false; + } + } + return true; + } + return expected[key] == actual[key]; + }); + return complies; + }, + + /********** + * cursor * + **********/ + + /** + * Iterates over the store using the given options and calling onItem + * for each entry matching the options. + * + * @param {Function} onItem A callback to be called for each match + * @param {Object} [options] An object defining specific options + * @param {String} [options.index=null] A name of an IDBIndex to operate on + * @param {String} [options.order=ASC] The order in which to provide the + * results, can be 'DESC' or 'ASC' + * @param {Boolean} [options.autoContinue=true] Whether to automatically + * iterate the cursor to the next result + * @param {Boolean} [options.filterDuplicates=false] Whether to exclude + * duplicate matches + * @param {IDBKeyRange} [options.keyRange=null] An IDBKeyRange to use + * @param {Boolean} [options.writeAccess=false] Whether grant write access + * to the store in the onItem callback + * @param {Function} [options.onEnd=null] A callback to be called after + * iteration has ended + * @param {Function} [options.onError=throw] A callback to be called + * if an error occurred during the operation. + * @param {Number} [options.limit=Infinity] Limit the number of returned + * results to this number + * @param {Number} [options.offset=0] Skip the provided number of results + * in the resultset + * @param {Boolean} [options.allowItemRejection=false] Allows the onItem + * function to return a Boolean to accept or reject the current item + * @returns {IDBTransaction} The transaction used for this operation. + */ + iterate: function (onItem, options) { + options = mixin({ + index: null, + order: 'ASC', + autoContinue: true, + filterDuplicates: false, + keyRange: null, + writeAccess: false, + onEnd: null, + onError: defaultErrorHandler, + limit: Infinity, + offset: 0, + allowItemRejection: false + }, options || {}); + + var directionType = options.order.toLowerCase() == 'desc' ? 'PREV' : 'NEXT'; + if (options.filterDuplicates) { + directionType += '_NO_DUPLICATE'; + } + + var hasSuccess = false; + var cursorTransaction = this.db.transaction([this.storeName], this.consts[options.writeAccess ? 
'READ_WRITE' : 'READ_ONLY']); + var cursorTarget = cursorTransaction.objectStore(this.storeName); + if (options.index) { + cursorTarget = cursorTarget.index(options.index); + } + var recordCount = 0; + + cursorTransaction.oncomplete = function () { + if (!hasSuccess) { + options.onError(null); + return; + } + if (options.onEnd) { + options.onEnd(); + } else { + onItem(null); + } + }; + cursorTransaction.onabort = options.onError; + cursorTransaction.onerror = options.onError; + + var cursorRequest = cursorTarget.openCursor(options.keyRange, this.consts[directionType]); + cursorRequest.onerror = options.onError; + cursorRequest.onsuccess = function (event) { + var cursor = event.target.result; + if (cursor) { + if (options.offset) { + cursor.advance(options.offset); + options.offset = 0; + } else { + var onItemReturn = onItem(cursor.value, cursor, cursorTransaction); + if (!options.allowItemRejection || onItemReturn !== false) { + recordCount++; + } + if (options.autoContinue) { + if (recordCount + options.offset < options.limit) { + cursor['continue'](); + } else { + hasSuccess = true; + } + } + } + } else { + hasSuccess = true; + } + }; + + return cursorTransaction; + }, + + /** + * Runs a query against the store and passes an array containing matched + * objects to the success handler. + * + * @param {Function} onSuccess A callback to be called when the operation + * was successful. + * @param {Object} [options] An object defining specific options + * @param {String} [options.index=null] A name of an IDBIndex to operate on + * @param {String} [options.order=ASC] The order in which to provide the + * results, can be 'DESC' or 'ASC' + * @param {Boolean} [options.filterDuplicates=false] Whether to exclude + * duplicate matches + * @param {IDBKeyRange} [options.keyRange=null] An IDBKeyRange to use + * @param {Function} [options.onError=throw] A callback to be called + * if an error occurred during the operation. + * @param {Number} [options.limit=Infinity] Limit the number of returned + * results to this number + * @param {Number} [options.offset=0] Skip the provided number of results + * in the resultset + * @param {Function} [options.filter=null] A custom filter function to + * apply to query results before returning. Must return `false` to reject + * an item. Can be combined with keyRanges. + * @returns {IDBTransaction} The transaction used for this operation. + */ + query: function (onSuccess, options) { + var result = [], + processedItems = 0; + options = options || {}; + options.autoContinue = true; + options.writeAccess = false; + options.allowItemRejection = !!options.filter; + options.onEnd = function () { + onSuccess(result, processedItems); + }; + return this.iterate(function (item) { + processedItems++; + var accept = options.filter ? options.filter(item) : true; + if (accept !== false) { + result.push(item); + } + return accept; + }, options); + }, + + /** + * + * Runs a query against the store, but only returns the number of matches + * instead of the matches themselves. + * + * @param {Function} onSuccess A callback to be called if the operation + * was successful. + * @param {Object} [options] An object defining specific options + * @param {String} [options.index=null] A name of an IDBIndex to operate on + * @param {IDBKeyRange} [options.keyRange=null] An IDBKeyRange to use + * @param {Function} [options.onError=throw] A callback to be called if an error + * occurred during the operation. + * @returns {IDBTransaction} The transaction used for this operation.
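+   * @example
+  // Hypothetical sketch: count all customers whose id lies between 100 and 200,
+  // using a key range built with makeKeyRange() (defined below):
+  myCustomerStore.count(function (total) { console.log(total); }, {
+    keyRange: myCustomerStore.makeKeyRange({ lower: 100, upper: 200 })
+  });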
+ */ + count: function (onSuccess, options) { + + options = mixin({ + index: null, + keyRange: null + }, options || {}); + + var onError = options.onError || defaultErrorHandler; + + var hasSuccess = false, + result = null; + + var cursorTransaction = this.db.transaction([this.storeName], this.consts.READ_ONLY); + cursorTransaction.oncomplete = function () { + var callback = hasSuccess ? onSuccess : onError; + callback(result); + }; + cursorTransaction.onabort = onError; + cursorTransaction.onerror = onError; + + var cursorTarget = cursorTransaction.objectStore(this.storeName); + if (options.index) { + cursorTarget = cursorTarget.index(options.index); + } + var countRequest = cursorTarget.count(options.keyRange); + countRequest.onsuccess = function (evt) { + hasSuccess = true; + result = evt.target.result; + }; + countRequest.onError = onError; + + return cursorTransaction; + }, + + /**************/ + /* key ranges */ + /**************/ + + /** + * Creates a key range using specified options. This key range can be + * handed over to the count() and iterate() methods. + * + * Note: You must provide at least one or both of "lower" or "upper" value. + * + * @param {Object} options The options for the key range to create + * @param {*} [options.lower] The lower bound + * @param {Boolean} [options.excludeLower] Whether to exclude the lower + * bound passed in options.lower from the key range + * @param {*} [options.upper] The upper bound + * @param {Boolean} [options.excludeUpper] Whether to exclude the upper + * bound passed in options.upper from the key range + * @param {*} [options.only] A single key value. Use this if you need a key + * range that only includes one value for a key. Providing this + * property invalidates all other properties. + * @return {IDBKeyRange} The IDBKeyRange representing the specified options + */ + makeKeyRange: function (options) { + /*jshint onecase:true */ + var keyRange, + hasLower = typeof options.lower != 'undefined', + hasUpper = typeof options.upper != 'undefined', + isOnly = typeof options.only != 'undefined'; + + switch (true) { + case isOnly: + keyRange = this.keyRange.only(options.only); + break; + case hasLower && hasUpper: + keyRange = this.keyRange.bound(options.lower, options.upper, options.excludeLower, options.excludeUpper); + break; + case hasLower: + keyRange = this.keyRange.lowerBound(options.lower, options.excludeLower); + break; + case hasUpper: + keyRange = this.keyRange.upperBound(options.upper, options.excludeUpper); + break; + default: + throw new Error('Cannot create KeyRange. Provide one or both of "lower" or "upper" value, or an "only" value.'); + } + + return keyRange; + + } + + }; + + /** helpers **/ + var empty = {}; + + function mixin (target, source) { + var name, s; + for (name in source) { + s = source[name]; + if (s !== empty[name] && s !== target[name]) { + target[name] = s; + } + } + return target; + } + + function hasVersionError(errorEvent) { + if ('error' in errorEvent.target) { + return errorEvent.target.error.name == 'VersionError'; + } else if ('errorCode' in errorEvent.target) { + return errorEvent.target.errorCode == 12; + } + return false; + } + + IDBStore.prototype = proto; + IDBStore.version = proto.version; + + return IDBStore; + +}, this); + +},{}],8:[function(require,module,exports){ +exports.read = function (buffer, offset, isLE, mLen, nBytes) { + var e, m + var eLen = nBytes * 8 - mLen - 1 + var eMax = (1 << eLen) - 1 + var eBias = eMax >> 1 + var nBits = -7 + var i = isLE ? (nBytes - 1) : 0 + var d = isLE ? 
-1 : 1 + var s = buffer[offset + i] + + i += d + + e = s & ((1 << (-nBits)) - 1) + s >>= (-nBits) + nBits += eLen + for (; nBits > 0; e = e * 256 + buffer[offset + i], i += d, nBits -= 8) {} + + m = e & ((1 << (-nBits)) - 1) + e >>= (-nBits) + nBits += mLen + for (; nBits > 0; m = m * 256 + buffer[offset + i], i += d, nBits -= 8) {} + + if (e === 0) { + e = 1 - eBias + } else if (e === eMax) { + return m ? NaN : ((s ? -1 : 1) * Infinity) + } else { + m = m + Math.pow(2, mLen) + e = e - eBias + } + return (s ? -1 : 1) * m * Math.pow(2, e - mLen) +} + +exports.write = function (buffer, value, offset, isLE, mLen, nBytes) { + var e, m, c + var eLen = nBytes * 8 - mLen - 1 + var eMax = (1 << eLen) - 1 + var eBias = eMax >> 1 + var rt = (mLen === 23 ? Math.pow(2, -24) - Math.pow(2, -77) : 0) + var i = isLE ? 0 : (nBytes - 1) + var d = isLE ? 1 : -1 + var s = value < 0 || (value === 0 && 1 / value < 0) ? 1 : 0 + + value = Math.abs(value) + + if (isNaN(value) || value === Infinity) { + m = isNaN(value) ? 1 : 0 + e = eMax + } else { + e = Math.floor(Math.log(value) / Math.LN2) + if (value * (c = Math.pow(2, -e)) < 1) { + e-- + c *= 2 + } + if (e + eBias >= 1) { + value += rt / c + } else { + value += rt * Math.pow(2, 1 - eBias) + } + if (value * c >= 2) { + e++ + c /= 2 + } + + if (e + eBias >= eMax) { + m = 0 + e = eMax + } else if (e + eBias >= 1) { + m = (value * c - 1) * Math.pow(2, mLen) + e = e + eBias + } else { + m = value * Math.pow(2, eBias - 1) * Math.pow(2, mLen) + e = 0 + } + } + + for (; mLen >= 8; buffer[offset + i] = m & 0xff, i += d, m /= 256, mLen -= 8) {} + + e = (e << mLen) | m + eLen += mLen + for (; eLen > 0; buffer[offset + i] = e & 0xff, i += d, e /= 256, eLen -= 8) {} + + buffer[offset + i - d] |= s * 128 +} + +},{}],9:[function(require,module,exports){ +/*! + * Determine if an object is a Buffer + * + * @author Feross Aboukhadijeh + * @license MIT + */ + +// The _isBuffer check is for Safari 5-7 support, because it's missing +// Object.prototype.constructor. Remove this eventually +module.exports = function (obj) { + return obj != null && (isBuffer(obj) || isSlowBuffer(obj) || !!obj._isBuffer) +} + +function isBuffer (obj) { + return !!obj.constructor && typeof obj.constructor.isBuffer === 'function' && obj.constructor.isBuffer(obj) +} + +// For Node v0.10 support. Remove this eventually. 
+function isSlowBuffer (obj) { + return typeof obj.readFloatLE === 'function' && typeof obj.slice === 'function' && isBuffer(obj.slice(0, 0)) +} + +},{}],10:[function(require,module,exports){ +var toString = {}.toString; + +module.exports = Array.isArray || function (arr) { + return toString.call(arr) == '[object Array]'; +}; + +},{}],11:[function(require,module,exports){ +var Buffer = require('buffer').Buffer; + +module.exports = isBuffer; + +function isBuffer (o) { + return Buffer.isBuffer(o) + || /\[object (.+Array|Array.+)\]/.test(Object.prototype.toString.call(o)); +} + +},{"buffer":6}],12:[function(require,module,exports){ +(function (Buffer){ +module.exports = Level + +var IDB = require('idb-wrapper') +var AbstractLevelDOWN = require('abstract-leveldown').AbstractLevelDOWN +var util = require('util') +var Iterator = require('./iterator') +var isBuffer = require('isbuffer') +var xtend = require('xtend') +var toBuffer = require('typedarray-to-buffer') + +function Level(location) { + if (!(this instanceof Level)) return new Level(location) + if (!location) throw new Error("constructor requires at least a location argument") + this.IDBOptions = {} + this.location = location +} + +util.inherits(Level, AbstractLevelDOWN) + +Level.prototype._open = function(options, callback) { + var self = this + + var idbOpts = { + storeName: this.location, + autoIncrement: false, + keyPath: null, + onStoreReady: function () { + callback && callback(null, self.idb) + }, + onError: function(err) { + callback && callback(err) + } + } + + xtend(idbOpts, options) + this.IDBOptions = idbOpts + this.idb = new IDB(idbOpts) +} + +Level.prototype._get = function (key, options, callback) { + this.idb.get(key, function (value) { + if (value === undefined) { + // 'NotFound' error, consistent with LevelDOWN API + return callback(new Error('NotFound')) + } + // by default return buffers, unless explicitly told not to + var asBuffer = true + if (options.asBuffer === false) asBuffer = false + if (options.raw) asBuffer = false + if (asBuffer) { + if (value instanceof Uint8Array) value = toBuffer(value) + else value = new Buffer(String(value)) + } + return callback(null, value, key) + }, callback) +} + +Level.prototype._del = function(id, options, callback) { + this.idb.remove(id, callback, callback) +} + +Level.prototype._put = function (key, value, options, callback) { + if (value instanceof ArrayBuffer) { + value = toBuffer(new Uint8Array(value)) + } + var obj = this.convertEncoding(key, value, options) + if (Buffer.isBuffer(obj.value)) { + if (typeof value.toArrayBuffer === 'function') { + obj.value = new Uint8Array(value.toArrayBuffer()) + } else { + obj.value = new Uint8Array(value) + } + } + this.idb.put(obj.key, obj.value, function() { callback() }, callback) +} + +Level.prototype.convertEncoding = function(key, value, options) { + if (options.raw) return {key: key, value: value} + if (value) { + var stringed = value.toString() + if (stringed === 'NaN') value = 'NaN' + } + var valEnc = options.valueEncoding + var obj = {key: key, value: value} + if (value && (!valEnc || valEnc !== 'binary')) { + if (typeof obj.value !== 'object') { + obj.value = stringed + } + } + return obj +} + +Level.prototype.iterator = function (options) { + if (typeof options !== 'object') options = {} + return new Iterator(this.idb, options) +} + +Level.prototype._batch = function (array, options, callback) { + var op + var i + var k + var copiedOp + var currentOp + var modified = [] + + if (array.length === 0) return setTimeout(callback, 
0) + + for (i = 0; i < array.length; i++) { + copiedOp = {} + currentOp = array[i] + modified[i] = copiedOp + + var converted = this.convertEncoding(currentOp.key, currentOp.value, options) + currentOp.key = converted.key + currentOp.value = converted.value + + for (k in currentOp) { + if (k === 'type' && currentOp[k] == 'del') { + copiedOp[k] = 'remove' + } else { + copiedOp[k] = currentOp[k] + } + } + } + + return this.idb.batch(modified, function(){ callback() }, callback) +} + +Level.prototype._close = function (callback) { + this.idb.db.close() + callback() +} + +Level.prototype._approximateSize = function (start, end, callback) { + var err = new Error('Not implemented') + if (callback) + return callback(err) + + throw err +} + +Level.prototype._isBuffer = function (obj) { + return Buffer.isBuffer(obj) +} + +Level.destroy = function (db, callback) { + if (typeof db === 'object') { + var prefix = db.IDBOptions.storePrefix || 'IDBWrapper-' + var dbname = db.location + } else { + var prefix = 'IDBWrapper-' + var dbname = db + } + var request = indexedDB.deleteDatabase(prefix + dbname) + request.onsuccess = function() { + callback() + } + request.onerror = function(err) { + callback(err) + } +} + +var checkKeyValue = Level.prototype._checkKeyValue = function (obj, type) { + if (obj === null || obj === undefined) + return new Error(type + ' cannot be `null` or `undefined`') + if (obj === null || obj === undefined) + return new Error(type + ' cannot be `null` or `undefined`') + if (isBuffer(obj) && obj.byteLength === 0) + return new Error(type + ' cannot be an empty ArrayBuffer') + if (String(obj) === '') + return new Error(type + ' cannot be an empty String') + if (obj.length === 0) + return new Error(type + ' cannot be an empty Array') +} + +}).call(this,require("buffer").Buffer) +},{"./iterator":13,"abstract-leveldown":3,"buffer":6,"idb-wrapper":7,"isbuffer":11,"typedarray-to-buffer":38,"util":41,"xtend":15}],13:[function(require,module,exports){ +var util = require('util') +var AbstractIterator = require('abstract-leveldown').AbstractIterator +var ltgt = require('ltgt') + +module.exports = Iterator + +function Iterator (db, options) { + if (!options) options = {} + this.options = options + AbstractIterator.call(this, db) + this._order = options.reverse ? 'DESC': 'ASC' + this._limit = options.limit + this._count = 0 + this._done = false + var lower = ltgt.lowerBound(options) + var upper = ltgt.upperBound(options) + try { + this._keyRange = lower || upper ? this.db.makeKeyRange({ + lower: lower, + upper: upper, + excludeLower: ltgt.lowerBoundExclusive(options), + excludeUpper: ltgt.upperBoundExclusive(options) + }) : null + } catch (e) { + // The lower key is greater than the upper key. + // IndexedDB throws an error, but we'll just return 0 results. 
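+    // _next() checks this flag and invokes its callback immediately, so the iterator yields no results.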
+ this._keyRangeError = true + } + this.callback = null +} + +util.inherits(Iterator, AbstractIterator) + +Iterator.prototype.createIterator = function() { + var self = this + + self.iterator = self.db.iterate(function () { + self.onItem.apply(self, arguments) + }, { + keyRange: self._keyRange, + autoContinue: false, + order: self._order, + onError: function(err) { console.log('horrible error', err) }, + }) +} + +// TODO the limit implementation here just ignores all reads after limit has been reached +// it should cancel the iterator instead but I don't know how +Iterator.prototype.onItem = function (value, cursor, cursorTransaction) { + if (!cursor && this.callback) { + this.callback() + this.callback = false + return + } + var shouldCall = true + + if (!!this._limit && this._limit > 0 && this._count++ >= this._limit) + shouldCall = false + + if (shouldCall) this.callback(false, cursor.key, cursor.value) + if (cursor) cursor['continue']() +} + +Iterator.prototype._next = function (callback) { + if (!callback) return new Error('next() requires a callback argument') + if (this._keyRangeError) return callback() + if (!this._started) { + this.createIterator() + this._started = true + } + this.callback = callback +} + +},{"abstract-leveldown":3,"ltgt":16,"util":41}],14:[function(require,module,exports){ +module.exports = hasKeys + +function hasKeys(source) { + return source !== null && + (typeof source === "object" || + typeof source === "function") +} + +},{}],15:[function(require,module,exports){ +var Keys = require("object-keys") +var hasKeys = require("./has-keys") + +module.exports = extend + +function extend() { + var target = {} + + for (var i = 0; i < arguments.length; i++) { + var source = arguments[i] + + if (!hasKeys(source)) { + continue + } + + var keys = Keys(source) + + for (var j = 0; j < keys.length; j++) { + var name = keys[j] + target[name] = source[name] + } + } + + return target +} + +},{"./has-keys":14,"object-keys":18}],16:[function(require,module,exports){ +(function (Buffer){ + +exports.compare = function (a, b) { + + if(Buffer.isBuffer(a)) { + var l = Math.min(a.length, b.length) + for(var i = 0; i < l; i++) { + var cmp = a[i] - b[i] + if(cmp) return cmp + } + return a.length - b.length + } + + return a < b ? -1 : a > b ? 1 : 0 +} + +function has(obj, key) { + return Object.hasOwnProperty.call(obj, key) +} + +// to be compatible with the current abstract-leveldown tests +// nullish or empty strings. +// I could use !!val but I want to permit numbers and booleans, +// if possible. + +function isDef (val) { + return val !== undefined && val !== '' +} + +function has (range, name) { + return Object.hasOwnProperty.call(range, name) +} + +function hasKey(range, name) { + return Object.hasOwnProperty.call(range, name) && name +} + +var lowerBoundKey = exports.lowerBoundKey = function (range) { + return ( + hasKey(range, 'gt') + || hasKey(range, 'gte') + || hasKey(range, 'min') + || (range.reverse ? hasKey(range, 'end') : hasKey(range, 'start')) + || undefined + ) +} + +var lowerBound = exports.lowerBound = function (range) { + var k = lowerBoundKey(range) + return k && range[k] +} + +exports.lowerBoundInclusive = function (range) { + return has(range, 'gt') ? false : true +} + +exports.upperBoundInclusive = + function (range) { + return has(range, 'lt') || !range.minEx ? false : true + } + +var lowerBoundExclusive = exports.lowerBoundExclusive = + function (range) { + return has(range, 'gt') || range.minEx ? 
true : false + } + +var upperBoundExclusive = exports.upperBoundExclusive = + function (range) { + return has(range, 'lt') ? true : false + } + +var upperBoundKey = exports.upperBoundKey = function (range) { + return ( + hasKey(range, 'lt') + || hasKey(range, 'lte') + || hasKey(range, 'max') + || (range.reverse ? hasKey(range, 'start') : hasKey(range, 'end')) + || undefined + ) +} + +var upperBound = exports.upperBound = function (range) { + var k = upperBoundKey(range) + return k && range[k] +} + +function id (e) { return e } + +exports.toLtgt = function (range, _range, map, lower, upper) { + _range = _range || {} + map = map || id + var defaults = arguments.length > 3 + var lb = exports.lowerBoundKey(range) + var ub = exports.upperBoundKey(range) + if(lb) { + if(lb === 'gt') _range.gt = map(range.gt, false) + else _range.gte = map(range[lb], false) + } + else if(defaults) + _range.gte = map(lower, false) + + if(ub) { + if(ub === 'lt') _range.lt = map(range.lt, true) + else _range.lte = map(range[ub], true) + } + else if(defaults) + _range.lte = map(upper, true) + + if(range.reverse != null) + _range.reverse = !!range.reverse + + //if range was used mutably + //(in level-sublevel it's part of an options object + //that has more properties on it.) + if(has(_range, 'max')) delete _range.max + if(has(_range, 'min')) delete _range.min + if(has(_range, 'start')) delete _range.start + if(has(_range, 'end')) delete _range.end + + return _range +} + +exports.contains = function (range, key, compare) { + compare = compare || exports.compare + + var lb = lowerBound(range) + if(isDef(lb)) { + var cmp = compare(key, lb) + if(cmp < 0 || (cmp === 0 && lowerBoundExclusive(range))) + return false + } + + var ub = upperBound(range) + if(isDef(ub)) { + var cmp = compare(key, ub) + if(cmp > 0 || (cmp === 0) && upperBoundExclusive(range)) + return false + } + + return true +} + +exports.filter = function (range, compare) { + return function (key) { + return exports.contains(range, key, compare) + } +} + +}).call(this,{"isBuffer":require("../is-buffer/index.js")}) +},{"../is-buffer/index.js":9}],17:[function(require,module,exports){ +var hasOwn = Object.prototype.hasOwnProperty; +var toString = Object.prototype.toString; + +var isFunction = function (fn) { + var isFunc = (typeof fn === 'function' && !(fn instanceof RegExp)) || toString.call(fn) === '[object Function]'; + if (!isFunc && typeof window !== 'undefined') { + isFunc = fn === window.setTimeout || fn === window.alert || fn === window.confirm || fn === window.prompt; + } + return isFunc; +}; + +module.exports = function forEach(obj, fn) { + if (!isFunction(fn)) { + throw new TypeError('iterator must be a function'); + } + var i, k, + isString = typeof obj === 'string', + l = obj.length, + context = arguments.length > 2 ? arguments[2] : null; + if (l === +l) { + for (i = 0; i < l; i++) { + if (context === null) { + fn(isString ? obj.charAt(i) : obj[i], i, obj); + } else { + fn.call(context, isString ? 
obj.charAt(i) : obj[i], i, obj); + } + } + } else { + for (k in obj) { + if (hasOwn.call(obj, k)) { + if (context === null) { + fn(obj[k], k, obj); + } else { + fn.call(context, obj[k], k, obj); + } + } + } + } +}; + + +},{}],18:[function(require,module,exports){ +module.exports = Object.keys || require('./shim'); + + +},{"./shim":20}],19:[function(require,module,exports){ +var toString = Object.prototype.toString; + +module.exports = function isArguments(value) { + var str = toString.call(value); + var isArguments = str === '[object Arguments]'; + if (!isArguments) { + isArguments = str !== '[object Array]' + && value !== null + && typeof value === 'object' + && typeof value.length === 'number' + && value.length >= 0 + && toString.call(value.callee) === '[object Function]'; + } + return isArguments; +}; + + +},{}],20:[function(require,module,exports){ +(function () { + "use strict"; + + // modified from https://github.com/kriskowal/es5-shim + var has = Object.prototype.hasOwnProperty, + toString = Object.prototype.toString, + forEach = require('./foreach'), + isArgs = require('./isArguments'), + hasDontEnumBug = !({'toString': null}).propertyIsEnumerable('toString'), + hasProtoEnumBug = (function () {}).propertyIsEnumerable('prototype'), + dontEnums = [ + "toString", + "toLocaleString", + "valueOf", + "hasOwnProperty", + "isPrototypeOf", + "propertyIsEnumerable", + "constructor" + ], + keysShim; + + keysShim = function keys(object) { + var isObject = object !== null && typeof object === 'object', + isFunction = toString.call(object) === '[object Function]', + isArguments = isArgs(object), + theKeys = []; + + if (!isObject && !isFunction && !isArguments) { + throw new TypeError("Object.keys called on a non-object"); + } + + if (isArguments) { + forEach(object, function (value) { + theKeys.push(value); + }); + } else { + var name, + skipProto = hasProtoEnumBug && isFunction; + + for (name in object) { + if (!(skipProto && name === 'prototype') && has.call(object, name)) { + theKeys.push(name); + } + } + } + + if (hasDontEnumBug) { + var ctor = object.constructor, + skipConstructor = ctor && ctor.prototype === object; + + forEach(dontEnums, function (dontEnum) { + if (!(skipConstructor && dontEnum === 'constructor') && has.call(object, dontEnum)) { + theKeys.push(dontEnum); + } + }); + } + return theKeys; + }; + + module.exports = keysShim; +}()); + + +},{"./foreach":17,"./isArguments":19}],21:[function(require,module,exports){ +// Top level file is just a mixin of submodules & constants +'use strict'; + +var assign = require('./lib/utils/common').assign; + +var deflate = require('./lib/deflate'); +var inflate = require('./lib/inflate'); +var constants = require('./lib/zlib/constants'); + +var pako = {}; + +assign(pako, deflate, inflate, constants); + +module.exports = pako; + +},{"./lib/deflate":22,"./lib/inflate":23,"./lib/utils/common":24,"./lib/zlib/constants":27}],22:[function(require,module,exports){ +'use strict'; + + +var zlib_deflate = require('./zlib/deflate'); +var utils = require('./utils/common'); +var strings = require('./utils/strings'); +var msg = require('./zlib/messages'); +var ZStream = require('./zlib/zstream'); + +var toString = Object.prototype.toString; + +/* Public constants ==========================================================*/ +/* ===========================================================================*/ + +var Z_NO_FLUSH = 0; +var Z_FINISH = 4; + +var Z_OK = 0; +var Z_STREAM_END = 1; +var Z_SYNC_FLUSH = 2; + +var Z_DEFAULT_COMPRESSION = -1; + +var 
Z_DEFAULT_STRATEGY = 0; + +var Z_DEFLATED = 8; + +/* ===========================================================================*/ + + +/** + * class Deflate + * + * Generic JS-style wrapper for zlib calls. If you don't need + * streaming behaviour - use more simple functions: [[deflate]], + * [[deflateRaw]] and [[gzip]]. + **/ + +/* internal + * Deflate.chunks -> Array + * + * Chunks of output data, if [[Deflate#onData]] not overriden. + **/ + +/** + * Deflate.result -> Uint8Array|Array + * + * Compressed result, generated by default [[Deflate#onData]] + * and [[Deflate#onEnd]] handlers. Filled after you push last chunk + * (call [[Deflate#push]] with `Z_FINISH` / `true` param) or if you + * push a chunk with explicit flush (call [[Deflate#push]] with + * `Z_SYNC_FLUSH` param). + **/ + +/** + * Deflate.err -> Number + * + * Error code after deflate finished. 0 (Z_OK) on success. + * You will not need it in real life, because deflate errors + * are possible only on wrong options or bad `onData` / `onEnd` + * custom handlers. + **/ + +/** + * Deflate.msg -> String + * + * Error message, if [[Deflate.err]] != 0 + **/ + + +/** + * new Deflate(options) + * - options (Object): zlib deflate options. + * + * Creates new deflator instance with specified params. Throws exception + * on bad params. Supported options: + * + * - `level` + * - `windowBits` + * - `memLevel` + * - `strategy` + * - `dictionary` + * + * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced) + * for more information on these. + * + * Additional options, for internal needs: + * + * - `chunkSize` - size of generated data chunks (16K by default) + * - `raw` (Boolean) - do raw deflate + * - `gzip` (Boolean) - create gzip wrapper + * - `to` (String) - if equal to 'string', then result will be "binary string" + * (each char code [0..255]) + * - `header` (Object) - custom header for gzip + * - `text` (Boolean) - true if compressed data believed to be text + * - `time` (Number) - modification time, unix timestamp + * - `os` (Number) - operation system code + * - `extra` (Array) - array of bytes with extra data (max 65536) + * - `name` (String) - file name (binary string) + * - `comment` (String) - comment (binary string) + * - `hcrc` (Boolean) - true if header crc should be added + * + * ##### Example: + * + * ```javascript + * var pako = require('pako') + * , chunk1 = Uint8Array([1,2,3,4,5,6,7,8,9]) + * , chunk2 = Uint8Array([10,11,12,13,14,15,16,17,18,19]); + * + * var deflate = new pako.Deflate({ level: 3}); + * + * deflate.push(chunk1, false); + * deflate.push(chunk2, true); // true -> last chunk + * + * if (deflate.err) { throw new Error(deflate.err); } + * + * console.log(deflate.result); + * ``` + **/ +function Deflate(options) { + if (!(this instanceof Deflate)) return new Deflate(options); + + this.options = utils.assign({ + level: Z_DEFAULT_COMPRESSION, + method: Z_DEFLATED, + chunkSize: 16384, + windowBits: 15, + memLevel: 8, + strategy: Z_DEFAULT_STRATEGY, + to: '' + }, options || {}); + + var opt = this.options; + + if (opt.raw && (opt.windowBits > 0)) { + opt.windowBits = -opt.windowBits; + } + + else if (opt.gzip && (opt.windowBits > 0) && (opt.windowBits < 16)) { + opt.windowBits += 16; + } + + this.err = 0; // error code, if happens (0 = Z_OK) + this.msg = ''; // error message + this.ended = false; // used to avoid multiple onEnd() calls + this.chunks = []; // chunks of compressed data + + this.strm = new ZStream(); + this.strm.avail_out = 0; + + var status = zlib_deflate.deflateInit2( + 
this.strm, + opt.level, + opt.method, + opt.windowBits, + opt.memLevel, + opt.strategy + ); + + if (status !== Z_OK) { + throw new Error(msg[status]); + } + + if (opt.header) { + zlib_deflate.deflateSetHeader(this.strm, opt.header); + } + + if (opt.dictionary) { + var dict; + // Convert data if needed + if (typeof opt.dictionary === 'string') { + // If we need to compress text, change encoding to utf8. + dict = strings.string2buf(opt.dictionary); + } else if (toString.call(opt.dictionary) === '[object ArrayBuffer]') { + dict = new Uint8Array(opt.dictionary); + } else { + dict = opt.dictionary; + } + + status = zlib_deflate.deflateSetDictionary(this.strm, dict); + + if (status !== Z_OK) { + throw new Error(msg[status]); + } + + this._dict_set = true; + } +} + +/** + * Deflate#push(data[, mode]) -> Boolean + * - data (Uint8Array|Array|ArrayBuffer|String): input data. Strings will be + * converted to utf8 byte sequence. + * - mode (Number|Boolean): 0..6 for corresponding Z_NO_FLUSH..Z_TREE modes. + * See constants. Skipped or `false` means Z_NO_FLUSH, `true` meansh Z_FINISH. + * + * Sends input data to deflate pipe, generating [[Deflate#onData]] calls with + * new compressed chunks. Returns `true` on success. The last data block must have + * mode Z_FINISH (or `true`). That will flush internal pending buffers and call + * [[Deflate#onEnd]]. For interim explicit flushes (without ending the stream) you + * can use mode Z_SYNC_FLUSH, keeping the compression context. + * + * On fail call [[Deflate#onEnd]] with error code and return false. + * + * We strongly recommend to use `Uint8Array` on input for best speed (output + * array format is detected automatically). Also, don't skip last param and always + * use the same type in your code (boolean or number). That will improve JS speed. + * + * For regular `Array`-s make sure all elements are [0..255]. + * + * ##### Example + * + * ```javascript + * push(chunk, false); // push one of data chunks + * ... + * push(chunk, true); // push last chunk + * ``` + **/ +Deflate.prototype.push = function (data, mode) { + var strm = this.strm; + var chunkSize = this.options.chunkSize; + var status, _mode; + + if (this.ended) { return false; } + + _mode = (mode === ~~mode) ? mode : ((mode === true) ? Z_FINISH : Z_NO_FLUSH); + + // Convert data if needed + if (typeof data === 'string') { + // If we need to compress text, change encoding to utf8. + strm.input = strings.string2buf(data); + } else if (toString.call(data) === '[object ArrayBuffer]') { + strm.input = new Uint8Array(data); + } else { + strm.input = data; + } + + strm.next_in = 0; + strm.avail_in = strm.input.length; + + do { + if (strm.avail_out === 0) { + strm.output = new utils.Buf8(chunkSize); + strm.next_out = 0; + strm.avail_out = chunkSize; + } + status = zlib_deflate.deflate(strm, _mode); /* no bad return value */ + + if (status !== Z_STREAM_END && status !== Z_OK) { + this.onEnd(status); + this.ended = true; + return false; + } + if (strm.avail_out === 0 || (strm.avail_in === 0 && (_mode === Z_FINISH || _mode === Z_SYNC_FLUSH))) { + if (this.options.to === 'string') { + this.onData(strings.buf2binstring(utils.shrinkBuf(strm.output, strm.next_out))); + } else { + this.onData(utils.shrinkBuf(strm.output, strm.next_out)); + } + } + } while ((strm.avail_in > 0 || strm.avail_out === 0) && status !== Z_STREAM_END); + + // Finalize on the last chunk. 
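+  // Illustrative sketch of the call pattern this branch completes (same
+  // names as in the doc-comment example above, nothing new introduced):
+  //
+  //   var deflate = new Deflate({ level: 3 });
+  //   deflate.push(chunk1, false);   // interim chunk, Z_NO_FLUSH
+  //   deflate.push(chunk2, true);    // last chunk, Z_FINISH -> deflateEnd()
+  //                                  // runs, onEnd() fires, deflate.result
+  //                                  // holds the joined output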
+ if (_mode === Z_FINISH) { + status = zlib_deflate.deflateEnd(this.strm); + this.onEnd(status); + this.ended = true; + return status === Z_OK; + } + + // callback interim results if Z_SYNC_FLUSH. + if (_mode === Z_SYNC_FLUSH) { + this.onEnd(Z_OK); + strm.avail_out = 0; + return true; + } + + return true; +}; + + +/** + * Deflate#onData(chunk) -> Void + * - chunk (Uint8Array|Array|String): ouput data. Type of array depends + * on js engine support. When string output requested, each chunk + * will be string. + * + * By default, stores data blocks in `chunks[]` property and glue + * those in `onEnd`. Override this handler, if you need another behaviour. + **/ +Deflate.prototype.onData = function (chunk) { + this.chunks.push(chunk); +}; + + +/** + * Deflate#onEnd(status) -> Void + * - status (Number): deflate status. 0 (Z_OK) on success, + * other if not. + * + * Called once after you tell deflate that the input stream is + * complete (Z_FINISH) or should be flushed (Z_SYNC_FLUSH) + * or if an error happened. By default - join collected chunks, + * free memory and fill `results` / `err` properties. + **/ +Deflate.prototype.onEnd = function (status) { + // On success - join + if (status === Z_OK) { + if (this.options.to === 'string') { + this.result = this.chunks.join(''); + } else { + this.result = utils.flattenChunks(this.chunks); + } + } + this.chunks = []; + this.err = status; + this.msg = this.strm.msg; +}; + + +/** + * deflate(data[, options]) -> Uint8Array|Array|String + * - data (Uint8Array|Array|String): input data to compress. + * - options (Object): zlib deflate options. + * + * Compress `data` with deflate algorithm and `options`. + * + * Supported options are: + * + * - level + * - windowBits + * - memLevel + * - strategy + * - dictionary + * + * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced) + * for more information on these. + * + * Sugar (options): + * + * - `raw` (Boolean) - say that we work with raw stream, if you don't wish to specify + * negative windowBits implicitly. + * - `to` (String) - if equal to 'string', then result will be "binary string" + * (each char code [0..255]) + * + * ##### Example: + * + * ```javascript + * var pako = require('pako') + * , data = Uint8Array([1,2,3,4,5,6,7,8,9]); + * + * console.log(pako.deflate(data)); + * ``` + **/ +function deflate(input, options) { + var deflator = new Deflate(options); + + deflator.push(input, true); + + // That will never happens, if you don't cheat with options :) + if (deflator.err) { throw deflator.msg; } + + return deflator.result; +} + + +/** + * deflateRaw(data[, options]) -> Uint8Array|Array|String + * - data (Uint8Array|Array|String): input data to compress. + * - options (Object): zlib deflate options. + * + * The same as [[deflate]], but creates raw data, without wrapper + * (header and adler32 crc). + **/ +function deflateRaw(input, options) { + options = options || {}; + options.raw = true; + return deflate(input, options); +} + + +/** + * gzip(data[, options]) -> Uint8Array|Array|String + * - data (Uint8Array|Array|String): input data to compress. + * - options (Object): zlib deflate options. + * + * The same as [[deflate]], but create gzip wrapper instead of + * deflate one. 
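+ *
+ * ##### Example (illustrative sketch, same `pako` object as in the examples above):
+ *
+ * ```javascript
+ * var gz = pako.gzip(new Uint8Array([1, 2, 3, 4]));
+ * // gz is the deflate-compressed data wrapped in a gzip header/trailer
+ * ```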
+ **/ +function gzip(input, options) { + options = options || {}; + options.gzip = true; + return deflate(input, options); +} + + +exports.Deflate = Deflate; +exports.deflate = deflate; +exports.deflateRaw = deflateRaw; +exports.gzip = gzip; + +},{"./utils/common":24,"./utils/strings":25,"./zlib/deflate":29,"./zlib/messages":34,"./zlib/zstream":36}],23:[function(require,module,exports){ +'use strict'; + + +var zlib_inflate = require('./zlib/inflate'); +var utils = require('./utils/common'); +var strings = require('./utils/strings'); +var c = require('./zlib/constants'); +var msg = require('./zlib/messages'); +var ZStream = require('./zlib/zstream'); +var GZheader = require('./zlib/gzheader'); + +var toString = Object.prototype.toString; + +/** + * class Inflate + * + * Generic JS-style wrapper for zlib calls. If you don't need + * streaming behaviour - use more simple functions: [[inflate]] + * and [[inflateRaw]]. + **/ + +/* internal + * inflate.chunks -> Array + * + * Chunks of output data, if [[Inflate#onData]] not overriden. + **/ + +/** + * Inflate.result -> Uint8Array|Array|String + * + * Uncompressed result, generated by default [[Inflate#onData]] + * and [[Inflate#onEnd]] handlers. Filled after you push last chunk + * (call [[Inflate#push]] with `Z_FINISH` / `true` param) or if you + * push a chunk with explicit flush (call [[Inflate#push]] with + * `Z_SYNC_FLUSH` param). + **/ + +/** + * Inflate.err -> Number + * + * Error code after inflate finished. 0 (Z_OK) on success. + * Should be checked if broken data possible. + **/ + +/** + * Inflate.msg -> String + * + * Error message, if [[Inflate.err]] != 0 + **/ + + +/** + * new Inflate(options) + * - options (Object): zlib inflate options. + * + * Creates new inflator instance with specified params. Throws exception + * on bad params. Supported options: + * + * - `windowBits` + * - `dictionary` + * + * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced) + * for more information on these. + * + * Additional options, for internal needs: + * + * - `chunkSize` - size of generated data chunks (16K by default) + * - `raw` (Boolean) - do raw inflate + * - `to` (String) - if equal to 'string', then result will be converted + * from utf8 to utf16 (javascript) string. When string output requested, + * chunk length can differ from `chunkSize`, depending on content. + * + * By default, when no options set, autodetect deflate/gzip data format via + * wrapper header. + * + * ##### Example: + * + * ```javascript + * var pako = require('pako') + * , chunk1 = Uint8Array([1,2,3,4,5,6,7,8,9]) + * , chunk2 = Uint8Array([10,11,12,13,14,15,16,17,18,19]); + * + * var inflate = new pako.Inflate({ level: 3}); + * + * inflate.push(chunk1, false); + * inflate.push(chunk2, true); // true -> last chunk + * + * if (inflate.err) { throw new Error(inflate.err); } + * + * console.log(inflate.result); + * ``` + **/ +function Inflate(options) { + if (!(this instanceof Inflate)) return new Inflate(options); + + this.options = utils.assign({ + chunkSize: 16384, + windowBits: 0, + to: '' + }, options || {}); + + var opt = this.options; + + // Force window size for `raw` data, if not set directly, + // because we have no header for autodetect. 
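+  // A sketch of the fixups applied below (they only restate the code):
+  //   - `raw: true` with the default windowBits of 0 ends up as -15
+  //     (negative windowBits means raw deflate, no header to autodetect)
+  //   - windowBits left unset adds 32, telling zlib to autodetect
+  //     gzip vs. deflate from the wrapper header
+  //   - in the gzip range (16..47) the low bits are forced to 15, since
+  //     a gzip header carries no window size to read back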
+ if (opt.raw && (opt.windowBits >= 0) && (opt.windowBits < 16)) { + opt.windowBits = -opt.windowBits; + if (opt.windowBits === 0) { opt.windowBits = -15; } + } + + // If `windowBits` not defined (and mode not raw) - set autodetect flag for gzip/deflate + if ((opt.windowBits >= 0) && (opt.windowBits < 16) && + !(options && options.windowBits)) { + opt.windowBits += 32; + } + + // Gzip header has no info about windows size, we can do autodetect only + // for deflate. So, if window size not set, force it to max when gzip possible + if ((opt.windowBits > 15) && (opt.windowBits < 48)) { + // bit 3 (16) -> gzipped data + // bit 4 (32) -> autodetect gzip/deflate + if ((opt.windowBits & 15) === 0) { + opt.windowBits |= 15; + } + } + + this.err = 0; // error code, if happens (0 = Z_OK) + this.msg = ''; // error message + this.ended = false; // used to avoid multiple onEnd() calls + this.chunks = []; // chunks of compressed data + + this.strm = new ZStream(); + this.strm.avail_out = 0; + + var status = zlib_inflate.inflateInit2( + this.strm, + opt.windowBits + ); + + if (status !== c.Z_OK) { + throw new Error(msg[status]); + } + + this.header = new GZheader(); + + zlib_inflate.inflateGetHeader(this.strm, this.header); +} + +/** + * Inflate#push(data[, mode]) -> Boolean + * - data (Uint8Array|Array|ArrayBuffer|String): input data + * - mode (Number|Boolean): 0..6 for corresponding Z_NO_FLUSH..Z_TREE modes. + * See constants. Skipped or `false` means Z_NO_FLUSH, `true` meansh Z_FINISH. + * + * Sends input data to inflate pipe, generating [[Inflate#onData]] calls with + * new output chunks. Returns `true` on success. The last data block must have + * mode Z_FINISH (or `true`). That will flush internal pending buffers and call + * [[Inflate#onEnd]]. For interim explicit flushes (without ending the stream) you + * can use mode Z_SYNC_FLUSH, keeping the decompression context. + * + * On fail call [[Inflate#onEnd]] with error code and return false. + * + * We strongly recommend to use `Uint8Array` on input for best speed (output + * format is detected automatically). Also, don't skip last param and always + * use the same type in your code (boolean or number). That will improve JS speed. + * + * For regular `Array`-s make sure all elements are [0..255]. + * + * ##### Example + * + * ```javascript + * push(chunk, false); // push one of data chunks + * ... + * push(chunk, true); // push last chunk + * ``` + **/ +Inflate.prototype.push = function (data, mode) { + var strm = this.strm; + var chunkSize = this.options.chunkSize; + var dictionary = this.options.dictionary; + var status, _mode; + var next_out_utf8, tail, utf8str; + var dict; + + // Flag to properly process Z_BUF_ERROR on testing inflate call + // when we check that all output data was flushed. + var allowBufError = false; + + if (this.ended) { return false; } + _mode = (mode === ~~mode) ? mode : ((mode === true) ? 
c.Z_FINISH : c.Z_NO_FLUSH); + + // Convert data if needed + if (typeof data === 'string') { + // Only binary strings can be decompressed on practice + strm.input = strings.binstring2buf(data); + } else if (toString.call(data) === '[object ArrayBuffer]') { + strm.input = new Uint8Array(data); + } else { + strm.input = data; + } + + strm.next_in = 0; + strm.avail_in = strm.input.length; + + do { + if (strm.avail_out === 0) { + strm.output = new utils.Buf8(chunkSize); + strm.next_out = 0; + strm.avail_out = chunkSize; + } + + status = zlib_inflate.inflate(strm, c.Z_NO_FLUSH); /* no bad return value */ + + if (status === c.Z_NEED_DICT && dictionary) { + // Convert data if needed + if (typeof dictionary === 'string') { + dict = strings.string2buf(dictionary); + } else if (toString.call(dictionary) === '[object ArrayBuffer]') { + dict = new Uint8Array(dictionary); + } else { + dict = dictionary; + } + + status = zlib_inflate.inflateSetDictionary(this.strm, dict); + + } + + if (status === c.Z_BUF_ERROR && allowBufError === true) { + status = c.Z_OK; + allowBufError = false; + } + + if (status !== c.Z_STREAM_END && status !== c.Z_OK) { + this.onEnd(status); + this.ended = true; + return false; + } + + if (strm.next_out) { + if (strm.avail_out === 0 || status === c.Z_STREAM_END || (strm.avail_in === 0 && (_mode === c.Z_FINISH || _mode === c.Z_SYNC_FLUSH))) { + + if (this.options.to === 'string') { + + next_out_utf8 = strings.utf8border(strm.output, strm.next_out); + + tail = strm.next_out - next_out_utf8; + utf8str = strings.buf2string(strm.output, next_out_utf8); + + // move tail + strm.next_out = tail; + strm.avail_out = chunkSize - tail; + if (tail) { utils.arraySet(strm.output, strm.output, next_out_utf8, tail, 0); } + + this.onData(utf8str); + + } else { + this.onData(utils.shrinkBuf(strm.output, strm.next_out)); + } + } + } + + // When no more input data, we should check that internal inflate buffers + // are flushed. The only way to do it when avail_out = 0 - run one more + // inflate pass. But if output data not exists, inflate return Z_BUF_ERROR. + // Here we set flag to process this error properly. + // + // NOTE. Deflate does not return error in this case and does not needs such + // logic. + if (strm.avail_in === 0 && strm.avail_out === 0) { + allowBufError = true; + } + + } while ((strm.avail_in > 0 || strm.avail_out === 0) && status !== c.Z_STREAM_END); + + if (status === c.Z_STREAM_END) { + _mode = c.Z_FINISH; + } + + // Finalize on the last chunk. + if (_mode === c.Z_FINISH) { + status = zlib_inflate.inflateEnd(this.strm); + this.onEnd(status); + this.ended = true; + return status === c.Z_OK; + } + + // callback interim results if Z_SYNC_FLUSH. + if (_mode === c.Z_SYNC_FLUSH) { + this.onEnd(c.Z_OK); + strm.avail_out = 0; + return true; + } + + return true; +}; + + +/** + * Inflate#onData(chunk) -> Void + * - chunk (Uint8Array|Array|String): ouput data. Type of array depends + * on js engine support. When string output requested, each chunk + * will be string. + * + * By default, stores data blocks in `chunks[]` property and glue + * those in `onEnd`. Override this handler, if you need another behaviour. + **/ +Inflate.prototype.onData = function (chunk) { + this.chunks.push(chunk); +}; + + +/** + * Inflate#onEnd(status) -> Void + * - status (Number): inflate status. 0 (Z_OK) on success, + * other if not. + * + * Called either after you tell inflate that the input stream is + * complete (Z_FINISH) or should be flushed (Z_SYNC_FLUSH) + * or if an error happened. 
By default - join collected chunks, + * free memory and fill `result` / `err` properties. + **/ +Inflate.prototype.onEnd = function (status) { + // On success - join + if (status === c.Z_OK) { + if (this.options.to === 'string') { + // Glue & convert here, until we teach pako to send + // utf8 aligned strings to onData + this.result = this.chunks.join(''); + } else { + this.result = utils.flattenChunks(this.chunks); + } + } + this.chunks = []; + this.err = status; + this.msg = this.strm.msg; +}; + + +/** + * inflate(data[, options]) -> Uint8Array|Array|String + * - data (Uint8Array|Array|String): input data to decompress. + * - options (Object): zlib inflate options. + * + * Decompress `data` with inflate/ungzip and `options`. Autodetect + * format via wrapper header by default. That's why we don't provide + * a separate `ungzip` method. + * + * Supported options are: + * + * - windowBits + * + * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced) + * for more information. + * + * Sugar (options): + * + * - `raw` (Boolean) - say that we work with raw stream, if you don't wish to specify + * negative windowBits implicitly. + * - `to` (String) - if equal to 'string', then result will be converted + * from utf8 to utf16 (javascript) string. When string output requested, + * chunk length can differ from `chunkSize`, depending on content. + * + * + * ##### Example: + * + * ```javascript + * var pako = require('pako') + * , input = pako.deflate([1,2,3,4,5,6,7,8,9]) + * , output; + * + * try { + * output = pako.inflate(input); + * } catch (err) { + * console.log(err); + * } + * ``` + **/ +function inflate(input, options) { + var inflator = new Inflate(options); + + inflator.push(input, true); + + // That will never happen, unless you cheat with options :) + if (inflator.err) { throw inflator.msg; } + + return inflator.result; +} + + +/** + * inflateRaw(data[, options]) -> Uint8Array|Array|String + * - data (Uint8Array|Array|String): input data to decompress. + * - options (Object): zlib inflate options. + * + * The same as [[inflate]], but expects raw data, without wrapper + * (header and adler32 crc). + **/ +function inflateRaw(input, options) { + options = options || {}; + options.raw = true; + return inflate(input, options); +} + + +/** + * ungzip(data[, options]) -> Uint8Array|Array|String + * - data (Uint8Array|Array|String): input data to decompress. + * - options (Object): zlib inflate options. + * + * Just a shortcut to [[inflate]], because it autodetects format + * by header.content. Done for convenience.
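+ *
+ * ##### Example (illustrative sketch; `gzippedData` is a placeholder for any
+ * gzip- or deflate-compressed input):
+ *
+ * ```javascript
+ * var bytes = pako.ungzip(gzippedData);                   // Uint8Array
+ * var text  = pako.ungzip(gzippedData, { to: 'string' }); // utf16 string
+ * ```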
+ **/ + + +exports.Inflate = Inflate; +exports.inflate = inflate; +exports.inflateRaw = inflateRaw; +exports.ungzip = inflate; + +},{"./utils/common":24,"./utils/strings":25,"./zlib/constants":27,"./zlib/gzheader":30,"./zlib/inflate":32,"./zlib/messages":34,"./zlib/zstream":36}],24:[function(require,module,exports){ +'use strict'; + + +var TYPED_OK = (typeof Uint8Array !== 'undefined') && + (typeof Uint16Array !== 'undefined') && + (typeof Int32Array !== 'undefined'); + + +exports.assign = function (obj /*from1, from2, from3, ...*/) { + var sources = Array.prototype.slice.call(arguments, 1); + while (sources.length) { + var source = sources.shift(); + if (!source) { continue; } + + if (typeof source !== 'object') { + throw new TypeError(source + 'must be non-object'); + } + + for (var p in source) { + if (source.hasOwnProperty(p)) { + obj[p] = source[p]; + } + } + } + + return obj; +}; + + +// reduce buffer size, avoiding mem copy +exports.shrinkBuf = function (buf, size) { + if (buf.length === size) { return buf; } + if (buf.subarray) { return buf.subarray(0, size); } + buf.length = size; + return buf; +}; + + +var fnTyped = { + arraySet: function (dest, src, src_offs, len, dest_offs) { + if (src.subarray && dest.subarray) { + dest.set(src.subarray(src_offs, src_offs + len), dest_offs); + return; + } + // Fallback to ordinary array + for (var i = 0; i < len; i++) { + dest[dest_offs + i] = src[src_offs + i]; + } + }, + // Join array of chunks to single array. + flattenChunks: function (chunks) { + var i, l, len, pos, chunk, result; + + // calculate data length + len = 0; + for (i = 0, l = chunks.length; i < l; i++) { + len += chunks[i].length; + } + + // join chunks + result = new Uint8Array(len); + pos = 0; + for (i = 0, l = chunks.length; i < l; i++) { + chunk = chunks[i]; + result.set(chunk, pos); + pos += chunk.length; + } + + return result; + } +}; + +var fnUntyped = { + arraySet: function (dest, src, src_offs, len, dest_offs) { + for (var i = 0; i < len; i++) { + dest[dest_offs + i] = src[src_offs + i]; + } + }, + // Join array of chunks to single array. + flattenChunks: function (chunks) { + return [].concat.apply([], chunks); + } +}; + + +// Enable/Disable typed arrays use, for testing +// +exports.setTyped = function (on) { + if (on) { + exports.Buf8 = Uint8Array; + exports.Buf16 = Uint16Array; + exports.Buf32 = Int32Array; + exports.assign(exports, fnTyped); + } else { + exports.Buf8 = Array; + exports.Buf16 = Array; + exports.Buf32 = Array; + exports.assign(exports, fnUntyped); + } +}; + +exports.setTyped(TYPED_OK); + +},{}],25:[function(require,module,exports){ +// String encode/decode helpers +'use strict'; + + +var utils = require('./common'); + + +// Quick check if we can use fast array to bin string conversion +// +// - apply(Array) can fail on Android 2.2 +// - apply(Uint8Array) can fail on iOS 5.1 Safary +// +var STR_APPLY_OK = true; +var STR_APPLY_UIA_OK = true; + +try { String.fromCharCode.apply(null, [ 0 ]); } catch (__) { STR_APPLY_OK = false; } +try { String.fromCharCode.apply(null, new Uint8Array(1)); } catch (__) { STR_APPLY_UIA_OK = false; } + + +// Table with utf8 lengths (calculated by first byte of sequence) +// Note, that 5 & 6-byte values and some 4-byte values can not be represented in JS, +// because max possible codepoint is 0x10ffff +var _utf8len = new utils.Buf8(256); +for (var q = 0; q < 256; q++) { + _utf8len[q] = (q >= 252 ? 6 : q >= 248 ? 5 : q >= 240 ? 4 : q >= 224 ? 3 : q >= 192 ? 
2 : 1); +} +_utf8len[254] = _utf8len[254] = 1; // Invalid sequence start + + +// convert string to array (typed, when possible) +exports.string2buf = function (str) { + var buf, c, c2, m_pos, i, str_len = str.length, buf_len = 0; + + // count binary size + for (m_pos = 0; m_pos < str_len; m_pos++) { + c = str.charCodeAt(m_pos); + if ((c & 0xfc00) === 0xd800 && (m_pos + 1 < str_len)) { + c2 = str.charCodeAt(m_pos + 1); + if ((c2 & 0xfc00) === 0xdc00) { + c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00); + m_pos++; + } + } + buf_len += c < 0x80 ? 1 : c < 0x800 ? 2 : c < 0x10000 ? 3 : 4; + } + + // allocate buffer + buf = new utils.Buf8(buf_len); + + // convert + for (i = 0, m_pos = 0; i < buf_len; m_pos++) { + c = str.charCodeAt(m_pos); + if ((c & 0xfc00) === 0xd800 && (m_pos + 1 < str_len)) { + c2 = str.charCodeAt(m_pos + 1); + if ((c2 & 0xfc00) === 0xdc00) { + c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00); + m_pos++; + } + } + if (c < 0x80) { + /* one byte */ + buf[i++] = c; + } else if (c < 0x800) { + /* two bytes */ + buf[i++] = 0xC0 | (c >>> 6); + buf[i++] = 0x80 | (c & 0x3f); + } else if (c < 0x10000) { + /* three bytes */ + buf[i++] = 0xE0 | (c >>> 12); + buf[i++] = 0x80 | (c >>> 6 & 0x3f); + buf[i++] = 0x80 | (c & 0x3f); + } else { + /* four bytes */ + buf[i++] = 0xf0 | (c >>> 18); + buf[i++] = 0x80 | (c >>> 12 & 0x3f); + buf[i++] = 0x80 | (c >>> 6 & 0x3f); + buf[i++] = 0x80 | (c & 0x3f); + } + } + + return buf; +}; + +// Helper (used in 2 places) +function buf2binstring(buf, len) { + // use fallback for big arrays to avoid stack overflow + if (len < 65537) { + if ((buf.subarray && STR_APPLY_UIA_OK) || (!buf.subarray && STR_APPLY_OK)) { + return String.fromCharCode.apply(null, utils.shrinkBuf(buf, len)); + } + } + + var result = ''; + for (var i = 0; i < len; i++) { + result += String.fromCharCode(buf[i]); + } + return result; +} + + +// Convert byte array to binary string +exports.buf2binstring = function (buf) { + return buf2binstring(buf, buf.length); +}; + + +// Convert binary string (typed, when possible) +exports.binstring2buf = function (str) { + var buf = new utils.Buf8(str.length); + for (var i = 0, len = buf.length; i < len; i++) { + buf[i] = str.charCodeAt(i); + } + return buf; +}; + + +// convert array to string +exports.buf2string = function (buf, max) { + var i, out, c, c_len; + var len = max || buf.length; + + // Reserve max possible length (2 words per char) + // NB: by unknown reasons, Array is significantly faster for + // String.fromCharCode.apply than Uint16Array. + var utf16buf = new Array(len * 2); + + for (out = 0, i = 0; i < len;) { + c = buf[i++]; + // quick process ascii + if (c < 0x80) { utf16buf[out++] = c; continue; } + + c_len = _utf8len[c]; + // skip 5 & 6 byte codes + if (c_len > 4) { utf16buf[out++] = 0xfffd; i += c_len - 1; continue; } + + // apply mask on first byte + c &= c_len === 2 ? 0x1f : c_len === 3 ? 0x0f : 0x07; + // join the rest + while (c_len > 1 && i < len) { + c = (c << 6) | (buf[i++] & 0x3f); + c_len--; + } + + // terminated by end of string? + if (c_len > 1) { utf16buf[out++] = 0xfffd; continue; } + + if (c < 0x10000) { + utf16buf[out++] = c; + } else { + c -= 0x10000; + utf16buf[out++] = 0xd800 | ((c >> 10) & 0x3ff); + utf16buf[out++] = 0xdc00 | (c & 0x3ff); + } + } + + return buf2binstring(utf16buf, out); +}; + + +// Calculate max possible position in utf8 buffer, +// that will not break sequence. If that's not possible +// - (very small limits) return max size as is. 
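+// Illustrative sketch: for buf = [0x61, 0xC3] (an 'a' followed by the first
+// byte of a two-byte sequence) and max = 2, cutting at 2 would split the
+// sequence, so utf8border returns 1 and the 0xC3 byte is carried over as the
+// tail of the next chunk (see the string branch of Inflate#push above).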
+// +// buf[] - utf8 bytes array +// max - length limit (mandatory); +exports.utf8border = function (buf, max) { + var pos; + + max = max || buf.length; + if (max > buf.length) { max = buf.length; } + + // go back from last position, until start of sequence found + pos = max - 1; + while (pos >= 0 && (buf[pos] & 0xC0) === 0x80) { pos--; } + + // Fuckup - very small and broken sequence, + // return max, because we should return something anyway. + if (pos < 0) { return max; } + + // If we came to start of buffer - that means vuffer is too small, + // return max too. + if (pos === 0) { return max; } + + return (pos + _utf8len[buf[pos]] > max) ? pos : max; +}; + +},{"./common":24}],26:[function(require,module,exports){ +'use strict'; + +// Note: adler32 takes 12% for level 0 and 2% for level 6. +// It doesn't worth to make additional optimizationa as in original. +// Small size is preferable. + +function adler32(adler, buf, len, pos) { + var s1 = (adler & 0xffff) |0, + s2 = ((adler >>> 16) & 0xffff) |0, + n = 0; + + while (len !== 0) { + // Set limit ~ twice less than 5552, to keep + // s2 in 31-bits, because we force signed ints. + // in other case %= will fail. + n = len > 2000 ? 2000 : len; + len -= n; + + do { + s1 = (s1 + buf[pos++]) |0; + s2 = (s2 + s1) |0; + } while (--n); + + s1 %= 65521; + s2 %= 65521; + } + + return (s1 | (s2 << 16)) |0; +} + + +module.exports = adler32; + +},{}],27:[function(require,module,exports){ +'use strict'; + + +module.exports = { + + /* Allowed flush values; see deflate() and inflate() below for details */ + Z_NO_FLUSH: 0, + Z_PARTIAL_FLUSH: 1, + Z_SYNC_FLUSH: 2, + Z_FULL_FLUSH: 3, + Z_FINISH: 4, + Z_BLOCK: 5, + Z_TREES: 6, + + /* Return codes for the compression/decompression functions. Negative values + * are errors, positive values are used for special but normal events. + */ + Z_OK: 0, + Z_STREAM_END: 1, + Z_NEED_DICT: 2, + Z_ERRNO: -1, + Z_STREAM_ERROR: -2, + Z_DATA_ERROR: -3, + //Z_MEM_ERROR: -4, + Z_BUF_ERROR: -5, + //Z_VERSION_ERROR: -6, + + /* compression levels */ + Z_NO_COMPRESSION: 0, + Z_BEST_SPEED: 1, + Z_BEST_COMPRESSION: 9, + Z_DEFAULT_COMPRESSION: -1, + + + Z_FILTERED: 1, + Z_HUFFMAN_ONLY: 2, + Z_RLE: 3, + Z_FIXED: 4, + Z_DEFAULT_STRATEGY: 0, + + /* Possible values of the data_type field (though see inflate()) */ + Z_BINARY: 0, + Z_TEXT: 1, + //Z_ASCII: 1, // = Z_TEXT (deprecated) + Z_UNKNOWN: 2, + + /* The deflate compression method */ + Z_DEFLATED: 8 + //Z_NULL: null // Use -1 or null inline, depending on var type +}; + +},{}],28:[function(require,module,exports){ +'use strict'; + +// Note: we can't get significant speed boost here. +// So write code to minimize size - no pregenerated tables +// and array tools dependencies. + + +// Use ordinary array, since untyped makes no boost here +function makeTable() { + var c, table = []; + + for (var n = 0; n < 256; n++) { + c = n; + for (var k = 0; k < 8; k++) { + c = ((c & 1) ? (0xEDB88320 ^ (c >>> 1)) : (c >>> 1)); + } + table[n] = c; + } + + return table; +} + +// Create table on load. Just 255 signed longs. Not a problem. 
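+// Illustrative sketch of the helper below: crc32(0, buf, buf.length, 0)
+// yields the standard CRC-32 of buf as a signed 32-bit integer; apply
+// `>>> 0` if an unsigned value is needed (see the hint inside crc32()).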
+var crcTable = makeTable(); + + +function crc32(crc, buf, len, pos) { + var t = crcTable, + end = pos + len; + + crc ^= -1; + + for (var i = pos; i < end; i++) { + crc = (crc >>> 8) ^ t[(crc ^ buf[i]) & 0xFF]; + } + + return (crc ^ (-1)); // >>> 0; +} + + +module.exports = crc32; + +},{}],29:[function(require,module,exports){ +'use strict'; + +var utils = require('../utils/common'); +var trees = require('./trees'); +var adler32 = require('./adler32'); +var crc32 = require('./crc32'); +var msg = require('./messages'); + +/* Public constants ==========================================================*/ +/* ===========================================================================*/ + + +/* Allowed flush values; see deflate() and inflate() below for details */ +var Z_NO_FLUSH = 0; +var Z_PARTIAL_FLUSH = 1; +//var Z_SYNC_FLUSH = 2; +var Z_FULL_FLUSH = 3; +var Z_FINISH = 4; +var Z_BLOCK = 5; +//var Z_TREES = 6; + + +/* Return codes for the compression/decompression functions. Negative values + * are errors, positive values are used for special but normal events. + */ +var Z_OK = 0; +var Z_STREAM_END = 1; +//var Z_NEED_DICT = 2; +//var Z_ERRNO = -1; +var Z_STREAM_ERROR = -2; +var Z_DATA_ERROR = -3; +//var Z_MEM_ERROR = -4; +var Z_BUF_ERROR = -5; +//var Z_VERSION_ERROR = -6; + + +/* compression levels */ +//var Z_NO_COMPRESSION = 0; +//var Z_BEST_SPEED = 1; +//var Z_BEST_COMPRESSION = 9; +var Z_DEFAULT_COMPRESSION = -1; + + +var Z_FILTERED = 1; +var Z_HUFFMAN_ONLY = 2; +var Z_RLE = 3; +var Z_FIXED = 4; +var Z_DEFAULT_STRATEGY = 0; + +/* Possible values of the data_type field (though see inflate()) */ +//var Z_BINARY = 0; +//var Z_TEXT = 1; +//var Z_ASCII = 1; // = Z_TEXT +var Z_UNKNOWN = 2; + + +/* The deflate compression method */ +var Z_DEFLATED = 8; + +/*============================================================================*/ + + +var MAX_MEM_LEVEL = 9; +/* Maximum value for memLevel in deflateInit2 */ +var MAX_WBITS = 15; +/* 32K LZ77 window */ +var DEF_MEM_LEVEL = 8; + + +var LENGTH_CODES = 29; +/* number of length codes, not counting the special END_BLOCK code */ +var LITERALS = 256; +/* number of literal bytes 0..255 */ +var L_CODES = LITERALS + 1 + LENGTH_CODES; +/* number of Literal or Length codes, including the END_BLOCK code */ +var D_CODES = 30; +/* number of distance codes */ +var BL_CODES = 19; +/* number of codes used to transfer the bit lengths */ +var HEAP_SIZE = 2 * L_CODES + 1; +/* maximum heap size */ +var MAX_BITS = 15; +/* All codes must not exceed MAX_BITS bits */ + +var MIN_MATCH = 3; +var MAX_MATCH = 258; +var MIN_LOOKAHEAD = (MAX_MATCH + MIN_MATCH + 1); + +var PRESET_DICT = 0x20; + +var INIT_STATE = 42; +var EXTRA_STATE = 69; +var NAME_STATE = 73; +var COMMENT_STATE = 91; +var HCRC_STATE = 103; +var BUSY_STATE = 113; +var FINISH_STATE = 666; + +var BS_NEED_MORE = 1; /* block not completed, need more input or more output */ +var BS_BLOCK_DONE = 2; /* block flush performed */ +var BS_FINISH_STARTED = 3; /* finish started, need only more output at next deflate */ +var BS_FINISH_DONE = 4; /* finish done, accept no more input or output */ + +var OS_CODE = 0x03; // Unix :) . Don't detect, use this default. + +function err(strm, errorCode) { + strm.msg = msg[errorCode]; + return errorCode; +} + +function rank(f) { + return ((f) << 1) - ((f) > 4 ? 9 : 0); +} + +function zero(buf) { var len = buf.length; while (--len >= 0) { buf[len] = 0; } } + + +/* ========================================================================= + * Flush as much pending output as possible. 
All deflate() output goes + * through this function so some applications may wish to modify it + * to avoid allocating a large strm->output buffer and copying into it. + * (See also read_buf()). + */ +function flush_pending(strm) { + var s = strm.state; + + //_tr_flush_bits(s); + var len = s.pending; + if (len > strm.avail_out) { + len = strm.avail_out; + } + if (len === 0) { return; } + + utils.arraySet(strm.output, s.pending_buf, s.pending_out, len, strm.next_out); + strm.next_out += len; + s.pending_out += len; + strm.total_out += len; + strm.avail_out -= len; + s.pending -= len; + if (s.pending === 0) { + s.pending_out = 0; + } +} + + +function flush_block_only(s, last) { + trees._tr_flush_block(s, (s.block_start >= 0 ? s.block_start : -1), s.strstart - s.block_start, last); + s.block_start = s.strstart; + flush_pending(s.strm); +} + + +function put_byte(s, b) { + s.pending_buf[s.pending++] = b; +} + + +/* ========================================================================= + * Put a short in the pending buffer. The 16-bit value is put in MSB order. + * IN assertion: the stream state is correct and there is enough room in + * pending_buf. + */ +function putShortMSB(s, b) { +// put_byte(s, (Byte)(b >> 8)); +// put_byte(s, (Byte)(b & 0xff)); + s.pending_buf[s.pending++] = (b >>> 8) & 0xff; + s.pending_buf[s.pending++] = b & 0xff; +} + + +/* =========================================================================== + * Read a new buffer from the current input stream, update the adler32 + * and total number of bytes read. All deflate() input goes through + * this function so some applications may wish to modify it to avoid + * allocating a large strm->input buffer and copying from it. + * (See also flush_pending()). + */ +function read_buf(strm, buf, start, size) { + var len = strm.avail_in; + + if (len > size) { len = size; } + if (len === 0) { return 0; } + + strm.avail_in -= len; + + // zmemcpy(buf, strm->next_in, len); + utils.arraySet(buf, strm.input, strm.next_in, len, start); + if (strm.state.wrap === 1) { + strm.adler = adler32(strm.adler, buf, len, start); + } + + else if (strm.state.wrap === 2) { + strm.adler = crc32(strm.adler, buf, len, start); + } + + strm.next_in += len; + strm.total_in += len; + + return len; +} + + +/* =========================================================================== + * Set match_start to the longest match starting at the given string and + * return its length. Matches shorter or equal to prev_length are discarded, + * in which case the result is equal to prev_length and match_start is + * garbage. + * IN assertions: cur_match is the head of the hash chain for the current + * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1 + * OUT assertion: the match length is not greater than s->lookahead. + */ +function longest_match(s, cur_match) { + var chain_length = s.max_chain_length; /* max hash chain length */ + var scan = s.strstart; /* current string */ + var match; /* matched string */ + var len; /* length of current match */ + var best_len = s.prev_length; /* best match length so far */ + var nice_match = s.nice_match; /* stop if match long enough */ + var limit = (s.strstart > (s.w_size - MIN_LOOKAHEAD)) ? + s.strstart - (s.w_size - MIN_LOOKAHEAD) : 0/*NIL*/; + + var _win = s.window; // shortcut + + var wmask = s.w_mask; + var prev = s.prev; + + /* Stop when cur_match becomes <= limit. To simplify the code, + * we prevent matches with the string of window index 0. 
+ */ + + var strend = s.strstart + MAX_MATCH; + var scan_end1 = _win[scan + best_len - 1]; + var scan_end = _win[scan + best_len]; + + /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16. + * It is easy to get rid of this optimization if necessary. + */ + // Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever"); + + /* Do not waste too much time if we already have a good match: */ + if (s.prev_length >= s.good_match) { + chain_length >>= 2; + } + /* Do not look for matches beyond the end of the input. This is necessary + * to make deflate deterministic. + */ + if (nice_match > s.lookahead) { nice_match = s.lookahead; } + + // Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead"); + + do { + // Assert(cur_match < s->strstart, "no future"); + match = cur_match; + + /* Skip to next match if the match length cannot increase + * or if the match length is less than 2. Note that the checks below + * for insufficient lookahead only occur occasionally for performance + * reasons. Therefore uninitialized memory will be accessed, and + * conditional jumps will be made that depend on those values. + * However the length of the match is limited to the lookahead, so + * the output of deflate is not affected by the uninitialized values. + */ + + if (_win[match + best_len] !== scan_end || + _win[match + best_len - 1] !== scan_end1 || + _win[match] !== _win[scan] || + _win[++match] !== _win[scan + 1]) { + continue; + } + + /* The check at best_len-1 can be removed because it will be made + * again later. (This heuristic is not always a win.) + * It is not necessary to compare scan[2] and match[2] since they + * are always equal when the other bytes match, given that + * the hash keys are equal and that HASH_BITS >= 8. + */ + scan += 2; + match++; + // Assert(*scan == *match, "match[2]?"); + + /* We check for insufficient lookahead only every 8th comparison; + * the 256th check will be made at strstart+258. + */ + do { + /*jshint noempty:false*/ + } while (_win[++scan] === _win[++match] && _win[++scan] === _win[++match] && + _win[++scan] === _win[++match] && _win[++scan] === _win[++match] && + _win[++scan] === _win[++match] && _win[++scan] === _win[++match] && + _win[++scan] === _win[++match] && _win[++scan] === _win[++match] && + scan < strend); + + // Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); + + len = MAX_MATCH - (strend - scan); + scan = strend - MAX_MATCH; + + if (len > best_len) { + s.match_start = cur_match; + best_len = len; + if (len >= nice_match) { + break; + } + scan_end1 = _win[scan + best_len - 1]; + scan_end = _win[scan + best_len]; + } + } while ((cur_match = prev[cur_match & wmask]) > limit && --chain_length !== 0); + + if (best_len <= s.lookahead) { + return best_len; + } + return s.lookahead; +} + + +/* =========================================================================== + * Fill the window when the lookahead becomes insufficient. + * Updates strstart and lookahead. + * + * IN assertion: lookahead < MIN_LOOKAHEAD + * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD + * At least one byte has been read, or avail_in == 0; reads are + * performed for at least two bytes (required for the zip translate_eol + * option -- not supported here). 
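+ *
+ * (Sketch of the sliding below: once strstart reaches
+ * w_size + (w_size - MIN_LOOKAHEAD), the upper half of the window is copied
+ * down over the lower half, strstart / match_start / block_start are rebased
+ * by w_size, and the head/prev hash chains are shifted by the same amount.)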
+ */ +function fill_window(s) { + var _w_size = s.w_size; + var p, n, m, more, str; + + //Assert(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead"); + + do { + more = s.window_size - s.lookahead - s.strstart; + + // JS ints have 32 bit, block below not needed + /* Deal with !@#$% 64K limit: */ + //if (sizeof(int) <= 2) { + // if (more == 0 && s->strstart == 0 && s->lookahead == 0) { + // more = wsize; + // + // } else if (more == (unsigned)(-1)) { + // /* Very unlikely, but possible on 16 bit machine if + // * strstart == 0 && lookahead == 1 (input done a byte at time) + // */ + // more--; + // } + //} + + + /* If the window is almost full and there is insufficient lookahead, + * move the upper half to the lower one to make room in the upper half. + */ + if (s.strstart >= _w_size + (_w_size - MIN_LOOKAHEAD)) { + + utils.arraySet(s.window, s.window, _w_size, _w_size, 0); + s.match_start -= _w_size; + s.strstart -= _w_size; + /* we now have strstart >= MAX_DIST */ + s.block_start -= _w_size; + + /* Slide the hash table (could be avoided with 32 bit values + at the expense of memory usage). We slide even when level == 0 + to keep the hash table consistent if we switch back to level > 0 + later. (Using level 0 permanently is not an optimal usage of + zlib, so we don't care about this pathological case.) + */ + + n = s.hash_size; + p = n; + do { + m = s.head[--p]; + s.head[p] = (m >= _w_size ? m - _w_size : 0); + } while (--n); + + n = _w_size; + p = n; + do { + m = s.prev[--p]; + s.prev[p] = (m >= _w_size ? m - _w_size : 0); + /* If n is not on any hash chain, prev[n] is garbage but + * its value will never be used. + */ + } while (--n); + + more += _w_size; + } + if (s.strm.avail_in === 0) { + break; + } + + /* If there was no sliding: + * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 && + * more == window_size - lookahead - strstart + * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1) + * => more >= window_size - 2*WSIZE + 2 + * In the BIG_MEM or MMAP case (not yet supported), + * window_size == input_size + MIN_LOOKAHEAD && + * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD. + * Otherwise, window_size == 2*WSIZE so more >= 2. + * If there was sliding, more >= WSIZE. So in all cases, more >= 2. + */ + //Assert(more >= 2, "more < 2"); + n = read_buf(s.strm, s.window, s.strstart + s.lookahead, more); + s.lookahead += n; + + /* Initialize the hash value now that we have some input: */ + if (s.lookahead + s.insert >= MIN_MATCH) { + str = s.strstart - s.insert; + s.ins_h = s.window[str]; + + /* UPDATE_HASH(s, s->ins_h, s->window[str + 1]); */ + s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[str + 1]) & s.hash_mask; +//#if MIN_MATCH != 3 +// Call update_hash() MIN_MATCH-3 more times +//#endif + while (s.insert) { + /* UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); */ + s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[str + MIN_MATCH - 1]) & s.hash_mask; + + s.prev[str & s.w_mask] = s.head[s.ins_h]; + s.head[s.ins_h] = str; + str++; + s.insert--; + if (s.lookahead + s.insert < MIN_MATCH) { + break; + } + } + } + /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage, + * but this is not important since only literal bytes will be emitted. 
+ */ + + } while (s.lookahead < MIN_LOOKAHEAD && s.strm.avail_in !== 0); + + /* If the WIN_INIT bytes after the end of the current data have never been + * written, then zero those bytes in order to avoid memory check reports of + * the use of uninitialized (or uninitialised as Julian writes) bytes by + * the longest match routines. Update the high water mark for the next + * time through here. WIN_INIT is set to MAX_MATCH since the longest match + * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead. + */ +// if (s.high_water < s.window_size) { +// var curr = s.strstart + s.lookahead; +// var init = 0; +// +// if (s.high_water < curr) { +// /* Previous high water mark below current data -- zero WIN_INIT +// * bytes or up to end of window, whichever is less. +// */ +// init = s.window_size - curr; +// if (init > WIN_INIT) +// init = WIN_INIT; +// zmemzero(s->window + curr, (unsigned)init); +// s->high_water = curr + init; +// } +// else if (s->high_water < (ulg)curr + WIN_INIT) { +// /* High water mark at or above current data, but below current data +// * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up +// * to end of window, whichever is less. +// */ +// init = (ulg)curr + WIN_INIT - s->high_water; +// if (init > s->window_size - s->high_water) +// init = s->window_size - s->high_water; +// zmemzero(s->window + s->high_water, (unsigned)init); +// s->high_water += init; +// } +// } +// +// Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD, +// "not enough room for search"); +} + +/* =========================================================================== + * Copy without compression as much as possible from the input stream, return + * the current block state. + * This function does not insert new strings in the dictionary since + * uncompressible data is probably not useful. This function is used + * only for the level=0 compression option. + * NOTE: this function should be optimized to avoid extra copying from + * window to pending_buf. 
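+ *
+ * (Illustrative: this is the path `pako.deflate(data, { level: 0 })` takes --
+ * input is copied into stored blocks of at most 0xffff bytes, each with a
+ * 5 byte header, with no attempt at compression.)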
+ */ +function deflate_stored(s, flush) { + /* Stored blocks are limited to 0xffff bytes, pending_buf is limited + * to pending_buf_size, and each stored block has a 5 byte header: + */ + var max_block_size = 0xffff; + + if (max_block_size > s.pending_buf_size - 5) { + max_block_size = s.pending_buf_size - 5; + } + + /* Copy as much as possible from input to output: */ + for (;;) { + /* Fill the window as much as possible: */ + if (s.lookahead <= 1) { + + //Assert(s->strstart < s->w_size+MAX_DIST(s) || + // s->block_start >= (long)s->w_size, "slide too late"); +// if (!(s.strstart < s.w_size + (s.w_size - MIN_LOOKAHEAD) || +// s.block_start >= s.w_size)) { +// throw new Error("slide too late"); +// } + + fill_window(s); + if (s.lookahead === 0 && flush === Z_NO_FLUSH) { + return BS_NEED_MORE; + } + + if (s.lookahead === 0) { + break; + } + /* flush the current block */ + } + //Assert(s->block_start >= 0L, "block gone"); +// if (s.block_start < 0) throw new Error("block gone"); + + s.strstart += s.lookahead; + s.lookahead = 0; + + /* Emit a stored block if pending_buf will be full: */ + var max_start = s.block_start + max_block_size; + + if (s.strstart === 0 || s.strstart >= max_start) { + /* strstart == 0 is possible when wraparound on 16-bit machine */ + s.lookahead = s.strstart - max_start; + s.strstart = max_start; + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + + + } + /* Flush if we may have to slide, otherwise block_start may become + * negative and the data will be gone: + */ + if (s.strstart - s.block_start >= (s.w_size - MIN_LOOKAHEAD)) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + } + + s.insert = 0; + + if (flush === Z_FINISH) { + /*** FLUSH_BLOCK(s, 1); ***/ + flush_block_only(s, true); + if (s.strm.avail_out === 0) { + return BS_FINISH_STARTED; + } + /***/ + return BS_FINISH_DONE; + } + + if (s.strstart > s.block_start) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + + return BS_NEED_MORE; +} + +/* =========================================================================== + * Compress as much as possible from the input stream, return the current + * block state. + * This function does not perform lazy evaluation of matches and inserts + * new strings in the dictionary only for unmatched strings or for short + * matches. It is used only for the fast compression options. + */ +function deflate_fast(s, flush) { + var hash_head; /* head of the hash chain */ + var bflush; /* set if current block must be flushed */ + + for (;;) { + /* Make sure that we always have enough lookahead, except + * at the end of the input file. We need MAX_MATCH bytes + * for the next match, plus MIN_MATCH bytes to insert the + * string following the next match. + */ + if (s.lookahead < MIN_LOOKAHEAD) { + fill_window(s); + if (s.lookahead < MIN_LOOKAHEAD && flush === Z_NO_FLUSH) { + return BS_NEED_MORE; + } + if (s.lookahead === 0) { + break; /* flush the current block */ + } + } + + /* Insert the string window[strstart .. 
strstart+2] in the + * dictionary, and set hash_head to the head of the hash chain: + */ + hash_head = 0/*NIL*/; + if (s.lookahead >= MIN_MATCH) { + /*** INSERT_STRING(s, s.strstart, hash_head); ***/ + s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask; + hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h]; + s.head[s.ins_h] = s.strstart; + /***/ + } + + /* Find the longest match, discarding those <= prev_length. + * At this point we have always match_length < MIN_MATCH + */ + if (hash_head !== 0/*NIL*/ && ((s.strstart - hash_head) <= (s.w_size - MIN_LOOKAHEAD))) { + /* To simplify the code, we prevent matches with the string + * of window index 0 (in particular we have to avoid a match + * of the string with itself at the start of the input file). + */ + s.match_length = longest_match(s, hash_head); + /* longest_match() sets match_start */ + } + if (s.match_length >= MIN_MATCH) { + // check_match(s, s.strstart, s.match_start, s.match_length); // for debug only + + /*** _tr_tally_dist(s, s.strstart - s.match_start, + s.match_length - MIN_MATCH, bflush); ***/ + bflush = trees._tr_tally(s, s.strstart - s.match_start, s.match_length - MIN_MATCH); + + s.lookahead -= s.match_length; + + /* Insert new strings in the hash table only if the match length + * is not too large. This saves time but degrades compression. + */ + if (s.match_length <= s.max_lazy_match/*max_insert_length*/ && s.lookahead >= MIN_MATCH) { + s.match_length--; /* string at strstart already in table */ + do { + s.strstart++; + /*** INSERT_STRING(s, s.strstart, hash_head); ***/ + s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask; + hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h]; + s.head[s.ins_h] = s.strstart; + /***/ + /* strstart never exceeds WSIZE-MAX_MATCH, so there are + * always MIN_MATCH bytes ahead. + */ + } while (--s.match_length !== 0); + s.strstart++; + } else + { + s.strstart += s.match_length; + s.match_length = 0; + s.ins_h = s.window[s.strstart]; + /* UPDATE_HASH(s, s.ins_h, s.window[s.strstart+1]); */ + s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + 1]) & s.hash_mask; + +//#if MIN_MATCH != 3 +// Call UPDATE_HASH() MIN_MATCH-3 more times +//#endif + /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not + * matter since it will be recomputed at next deflate call. + */ + } + } else { + /* No match, output a literal byte */ + //Tracevv((stderr,"%c", s.window[s.strstart])); + /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/ + bflush = trees._tr_tally(s, 0, s.window[s.strstart]); + + s.lookahead--; + s.strstart++; + } + if (bflush) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + } + s.insert = ((s.strstart < (MIN_MATCH - 1)) ? s.strstart : MIN_MATCH - 1); + if (flush === Z_FINISH) { + /*** FLUSH_BLOCK(s, 1); ***/ + flush_block_only(s, true); + if (s.strm.avail_out === 0) { + return BS_FINISH_STARTED; + } + /***/ + return BS_FINISH_DONE; + } + if (s.last_lit) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + return BS_BLOCK_DONE; +} + +/* =========================================================================== + * Same as above, but achieves better compression. We use a lazy + * evaluation for matches: a match is finally adopted only if there is + * no better match at the next window position. 
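+ *
+ * (Sketch: each match found is held back for one position; if the match
+ * starting at the next byte turns out longer, the held match collapses to a
+ * single literal, otherwise it is emitted and the hash table is filled in up
+ * to its end. This lazy path is typically selected for the higher
+ * compression levels, where max_lazy_match and good_match are tuned larger.)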
+ */ +function deflate_slow(s, flush) { + var hash_head; /* head of hash chain */ + var bflush; /* set if current block must be flushed */ + + var max_insert; + + /* Process the input block. */ + for (;;) { + /* Make sure that we always have enough lookahead, except + * at the end of the input file. We need MAX_MATCH bytes + * for the next match, plus MIN_MATCH bytes to insert the + * string following the next match. + */ + if (s.lookahead < MIN_LOOKAHEAD) { + fill_window(s); + if (s.lookahead < MIN_LOOKAHEAD && flush === Z_NO_FLUSH) { + return BS_NEED_MORE; + } + if (s.lookahead === 0) { break; } /* flush the current block */ + } + + /* Insert the string window[strstart .. strstart+2] in the + * dictionary, and set hash_head to the head of the hash chain: + */ + hash_head = 0/*NIL*/; + if (s.lookahead >= MIN_MATCH) { + /*** INSERT_STRING(s, s.strstart, hash_head); ***/ + s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask; + hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h]; + s.head[s.ins_h] = s.strstart; + /***/ + } + + /* Find the longest match, discarding those <= prev_length. + */ + s.prev_length = s.match_length; + s.prev_match = s.match_start; + s.match_length = MIN_MATCH - 1; + + if (hash_head !== 0/*NIL*/ && s.prev_length < s.max_lazy_match && + s.strstart - hash_head <= (s.w_size - MIN_LOOKAHEAD)/*MAX_DIST(s)*/) { + /* To simplify the code, we prevent matches with the string + * of window index 0 (in particular we have to avoid a match + * of the string with itself at the start of the input file). + */ + s.match_length = longest_match(s, hash_head); + /* longest_match() sets match_start */ + + if (s.match_length <= 5 && + (s.strategy === Z_FILTERED || (s.match_length === MIN_MATCH && s.strstart - s.match_start > 4096/*TOO_FAR*/))) { + + /* If prev_match is also MIN_MATCH, match_start is garbage + * but we will ignore the current match anyway. + */ + s.match_length = MIN_MATCH - 1; + } + } + /* If there was a match at the previous step and the current + * match is not better, output the previous match: + */ + if (s.prev_length >= MIN_MATCH && s.match_length <= s.prev_length) { + max_insert = s.strstart + s.lookahead - MIN_MATCH; + /* Do not insert strings in hash table beyond this. */ + + //check_match(s, s.strstart-1, s.prev_match, s.prev_length); + + /***_tr_tally_dist(s, s.strstart - 1 - s.prev_match, + s.prev_length - MIN_MATCH, bflush);***/ + bflush = trees._tr_tally(s, s.strstart - 1 - s.prev_match, s.prev_length - MIN_MATCH); + /* Insert in hash table all strings up to the end of the match. + * strstart-1 and strstart are already inserted. If there is not + * enough lookahead, the last two strings are not inserted in + * the hash table. + */ + s.lookahead -= s.prev_length - 1; + s.prev_length -= 2; + do { + if (++s.strstart <= max_insert) { + /*** INSERT_STRING(s, s.strstart, hash_head); ***/ + s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask; + hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h]; + s.head[s.ins_h] = s.strstart; + /***/ + } + } while (--s.prev_length !== 0); + s.match_available = 0; + s.match_length = MIN_MATCH - 1; + s.strstart++; + + if (bflush) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + + } else if (s.match_available) { + /* If there was no match at the previous position, output a + * single literal. 
If there was a match but the current match + * is longer, truncate the previous match to a single literal. + */ + //Tracevv((stderr,"%c", s->window[s->strstart-1])); + /*** _tr_tally_lit(s, s.window[s.strstart-1], bflush); ***/ + bflush = trees._tr_tally(s, 0, s.window[s.strstart - 1]); + + if (bflush) { + /*** FLUSH_BLOCK_ONLY(s, 0) ***/ + flush_block_only(s, false); + /***/ + } + s.strstart++; + s.lookahead--; + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + } else { + /* There is no previous match to compare with, wait for + * the next step to decide. + */ + s.match_available = 1; + s.strstart++; + s.lookahead--; + } + } + //Assert (flush != Z_NO_FLUSH, "no flush?"); + if (s.match_available) { + //Tracevv((stderr,"%c", s->window[s->strstart-1])); + /*** _tr_tally_lit(s, s.window[s.strstart-1], bflush); ***/ + bflush = trees._tr_tally(s, 0, s.window[s.strstart - 1]); + + s.match_available = 0; + } + s.insert = s.strstart < MIN_MATCH - 1 ? s.strstart : MIN_MATCH - 1; + if (flush === Z_FINISH) { + /*** FLUSH_BLOCK(s, 1); ***/ + flush_block_only(s, true); + if (s.strm.avail_out === 0) { + return BS_FINISH_STARTED; + } + /***/ + return BS_FINISH_DONE; + } + if (s.last_lit) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + + return BS_BLOCK_DONE; +} + + +/* =========================================================================== + * For Z_RLE, simply look for runs of bytes, generate matches only of distance + * one. Do not maintain a hash table. (It will be regenerated if this run of + * deflate switches away from Z_RLE.) + */ +function deflate_rle(s, flush) { + var bflush; /* set if current block must be flushed */ + var prev; /* byte at distance one to match */ + var scan, strend; /* scan goes up to strend for length of run */ + + var _win = s.window; + + for (;;) { + /* Make sure that we always have enough lookahead, except + * at the end of the input file. We need MAX_MATCH bytes + * for the longest run, plus one for the unrolled loop. 
+ */ + if (s.lookahead <= MAX_MATCH) { + fill_window(s); + if (s.lookahead <= MAX_MATCH && flush === Z_NO_FLUSH) { + return BS_NEED_MORE; + } + if (s.lookahead === 0) { break; } /* flush the current block */ + } + + /* See how many times the previous byte repeats */ + s.match_length = 0; + if (s.lookahead >= MIN_MATCH && s.strstart > 0) { + scan = s.strstart - 1; + prev = _win[scan]; + if (prev === _win[++scan] && prev === _win[++scan] && prev === _win[++scan]) { + strend = s.strstart + MAX_MATCH; + do { + /*jshint noempty:false*/ + } while (prev === _win[++scan] && prev === _win[++scan] && + prev === _win[++scan] && prev === _win[++scan] && + prev === _win[++scan] && prev === _win[++scan] && + prev === _win[++scan] && prev === _win[++scan] && + scan < strend); + s.match_length = MAX_MATCH - (strend - scan); + if (s.match_length > s.lookahead) { + s.match_length = s.lookahead; + } + } + //Assert(scan <= s->window+(uInt)(s->window_size-1), "wild scan"); + } + + /* Emit match if have run of MIN_MATCH or longer, else emit literal */ + if (s.match_length >= MIN_MATCH) { + //check_match(s, s.strstart, s.strstart - 1, s.match_length); + + /*** _tr_tally_dist(s, 1, s.match_length - MIN_MATCH, bflush); ***/ + bflush = trees._tr_tally(s, 1, s.match_length - MIN_MATCH); + + s.lookahead -= s.match_length; + s.strstart += s.match_length; + s.match_length = 0; + } else { + /* No match, output a literal byte */ + //Tracevv((stderr,"%c", s->window[s->strstart])); + /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/ + bflush = trees._tr_tally(s, 0, s.window[s.strstart]); + + s.lookahead--; + s.strstart++; + } + if (bflush) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + } + s.insert = 0; + if (flush === Z_FINISH) { + /*** FLUSH_BLOCK(s, 1); ***/ + flush_block_only(s, true); + if (s.strm.avail_out === 0) { + return BS_FINISH_STARTED; + } + /***/ + return BS_FINISH_DONE; + } + if (s.last_lit) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + return BS_BLOCK_DONE; +} + +/* =========================================================================== + * For Z_HUFFMAN_ONLY, do not look for matches. Do not maintain a hash table. + * (It will be regenerated if this run of deflate switches away from Huffman.) + */ +function deflate_huff(s, flush) { + var bflush; /* set if current block must be flushed */ + + for (;;) { + /* Make sure that we have a literal to write. 
*/ + if (s.lookahead === 0) { + fill_window(s); + if (s.lookahead === 0) { + if (flush === Z_NO_FLUSH) { + return BS_NEED_MORE; + } + break; /* flush the current block */ + } + } + + /* Output a literal byte */ + s.match_length = 0; + //Tracevv((stderr,"%c", s->window[s->strstart])); + /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/ + bflush = trees._tr_tally(s, 0, s.window[s.strstart]); + s.lookahead--; + s.strstart++; + if (bflush) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + } + s.insert = 0; + if (flush === Z_FINISH) { + /*** FLUSH_BLOCK(s, 1); ***/ + flush_block_only(s, true); + if (s.strm.avail_out === 0) { + return BS_FINISH_STARTED; + } + /***/ + return BS_FINISH_DONE; + } + if (s.last_lit) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + return BS_BLOCK_DONE; +} + +/* Values for max_lazy_match, good_match and max_chain_length, depending on + * the desired pack level (0..9). The values given below have been tuned to + * exclude worst case performance for pathological files. Better values may be + * found for specific files. + */ +function Config(good_length, max_lazy, nice_length, max_chain, func) { + this.good_length = good_length; + this.max_lazy = max_lazy; + this.nice_length = nice_length; + this.max_chain = max_chain; + this.func = func; +} + +var configuration_table; + +configuration_table = [ + /* good lazy nice chain */ + new Config(0, 0, 0, 0, deflate_stored), /* 0 store only */ + new Config(4, 4, 8, 4, deflate_fast), /* 1 max speed, no lazy matches */ + new Config(4, 5, 16, 8, deflate_fast), /* 2 */ + new Config(4, 6, 32, 32, deflate_fast), /* 3 */ + + new Config(4, 4, 16, 16, deflate_slow), /* 4 lazy matches */ + new Config(8, 16, 32, 32, deflate_slow), /* 5 */ + new Config(8, 16, 128, 128, deflate_slow), /* 6 */ + new Config(8, 32, 128, 256, deflate_slow), /* 7 */ + new Config(32, 128, 258, 1024, deflate_slow), /* 8 */ + new Config(32, 258, 258, 4096, deflate_slow) /* 9 max compression */ +]; + + +/* =========================================================================== + * Initialize the "longest match" routines for a new zlib stream + */ +function lm_init(s) { + s.window_size = 2 * s.w_size; + + /*** CLEAR_HASH(s); ***/ + zero(s.head); // Fill with NIL (= 0); + + /* Set the default configuration parameters: + */ + s.max_lazy_match = configuration_table[s.level].max_lazy; + s.good_match = configuration_table[s.level].good_length; + s.nice_match = configuration_table[s.level].nice_length; + s.max_chain_length = configuration_table[s.level].max_chain; + + s.strstart = 0; + s.block_start = 0; + s.lookahead = 0; + s.insert = 0; + s.match_length = s.prev_length = MIN_MATCH - 1; + s.match_available = 0; + s.ins_h = 0; +} + + +function DeflateState() { + this.strm = null; /* pointer back to this zlib stream */ + this.status = 0; /* as the name implies */ + this.pending_buf = null; /* output still pending */ + this.pending_buf_size = 0; /* size of pending_buf */ + this.pending_out = 0; /* next pending byte to output to the stream */ + this.pending = 0; /* nb of bytes in the pending buffer */ + this.wrap = 0; /* bit 0 true for zlib, bit 1 true for gzip */ + this.gzhead = null; /* gzip header information to write */ + this.gzindex = 0; /* where in extra, name, or comment */ + this.method = Z_DEFLATED; /* can only be DEFLATED */ + this.last_flush = -1; /* value of flush param for previous 
deflate call */ + + this.w_size = 0; /* LZ77 window size (32K by default) */ + this.w_bits = 0; /* log2(w_size) (8..16) */ + this.w_mask = 0; /* w_size - 1 */ + + this.window = null; + /* Sliding window. Input bytes are read into the second half of the window, + * and move to the first half later to keep a dictionary of at least wSize + * bytes. With this organization, matches are limited to a distance of + * wSize-MAX_MATCH bytes, but this ensures that IO is always + * performed with a length multiple of the block size. + */ + + this.window_size = 0; + /* Actual size of window: 2*wSize, except when the user input buffer + * is directly used as sliding window. + */ + + this.prev = null; + /* Link to older string with same hash index. To limit the size of this + * array to 64K, this link is maintained only for the last 32K strings. + * An index in this array is thus a window index modulo 32K. + */ + + this.head = null; /* Heads of the hash chains or NIL. */ + + this.ins_h = 0; /* hash index of string to be inserted */ + this.hash_size = 0; /* number of elements in hash table */ + this.hash_bits = 0; /* log2(hash_size) */ + this.hash_mask = 0; /* hash_size-1 */ + + this.hash_shift = 0; + /* Number of bits by which ins_h must be shifted at each input + * step. It must be such that after MIN_MATCH steps, the oldest + * byte no longer takes part in the hash key, that is: + * hash_shift * MIN_MATCH >= hash_bits + */ + + this.block_start = 0; + /* Window position at the beginning of the current output block. Gets + * negative when the window is moved backwards. + */ + + this.match_length = 0; /* length of best match */ + this.prev_match = 0; /* previous match */ + this.match_available = 0; /* set if previous match exists */ + this.strstart = 0; /* start of string to insert */ + this.match_start = 0; /* start of matching string */ + this.lookahead = 0; /* number of valid bytes ahead in window */ + + this.prev_length = 0; + /* Length of the best match at previous step. Matches not greater than this + * are discarded. This is used in the lazy match evaluation. + */ + + this.max_chain_length = 0; + /* To speed up deflation, hash chains are never searched beyond this + * length. A higher limit improves compression ratio but degrades the + * speed. + */ + + this.max_lazy_match = 0; + /* Attempt to find a better match only when the current match is strictly + * smaller than this value. This mechanism is used only for compression + * levels >= 4. + */ + // That's alias to max_lazy_match, don't use directly + //this.max_insert_length = 0; + /* Insert new strings in the hash table only if the match length is not + * greater than this length. This saves time but degrades compression. + * max_insert_length is used only for compression levels <= 3. 
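 * (Illustrative note, not from the original comment: the actual values come
 * from configuration_table above -- e.g. level 1 uses max_lazy 4 with
 * deflate_fast, while level 9 uses max_lazy 258 with deflate_slow.)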
+ */ + + this.level = 0; /* compression level (1..9) */ + this.strategy = 0; /* favor or force Huffman coding*/ + + this.good_match = 0; + /* Use a faster search when the previous match is longer than this */ + + this.nice_match = 0; /* Stop searching when current match exceeds this */ + + /* used by trees.c: */ + + /* Didn't use ct_data typedef below to suppress compiler warning */ + + // struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */ + // struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */ + // struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */ + + // Use flat array of DOUBLE size, with interleaved fata, + // because JS does not support effective + this.dyn_ltree = new utils.Buf16(HEAP_SIZE * 2); + this.dyn_dtree = new utils.Buf16((2 * D_CODES + 1) * 2); + this.bl_tree = new utils.Buf16((2 * BL_CODES + 1) * 2); + zero(this.dyn_ltree); + zero(this.dyn_dtree); + zero(this.bl_tree); + + this.l_desc = null; /* desc. for literal tree */ + this.d_desc = null; /* desc. for distance tree */ + this.bl_desc = null; /* desc. for bit length tree */ + + //ush bl_count[MAX_BITS+1]; + this.bl_count = new utils.Buf16(MAX_BITS + 1); + /* number of codes at each bit length for an optimal tree */ + + //int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */ + this.heap = new utils.Buf16(2 * L_CODES + 1); /* heap used to build the Huffman trees */ + zero(this.heap); + + this.heap_len = 0; /* number of elements in the heap */ + this.heap_max = 0; /* element of largest frequency */ + /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used. + * The same heap array is used to build all trees. + */ + + this.depth = new utils.Buf16(2 * L_CODES + 1); //uch depth[2*L_CODES+1]; + zero(this.depth); + /* Depth of each subtree used as tie breaker for trees of equal frequency + */ + + this.l_buf = 0; /* buffer index for literals or lengths */ + + this.lit_bufsize = 0; + /* Size of match buffer for literals/lengths. There are 4 reasons for + * limiting lit_bufsize to 64K: + * - frequencies can be kept in 16 bit counters + * - if compression is not successful for the first block, all input + * data is still in the window so we can still emit a stored block even + * when input comes from standard input. (This can also be done for + * all blocks if lit_bufsize is not greater than 32K.) + * - if compression is not successful for a file smaller than 64K, we can + * even emit a stored file instead of a stored block (saving 5 bytes). + * This is applicable only for zip (not gzip or zlib). + * - creating new Huffman trees less frequently may not provide fast + * adaptation to changes in the input data statistics. (Take for + * example a binary file with poorly compressible code followed by + * a highly compressible string table.) Smaller buffer sizes give + * fast adaptation but have of course the overhead of transmitting + * trees more frequently. + * - I can't count above 4 + */ + + this.last_lit = 0; /* running index in l_buf */ + + this.d_buf = 0; + /* Buffer index for distances. To simplify the code, d_buf and l_buf have + * the same number of elements. To use different lengths, an extra flag + * array would be necessary. 
+ */ + + this.opt_len = 0; /* bit length of current block with optimal trees */ + this.static_len = 0; /* bit length of current block with static trees */ + this.matches = 0; /* number of string matches in current block */ + this.insert = 0; /* bytes at end of window left to insert */ + + + this.bi_buf = 0; + /* Output buffer. bits are inserted starting at the bottom (least + * significant bits). + */ + this.bi_valid = 0; + /* Number of valid bits in bi_buf. All bits above the last valid bit + * are always zero. + */ + + // Used for window memory init. We safely ignore it for JS. That makes + // sense only for pointers and memory check tools. + //this.high_water = 0; + /* High water mark offset in window for initialized bytes -- bytes above + * this are set to zero in order to avoid memory check warnings when + * longest match routines access bytes past the input. This is then + * updated to the new high water mark. + */ +} + + +function deflateResetKeep(strm) { + var s; + + if (!strm || !strm.state) { + return err(strm, Z_STREAM_ERROR); + } + + strm.total_in = strm.total_out = 0; + strm.data_type = Z_UNKNOWN; + + s = strm.state; + s.pending = 0; + s.pending_out = 0; + + if (s.wrap < 0) { + s.wrap = -s.wrap; + /* was made negative by deflate(..., Z_FINISH); */ + } + s.status = (s.wrap ? INIT_STATE : BUSY_STATE); + strm.adler = (s.wrap === 2) ? + 0 // crc32(0, Z_NULL, 0) + : + 1; // adler32(0, Z_NULL, 0) + s.last_flush = Z_NO_FLUSH; + trees._tr_init(s); + return Z_OK; +} + + +function deflateReset(strm) { + var ret = deflateResetKeep(strm); + if (ret === Z_OK) { + lm_init(strm.state); + } + return ret; +} + + +function deflateSetHeader(strm, head) { + if (!strm || !strm.state) { return Z_STREAM_ERROR; } + if (strm.state.wrap !== 2) { return Z_STREAM_ERROR; } + strm.state.gzhead = head; + return Z_OK; +} + + +function deflateInit2(strm, level, method, windowBits, memLevel, strategy) { + if (!strm) { // === Z_NULL + return Z_STREAM_ERROR; + } + var wrap = 1; + + if (level === Z_DEFAULT_COMPRESSION) { + level = 6; + } + + if (windowBits < 0) { /* suppress zlib wrapper */ + wrap = 0; + windowBits = -windowBits; + } + + else if (windowBits > 15) { + wrap = 2; /* write gzip wrapper instead */ + windowBits -= 16; + } + + + if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method !== Z_DEFLATED || + windowBits < 8 || windowBits > 15 || level < 0 || level > 9 || + strategy < 0 || strategy > Z_FIXED) { + return err(strm, Z_STREAM_ERROR); + } + + + if (windowBits === 8) { + windowBits = 9; + } + /* until 256-byte window bug fixed */ + + var s = new DeflateState(); + + strm.state = s; + s.strm = strm; + + s.wrap = wrap; + s.gzhead = null; + s.w_bits = windowBits; + s.w_size = 1 << s.w_bits; + s.w_mask = s.w_size - 1; + + s.hash_bits = memLevel + 7; + s.hash_size = 1 << s.hash_bits; + s.hash_mask = s.hash_size - 1; + s.hash_shift = ~~((s.hash_bits + MIN_MATCH - 1) / MIN_MATCH); + + s.window = new utils.Buf8(s.w_size * 2); + s.head = new utils.Buf16(s.hash_size); + s.prev = new utils.Buf16(s.w_size); + + // Don't need mem init magic for JS. 
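  // (Illustrative note, not part of the bundled source.) Assuming the usual
  // zlib defaults of windowBits 15 and memLevel 8, the buffers above and the
  // pending buffer set up below come out to:
  //   w_size      = 1 << 15 = 32768   -> window = Buf8(65536), prev = Buf16(32768)
  //   hash_bits   = 8 + 7   = 15      -> head = Buf16(32768), hash_shift = 5
  //   lit_bufsize = 1 << (8 + 6) = 16384 -> pending_buf = Buf8(65536),
  //   d_buf offset = 16384, l_buf offset = 49152 inside pending_buf.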
+ //s.high_water = 0; /* nothing written to s->window yet */ + + s.lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */ + + s.pending_buf_size = s.lit_bufsize * 4; + + //overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2); + //s->pending_buf = (uchf *) overlay; + s.pending_buf = new utils.Buf8(s.pending_buf_size); + + // It is offset from `s.pending_buf` (size is `s.lit_bufsize * 2`) + //s->d_buf = overlay + s->lit_bufsize/sizeof(ush); + s.d_buf = 1 * s.lit_bufsize; + + //s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize; + s.l_buf = (1 + 2) * s.lit_bufsize; + + s.level = level; + s.strategy = strategy; + s.method = method; + + return deflateReset(strm); +} + +function deflateInit(strm, level) { + return deflateInit2(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY); +} + + +function deflate(strm, flush) { + var old_flush, s; + var beg, val; // for gzip header write only + + if (!strm || !strm.state || + flush > Z_BLOCK || flush < 0) { + return strm ? err(strm, Z_STREAM_ERROR) : Z_STREAM_ERROR; + } + + s = strm.state; + + if (!strm.output || + (!strm.input && strm.avail_in !== 0) || + (s.status === FINISH_STATE && flush !== Z_FINISH)) { + return err(strm, (strm.avail_out === 0) ? Z_BUF_ERROR : Z_STREAM_ERROR); + } + + s.strm = strm; /* just in case */ + old_flush = s.last_flush; + s.last_flush = flush; + + /* Write the header */ + if (s.status === INIT_STATE) { + + if (s.wrap === 2) { // GZIP header + strm.adler = 0; //crc32(0L, Z_NULL, 0); + put_byte(s, 31); + put_byte(s, 139); + put_byte(s, 8); + if (!s.gzhead) { // s->gzhead == Z_NULL + put_byte(s, 0); + put_byte(s, 0); + put_byte(s, 0); + put_byte(s, 0); + put_byte(s, 0); + put_byte(s, s.level === 9 ? 2 : + (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2 ? + 4 : 0)); + put_byte(s, OS_CODE); + s.status = BUSY_STATE; + } + else { + put_byte(s, (s.gzhead.text ? 1 : 0) + + (s.gzhead.hcrc ? 2 : 0) + + (!s.gzhead.extra ? 0 : 4) + + (!s.gzhead.name ? 0 : 8) + + (!s.gzhead.comment ? 0 : 16) + ); + put_byte(s, s.gzhead.time & 0xff); + put_byte(s, (s.gzhead.time >> 8) & 0xff); + put_byte(s, (s.gzhead.time >> 16) & 0xff); + put_byte(s, (s.gzhead.time >> 24) & 0xff); + put_byte(s, s.level === 9 ? 2 : + (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2 ? 
+ 4 : 0)); + put_byte(s, s.gzhead.os & 0xff); + if (s.gzhead.extra && s.gzhead.extra.length) { + put_byte(s, s.gzhead.extra.length & 0xff); + put_byte(s, (s.gzhead.extra.length >> 8) & 0xff); + } + if (s.gzhead.hcrc) { + strm.adler = crc32(strm.adler, s.pending_buf, s.pending, 0); + } + s.gzindex = 0; + s.status = EXTRA_STATE; + } + } + else // DEFLATE header + { + var header = (Z_DEFLATED + ((s.w_bits - 8) << 4)) << 8; + var level_flags = -1; + + if (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2) { + level_flags = 0; + } else if (s.level < 6) { + level_flags = 1; + } else if (s.level === 6) { + level_flags = 2; + } else { + level_flags = 3; + } + header |= (level_flags << 6); + if (s.strstart !== 0) { header |= PRESET_DICT; } + header += 31 - (header % 31); + + s.status = BUSY_STATE; + putShortMSB(s, header); + + /* Save the adler32 of the preset dictionary: */ + if (s.strstart !== 0) { + putShortMSB(s, strm.adler >>> 16); + putShortMSB(s, strm.adler & 0xffff); + } + strm.adler = 1; // adler32(0L, Z_NULL, 0); + } + } + +//#ifdef GZIP + if (s.status === EXTRA_STATE) { + if (s.gzhead.extra/* != Z_NULL*/) { + beg = s.pending; /* start of bytes to update crc */ + + while (s.gzindex < (s.gzhead.extra.length & 0xffff)) { + if (s.pending === s.pending_buf_size) { + if (s.gzhead.hcrc && s.pending > beg) { + strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg); + } + flush_pending(strm); + beg = s.pending; + if (s.pending === s.pending_buf_size) { + break; + } + } + put_byte(s, s.gzhead.extra[s.gzindex] & 0xff); + s.gzindex++; + } + if (s.gzhead.hcrc && s.pending > beg) { + strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg); + } + if (s.gzindex === s.gzhead.extra.length) { + s.gzindex = 0; + s.status = NAME_STATE; + } + } + else { + s.status = NAME_STATE; + } + } + if (s.status === NAME_STATE) { + if (s.gzhead.name/* != Z_NULL*/) { + beg = s.pending; /* start of bytes to update crc */ + //int val; + + do { + if (s.pending === s.pending_buf_size) { + if (s.gzhead.hcrc && s.pending > beg) { + strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg); + } + flush_pending(strm); + beg = s.pending; + if (s.pending === s.pending_buf_size) { + val = 1; + break; + } + } + // JS specific: little magic to add zero terminator to end of string + if (s.gzindex < s.gzhead.name.length) { + val = s.gzhead.name.charCodeAt(s.gzindex++) & 0xff; + } else { + val = 0; + } + put_byte(s, val); + } while (val !== 0); + + if (s.gzhead.hcrc && s.pending > beg) { + strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg); + } + if (val === 0) { + s.gzindex = 0; + s.status = COMMENT_STATE; + } + } + else { + s.status = COMMENT_STATE; + } + } + if (s.status === COMMENT_STATE) { + if (s.gzhead.comment/* != Z_NULL*/) { + beg = s.pending; /* start of bytes to update crc */ + //int val; + + do { + if (s.pending === s.pending_buf_size) { + if (s.gzhead.hcrc && s.pending > beg) { + strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg); + } + flush_pending(strm); + beg = s.pending; + if (s.pending === s.pending_buf_size) { + val = 1; + break; + } + } + // JS specific: little magic to add zero terminator to end of string + if (s.gzindex < s.gzhead.comment.length) { + val = s.gzhead.comment.charCodeAt(s.gzindex++) & 0xff; + } else { + val = 0; + } + put_byte(s, val); + } while (val !== 0); + + if (s.gzhead.hcrc && s.pending > beg) { + strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg); + } + if (val === 0) { + s.status = HCRC_STATE; + } + } + else { + 
s.status = HCRC_STATE; + } + } + if (s.status === HCRC_STATE) { + if (s.gzhead.hcrc) { + if (s.pending + 2 > s.pending_buf_size) { + flush_pending(strm); + } + if (s.pending + 2 <= s.pending_buf_size) { + put_byte(s, strm.adler & 0xff); + put_byte(s, (strm.adler >> 8) & 0xff); + strm.adler = 0; //crc32(0L, Z_NULL, 0); + s.status = BUSY_STATE; + } + } + else { + s.status = BUSY_STATE; + } + } +//#endif + + /* Flush as much pending output as possible */ + if (s.pending !== 0) { + flush_pending(strm); + if (strm.avail_out === 0) { + /* Since avail_out is 0, deflate will be called again with + * more output space, but possibly with both pending and + * avail_in equal to zero. There won't be anything to do, + * but this is not an error situation so make sure we + * return OK instead of BUF_ERROR at next call of deflate: + */ + s.last_flush = -1; + return Z_OK; + } + + /* Make sure there is something to do and avoid duplicate consecutive + * flushes. For repeated and useless calls with Z_FINISH, we keep + * returning Z_STREAM_END instead of Z_BUF_ERROR. + */ + } else if (strm.avail_in === 0 && rank(flush) <= rank(old_flush) && + flush !== Z_FINISH) { + return err(strm, Z_BUF_ERROR); + } + + /* User must not provide more input after the first FINISH: */ + if (s.status === FINISH_STATE && strm.avail_in !== 0) { + return err(strm, Z_BUF_ERROR); + } + + /* Start a new block or continue the current one. + */ + if (strm.avail_in !== 0 || s.lookahead !== 0 || + (flush !== Z_NO_FLUSH && s.status !== FINISH_STATE)) { + var bstate = (s.strategy === Z_HUFFMAN_ONLY) ? deflate_huff(s, flush) : + (s.strategy === Z_RLE ? deflate_rle(s, flush) : + configuration_table[s.level].func(s, flush)); + + if (bstate === BS_FINISH_STARTED || bstate === BS_FINISH_DONE) { + s.status = FINISH_STATE; + } + if (bstate === BS_NEED_MORE || bstate === BS_FINISH_STARTED) { + if (strm.avail_out === 0) { + s.last_flush = -1; + /* avoid BUF_ERROR next call, see above */ + } + return Z_OK; + /* If flush != Z_NO_FLUSH && avail_out == 0, the next call + * of deflate should use the same flush parameter to make sure + * that the flush is complete. So we don't have to output an + * empty block here, this will be done at next call. This also + * ensures that for a very small output buffer, we emit at most + * one empty block. + */ + } + if (bstate === BS_BLOCK_DONE) { + if (flush === Z_PARTIAL_FLUSH) { + trees._tr_align(s); + } + else if (flush !== Z_BLOCK) { /* FULL_FLUSH or SYNC_FLUSH */ + + trees._tr_stored_block(s, 0, 0, false); + /* For a full flush, this empty block will be recognized + * as a special marker by inflate_sync(). 
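 * (Illustrative note, not from the original comment: once the bit buffer is
 * flushed to a byte boundary, an empty stored block ends with the bytes
 * 00 00 FF FF, which is the byte pattern inflate_sync() scans for.)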
+ */ + if (flush === Z_FULL_FLUSH) { + /*** CLEAR_HASH(s); ***/ /* forget history */ + zero(s.head); // Fill with NIL (= 0); + + if (s.lookahead === 0) { + s.strstart = 0; + s.block_start = 0; + s.insert = 0; + } + } + } + flush_pending(strm); + if (strm.avail_out === 0) { + s.last_flush = -1; /* avoid BUF_ERROR at next call, see above */ + return Z_OK; + } + } + } + //Assert(strm->avail_out > 0, "bug2"); + //if (strm.avail_out <= 0) { throw new Error("bug2");} + + if (flush !== Z_FINISH) { return Z_OK; } + if (s.wrap <= 0) { return Z_STREAM_END; } + + /* Write the trailer */ + if (s.wrap === 2) { + put_byte(s, strm.adler & 0xff); + put_byte(s, (strm.adler >> 8) & 0xff); + put_byte(s, (strm.adler >> 16) & 0xff); + put_byte(s, (strm.adler >> 24) & 0xff); + put_byte(s, strm.total_in & 0xff); + put_byte(s, (strm.total_in >> 8) & 0xff); + put_byte(s, (strm.total_in >> 16) & 0xff); + put_byte(s, (strm.total_in >> 24) & 0xff); + } + else + { + putShortMSB(s, strm.adler >>> 16); + putShortMSB(s, strm.adler & 0xffff); + } + + flush_pending(strm); + /* If avail_out is zero, the application will call deflate again + * to flush the rest. + */ + if (s.wrap > 0) { s.wrap = -s.wrap; } + /* write the trailer only once! */ + return s.pending !== 0 ? Z_OK : Z_STREAM_END; +} + +function deflateEnd(strm) { + var status; + + if (!strm/*== Z_NULL*/ || !strm.state/*== Z_NULL*/) { + return Z_STREAM_ERROR; + } + + status = strm.state.status; + if (status !== INIT_STATE && + status !== EXTRA_STATE && + status !== NAME_STATE && + status !== COMMENT_STATE && + status !== HCRC_STATE && + status !== BUSY_STATE && + status !== FINISH_STATE + ) { + return err(strm, Z_STREAM_ERROR); + } + + strm.state = null; + + return status === BUSY_STATE ? err(strm, Z_DATA_ERROR) : Z_OK; +} + + +/* ========================================================================= + * Initializes the compression dictionary from the given byte + * sequence without producing any compressed output. 
+ */ +function deflateSetDictionary(strm, dictionary) { + var dictLength = dictionary.length; + + var s; + var str, n; + var wrap; + var avail; + var next; + var input; + var tmpDict; + + if (!strm/*== Z_NULL*/ || !strm.state/*== Z_NULL*/) { + return Z_STREAM_ERROR; + } + + s = strm.state; + wrap = s.wrap; + + if (wrap === 2 || (wrap === 1 && s.status !== INIT_STATE) || s.lookahead) { + return Z_STREAM_ERROR; + } + + /* when using zlib wrappers, compute Adler-32 for provided dictionary */ + if (wrap === 1) { + /* adler32(strm->adler, dictionary, dictLength); */ + strm.adler = adler32(strm.adler, dictionary, dictLength, 0); + } + + s.wrap = 0; /* avoid computing Adler-32 in read_buf */ + + /* if dictionary would fill window, just replace the history */ + if (dictLength >= s.w_size) { + if (wrap === 0) { /* already empty otherwise */ + /*** CLEAR_HASH(s); ***/ + zero(s.head); // Fill with NIL (= 0); + s.strstart = 0; + s.block_start = 0; + s.insert = 0; + } + /* use the tail */ + // dictionary = dictionary.slice(dictLength - s.w_size); + tmpDict = new utils.Buf8(s.w_size); + utils.arraySet(tmpDict, dictionary, dictLength - s.w_size, s.w_size, 0); + dictionary = tmpDict; + dictLength = s.w_size; + } + /* insert dictionary into window and hash */ + avail = strm.avail_in; + next = strm.next_in; + input = strm.input; + strm.avail_in = dictLength; + strm.next_in = 0; + strm.input = dictionary; + fill_window(s); + while (s.lookahead >= MIN_MATCH) { + str = s.strstart; + n = s.lookahead - (MIN_MATCH - 1); + do { + /* UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); */ + s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[str + MIN_MATCH - 1]) & s.hash_mask; + + s.prev[str & s.w_mask] = s.head[s.ins_h]; + + s.head[s.ins_h] = str; + str++; + } while (--n); + s.strstart = str; + s.lookahead = MIN_MATCH - 1; + fill_window(s); + } + s.strstart += s.lookahead; + s.block_start = s.strstart; + s.insert = s.lookahead; + s.lookahead = 0; + s.match_length = s.prev_length = MIN_MATCH - 1; + s.match_available = 0; + strm.next_in = next; + strm.input = input; + strm.avail_in = avail; + s.wrap = wrap; + return Z_OK; +} + + +exports.deflateInit = deflateInit; +exports.deflateInit2 = deflateInit2; +exports.deflateReset = deflateReset; +exports.deflateResetKeep = deflateResetKeep; +exports.deflateSetHeader = deflateSetHeader; +exports.deflate = deflate; +exports.deflateEnd = deflateEnd; +exports.deflateSetDictionary = deflateSetDictionary; +exports.deflateInfo = 'pako deflate (from Nodeca project)'; + +/* Not implemented +exports.deflateBound = deflateBound; +exports.deflateCopy = deflateCopy; +exports.deflateParams = deflateParams; +exports.deflatePending = deflatePending; +exports.deflatePrime = deflatePrime; +exports.deflateTune = deflateTune; +*/ + +},{"../utils/common":24,"./adler32":26,"./crc32":28,"./messages":34,"./trees":35}],30:[function(require,module,exports){ +'use strict'; + + +function GZheader() { + /* true if compressed data believed to be text */ + this.text = 0; + /* modification time */ + this.time = 0; + /* extra flags (not used when writing a gzip file) */ + this.xflags = 0; + /* operating system */ + this.os = 0; + /* pointer to extra field or Z_NULL if none */ + this.extra = null; + /* extra field length (valid if extra != Z_NULL) */ + this.extra_len = 0; // Actually, we don't need it in JS, + // but leave for few code modifications + + // + // Setup limits is not necessary because in js we should not preallocate memory + // for inflate use constant limit in 65536 bytes + // + + /* space 
at extra (only when reading header) */ + // this.extra_max = 0; + /* pointer to zero-terminated file name or Z_NULL */ + this.name = ''; + /* space at name (only when reading header) */ + // this.name_max = 0; + /* pointer to zero-terminated comment or Z_NULL */ + this.comment = ''; + /* space at comment (only when reading header) */ + // this.comm_max = 0; + /* true if there was or will be a header crc */ + this.hcrc = 0; + /* true when done reading gzip header (not used when writing a gzip file) */ + this.done = false; +} + +module.exports = GZheader; + +},{}],31:[function(require,module,exports){ +'use strict'; + +// See state defs from inflate.js +var BAD = 30; /* got a data error -- remain here until reset */ +var TYPE = 12; /* i: waiting for type bits, including last-flag bit */ + +/* + Decode literal, length, and distance codes and write out the resulting + literal and match bytes until either not enough input or output is + available, an end-of-block is encountered, or a data error is encountered. + When large enough input and output buffers are supplied to inflate(), for + example, a 16K input buffer and a 64K output buffer, more than 95% of the + inflate execution time is spent in this routine. + + Entry assumptions: + + state.mode === LEN + strm.avail_in >= 6 + strm.avail_out >= 258 + start >= strm.avail_out + state.bits < 8 + + On return, state.mode is one of: + + LEN -- ran out of enough output space or enough available input + TYPE -- reached end of block code, inflate() to interpret next block + BAD -- error in block data + + Notes: + + - The maximum input bits used by a length/distance pair is 15 bits for the + length code, 5 bits for the length extra, 15 bits for the distance code, + and 13 bits for the distance extra. This totals 48 bits, or six bytes. + Therefore if strm.avail_in >= 6, then there is enough input to avoid + checking for available input while decoding. + + - The maximum bytes that a single length/distance pair can output is 258 + bytes, which is the maximum length that can be coded. inflate_fast() + requires strm.avail_out >= 258 for each loop to avoid checking for + output space. 
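 * (Illustrative note, not from the original comment: these assumptions are why
 * the code below sets last = _in + (strm.avail_in - 5) and
 * end = _out + (strm.avail_out - 257); the main loop runs while
 * _in < last && _out < end, so at least 6 input bytes and 258 output bytes
 * remain each time another length/distance pair is decoded.)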
+ */ +module.exports = function inflate_fast(strm, start) { + var state; + var _in; /* local strm.input */ + var last; /* have enough input while in < last */ + var _out; /* local strm.output */ + var beg; /* inflate()'s initial strm.output */ + var end; /* while out < end, enough space available */ +//#ifdef INFLATE_STRICT + var dmax; /* maximum distance from zlib header */ +//#endif + var wsize; /* window size or zero if not using window */ + var whave; /* valid bytes in the window */ + var wnext; /* window write index */ + // Use `s_window` instead `window`, avoid conflict with instrumentation tools + var s_window; /* allocated sliding window, if wsize != 0 */ + var hold; /* local strm.hold */ + var bits; /* local strm.bits */ + var lcode; /* local strm.lencode */ + var dcode; /* local strm.distcode */ + var lmask; /* mask for first level of length codes */ + var dmask; /* mask for first level of distance codes */ + var here; /* retrieved table entry */ + var op; /* code bits, operation, extra bits, or */ + /* window position, window bytes to copy */ + var len; /* match length, unused bytes */ + var dist; /* match distance */ + var from; /* where to copy match from */ + var from_source; + + + var input, output; // JS specific, because we have no pointers + + /* copy state to local variables */ + state = strm.state; + //here = state.here; + _in = strm.next_in; + input = strm.input; + last = _in + (strm.avail_in - 5); + _out = strm.next_out; + output = strm.output; + beg = _out - (start - strm.avail_out); + end = _out + (strm.avail_out - 257); +//#ifdef INFLATE_STRICT + dmax = state.dmax; +//#endif + wsize = state.wsize; + whave = state.whave; + wnext = state.wnext; + s_window = state.window; + hold = state.hold; + bits = state.bits; + lcode = state.lencode; + dcode = state.distcode; + lmask = (1 << state.lenbits) - 1; + dmask = (1 << state.distbits) - 1; + + + /* decode literals and length/distances until end-of-block or not enough + input data or output space */ + + top: + do { + if (bits < 15) { + hold += input[_in++] << bits; + bits += 8; + hold += input[_in++] << bits; + bits += 8; + } + + here = lcode[hold & lmask]; + + dolen: + for (;;) { // Goto emulation + op = here >>> 24/*here.bits*/; + hold >>>= op; + bits -= op; + op = (here >>> 16) & 0xff/*here.op*/; + if (op === 0) { /* literal */ + //Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? 
+ // "inflate: literal '%c'\n" : + // "inflate: literal 0x%02x\n", here.val)); + output[_out++] = here & 0xffff/*here.val*/; + } + else if (op & 16) { /* length base */ + len = here & 0xffff/*here.val*/; + op &= 15; /* number of extra bits */ + if (op) { + if (bits < op) { + hold += input[_in++] << bits; + bits += 8; + } + len += hold & ((1 << op) - 1); + hold >>>= op; + bits -= op; + } + //Tracevv((stderr, "inflate: length %u\n", len)); + if (bits < 15) { + hold += input[_in++] << bits; + bits += 8; + hold += input[_in++] << bits; + bits += 8; + } + here = dcode[hold & dmask]; + + dodist: + for (;;) { // goto emulation + op = here >>> 24/*here.bits*/; + hold >>>= op; + bits -= op; + op = (here >>> 16) & 0xff/*here.op*/; + + if (op & 16) { /* distance base */ + dist = here & 0xffff/*here.val*/; + op &= 15; /* number of extra bits */ + if (bits < op) { + hold += input[_in++] << bits; + bits += 8; + if (bits < op) { + hold += input[_in++] << bits; + bits += 8; + } + } + dist += hold & ((1 << op) - 1); +//#ifdef INFLATE_STRICT + if (dist > dmax) { + strm.msg = 'invalid distance too far back'; + state.mode = BAD; + break top; + } +//#endif + hold >>>= op; + bits -= op; + //Tracevv((stderr, "inflate: distance %u\n", dist)); + op = _out - beg; /* max distance in output */ + if (dist > op) { /* see if copy from window */ + op = dist - op; /* distance back in window */ + if (op > whave) { + if (state.sane) { + strm.msg = 'invalid distance too far back'; + state.mode = BAD; + break top; + } + +// (!) This block is disabled in zlib defailts, +// don't enable it for binary compatibility +//#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR +// if (len <= op - whave) { +// do { +// output[_out++] = 0; +// } while (--len); +// continue top; +// } +// len -= op - whave; +// do { +// output[_out++] = 0; +// } while (--op > whave); +// if (op === 0) { +// from = _out - dist; +// do { +// output[_out++] = output[from++]; +// } while (--len); +// continue top; +// } +//#endif + } + from = 0; // window index + from_source = s_window; + if (wnext === 0) { /* very common case */ + from += wsize - op; + if (op < len) { /* some from window */ + len -= op; + do { + output[_out++] = s_window[from++]; + } while (--op); + from = _out - dist; /* rest from output */ + from_source = output; + } + } + else if (wnext < op) { /* wrap around window */ + from += wsize + wnext - op; + op -= wnext; + if (op < len) { /* some from end of window */ + len -= op; + do { + output[_out++] = s_window[from++]; + } while (--op); + from = 0; + if (wnext < len) { /* some from start of window */ + op = wnext; + len -= op; + do { + output[_out++] = s_window[from++]; + } while (--op); + from = _out - dist; /* rest from output */ + from_source = output; + } + } + } + else { /* contiguous in window */ + from += wnext - op; + if (op < len) { /* some from window */ + len -= op; + do { + output[_out++] = s_window[from++]; + } while (--op); + from = _out - dist; /* rest from output */ + from_source = output; + } + } + while (len > 2) { + output[_out++] = from_source[from++]; + output[_out++] = from_source[from++]; + output[_out++] = from_source[from++]; + len -= 3; + } + if (len) { + output[_out++] = from_source[from++]; + if (len > 1) { + output[_out++] = from_source[from++]; + } + } + } + else { + from = _out - dist; /* copy direct from output */ + do { /* minimum length is three */ + output[_out++] = output[from++]; + output[_out++] = output[from++]; + output[_out++] = output[from++]; + len -= 3; + } while (len > 2); + if (len) { + 
output[_out++] = output[from++]; + if (len > 1) { + output[_out++] = output[from++]; + } + } + } + } + else if ((op & 64) === 0) { /* 2nd level distance code */ + here = dcode[(here & 0xffff)/*here.val*/ + (hold & ((1 << op) - 1))]; + continue dodist; + } + else { + strm.msg = 'invalid distance code'; + state.mode = BAD; + break top; + } + + break; // need to emulate goto via "continue" + } + } + else if ((op & 64) === 0) { /* 2nd level length code */ + here = lcode[(here & 0xffff)/*here.val*/ + (hold & ((1 << op) - 1))]; + continue dolen; + } + else if (op & 32) { /* end-of-block */ + //Tracevv((stderr, "inflate: end of block\n")); + state.mode = TYPE; + break top; + } + else { + strm.msg = 'invalid literal/length code'; + state.mode = BAD; + break top; + } + + break; // need to emulate goto via "continue" + } + } while (_in < last && _out < end); + + /* return unused bytes (on entry, bits < 8, so in won't go too far back) */ + len = bits >> 3; + _in -= len; + bits -= len << 3; + hold &= (1 << bits) - 1; + + /* update state and return */ + strm.next_in = _in; + strm.next_out = _out; + strm.avail_in = (_in < last ? 5 + (last - _in) : 5 - (_in - last)); + strm.avail_out = (_out < end ? 257 + (end - _out) : 257 - (_out - end)); + state.hold = hold; + state.bits = bits; + return; +}; + +},{}],32:[function(require,module,exports){ +'use strict'; + + +var utils = require('../utils/common'); +var adler32 = require('./adler32'); +var crc32 = require('./crc32'); +var inflate_fast = require('./inffast'); +var inflate_table = require('./inftrees'); + +var CODES = 0; +var LENS = 1; +var DISTS = 2; + +/* Public constants ==========================================================*/ +/* ===========================================================================*/ + + +/* Allowed flush values; see deflate() and inflate() below for details */ +//var Z_NO_FLUSH = 0; +//var Z_PARTIAL_FLUSH = 1; +//var Z_SYNC_FLUSH = 2; +//var Z_FULL_FLUSH = 3; +var Z_FINISH = 4; +var Z_BLOCK = 5; +var Z_TREES = 6; + + +/* Return codes for the compression/decompression functions. Negative values + * are errors, positive values are used for special but normal events. 
+ */ +var Z_OK = 0; +var Z_STREAM_END = 1; +var Z_NEED_DICT = 2; +//var Z_ERRNO = -1; +var Z_STREAM_ERROR = -2; +var Z_DATA_ERROR = -3; +var Z_MEM_ERROR = -4; +var Z_BUF_ERROR = -5; +//var Z_VERSION_ERROR = -6; + +/* The deflate compression method */ +var Z_DEFLATED = 8; + + +/* STATES ====================================================================*/ +/* ===========================================================================*/ + + +var HEAD = 1; /* i: waiting for magic header */ +var FLAGS = 2; /* i: waiting for method and flags (gzip) */ +var TIME = 3; /* i: waiting for modification time (gzip) */ +var OS = 4; /* i: waiting for extra flags and operating system (gzip) */ +var EXLEN = 5; /* i: waiting for extra length (gzip) */ +var EXTRA = 6; /* i: waiting for extra bytes (gzip) */ +var NAME = 7; /* i: waiting for end of file name (gzip) */ +var COMMENT = 8; /* i: waiting for end of comment (gzip) */ +var HCRC = 9; /* i: waiting for header crc (gzip) */ +var DICTID = 10; /* i: waiting for dictionary check value */ +var DICT = 11; /* waiting for inflateSetDictionary() call */ +var TYPE = 12; /* i: waiting for type bits, including last-flag bit */ +var TYPEDO = 13; /* i: same, but skip check to exit inflate on new block */ +var STORED = 14; /* i: waiting for stored size (length and complement) */ +var COPY_ = 15; /* i/o: same as COPY below, but only first time in */ +var COPY = 16; /* i/o: waiting for input or output to copy stored block */ +var TABLE = 17; /* i: waiting for dynamic block table lengths */ +var LENLENS = 18; /* i: waiting for code length code lengths */ +var CODELENS = 19; /* i: waiting for length/lit and distance code lengths */ +var LEN_ = 20; /* i: same as LEN below, but only first time in */ +var LEN = 21; /* i: waiting for length/lit/eob code */ +var LENEXT = 22; /* i: waiting for length extra bits */ +var DIST = 23; /* i: waiting for distance code */ +var DISTEXT = 24; /* i: waiting for distance extra bits */ +var MATCH = 25; /* o: waiting for output space to copy string */ +var LIT = 26; /* o: waiting for output space to write literal */ +var CHECK = 27; /* i: waiting for 32-bit check value */ +var LENGTH = 28; /* i: waiting for 32-bit length (gzip) */ +var DONE = 29; /* finished check, done -- remain here until reset */ +var BAD = 30; /* got a data error -- remain here until reset */ +var MEM = 31; /* got an inflate() memory error -- remain here until reset */ +var SYNC = 32; /* looking for synchronization bytes to restart inflate() */ + +/* ===========================================================================*/ + + + +var ENOUGH_LENS = 852; +var ENOUGH_DISTS = 592; +//var ENOUGH = (ENOUGH_LENS+ENOUGH_DISTS); + +var MAX_WBITS = 15; +/* 32K LZ77 window */ +var DEF_WBITS = MAX_WBITS; + + +function zswap32(q) { + return (((q >>> 24) & 0xff) + + ((q >>> 8) & 0xff00) + + ((q & 0xff00) << 8) + + ((q & 0xff) << 24)); +} + + +function InflateState() { + this.mode = 0; /* current inflate mode */ + this.last = false; /* true if processing last block */ + this.wrap = 0; /* bit 0 true for zlib, bit 1 true for gzip */ + this.havedict = false; /* true if dictionary provided */ + this.flags = 0; /* gzip header method and flags (0 if zlib) */ + this.dmax = 0; /* zlib header max distance (INFLATE_STRICT) */ + this.check = 0; /* protected copy of check value */ + this.total = 0; /* protected copy of output count */ + // TODO: may be {} + this.head = null; /* where to save gzip header information */ + + /* sliding window */ + this.wbits = 0; /* log base 2 of requested 
window size */ + this.wsize = 0; /* window size or zero if not using window */ + this.whave = 0; /* valid bytes in the window */ + this.wnext = 0; /* window write index */ + this.window = null; /* allocated sliding window, if needed */ + + /* bit accumulator */ + this.hold = 0; /* input bit accumulator */ + this.bits = 0; /* number of bits in "in" */ + + /* for string and stored block copying */ + this.length = 0; /* literal or length of data to copy */ + this.offset = 0; /* distance back to copy string from */ + + /* for table and code decoding */ + this.extra = 0; /* extra bits needed */ + + /* fixed and dynamic code tables */ + this.lencode = null; /* starting table for length/literal codes */ + this.distcode = null; /* starting table for distance codes */ + this.lenbits = 0; /* index bits for lencode */ + this.distbits = 0; /* index bits for distcode */ + + /* dynamic table building */ + this.ncode = 0; /* number of code length code lengths */ + this.nlen = 0; /* number of length code lengths */ + this.ndist = 0; /* number of distance code lengths */ + this.have = 0; /* number of code lengths in lens[] */ + this.next = null; /* next available space in codes[] */ + + this.lens = new utils.Buf16(320); /* temporary storage for code lengths */ + this.work = new utils.Buf16(288); /* work area for code table building */ + + /* + because we don't have pointers in js, we use lencode and distcode directly + as buffers so we don't need codes + */ + //this.codes = new utils.Buf32(ENOUGH); /* space for code tables */ + this.lendyn = null; /* dynamic table for length/literal codes (JS specific) */ + this.distdyn = null; /* dynamic table for distance codes (JS specific) */ + this.sane = 0; /* if false, allow invalid distance too far */ + this.back = 0; /* bits back of last unprocessed length/lit */ + this.was = 0; /* initial length of match */ +} + +function inflateResetKeep(strm) { + var state; + + if (!strm || !strm.state) { return Z_STREAM_ERROR; } + state = strm.state; + strm.total_in = strm.total_out = state.total = 0; + strm.msg = ''; /*Z_NULL*/ + if (state.wrap) { /* to support ill-conceived Java test suite */ + strm.adler = state.wrap & 1; + } + state.mode = HEAD; + state.last = 0; + state.havedict = 0; + state.dmax = 32768; + state.head = null/*Z_NULL*/; + state.hold = 0; + state.bits = 0; + //state.lencode = state.distcode = state.next = state.codes; + state.lencode = state.lendyn = new utils.Buf32(ENOUGH_LENS); + state.distcode = state.distdyn = new utils.Buf32(ENOUGH_DISTS); + + state.sane = 1; + state.back = -1; + //Tracev((stderr, "inflate: reset\n")); + return Z_OK; +} + +function inflateReset(strm) { + var state; + + if (!strm || !strm.state) { return Z_STREAM_ERROR; } + state = strm.state; + state.wsize = 0; + state.whave = 0; + state.wnext = 0; + return inflateResetKeep(strm); + +} + +function inflateReset2(strm, windowBits) { + var wrap; + var state; + + /* get the state */ + if (!strm || !strm.state) { return Z_STREAM_ERROR; } + state = strm.state; + + /* extract wrap request from windowBits parameter */ + if (windowBits < 0) { + wrap = 0; + windowBits = -windowBits; + } + else { + wrap = (windowBits >> 4) + 1; + if (windowBits < 48) { + windowBits &= 15; + } + } + + /* set number of window bits, free window if different */ + if (windowBits && (windowBits < 8 || windowBits > 15)) { + return Z_STREAM_ERROR; + } + if (state.window !== null && state.wbits !== windowBits) { + state.window = null; + } + + /* update state and reset the rest of it */ + state.wrap = wrap; + state.wbits = 
windowBits; + return inflateReset(strm); +} + +function inflateInit2(strm, windowBits) { + var ret; + var state; + + if (!strm) { return Z_STREAM_ERROR; } + //strm.msg = Z_NULL; /* in case we return an error */ + + state = new InflateState(); + + //if (state === Z_NULL) return Z_MEM_ERROR; + //Tracev((stderr, "inflate: allocated\n")); + strm.state = state; + state.window = null/*Z_NULL*/; + ret = inflateReset2(strm, windowBits); + if (ret !== Z_OK) { + strm.state = null/*Z_NULL*/; + } + return ret; +} + +function inflateInit(strm) { + return inflateInit2(strm, DEF_WBITS); +} + + +/* + Return state with length and distance decoding tables and index sizes set to + fixed code decoding. Normally this returns fixed tables from inffixed.h. + If BUILDFIXED is defined, then instead this routine builds the tables the + first time it's called, and returns those tables the first time and + thereafter. This reduces the size of the code by about 2K bytes, in + exchange for a little execution time. However, BUILDFIXED should not be + used for threaded applications, since the rewriting of the tables and virgin + may not be thread-safe. + */ +var virgin = true; + +var lenfix, distfix; // We have no pointers in JS, so keep tables separate + +function fixedtables(state) { + /* build fixed huffman tables if first call (may not be thread safe) */ + if (virgin) { + var sym; + + lenfix = new utils.Buf32(512); + distfix = new utils.Buf32(32); + + /* literal/length table */ + sym = 0; + while (sym < 144) { state.lens[sym++] = 8; } + while (sym < 256) { state.lens[sym++] = 9; } + while (sym < 280) { state.lens[sym++] = 7; } + while (sym < 288) { state.lens[sym++] = 8; } + + inflate_table(LENS, state.lens, 0, 288, lenfix, 0, state.work, { bits: 9 }); + + /* distance table */ + sym = 0; + while (sym < 32) { state.lens[sym++] = 5; } + + inflate_table(DISTS, state.lens, 0, 32, distfix, 0, state.work, { bits: 5 }); + + /* do this just once */ + virgin = false; + } + + state.lencode = lenfix; + state.lenbits = 9; + state.distcode = distfix; + state.distbits = 5; +} + + +/* + Update the window with the last wsize (normally 32K) bytes written before + returning. If window does not exist yet, create it. This is only called + when a window is already in use, or when output has been written during this + inflate call, but the end of the deflate stream has not been reached yet. + It is also called to create a window for dictionary data when a dictionary + is loaded. + + Providing output buffers larger than 32K to inflate() should provide a speed + advantage, since only the last 32K of output is copied to the sliding window + upon return from inflate(), and since all distances after the first 32K of + output will fall in the output data, making match copies simpler and faster. + The advantage may be dependent on the size of the processor's data caches. 
+ */ +function updatewindow(strm, src, end, copy) { + var dist; + var state = strm.state; + + /* if it hasn't been done already, allocate space for the window */ + if (state.window === null) { + state.wsize = 1 << state.wbits; + state.wnext = 0; + state.whave = 0; + + state.window = new utils.Buf8(state.wsize); + } + + /* copy state->wsize or less output bytes into the circular window */ + if (copy >= state.wsize) { + utils.arraySet(state.window, src, end - state.wsize, state.wsize, 0); + state.wnext = 0; + state.whave = state.wsize; + } + else { + dist = state.wsize - state.wnext; + if (dist > copy) { + dist = copy; + } + //zmemcpy(state->window + state->wnext, end - copy, dist); + utils.arraySet(state.window, src, end - copy, dist, state.wnext); + copy -= dist; + if (copy) { + //zmemcpy(state->window, end - copy, copy); + utils.arraySet(state.window, src, end - copy, copy, 0); + state.wnext = copy; + state.whave = state.wsize; + } + else { + state.wnext += dist; + if (state.wnext === state.wsize) { state.wnext = 0; } + if (state.whave < state.wsize) { state.whave += dist; } + } + } + return 0; +} + +function inflate(strm, flush) { + var state; + var input, output; // input/output buffers + var next; /* next input INDEX */ + var put; /* next output INDEX */ + var have, left; /* available input and output */ + var hold; /* bit buffer */ + var bits; /* bits in bit buffer */ + var _in, _out; /* save starting available input and output */ + var copy; /* number of stored or match bytes to copy */ + var from; /* where to copy match bytes from */ + var from_source; + var here = 0; /* current decoding table entry */ + var here_bits, here_op, here_val; // paked "here" denormalized (JS specific) + //var last; /* parent table entry */ + var last_bits, last_op, last_val; // paked "last" denormalized (JS specific) + var len; /* length to copy for repeats, bits to drop */ + var ret; /* return code */ + var hbuf = new utils.Buf8(4); /* buffer for gzip header crc calculation */ + var opts; + + var n; // temporary var for NEED_BITS + + var order = /* permutation of code lengths */ + [ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 ]; + + + if (!strm || !strm.state || !strm.output || + (!strm.input && strm.avail_in !== 0)) { + return Z_STREAM_ERROR; + } + + state = strm.state; + if (state.mode === TYPE) { state.mode = TYPEDO; } /* skip check */ + + + //--- LOAD() --- + put = strm.next_out; + output = strm.output; + left = strm.avail_out; + next = strm.next_in; + input = strm.input; + have = strm.avail_in; + hold = state.hold; + bits = state.bits; + //--- + + _in = have; + _out = left; + ret = Z_OK; + + inf_leave: // goto emulation + for (;;) { + switch (state.mode) { + case HEAD: + if (state.wrap === 0) { + state.mode = TYPEDO; + break; + } + //=== NEEDBITS(16); + while (bits < 16) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + if ((state.wrap & 2) && hold === 0x8b1f) { /* gzip header */ + state.check = 0/*crc32(0L, Z_NULL, 0)*/; + //=== CRC2(state.check, hold); + hbuf[0] = hold & 0xff; + hbuf[1] = (hold >>> 8) & 0xff; + state.check = crc32(state.check, hbuf, 2, 0); + //===// + + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + state.mode = FLAGS; + break; + } + state.flags = 0; /* expect zlib header */ + if (state.head) { + state.head.done = false; + } + if (!(state.wrap & 1) || /* check if zlib header allowed */ + (((hold & 0xff)/*BITS(8)*/ << 8) + (hold >> 8)) % 31) { + strm.msg = 'incorrect header check'; + 
state.mode = BAD; + break; + } + if ((hold & 0x0f)/*BITS(4)*/ !== Z_DEFLATED) { + strm.msg = 'unknown compression method'; + state.mode = BAD; + break; + } + //--- DROPBITS(4) ---// + hold >>>= 4; + bits -= 4; + //---// + len = (hold & 0x0f)/*BITS(4)*/ + 8; + if (state.wbits === 0) { + state.wbits = len; + } + else if (len > state.wbits) { + strm.msg = 'invalid window size'; + state.mode = BAD; + break; + } + state.dmax = 1 << len; + //Tracev((stderr, "inflate: zlib header ok\n")); + strm.adler = state.check = 1/*adler32(0L, Z_NULL, 0)*/; + state.mode = hold & 0x200 ? DICTID : TYPE; + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + break; + case FLAGS: + //=== NEEDBITS(16); */ + while (bits < 16) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + state.flags = hold; + if ((state.flags & 0xff) !== Z_DEFLATED) { + strm.msg = 'unknown compression method'; + state.mode = BAD; + break; + } + if (state.flags & 0xe000) { + strm.msg = 'unknown header flags set'; + state.mode = BAD; + break; + } + if (state.head) { + state.head.text = ((hold >> 8) & 1); + } + if (state.flags & 0x0200) { + //=== CRC2(state.check, hold); + hbuf[0] = hold & 0xff; + hbuf[1] = (hold >>> 8) & 0xff; + state.check = crc32(state.check, hbuf, 2, 0); + //===// + } + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + state.mode = TIME; + /* falls through */ + case TIME: + //=== NEEDBITS(32); */ + while (bits < 32) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + if (state.head) { + state.head.time = hold; + } + if (state.flags & 0x0200) { + //=== CRC4(state.check, hold) + hbuf[0] = hold & 0xff; + hbuf[1] = (hold >>> 8) & 0xff; + hbuf[2] = (hold >>> 16) & 0xff; + hbuf[3] = (hold >>> 24) & 0xff; + state.check = crc32(state.check, hbuf, 4, 0); + //=== + } + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + state.mode = OS; + /* falls through */ + case OS: + //=== NEEDBITS(16); */ + while (bits < 16) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + if (state.head) { + state.head.xflags = (hold & 0xff); + state.head.os = (hold >> 8); + } + if (state.flags & 0x0200) { + //=== CRC2(state.check, hold); + hbuf[0] = hold & 0xff; + hbuf[1] = (hold >>> 8) & 0xff; + state.check = crc32(state.check, hbuf, 2, 0); + //===// + } + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + state.mode = EXLEN; + /* falls through */ + case EXLEN: + if (state.flags & 0x0400) { + //=== NEEDBITS(16); */ + while (bits < 16) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + state.length = hold; + if (state.head) { + state.head.extra_len = hold; + } + if (state.flags & 0x0200) { + //=== CRC2(state.check, hold); + hbuf[0] = hold & 0xff; + hbuf[1] = (hold >>> 8) & 0xff; + state.check = crc32(state.check, hbuf, 2, 0); + //===// + } + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + } + else if (state.head) { + state.head.extra = null/*Z_NULL*/; + } + state.mode = EXTRA; + /* falls through */ + case EXTRA: + if (state.flags & 0x0400) { + copy = state.length; + if (copy > have) { copy = have; } + if (copy) { + if (state.head) { + len = state.head.extra_len - state.length; + if (!state.head.extra) { + // Use untyped array for more conveniend processing later + state.head.extra = new Array(state.head.extra_len); + } + utils.arraySet( + state.head.extra, + input, + next, + // extra field is limited 
to 65536 bytes + // - no need for additional size check + copy, + /*len + copy > state.head.extra_max - len ? state.head.extra_max : copy,*/ + len + ); + //zmemcpy(state.head.extra + len, next, + // len + copy > state.head.extra_max ? + // state.head.extra_max - len : copy); + } + if (state.flags & 0x0200) { + state.check = crc32(state.check, input, copy, next); + } + have -= copy; + next += copy; + state.length -= copy; + } + if (state.length) { break inf_leave; } + } + state.length = 0; + state.mode = NAME; + /* falls through */ + case NAME: + if (state.flags & 0x0800) { + if (have === 0) { break inf_leave; } + copy = 0; + do { + // TODO: 2 or 1 bytes? + len = input[next + copy++]; + /* use constant limit because in js we should not preallocate memory */ + if (state.head && len && + (state.length < 65536 /*state.head.name_max*/)) { + state.head.name += String.fromCharCode(len); + } + } while (len && copy < have); + + if (state.flags & 0x0200) { + state.check = crc32(state.check, input, copy, next); + } + have -= copy; + next += copy; + if (len) { break inf_leave; } + } + else if (state.head) { + state.head.name = null; + } + state.length = 0; + state.mode = COMMENT; + /* falls through */ + case COMMENT: + if (state.flags & 0x1000) { + if (have === 0) { break inf_leave; } + copy = 0; + do { + len = input[next + copy++]; + /* use constant limit because in js we should not preallocate memory */ + if (state.head && len && + (state.length < 65536 /*state.head.comm_max*/)) { + state.head.comment += String.fromCharCode(len); + } + } while (len && copy < have); + if (state.flags & 0x0200) { + state.check = crc32(state.check, input, copy, next); + } + have -= copy; + next += copy; + if (len) { break inf_leave; } + } + else if (state.head) { + state.head.comment = null; + } + state.mode = HCRC; + /* falls through */ + case HCRC: + if (state.flags & 0x0200) { + //=== NEEDBITS(16); */ + while (bits < 16) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + if (hold !== (state.check & 0xffff)) { + strm.msg = 'header crc mismatch'; + state.mode = BAD; + break; + } + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + } + if (state.head) { + state.head.hcrc = ((state.flags >> 9) & 1); + state.head.done = true; + } + strm.adler = state.check = 0; + state.mode = TYPE; + break; + case DICTID: + //=== NEEDBITS(32); */ + while (bits < 32) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + strm.adler = state.check = zswap32(hold); + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + state.mode = DICT; + /* falls through */ + case DICT: + if (state.havedict === 0) { + //--- RESTORE() --- + strm.next_out = put; + strm.avail_out = left; + strm.next_in = next; + strm.avail_in = have; + state.hold = hold; + state.bits = bits; + //--- + return Z_NEED_DICT; + } + strm.adler = state.check = 1/*adler32(0L, Z_NULL, 0)*/; + state.mode = TYPE; + /* falls through */ + case TYPE: + if (flush === Z_BLOCK || flush === Z_TREES) { break inf_leave; } + /* falls through */ + case TYPEDO: + if (state.last) { + //--- BYTEBITS() ---// + hold >>>= bits & 7; + bits -= bits & 7; + //---// + state.mode = CHECK; + break; + } + //=== NEEDBITS(3); */ + while (bits < 3) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + state.last = (hold & 0x01)/*BITS(1)*/; + //--- DROPBITS(1) ---// + hold >>>= 1; + bits -= 1; + //---// + + switch ((hold & 
0x03)/*BITS(2)*/) { + case 0: /* stored block */ + //Tracev((stderr, "inflate: stored block%s\n", + // state.last ? " (last)" : "")); + state.mode = STORED; + break; + case 1: /* fixed block */ + fixedtables(state); + //Tracev((stderr, "inflate: fixed codes block%s\n", + // state.last ? " (last)" : "")); + state.mode = LEN_; /* decode codes */ + if (flush === Z_TREES) { + //--- DROPBITS(2) ---// + hold >>>= 2; + bits -= 2; + //---// + break inf_leave; + } + break; + case 2: /* dynamic block */ + //Tracev((stderr, "inflate: dynamic codes block%s\n", + // state.last ? " (last)" : "")); + state.mode = TABLE; + break; + case 3: + strm.msg = 'invalid block type'; + state.mode = BAD; + } + //--- DROPBITS(2) ---// + hold >>>= 2; + bits -= 2; + //---// + break; + case STORED: + //--- BYTEBITS() ---// /* go to byte boundary */ + hold >>>= bits & 7; + bits -= bits & 7; + //---// + //=== NEEDBITS(32); */ + while (bits < 32) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + if ((hold & 0xffff) !== ((hold >>> 16) ^ 0xffff)) { + strm.msg = 'invalid stored block lengths'; + state.mode = BAD; + break; + } + state.length = hold & 0xffff; + //Tracev((stderr, "inflate: stored length %u\n", + // state.length)); + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + state.mode = COPY_; + if (flush === Z_TREES) { break inf_leave; } + /* falls through */ + case COPY_: + state.mode = COPY; + /* falls through */ + case COPY: + copy = state.length; + if (copy) { + if (copy > have) { copy = have; } + if (copy > left) { copy = left; } + if (copy === 0) { break inf_leave; } + //--- zmemcpy(put, next, copy); --- + utils.arraySet(output, input, next, copy, put); + //---// + have -= copy; + next += copy; + left -= copy; + put += copy; + state.length -= copy; + break; + } + //Tracev((stderr, "inflate: stored end\n")); + state.mode = TYPE; + break; + case TABLE: + //=== NEEDBITS(14); */ + while (bits < 14) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + state.nlen = (hold & 0x1f)/*BITS(5)*/ + 257; + //--- DROPBITS(5) ---// + hold >>>= 5; + bits -= 5; + //---// + state.ndist = (hold & 0x1f)/*BITS(5)*/ + 1; + //--- DROPBITS(5) ---// + hold >>>= 5; + bits -= 5; + //---// + state.ncode = (hold & 0x0f)/*BITS(4)*/ + 4; + //--- DROPBITS(4) ---// + hold >>>= 4; + bits -= 4; + //---// +//#ifndef PKZIP_BUG_WORKAROUND + if (state.nlen > 286 || state.ndist > 30) { + strm.msg = 'too many length or distance symbols'; + state.mode = BAD; + break; + } +//#endif + //Tracev((stderr, "inflate: table sizes ok\n")); + state.have = 0; + state.mode = LENLENS; + /* falls through */ + case LENLENS: + while (state.have < state.ncode) { + //=== NEEDBITS(3); + while (bits < 3) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + state.lens[order[state.have++]] = (hold & 0x07);//BITS(3); + //--- DROPBITS(3) ---// + hold >>>= 3; + bits -= 3; + //---// + } + while (state.have < 19) { + state.lens[order[state.have++]] = 0; + } + // We have separate tables & no pointers. 2 commented lines below not needed. 
+ //state.next = state.codes; + //state.lencode = state.next; + // Switch to use dynamic table + state.lencode = state.lendyn; + state.lenbits = 7; + + opts = { bits: state.lenbits }; + ret = inflate_table(CODES, state.lens, 0, 19, state.lencode, 0, state.work, opts); + state.lenbits = opts.bits; + + if (ret) { + strm.msg = 'invalid code lengths set'; + state.mode = BAD; + break; + } + //Tracev((stderr, "inflate: code lengths ok\n")); + state.have = 0; + state.mode = CODELENS; + /* falls through */ + case CODELENS: + while (state.have < state.nlen + state.ndist) { + for (;;) { + here = state.lencode[hold & ((1 << state.lenbits) - 1)];/*BITS(state.lenbits)*/ + here_bits = here >>> 24; + here_op = (here >>> 16) & 0xff; + here_val = here & 0xffff; + + if ((here_bits) <= bits) { break; } + //--- PULLBYTE() ---// + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + //---// + } + if (here_val < 16) { + //--- DROPBITS(here.bits) ---// + hold >>>= here_bits; + bits -= here_bits; + //---// + state.lens[state.have++] = here_val; + } + else { + if (here_val === 16) { + //=== NEEDBITS(here.bits + 2); + n = here_bits + 2; + while (bits < n) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + //--- DROPBITS(here.bits) ---// + hold >>>= here_bits; + bits -= here_bits; + //---// + if (state.have === 0) { + strm.msg = 'invalid bit length repeat'; + state.mode = BAD; + break; + } + len = state.lens[state.have - 1]; + copy = 3 + (hold & 0x03);//BITS(2); + //--- DROPBITS(2) ---// + hold >>>= 2; + bits -= 2; + //---// + } + else if (here_val === 17) { + //=== NEEDBITS(here.bits + 3); + n = here_bits + 3; + while (bits < n) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + //--- DROPBITS(here.bits) ---// + hold >>>= here_bits; + bits -= here_bits; + //---// + len = 0; + copy = 3 + (hold & 0x07);//BITS(3); + //--- DROPBITS(3) ---// + hold >>>= 3; + bits -= 3; + //---// + } + else { + //=== NEEDBITS(here.bits + 7); + n = here_bits + 7; + while (bits < n) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + //--- DROPBITS(here.bits) ---// + hold >>>= here_bits; + bits -= here_bits; + //---// + len = 0; + copy = 11 + (hold & 0x7f);//BITS(7); + //--- DROPBITS(7) ---// + hold >>>= 7; + bits -= 7; + //---// + } + if (state.have + copy > state.nlen + state.ndist) { + strm.msg = 'invalid bit length repeat'; + state.mode = BAD; + break; + } + while (copy--) { + state.lens[state.have++] = len; + } + } + } + + /* handle error breaks in while */ + if (state.mode === BAD) { break; } + + /* check for end-of-block code (better have one) */ + if (state.lens[256] === 0) { + strm.msg = 'invalid code -- missing end-of-block'; + state.mode = BAD; + break; + } + + /* build code tables -- note: do not change the lenbits or distbits + values here (9 and 6) without reading the comments in inftrees.h + concerning the ENOUGH constants, which depend on those values */ + state.lenbits = 9; + + opts = { bits: state.lenbits }; + ret = inflate_table(LENS, state.lens, 0, state.nlen, state.lencode, 0, state.work, opts); + // We have separate tables & no pointers. 2 commented lines below not needed. 
+ // state.next_index = opts.table_index; + state.lenbits = opts.bits; + // state.lencode = state.next; + + if (ret) { + strm.msg = 'invalid literal/lengths set'; + state.mode = BAD; + break; + } + + state.distbits = 6; + //state.distcode.copy(state.codes); + // Switch to use dynamic table + state.distcode = state.distdyn; + opts = { bits: state.distbits }; + ret = inflate_table(DISTS, state.lens, state.nlen, state.ndist, state.distcode, 0, state.work, opts); + // We have separate tables & no pointers. 2 commented lines below not needed. + // state.next_index = opts.table_index; + state.distbits = opts.bits; + // state.distcode = state.next; + + if (ret) { + strm.msg = 'invalid distances set'; + state.mode = BAD; + break; + } + //Tracev((stderr, 'inflate: codes ok\n')); + state.mode = LEN_; + if (flush === Z_TREES) { break inf_leave; } + /* falls through */ + case LEN_: + state.mode = LEN; + /* falls through */ + case LEN: + if (have >= 6 && left >= 258) { + //--- RESTORE() --- + strm.next_out = put; + strm.avail_out = left; + strm.next_in = next; + strm.avail_in = have; + state.hold = hold; + state.bits = bits; + //--- + inflate_fast(strm, _out); + //--- LOAD() --- + put = strm.next_out; + output = strm.output; + left = strm.avail_out; + next = strm.next_in; + input = strm.input; + have = strm.avail_in; + hold = state.hold; + bits = state.bits; + //--- + + if (state.mode === TYPE) { + state.back = -1; + } + break; + } + state.back = 0; + for (;;) { + here = state.lencode[hold & ((1 << state.lenbits) - 1)]; /*BITS(state.lenbits)*/ + here_bits = here >>> 24; + here_op = (here >>> 16) & 0xff; + here_val = here & 0xffff; + + if (here_bits <= bits) { break; } + //--- PULLBYTE() ---// + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + //---// + } + if (here_op && (here_op & 0xf0) === 0) { + last_bits = here_bits; + last_op = here_op; + last_val = here_val; + for (;;) { + here = state.lencode[last_val + + ((hold & ((1 << (last_bits + last_op)) - 1))/*BITS(last.bits + last.op)*/ >> last_bits)]; + here_bits = here >>> 24; + here_op = (here >>> 16) & 0xff; + here_val = here & 0xffff; + + if ((last_bits + here_bits) <= bits) { break; } + //--- PULLBYTE() ---// + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + //---// + } + //--- DROPBITS(last.bits) ---// + hold >>>= last_bits; + bits -= last_bits; + //---// + state.back += last_bits; + } + //--- DROPBITS(here.bits) ---// + hold >>>= here_bits; + bits -= here_bits; + //---// + state.back += here_bits; + state.length = here_val; + if (here_op === 0) { + //Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? 
+ // "inflate: literal '%c'\n" : + // "inflate: literal 0x%02x\n", here.val)); + state.mode = LIT; + break; + } + if (here_op & 32) { + //Tracevv((stderr, "inflate: end of block\n")); + state.back = -1; + state.mode = TYPE; + break; + } + if (here_op & 64) { + strm.msg = 'invalid literal/length code'; + state.mode = BAD; + break; + } + state.extra = here_op & 15; + state.mode = LENEXT; + /* falls through */ + case LENEXT: + if (state.extra) { + //=== NEEDBITS(state.extra); + n = state.extra; + while (bits < n) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + state.length += hold & ((1 << state.extra) - 1)/*BITS(state.extra)*/; + //--- DROPBITS(state.extra) ---// + hold >>>= state.extra; + bits -= state.extra; + //---// + state.back += state.extra; + } + //Tracevv((stderr, "inflate: length %u\n", state.length)); + state.was = state.length; + state.mode = DIST; + /* falls through */ + case DIST: + for (;;) { + here = state.distcode[hold & ((1 << state.distbits) - 1)];/*BITS(state.distbits)*/ + here_bits = here >>> 24; + here_op = (here >>> 16) & 0xff; + here_val = here & 0xffff; + + if ((here_bits) <= bits) { break; } + //--- PULLBYTE() ---// + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + //---// + } + if ((here_op & 0xf0) === 0) { + last_bits = here_bits; + last_op = here_op; + last_val = here_val; + for (;;) { + here = state.distcode[last_val + + ((hold & ((1 << (last_bits + last_op)) - 1))/*BITS(last.bits + last.op)*/ >> last_bits)]; + here_bits = here >>> 24; + here_op = (here >>> 16) & 0xff; + here_val = here & 0xffff; + + if ((last_bits + here_bits) <= bits) { break; } + //--- PULLBYTE() ---// + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + //---// + } + //--- DROPBITS(last.bits) ---// + hold >>>= last_bits; + bits -= last_bits; + //---// + state.back += last_bits; + } + //--- DROPBITS(here.bits) ---// + hold >>>= here_bits; + bits -= here_bits; + //---// + state.back += here_bits; + if (here_op & 64) { + strm.msg = 'invalid distance code'; + state.mode = BAD; + break; + } + state.offset = here_val; + state.extra = (here_op) & 15; + state.mode = DISTEXT; + /* falls through */ + case DISTEXT: + if (state.extra) { + //=== NEEDBITS(state.extra); + n = state.extra; + while (bits < n) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + state.offset += hold & ((1 << state.extra) - 1)/*BITS(state.extra)*/; + //--- DROPBITS(state.extra) ---// + hold >>>= state.extra; + bits -= state.extra; + //---// + state.back += state.extra; + } +//#ifdef INFLATE_STRICT + if (state.offset > state.dmax) { + strm.msg = 'invalid distance too far back'; + state.mode = BAD; + break; + } +//#endif + //Tracevv((stderr, "inflate: distance %u\n", state.offset)); + state.mode = MATCH; + /* falls through */ + case MATCH: + if (left === 0) { break inf_leave; } + copy = _out - left; + if (state.offset > copy) { /* copy from window */ + copy = state.offset - copy; + if (copy > state.whave) { + if (state.sane) { + strm.msg = 'invalid distance too far back'; + state.mode = BAD; + break; + } +// (!) 
This block is disabled in zlib defailts, +// don't enable it for binary compatibility +//#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR +// Trace((stderr, "inflate.c too far\n")); +// copy -= state.whave; +// if (copy > state.length) { copy = state.length; } +// if (copy > left) { copy = left; } +// left -= copy; +// state.length -= copy; +// do { +// output[put++] = 0; +// } while (--copy); +// if (state.length === 0) { state.mode = LEN; } +// break; +//#endif + } + if (copy > state.wnext) { + copy -= state.wnext; + from = state.wsize - copy; + } + else { + from = state.wnext - copy; + } + if (copy > state.length) { copy = state.length; } + from_source = state.window; + } + else { /* copy from output */ + from_source = output; + from = put - state.offset; + copy = state.length; + } + if (copy > left) { copy = left; } + left -= copy; + state.length -= copy; + do { + output[put++] = from_source[from++]; + } while (--copy); + if (state.length === 0) { state.mode = LEN; } + break; + case LIT: + if (left === 0) { break inf_leave; } + output[put++] = state.length; + left--; + state.mode = LEN; + break; + case CHECK: + if (state.wrap) { + //=== NEEDBITS(32); + while (bits < 32) { + if (have === 0) { break inf_leave; } + have--; + // Use '|' insdead of '+' to make sure that result is signed + hold |= input[next++] << bits; + bits += 8; + } + //===// + _out -= left; + strm.total_out += _out; + state.total += _out; + if (_out) { + strm.adler = state.check = + /*UPDATE(state.check, put - _out, _out);*/ + (state.flags ? crc32(state.check, output, _out, put - _out) : adler32(state.check, output, _out, put - _out)); + + } + _out = left; + // NB: crc32 stored as signed 32-bit int, zswap32 returns signed too + if ((state.flags ? hold : zswap32(hold)) !== state.check) { + strm.msg = 'incorrect data check'; + state.mode = BAD; + break; + } + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + //Tracev((stderr, "inflate: check matches trailer\n")); + } + state.mode = LENGTH; + /* falls through */ + case LENGTH: + if (state.wrap && state.flags) { + //=== NEEDBITS(32); + while (bits < 32) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + if (hold !== (state.total & 0xffffffff)) { + strm.msg = 'incorrect length check'; + state.mode = BAD; + break; + } + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + //Tracev((stderr, "inflate: length matches trailer\n")); + } + state.mode = DONE; + /* falls through */ + case DONE: + ret = Z_STREAM_END; + break inf_leave; + case BAD: + ret = Z_DATA_ERROR; + break inf_leave; + case MEM: + return Z_MEM_ERROR; + case SYNC: + /* falls through */ + default: + return Z_STREAM_ERROR; + } + } + + // inf_leave <- here is real place for "goto inf_leave", emulated via "break inf_leave" + + /* + Return from inflate(), updating the total counts and the check value. + If there was no progress during the inflate() call, return a buffer + error. Call updatewindow() to create and/or update the window state. + Note: a memory error from inflate() is non-recoverable. 
+ */ + + //--- RESTORE() --- + strm.next_out = put; + strm.avail_out = left; + strm.next_in = next; + strm.avail_in = have; + state.hold = hold; + state.bits = bits; + //--- + + if (state.wsize || (_out !== strm.avail_out && state.mode < BAD && + (state.mode < CHECK || flush !== Z_FINISH))) { + if (updatewindow(strm, strm.output, strm.next_out, _out - strm.avail_out)) { + state.mode = MEM; + return Z_MEM_ERROR; + } + } + _in -= strm.avail_in; + _out -= strm.avail_out; + strm.total_in += _in; + strm.total_out += _out; + state.total += _out; + if (state.wrap && _out) { + strm.adler = state.check = /*UPDATE(state.check, strm.next_out - _out, _out);*/ + (state.flags ? crc32(state.check, output, _out, strm.next_out - _out) : adler32(state.check, output, _out, strm.next_out - _out)); + } + strm.data_type = state.bits + (state.last ? 64 : 0) + + (state.mode === TYPE ? 128 : 0) + + (state.mode === LEN_ || state.mode === COPY_ ? 256 : 0); + if (((_in === 0 && _out === 0) || flush === Z_FINISH) && ret === Z_OK) { + ret = Z_BUF_ERROR; + } + return ret; +} + +function inflateEnd(strm) { + + if (!strm || !strm.state /*|| strm->zfree == (free_func)0*/) { + return Z_STREAM_ERROR; + } + + var state = strm.state; + if (state.window) { + state.window = null; + } + strm.state = null; + return Z_OK; +} + +function inflateGetHeader(strm, head) { + var state; + + /* check state */ + if (!strm || !strm.state) { return Z_STREAM_ERROR; } + state = strm.state; + if ((state.wrap & 2) === 0) { return Z_STREAM_ERROR; } + + /* save header structure */ + state.head = head; + head.done = false; + return Z_OK; +} + +function inflateSetDictionary(strm, dictionary) { + var dictLength = dictionary.length; + + var state; + var dictid; + var ret; + + /* check state */ + if (!strm /* == Z_NULL */ || !strm.state /* == Z_NULL */) { return Z_STREAM_ERROR; } + state = strm.state; + + if (state.wrap !== 0 && state.mode !== DICT) { + return Z_STREAM_ERROR; + } + + /* check for correct dictionary identifier */ + if (state.mode === DICT) { + dictid = 1; /* adler32(0, null, 0)*/ + /* dictid = adler32(dictid, dictionary, dictLength); */ + dictid = adler32(dictid, dictionary, dictLength, 0); + if (dictid !== state.check) { + return Z_DATA_ERROR; + } + } + /* copy dictionary to window using updatewindow(), which will amend the + existing dictionary if appropriate */ + ret = updatewindow(strm, dictionary, dictLength, dictLength); + if (ret) { + state.mode = MEM; + return Z_MEM_ERROR; + } + state.havedict = 1; + // Tracev((stderr, "inflate: dictionary set\n")); + return Z_OK; +} + +exports.inflateReset = inflateReset; +exports.inflateReset2 = inflateReset2; +exports.inflateResetKeep = inflateResetKeep; +exports.inflateInit = inflateInit; +exports.inflateInit2 = inflateInit2; +exports.inflate = inflate; +exports.inflateEnd = inflateEnd; +exports.inflateGetHeader = inflateGetHeader; +exports.inflateSetDictionary = inflateSetDictionary; +exports.inflateInfo = 'pako inflate (from Nodeca project)'; + +/* Not implemented +exports.inflateCopy = inflateCopy; +exports.inflateGetDictionary = inflateGetDictionary; +exports.inflateMark = inflateMark; +exports.inflatePrime = inflatePrime; +exports.inflateSync = inflateSync; +exports.inflateSyncPoint = inflateSyncPoint; +exports.inflateUndermine = inflateUndermine; +*/ + +},{"../utils/common":24,"./adler32":26,"./crc32":28,"./inffast":31,"./inftrees":33}],33:[function(require,module,exports){ +'use strict'; + + +var utils = require('../utils/common'); + +var MAXBITS = 15; +var ENOUGH_LENS = 852; +var 
ENOUGH_DISTS = 592; +//var ENOUGH = (ENOUGH_LENS+ENOUGH_DISTS); + +var CODES = 0; +var LENS = 1; +var DISTS = 2; + +var lbase = [ /* Length codes 257..285 base */ + 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, + 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0 +]; + +var lext = [ /* Length codes 257..285 extra */ + 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18, + 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 72, 78 +]; + +var dbase = [ /* Distance codes 0..29 base */ + 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, + 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, + 8193, 12289, 16385, 24577, 0, 0 +]; + +var dext = [ /* Distance codes 0..29 extra */ + 16, 16, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, + 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, + 28, 28, 29, 29, 64, 64 +]; + +module.exports = function inflate_table(type, lens, lens_index, codes, table, table_index, work, opts) +{ + var bits = opts.bits; + //here = opts.here; /* table entry for duplication */ + + var len = 0; /* a code's length in bits */ + var sym = 0; /* index of code symbols */ + var min = 0, max = 0; /* minimum and maximum code lengths */ + var root = 0; /* number of index bits for root table */ + var curr = 0; /* number of index bits for current table */ + var drop = 0; /* code bits to drop for sub-table */ + var left = 0; /* number of prefix codes available */ + var used = 0; /* code entries in table used */ + var huff = 0; /* Huffman code */ + var incr; /* for incrementing code, index */ + var fill; /* index for replicating entries */ + var low; /* low bits for current root entry */ + var mask; /* mask for low root bits */ + var next; /* next available space in table */ + var base = null; /* base value table to use */ + var base_index = 0; +// var shoextra; /* extra bits table to use */ + var end; /* use base and extra for symbol > end */ + var count = new utils.Buf16(MAXBITS + 1); //[MAXBITS+1]; /* number of codes of each length */ + var offs = new utils.Buf16(MAXBITS + 1); //[MAXBITS+1]; /* offsets in table for each length */ + var extra = null; + var extra_index = 0; + + var here_bits, here_op, here_val; + + /* + Process a set of code lengths to create a canonical Huffman code. The + code lengths are lens[0..codes-1]. Each length corresponds to the + symbols 0..codes-1. The Huffman code is generated by first sorting the + symbols by length from short to long, and retaining the symbol order + for codes with equal lengths. Then the code starts with all zero bits + for the first code of the shortest length, and the codes are integer + increments for the same length, and zeros are appended as the length + increases. For the deflate format, these bits are stored backwards + from their more natural integer increment ordering, and so when the + decoding tables are built in the large loop below, the integer codes + are incremented backwards. + + This routine assumes, but does not check, that all of the entries in + lens[] are in the range 0..MAXBITS. The caller must assure this. + 1..MAXBITS is interpreted as that code length. zero means that that + symbol does not occur in this code. + + The codes are sorted by computing a count of codes for each length, + creating from that a table of starting indices for each length in the + sorted table, and then entering the symbols in order in the sorted + table. The sorted table is work[], with that space being provided by + the caller. + + The length counts are used for other purposes as well, i.e. 
finding + the minimum and maximum length codes, determining if there are any + codes at all, checking for a valid set of lengths, and looking ahead + at length counts to determine sub-table sizes when building the + decoding tables. + */ + + /* accumulate lengths for codes (assumes lens[] all in 0..MAXBITS) */ + for (len = 0; len <= MAXBITS; len++) { + count[len] = 0; + } + for (sym = 0; sym < codes; sym++) { + count[lens[lens_index + sym]]++; + } + + /* bound code lengths, force root to be within code lengths */ + root = bits; + for (max = MAXBITS; max >= 1; max--) { + if (count[max] !== 0) { break; } + } + if (root > max) { + root = max; + } + if (max === 0) { /* no symbols to code at all */ + //table.op[opts.table_index] = 64; //here.op = (var char)64; /* invalid code marker */ + //table.bits[opts.table_index] = 1; //here.bits = (var char)1; + //table.val[opts.table_index++] = 0; //here.val = (var short)0; + table[table_index++] = (1 << 24) | (64 << 16) | 0; + + + //table.op[opts.table_index] = 64; + //table.bits[opts.table_index] = 1; + //table.val[opts.table_index++] = 0; + table[table_index++] = (1 << 24) | (64 << 16) | 0; + + opts.bits = 1; + return 0; /* no symbols, but wait for decoding to report error */ + } + for (min = 1; min < max; min++) { + if (count[min] !== 0) { break; } + } + if (root < min) { + root = min; + } + + /* check for an over-subscribed or incomplete set of lengths */ + left = 1; + for (len = 1; len <= MAXBITS; len++) { + left <<= 1; + left -= count[len]; + if (left < 0) { + return -1; + } /* over-subscribed */ + } + if (left > 0 && (type === CODES || max !== 1)) { + return -1; /* incomplete set */ + } + + /* generate offsets into symbol table for each length for sorting */ + offs[1] = 0; + for (len = 1; len < MAXBITS; len++) { + offs[len + 1] = offs[len] + count[len]; + } + + /* sort symbols by length, by symbol order within each length */ + for (sym = 0; sym < codes; sym++) { + if (lens[lens_index + sym] !== 0) { + work[offs[lens[lens_index + sym]]++] = sym; + } + } + + /* + Create and fill in decoding tables. In this loop, the table being + filled is at next and has curr index bits. The code being used is huff + with length len. That code is converted to an index by dropping drop + bits off of the bottom. For codes where len is less than drop + curr, + those top drop + curr - len bits are incremented through all values to + fill the table with replicated entries. + + root is the number of index bits for the root table. When len exceeds + root, sub-tables are created pointed to by the root entry with an index + of the low root bits of huff. This is saved in low to check for when a + new sub-table should be started. drop is zero when the root table is + being filled, and drop is root when sub-tables are being filled. + + When a new sub-table is needed, it is necessary to look ahead in the + code lengths to determine what size sub-table is needed. The length + counts are used for this, and so count[] is decremented as codes are + entered in the tables. + + used keeps track of how many table entries have been allocated from the + provided *table space. It is checked for LENS and DIST tables against + the constants ENOUGH_LENS and ENOUGH_DISTS to guard against changes in + the initial root table size constants. See the comments in inftrees.h + for more information. + + sym increments through all symbols, and the loop terminates when + all codes of length max, i.e. all codes, have been processed. 
This + routine permits incomplete codes, so another loop after this one fills + in the rest of the decoding tables with invalid code markers. + */ + + /* set up for code type */ + // poor man optimization - use if-else instead of switch, + // to avoid deopts in old v8 + if (type === CODES) { + base = extra = work; /* dummy value--not used */ + end = 19; + + } else if (type === LENS) { + base = lbase; + base_index -= 257; + extra = lext; + extra_index -= 257; + end = 256; + + } else { /* DISTS */ + base = dbase; + extra = dext; + end = -1; + } + + /* initialize opts for loop */ + huff = 0; /* starting code */ + sym = 0; /* starting code symbol */ + len = min; /* starting code length */ + next = table_index; /* current table to fill in */ + curr = root; /* current table index bits */ + drop = 0; /* current bits to drop from code for index */ + low = -1; /* trigger new sub-table when len > root */ + used = 1 << root; /* use root table entries */ + mask = used - 1; /* mask for comparing low */ + + /* check available table space */ + if ((type === LENS && used > ENOUGH_LENS) || + (type === DISTS && used > ENOUGH_DISTS)) { + return 1; + } + + var i = 0; + /* process all codes and make table entries */ + for (;;) { + i++; + /* create table entry */ + here_bits = len - drop; + if (work[sym] < end) { + here_op = 0; + here_val = work[sym]; + } + else if (work[sym] > end) { + here_op = extra[extra_index + work[sym]]; + here_val = base[base_index + work[sym]]; + } + else { + here_op = 32 + 64; /* end of block */ + here_val = 0; + } + + /* replicate for those indices with low len bits equal to huff */ + incr = 1 << (len - drop); + fill = 1 << curr; + min = fill; /* save offset to next table */ + do { + fill -= incr; + table[next + (huff >> drop) + fill] = (here_bits << 24) | (here_op << 16) | here_val |0; + } while (fill !== 0); + + /* backwards increment the len-bit code huff */ + incr = 1 << (len - 1); + while (huff & incr) { + incr >>= 1; + } + if (incr !== 0) { + huff &= incr - 1; + huff += incr; + } else { + huff = 0; + } + + /* go to next symbol, update count, len */ + sym++; + if (--count[len] === 0) { + if (len === max) { break; } + len = lens[lens_index + work[sym]]; + } + + /* create new sub-table if needed */ + if (len > root && (huff & mask) !== low) { + /* if first time, transition to sub-tables */ + if (drop === 0) { + drop = root; + } + + /* increment past last table */ + next += min; /* here min is 1 << curr */ + + /* determine length of next table */ + curr = len - drop; + left = 1 << curr; + while (curr + drop < max) { + left -= count[curr + drop]; + if (left <= 0) { break; } + curr++; + left <<= 1; + } + + /* check for enough space */ + used += 1 << curr; + if ((type === LENS && used > ENOUGH_LENS) || + (type === DISTS && used > ENOUGH_DISTS)) { + return 1; + } + + /* point entry in root table to sub-table */ + low = huff & mask; + /*table.op[low] = curr; + table.bits[low] = root; + table.val[low] = next - opts.table_index;*/ + table[low] = (root << 24) | (curr << 16) | (next - table_index) |0; + } + } + + /* fill in remaining table entry if code is incomplete (guaranteed to have + at most one remaining entry, since if the code is incomplete, the + maximum code length that was allowed to get this far is one bit) */ + if (huff !== 0) { + //table.op[next + huff] = 64; /* invalid code marker */ + //table.bits[next + huff] = len - drop; + //table.val[next + huff] = 0; + table[next + huff] = ((len - drop) << 24) | (64 << 16) |0; + } + + /* set return parameters */ + //opts.table_index += 
used; + opts.bits = root; + return 0; +}; + +},{"../utils/common":24}],34:[function(require,module,exports){ +'use strict'; + +module.exports = { + 2: 'need dictionary', /* Z_NEED_DICT 2 */ + 1: 'stream end', /* Z_STREAM_END 1 */ + 0: '', /* Z_OK 0 */ + '-1': 'file error', /* Z_ERRNO (-1) */ + '-2': 'stream error', /* Z_STREAM_ERROR (-2) */ + '-3': 'data error', /* Z_DATA_ERROR (-3) */ + '-4': 'insufficient memory', /* Z_MEM_ERROR (-4) */ + '-5': 'buffer error', /* Z_BUF_ERROR (-5) */ + '-6': 'incompatible version' /* Z_VERSION_ERROR (-6) */ +}; + +},{}],35:[function(require,module,exports){ +'use strict'; + + +var utils = require('../utils/common'); + +/* Public constants ==========================================================*/ +/* ===========================================================================*/ + + +//var Z_FILTERED = 1; +//var Z_HUFFMAN_ONLY = 2; +//var Z_RLE = 3; +var Z_FIXED = 4; +//var Z_DEFAULT_STRATEGY = 0; + +/* Possible values of the data_type field (though see inflate()) */ +var Z_BINARY = 0; +var Z_TEXT = 1; +//var Z_ASCII = 1; // = Z_TEXT +var Z_UNKNOWN = 2; + +/*============================================================================*/ + + +function zero(buf) { var len = buf.length; while (--len >= 0) { buf[len] = 0; } } + +// From zutil.h + +var STORED_BLOCK = 0; +var STATIC_TREES = 1; +var DYN_TREES = 2; +/* The three kinds of block type */ + +var MIN_MATCH = 3; +var MAX_MATCH = 258; +/* The minimum and maximum match lengths */ + +// From deflate.h +/* =========================================================================== + * Internal compression state. + */ + +var LENGTH_CODES = 29; +/* number of length codes, not counting the special END_BLOCK code */ + +var LITERALS = 256; +/* number of literal bytes 0..255 */ + +var L_CODES = LITERALS + 1 + LENGTH_CODES; +/* number of Literal or Length codes, including the END_BLOCK code */ + +var D_CODES = 30; +/* number of distance codes */ + +var BL_CODES = 19; +/* number of codes used to transfer the bit lengths */ + +var HEAP_SIZE = 2 * L_CODES + 1; +/* maximum heap size */ + +var MAX_BITS = 15; +/* All codes must not exceed MAX_BITS bits */ + +var Buf_size = 16; +/* size of bit buffer in bi_buf */ + + +/* =========================================================================== + * Constants + */ + +var MAX_BL_BITS = 7; +/* Bit length codes must not exceed MAX_BL_BITS bits */ + +var END_BLOCK = 256; +/* end of block literal code */ + +var REP_3_6 = 16; +/* repeat previous bit length 3-6 times (2 bits of repeat count) */ + +var REPZ_3_10 = 17; +/* repeat a zero length 3-10 times (3 bits of repeat count) */ + +var REPZ_11_138 = 18; +/* repeat a zero length 11-138 times (7 bits of repeat count) */ + +/* eslint-disable comma-spacing,array-bracket-spacing */ +var extra_lbits = /* extra bits for each length code */ + [0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0]; + +var extra_dbits = /* extra bits for each distance code */ + [0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13]; + +var extra_blbits = /* extra bits for each bit length code */ + [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7]; + +var bl_order = + [16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15]; +/* eslint-enable comma-spacing,array-bracket-spacing */ + +/* The lengths of the bit length codes are sent in order of decreasing + * probability, to avoid transmitting the lengths for unused bit length codes. + */ + +/* =========================================================================== + * Local data. 
These are initialized only once. + */ + +// We pre-fill arrays with 0 to avoid uninitialized gaps + +var DIST_CODE_LEN = 512; /* see definition of array dist_code below */ + +// !!!! Use flat array insdead of structure, Freq = i*2, Len = i*2+1 +var static_ltree = new Array((L_CODES + 2) * 2); +zero(static_ltree); +/* The static literal tree. Since the bit lengths are imposed, there is no + * need for the L_CODES extra codes used during heap construction. However + * The codes 286 and 287 are needed to build a canonical tree (see _tr_init + * below). + */ + +var static_dtree = new Array(D_CODES * 2); +zero(static_dtree); +/* The static distance tree. (Actually a trivial tree since all codes use + * 5 bits.) + */ + +var _dist_code = new Array(DIST_CODE_LEN); +zero(_dist_code); +/* Distance codes. The first 256 values correspond to the distances + * 3 .. 258, the last 256 values correspond to the top 8 bits of + * the 15 bit distances. + */ + +var _length_code = new Array(MAX_MATCH - MIN_MATCH + 1); +zero(_length_code); +/* length code for each normalized match length (0 == MIN_MATCH) */ + +var base_length = new Array(LENGTH_CODES); +zero(base_length); +/* First normalized length for each code (0 = MIN_MATCH) */ + +var base_dist = new Array(D_CODES); +zero(base_dist); +/* First normalized distance for each code (0 = distance of 1) */ + + +function StaticTreeDesc(static_tree, extra_bits, extra_base, elems, max_length) { + + this.static_tree = static_tree; /* static tree or NULL */ + this.extra_bits = extra_bits; /* extra bits for each code or NULL */ + this.extra_base = extra_base; /* base index for extra_bits */ + this.elems = elems; /* max number of elements in the tree */ + this.max_length = max_length; /* max bit length for the codes */ + + // show if `static_tree` has data or dummy - needed for monomorphic objects + this.has_stree = static_tree && static_tree.length; +} + + +var static_l_desc; +var static_d_desc; +var static_bl_desc; + + +function TreeDesc(dyn_tree, stat_desc) { + this.dyn_tree = dyn_tree; /* the dynamic tree */ + this.max_code = 0; /* largest code with non zero frequency */ + this.stat_desc = stat_desc; /* the corresponding static tree */ +} + + + +function d_code(dist) { + return dist < 256 ? _dist_code[dist] : _dist_code[256 + (dist >>> 7)]; +} + + +/* =========================================================================== + * Output a short LSB first on the stream. + * IN assertion: there is enough room in pendingBuf. + */ +function put_short(s, w) { +// put_byte(s, (uch)((w) & 0xff)); +// put_byte(s, (uch)((ush)(w) >> 8)); + s.pending_buf[s.pending++] = (w) & 0xff; + s.pending_buf[s.pending++] = (w >>> 8) & 0xff; +} + + +/* =========================================================================== + * Send a value on a given number of bits. + * IN assertion: length <= 16 and value fits in length bits. 
+ */ +function send_bits(s, value, length) { + if (s.bi_valid > (Buf_size - length)) { + s.bi_buf |= (value << s.bi_valid) & 0xffff; + put_short(s, s.bi_buf); + s.bi_buf = value >> (Buf_size - s.bi_valid); + s.bi_valid += length - Buf_size; + } else { + s.bi_buf |= (value << s.bi_valid) & 0xffff; + s.bi_valid += length; + } +} + + +function send_code(s, c, tree) { + send_bits(s, tree[c * 2]/*.Code*/, tree[c * 2 + 1]/*.Len*/); +} + + +/* =========================================================================== + * Reverse the first len bits of a code, using straightforward code (a faster + * method would use a table) + * IN assertion: 1 <= len <= 15 + */ +function bi_reverse(code, len) { + var res = 0; + do { + res |= code & 1; + code >>>= 1; + res <<= 1; + } while (--len > 0); + return res >>> 1; +} + + +/* =========================================================================== + * Flush the bit buffer, keeping at most 7 bits in it. + */ +function bi_flush(s) { + if (s.bi_valid === 16) { + put_short(s, s.bi_buf); + s.bi_buf = 0; + s.bi_valid = 0; + + } else if (s.bi_valid >= 8) { + s.pending_buf[s.pending++] = s.bi_buf & 0xff; + s.bi_buf >>= 8; + s.bi_valid -= 8; + } +} + + +/* =========================================================================== + * Compute the optimal bit lengths for a tree and update the total bit length + * for the current block. + * IN assertion: the fields freq and dad are set, heap[heap_max] and + * above are the tree nodes sorted by increasing frequency. + * OUT assertions: the field len is set to the optimal bit length, the + * array bl_count contains the frequencies for each bit length. + * The length opt_len is updated; static_len is also updated if stree is + * not null. + */ +function gen_bitlen(s, desc) +// deflate_state *s; +// tree_desc *desc; /* the tree descriptor */ +{ + var tree = desc.dyn_tree; + var max_code = desc.max_code; + var stree = desc.stat_desc.static_tree; + var has_stree = desc.stat_desc.has_stree; + var extra = desc.stat_desc.extra_bits; + var base = desc.stat_desc.extra_base; + var max_length = desc.stat_desc.max_length; + var h; /* heap index */ + var n, m; /* iterate over the tree elements */ + var bits; /* bit length */ + var xbits; /* extra bits */ + var f; /* frequency */ + var overflow = 0; /* number of elements with bit length too large */ + + for (bits = 0; bits <= MAX_BITS; bits++) { + s.bl_count[bits] = 0; + } + + /* In a first pass, compute the optimal bit lengths (which may + * overflow in the case of the bit length tree). 
+ */ + tree[s.heap[s.heap_max] * 2 + 1]/*.Len*/ = 0; /* root of the heap */ + + for (h = s.heap_max + 1; h < HEAP_SIZE; h++) { + n = s.heap[h]; + bits = tree[tree[n * 2 + 1]/*.Dad*/ * 2 + 1]/*.Len*/ + 1; + if (bits > max_length) { + bits = max_length; + overflow++; + } + tree[n * 2 + 1]/*.Len*/ = bits; + /* We overwrite tree[n].Dad which is no longer needed */ + + if (n > max_code) { continue; } /* not a leaf node */ + + s.bl_count[bits]++; + xbits = 0; + if (n >= base) { + xbits = extra[n - base]; + } + f = tree[n * 2]/*.Freq*/; + s.opt_len += f * (bits + xbits); + if (has_stree) { + s.static_len += f * (stree[n * 2 + 1]/*.Len*/ + xbits); + } + } + if (overflow === 0) { return; } + + // Trace((stderr,"\nbit length overflow\n")); + /* This happens for example on obj2 and pic of the Calgary corpus */ + + /* Find the first bit length which could increase: */ + do { + bits = max_length - 1; + while (s.bl_count[bits] === 0) { bits--; } + s.bl_count[bits]--; /* move one leaf down the tree */ + s.bl_count[bits + 1] += 2; /* move one overflow item as its brother */ + s.bl_count[max_length]--; + /* The brother of the overflow item also moves one step up, + * but this does not affect bl_count[max_length] + */ + overflow -= 2; + } while (overflow > 0); + + /* Now recompute all bit lengths, scanning in increasing frequency. + * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all + * lengths instead of fixing only the wrong ones. This idea is taken + * from 'ar' written by Haruhiko Okumura.) + */ + for (bits = max_length; bits !== 0; bits--) { + n = s.bl_count[bits]; + while (n !== 0) { + m = s.heap[--h]; + if (m > max_code) { continue; } + if (tree[m * 2 + 1]/*.Len*/ !== bits) { + // Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits)); + s.opt_len += (bits - tree[m * 2 + 1]/*.Len*/) * tree[m * 2]/*.Freq*/; + tree[m * 2 + 1]/*.Len*/ = bits; + } + n--; + } + } +} + + +/* =========================================================================== + * Generate the codes for a given tree and bit counts (which need not be + * optimal). + * IN assertion: the array bl_count contains the bit length statistics for + * the given tree and the field len is set for all tree elements. + * OUT assertion: the field code is set for all tree elements of non + * zero code length. + */ +function gen_codes(tree, max_code, bl_count) +// ct_data *tree; /* the tree to decorate */ +// int max_code; /* largest code with non zero frequency */ +// ushf *bl_count; /* number of codes at each bit length */ +{ + var next_code = new Array(MAX_BITS + 1); /* next code value for each bit length */ + var code = 0; /* running code value */ + var bits; /* bit index */ + var n; /* code index */ + + /* The distribution counts are first used to generate the code values + * without bit reversal. + */ + for (bits = 1; bits <= MAX_BITS; bits++) { + next_code[bits] = code = (code + bl_count[bits - 1]) << 1; + } + /* Check that the bit counts in bl_count are consistent. The last code + * must be all ones. 
+ */ + //Assert (code + bl_count[MAX_BITS]-1 == (1< length code (0..28) */ + length = 0; + for (code = 0; code < LENGTH_CODES - 1; code++) { + base_length[code] = length; + for (n = 0; n < (1 << extra_lbits[code]); n++) { + _length_code[length++] = code; + } + } + //Assert (length == 256, "tr_static_init: length != 256"); + /* Note that the length 255 (match length 258) can be represented + * in two different ways: code 284 + 5 bits or code 285, so we + * overwrite length_code[255] to use the best encoding: + */ + _length_code[length - 1] = code; + + /* Initialize the mapping dist (0..32K) -> dist code (0..29) */ + dist = 0; + for (code = 0; code < 16; code++) { + base_dist[code] = dist; + for (n = 0; n < (1 << extra_dbits[code]); n++) { + _dist_code[dist++] = code; + } + } + //Assert (dist == 256, "tr_static_init: dist != 256"); + dist >>= 7; /* from now on, all distances are divided by 128 */ + for (; code < D_CODES; code++) { + base_dist[code] = dist << 7; + for (n = 0; n < (1 << (extra_dbits[code] - 7)); n++) { + _dist_code[256 + dist++] = code; + } + } + //Assert (dist == 256, "tr_static_init: 256+dist != 512"); + + /* Construct the codes of the static literal tree */ + for (bits = 0; bits <= MAX_BITS; bits++) { + bl_count[bits] = 0; + } + + n = 0; + while (n <= 143) { + static_ltree[n * 2 + 1]/*.Len*/ = 8; + n++; + bl_count[8]++; + } + while (n <= 255) { + static_ltree[n * 2 + 1]/*.Len*/ = 9; + n++; + bl_count[9]++; + } + while (n <= 279) { + static_ltree[n * 2 + 1]/*.Len*/ = 7; + n++; + bl_count[7]++; + } + while (n <= 287) { + static_ltree[n * 2 + 1]/*.Len*/ = 8; + n++; + bl_count[8]++; + } + /* Codes 286 and 287 do not exist, but we must include them in the + * tree construction to get a canonical Huffman tree (longest code + * all ones) + */ + gen_codes(static_ltree, L_CODES + 1, bl_count); + + /* The static distance tree is trivial: */ + for (n = 0; n < D_CODES; n++) { + static_dtree[n * 2 + 1]/*.Len*/ = 5; + static_dtree[n * 2]/*.Code*/ = bi_reverse(n, 5); + } + + // Now data ready and we can init static trees + static_l_desc = new StaticTreeDesc(static_ltree, extra_lbits, LITERALS + 1, L_CODES, MAX_BITS); + static_d_desc = new StaticTreeDesc(static_dtree, extra_dbits, 0, D_CODES, MAX_BITS); + static_bl_desc = new StaticTreeDesc(new Array(0), extra_blbits, 0, BL_CODES, MAX_BL_BITS); + + //static_init_done = true; +} + + +/* =========================================================================== + * Initialize a new block. + */ +function init_block(s) { + var n; /* iterates over tree elements */ + + /* Initialize the trees. */ + for (n = 0; n < L_CODES; n++) { s.dyn_ltree[n * 2]/*.Freq*/ = 0; } + for (n = 0; n < D_CODES; n++) { s.dyn_dtree[n * 2]/*.Freq*/ = 0; } + for (n = 0; n < BL_CODES; n++) { s.bl_tree[n * 2]/*.Freq*/ = 0; } + + s.dyn_ltree[END_BLOCK * 2]/*.Freq*/ = 1; + s.opt_len = s.static_len = 0; + s.last_lit = s.matches = 0; +} + + +/* =========================================================================== + * Flush the bit buffer and align the output on a byte boundary + */ +function bi_windup(s) +{ + if (s.bi_valid > 8) { + put_short(s, s.bi_buf); + } else if (s.bi_valid > 0) { + //put_byte(s, (Byte)s->bi_buf); + s.pending_buf[s.pending++] = s.bi_buf; + } + s.bi_buf = 0; + s.bi_valid = 0; +} + +/* =========================================================================== + * Copy a stored block, storing first the length and its + * one's complement if requested. 
+ */ +function copy_block(s, buf, len, header) +//DeflateState *s; +//charf *buf; /* the input data */ +//unsigned len; /* its length */ +//int header; /* true if block header must be written */ +{ + bi_windup(s); /* align on byte boundary */ + + if (header) { + put_short(s, len); + put_short(s, ~len); + } +// while (len--) { +// put_byte(s, *buf++); +// } + utils.arraySet(s.pending_buf, s.window, buf, len, s.pending); + s.pending += len; +} + +/* =========================================================================== + * Compares to subtrees, using the tree depth as tie breaker when + * the subtrees have equal frequency. This minimizes the worst case length. + */ +function smaller(tree, n, m, depth) { + var _n2 = n * 2; + var _m2 = m * 2; + return (tree[_n2]/*.Freq*/ < tree[_m2]/*.Freq*/ || + (tree[_n2]/*.Freq*/ === tree[_m2]/*.Freq*/ && depth[n] <= depth[m])); +} + +/* =========================================================================== + * Restore the heap property by moving down the tree starting at node k, + * exchanging a node with the smallest of its two sons if necessary, stopping + * when the heap property is re-established (each father smaller than its + * two sons). + */ +function pqdownheap(s, tree, k) +// deflate_state *s; +// ct_data *tree; /* the tree to restore */ +// int k; /* node to move down */ +{ + var v = s.heap[k]; + var j = k << 1; /* left son of k */ + while (j <= s.heap_len) { + /* Set j to the smallest of the two sons: */ + if (j < s.heap_len && + smaller(tree, s.heap[j + 1], s.heap[j], s.depth)) { + j++; + } + /* Exit if v is smaller than both sons */ + if (smaller(tree, v, s.heap[j], s.depth)) { break; } + + /* Exchange v with the smallest son */ + s.heap[k] = s.heap[j]; + k = j; + + /* And continue down the tree, setting j to the left son of k */ + j <<= 1; + } + s.heap[k] = v; +} + + +// inlined manually +// var SMALLEST = 1; + +/* =========================================================================== + * Send the block data compressed using the given Huffman trees + */ +function compress_block(s, ltree, dtree) +// deflate_state *s; +// const ct_data *ltree; /* literal tree */ +// const ct_data *dtree; /* distance tree */ +{ + var dist; /* distance of matched string */ + var lc; /* match length or unmatched char (if dist == 0) */ + var lx = 0; /* running index in l_buf */ + var code; /* the code to send */ + var extra; /* number of extra bits to send */ + + if (s.last_lit !== 0) { + do { + dist = (s.pending_buf[s.d_buf + lx * 2] << 8) | (s.pending_buf[s.d_buf + lx * 2 + 1]); + lc = s.pending_buf[s.l_buf + lx]; + lx++; + + if (dist === 0) { + send_code(s, lc, ltree); /* send a literal byte */ + //Tracecv(isgraph(lc), (stderr," '%c' ", lc)); + } else { + /* Here, lc is the match length - MIN_MATCH */ + code = _length_code[lc]; + send_code(s, code + LITERALS + 1, ltree); /* send the length code */ + extra = extra_lbits[code]; + if (extra !== 0) { + lc -= base_length[code]; + send_bits(s, lc, extra); /* send the extra length bits */ + } + dist--; /* dist is now the match distance - 1 */ + code = d_code(dist); + //Assert (code < D_CODES, "bad d_code"); + + send_code(s, code, dtree); /* send the distance code */ + extra = extra_dbits[code]; + if (extra !== 0) { + dist -= base_dist[code]; + send_bits(s, dist, extra); /* send the extra distance bits */ + } + } /* literal or match pair ? 
*/ + + /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */ + //Assert((uInt)(s->pending) < s->lit_bufsize + 2*lx, + // "pendingBuf overflow"); + + } while (lx < s.last_lit); + } + + send_code(s, END_BLOCK, ltree); +} + + +/* =========================================================================== + * Construct one Huffman tree and assigns the code bit strings and lengths. + * Update the total bit length for the current block. + * IN assertion: the field freq is set for all tree elements. + * OUT assertions: the fields len and code are set to the optimal bit length + * and corresponding code. The length opt_len is updated; static_len is + * also updated if stree is not null. The field max_code is set. + */ +function build_tree(s, desc) +// deflate_state *s; +// tree_desc *desc; /* the tree descriptor */ +{ + var tree = desc.dyn_tree; + var stree = desc.stat_desc.static_tree; + var has_stree = desc.stat_desc.has_stree; + var elems = desc.stat_desc.elems; + var n, m; /* iterate over heap elements */ + var max_code = -1; /* largest code with non zero frequency */ + var node; /* new node being created */ + + /* Construct the initial heap, with least frequent element in + * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1]. + * heap[0] is not used. + */ + s.heap_len = 0; + s.heap_max = HEAP_SIZE; + + for (n = 0; n < elems; n++) { + if (tree[n * 2]/*.Freq*/ !== 0) { + s.heap[++s.heap_len] = max_code = n; + s.depth[n] = 0; + + } else { + tree[n * 2 + 1]/*.Len*/ = 0; + } + } + + /* The pkzip format requires that at least one distance code exists, + * and that at least one bit should be sent even if there is only one + * possible code. So to avoid special checks later on we force at least + * two codes of non zero frequency. + */ + while (s.heap_len < 2) { + node = s.heap[++s.heap_len] = (max_code < 2 ? ++max_code : 0); + tree[node * 2]/*.Freq*/ = 1; + s.depth[node] = 0; + s.opt_len--; + + if (has_stree) { + s.static_len -= stree[node * 2 + 1]/*.Len*/; + } + /* node is 0 or 1 so it does not have extra bits */ + } + desc.max_code = max_code; + + /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree, + * establish sub-heaps of increasing lengths: + */ + for (n = (s.heap_len >> 1/*int /2*/); n >= 1; n--) { pqdownheap(s, tree, n); } + + /* Construct the Huffman tree by repeatedly combining the least two + * frequent nodes. + */ + node = elems; /* next internal node of the tree */ + do { + //pqremove(s, tree, n); /* n = node of least frequency */ + /*** pqremove ***/ + n = s.heap[1/*SMALLEST*/]; + s.heap[1/*SMALLEST*/] = s.heap[s.heap_len--]; + pqdownheap(s, tree, 1/*SMALLEST*/); + /***/ + + m = s.heap[1/*SMALLEST*/]; /* m = node of next least frequency */ + + s.heap[--s.heap_max] = n; /* keep the nodes sorted by frequency */ + s.heap[--s.heap_max] = m; + + /* Create a new node father of n and m */ + tree[node * 2]/*.Freq*/ = tree[n * 2]/*.Freq*/ + tree[m * 2]/*.Freq*/; + s.depth[node] = (s.depth[n] >= s.depth[m] ? s.depth[n] : s.depth[m]) + 1; + tree[n * 2 + 1]/*.Dad*/ = tree[m * 2 + 1]/*.Dad*/ = node; + + /* and insert the new node in the heap */ + s.heap[1/*SMALLEST*/] = node++; + pqdownheap(s, tree, 1/*SMALLEST*/); + + } while (s.heap_len >= 2); + + s.heap[--s.heap_max] = s.heap[1/*SMALLEST*/]; + + /* At this point, the fields freq and dad are set. We can now + * generate the bit lengths. 
+ */ + gen_bitlen(s, desc); + + /* The field len is now set, we can generate the bit codes */ + gen_codes(tree, max_code, s.bl_count); +} + + +/* =========================================================================== + * Scan a literal or distance tree to determine the frequencies of the codes + * in the bit length tree. + */ +function scan_tree(s, tree, max_code) +// deflate_state *s; +// ct_data *tree; /* the tree to be scanned */ +// int max_code; /* and its largest code of non zero frequency */ +{ + var n; /* iterates over all tree elements */ + var prevlen = -1; /* last emitted length */ + var curlen; /* length of current code */ + + var nextlen = tree[0 * 2 + 1]/*.Len*/; /* length of next code */ + + var count = 0; /* repeat count of the current code */ + var max_count = 7; /* max repeat count */ + var min_count = 4; /* min repeat count */ + + if (nextlen === 0) { + max_count = 138; + min_count = 3; + } + tree[(max_code + 1) * 2 + 1]/*.Len*/ = 0xffff; /* guard */ + + for (n = 0; n <= max_code; n++) { + curlen = nextlen; + nextlen = tree[(n + 1) * 2 + 1]/*.Len*/; + + if (++count < max_count && curlen === nextlen) { + continue; + + } else if (count < min_count) { + s.bl_tree[curlen * 2]/*.Freq*/ += count; + + } else if (curlen !== 0) { + + if (curlen !== prevlen) { s.bl_tree[curlen * 2]/*.Freq*/++; } + s.bl_tree[REP_3_6 * 2]/*.Freq*/++; + + } else if (count <= 10) { + s.bl_tree[REPZ_3_10 * 2]/*.Freq*/++; + + } else { + s.bl_tree[REPZ_11_138 * 2]/*.Freq*/++; + } + + count = 0; + prevlen = curlen; + + if (nextlen === 0) { + max_count = 138; + min_count = 3; + + } else if (curlen === nextlen) { + max_count = 6; + min_count = 3; + + } else { + max_count = 7; + min_count = 4; + } + } +} + + +/* =========================================================================== + * Send a literal or distance tree in compressed form, using the codes in + * bl_tree. 
+ */ +function send_tree(s, tree, max_code) +// deflate_state *s; +// ct_data *tree; /* the tree to be scanned */ +// int max_code; /* and its largest code of non zero frequency */ +{ + var n; /* iterates over all tree elements */ + var prevlen = -1; /* last emitted length */ + var curlen; /* length of current code */ + + var nextlen = tree[0 * 2 + 1]/*.Len*/; /* length of next code */ + + var count = 0; /* repeat count of the current code */ + var max_count = 7; /* max repeat count */ + var min_count = 4; /* min repeat count */ + + /* tree[max_code+1].Len = -1; */ /* guard already set */ + if (nextlen === 0) { + max_count = 138; + min_count = 3; + } + + for (n = 0; n <= max_code; n++) { + curlen = nextlen; + nextlen = tree[(n + 1) * 2 + 1]/*.Len*/; + + if (++count < max_count && curlen === nextlen) { + continue; + + } else if (count < min_count) { + do { send_code(s, curlen, s.bl_tree); } while (--count !== 0); + + } else if (curlen !== 0) { + if (curlen !== prevlen) { + send_code(s, curlen, s.bl_tree); + count--; + } + //Assert(count >= 3 && count <= 6, " 3_6?"); + send_code(s, REP_3_6, s.bl_tree); + send_bits(s, count - 3, 2); + + } else if (count <= 10) { + send_code(s, REPZ_3_10, s.bl_tree); + send_bits(s, count - 3, 3); + + } else { + send_code(s, REPZ_11_138, s.bl_tree); + send_bits(s, count - 11, 7); + } + + count = 0; + prevlen = curlen; + if (nextlen === 0) { + max_count = 138; + min_count = 3; + + } else if (curlen === nextlen) { + max_count = 6; + min_count = 3; + + } else { + max_count = 7; + min_count = 4; + } + } +} + + +/* =========================================================================== + * Construct the Huffman tree for the bit lengths and return the index in + * bl_order of the last bit length code to send. + */ +function build_bl_tree(s) { + var max_blindex; /* index of last bit length code of non zero freq */ + + /* Determine the bit length frequencies for literal and distance trees */ + scan_tree(s, s.dyn_ltree, s.l_desc.max_code); + scan_tree(s, s.dyn_dtree, s.d_desc.max_code); + + /* Build the bit length tree: */ + build_tree(s, s.bl_desc); + /* opt_len now includes the length of the tree representations, except + * the lengths of the bit lengths codes and the 5+5+4 bits for the counts. + */ + + /* Determine the number of bit length codes to send. The pkzip format + * requires that at least 4 bit length codes be sent. (appnote.txt says + * 3 but the actual value used is 4.) + */ + for (max_blindex = BL_CODES - 1; max_blindex >= 3; max_blindex--) { + if (s.bl_tree[bl_order[max_blindex] * 2 + 1]/*.Len*/ !== 0) { + break; + } + } + /* Update opt_len to include the bit length tree and counts */ + s.opt_len += 3 * (max_blindex + 1) + 5 + 5 + 4; + //Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld", + // s->opt_len, s->static_len)); + + return max_blindex; +} + + +/* =========================================================================== + * Send the header for a block using dynamic Huffman trees: the counts, the + * lengths of the bit length codes, the literal tree and the distance tree. + * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4. 
+ */ +function send_all_trees(s, lcodes, dcodes, blcodes) +// deflate_state *s; +// int lcodes, dcodes, blcodes; /* number of codes for each tree */ +{ + var rank; /* index in bl_order */ + + //Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes"); + //Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES, + // "too many codes"); + //Tracev((stderr, "\nbl counts: ")); + send_bits(s, lcodes - 257, 5); /* not +255 as stated in appnote.txt */ + send_bits(s, dcodes - 1, 5); + send_bits(s, blcodes - 4, 4); /* not -3 as stated in appnote.txt */ + for (rank = 0; rank < blcodes; rank++) { + //Tracev((stderr, "\nbl code %2d ", bl_order[rank])); + send_bits(s, s.bl_tree[bl_order[rank] * 2 + 1]/*.Len*/, 3); + } + //Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent)); + + send_tree(s, s.dyn_ltree, lcodes - 1); /* literal tree */ + //Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent)); + + send_tree(s, s.dyn_dtree, dcodes - 1); /* distance tree */ + //Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent)); +} + + +/* =========================================================================== + * Check if the data type is TEXT or BINARY, using the following algorithm: + * - TEXT if the two conditions below are satisfied: + * a) There are no non-portable control characters belonging to the + * "black list" (0..6, 14..25, 28..31). + * b) There is at least one printable character belonging to the + * "white list" (9 {TAB}, 10 {LF}, 13 {CR}, 32..255). + * - BINARY otherwise. + * - The following partially-portable control characters form a + * "gray list" that is ignored in this detection algorithm: + * (7 {BEL}, 8 {BS}, 11 {VT}, 12 {FF}, 26 {SUB}, 27 {ESC}). + * IN assertion: the fields Freq of dyn_ltree are set. + */ +function detect_data_type(s) { + /* black_mask is the bit mask of black-listed bytes + * set bits 0..6, 14..25, and 28..31 + * 0xf3ffc07f = binary 11110011111111111100000001111111 + */ + var black_mask = 0xf3ffc07f; + var n; + + /* Check for non-textual ("black-listed") bytes. */ + for (n = 0; n <= 31; n++, black_mask >>>= 1) { + if ((black_mask & 1) && (s.dyn_ltree[n * 2]/*.Freq*/ !== 0)) { + return Z_BINARY; + } + } + + /* Check for textual ("white-listed") bytes. */ + if (s.dyn_ltree[9 * 2]/*.Freq*/ !== 0 || s.dyn_ltree[10 * 2]/*.Freq*/ !== 0 || + s.dyn_ltree[13 * 2]/*.Freq*/ !== 0) { + return Z_TEXT; + } + for (n = 32; n < LITERALS; n++) { + if (s.dyn_ltree[n * 2]/*.Freq*/ !== 0) { + return Z_TEXT; + } + } + + /* There are no "black-listed" or "white-listed" bytes: + * this stream either is empty or has tolerated ("gray-listed") bytes only. + */ + return Z_BINARY; +} + + +var static_init_done = false; + +/* =========================================================================== + * Initialize the tree data structures for a new zlib stream. 
+ */ +function _tr_init(s) +{ + + if (!static_init_done) { + tr_static_init(); + static_init_done = true; + } + + s.l_desc = new TreeDesc(s.dyn_ltree, static_l_desc); + s.d_desc = new TreeDesc(s.dyn_dtree, static_d_desc); + s.bl_desc = new TreeDesc(s.bl_tree, static_bl_desc); + + s.bi_buf = 0; + s.bi_valid = 0; + + /* Initialize the first block of the first file: */ + init_block(s); +} + + +/* =========================================================================== + * Send a stored block + */ +function _tr_stored_block(s, buf, stored_len, last) +//DeflateState *s; +//charf *buf; /* input block */ +//ulg stored_len; /* length of input block */ +//int last; /* one if this is the last block for a file */ +{ + send_bits(s, (STORED_BLOCK << 1) + (last ? 1 : 0), 3); /* send block type */ + copy_block(s, buf, stored_len, true); /* with header */ +} + + +/* =========================================================================== + * Send one empty static block to give enough lookahead for inflate. + * This takes 10 bits, of which 7 may remain in the bit buffer. + */ +function _tr_align(s) { + send_bits(s, STATIC_TREES << 1, 3); + send_code(s, END_BLOCK, static_ltree); + bi_flush(s); +} + + +/* =========================================================================== + * Determine the best encoding for the current block: dynamic trees, static + * trees or store, and output the encoded block to the zip file. + */ +function _tr_flush_block(s, buf, stored_len, last) +//DeflateState *s; +//charf *buf; /* input block, or NULL if too old */ +//ulg stored_len; /* length of input block */ +//int last; /* one if this is the last block for a file */ +{ + var opt_lenb, static_lenb; /* opt_len and static_len in bytes */ + var max_blindex = 0; /* index of last bit length code of non zero freq */ + + /* Build the Huffman trees unless a stored block is forced */ + if (s.level > 0) { + + /* Check if the file is binary or text */ + if (s.strm.data_type === Z_UNKNOWN) { + s.strm.data_type = detect_data_type(s); + } + + /* Construct the literal and distance trees */ + build_tree(s, s.l_desc); + // Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len, + // s->static_len)); + + build_tree(s, s.d_desc); + // Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len, + // s->static_len)); + /* At this point, opt_len and static_len are the total bit lengths of + * the compressed block data, excluding the tree representations. + */ + + /* Build the bit length tree for the above two trees, and get the index + * in bl_order of the last bit length code to send. + */ + max_blindex = build_bl_tree(s); + + /* Determine the best encoding. Compute the block lengths in bytes. */ + opt_lenb = (s.opt_len + 3 + 7) >>> 3; + static_lenb = (s.static_len + 3 + 7) >>> 3; + + // Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ", + // opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len, + // s->last_lit)); + + if (static_lenb <= opt_lenb) { opt_lenb = static_lenb; } + + } else { + // Assert(buf != (char*)0, "lost buf"); + opt_lenb = static_lenb = stored_len + 5; /* force a stored block */ + } + + if ((stored_len + 4 <= opt_lenb) && (buf !== -1)) { + /* 4: two words for the lengths */ + + /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE. + * Otherwise we can't have processed more than WSIZE input bytes since + * the last block flush, because compression would have been + * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to + * transform a block into a stored block. 
+ */ + _tr_stored_block(s, buf, stored_len, last); + + } else if (s.strategy === Z_FIXED || static_lenb === opt_lenb) { + + send_bits(s, (STATIC_TREES << 1) + (last ? 1 : 0), 3); + compress_block(s, static_ltree, static_dtree); + + } else { + send_bits(s, (DYN_TREES << 1) + (last ? 1 : 0), 3); + send_all_trees(s, s.l_desc.max_code + 1, s.d_desc.max_code + 1, max_blindex + 1); + compress_block(s, s.dyn_ltree, s.dyn_dtree); + } + // Assert (s->compressed_len == s->bits_sent, "bad compressed size"); + /* The above check is made mod 2^32, for files larger than 512 MB + * and uLong implemented on 32 bits. + */ + init_block(s); + + if (last) { + bi_windup(s); + } + // Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3, + // s->compressed_len-7*last)); +} + +/* =========================================================================== + * Save the match info and tally the frequency counts. Return true if + * the current block must be flushed. + */ +function _tr_tally(s, dist, lc) +// deflate_state *s; +// unsigned dist; /* distance of matched string */ +// unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */ +{ + //var out_length, in_length, dcode; + + s.pending_buf[s.d_buf + s.last_lit * 2] = (dist >>> 8) & 0xff; + s.pending_buf[s.d_buf + s.last_lit * 2 + 1] = dist & 0xff; + + s.pending_buf[s.l_buf + s.last_lit] = lc & 0xff; + s.last_lit++; + + if (dist === 0) { + /* lc is the unmatched char */ + s.dyn_ltree[lc * 2]/*.Freq*/++; + } else { + s.matches++; + /* Here, lc is the match length - MIN_MATCH */ + dist--; /* dist = match distance - 1 */ + //Assert((ush)dist < (ush)MAX_DIST(s) && + // (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) && + // (ush)d_code(dist) < (ush)D_CODES, "_tr_tally: bad match"); + + s.dyn_ltree[(_length_code[lc] + LITERALS + 1) * 2]/*.Freq*/++; + s.dyn_dtree[d_code(dist) * 2]/*.Freq*/++; + } + +// (!) This block is disabled in zlib defailts, +// don't enable it for binary compatibility + +//#ifdef TRUNCATE_BLOCK +// /* Try to guess if it is profitable to stop the current block here */ +// if ((s.last_lit & 0x1fff) === 0 && s.level > 2) { +// /* Compute an upper bound for the compressed length */ +// out_length = s.last_lit*8; +// in_length = s.strstart - s.block_start; +// +// for (dcode = 0; dcode < D_CODES; dcode++) { +// out_length += s.dyn_dtree[dcode*2]/*.Freq*/ * (5 + extra_dbits[dcode]); +// } +// out_length >>>= 3; +// //Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ", +// // s->last_lit, in_length, out_length, +// // 100L - out_length*100L/in_length)); +// if (s.matches < (s.last_lit>>1)/*int /2*/ && out_length < (in_length>>1)/*int /2*/) { +// return true; +// } +// } +//#endif + + return (s.last_lit === s.lit_bufsize - 1); + /* We avoid equality with lit_bufsize because of wraparound at 64K + * on 16 bit machines and because stored blocks are restricted to + * 64K-1 bytes. 
+ */ +} + +exports._tr_init = _tr_init; +exports._tr_stored_block = _tr_stored_block; +exports._tr_flush_block = _tr_flush_block; +exports._tr_tally = _tr_tally; +exports._tr_align = _tr_align; + +},{"../utils/common":24}],36:[function(require,module,exports){ +'use strict'; + + +function ZStream() { + /* next input byte */ + this.input = null; // JS specific, because we have no pointers + this.next_in = 0; + /* number of bytes available at input */ + this.avail_in = 0; + /* total number of input bytes read so far */ + this.total_in = 0; + /* next output byte should be put there */ + this.output = null; // JS specific, because we have no pointers + this.next_out = 0; + /* remaining free space at output */ + this.avail_out = 0; + /* total number of bytes output so far */ + this.total_out = 0; + /* last error message, NULL if no error */ + this.msg = ''/*Z_NULL*/; + /* not visible by applications */ + this.state = null; + /* best guess about the data type: binary or text */ + this.data_type = 2/*Z_UNKNOWN*/; + /* adler32 value of the uncompressed data */ + this.adler = 0; +} + +module.exports = ZStream; + +},{}],37:[function(require,module,exports){ +// shim for using process in browser +var process = module.exports = {}; + +// cached from whatever global is present so that test runners that stub it +// don't break things. But we need to wrap it in a try catch in case it is +// wrapped in strict mode code which doesn't define any globals. It's inside a +// function because try/catches deoptimize in certain engines. + +var cachedSetTimeout; +var cachedClearTimeout; + +function defaultSetTimout() { + throw new Error('setTimeout has not been defined'); +} +function defaultClearTimeout () { + throw new Error('clearTimeout has not been defined'); +} +(function () { + try { + if (typeof setTimeout === 'function') { + cachedSetTimeout = setTimeout; + } else { + cachedSetTimeout = defaultSetTimout; + } + } catch (e) { + cachedSetTimeout = defaultSetTimout; + } + try { + if (typeof clearTimeout === 'function') { + cachedClearTimeout = clearTimeout; + } else { + cachedClearTimeout = defaultClearTimeout; + } + } catch (e) { + cachedClearTimeout = defaultClearTimeout; + } +} ()) +function runTimeout(fun) { + if (cachedSetTimeout === setTimeout) { + //normal enviroments in sane situations + return setTimeout(fun, 0); + } + // if setTimeout wasn't available but was latter defined + if ((cachedSetTimeout === defaultSetTimout || !cachedSetTimeout) && setTimeout) { + cachedSetTimeout = setTimeout; + return setTimeout(fun, 0); + } + try { + // when when somebody has screwed with setTimeout but no I.E. maddness + return cachedSetTimeout(fun, 0); + } catch(e){ + try { + // When we are in I.E. but the script has been evaled so I.E. doesn't trust the global object when called normally + return cachedSetTimeout.call(null, fun, 0); + } catch(e){ + // same as above but when it's a version of I.E. that must have the global object for 'this', hopfully our context correct otherwise it will throw a global error + return cachedSetTimeout.call(this, fun, 0); + } + } + + +} +function runClearTimeout(marker) { + if (cachedClearTimeout === clearTimeout) { + //normal enviroments in sane situations + return clearTimeout(marker); + } + // if clearTimeout wasn't available but was latter defined + if ((cachedClearTimeout === defaultClearTimeout || !cachedClearTimeout) && clearTimeout) { + cachedClearTimeout = clearTimeout; + return clearTimeout(marker); + } + try { + // when when somebody has screwed with setTimeout but no I.E. 
maddness + return cachedClearTimeout(marker); + } catch (e){ + try { + // When we are in I.E. but the script has been evaled so I.E. doesn't trust the global object when called normally + return cachedClearTimeout.call(null, marker); + } catch (e){ + // same as above but when it's a version of I.E. that must have the global object for 'this', hopfully our context correct otherwise it will throw a global error. + // Some versions of I.E. have different rules for clearTimeout vs setTimeout + return cachedClearTimeout.call(this, marker); + } + } + + + +} +var queue = []; +var draining = false; +var currentQueue; +var queueIndex = -1; + +function cleanUpNextTick() { + if (!draining || !currentQueue) { + return; + } + draining = false; + if (currentQueue.length) { + queue = currentQueue.concat(queue); + } else { + queueIndex = -1; + } + if (queue.length) { + drainQueue(); + } +} + +function drainQueue() { + if (draining) { + return; + } + var timeout = runTimeout(cleanUpNextTick); + draining = true; + + var len = queue.length; + while(len) { + currentQueue = queue; + queue = []; + while (++queueIndex < len) { + if (currentQueue) { + currentQueue[queueIndex].run(); + } + } + queueIndex = -1; + len = queue.length; + } + currentQueue = null; + draining = false; + runClearTimeout(timeout); +} + +process.nextTick = function (fun) { + var args = new Array(arguments.length - 1); + if (arguments.length > 1) { + for (var i = 1; i < arguments.length; i++) { + args[i - 1] = arguments[i]; + } + } + queue.push(new Item(fun, args)); + if (queue.length === 1 && !draining) { + runTimeout(drainQueue); + } +}; + +// v8 likes predictible objects +function Item(fun, array) { + this.fun = fun; + this.array = array; +} +Item.prototype.run = function () { + this.fun.apply(null, this.array); +}; +process.title = 'browser'; +process.browser = true; +process.env = {}; +process.argv = []; +process.version = ''; // empty string to avoid regexp issues +process.versions = {}; + +function noop() {} + +process.on = noop; +process.addListener = noop; +process.once = noop; +process.off = noop; +process.removeListener = noop; +process.removeAllListeners = noop; +process.emit = noop; + +process.binding = function (name) { + throw new Error('process.binding is not supported'); +}; + +process.cwd = function () { return '/' }; +process.chdir = function (dir) { + throw new Error('process.chdir is not supported'); +}; +process.umask = function() { return 0; }; + +},{}],38:[function(require,module,exports){ +(function (Buffer){ +/** + * Convert a typed array to a Buffer without a copy + * + * Author: Feross Aboukhadijeh + * License: MIT + * + * `npm install typedarray-to-buffer` + */ + +module.exports = function (arr) { + if (typeof Buffer._augment === 'function' && Buffer.TYPED_ARRAY_SUPPORT) { + // If `Buffer` is from the `buffer` module and this browser supports typed arrays, + // then augment it with all the `Buffer` methods. + return Buffer._augment(arr) + } else { + // Otherwise, fallback to creating a `Buffer` with a copy. 
+ return new Buffer(arr) + } +} + +}).call(this,require("buffer").Buffer) +},{"buffer":6}],39:[function(require,module,exports){ +if (typeof Object.create === 'function') { + // implementation from standard node.js 'util' module + module.exports = function inherits(ctor, superCtor) { + ctor.super_ = superCtor + ctor.prototype = Object.create(superCtor.prototype, { + constructor: { + value: ctor, + enumerable: false, + writable: true, + configurable: true + } + }); + }; +} else { + // old school shim for old browsers + module.exports = function inherits(ctor, superCtor) { + ctor.super_ = superCtor + var TempCtor = function () {} + TempCtor.prototype = superCtor.prototype + ctor.prototype = new TempCtor() + ctor.prototype.constructor = ctor + } +} + +},{}],40:[function(require,module,exports){ +module.exports = function isBuffer(arg) { + return arg && typeof arg === 'object' + && typeof arg.copy === 'function' + && typeof arg.fill === 'function' + && typeof arg.readUInt8 === 'function'; +} +},{}],41:[function(require,module,exports){ +(function (process,global){ +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + +var formatRegExp = /%[sdj%]/g; +exports.format = function(f) { + if (!isString(f)) { + var objects = []; + for (var i = 0; i < arguments.length; i++) { + objects.push(inspect(arguments[i])); + } + return objects.join(' '); + } + + var i = 1; + var args = arguments; + var len = args.length; + var str = String(f).replace(formatRegExp, function(x) { + if (x === '%%') return '%'; + if (i >= len) return x; + switch (x) { + case '%s': return String(args[i++]); + case '%d': return Number(args[i++]); + case '%j': + try { + return JSON.stringify(args[i++]); + } catch (_) { + return '[Circular]'; + } + default: + return x; + } + }); + for (var x = args[i]; i < len; x = args[++i]) { + if (isNull(x) || !isObject(x)) { + str += ' ' + x; + } else { + str += ' ' + inspect(x); + } + } + return str; +}; + + +// Mark that a method should not be used. +// Returns a modified function which warns once by default. +// If --no-deprecation is set, then it is a no-op. +exports.deprecate = function(fn, msg) { + // Allow for deprecating things in the process of starting up. 
+ if (isUndefined(global.process)) { + return function() { + return exports.deprecate(fn, msg).apply(this, arguments); + }; + } + + if (process.noDeprecation === true) { + return fn; + } + + var warned = false; + function deprecated() { + if (!warned) { + if (process.throwDeprecation) { + throw new Error(msg); + } else if (process.traceDeprecation) { + console.trace(msg); + } else { + console.error(msg); + } + warned = true; + } + return fn.apply(this, arguments); + } + + return deprecated; +}; + + +var debugs = {}; +var debugEnviron; +exports.debuglog = function(set) { + if (isUndefined(debugEnviron)) + debugEnviron = process.env.NODE_DEBUG || ''; + set = set.toUpperCase(); + if (!debugs[set]) { + if (new RegExp('\\b' + set + '\\b', 'i').test(debugEnviron)) { + var pid = process.pid; + debugs[set] = function() { + var msg = exports.format.apply(exports, arguments); + console.error('%s %d: %s', set, pid, msg); + }; + } else { + debugs[set] = function() {}; + } + } + return debugs[set]; +}; + + +/** + * Echos the value of a value. Trys to print the value out + * in the best way possible given the different types. + * + * @param {Object} obj The object to print out. + * @param {Object} opts Optional options object that alters the output. + */ +/* legacy: obj, showHidden, depth, colors*/ +function inspect(obj, opts) { + // default options + var ctx = { + seen: [], + stylize: stylizeNoColor + }; + // legacy... + if (arguments.length >= 3) ctx.depth = arguments[2]; + if (arguments.length >= 4) ctx.colors = arguments[3]; + if (isBoolean(opts)) { + // legacy... + ctx.showHidden = opts; + } else if (opts) { + // got an "options" object + exports._extend(ctx, opts); + } + // set default options + if (isUndefined(ctx.showHidden)) ctx.showHidden = false; + if (isUndefined(ctx.depth)) ctx.depth = 2; + if (isUndefined(ctx.colors)) ctx.colors = false; + if (isUndefined(ctx.customInspect)) ctx.customInspect = true; + if (ctx.colors) ctx.stylize = stylizeWithColor; + return formatValue(ctx, obj, ctx.depth); +} +exports.inspect = inspect; + + +// http://en.wikipedia.org/wiki/ANSI_escape_code#graphics +inspect.colors = { + 'bold' : [1, 22], + 'italic' : [3, 23], + 'underline' : [4, 24], + 'inverse' : [7, 27], + 'white' : [37, 39], + 'grey' : [90, 39], + 'black' : [30, 39], + 'blue' : [34, 39], + 'cyan' : [36, 39], + 'green' : [32, 39], + 'magenta' : [35, 39], + 'red' : [31, 39], + 'yellow' : [33, 39] +}; + +// Don't use 'blue' not visible on cmd.exe +inspect.styles = { + 'special': 'cyan', + 'number': 'yellow', + 'boolean': 'yellow', + 'undefined': 'grey', + 'null': 'bold', + 'string': 'green', + 'date': 'magenta', + // "name": intentionally not styling + 'regexp': 'red' +}; + + +function stylizeWithColor(str, styleType) { + var style = inspect.styles[styleType]; + + if (style) { + return '\u001b[' + inspect.colors[style][0] + 'm' + str + + '\u001b[' + inspect.colors[style][1] + 'm'; + } else { + return str; + } +} + + +function stylizeNoColor(str, styleType) { + return str; +} + + +function arrayToHash(array) { + var hash = {}; + + array.forEach(function(val, idx) { + hash[val] = true; + }); + + return hash; +} + + +function formatValue(ctx, value, recurseTimes) { + // Provide a hook for user-specified inspect functions. 
+ // Check that value is an object with an inspect function on it + if (ctx.customInspect && + value && + isFunction(value.inspect) && + // Filter out the util module, it's inspect function is special + value.inspect !== exports.inspect && + // Also filter out any prototype objects using the circular check. + !(value.constructor && value.constructor.prototype === value)) { + var ret = value.inspect(recurseTimes, ctx); + if (!isString(ret)) { + ret = formatValue(ctx, ret, recurseTimes); + } + return ret; + } + + // Primitive types cannot have properties + var primitive = formatPrimitive(ctx, value); + if (primitive) { + return primitive; + } + + // Look up the keys of the object. + var keys = Object.keys(value); + var visibleKeys = arrayToHash(keys); + + if (ctx.showHidden) { + keys = Object.getOwnPropertyNames(value); + } + + // IE doesn't make error fields non-enumerable + // http://msdn.microsoft.com/en-us/library/ie/dww52sbt(v=vs.94).aspx + if (isError(value) + && (keys.indexOf('message') >= 0 || keys.indexOf('description') >= 0)) { + return formatError(value); + } + + // Some type of object without properties can be shortcutted. + if (keys.length === 0) { + if (isFunction(value)) { + var name = value.name ? ': ' + value.name : ''; + return ctx.stylize('[Function' + name + ']', 'special'); + } + if (isRegExp(value)) { + return ctx.stylize(RegExp.prototype.toString.call(value), 'regexp'); + } + if (isDate(value)) { + return ctx.stylize(Date.prototype.toString.call(value), 'date'); + } + if (isError(value)) { + return formatError(value); + } + } + + var base = '', array = false, braces = ['{', '}']; + + // Make Array say that they are Array + if (isArray(value)) { + array = true; + braces = ['[', ']']; + } + + // Make functions say that they are functions + if (isFunction(value)) { + var n = value.name ? ': ' + value.name : ''; + base = ' [Function' + n + ']'; + } + + // Make RegExps say that they are RegExps + if (isRegExp(value)) { + base = ' ' + RegExp.prototype.toString.call(value); + } + + // Make dates with properties first say the date + if (isDate(value)) { + base = ' ' + Date.prototype.toUTCString.call(value); + } + + // Make error with message first say the error + if (isError(value)) { + base = ' ' + formatError(value); + } + + if (keys.length === 0 && (!array || value.length == 0)) { + return braces[0] + base + braces[1]; + } + + if (recurseTimes < 0) { + if (isRegExp(value)) { + return ctx.stylize(RegExp.prototype.toString.call(value), 'regexp'); + } else { + return ctx.stylize('[Object]', 'special'); + } + } + + ctx.seen.push(value); + + var output; + if (array) { + output = formatArray(ctx, value, recurseTimes, visibleKeys, keys); + } else { + output = keys.map(function(key) { + return formatProperty(ctx, value, recurseTimes, visibleKeys, key, array); + }); + } + + ctx.seen.pop(); + + return reduceToSingleString(output, base, braces); +} + + +function formatPrimitive(ctx, value) { + if (isUndefined(value)) + return ctx.stylize('undefined', 'undefined'); + if (isString(value)) { + var simple = '\'' + JSON.stringify(value).replace(/^"|"$/g, '') + .replace(/'/g, "\\'") + .replace(/\\"/g, '"') + '\''; + return ctx.stylize(simple, 'string'); + } + if (isNumber(value)) + return ctx.stylize('' + value, 'number'); + if (isBoolean(value)) + return ctx.stylize('' + value, 'boolean'); + // For some reason typeof null is "object", so special case here. 
+ if (isNull(value)) + return ctx.stylize('null', 'null'); +} + + +function formatError(value) { + return '[' + Error.prototype.toString.call(value) + ']'; +} + + +function formatArray(ctx, value, recurseTimes, visibleKeys, keys) { + var output = []; + for (var i = 0, l = value.length; i < l; ++i) { + if (hasOwnProperty(value, String(i))) { + output.push(formatProperty(ctx, value, recurseTimes, visibleKeys, + String(i), true)); + } else { + output.push(''); + } + } + keys.forEach(function(key) { + if (!key.match(/^\d+$/)) { + output.push(formatProperty(ctx, value, recurseTimes, visibleKeys, + key, true)); + } + }); + return output; +} + + +function formatProperty(ctx, value, recurseTimes, visibleKeys, key, array) { + var name, str, desc; + desc = Object.getOwnPropertyDescriptor(value, key) || { value: value[key] }; + if (desc.get) { + if (desc.set) { + str = ctx.stylize('[Getter/Setter]', 'special'); + } else { + str = ctx.stylize('[Getter]', 'special'); + } + } else { + if (desc.set) { + str = ctx.stylize('[Setter]', 'special'); + } + } + if (!hasOwnProperty(visibleKeys, key)) { + name = '[' + key + ']'; + } + if (!str) { + if (ctx.seen.indexOf(desc.value) < 0) { + if (isNull(recurseTimes)) { + str = formatValue(ctx, desc.value, null); + } else { + str = formatValue(ctx, desc.value, recurseTimes - 1); + } + if (str.indexOf('\n') > -1) { + if (array) { + str = str.split('\n').map(function(line) { + return ' ' + line; + }).join('\n').substr(2); + } else { + str = '\n' + str.split('\n').map(function(line) { + return ' ' + line; + }).join('\n'); + } + } + } else { + str = ctx.stylize('[Circular]', 'special'); + } + } + if (isUndefined(name)) { + if (array && key.match(/^\d+$/)) { + return str; + } + name = JSON.stringify('' + key); + if (name.match(/^"([a-zA-Z_][a-zA-Z_0-9]*)"$/)) { + name = name.substr(1, name.length - 2); + name = ctx.stylize(name, 'name'); + } else { + name = name.replace(/'/g, "\\'") + .replace(/\\"/g, '"') + .replace(/(^"|"$)/g, "'"); + name = ctx.stylize(name, 'string'); + } + } + + return name + ': ' + str; +} + + +function reduceToSingleString(output, base, braces) { + var numLinesEst = 0; + var length = output.reduce(function(prev, cur) { + numLinesEst++; + if (cur.indexOf('\n') >= 0) numLinesEst++; + return prev + cur.replace(/\u001b\[\d\d?m/g, '').length + 1; + }, 0); + + if (length > 60) { + return braces[0] + + (base === '' ? '' : base + '\n ') + + ' ' + + output.join(',\n ') + + ' ' + + braces[1]; + } + + return braces[0] + base + ' ' + output.join(', ') + ' ' + braces[1]; +} + + +// NOTE: These type checking functions intentionally don't use `instanceof` +// because it is fragile and can be easily faked with `Object.create()`. 
+function isArray(ar) { + return Array.isArray(ar); +} +exports.isArray = isArray; + +function isBoolean(arg) { + return typeof arg === 'boolean'; +} +exports.isBoolean = isBoolean; + +function isNull(arg) { + return arg === null; +} +exports.isNull = isNull; + +function isNullOrUndefined(arg) { + return arg == null; +} +exports.isNullOrUndefined = isNullOrUndefined; + +function isNumber(arg) { + return typeof arg === 'number'; +} +exports.isNumber = isNumber; + +function isString(arg) { + return typeof arg === 'string'; +} +exports.isString = isString; + +function isSymbol(arg) { + return typeof arg === 'symbol'; +} +exports.isSymbol = isSymbol; + +function isUndefined(arg) { + return arg === void 0; +} +exports.isUndefined = isUndefined; + +function isRegExp(re) { + return isObject(re) && objectToString(re) === '[object RegExp]'; +} +exports.isRegExp = isRegExp; + +function isObject(arg) { + return typeof arg === 'object' && arg !== null; +} +exports.isObject = isObject; + +function isDate(d) { + return isObject(d) && objectToString(d) === '[object Date]'; +} +exports.isDate = isDate; + +function isError(e) { + return isObject(e) && + (objectToString(e) === '[object Error]' || e instanceof Error); +} +exports.isError = isError; + +function isFunction(arg) { + return typeof arg === 'function'; +} +exports.isFunction = isFunction; + +function isPrimitive(arg) { + return arg === null || + typeof arg === 'boolean' || + typeof arg === 'number' || + typeof arg === 'string' || + typeof arg === 'symbol' || // ES6 symbol + typeof arg === 'undefined'; +} +exports.isPrimitive = isPrimitive; + +exports.isBuffer = require('./support/isBuffer'); + +function objectToString(o) { + return Object.prototype.toString.call(o); +} + + +function pad(n) { + return n < 10 ? '0' + n.toString(10) : n.toString(10); +} + + +var months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', + 'Oct', 'Nov', 'Dec']; + +// 26 Feb 16:19:34 +function timestamp() { + var d = new Date(); + var time = [pad(d.getHours()), + pad(d.getMinutes()), + pad(d.getSeconds())].join(':'); + return [d.getDate(), months[d.getMonth()], time].join(' '); +} + + +// log is just a thin wrapper to console.log that prepends a timestamp +exports.log = function() { + console.log('%s - %s', timestamp(), exports.format.apply(exports, arguments)); +}; + + +/** + * Inherit the prototype methods from one constructor into another. + * + * The Function.prototype.inherits from lang.js rewritten as a standalone + * function (not on Function.prototype). NOTE: If this file is to be loaded + * during bootstrapping this function needs to be rewritten using some native + * functions as prototype setup using normal JavaScript does not work as + * expected during bootstrapping (see mirror.js in r114903). + * + * @param {function} ctor Constructor function which needs to inherit the + * prototype. + * @param {function} superCtor Constructor function to inherit prototype from. + */ +exports.inherits = require('inherits'); + +exports._extend = function(origin, add) { + // Don't do anything if add isn't an object + if (!add || !isObject(add)) return origin; + + var keys = Object.keys(add); + var i = keys.length; + while (i--) { + origin[keys[i]] = add[keys[i]]; + } + return origin; +}; + +function hasOwnProperty(obj, prop) { + return Object.prototype.hasOwnProperty.call(obj, prop); +} + +}).call(this,require('_process'),typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? 
window : {}) +},{"./support/isBuffer":40,"_process":37,"inherits":39}],42:[function(require,module,exports){ +'use strict'; + +var leveljs = require('level-js'); +var db = typeof indexedDB === 'undefined' ? { open: function open(_, cb) { + return cb(true); + } } : leveljs('./tessdata2'); + +var langdata = require('../common/langdata.json'); + +module.exports = function getLanguageData(req, res, cb) { + var lang = req.options.lang; + + function saveDataFile(data) { + db.put(lang, data, function (err) { + return console.log('cached', lang, err); + }); + cb(data); + } + + db.open({ compression: false }, function (err) { + if (err) return fetchLanguageData(req, res, cb); + db.get(lang, function (err, data) { + if (err) return fetchLanguageData(req, res, saveDataFile); + res.progress({ status: 'found in cache ' + lang + '.traineddata' }); + cb(data); + }); + }); +}; + +var ungzip = require('pako').ungzip; + +function fetchLanguageData(req, res, cb) { + var lang = req.options.lang; + var langfile = lang + '.traineddata.gz'; + var url = req.workerOptions.langPath + langfile; + + var xhr = new XMLHttpRequest(); + xhr.responseType = 'arraybuffer'; + xhr.open('GET', url, true); + xhr.onerror = function (e) { + xhr.onprogress = xhr.onload = null; + cb(xhr, null); + }; + xhr.onprogress = function (e) { + return res.progress({ + status: 'downloading ' + langfile, + loaded: e.loaded, + progress: Math.min(1, e.loaded / langdata[lang]) + }); + }; + + xhr.onload = function (e) { + if (!(xhr.status == 200 || xhr.status == 0 && xhr.response)) return res.reject('Error downloading language ' + url); + res.progress({ status: 'unzipping ' + langfile }); + + // in case the gzips are already ungzipped or extra gzipped + var response = new Uint8Array(xhr.response); + try { + while (response[0] == 0x1f && response[1] == 0x8b) { + response = ungzip(response); + } + } catch (err) { + return res.reject('Error unzipping language file ' + langfile + '\n' + err.message); + } + + cb(response); + }; + xhr.send(); +} + +},{"../common/langdata.json":46,"level-js":12,"pako":21}],43:[function(require,module,exports){ +(function (global){ +"use strict"; + +var workerUtils = require('../common/worker.js'); + +global.addEventListener('message', function (e) { + var packet = e.data; + workerUtils.dispatchHandlers(packet, function (obj) { + return postMessage(obj); + }); +}); + +exports.getLanguageData = require('./lang.js'); + +exports.getCore = function (req, res) { + if (!global.TesseractCore) { + res.progress({ status: 'loading tesseract core' }); + importScripts(req.workerOptions.tesseractPath); + res.progress({ status: 'loaded tesseract core' }); + } + return TesseractCore; +}; + +workerUtils.setAdapter(module.exports); + +}).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? 
window : {}) +},{"../common/worker.js":47,"./lang.js":42}],44:[function(require,module,exports){ +'use strict'; + +module.exports = function desaturate(image) { + var width, height; + if (image.data) { + var src = image.data; + width = image.width, height = image.height; + var dst = new Uint8Array(width * height); + var srcLength = src.length | 0, + srcLength_16 = srcLength - 16 | 0; + + for (var i = 0, j = 0; i <= srcLength_16; i += 16, j += 4) { + // convert to grayscale 4 pixels at a time; eveything with alpha gets put in front of 50% gray + dst[j] = (src[i] * 77 + src[i + 1] * 151 + src[i + 2] * 28) * src[i + 3] + (255 - src[i + 3] << 15) + 32768 >> 16; + dst[j + 1] = (src[i + 4] * 77 + src[i + 5] * 151 + src[i + 6] * 28) * src[i + 7] + (255 - src[i + 7] << 15) + 32768 >> 16; + dst[j + 2] = (src[i + 8] * 77 + src[i + 9] * 151 + src[i + 10] * 28) * src[i + 11] + (255 - src[i + 11] << 15) + 32768 >> 16; + dst[j + 3] = (src[i + 12] * 77 + src[i + 13] * 151 + src[i + 14] * 28) * src[i + 15] + (255 - src[i + 15] << 15) + 32768 >> 16; + } + for (; i < srcLength; i += 4, ++j) { + //finish up + dst[j] = (src[i] * 77 + src[i + 1] * 151 + src[i + 2] * 28) * src[i + 3] + (255 - src[i + 3] << 15) + 32768 >> 16; + }image = dst; + } else { + throw 'Expected ImageData'; + } + return image; +}; + +},{}],45:[function(require,module,exports){ +"use strict"; + +function deindent(html) { + var lines = html.split('\n'); + if (lines[0].substring(0, 2) === " ") { + for (var i = 0; i < lines.length; i++) { + if (lines[i].substring(0, 2) === " ") { + lines[i] = lines[i].slice(2); + } + }; + } + return lines.join('\n'); +} + +module.exports = function DumpLiterallyEverything(Module, base) { + var ri = base.GetIterator(); + var blocks = []; + var block, para, textline, word, symbol; + + function enumToString(value, prefix) { + return Object.keys(Module).filter(function (e) { + return e.substr(0, prefix.length + 1) == prefix + '_'; + }).filter(function (e) { + return Module[e] === value; + }).map(function (e) { + return e.slice(prefix.length + 1); + })[0]; + } + + ri.Begin(); + do { + if (ri.IsAtBeginningOf(Module.RIL_BLOCK)) { + var poly = ri.BlockPolygon(); + var polygon = null; + // BlockPolygon() returns null when automatic page segmentation is off + if (Module.getPointer(poly) > 0) { + var n = poly.get_n(), + px = poly.get_x(), + py = poly.get_y(), + polygon = []; + for (var i = 0; i < n; i++) { + polygon.push([px.getValue(i), py.getValue(i)]); + } + Module._ptaDestroy(Module.getPointer(poly)); + } + + block = { + paragraphs: [], + + text: ri.GetUTF8Text(Module.RIL_BLOCK), + confidence: ri.Confidence(Module.RIL_BLOCK), + baseline: ri.getBaseline(Module.RIL_BLOCK), + bbox: ri.getBoundingBox(Module.RIL_BLOCK), + + blocktype: enumToString(ri.BlockType(), 'PT'), + polygon: polygon + }; + blocks.push(block); + } + if (ri.IsAtBeginningOf(Module.RIL_PARA)) { + para = { + lines: [], + + text: ri.GetUTF8Text(Module.RIL_PARA), + confidence: ri.Confidence(Module.RIL_PARA), + baseline: ri.getBaseline(Module.RIL_PARA), + bbox: ri.getBoundingBox(Module.RIL_PARA), + + is_ltr: !!ri.ParagraphIsLtr() + }; + block.paragraphs.push(para); + } + if (ri.IsAtBeginningOf(Module.RIL_TEXTLINE)) { + textline = { + words: [], + + text: ri.GetUTF8Text(Module.RIL_TEXTLINE), + confidence: ri.Confidence(Module.RIL_TEXTLINE), + baseline: ri.getBaseline(Module.RIL_TEXTLINE), + bbox: ri.getBoundingBox(Module.RIL_TEXTLINE) + }; + para.lines.push(textline); + } + if (ri.IsAtBeginningOf(Module.RIL_WORD)) { + var fontInfo = 
ri.getWordFontAttributes(), + wordDir = ri.WordDirection(); + word = { + symbols: [], + choices: [], + + text: ri.GetUTF8Text(Module.RIL_WORD), + confidence: ri.Confidence(Module.RIL_WORD), + baseline: ri.getBaseline(Module.RIL_WORD), + bbox: ri.getBoundingBox(Module.RIL_WORD), + + is_numeric: !!ri.WordIsNumeric(), + in_dictionary: !!ri.WordIsFromDictionary(), + direction: enumToString(wordDir, 'DIR'), + language: ri.WordRecognitionLanguage(), + + is_bold: fontInfo.is_bold, + is_italic: fontInfo.is_italic, + is_underlined: fontInfo.is_underlined, + is_monospace: fontInfo.is_monospace, + is_serif: fontInfo.is_serif, + is_smallcaps: fontInfo.is_smallcaps, + font_size: fontInfo.pointsize, + font_id: fontInfo.font_id, + font_name: fontInfo.font_name + }; + var wc = new Module.WordChoiceIterator(ri); + do { + word.choices.push({ + text: wc.GetUTF8Text(), + confidence: wc.Confidence() + }); + } while (wc.Next()); + Module.destroy(wc); + textline.words.push(word); + } + + var image = null; + // var pix = ri.GetBinaryImage(Module.RIL_SYMBOL) + // var image = pix2array(pix); + // // for some reason it seems that things stop working if you destroy pics + // Module._pixDestroy(Module.getPointer(pix)); + if (ri.IsAtBeginningOf(Module.RIL_SYMBOL)) { + symbol = { + choices: [], + image: image, + + text: ri.GetUTF8Text(Module.RIL_SYMBOL), + confidence: ri.Confidence(Module.RIL_SYMBOL), + baseline: ri.getBaseline(Module.RIL_SYMBOL), + bbox: ri.getBoundingBox(Module.RIL_SYMBOL), + + is_superscript: !!ri.SymbolIsSuperscript(), + is_subscript: !!ri.SymbolIsSubscript(), + is_dropcap: !!ri.SymbolIsDropcap() + }; + word.symbols.push(symbol); + var ci = new Module.ChoiceIterator(ri); + do { + symbol.choices.push({ + text: ci.GetUTF8Text(), + confidence: ci.Confidence() + }); + } while (ci.Next()); + Module.destroy(ci); + } + } while (ri.Next(Module.RIL_SYMBOL)); + Module.destroy(ri); + + return { + text: base.GetUTF8Text(), + html: deindent(base.GetHOCRText()), + + confidence: base.MeanTextConf(), + + blocks: blocks, + + psm: enumToString(base.GetPageSegMode(), 'PSM'), + oem: enumToString(base.oem(), 'OEM'), + version: base.Version() + }; +}; + +},{}],46:[function(require,module,exports){ +module.exports={"afr": 1079573, "ara": 1701536, "aze": 1420865, "bel": 1276820, "ben": 6772012, "bul": 1605615, "cat": 1652368, "ces": 1035441, "chi_sim": 17710414, "chi_tra": 24717749, "chr": 320649, "dan-frak": 677656, "dan": 1972936, "deu-frak": 822644, "deu": 991656, "ell": 859719, "eng": 9453554, "enm": 619254, "epo": 1241212, "equ": 821130, "est": 1905040, "eus": 1641190, "fin": 979418, "fra": 1376221, "frk": 5912963, "frm": 5147082, "glg": 1674938, "grc": 3012615, "heb": 1051501, "hin": 6590065, "hrv": 1926995, "hun": 3074473, "ind": 1874776, "isl": 1634041, "ita": 948593, "ita_old": 3436571, "jpn": 13507168, "kan": 4390317, "kor": 5353098, "lav": 1843944, "lit": 1779240, "mal": 5966263, "meme": 88453, "mkd": 1163087, "mlt": 1463001, "msa": 1665427, "nld": 1134708, "nor": 2191610, "osd": 4274649, "pol": 7024662, "por": 909359, "ron": 915680, "rus": 5969957, "slk-frak": 289885, "slk": 2217342, "slv": 1611338, "spa": 883170, "spa_old": 5647453, "sqi": 1667041, "srp": 1770244, "swa": 757916, "swe": 2451917, "tam": 3498763, "tel": 5795246, "tgl": 1496256, "tha": 3811136, "tur": 3563264, "ukr": 937566, "vie": 2195922} +},{}],47:[function(require,module,exports){ +'use strict'; + +var latestJob; +var Module; +var base; +var adapter = {}; + +function dispatchHandlers(packet, send) { + function respond(status, data) { + send({ 
+ jobId: packet.jobId, + status: status, + data: data + }); + } + respond.resolve = respond.bind(this, 'resolve'); + respond.reject = respond.bind(this, 'reject'); + respond.progress = respond.bind(this, 'progress'); + + latestJob = respond; + + if (packet.action === 'recognize') { + handleRecognize(packet.payload, respond); + } else if (packet.action === 'detect') { + handleDetect(packet.payload, respond); + } +} +exports.dispatchHandlers = dispatchHandlers; + +exports.setAdapter = function setAdapter(impl) { + adapter = impl; +}; + +function handleInit(req, res) { + if (!Module) { + var Core = adapter.getCore(req, res); + + res.progress({ status: 'initializing tesseract api' }); + Module = Core({ + TOTAL_MEMORY: req.memory, + TesseractProgress: function TesseractProgress(percent) { + latestJob.progress({ status: 'recognizing text', progress: Math.max(0, (percent - 30) / 70) }); + }, + onRuntimeInitialized: function onRuntimeInitialized() {} + }); + Module.FS_createPath("/", "tessdata", true, true); + base = new Module.TessBaseAPI(); + res.progress({ status: 'initialized tesseract api' }); + } +} + +var dump = require('./dump.js'); +var desaturate = require('./desaturate.js'); + +function setImage(Module, base, image) { + var imgbin = desaturate(image), + width = image.width, + height = image.height; + + var ptr = Module.allocate(imgbin, 'i8', Module.ALLOC_NORMAL); + base.SetImage(Module.wrapPointer(ptr), width, height, 1, width); + base.SetRectangle(0, 0, width, height); + return ptr; +} + +function loadLanguage(req, res, cb) { + var lang = req.options.lang; + + if (!Module._loadedLanguages) Module._loadedLanguages = {}; + if (lang in Module._loadedLanguages) return cb(); + + adapter.getLanguageData(req, res, function (data) { + Module.FS_createDataFile('tessdata', lang + ".traineddata", data, true, false); + res.progress({ status: 'loaded ' + lang + '.traineddata' }); + Module._loadedLanguages[lang] = true; + cb(); + }); +} + +function handleRecognize(req, res) { + handleInit(req, res); + + loadLanguage(req, res, function () { + var lang = req.options.lang; + + base.Init(null, lang); + res.progress({ status: 'initialized with language' }); + + var ptr = setImage(Module, base, req.image); + base.Recognize(null); + + var result = dump(Module, base); + + base.End(); + Module._free(ptr); + + res.resolve(result); + }); +} + +function handleDetect(req, res) { + handleInit(req, res); + req.options.lang = 'osd'; + loadLanguage(req, res, function () { + + base.Init(null, 'osd'); + base.SetPageSegMode(Module.PSM_OSD_ONLY); + + var ptr = setImage(Module, base, req.image); + + var results = new Module.OSResults(); + var success = base.DetectOS(results); + if (!success) { + base.End(); + Module._free(ptr); + res.reject("failed to detect os"); + } else { + var charset = results.get_unicharset(); + + var best = results.get_best_result(); + var oid = best.get_orientation_id(), + sid = best.get_script_id(); + + var result = { + tesseract_script_id: sid, + script: charset.get_script_from_script_id(sid), + script_confidence: best.get_sconfidence(), + orientation_degrees: [0, 270, 180, 90][oid], + orientation_confidence: best.get_oconfidence() + }; + + base.End(); + Module._free(ptr); + + res.resolve(result); + } + }); +} + +},{"./desaturate.js":44,"./dump.js":45}]},{},[43]); diff --git a/tesseract_lang_list.md b/docs/tesseract_lang_list.md similarity index 100% rename from tesseract_lang_list.md rename to docs/tesseract_lang_list.md diff --git a/tesseract_parameters.md b/docs/tesseract_parameters.md 
similarity index 100% rename from tesseract_parameters.md rename to docs/tesseract_parameters.md diff --git a/examples/file-input/demo.html b/examples/file-input/demo.html new file mode 100644 index 0000000..5ddc52a --- /dev/null +++ b/examples/file-input/demo.html @@ -0,0 +1,5 @@ + + + + + \ No newline at end of file diff --git a/examples/node/test.js b/examples/node/test.js new file mode 100644 index 0000000..ba4e144 --- /dev/null +++ b/examples/node/test.js @@ -0,0 +1,23 @@ +var Tesseract = require('./src/index.js') +global.Tesseract = Tesseract; + +// Tesseract.recognize('yolop.png', { +// lang: 'eng' +// }).progress(function(info){ +// console.log('--', info) +// }) +// .then(function(data){ +// console.log('--', data) +// }) + + +// Tesseract.recognize('cosmic.jpg', { +// lang: 'eng' +// }) +Tesseract.detect('cosmic.jpg') +.progress(function(info){ + console.log(info) +}) +.then(function(data){ + console.log('done', data) +}) \ No newline at end of file diff --git a/index.html b/index.html deleted file mode 100644 index 5d8d42e..0000000 --- a/index.html +++ /dev/null @@ -1,30 +0,0 @@ - - - \ No newline at end of file diff --git a/package.json b/package.json index def39d0..000e12d 100644 --- a/package.json +++ b/package.json @@ -2,34 +2,31 @@ "name": "tesseract.js", "version": "1.0.0", "description": "", - "main": "Tesseract.js", + "main": "index.js", "scripts": { - "start": "node devServer.js", - "build": "webpack --config webpack.config.prod.js" + "test": "echo \"Error: no test specified\" & exit 1", + "start": "watchify src/index.js -o dist/tesseract.js --standalone Tesseract & watchify src/browser/worker.js -o dist/worker.js & http-server -p 7355", + "build": "browserify src/index.js -t [ babelify --presets [ es2015 ] ] -o dist/tesseract.js --standalone Tesseract && browserify src/browser/worker.js -t [ babelify --presets [ es2015 ] ] -o dist/worker.js" }, - "dependencies": { - "level-js": "^2.1.6", - "pako": "^0.2.7", - "tesseract.js-core": "^1.0.0" - }, - "devDependencies": { - "babel": "^6.5.2", - "babel-core": "^6.7.0", - "babel-loader": "^6.2.4", - "express": "^4.13.4", - "webpack": "^1.13.0", - "webpack-dev-middleware": "^1.5.1", - "babel-preset-stage-1": "^6.5.0", - "babel-preset-es2015": "^6.6.0" - }, - "repository": { - "type": "git", - "url": "https://github.com/naptha/tesseract.js.git" + "browser": { + "./src/node/index.js": "./src/browser/index.js" }, "author": "", - "license": "MIT", - "bugs": { - "url": "https://github.com/naptha/tesseract.js/issues" + "license": "ISC", + "devDependencies": { + "babel-preset-es2015": "^6.16.0", + "babel-preset-react": "^6.16.0", + "babelify": "^7.3.0", + "browserify": "^13.1.0", + "http-server": "^0.9.0", + "watchify": "^3.7.0" }, - "homepage": "https://github.com/naptha/tesseract.js" + "dependencies": { + "file-type": "^3.8.0", + "jpeg-js": "^0.2.0", + "level-js": "^2.2.4", + "pako": "^1.0.3", + "png.js": "^0.2.1", + "tesseract.js-core": "^1.0.2" + } } diff --git a/src/browser/index.js b/src/browser/index.js index 39071d2..9702741 100644 --- a/src/browser/index.js +++ b/src/browser/index.js @@ -1,106 +1,64 @@ -var coreUrl = 'https://cdn.rawgit.com/naptha/tesseract.js-core/master/index.js', - workerUrl = 'https://cdn.rawgit.com/naptha/tesseract.js/8b915dc/dist/tesseract.worker.js', - langUrl = 'https://cdn.rawgit.com/naptha/tessdata/gh-pages/3.02/', - worker; - -function recognize(image, options){ - if(!worker) worker = createWorker( Tesseract.coreUrl, Tesseract.workerUrl, Tesseract.langUrl ) - return worker.recognize(image, options) 
+exports.defaultOptions = { + langPath: 'https://cdn.rawgit.com/naptha/tessdata/gh-pages/3.02/', + workerPath: 'dist/worker.js', + tesseractPath: 'https://cdn.rawgit.com/naptha/tesseract.js-core/0.1.0/index.js', } -function detect(image){ - if(!worker) worker = createWorker( Tesseract.coreUrl, Tesseract.workerUrl, Tesseract.langUrl ) - return worker.detect(image) +exports.spawnWorker = function spawnWorker(instance, workerOptions){ + var worker = new Worker(workerOptions.workerPath) + worker.onmessage = function(e){ + instance._recv(e.data) + } + return worker } -function createWorker(coreUrl=Tesseract.coreUrl, workerUrl=Tesseract.workerUrl, langUrl=Tesseract.langUrl){ - - var blob = new Blob([`importScripts('${coreUrl}'); - importScripts('${workerUrl}');`]) - - var worker = new Worker(window.URL.createObjectURL(blob)); - - var bigworker = false - var jobCounter = 0 - var handlers = {} - - function runAsync(action, args){ - - var jobId = jobCounter++ - handlers[jobId] = {} - - var waitingCount = 0 - Object.getOwnPropertyNames(args) - .filter(name => typeof args[name] === 'function') - .forEach(name => { - waitingCount++ - args[name](value => { - args[name] = value - if(--waitingCount == 0) worker.postMessage({jobId, action, args}) - }) - }) - - if(waitingCount == 0) worker.postMessage({jobId, action, args}) - return { - then (f){ handlers[jobId].result = f; return this}, - error (f){ handlers[jobId].error = f; return this}, - progress(f){ handlers[jobId].progress = f; return this} - } - } - - worker.onmessage = function(e){ - var {jobId, progress, error, result} = e.data - var handler = handlers[jobId] - if(progress && handler.progress) handler.progress(progress); - if(error && handler.error) handler.error(error); - if(result && handler.result) handler.result(result); - } - - function convertToImageData(image){ - if(image.match && image.match(/^https?:\/\//)) { - return function thunk(cb){ - var img = new Image() - img.src = image - img.onload = () => cb(convertToImageData(img)) - } - } - - if(typeof image === 'string') image = document.querySelector(image) - if(image.getContext) image = image.getContext('2d'); - else if(image.tagName == "IMG" || image.tagName == "VIDEO"){ - var c = document.createElement('canvas'); - c.width = image.naturalWidth || image.videoWidth; - c.height = image.naturalHeight || image.videoHeight; - var ctx = c.getContext('2d'); - ctx.drawImage(image, 0, 0); - image = ctx; - } - if(image.getImageData) image = image.getImageData(0, 0, image.canvas.width, image.canvas.height); - return image - } - - runAsync('init', {mem: (1<<24) * 6, langUrl}) - - return { - detect(image){ - return runAsync('detect', {image: convertToImageData(image)}) - }, - - recognize(image, options='eng'){ - - if (typeof options === 'string') options = {lang: options}; - else options.lang = options.lang || 'eng'; - - if (!bigworker && ['chi_sim', 'chi_tra', 'jpn'].indexOf(options.lang) != -1){ - runAsync('init', {mem: (1<<24) * 10, langUrl}) - bigworker = true - } +exports.terminateWorker = function(instance){ + instance.worker.terminate() +} - return runAsync('recognize', {options, image: convertToImageData(image)}) - } - } +exports.sendPacket = function sendPacket(instance, packet){ + loadImage(packet.payload.image, function(img){ + packet.payload.image = img + instance.worker.postMessage(packet) + }) } -var Tesseract = {coreUrl, workerUrl, langUrl, recognize, detect, createWorker} -module.exports = Tesseract \ No newline at end of file +function loadImage(image, cb){ + if(typeof image === 
'string'){ + if(/^\#/.test(image)){ + // element css selector + return loadImage(document.querySelector(image), cb) + }else{ + // url or path + var im = new Image + im.src = image; + im.onload = e => loadImage(im, cb); + return + } + }else if(image instanceof File){ + // files + var fr = new FileReader() + fr.onload = e => loadImage(fr.result, cb); + fr.readAsDataURL(image) + return + }else if(image instanceof Blob){ + return loadImage(URL.createObjectURL(image), cb) + }else if(image.getContext){ + // canvas element + return loadImage(image.getContext('2d'), cb) + }else if(image.tagName == "IMG" || image.tagName == "VIDEO"){ + // image element or video element + var c = document.createElement('canvas'); + c.width = image.naturalWidth || image.videoWidth; + c.height = image.naturalHeight || image.videoHeight; + var ctx = c.getContext('2d'); + ctx.drawImage(image, 0, 0); + return loadImage(ctx, cb) + }else if(image.getImageData){ + // canvas context + var data = image.getImageData(0, 0, image.canvas.width, image.canvas.height); + return loadImage(data, cb) + } + cb(image) +} diff --git a/src/browser/lang.js b/src/browser/lang.js new file mode 100644 index 0000000..00a6078 --- /dev/null +++ b/src/browser/lang.js @@ -0,0 +1,61 @@ +var leveljs = require('level-js') +var db = typeof indexedDB === 'undefined' ? { open: (_, cb) => cb(true) } : leveljs('./tessdata2') + +var langdata = require('../common/langdata.json') + +module.exports = function getLanguageData(req, res, cb){ + var lang = req.options.lang; + + function saveDataFile(data){ + db.put(lang, data, err => console.log('cached', lang, err)) + cb(data) + } + + db.open({ compression: false }, err => { + if (err) return fetchLanguageData(req, res, cb); + db.get(lang, (err, data) => { + if (err) return fetchLanguageData(req, res, saveDataFile); + res.progress({ status: 'found in cache ' + lang + '.traineddata' }) + cb(data) + }) + }) +} + + +var ungzip = require('pako').ungzip; + +function fetchLanguageData(req, res, cb){ + var lang = req.options.lang; + var langfile = lang + '.traineddata.gz'; + var url = req.workerOptions.langPath + langfile; + + var xhr = new XMLHttpRequest(); + xhr.responseType = 'arraybuffer'; + xhr.open('GET', url, true); + xhr.onerror = e => { + xhr.onprogress = xhr.onload = null + cb(xhr, null) + } + xhr.onprogress = e => + res.progress({ + status: 'downloading ' + langfile, + loaded: e.loaded, + progress: Math.min(1, e.loaded / langdata[lang]) + }); + + xhr.onload = e => { + if (!(xhr.status == 200 || (xhr.status == 0 && xhr.response))) return res.reject('Error downloading language ' + url); + res.progress({ status: 'unzipping ' + langfile }) + + // in case the gzips are already ungzipped or extra gzipped + var response = new Uint8Array(xhr.response) + try { + while(response[0] == 0x1f && response[1] == 0x8b) response = ungzip(response); + } catch (err) { + return res.reject('Error unzipping language file ' + langfile + '\n' + err.message) + } + + cb(response) + } + xhr.send() +} diff --git a/src/browser/worker.js b/src/browser/worker.js new file mode 100644 index 0000000..58d8bb6 --- /dev/null +++ b/src/browser/worker.js @@ -0,0 +1,21 @@ +"use strict"; + +var workerUtils = require('../common/worker.js') + +global.addEventListener('message', function(e){ + var packet = e.data; + workerUtils.dispatchHandlers(packet, obj => postMessage(obj)) +}) + +exports.getLanguageData = require('./lang.js') + +exports.getCore = function(req, res){ + if(!global.TesseractCore){ + res.progress({ status: 'loading tesseract core' }) + 
importScripts(req.workerOptions.tesseractPath) + res.progress({ status: 'loaded tesseract core' }) + } + return TesseractCore +} + +workerUtils.setAdapter(module.exports); diff --git a/src/shared/circularize.js b/src/common/circularize.js similarity index 97% rename from src/shared/circularize.js rename to src/common/circularize.js index 4ad180f..91d3ab2 100644 --- a/src/shared/circularize.js +++ b/src/common/circularize.js @@ -1,4 +1,4 @@ -export default function circularize(page){ +module.exports = function circularize(page){ page.paragraphs = [] page.lines = [] page.words = [] diff --git a/src/common/desaturate.js b/src/common/desaturate.js new file mode 100644 index 0000000..8b7a909 --- /dev/null +++ b/src/common/desaturate.js @@ -0,0 +1,23 @@ +module.exports = function desaturate(image){ + var width, height; + if(image.data){ + var src = image.data; + width = image.width, height = image.height; + var dst = new Uint8Array(width * height); + var srcLength = src.length | 0, srcLength_16 = (srcLength - 16) | 0; + + for (var i = 0, j = 0; i <= srcLength_16; i += 16, j += 4) { + // convert to grayscale 4 pixels at a time; everything with alpha gets put in front of 50% gray + dst[j] = (((src[i] * 77 + src[i+1] * 151 + src[i+2] * 28) * src[i+3]) + ((255-src[i+3]) << 15) + 32768) >> 16 + dst[j+1] = (((src[i+4] * 77 + src[i+5] * 151 + src[i+6] * 28) * src[i+7]) + ((255-src[i+7]) << 15) + 32768) >> 16 + dst[j+2] = (((src[i+8] * 77 + src[i+9] * 151 + src[i+10] * 28) * src[i+11]) + ((255-src[i+11]) << 15) + 32768) >> 16 + dst[j+3] = (((src[i+12] * 77 + src[i+13] * 151 + src[i+14] * 28) * src[i+15]) + ((255-src[i+15]) << 15) + 32768) >> 16 + } + for (; i < srcLength; i += 4, ++j) //finish up + dst[j] = (((src[i] * 77 + src[i+1] * 151 + src[i+2] * 28) * src[i+3]) + ((255-src[i+3]) << 15) + 32768) >> 16 + image = dst; + } else { + throw 'Expected ImageData' + } + return image +} \ No newline at end of file diff --git a/src/common/dump.js b/src/common/dump.js new file mode 100644 index 0000000..b04ec09 --- /dev/null +++ b/src/common/dump.js @@ -0,0 +1,161 @@ +function deindent(html){ + var lines = html.split('\n') + if(lines[0].substring(0,2) === "  "){ + for (var i = 0; i < lines.length; i++) { + if (lines[i].substring(0,2) === "  ") { + lines[i] = lines[i].slice(2) + } + }; + } + return lines.join('\n') +} + +module.exports = function DumpLiterallyEverything(Module, base){ + var ri = base.GetIterator(); + var blocks = []; + var block, para, textline, word, symbol; + + function enumToString(value, prefix){ + return (Object.keys(Module) + .filter(function(e){ return e.substr(0, prefix.length + 1) == prefix + '_' }) + .filter(function(e){ return Module[e] === value }) + .map(function(e){ return e.slice(prefix.length + 1) })[0]) + } + + ri.Begin() + do { + if(ri.IsAtBeginningOf(Module.RIL_BLOCK)){ + var poly = ri.BlockPolygon(); + var polygon = null; + // BlockPolygon() returns null when automatic page segmentation is off + if(Module.getPointer(poly) > 0){ + var n = poly.get_n(), + px = poly.get_x(), + py = poly.get_y(), + polygon = []; + for(var i = 0; i < n; i++){ + polygon.push([px.getValue(i), py.getValue(i)]); + } + Module._ptaDestroy(Module.getPointer(poly)); + } + + block = { + paragraphs: [], + + text: ri.GetUTF8Text(Module.RIL_BLOCK), + confidence: ri.Confidence(Module.RIL_BLOCK), + baseline: ri.getBaseline(Module.RIL_BLOCK), + bbox: ri.getBoundingBox(Module.RIL_BLOCK), + + blocktype: enumToString(ri.BlockType(), 'PT'), + polygon: polygon + } + blocks.push(block) + } +
if(ri.IsAtBeginningOf(Module.RIL_PARA)){ + para = { + lines: [], + + text: ri.GetUTF8Text(Module.RIL_PARA), + confidence: ri.Confidence(Module.RIL_PARA), + baseline: ri.getBaseline(Module.RIL_PARA), + bbox: ri.getBoundingBox(Module.RIL_PARA), + + is_ltr: !!ri.ParagraphIsLtr() + } + block.paragraphs.push(para) + } + if(ri.IsAtBeginningOf(Module.RIL_TEXTLINE)){ + textline = { + words: [], + + text: ri.GetUTF8Text(Module.RIL_TEXTLINE), + confidence: ri.Confidence(Module.RIL_TEXTLINE), + baseline: ri.getBaseline(Module.RIL_TEXTLINE), + bbox: ri.getBoundingBox(Module.RIL_TEXTLINE) + } + para.lines.push(textline) + } + if(ri.IsAtBeginningOf(Module.RIL_WORD)){ + var fontInfo = ri.getWordFontAttributes(), + wordDir = ri.WordDirection(); + word = { + symbols: [], + choices: [], + + text: ri.GetUTF8Text(Module.RIL_WORD), + confidence: ri.Confidence(Module.RIL_WORD), + baseline: ri.getBaseline(Module.RIL_WORD), + bbox: ri.getBoundingBox(Module.RIL_WORD), + + is_numeric: !!ri.WordIsNumeric(), + in_dictionary: !!ri.WordIsFromDictionary(), + direction: enumToString(wordDir, 'DIR'), + language: ri.WordRecognitionLanguage(), + + is_bold: fontInfo.is_bold, + is_italic: fontInfo.is_italic, + is_underlined: fontInfo.is_underlined, + is_monospace: fontInfo.is_monospace, + is_serif: fontInfo.is_serif, + is_smallcaps: fontInfo.is_smallcaps, + font_size: fontInfo.pointsize, + font_id: fontInfo.font_id, + font_name: fontInfo.font_name, + } + var wc = new Module.WordChoiceIterator(ri); + do { + word.choices.push({ + text: wc.GetUTF8Text(), + confidence: wc.Confidence() + }) + } while (wc.Next()); + Module.destroy(wc) + textline.words.push(word) + } + + var image = null; + // var pix = ri.GetBinaryImage(Module.RIL_SYMBOL) + // var image = pix2array(pix); + // // for some reason it seems that things stop working if you destroy pics + // Module._pixDestroy(Module.getPointer(pix)); + if(ri.IsAtBeginningOf(Module.RIL_SYMBOL)){ + symbol = { + choices: [], + image: image, + + text: ri.GetUTF8Text(Module.RIL_SYMBOL), + confidence: ri.Confidence(Module.RIL_SYMBOL), + baseline: ri.getBaseline(Module.RIL_SYMBOL), + bbox: ri.getBoundingBox(Module.RIL_SYMBOL), + + is_superscript: !!ri.SymbolIsSuperscript(), + is_subscript: !!ri.SymbolIsSubscript(), + is_dropcap: !!ri.SymbolIsDropcap(), + } + word.symbols.push(symbol) + var ci = new Module.ChoiceIterator(ri); + do { + symbol.choices.push({ + text: ci.GetUTF8Text(), + confidence: ci.Confidence() + }) + } while (ci.Next()); + Module.destroy(ci) + } + } while (ri.Next(Module.RIL_SYMBOL)); + Module.destroy(ri) + + return { + text: base.GetUTF8Text(), + html: deindent(base.GetHOCRText()), + + confidence: base.MeanTextConf(), + + blocks: blocks, + + psm: enumToString(base.GetPageSegMode(), 'PSM'), + oem: enumToString(base.oem(), 'OEM'), + version: base.Version(), + } +} \ No newline at end of file diff --git a/src/common/langdata.json b/src/common/langdata.json new file mode 100644 index 0000000..bc51489 --- /dev/null +++ b/src/common/langdata.json @@ -0,0 +1 @@ +{"afr": 1079573, "ara": 1701536, "aze": 1420865, "bel": 1276820, "ben": 6772012, "bul": 1605615, "cat": 1652368, "ces": 1035441, "chi_sim": 17710414, "chi_tra": 24717749, "chr": 320649, "dan-frak": 677656, "dan": 1972936, "deu-frak": 822644, "deu": 991656, "ell": 859719, "eng": 9453554, "enm": 619254, "epo": 1241212, "equ": 821130, "est": 1905040, "eus": 1641190, "fin": 979418, "fra": 1376221, "frk": 5912963, "frm": 5147082, "glg": 1674938, "grc": 3012615, "heb": 1051501, "hin": 6590065, "hrv": 1926995, "hun": 3074473, 
"ind": 1874776, "isl": 1634041, "ita": 948593, "ita_old": 3436571, "jpn": 13507168, "kan": 4390317, "kor": 5353098, "lav": 1843944, "lit": 1779240, "mal": 5966263, "meme": 88453, "mkd": 1163087, "mlt": 1463001, "msa": 1665427, "nld": 1134708, "nor": 2191610, "osd": 4274649, "pol": 7024662, "por": 909359, "ron": 915680, "rus": 5969957, "slk-frak": 289885, "slk": 2217342, "slv": 1611338, "spa": 883170, "spa_old": 5647453, "sqi": 1667041, "srp": 1770244, "swa": 757916, "swe": 2451917, "tam": 3498763, "tel": 5795246, "tgl": 1496256, "tha": 3811136, "tur": 3563264, "ukr": 937566, "vie": 2195922} \ No newline at end of file diff --git a/src/common/worker.js b/src/common/worker.js new file mode 100644 index 0000000..7b4ac3f --- /dev/null +++ b/src/common/worker.js @@ -0,0 +1,143 @@ +var latestJob; +var Module; +var base; +var adapter = {}; + +function dispatchHandlers(packet, send){ + function respond(status, data){ + send({ + jobId: packet.jobId, + status: status, + data: data + }) + } + respond.resolve = respond.bind(this, 'resolve') + respond.reject = respond.bind(this, 'reject') + respond.progress = respond.bind(this, 'progress') + + latestJob = respond; + + if(packet.action === 'recognize'){ + handleRecognize(packet.payload, respond) + }else if(packet.action === 'detect'){ + handleDetect(packet.payload, respond) + } +} +exports.dispatchHandlers = dispatchHandlers; + +exports.setAdapter = function setAdapter(impl){ + adapter = impl; +} + + +function handleInit(req, res){ + if(!Module){ + var Core = adapter.getCore(req, res); + + res.progress({ status: 'initializing tesseract api' }) + Module = Core({ + TOTAL_MEMORY: req.memory, + TesseractProgress(percent){ + latestJob.progress({ status: 'recognizing text', progress: Math.max(0, (percent-30)/70) }) + }, + onRuntimeInitialized() {} + }) + Module.FS_createPath("/", "tessdata", true, true) + base = new Module.TessBaseAPI() + res.progress({ status: 'initialized tesseract api' }) + } +} + + + +var dump = require('./dump.js') +var desaturate = require('./desaturate.js') + + +function setImage(Module, base, image){ + var imgbin = desaturate(image), + width = image.width, + height = image.height; + + var ptr = Module.allocate(imgbin, 'i8', Module.ALLOC_NORMAL); + base.SetImage(Module.wrapPointer(ptr), width, height, 1, width); + base.SetRectangle(0, 0, width, height) + return ptr; +} + +function loadLanguage(req, res, cb){ + var lang = req.options.lang; + + if(!Module._loadedLanguages) Module._loadedLanguages = {}; + if(lang in Module._loadedLanguages) return cb(); + + adapter.getLanguageData(req, res, function(data){ + Module.FS_createDataFile('tessdata', lang + ".traineddata", data, true, false); + res.progress({ status: 'loaded ' + lang + '.traineddata' }) + Module._loadedLanguages[lang] = true; + cb() + }) +} + + + +function handleRecognize(req, res){ + handleInit(req, res) + + loadLanguage(req, res, function(){ + var lang = req.options.lang; + + base.Init(null, lang) + res.progress({ status: 'initialized with language' }) + + var ptr = setImage(Module, base, req.image); + base.Recognize(null) + + var result = dump(Module, base) + + base.End(); + Module._free(ptr); + + res.resolve(result); + }) +} + + +function handleDetect(req, res){ + handleInit(req, res) + req.options.lang = 'osd'; + loadLanguage(req, res, function(){ + + base.Init(null, 'osd') + base.SetPageSegMode(Module.PSM_OSD_ONLY) + + var ptr = setImage(Module, base, req.image); + + var results = new Module.OSResults(); + var success = base.DetectOS(results); + if(!success){ + 
base.End(); + Module._free(ptr); + res.reject("failed to detect os") + } else { + var charset = results.get_unicharset() + + var best = results.get_best_result() + var oid = best.get_orientation_id(), + sid = best.get_script_id(); + + var result = { + tesseract_script_id: sid, + script: charset.get_script_from_script_id(sid), + script_confidence: best.get_sconfidence(), + orientation_degrees: [0, 270, 180, 90][oid], + orientation_confidence: best.get_oconfidence() + } + + base.End(); + Module._free(ptr); + + res.resolve(result) + } + }) +} diff --git a/src/index.js b/src/index.js new file mode 100644 index 0000000..ecf5239 --- /dev/null +++ b/src/index.js @@ -0,0 +1,137 @@ +"use strict"; + +var adapter = require('./node/index.js') + +function createWorker(workerOptions){ + return new TesseractWorker(workerOptions) +} + +class TesseractWorker { + constructor(workerOptions){ + this.worker = null; + this.workerOptions = workerOptions; + this._currentJob = null; + this._queue = [] + } + + recognize(image, options){ + return this._delay(job => { + options = options || {} + options.lang = options.lang || 'eng'; + job._send('recognize', { image: image, options: options, workerOptions: this.workerOptions }) + }) + } + detect(image, options){ + options = options || {} + return this._delay(job => { + job._send('detect', { image: image, options: options, workerOptions: this.workerOptions }) + }) + } + + terminate(){ + if(this.worker) adapter.terminateWorker(this); + this.worker = null; + } + + _delay(fn){ + if(!this.worker) this.worker = adapter.spawnWorker(this, this.workerOptions); + + var job = new TesseractJob(this); + this._queue.push(e => { + this._queue.shift() + this._currentJob = job; + fn(job) + }) + if(!this._currentJob) this._dequeue(); + return job + } + + _dequeue(){ + this._currentJob = null; + if(this._queue.length > 0){ + this._queue[0]() + } + } + + _recv(packet){ + if(this._currentJob.id === packet.jobId){ + this._currentJob._handle(packet) + }else{ + console.warn('Job ID ' + packet.jobId + ' not known.') + } + } +} + +var jobCounter = 0; + +class TesseractJob { + constructor(instance){ + this.id = 'Job-' + (++jobCounter) + '-' + Math.random().toString(16).slice(3, 8) + + this._instance = instance; + this._resolve = [] + this._reject = [] + this._progress = [] + } + + then(resolve, reject){ + if(this._resolve.push){ + this._resolve.push(resolve) + }else{ + resolve(this._resolve) + } + + if(reject) this.catch(reject); + return this; + } + catch(reject){ + if(this._reject.push){ + this._reject.push(reject) + }else{ + reject(this._reject) + } + return this; + } + progress(fn){ + this._progress.push(fn) + return this; + } + _send(action, payload){ + adapter.sendPacket(this._instance, { + jobId: this.id, + action: action, + payload: payload + }) + } + + _handle(packet){ + var data = packet.data; + if(packet.status === 'resolve'){ + if(this._resolve.length === 0) console.debug(data); + this._resolve.forEach(fn => { + var ret = fn(data); + if(ret && typeof ret.then == 'function'){ + console.warn('TesseractJob instances do not chain like ES6 Promises. 
To convert it into a real promise, use Promise.resolve.') + } + }) + this._resolve = data; + this._instance._dequeue() + }else if(packet.status === 'reject'){ + if(this._reject.length === 0) console.error(data); + this._reject.forEach(fn => fn(data)) + this._reject = data; + this._instance._dequeue() + }else if(packet.status === 'progress'){ + this._progress.forEach(fn => fn(data)) + }else{ + console.warn('Message type unknown', packet.status) + } + } +} + + +var DefaultTesseract = createWorker(adapter.defaultOptions) +DefaultTesseract.createWorker = createWorker; + +module.exports = DefaultTesseract + diff --git a/src/node/index.js b/src/node/index.js new file mode 100644 index 0000000..17e4db8 --- /dev/null +++ b/src/node/index.js @@ -0,0 +1,83 @@ +var path = require('path') +exports.defaultOptions = { + workerPath: path.join(__dirname, 'worker.js'), + langPath: 'http://cdn.rawgit.com/naptha/tessdata/gh-pages/3.02/', +} + +var fork = require('child_process').fork; +var fs = require('fs') + +exports.spawnWorker = function spawnWorker(instance, workerOptions){ + var cp = fork(workerOptions.workerPath); + cp.on('message', function(packet){ + instance._recv(packet) + }) + return cp; +} + +exports.terminateWorker = function(instance){ + instance.worker.kill() +} + +exports.sendPacket = function sendPacket(instance, packet){ + loadImage(packet.payload.image, function(img){ + packet.payload.image = img + instance.worker.send(packet) + }) +} + + +function loadImage(image, cb){ + if(typeof image === 'string'){ + fs.readFile(image, function(err, buffer){ + loadImage(buffer, cb) + }) + return + }else if(image instanceof Buffer){ + var fileType = require('file-type'); + var mime = fileType(image).mime + + + if(mime === 'image/png'){ + var PNGReader = require('png.js'); + var reader = new PNGReader(image); + reader.parse(function(err, png){ + if (err) throw err; + + var image = { + width: png.getWidth(), + height: png.getHeight() + } + image.data = new Uint8Array(image.width * image.height * 4) + for(var j = 0; j < image.height; j++){ + for(var i = 0; i < image.width; i++){ + var offset = 4 * (i + j * image.width), + pix = png.getPixel(i, j); + + image.data[offset] = pix[0] + image.data[offset + 1] = pix[1] + image.data[offset + 2] = pix[2] + image.data[offset + 3] = pix[3]; + } + } + // console.log(image) + loadImage(image, cb) + }); + return + }else if(mime === 'image/jpeg'){ + var jpeg = require('jpeg-js'); + loadImage(jpeg.decode(image), cb) + return + } + + // TODO: support for TIFF, NetPBM, BMP, etc. 
+ } + + // node uses json.stringify for ipc which means we need to turn + // fancy arrays into raw arrays + if(image && image.data && image.data.length && !Array.isArray(image.data)){ + image.data = Array.from(image.data) + return loadImage(image, cb) + } + cb(image) +} \ No newline at end of file diff --git a/src/node/lang.js b/src/node/lang.js new file mode 100644 index 0000000..3046990 --- /dev/null +++ b/src/node/lang.js @@ -0,0 +1,36 @@ +var http = require("http"), + zlib = require("zlib"), + fs = require("fs"), + path = require("path"); + +var langdata = require('../common/langdata.json') + +function getLanguageData(req, res, cb){ + var lang = req.options.lang; + var langfile = lang + '.traineddata.gz'; + var url = req.workerOptions.langPath + langfile; + + fs.readFile(lang + '.traineddata', function (err, data) { + if(!err) return cb(new Uint8Array(data)); + + http.get(url, function(stream){ + var received_bytes = 0; + stream.on('data', function(chunk) { + received_bytes += chunk.length; + res.progress({ + status: 'downloading ' + langfile, + loaded: received_bytes, + progress: Math.min(1, received_bytes / langdata[lang]) + }); + + }); + + var gunzip = zlib.createGunzip(); + stream.pipe(gunzip).pipe(fs.createWriteStream(lang + '.traineddata')) + gunzip.on('end', function(){ getLanguageData(req, stream, cb) }) + }) + }); +} + + +module.exports = getLanguageData; \ No newline at end of file diff --git a/src/node/worker.js b/src/node/worker.js new file mode 100644 index 0000000..e05be34 --- /dev/null +++ b/src/node/worker.js @@ -0,0 +1,22 @@ +"use strict"; + +var workerUtils = require('../common/worker.js') + +process.on('message', function(packet){ + workerUtils.dispatchHandlers(packet, obj => process.send(obj)) +}) + +exports.getLanguageData = require('./lang.js') + + +var TesseractCore; +exports.getCore = function(req, res){ + if(!TesseractCore){ + res.progress({ status: 'loading tesseract core' }) + TesseractCore = require('tesseract.js-core') + res.progress({ status: 'loaded tesseract core' }) + } + return TesseractCore +} + +workerUtils.setAdapter(module.exports); diff --git a/src/shared/desaturate.js b/src/shared/desaturate.js deleted file mode 100644 index d027e68..0000000 --- a/src/shared/desaturate.js +++ /dev/null @@ -1,26 +0,0 @@ -export default function desaturate(image){ - var width, height; - if(image.data){ - var src = image.data; - width = image.width, height = image.height; - var dst = new Uint8Array(width * height); - var srcLength = src.length | 0, srcLength_16 = (srcLength - 16) | 0; - - for (var i = 0, j = 0; i <= srcLength_16; i += 16, j += 4) { - // convert to grayscale 4 pixels at a time; eveything with alpha gets put in front of 50% gray - dst[j] = (((src[i] * 77 + src[i+1] * 151 + src[i+2] * 28) * src[i+3]) + ((255-src[i+3]) << 15) + 32768) >> 16 - dst[j+1] = (((src[i+4] * 77 + src[i+5] * 151 + src[i+6] * 28) * src[i+7]) + ((255-src[i+7]) << 15) + 32768) >> 16 - dst[j+2] = (((src[i+8] * 77 + src[i+9] * 151 + src[i+10] * 28) * src[i+11]) + ((255-src[i+11]) << 15) + 32768) >> 16 - dst[j+3] = (((src[i+12] * 77 + src[i+13] * 151 + src[i+14] * 28) * src[i+15]) + ((255-src[i+15]) << 15) + 32768) >> 16 - - } - for (; i < srcLength; i += 4, ++j) //finish up - dst[j] = (((src[i] * 77 + src[i+1] * 151 + src[i+2] * 28) * src[i+3]) + ((255-src[i+3]) << 15) + 32768) >> 16 - - image = dst; - } - else { - throw 'Expected ImageData' - } - return image -} \ No newline at end of file diff --git a/src/shared/dump.js b/src/shared/dump.js deleted file mode 100644 index 
6ee4883..0000000 --- a/src/shared/dump.js +++ /dev/null @@ -1,166 +0,0 @@ -function deindent(html){ - var lines = html.split('\n') - if(lines[0].substring(0,2) === " "){ - for (var i = 0; i < lines.length; i++) { - if (lines[i].substring(0,2) === " ") { - lines[i] = lines[i].slice(2) - } - }; - } - return lines.join('\n') -} - -export default function DumpLiterallyEverything(){ - - var {module, base} = self - - var ri = base.GetIterator(); - var blocks = []; - var block, para, textline, word, symbol; - - function enumToString(value, prefix){ - return (Object.keys(module) - .filter(function(e){ return e.substr(0, prefix.length + 1) == prefix + '_' }) - .filter(function(e){ return module[e] === value }) - .map(function(e){ return e.slice(prefix.length + 1) })[0]) - } - - const {RIL_BLOCK, RIL_PARA, RIL_TEXTLINE, RIL_WORD, RIL_SYMBOL} = module - - ri.Begin() - do { - if(ri.IsAtBeginningOf(RIL_BLOCK)){ - var poly = ri.BlockPolygon(); - var polygon = null; - // BlockPolygon() returns null when automatic page segmentation is off - if(module.getPointer(poly) > 0){ - var n = poly.get_n(), - px = poly.get_x(), - py = poly.get_y(), - polygon = []; - for(var i = 0; i < n; i++){ - polygon.push([px.getValue(i), py.getValue(i)]); - } - module._ptaDestroy(module.getPointer(poly)); - } - - block = { - paragraphs: [], - - text: ri.GetUTF8Text(RIL_BLOCK), - confidence: ri.Confidence(RIL_BLOCK), - baseline: ri.getBaseline(RIL_BLOCK), - bbox: ri.getBoundingBox(RIL_BLOCK), - - blocktype: enumToString(ri.BlockType(), 'PT'), - polygon: polygon - } - blocks.push(block) - } - if(ri.IsAtBeginningOf(RIL_PARA)){ - para = { - lines: [], - - text: ri.GetUTF8Text(RIL_PARA), - confidence: ri.Confidence(RIL_PARA), - baseline: ri.getBaseline(RIL_PARA), - bbox: ri.getBoundingBox(RIL_PARA), - - is_ltr: !!ri.ParagraphIsLtr() - } - block.paragraphs.push(para) - } - if(ri.IsAtBeginningOf(RIL_TEXTLINE)){ - textline = { - words: [], - - text: ri.GetUTF8Text(RIL_TEXTLINE), - confidence: ri.Confidence(RIL_TEXTLINE), - baseline: ri.getBaseline(RIL_TEXTLINE), - bbox: ri.getBoundingBox(RIL_TEXTLINE) - } - para.lines.push(textline) - } - if(ri.IsAtBeginningOf(RIL_WORD)){ - var fontInfo = ri.getWordFontAttributes(), - wordDir = ri.WordDirection(); - word = { - symbols: [], - choices: [], - - text: ri.GetUTF8Text(RIL_WORD), - confidence: ri.Confidence(RIL_WORD), - baseline: ri.getBaseline(RIL_WORD), - bbox: ri.getBoundingBox(RIL_WORD), - - is_numeric: !!ri.WordIsNumeric(), - in_dictionary: !!ri.WordIsFromDictionary(), - direction: enumToString(wordDir, 'DIR'), - language: ri.WordRecognitionLanguage(), - - is_bold: fontInfo.is_bold, - is_italic: fontInfo.is_italic, - is_underlined: fontInfo.is_underlined, - is_monospace: fontInfo.is_monospace, - is_serif: fontInfo.is_serif, - is_smallcaps: fontInfo.is_smallcaps, - font_size: fontInfo.pointsize, - font_id: fontInfo.font_id, - font_name: fontInfo.font_name, - } - var wc = new module.WordChoiceIterator(ri); - do { - word.choices.push({ - text: wc.GetUTF8Text(), - confidence: wc.Confidence() - }) - } while (wc.Next()); - module.destroy(wc) - textline.words.push(word) - } - - var image = null; - // var pix = ri.GetBinaryImage(RIL_SYMBOL) - // var image = pix2array(pix); - // // for some reason it seems that things stop working if you destroy pics - // module._pixDestroy(module.getPointer(pix)); - if(ri.IsAtBeginningOf(RIL_SYMBOL)){ - symbol = { - choices: [], - image: image, - - text: ri.GetUTF8Text(RIL_SYMBOL), - confidence: ri.Confidence(RIL_SYMBOL), - baseline: ri.getBaseline(RIL_SYMBOL), - 
bbox: ri.getBoundingBox(RIL_SYMBOL), - - is_superscript: !!ri.SymbolIsSuperscript(), - is_subscript: !!ri.SymbolIsSubscript(), - is_dropcap: !!ri.SymbolIsDropcap(), - } - word.symbols.push(symbol) - var ci = new module.ChoiceIterator(ri); - do { - symbol.choices.push({ - text: ci.GetUTF8Text(), - confidence: ci.Confidence() - }) - } while (ci.Next()); - module.destroy(ci) - } - } while (ri.Next(RIL_SYMBOL)); - module.destroy(ri) - - return { - text: base.GetUTF8Text(), - html: deindent(base.GetHOCRText()), - - confidence: base.MeanTextConf(), - - blocks: blocks, - - psm: enumToString(base.GetPageSegMode(), 'PSM'), - oem: enumToString(base.oem(), 'OEM'), - version: base.Version(), - } -} \ No newline at end of file diff --git a/src/shared/fileSizes.js b/src/shared/fileSizes.js deleted file mode 100644 index 7efb8f8..0000000 --- a/src/shared/fileSizes.js +++ /dev/null @@ -1,2 +0,0 @@ -const fileSizes = {"afr": 1079573, "ara": 1701536, "aze": 1420865, "bel": 1276820, "ben": 6772012, "bul": 1605615, "cat": 1652368, "ces": 1035441, "chi_sim": 17710414, "chi_tra": 24717749, "chr": 320649, "dan-frak": 677656, "dan": 1972936, "deu-frak": 822644, "deu": 991656, "ell": 859719, "eng": 9453554, "enm": 619254, "epo": 1241212, "equ": 821130, "est": 1905040, "eus": 1641190, "fin": 979418, "fra": 1376221, "frk": 5912963, "frm": 5147082, "glg": 1674938, "grc": 3012615, "heb": 1051501, "hin": 6590065, "hrv": 1926995, "hun": 3074473, "ind": 1874776, "isl": 1634041, "ita": 948593, "ita_old": 3436571, "jpn": 13507168, "kan": 4390317, "kor": 5353098, "lav": 1843944, "lit": 1779240, "mal": 5966263, "meme": 88453, "mkd": 1163087, "mlt": 1463001, "msa": 1665427, "nld": 1134708, "nor": 2191610, "osd": 4274649, "pol": 7024662, "por": 909359, "ron": 915680, "rus": 5969957, "slk-frak": 289885, "slk": 2217342, "slv": 1611338, "spa": 883170, "spa_old": 5647453, "sqi": 1667041, "srp": 1770244, "swa": 757916, "swe": 2451917, "tam": 3498763, "tel": 5795246, "tgl": 1496256, "tha": 3811136, "tur": 3563264, "ukr": 937566, "vie": 2195922} -export default fileSizes; \ No newline at end of file diff --git a/src/worker/db.js b/src/worker/db.js deleted file mode 100644 index a33c458..0000000 --- a/src/worker/db.js +++ /dev/null @@ -1,3 +0,0 @@ -import leveljs from 'level-js' -var db = typeof indexedDB === 'undefined' ? 
{ open: (_, cb) => cb(true) } : leveljs('./tessdata') -export default db \ No newline at end of file diff --git a/src/worker/detect.js b/src/worker/detect.js deleted file mode 100644 index 902097f..0000000 --- a/src/worker/detect.js +++ /dev/null @@ -1,53 +0,0 @@ -import desaturate from '../shared/desaturate' -import loadLanguage from './loadLanguage' - -export default function detect(jobId, image, cb){ - var width = image.width, height = image.height; - image = desaturate(image) - - var ptr = self.module.allocate(image, 'i8', self.module.ALLOC_NORMAL); - // console.log('allocated image') - - loadLanguage(jobId, 'osd', err => { - self.module._free(ptr); - cb(err) - }, success => { - self.base.Init(null, 'osd') - self.base.SetPageSegMode(self.module.PSM_OSD_ONLY) - // console.log('loaded language') - - self.base.SetImage(self.module.wrapPointer(ptr), width, height, 1, width) - self.base.SetRectangle(0, 0, width, height) - - var results = new self.module.OSResults(); - var success = self.base.DetectOS(results); - if(!success){ - self.base.End(); - self.module._free(ptr); - cb("failed to detect os") - } - else { - var charset = results.get_unicharset() - // console.log(charset) - // results.print_scores() - - var best = results.get_best_result() - var oid = best.get_orientation_id(), - sid = best.get_script_id(); - // console.log('orientation id', oid, [0, 270, 180, 90][oid], best.get_oconfidence()) - // console.log('script id', sid, charset.get_script_from_script_id(sid), best.get_sconfidence()) - // console.log(best) - - cb(null, { - tesseract_script_id: sid, - script: charset.get_script_from_script_id(sid), - script_confidence: best.get_sconfidence(), - orientation_degrees: [0, 270, 180, 90][oid], - orientation_confidence: best.get_oconfidence() - }) - - self.base.End(); - self.module._free(ptr); - } - }) -} \ No newline at end of file diff --git a/src/worker/index.js b/src/worker/index.js deleted file mode 100644 index 9f4971f..0000000 --- a/src/worker/index.js +++ /dev/null @@ -1,37 +0,0 @@ -import recognize from './recognize' -import detect from './detect' - -var module, base, jobId - -onmessage = function(e) { - var {action, args} = e.data; - jobId = e.data.jobId - - console.log('worker got action', action) - - if(action == 'init'){ - - self.langUrl = args.langUrl - self.module = TesseractCore({ - TOTAL_MEMORY: args.mem, //must be a multiple of 10 megabytes - TesseractProgress(percent){ - postMessage({ jobId, - 'progress': { - 'recognized': Math.max(0,(percent-30)/70) - } - }) - }, - onRuntimeInitialized() {} - }) - self.module.FS_createPath("/","tessdata",true,true) - self.base = new self.module.TessBaseAPI() - - } else if(action === 'recognize'){ - var {image, options} = args - recognize(jobId, image, options, - (error, result) => postMessage({jobId, error: error.message, result})) - } else if(action === 'detect'){ - detect(jobId, args.image, - (error, result) => postMessage({jobId, error: error.message, result})) - } -} \ No newline at end of file diff --git a/src/worker/loadLanguage.js b/src/worker/loadLanguage.js deleted file mode 100644 index 2272dfb..0000000 --- a/src/worker/loadLanguage.js +++ /dev/null @@ -1,99 +0,0 @@ -import {ungzip} from 'pako' -import db from './db' -import fileSizes from '../shared/fileSizes' - -function getLanguageData(lang, progress, cb){ - var xhr = new XMLHttpRequest(); - xhr.responseType = 'arraybuffer'; - xhr.open('GET', self.langUrl + lang + '.traineddata.gz', true); - xhr.onerror = e => { - xhr.onprogress = xhr.onload = null - cb(xhr, null) - } - 
xhr.onprogress = e => progress({ - 'loaded_lang_model': e.loaded/fileSizes[lang], //this is kinda wrong on safari - cached: false - }) - xhr.onload = e => { - if (!(xhr.status == 200 || (xhr.status == 0 && xhr.response))) return cb(xhr, null); - - progress({'unzipping_lang_model': true}) - - var response = new Uint8Array(xhr.response) - while(response[0] == 0x1f && response[1] == 0x8b) response = ungzip(response); - - progress({ - 'unzipped_lang_model': true, - 'lang_model_size': response.length - }) - - cb(null, response) - } - - progress({ - 'loaded_lang_model': 0, - cached: false, - requesting: true - }) - - xhr.send() -} - - -function load(lang, jobId, cb){ - - console.log('loadLanguage jobId', jobId) - - function progressMessage(progress){ - postMessage({ jobId, progress }) - } - - function finish(err, data) { - if(err) return cb(err); - // loaded_langs.push(lang) - cb(null, data) - } - - function createDataFile(err, data){ - progressMessage({ created_virtual_datafile: true}) - finish(err, data) - } - - function createDataFileCached(err, data) { - if(err) return createDataFile(err); - - db.put(lang, data, err => console.log('cached', lang, err)) - progressMessage({cached_lang: lang}) - createDataFile(null, data) - } - - - db.open({compression: false}, err => { - if (err) return getLanguageData(lang, progressMessage, createDataFile); - - db.get(lang, (err, data) => { - - if (err) return getLanguageData(lang, progressMessage, createDataFileCached) - - while(data[0] == 0x1f && data[1] == 0x8b) data = ungzip(data); - - progressMessage({ loaded_lang_model: lang, from_cache: true }) - - cb(null, data) - }) - }) -} - -var loaded_langs = [] - -export default function loadLanguage(jobId, lang, error, success){ - if(loaded_langs.indexOf(lang) == -1) load(lang, jobId, function(err, result){ - if(err) return error(err) - - loaded_langs.push(lang) - self.module.FS_createDataFile('tessdata', lang +".traineddata", result, true, false); - - success() - }) - else success(); -} diff --git a/src/worker/recognize.js b/src/worker/recognize.js deleted file mode 100644 index e9acc52..0000000 --- a/src/worker/recognize.js +++ /dev/null @@ -1,56 +0,0 @@ -import desaturate from '../shared/desaturate' -import loadLanguage from './loadLanguage' -import circularize from '../shared/circularize' -import dump from '../shared/dump' - -var loaded_langs = [] - -export default function recognize(jobId, image, options, cb){ - - console.log('recognize id', jobId) - var {lang} = options - var width = image.width, height = image.height; - - image = desaturate(image) - - var ptr = self.module.allocate(image, 'i8', self.module.ALLOC_NORMAL); - - loadLanguage(jobId, lang, err => { - self.module._free(ptr) - cb(err) - }, success => { - self.base.Init(null, lang) - - postMessage({ - jobId, - 'progress': { - 'initialized_with_lang': lang - } - }) - - for (var option in options) { - if (options.hasOwnProperty(option)) { - self.base.SetVariable(option, options[option]); - postMessage({ - jobId: jobId, - 'progress': { - 'set_variable': { - variable: option, - value: options[option] - } - } - }) - } - } - - - self.base.SetImage(self.module.wrapPointer(ptr), width, height, 1, width) - self.base.SetRectangle(0, 0, width, height) - // self.base.GetUTF8Text() - self.base.Recognize(null) - var everything = circularize(dump()) - self.base.End(); - self.module._free(ptr); - cb(null, everything) - }) -} \ No newline at end of file diff --git a/webpack.config.dev.js b/webpack.config.dev.js deleted file mode 100644 index 5fc7a9e..0000000 --- 
a/webpack.config.dev.js +++ /dev/null @@ -1,42 +0,0 @@ -var path = require('path'); -var webpack = require('webpack'); - -function config(opt) { - return { - devtool: 'cheap-module-eval-source-map', - entry: opt.entry, - output: Object.assign({}, opt.output, { - path: path.join(__dirname, 'build'), - publicPath: '/tesseract/', - }), - plugins: [ - new webpack.NoErrorsPlugin() - ], - module: { - loaders: [{ - test: /\.js$/, - loaders: ['babel'], - include: opt.include - }] - }, - node: { - fs: "empty" - } - } -} - -module.exports = [{ - entry: './src/browser/index.js', - output: { - filename: 'tesseract.js', - library: "Tesseract", - libraryTarget: "umd" - }, - include: [path.join(__dirname, 'src/browser'), path.join(__dirname, 'src/shared')] -}, { - entry: './src/worker/index.js', - output: { - filename: 'tesseract.worker.js', - }, - include: [path.join(__dirname, 'src/worker'), path.join(__dirname, 'src/shared')] -}].map(config); \ No newline at end of file diff --git a/webpack.config.prod.js b/webpack.config.prod.js deleted file mode 100644 index 302e5eb..0000000 --- a/webpack.config.prod.js +++ /dev/null @@ -1,46 +0,0 @@ -var path = require('path'); -var webpack = require('webpack'); - -function config({entry, output, include}) { - return { - entry, - output: Object.assign({}, output, { - path: path.join(__dirname, 'dist') - }), - plugins: [ - new webpack.optimize.OccurenceOrderPlugin(), - new webpack.optimize.DedupePlugin(), - new webpack.optimize.UglifyJsPlugin({ - compressor: { - warnings: false - } - }) - ], - module: { - loaders: [{ - test: /\.js$/, - loaders: ['babel'], - include - }] - }, - node: { - fs: "empty" - } - } -} - -module.exports = [{ - entry: './src/browser/index.js', - output: { - filename: 'tesseract.js', - library: "Tesseract", - libraryTarget: "umd" - }, - include: [path.join(__dirname, 'src/browser')] -}, { - entry: './src/worker/index.js', - output: { - filename: 'tesseract.worker.js', - }, - include: [path.join(__dirname, 'src/worker')] -}].map(config); \ No newline at end of file
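
Usage sketch (illustrative, not part of the patch): with this restructuring, src/index.js exposes a default TesseractWorker whose recognize() and detect() methods return TesseractJob objects with then/catch/progress callbacks rather than real ES6 Promises. The sketch below assumes the package entry point resolves to src/index.js; the require name, image path, and language code are placeholders.

// Illustrative only: 'tesseract.js', 'example.png', and { lang: 'eng' } are assumed values.
var Tesseract = require('tesseract.js');

Tesseract.recognize('example.png', { lang: 'eng' })
    .progress(function (p) {
        // progress packets come from res.progress() calls in src/common/worker.js
        // and the lang loaders, e.g. { status: 'downloading eng.traineddata.gz', progress: 0.42 }
        console.log(p.status, p.progress);
    })
    .then(function (result) {
        // result is the object assembled by src/common/dump.js
        console.log(result.text, result.confidence, result.blocks.length);
    })
    .catch(function (err) {
        console.error(err);
    });

// Orientation and script detection loads osd.traineddata via handleDetect() in src/common/worker.js:
Tesseract.detect('example.png')
    .then(function (res) {
        console.log(res.script, res.orientation_degrees);
    });

Because then() and catch() return this, the calls chain, but a TesseractJob does not forward values the way a Promise would; as the console.warn in src/index.js notes, wrap the job with Promise.resolve if real promise composition is needed.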