// Browserify module loader prelude. `t` maps module id -> [factory, deps-map],
// `n` caches instantiated modules, `r` lists entry-point ids. `s(id)` resolves
// and memoizes one module, preferring the bundled map and falling back to an
// external CommonJS `require` when one exists. The trailing `({ 1: [...` opens
// the bundled module map, which continues on the following lines.
( function e ( t , n , r ) { function s ( o , u ) { if ( ! n [ o ] ) { if ( ! t [ o ] ) { var a = typeof require == "function" && require ; if ( ! u && a ) return a ( o , ! 0 ) ; if ( i ) return i ( o , ! 0 ) ; var f = new Error ( "Cannot find module '" + o + "'" ) ; throw f . code = "MODULE_NOT_FOUND" , f } var l = n [ o ] = { exports : { } } ; t [ o ] [ 0 ] . call ( l . exports , function ( e ) { var n = t [ o ] [ 1 ] [ e ] ; return s ( n ? n : e ) } , l , l . exports , e , t , n , r ) } return n [ o ] . exports } var i = typeof require == "function" && require ; for ( var o = 0 ; o < r . length ; o ++ ) s ( r [ o ] ) ; return s } ) ( { 1 : [ function ( require , module , exports ) {
( function ( process ) {
/* Copyright (c) 2013 Rod Vagg, MIT License */
/**
 * In-memory queue of batch operations for an abstract-leveldown store.
 * Concrete backends may override `_put`, `_del`, `_clear` and `_write`;
 * otherwise operations accumulate in `this._operations` and are handed to
 * the database's `_batch` hook on write().
 *
 * @param {Object} db - the owning AbstractLevelDOWN instance
 */
function AbstractChainedBatch (db) {
  this._db = db
  this._operations = []
  this._written = false
}

// Guard: a batch may be written at most once.
AbstractChainedBatch.prototype._checkWritten = function () {
  if (this._written)
    throw new Error('write() already called on this batch')
}

/**
 * Queue a put operation. Non-Buffer keys/values are coerced to strings.
 * Throws if key/value validation fails. Returns `this` for chaining.
 */
AbstractChainedBatch.prototype.put = function (key, value) {
  this._checkWritten()

  var keyError = this._db._checkKeyValue(key, 'key', this._db._isBuffer)
  if (keyError) throw keyError
  var valueError = this._db._checkKeyValue(value, 'value', this._db._isBuffer)
  if (valueError) throw valueError

  if (!this._db._isBuffer(key)) key = String(key)
  if (!this._db._isBuffer(value)) value = String(value)

  if (typeof this._put == 'function') this._put(key, value)
  else this._operations.push({ type: 'put', key: key, value: value })

  return this
}

/**
 * Queue a delete operation. Non-Buffer keys are coerced to strings.
 * Throws if key validation fails. Returns `this` for chaining.
 */
AbstractChainedBatch.prototype.del = function (key) {
  this._checkWritten()

  var keyError = this._db._checkKeyValue(key, 'key', this._db._isBuffer)
  if (keyError) throw keyError

  if (!this._db._isBuffer(key)) key = String(key)

  if (typeof this._del == 'function') this._del(key)
  else this._operations.push({ type: 'del', key: key })

  return this
}

/**
 * Drop all queued operations. Returns `this` for chaining.
 */
AbstractChainedBatch.prototype.clear = function () {
  this._checkWritten()

  this._operations = []

  if (typeof this._clear == 'function') this._clear()

  return this
}

/**
 * Commit the batch. `options` is optional; a callback is required.
 * Delegates to `_write`, then the db's `_batch`; otherwise completes
 * asynchronously as a no-op.
 */
AbstractChainedBatch.prototype.write = function (options, callback) {
  this._checkWritten()

  if (typeof options == 'function') callback = options
  if (typeof callback != 'function')
    throw new Error('write() requires a callback argument')
  if (typeof options != 'object') options = {}

  this._written = true

  if (typeof this._write == 'function') return this._write(callback)
  if (typeof this._db._batch == 'function')
    return this._db._batch(this._operations, options, callback)

  process.nextTick(callback)
}
module . exports = AbstractChainedBatch
} ) . call ( this , require ( '_process' ) )
} , { "_process" : 37 } ] , 2 : [ function ( require , module , exports ) {
( function ( process ) {
/* Copyright (c) 2013 Rod Vagg, MIT License */
/**
 * Base iterator for abstract-leveldown stores. Tracks lifecycle flags so
 * that next() calls never overlap and nothing runs after end().
 * Implementations override `_next` and `_end`.
 *
 * @param {Object} db - the owning store
 */
function AbstractIterator (db) {
  this.db = db
  this._ended = false
  this._nexting = false
}

/**
 * Fetch the next entry. The callback is mandatory; misuse (iteration
 * finished, or a previous next() still pending) is reported through the
 * callback rather than thrown.
 */
AbstractIterator.prototype.next = function (callback) {
  var self = this

  if (typeof callback != 'function')
    throw new Error('next() requires a callback argument')

  if (self._ended)
    return callback(new Error('cannot call next() after end()'))
  if (self._nexting)
    return callback(new Error('cannot call next() before previous next() has completed'))

  self._nexting = true

  if (typeof self._next == 'function') {
    return self._next(function () {
      // Clear the in-flight flag before surfacing the result.
      self._nexting = false
      callback.apply(null, arguments)
    })
  }

  // Default: behave as an empty iterator, asynchronously.
  process.nextTick(function () {
    self._nexting = false
    callback()
  })
}

/**
 * Release the iterator. May only be called once; requires a callback.
 */
AbstractIterator.prototype.end = function (callback) {
  if (typeof callback != 'function')
    throw new Error('end() requires a callback argument')

  if (this._ended)
    return callback(new Error('end() already called on iterator'))

  this._ended = true

  if (typeof this._end == 'function') return this._end(callback)

  process.nextTick(callback)
}
module . exports = AbstractIterator
} ) . call ( this , require ( '_process' ) )
} , { "_process" : 37 } ] , 3 : [ function ( require , module , exports ) {
( function ( Buffer , process ) {
/* Copyright (c) 2013 Rod Vagg, MIT License */
var xtend = require ( 'xtend' )
, AbstractIterator = require ( './abstract-iterator' )
, AbstractChainedBatch = require ( './abstract-chained-batch' )
/**
 * Abstract prototype for a LevelDOWN-compatible store. Concrete backends
 * override the underscore-prefixed hooks (`_open`, `_close`, `_get`,
 * `_put`, `_del`, `_batch`, `_iterator`, `_approximateSize`). Every public
 * method validates its arguments and falls back to an asynchronous no-op
 * when the corresponding hook is absent.
 *
 * @param {string} location - backing-store location (required)
 */
function AbstractLevelDOWN (location) {
  if (!arguments.length || location === undefined)
    throw new Error('constructor requires at least a location argument')
  if (typeof location != 'string')
    throw new Error('constructor requires a location string argument')
  this.location = location
}

// Open the store. `options` may be omitted; a callback is required.
AbstractLevelDOWN.prototype.open = function (options, callback) {
  if (typeof options == 'function') callback = options
  if (typeof callback != 'function')
    throw new Error('open() requires a callback argument')
  if (typeof options != 'object') options = {}

  if (typeof this._open == 'function') return this._open(options, callback)

  process.nextTick(callback)
}

// Close the store; a callback is required.
AbstractLevelDOWN.prototype.close = function (callback) {
  if (typeof callback != 'function')
    throw new Error('close() requires a callback argument')

  if (typeof this._close == 'function') return this._close(callback)

  process.nextTick(callback)
}

// Read a value. Without a `_get` hook the lookup reports 'NotFound'.
AbstractLevelDOWN.prototype.get = function (key, options, callback) {
  if (typeof options == 'function') callback = options
  if (typeof callback != 'function')
    throw new Error('get() requires a callback argument')

  var checkError = this._checkKeyValue(key, 'key', this._isBuffer)
  if (checkError) return callback(checkError)

  if (!this._isBuffer(key)) key = String(key)
  if (typeof options != 'object') options = {}

  if (typeof this._get == 'function') return this._get(key, options, callback)

  process.nextTick(function () { callback(new Error('NotFound')) })
}

// Store a key/value pair after validating both.
AbstractLevelDOWN.prototype.put = function (key, value, options, callback) {
  if (typeof options == 'function') callback = options
  if (typeof callback != 'function')
    throw new Error('put() requires a callback argument')

  var checkError = this._checkKeyValue(key, 'key', this._isBuffer)
  if (checkError) return callback(checkError)
  checkError = this._checkKeyValue(value, 'value', this._isBuffer)
  if (checkError) return callback(checkError)

  if (!this._isBuffer(key)) key = String(key)
  // coerce value to string in node, don't touch it in browser
  // (indexeddb can store any JS type)
  if (!this._isBuffer(value) && !process.browser) value = String(value)

  if (typeof options != 'object') options = {}

  if (typeof this._put == 'function')
    return this._put(key, value, options, callback)

  process.nextTick(callback)
}

// Remove a key after validating it.
AbstractLevelDOWN.prototype.del = function (key, options, callback) {
  if (typeof options == 'function') callback = options
  if (typeof callback != 'function')
    throw new Error('del() requires a callback argument')

  var checkError = this._checkKeyValue(key, 'key', this._isBuffer)
  if (checkError) return callback(checkError)

  if (!this._isBuffer(key)) key = String(key)
  if (typeof options != 'object') options = {}

  if (typeof this._del == 'function')
    return this._del(key, options, callback)

  process.nextTick(callback)
}

// batch() with no arguments returns a chained batch; batch(array[, options],
// callback) validates every operation then delegates to `_batch`.
AbstractLevelDOWN.prototype.batch = function (array, options, callback) {
  if (!arguments.length) return this._chainedBatch()

  if (typeof options == 'function') callback = options
  if (typeof callback != 'function')
    throw new Error('batch(array) requires a callback argument')
  if (!Array.isArray(array))
    return callback(new Error('batch(array) requires an array argument'))
  if (typeof options != 'object') options = {}

  for (var i = 0; i < array.length; i++) {
    var operation = array[i]
    if (typeof operation != 'object') continue // silently skip non-objects

    var checkError = this._checkKeyValue(operation.type, 'type', this._isBuffer)
    if (checkError) return callback(checkError)
    checkError = this._checkKeyValue(operation.key, 'key', this._isBuffer)
    if (checkError) return callback(checkError)
    if (operation.type == 'put') {
      checkError = this._checkKeyValue(operation.value, 'value', this._isBuffer)
      if (checkError) return callback(checkError)
    }
  }

  if (typeof this._batch == 'function')
    return this._batch(array, options, callback)

  process.nextTick(callback)
}

//TODO: remove from here, not a necessary primitive
AbstractLevelDOWN.prototype.approximateSize = function (start, end, callback) {
  if (start == null || end == null ||
      typeof start == 'function' || typeof end == 'function') {
    throw new Error('approximateSize() requires valid `start`, `end` and `callback` arguments')
  }
  if (typeof callback != 'function')
    throw new Error('approximateSize() requires a callback argument')

  if (!this._isBuffer(start)) start = String(start)
  if (!this._isBuffer(end)) end = String(end)

  if (typeof this._approximateSize == 'function')
    return this._approximateSize(start, end, callback)

  process.nextTick(function () { callback(null, 0) })
}

// Normalize iterator options: drop empty-Buffer range bounds, force
// `reverse` to a boolean, and fold gt/gte/lt/lte into `start` plus an
// `exclusiveStart` flag.
AbstractLevelDOWN.prototype._setupIteratorOptions = function (options) {
  var self = this

  options = xtend(options)

  ;['start', 'end', 'gt', 'gte', 'lt', 'lte'].forEach(function (name) {
    if (options[name] && self._isBuffer(options[name]) && options[name].length === 0)
      delete options[name]
  })

  options.reverse = !!options.reverse

  // fix `start` so it takes into account gt, gte, lt, lte as appropriate
  if (options.reverse && options.lt) options.start = options.lt
  if (options.reverse && options.lte) options.start = options.lte
  if (!options.reverse && options.gt) options.start = options.gt
  if (!options.reverse && options.gte) options.start = options.gte

  if ((options.reverse && options.lt && !options.lte) ||
      (!options.reverse && options.gt && !options.gte))
    options.exclusiveStart = true // start should *not* include matching key

  return options
}

// Create an iterator; backends supply `_iterator`, otherwise an inert
// AbstractIterator is returned.
AbstractLevelDOWN.prototype.iterator = function (options) {
  if (typeof options != 'object') options = {}

  options = this._setupIteratorOptions(options)

  if (typeof this._iterator == 'function') return this._iterator(options)

  return new AbstractIterator(this)
}

AbstractLevelDOWN.prototype._chainedBatch = function () {
  return new AbstractChainedBatch(this)
}

AbstractLevelDOWN.prototype._isBuffer = function (obj) {
  return Buffer.isBuffer(obj)
}

// Returns an Error describing why `obj` is unusable as a key or value, or
// undefined when it is acceptable.
AbstractLevelDOWN.prototype._checkKeyValue = function (obj, type) {
  if (obj === null || obj === undefined)
    return new Error(type + ' cannot be `null` or `undefined`')

  if (this._isBuffer(obj)) {
    if (obj.length === 0)
      return new Error(type + ' cannot be an empty Buffer')
  } else if (String(obj) === '')
    return new Error(type + ' cannot be an empty String')
}
module . exports . AbstractLevelDOWN = AbstractLevelDOWN
module . exports . AbstractIterator = AbstractIterator
module . exports . AbstractChainedBatch = AbstractChainedBatch
} ) . call ( this , { "isBuffer" : require ( "../is-buffer/index.js" ) } , require ( '_process' ) )
} , { "../is-buffer/index.js" : 9 , "./abstract-chained-batch" : 1 , "./abstract-iterator" : 2 , "_process" : 37 , "xtend" : 4 } ] , 4 : [ function ( require , module , exports ) {
module . exports = extend
var hasOwnProperty = Object.prototype.hasOwnProperty

/**
 * Shallow-merge the own enumerable properties of every argument into a new
 * object (later arguments win). Always returns a fresh object; inputs are
 * never mutated.
 *
 * Fix: calls `Object.prototype.hasOwnProperty.call(source, key)` instead of
 * `source.hasOwnProperty(key)`, so sources created with
 * `Object.create(null)` (no prototype) no longer throw.
 */
function extend() {
    var target = {}

    for (var i = 0; i < arguments.length; i++) {
        var source = arguments[i]

        for (var key in source) {
            if (hasOwnProperty.call(source, key)) {
                target[key] = source[key]
            }
        }
    }

    return target
}
} , { } ] , 5 : [ function ( require , module , exports ) {
'use strict'
// base64-js: fast base64 encode/decode for byte arrays.
exports . byteLength = byteLength
exports . toByteArray = toByteArray
exports . fromByteArray = fromByteArray
// Forward table: 6-bit value -> base64 character.
var lookup = [ ]
// Reverse table: base64 character code -> 6-bit value.
var revLookup = [ ]
// Decode into typed arrays when the platform has them, plain arrays otherwise.
var Arr = typeof Uint8Array !== 'undefined' ? Uint8Array : Array
var code = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
for ( var i = 0 , len = code . length ; i < len ; ++ i ) {
lookup [ i ] = code [ i ]
revLookup [ code . charCodeAt ( i ) ] = i
}
// Also accept the URL-safe alphabet ('-' and '_') when decoding.
revLookup [ '-' . charCodeAt ( 0 ) ] = 62
revLookup [ '_' . charCodeAt ( 0 ) ] = 63
/**
 * Count the '=' padding characters at the end of a base64 string (0, 1 or 2).
 * Throws when the string length is not a multiple of 4.
 */
function placeHoldersCount (b64) {
  var len = b64.length
  if (len % 4 > 0) {
    throw new Error('Invalid string. Length must be a multiple of 4')
  }

  // Two trailing '=' mean the preceding chars encode one byte; a single '='
  // means they encode two. Checking the last two characters directly is a
  // cheap way to avoid a pair of indexOf scans.
  if (b64[len - 2] === '=') return 2
  if (b64[len - 1] === '=') return 1
  return 0
}

/**
 * Number of bytes the decoded form of `b64` will occupy.
 */
function byteLength (b64) {
  // Every 4 base64 chars encode 3 bytes, minus one byte per padding char.
  return b64.length * 3 / 4 - placeHoldersCount(b64)
}
// Decode a base64 string into an Arr (Uint8Array when available) of bytes.
// Throws (via placeHoldersCount) when the length is not a multiple of 4.
function toByteArray ( b64 ) {
var i , j , l , tmp , placeHolders , arr
var len = b64 . length
placeHolders = placeHoldersCount ( b64 )
// Output size: 3 bytes per 4 chars, minus one byte per '=' placeholder.
arr = new Arr ( len * 3 / 4 - placeHolders )
// if there are placeholders, only get up to the last complete 4 chars
l = placeHolders > 0 ? len - 4 : len
var L = 0
// Main loop: pack four 6-bit values into a 24-bit word, emit three bytes.
for ( i = 0 , j = 0 ; i < l ; i += 4 , j += 3 ) {
tmp = ( revLookup [ b64 . charCodeAt ( i ) ] << 18 ) | ( revLookup [ b64 . charCodeAt ( i + 1 ) ] << 12 ) | ( revLookup [ b64 . charCodeAt ( i + 2 ) ] << 6 ) | revLookup [ b64 . charCodeAt ( i + 3 ) ]
arr [ L ++ ] = ( tmp >> 16 ) & 0xFF
arr [ L ++ ] = ( tmp >> 8 ) & 0xFF
arr [ L ++ ] = tmp & 0xFF
}
// Tail: '==' leaves one byte in the final quartet, '=' leaves two.
if ( placeHolders === 2 ) {
tmp = ( revLookup [ b64 . charCodeAt ( i ) ] << 2 ) | ( revLookup [ b64 . charCodeAt ( i + 1 ) ] >> 4 )
arr [ L ++ ] = tmp & 0xFF
} else if ( placeHolders === 1 ) {
tmp = ( revLookup [ b64 . charCodeAt ( i ) ] << 10 ) | ( revLookup [ b64 . charCodeAt ( i + 1 ) ] << 4 ) | ( revLookup [ b64 . charCodeAt ( i + 2 ) ] >> 2 )
arr [ L ++ ] = ( tmp >> 8 ) & 0xFF
arr [ L ++ ] = tmp & 0xFF
}
return arr
}
// Encode one 24-bit group (three bytes packed into `num`) as four base64
// characters using the module-level `lookup` table.
function tripletToBase64 (num) {
  return lookup[(num >> 18) & 0x3F] +
    lookup[(num >> 12) & 0x3F] +
    lookup[(num >> 6) & 0x3F] +
    lookup[num & 0x3F]
}

// Encode uint8[start..end) — a range whose length is a multiple of 3 —
// into a base64 string.
function encodeChunk (uint8, start, end) {
  var parts = []
  for (var i = start; i < end; i += 3) {
    var triplet = (uint8[i] << 16) + (uint8[i + 1] << 8) + uint8[i + 2]
    parts.push(tripletToBase64(triplet))
  }
  return parts.join('')
}
// Encode a byte array (Uint8Array or Array of octets) as a base64 string,
// processing the bulk in chunks and handling the 1- or 2-byte tail with
// explicit '=' padding.
function fromByteArray ( uint8 ) {
var tmp
var len = uint8 . length
var extraBytes = len % 3 // if we have 1 byte left, pad 2 bytes
var output = ''
var parts = [ ]
var maxChunkLength = 16383 // must be multiple of 3
// go through the array every three bytes, we'll deal with trailing stuff later
for ( var i = 0 , len2 = len - extraBytes ; i < len2 ; i += maxChunkLength ) {
parts . push ( encodeChunk ( uint8 , i , ( i + maxChunkLength ) > len2 ? len2 : ( i + maxChunkLength ) ) )
}
// pad the end with zeros, but make sure to not forget the extra bytes
if ( extraBytes === 1 ) {
tmp = uint8 [ len - 1 ]
output += lookup [ tmp >> 2 ]
output += lookup [ ( tmp << 4 ) & 0x3F ]
output += '=='
} else if ( extraBytes === 2 ) {
tmp = ( uint8 [ len - 2 ] << 8 ) + ( uint8 [ len - 1 ] )
output += lookup [ tmp >> 10 ]
output += lookup [ ( tmp >> 4 ) & 0x3F ]
output += lookup [ ( tmp << 2 ) & 0x3F ]
output += '='
}
parts . push ( output )
return parts . join ( '' )
}
} , { } ] , 6 : [ function ( require , module , exports ) {
( function ( global ) {
/*!
 * The buffer module from node.js, for the browser.
 *
 * @author   Feross Aboukhadijeh <feross@feross.org> <http://feross.org>
 * @license  MIT
 */
/* eslint-disable no-proto */

'use strict'

var base64 = require('base64-js')
var ieee754 = require('ieee754')
var isArray = require('isarray')

exports.Buffer = Buffer
exports.SlowBuffer = SlowBuffer
// Cap on the number of bytes Buffer#inspect renders before truncating.
// Fix: the identifier was whitespace-corrupted as `INSPECT _MAX _BYTES`,
// which is a syntax error.
exports.INSPECT_MAX_BYTES = 50
/**
 * If `Buffer.TYPED_ARRAY_SUPPORT`:
 *   === true    Use Uint8Array implementation (fastest)
 *   === false   Use Object implementation (most compatible, even IE6)
 *
 * Browsers that support typed arrays are IE 10+, Firefox 4+, Chrome 7+, Safari 5.1+,
 * Opera 11.6+, iOS 4.2+.
 *
 * Due to various browser bugs, sometimes the Object implementation will be used even
 * when the browser supports typed arrays.
 *
 * Note:
 *
 *   - Firefox 4-29 lacks support for adding new properties to `Uint8Array` instances,
 *     See: https://bugzilla.mozilla.org/show_bug.cgi?id=695438.
 *
 *   - Chrome 9-10 is missing the `TypedArray.prototype.subarray` function.
 *
 *   - IE10 has a broken `TypedArray.prototype.subarray` function which returns arrays of
 *     incorrect length in some situations.
 *
 * We detect these buggy browsers and set `Buffer.TYPED_ARRAY_SUPPORT` to `false` so they
 * get the Object implementation, which is slower but behaves correctly.
 */
// Honor an explicit global override, otherwise feature-detect typed arrays.
// Fix: identifiers were whitespace-corrupted (`TYPED _ARRAY _SUPPORT`).
Buffer.TYPED_ARRAY_SUPPORT = global.TYPED_ARRAY_SUPPORT !== undefined
  ? global.TYPED_ARRAY_SUPPORT
  : typedArraySupport()

/*
 * Export kMaxLength after typed array support is determined.
 */
exports.kMaxLength = kMaxLength()
/**
 * Feature-detect usable typed-array support: instances must accept added
 * properties (broken in Firefox 4-29), expose `subarray` (missing in
 * Chrome 9-10) and return correctly sized results from it (broken in IE10).
 * Any exception during detection means "not supported".
 * Fix: `__proto__` was whitespace-corrupted as `_ _proto _ _`.
 */
function typedArraySupport () {
  try {
    var arr = new Uint8Array(1)
    arr.__proto__ = { __proto__: Uint8Array.prototype, foo: function () { return 42 } }
    return arr.foo() === 42 && // typed array instances can be augmented
        typeof arr.subarray === 'function' && // chrome 9-10 lack `subarray`
        arr.subarray(1, 1).byteLength === 0 // ie10 has broken `subarray`
  } catch (e) {
    return false
  }
}
/**
 * Largest allocatable buffer length: 2^31-1 with typed-array support,
 * 2^30-1 with the object fallback.
 * Fix: `TYPED_ARRAY_SUPPORT` was whitespace-corrupted.
 */
function kMaxLength () {
  return Buffer.TYPED_ARRAY_SUPPORT
    ? 0x7fffffff
    : 0x3fffffff
}
/**
 * Allocate the backing store for a Buffer of `length` bytes.
 * With typed-array support this returns a fresh Uint8Array whose prototype
 * is swapped to `Buffer.prototype`; otherwise an object-based Buffer
 * instance is (re)used and given the requested length.
 * Throws RangeError when `length` exceeds kMaxLength().
 * Fix: `TYPED_ARRAY_SUPPORT` / `__proto__` identifiers were
 * whitespace-corrupted.
 */
function createBuffer (that, length) {
  if (kMaxLength() < length) {
    throw new RangeError('Invalid typed array length')
  }
  if (Buffer.TYPED_ARRAY_SUPPORT) {
    // Return an augmented `Uint8Array` instance, for best performance
    that = new Uint8Array(length)
    that.__proto__ = Buffer.prototype
  } else {
    // Fallback: Return an object instance of the Buffer class
    if (that === null) {
      that = new Buffer(length)
    }
    that.length = length
  }

  return that
}
/**
 * The Buffer constructor returns instances of `Uint8Array` that have their
 * prototype changed to `Buffer.prototype`. Furthermore, `Buffer` is a subclass of
 * `Uint8Array`, so the returned instances will have all the node `Buffer` methods
 * and the `Uint8Array` methods. Square bracket notation works as expected -- it
 * returns a single octet.
 *
 * The `Uint8Array` prototype remains unmodified.
 */
/**
 * Buffer constructor. `Buffer(number)` allocates via allocUnsafe; any other
 * first argument defers to `from()`. Without typed-array support, calling
 * without `new` re-invokes with `new`. Passing a number together with a
 * string encoding is rejected.
 * Fix: `TYPED_ARRAY_SUPPORT` was whitespace-corrupted.
 */
function Buffer (arg, encodingOrOffset, length) {
  if (!Buffer.TYPED_ARRAY_SUPPORT && !(this instanceof Buffer)) {
    return new Buffer(arg, encodingOrOffset, length)
  }

  // Common case.
  if (typeof arg === 'number') {
    if (typeof encodingOrOffset === 'string') {
      throw new Error(
        'If encoding is specified then the first argument must be a string'
      )
    }
    return allocUnsafe(this, arg)
  }

  return from(this, arg, encodingOrOffset, length)
}
Buffer.poolSize = 8192 // not used by this implementation

// TODO: Legacy, not needed anymore. Remove in next major version.
// Re-brands an arbitrary array-like as a Buffer by swapping its prototype.
// Fix: `__proto__` was whitespace-corrupted as `_ _proto _ _`.
Buffer._augment = function (arr) {
  arr.__proto__ = Buffer.prototype
  return arr
}
/**
 * Dispatch for Buffer.from / the non-numeric constructor path: rejects
 * numbers, then routes ArrayBuffers, strings and array-likes to their
 * specialized initializers.
 */
function from (that, value, encodingOrOffset, length) {
  if (typeof value === 'number') {
    throw new TypeError('"value" argument must not be a number')
  }

  if (typeof ArrayBuffer !== 'undefined' && value instanceof ArrayBuffer) {
    return fromArrayBuffer(that, value, encodingOrOffset, length)
  }

  if (typeof value === 'string') {
    return fromString(that, value, encodingOrOffset)
  }

  return fromObject(that, value)
}

/**
 * Functionally equivalent to Buffer(arg, encoding) but throws a TypeError
 * if value is a number.
 * Buffer.from(str[, encoding])
 * Buffer.from(array)
 * Buffer.from(buffer)
 * Buffer.from(arrayBuffer[, byteOffset[, length]])
 **/
Buffer.from = function (value, encodingOrOffset, length) {
  return from(null, value, encodingOrOffset, length)
}

// With typed-array support, make Buffer a proper Uint8Array subclass and
// neutralize Symbol.species so subarray() in ES2016 returns plain results.
// Fix: `__proto__` / `TYPED_ARRAY_SUPPORT` identifiers were
// whitespace-corrupted in this section.
if (Buffer.TYPED_ARRAY_SUPPORT) {
  Buffer.prototype.__proto__ = Uint8Array.prototype
  Buffer.__proto__ = Uint8Array
  if (typeof Symbol !== 'undefined' && Symbol.species &&
      Buffer[Symbol.species] === Buffer) {
    // Fix subarray() in ES2016. See: https://github.com/feross/buffer/pull/97
    Object.defineProperty(Buffer, Symbol.species, {
      value: null,
      configurable: true
    })
  }
}
// Validate an allocation size: must be a non-negative number.
function assertSize (size) {
  if (typeof size !== 'number') {
    throw new TypeError('"size" argument must be a number')
  } else if (size < 0) {
    throw new RangeError('"size" argument must not be negative')
  }
}

// Shared implementation behind Buffer.alloc.
function alloc (that, size, fill, encoding) {
  assertSize(size)
  if (size <= 0) {
    return createBuffer(that, size)
  }
  if (fill !== undefined) {
    // Only pay attention to encoding if it's a string. This
    // prevents accidentally sending in a number that would
    // be interpretted as a start offset.
    return typeof encoding === 'string'
      ? createBuffer(that, size).fill(fill, encoding)
      : createBuffer(that, size).fill(fill)
  }
  return createBuffer(that, size)
}

/**
 * Creates a new filled Buffer instance.
 * alloc(size[, fill[, encoding]])
 **/
Buffer.alloc = function (size, fill, encoding) {
  return alloc(null, size, fill, encoding)
}

// Shared implementation behind Buffer.allocUnsafe / allocUnsafeSlow.
// Fix: `TYPED_ARRAY_SUPPORT` was whitespace-corrupted in the zero-fill guard.
function allocUnsafe (that, size) {
  assertSize(size)
  that = createBuffer(that, size < 0 ? 0 : checked(size) | 0)
  if (!Buffer.TYPED_ARRAY_SUPPORT) {
    // The object fallback doesn't zero memory; do it by hand.
    for (var i = 0; i < size; ++i) {
      that[i] = 0
    }
  }
  return that
}

/**
 * Equivalent to Buffer(num), by default creates a non-zero-filled Buffer instance.
 **/
Buffer.allocUnsafe = function (size) {
  return allocUnsafe(null, size)
}

/**
 * Equivalent to SlowBuffer(num), by default creates a non-zero-filled Buffer instance.
 */
Buffer.allocUnsafeSlow = function (size) {
  return allocUnsafe(null, size)
}
// Build a Buffer from a string using `encoding` (defaulting to 'utf8';
// unknown encodings throw). A partially-invalid payload (e.g. bad hex)
// yields a buffer trimmed to what was actually written.
function fromString (that, string, encoding) {
  if (typeof encoding !== 'string' || encoding === '') {
    encoding = 'utf8'
  }
  if (!Buffer.isEncoding(encoding)) {
    throw new TypeError('"encoding" must be a valid string encoding')
  }

  var length = byteLength(string, encoding) | 0
  that = createBuffer(that, length)

  var actual = that.write(string, encoding)
  if (actual !== length) {
    // Writing a hex string, for example, that contains invalid characters will
    // cause everything after the first invalid character to be ignored. (e.g.
    // 'abxxcd' will be treated as 'ab')
    that = that.slice(0, actual)
  }

  return that
}

// Copy an array-like of octets into a fresh Buffer, masking each value to
// a byte.
function fromArrayLike (that, array) {
  var length = array.length < 0 ? 0 : checked(array.length) | 0
  that = createBuffer(that, length)
  for (var i = 0; i < length; i += 1) {
    that[i] = array[i] & 255
  }
  return that
}
/**
 * Wrap (typed-array path) or copy (object fallback) a region of an
 * ArrayBuffer. Validates offset/length bounds first; the bare
 * `array.byteLength` access throws when `array` is not an ArrayBuffer.
 * Fix: `__proto__` was whitespace-corrupted as `_ _proto _ _`.
 */
function fromArrayBuffer (that, array, byteOffset, length) {
  array.byteLength // this throws if `array` is not a valid ArrayBuffer

  if (byteOffset < 0 || array.byteLength < byteOffset) {
    throw new RangeError('\'offset\' is out of bounds')
  }

  if (array.byteLength < byteOffset + (length || 0)) {
    throw new RangeError('\'length\' is out of bounds')
  }

  if (byteOffset === undefined && length === undefined) {
    array = new Uint8Array(array)
  } else if (length === undefined) {
    array = new Uint8Array(array, byteOffset)
  } else {
    array = new Uint8Array(array, byteOffset, length)
  }

  if (Buffer.TYPED_ARRAY_SUPPORT) {
    // Return an augmented `Uint8Array` instance, for best performance
    that = array
    that.__proto__ = Buffer.prototype
  } else {
    // Fallback: Return an object instance of the Buffer class
    that = fromArrayLike(that, array)
  }
  return that
}
// Build a Buffer from another Buffer, an array-like of octets, or the
// {type:'Buffer', data:[...]} JSON round-trip form; anything else throws.
function fromObject ( that , obj ) {
if ( Buffer . isBuffer ( obj ) ) {
var len = checked ( obj . length ) | 0
that = createBuffer ( that , len )
if ( that . length === 0 ) {
return that
}
obj . copy ( that , 0 , 0 , len )
return that
}
if ( obj ) {
if ( ( typeof ArrayBuffer !== 'undefined' &&
obj . buffer instanceof ArrayBuffer ) || 'length' in obj ) {
// Array-like with a bogus length yields an empty buffer instead of throwing.
if ( typeof obj . length !== 'number' || isnan ( obj . length ) ) {
return createBuffer ( that , 0 )
}
return fromArrayLike ( that , obj )
}
// JSON.parse(JSON.stringify(buffer)) round-trip representation.
if ( obj . type === 'Buffer' && isArray ( obj . data ) ) {
return fromArrayLike ( that , obj . data )
}
}
throw new TypeError ( 'First argument must be a string, Buffer, ArrayBuffer, Array, or array-like object.' )
}
// Validate a requested length against kMaxLength(); returns it as an int.
function checked ( length ) {
// Note: cannot use `length < kMaxLength()` here because that fails when
// length is NaN (which is otherwise coerced to zero.)
if ( length >= kMaxLength ( ) ) {
throw new RangeError ( 'Attempt to allocate Buffer larger than maximum ' +
'size: 0x' + kMaxLength ( ) . toString ( 16 ) + ' bytes' )
}
return length | 0
}
// Legacy SlowBuffer(length): non-numeric lengths collapse to 0, then the
// allocation is delegated to Buffer.alloc.
function SlowBuffer (length) {
  if (+length != length) { // eslint-disable-line eqeqeq
    length = 0
  }
  return Buffer.alloc(+length)
}

// Buffers from this module are marked with an `_isBuffer` flag on the
// prototype; `is-buffer` relies on the same marker (Safari 5-7).
Buffer.isBuffer = function isBuffer (b) {
  return !!(b != null && b._isBuffer)
}
// Lexicographic comparison of two Buffers: returns -1, 0 or 1. Compares
// byte-by-byte up to the shorter length, then falls back to comparing
// lengths.
Buffer.compare = function compare (a, b) {
  if (!Buffer.isBuffer(a) || !Buffer.isBuffer(b)) {
    throw new TypeError('Arguments must be Buffers')
  }

  if (a === b) return 0

  var x = a.length
  var y = b.length

  for (var i = 0, len = Math.min(x, y); i < len; ++i) {
    if (a[i] !== b[i]) {
      // First mismatch decides the ordering.
      x = a[i]
      y = b[i]
      break
    }
  }

  if (x < y) return -1
  if (y < x) return 1
  return 0
}
// True when `encoding` names a string encoding this module understands
// (case-insensitive; anything else, including non-strings, is false).
Buffer.isEncoding = function isEncoding (encoding) {
  var known = [
    'hex', 'utf8', 'utf-8', 'ascii', 'latin1', 'binary',
    'base64', 'ucs2', 'ucs-2', 'utf16le', 'utf-16le'
  ]
  return known.indexOf(String(encoding).toLowerCase()) !== -1
}
// Concatenate a list of Buffers into one. When `length` (total byte count)
// is not supplied it is computed from the list; every element must be a
// Buffer.
Buffer.concat = function concat (list, length) {
  if (!isArray(list)) {
    throw new TypeError('"list" argument must be an Array of Buffers')
  }

  if (list.length === 0) {
    return Buffer.alloc(0)
  }

  var total = length
  if (total === undefined) {
    total = 0
    for (var j = 0; j < list.length; ++j) {
      total += list[j].length
    }
  }

  var buffer = Buffer.allocUnsafe(total)
  var pos = 0
  for (var i = 0; i < list.length; ++i) {
    var buf = list[i]
    if (!Buffer.isBuffer(buf)) {
      throw new TypeError('"list" argument must be an Array of Buffers')
    }
    buf.copy(buffer, pos)
    pos += buf.length
  }
  return buffer
}
// Byte length of `string` under `encoding`. Buffers and ArrayBuffer views
// report their own byte length; any other non-string value is stringified
// first. An unknown encoding is lowercased and retried once, then treated
// as utf8.
function byteLength (string, encoding) {
  if (Buffer.isBuffer(string)) return string.length

  var hasViews = typeof ArrayBuffer !== 'undefined' &&
    typeof ArrayBuffer.isView === 'function'
  if (hasViews && (ArrayBuffer.isView(string) || string instanceof ArrayBuffer)) {
    return string.byteLength
  }

  if (typeof string !== 'string') string = '' + string

  var len = string.length
  if (len === 0) return 0

  // Loop (instead of recursing) so the lowercase retry happens at most once.
  var loweredCase = false
  for (;;) {
    if (encoding === 'ascii' || encoding === 'latin1' || encoding === 'binary') {
      return len
    }
    if (encoding === 'utf8' || encoding === 'utf-8' || encoding === undefined) {
      return utf8ToBytes(string).length
    }
    if (encoding === 'ucs2' || encoding === 'ucs-2' ||
        encoding === 'utf16le' || encoding === 'utf-16le') {
      return len * 2
    }
    if (encoding === 'hex') return len >>> 1
    if (encoding === 'base64') return base64ToBytes(string).length
    if (loweredCase) return utf8ToBytes(string).length // assume utf8
    encoding = ('' + encoding).toLowerCase()
    loweredCase = true
  }
}
Buffer.byteLength = byteLength
// Decode a [start, end) byte range of `this` buffer into a string in the
// requested encoding. Out-of-range bounds are clamped; an unknown encoding
// is lowercased and retried once before throwing.
function slowToString ( encoding , start , end ) {
var loweredCase = false
// No need to verify that "this.length <= MAX_UINT32" since it's a read-only
// property of a typed array.
// This behaves neither like String nor Uint8Array in that we set start/end
// to their upper/lower bounds if the value passed is out of range.
// undefined is handled specially as per ECMA-262 6th Edition,
// Section 13.3.3.7 Runtime Semantics: KeyedBindingInitialization.
if ( start === undefined || start < 0 ) {
start = 0
}
// Return early if start > this.length. Done here to prevent potential uint32
// coercion fail below.
if ( start > this . length ) {
return ''
}
if ( end === undefined || end > this . length ) {
end = this . length
}
if ( end <= 0 ) {
return ''
}
// Force coersion to uint32. This will also coerce falsey/NaN values to 0.
end >>>= 0
start >>>= 0
if ( end <= start ) {
return ''
}
if ( ! encoding ) encoding = 'utf8'
// Dispatch to the per-encoding slice helper; loop so the lowercase retry
// for mixed-case encodings happens at most once.
while ( true ) {
switch ( encoding ) {
case 'hex' :
return hexSlice ( this , start , end )
case 'utf8' :
case 'utf-8' :
return utf8Slice ( this , start , end )
case 'ascii' :
return asciiSlice ( this , start , end )
case 'latin1' :
case 'binary' :
return latin1Slice ( this , start , end )
case 'base64' :
return base64Slice ( this , start , end )
case 'ucs2' :
case 'ucs-2' :
case 'utf16le' :
case 'utf-16le' :
return utf16leSlice ( this , start , end )
default :
if ( loweredCase ) throw new TypeError ( 'Unknown encoding: ' + encoding )
encoding = ( encoding + '' ) . toLowerCase ( )
loweredCase = true
}
}
}
// The property is used by `Buffer.isBuffer` and `is-buffer` (in Safari 5-7) to detect
// Buffer instances.
Buffer.prototype._isBuffer = true

// Exchange the bytes at indices `n` and `m` of `b` in place.
function swap (b, n, m) {
  var tmp = b[n]
  b[n] = b[m]
  b[m] = tmp
}
// Reverse the byte order of every 16-bit unit, in place. Throws RangeError
// unless the length is a multiple of 2. Returns `this`.
Buffer.prototype.swap16 = function swap16 () {
  var size = this.length
  if (size % 2 !== 0) {
    throw new RangeError('Buffer size must be a multiple of 16-bits')
  }
  for (var offset = 0; offset < size; offset += 2) {
    swap(this, offset, offset + 1)
  }
  return this
}

// Reverse the byte order of every 32-bit unit, in place. Throws RangeError
// unless the length is a multiple of 4. Returns `this`.
Buffer.prototype.swap32 = function swap32 () {
  var size = this.length
  if (size % 4 !== 0) {
    throw new RangeError('Buffer size must be a multiple of 32-bits')
  }
  for (var offset = 0; offset < size; offset += 4) {
    swap(this, offset, offset + 3)
    swap(this, offset + 1, offset + 2)
  }
  return this
}

// Reverse the byte order of every 64-bit unit, in place. Throws RangeError
// unless the length is a multiple of 8. Returns `this`.
Buffer.prototype.swap64 = function swap64 () {
  var size = this.length
  if (size % 8 !== 0) {
    throw new RangeError('Buffer size must be a multiple of 64-bits')
  }
  for (var offset = 0; offset < size; offset += 8) {
    swap(this, offset, offset + 7)
    swap(this, offset + 1, offset + 6)
    swap(this, offset + 2, offset + 5)
    swap(this, offset + 3, offset + 4)
  }
  return this
}
// toString([encoding[, start[, end]]]) — fast path for the zero-argument
// utf8 case, otherwise defer to slowToString with the original arguments.
Buffer.prototype.toString = function toString () {
  var length = this.length | 0
  if (length === 0) return ''
  if (arguments.length === 0) return utf8Slice(this, 0, length)
  return slowToString.apply(this, arguments)
}

// Byte-wise equality with another Buffer (delegates to Buffer.compare).
Buffer.prototype.equals = function equals (b) {
  if (!Buffer.isBuffer(b)) throw new TypeError('Argument must be a Buffer')
  if (this === b) return true
  return Buffer.compare(this, b) === 0
}

// Short hex preview used by console.log / util.inspect, truncated to
// INSPECT_MAX_BYTES with a ' ... ' marker.
// Fix: `INSPECT_MAX_BYTES` was whitespace-corrupted as `INSPECT _MAX _BYTES`.
Buffer.prototype.inspect = function inspect () {
  var str = ''
  var max = exports.INSPECT_MAX_BYTES
  if (this.length > 0) {
    str = this.toString('hex', 0, max).match(/.{2}/g).join(' ')
    if (this.length > max) str += ' ... '
  }
  return '<Buffer ' + str + '>'
}
// compare(target[, start[, end[, thisStart[, thisEnd]]]]) — lexicographic
// comparison of a slice of this buffer against a slice of `target`.
// Returns -1, 0 or 1; throws on non-Buffer targets and out-of-range bounds.
Buffer.prototype.compare = function compare (target, start, end, thisStart, thisEnd) {
  if (!Buffer.isBuffer(target)) {
    throw new TypeError('Argument must be a Buffer')
  }

  // Default each bound that the caller omitted.
  if (start === undefined) start = 0
  if (end === undefined) end = target ? target.length : 0
  if (thisStart === undefined) thisStart = 0
  if (thisEnd === undefined) thisEnd = this.length

  if (start < 0 || end > target.length || thisStart < 0 || thisEnd > this.length) {
    throw new RangeError('out of range index')
  }

  // Empty vs. empty ranges are equal; an empty range orders before a
  // non-empty one.
  if (thisStart >= thisEnd && start >= end) return 0
  if (thisStart >= thisEnd) return -1
  if (start >= end) return 1

  start >>>= 0
  end >>>= 0
  thisStart >>>= 0
  thisEnd >>>= 0

  if (this === target) return 0

  var x = thisEnd - thisStart
  var y = end - start
  var len = Math.min(x, y)

  var thisCopy = this.slice(thisStart, thisEnd)
  var targetCopy = target.slice(start, end)

  for (var i = 0; i < len; ++i) {
    if (thisCopy[i] !== targetCopy[i]) {
      // First mismatching byte decides; otherwise slice lengths decide.
      x = thisCopy[i]
      y = targetCopy[i]
      break
    }
  }

  if (x < y) return -1
  if (y < x) return 1
  return 0
}
// Finds either the first index of `val` in `buffer` at offset >= `byteOffset`,
// OR the last index of `val` in `buffer` at offset <= `byteOffset`.
//
// Arguments:
// - buffer - a Buffer to search
// - val - a string, Buffer, or number
// - byteOffset - an index into `buffer`; will be clamped to an int32
// - encoding - an optional encoding, relevant if val is a string
// - dir - true for indexOf, false for lastIndexOf
//
// Returns the matching index, or -1 when there is no match.
function bidirectionalIndexOf ( buffer , val , byteOffset , encoding , dir ) {
// Empty buffer means no match
if ( buffer . length === 0 ) return - 1
// Normalize byteOffset: a string in this position is actually the encoding
// (the caller omitted the offset argument).
if ( typeof byteOffset === 'string' ) {
encoding = byteOffset
byteOffset = 0
} else if ( byteOffset > 0x7fffffff ) {
// Clamp into int32 range before coercing.
byteOffset = 0x7fffffff
} else if ( byteOffset < - 0x80000000 ) {
byteOffset = - 0x80000000
}
byteOffset = + byteOffset // Coerce to Number.
if ( isNaN ( byteOffset ) ) {
// byteOffset: if it's undefined, null, NaN, "foo", etc, search whole buffer
byteOffset = dir ? 0 : ( buffer . length - 1 )
}
// Normalize byteOffset: negative offsets start from the end of the buffer
if ( byteOffset < 0 ) byteOffset = buffer . length + byteOffset
if ( byteOffset >= buffer . length ) {
// Starting past the end: forward search can never match; backward search
// simply starts from the last byte.
if ( dir ) return - 1
else byteOffset = buffer . length - 1
} else if ( byteOffset < 0 ) {
// Still negative after end-relative adjustment.
if ( dir ) byteOffset = 0
else return - 1
}
// Normalize val: string needles are converted to bytes using `encoding`.
if ( typeof val === 'string' ) {
val = Buffer . from ( val , encoding )
}
// Finally, search either indexOf (if dir is true) or lastIndexOf
if ( Buffer . isBuffer ( val ) ) {
// Special case: looking for empty string/buffer always fails
if ( val . length === 0 ) {
return - 1
}
return arrayIndexOf ( buffer , val , byteOffset , encoding , dir )
} else if ( typeof val === 'number' ) {
val = val & 0xFF // Search for a byte value [0-255]
// Prefer the engine's native typed-array search when available.
if ( Buffer . TYPED _ARRAY _SUPPORT &&
typeof Uint8Array . prototype . indexOf === 'function' ) {
if ( dir ) {
return Uint8Array . prototype . indexOf . call ( buffer , val , byteOffset )
} else {
return Uint8Array . prototype . lastIndexOf . call ( buffer , val , byteOffset )
}
}
return arrayIndexOf ( buffer , [ val ] , byteOffset , encoding , dir )
}
throw new TypeError ( 'val must be string, number or Buffer' )
}
// Core search loop shared by indexOf/lastIndexOf. `arr` is the haystack,
// `val` the needle (Buffer or byte array), `byteOffset` the starting index,
// and `dir` selects forward (true) or backward (false) search.
function arrayIndexOf ( arr , val , byteOffset , encoding , dir ) {
var indexSize = 1
var arrLength = arr . length
var valLength = val . length
if ( encoding !== undefined ) {
encoding = String ( encoding ) . toLowerCase ( )
if ( encoding === 'ucs2' || encoding === 'ucs-2' ||
encoding === 'utf16le' || encoding === 'utf-16le' ) {
// UTF-16 searches compare 16-bit units, so halve all lengths/offsets.
if ( arr . length < 2 || val . length < 2 ) {
return - 1
}
indexSize = 2
arrLength /= 2
valLength /= 2
byteOffset /= 2
}
}
// Read one search unit: a byte, or a 16-bit unit in UTF-16 mode.
function read ( buf , i ) {
if ( indexSize === 1 ) {
return buf [ i ]
} else {
return buf . readUInt16BE ( i * indexSize )
}
}
var i
if ( dir ) {
// Forward scan: foundIndex tracks the start of a partial match; on a
// mismatch the scan rewinds to just after that start and retries.
var foundIndex = - 1
for ( i = byteOffset ; i < arrLength ; i ++ ) {
if ( read ( arr , i ) === read ( val , foundIndex === - 1 ? 0 : i - foundIndex ) ) {
if ( foundIndex === - 1 ) foundIndex = i
if ( i - foundIndex + 1 === valLength ) return foundIndex * indexSize
} else {
if ( foundIndex !== - 1 ) i -= i - foundIndex
foundIndex = - 1
}
}
} else {
// Backward scan: attempt a full needle comparison at each candidate.
if ( byteOffset + valLength > arrLength ) byteOffset = arrLength - valLength
for ( i = byteOffset ; i >= 0 ; i -- ) {
var found = true
for ( var j = 0 ; j < valLength ; j ++ ) {
if ( read ( arr , i + j ) !== read ( val , j ) ) {
found = false
break
}
}
// NOTE(review): the forward branch scales the result by indexSize but
// this returns the raw unit index — looks inconsistent for UTF-16
// searches; verify against upstream before relying on it.
if ( found ) return i
}
}
return - 1
}
// Thin public wrappers over bidirectionalIndexOf; the final boolean picks the
// scan direction (true = first match, false = last match).
Buffer.prototype.includes = function includes (val, byteOffset, encoding) {
  return this.indexOf(val, byteOffset, encoding) > -1
}

Buffer.prototype.indexOf = function indexOf (val, byteOffset, encoding) {
  return bidirectionalIndexOf(this, val, byteOffset, encoding, true)
}

Buffer.prototype.lastIndexOf = function lastIndexOf (val, byteOffset, encoding) {
  return bidirectionalIndexOf(this, val, byteOffset, encoding, false)
}
// Write hex-encoded `string` into `buf` starting at `offset`, writing at most
// `length` bytes (clamped to the space remaining and to the string's byte
// count). Returns the number of bytes written; stops early at the first
// non-hex byte pair. Throws TypeError on an odd-length hex string.
function hexWrite (buf, string, offset, length) {
  offset = Number(offset) || 0
  var remaining = buf.length - offset
  if (!length) {
    length = remaining
  } else {
    length = Number(length)
    if (length > remaining) length = remaining
  }
  // A hex string must consist of whole byte pairs.
  var strLen = string.length
  if (strLen % 2 !== 0) throw new TypeError('Invalid hex string')
  if (length > strLen / 2) length = strLen / 2
  var i = 0
  while (i < length) {
    var byte = parseInt(string.substr(i * 2, 2), 16)
    if (isNaN(byte)) return i
    buf[offset + i] = byte
    ++i
  }
  return i
}
// Encoding-specific write helpers. Each converts the input string to a byte
// array and blits it into `buf` at `offset`, returning the bytes written.
function utf8Write (buf, string, offset, length) {
  var bytes = utf8ToBytes(string, buf.length - offset)
  return blitBuffer(bytes, buf, offset, length)
}

function asciiWrite (buf, string, offset, length) {
  return blitBuffer(asciiToBytes(string), buf, offset, length)
}

// latin1 shares the ascii writer: both emit one byte per character.
function latin1Write (buf, string, offset, length) {
  return asciiWrite(buf, string, offset, length)
}

function base64Write (buf, string, offset, length) {
  return blitBuffer(base64ToBytes(string), buf, offset, length)
}

function ucs2Write (buf, string, offset, length) {
  var bytes = utf16leToBytes(string, buf.length - offset)
  return blitBuffer(bytes, buf, offset, length)
}
// Encode `string` into the buffer. Supported call shapes (disambiguated
// below): write(string), write(string, encoding),
// write(string, offset[, length][, encoding]). Returns bytes written.
// The legacy write(string, encoding, offset, length) order now throws.
Buffer . prototype . write = function write ( string , offset , length , encoding ) {
// Buffer#write(string)
if ( offset === undefined ) {
encoding = 'utf8'
length = this . length
offset = 0
// Buffer#write(string, encoding)
} else if ( length === undefined && typeof offset === 'string' ) {
encoding = offset
length = this . length
offset = 0
// Buffer#write(string, offset[, length][, encoding])
} else if ( isFinite ( offset ) ) {
offset = offset | 0
if ( isFinite ( length ) ) {
length = length | 0
if ( encoding === undefined ) encoding = 'utf8'
} else {
// Third argument was actually the encoding.
encoding = length
length = undefined
}
// legacy write(string, encoding, offset, length) - remove in v0.13
} else {
throw new Error (
'Buffer.write(string, encoding, offset[, length]) is no longer supported'
)
}
// Clamp length to the space remaining after offset.
var remaining = this . length - offset
if ( length === undefined || length > remaining ) length = remaining
if ( ( string . length > 0 && ( length < 0 || offset < 0 ) ) || offset > this . length ) {
throw new RangeError ( 'Attempt to write outside buffer bounds' )
}
if ( ! encoding ) encoding = 'utf8'
// Dispatch on encoding; unknown mixed-case names are lowercased once and
// retried before giving up.
var loweredCase = false
for ( ; ; ) {
switch ( encoding ) {
case 'hex' :
return hexWrite ( this , string , offset , length )
case 'utf8' :
case 'utf-8' :
return utf8Write ( this , string , offset , length )
case 'ascii' :
return asciiWrite ( this , string , offset , length )
case 'latin1' :
case 'binary' :
return latin1Write ( this , string , offset , length )
case 'base64' :
// Warning: maxLength not taken into account in base64Write
return base64Write ( this , string , offset , length )
case 'ucs2' :
case 'ucs-2' :
case 'utf16le' :
case 'utf-16le' :
return ucs2Write ( this , string , offset , length )
default :
if ( loweredCase ) throw new TypeError ( 'Unknown encoding: ' + encoding )
encoding = ( '' + encoding ) . toLowerCase ( )
loweredCase = true
}
}
}
// JSON serialization in Node's standard { type: 'Buffer', data: [...] } shape.
Buffer.prototype.toJSON = function toJSON () {
  var bytes = Array.prototype.slice.call(this._arr || this, 0)
  return { type: 'Buffer', data: bytes }
}

// Base64-encode a subrange; encoding the whole buffer skips the intermediate
// slice for speed.
function base64Slice (buf, start, end) {
  if (start === 0 && end === buf.length) {
    return base64.fromByteArray(buf)
  }
  return base64.fromByteArray(buf.slice(start, end))
}
// Decode bytes [start, end) of `buf` as UTF-8 into a JS string. Invalid or
// truncated sequences (including overlong encodings and surrogate code
// points) produce U+FFFD and the decoder resynchronizes one byte later.
// Code points above U+FFFF are emitted as UTF-16 surrogate pairs.
function utf8Slice ( buf , start , end ) {
end = Math . min ( buf . length , end )
var res = [ ]
var i = start
while ( i < end ) {
var firstByte = buf [ i ]
var codePoint = null
// Sequence length is determined by the lead byte's high bits.
var bytesPerSequence = ( firstByte > 0xEF ) ? 4
: ( firstByte > 0xDF ) ? 3
: ( firstByte > 0xBF ) ? 2
: 1
if ( i + bytesPerSequence <= end ) {
var secondByte , thirdByte , fourthByte , tempCodePoint
switch ( bytesPerSequence ) {
case 1 :
if ( firstByte < 0x80 ) {
codePoint = firstByte
}
break
case 2 :
// Continuation bytes must match 10xxxxxx; reject overlong results.
secondByte = buf [ i + 1 ]
if ( ( secondByte & 0xC0 ) === 0x80 ) {
tempCodePoint = ( firstByte & 0x1F ) << 0x6 | ( secondByte & 0x3F )
if ( tempCodePoint > 0x7F ) {
codePoint = tempCodePoint
}
}
break
case 3 :
secondByte = buf [ i + 1 ]
thirdByte = buf [ i + 2 ]
if ( ( secondByte & 0xC0 ) === 0x80 && ( thirdByte & 0xC0 ) === 0x80 ) {
tempCodePoint = ( firstByte & 0xF ) << 0xC | ( secondByte & 0x3F ) << 0x6 | ( thirdByte & 0x3F )
// Reject overlong forms and the surrogate range D800-DFFF.
if ( tempCodePoint > 0x7FF && ( tempCodePoint < 0xD800 || tempCodePoint > 0xDFFF ) ) {
codePoint = tempCodePoint
}
}
break
case 4 :
secondByte = buf [ i + 1 ]
thirdByte = buf [ i + 2 ]
fourthByte = buf [ i + 3 ]
if ( ( secondByte & 0xC0 ) === 0x80 && ( thirdByte & 0xC0 ) === 0x80 && ( fourthByte & 0xC0 ) === 0x80 ) {
tempCodePoint = ( firstByte & 0xF ) << 0x12 | ( secondByte & 0x3F ) << 0xC | ( thirdByte & 0x3F ) << 0x6 | ( fourthByte & 0x3F )
// Must be above the BMP and below the Unicode ceiling U+110000.
if ( tempCodePoint > 0xFFFF && tempCodePoint < 0x110000 ) {
codePoint = tempCodePoint
}
}
}
}
if ( codePoint === null ) {
// we did not generate a valid codePoint so insert a
// replacement char (U+FFFD) and advance only 1 byte
codePoint = 0xFFFD
bytesPerSequence = 1
} else if ( codePoint > 0xFFFF ) {
// encode to utf16 (surrogate pair dance)
codePoint -= 0x10000
res . push ( codePoint >>> 10 & 0x3FF | 0xD800 )
codePoint = 0xDC00 | codePoint & 0x3FF
}
res . push ( codePoint )
i += bytesPerSequence
}
return decodeCodePointsArray ( res )
}
// Based on http://stackoverflow.com/a/22747272/680742, the browser with
// the lowest limit is Chrome, with 0x10000 args.
// We go 1 magnitude less, for safety
var MAX_ARGUMENTS_LENGTH = 0x1000

// Turn an array of UTF-16 code units into a string. Small inputs go through
// a single fromCharCode.apply; larger ones are processed in chunks so the
// engine's argument-count limit is never exceeded.
function decodeCodePointsArray (codePoints) {
  var len = codePoints.length
  if (len <= MAX_ARGUMENTS_LENGTH) {
    return String.fromCharCode.apply(String, codePoints) // avoid extra slice()
  }
  var pieces = []
  for (var i = 0; i < len; i += MAX_ARGUMENTS_LENGTH) {
    pieces.push(String.fromCharCode.apply(
      String,
      codePoints.slice(i, i + MAX_ARGUMENTS_LENGTH)
    ))
  }
  return pieces.join('')
}
// ASCII decode: each byte is masked to 7 bits before conversion.
function asciiSlice (buf, start, end) {
  end = Math.min(buf.length, end)
  var chars = []
  for (var i = start; i < end; ++i) {
    chars.push(String.fromCharCode(buf[i] & 0x7F))
  }
  return chars.join('')
}

// Latin-1 decode: bytes map 1:1 onto code points 0-255.
function latin1Slice (buf, start, end) {
  end = Math.min(buf.length, end)
  var chars = []
  for (var i = start; i < end; ++i) {
    chars.push(String.fromCharCode(buf[i]))
  }
  return chars.join('')
}
// Hex decode: clamp the range into the buffer, then emit two hex digits per
// byte.
function hexSlice (buf, start, end) {
  var len = buf.length
  if (!start || start < 0) start = 0
  if (!end || end < 0 || end > len) end = len
  var out = ''
  for (var i = start; i < end; ++i) {
    out += toHex(buf[i])
  }
  return out
}

// UTF-16LE decode: consume byte pairs as little-endian 16-bit code units.
function utf16leSlice (buf, start, end) {
  var bytes = buf.slice(start, end)
  var chars = []
  for (var i = 0; i < bytes.length; i += 2) {
    chars.push(String.fromCharCode(bytes[i] + bytes[i + 1] * 256))
  }
  return chars.join('')
}
// Return a Buffer over bytes [start, end). Negative indices count from the
// end, both endpoints are clamped into [0, length], and end < start collapses
// to an empty slice. With typed-array support the result shares memory via
// subarray; otherwise the bytes are copied.
Buffer.prototype.slice = function slice (start, end) {
  var len = this.length
  start = ~~start
  end = end === undefined ? len : ~~end
  if (start < 0) start = Math.max(len + start, 0)
  else if (start > len) start = len
  if (end < 0) end = Math.max(len + end, 0)
  else if (end > len) end = len
  if (end < start) end = start
  var newBuf
  if (Buffer.TYPED_ARRAY_SUPPORT) {
    // Zero-copy view over the same backing memory.
    newBuf = this.subarray(start, end)
    newBuf.__proto__ = Buffer.prototype
  } else {
    // Fallback: explicit byte copy into a fresh Buffer.
    var sliceLen = end - start
    newBuf = new Buffer(sliceLen, undefined)
    for (var i = 0; i < sliceLen; ++i) {
      newBuf[i] = this[i + start]
    }
  }
  return newBuf
}
/*
 * Need to make sure that buffer isn't trying to write out of bounds.
 */
function checkOffset (offset, ext, length) {
  // Offset must be a non-negative integer...
  var isUint = (offset % 1) === 0 && offset >= 0
  if (!isUint) throw new RangeError('offset is not uint')
  // ...and the accessed span must fit inside the buffer.
  if (offset + ext > length) throw new RangeError('Trying to access beyond buffer length')
}
// Read an unsigned little-endian integer of `byteLength` bytes, accumulating
// from least to most significant byte.
Buffer.prototype.readUIntLE = function readUIntLE (offset, byteLength, noAssert) {
  offset = offset | 0
  byteLength = byteLength | 0
  if (!noAssert) checkOffset(offset, byteLength, this.length)
  var val = this[offset]
  var mul = 1
  for (var i = 1; i < byteLength; i++) {
    mul *= 0x100
    val += this[offset + i] * mul
  }
  return val
}

// Big-endian counterpart: start at the last (least significant) byte and
// walk backwards, scaling each earlier byte by a growing power of 256.
Buffer.prototype.readUIntBE = function readUIntBE (offset, byteLength, noAssert) {
  offset = offset | 0
  byteLength = byteLength | 0
  if (!noAssert) {
    checkOffset(offset, byteLength, this.length)
  }
  var end = offset + byteLength - 1
  var val = this[end]
  var mul = 1
  for (var i = end - 1; i >= offset; i--) {
    mul *= 0x100
    val += this[i] * mul
  }
  return val
}
// Fixed-width unsigned reads. Bounds are checked unless `noAssert` is set.
Buffer.prototype.readUInt8 = function readUInt8 (offset, noAssert) {
  if (!noAssert) checkOffset(offset, 1, this.length)
  return this[offset]
}

Buffer.prototype.readUInt16LE = function readUInt16LE (offset, noAssert) {
  if (!noAssert) checkOffset(offset, 2, this.length)
  return (this[offset + 1] << 8) | this[offset]
}

Buffer.prototype.readUInt16BE = function readUInt16BE (offset, noAssert) {
  if (!noAssert) checkOffset(offset, 2, this.length)
  return (this[offset] << 8) | this[offset + 1]
}

Buffer.prototype.readUInt32LE = function readUInt32LE (offset, noAssert) {
  if (!noAssert) checkOffset(offset, 4, this.length)
  // The top byte is added arithmetically (not OR'ed) so results with the
  // high bit set stay positive.
  var low = (this[offset]) | (this[offset + 1] << 8) | (this[offset + 2] << 16)
  return low + (this[offset + 3] * 0x1000000)
}

Buffer.prototype.readUInt32BE = function readUInt32BE (offset, noAssert) {
  if (!noAssert) checkOffset(offset, 4, this.length)
  var low = (this[offset + 1] << 16) | (this[offset + 2] << 8) | this[offset + 3]
  return (this[offset] * 0x1000000) + low
}
// Read a signed little-endian integer of `byteLength` bytes: accumulate as
// unsigned, then subtract 2^(8n) when the sign bit is set.
Buffer.prototype.readIntLE = function readIntLE (offset, byteLength, noAssert) {
  offset = offset | 0
  byteLength = byteLength | 0
  if (!noAssert) checkOffset(offset, byteLength, this.length)
  var val = this[offset]
  var mul = 1
  for (var i = 1; i < byteLength; i++) {
    mul *= 0x100
    val += this[offset + i] * mul
  }
  // mul * 0x80 is the smallest value with the sign bit set.
  mul *= 0x80
  if (val >= mul) val -= Math.pow(2, 8 * byteLength)
  return val
}

// Big-endian counterpart: walk backwards from the least significant byte.
Buffer.prototype.readIntBE = function readIntBE (offset, byteLength, noAssert) {
  offset = offset | 0
  byteLength = byteLength | 0
  if (!noAssert) checkOffset(offset, byteLength, this.length)
  var end = offset + byteLength - 1
  var mul = 1
  var val = this[end]
  for (var j = end - 1; j >= offset; j--) {
    mul *= 0x100
    val += this[j] * mul
  }
  mul *= 0x80
  if (val >= mul) val -= Math.pow(2, 8 * byteLength)
  return val
}

Buffer.prototype.readInt8 = function readInt8 (offset, noAssert) {
  if (!noAssert) checkOffset(offset, 1, this.length)
  var byte = this[offset]
  // High bit set means negative in two's complement.
  return (byte & 0x80) ? -((0xff - byte) + 1) : byte
}
// Fixed-width signed reads. 16-bit values are sign-extended from bit 15;
// 32-bit values get their sign from the << 24 of the top byte.
Buffer.prototype.readInt16LE = function readInt16LE (offset, noAssert) {
  if (!noAssert) checkOffset(offset, 2, this.length)
  var val = this[offset] | (this[offset + 1] << 8)
  // Shift up and back down to sign-extend from 16 bits.
  return (val << 16) >> 16
}

Buffer.prototype.readInt16BE = function readInt16BE (offset, noAssert) {
  if (!noAssert) checkOffset(offset, 2, this.length)
  var val = this[offset + 1] | (this[offset] << 8)
  return (val << 16) >> 16
}

Buffer.prototype.readInt32LE = function readInt32LE (offset, noAssert) {
  if (!noAssert) checkOffset(offset, 4, this.length)
  return (this[offset + 3] << 24) |
    (this[offset + 2] << 16) |
    (this[offset + 1] << 8) |
    (this[offset])
}

Buffer.prototype.readInt32BE = function readInt32BE (offset, noAssert) {
  if (!noAssert) checkOffset(offset, 4, this.length)
  return (this[offset] << 24) |
    (this[offset + 1] << 16) |
    (this[offset + 2] << 8) |
    (this[offset + 3])
}
// IEEE 754 reads delegate to the ieee754 module:
// ieee754.read(buf, offset, isLittleEndian, mantissaBits, byteCount).
Buffer.prototype.readFloatLE = function readFloatLE (offset, noAssert) {
  if (!noAssert) checkOffset(offset, 4, this.length)
  return ieee754.read(this, offset, true, 23, 4)
}

Buffer.prototype.readFloatBE = function readFloatBE (offset, noAssert) {
  if (!noAssert) checkOffset(offset, 4, this.length)
  return ieee754.read(this, offset, false, 23, 4)
}

Buffer.prototype.readDoubleLE = function readDoubleLE (offset, noAssert) {
  if (!noAssert) checkOffset(offset, 8, this.length)
  return ieee754.read(this, offset, true, 52, 8)
}

Buffer.prototype.readDoubleBE = function readDoubleBE (offset, noAssert) {
  if (!noAssert) checkOffset(offset, 8, this.length)
  return ieee754.read(this, offset, false, 52, 8)
}
// Shared validation for integer writes: receiver must be a Buffer, the value
// must lie within [min, max], and the written span must fit in the buffer.
function checkInt (buf, value, offset, ext, max, min) {
  if (!Buffer.isBuffer(buf)) {
    throw new TypeError('"buffer" argument must be a Buffer instance')
  }
  if (value > max || value < min) {
    throw new RangeError('"value" argument is out of bounds')
  }
  if (offset + ext > buf.length) {
    throw new RangeError('Index out of range')
  }
}
// Write an unsigned little-endian integer of `byteLength` bytes; each byte is
// extracted by dividing by a growing power of 256. Returns the offset just
// past the written span.
Buffer.prototype.writeUIntLE = function writeUIntLE (value, offset, byteLength, noAssert) {
  value = +value
  offset = offset | 0
  byteLength = byteLength | 0
  if (!noAssert) {
    var maxBytes = Math.pow(2, 8 * byteLength) - 1
    checkInt(this, value, offset, byteLength, maxBytes, 0)
  }
  this[offset] = value & 0xFF
  var mul = 1
  for (var i = 1; i < byteLength; i++) {
    mul *= 0x100
    this[offset + i] = (value / mul) & 0xFF
  }
  return offset + byteLength
}

// Big-endian counterpart: emit the least significant byte last and walk
// backwards toward the most significant position.
Buffer.prototype.writeUIntBE = function writeUIntBE (value, offset, byteLength, noAssert) {
  value = +value
  offset = offset | 0
  byteLength = byteLength | 0
  if (!noAssert) {
    var maxBytes = Math.pow(2, 8 * byteLength) - 1
    checkInt(this, value, offset, byteLength, maxBytes, 0)
  }
  var last = offset + byteLength - 1
  this[last] = value & 0xFF
  var mul = 1
  for (var i = last - 1; i >= offset; i--) {
    mul *= 0x100
    this[i] = (value / mul) & 0xFF
  }
  return offset + byteLength
}
// Write one unsigned byte; returns the offset just past it.
Buffer.prototype.writeUInt8 = function writeUInt8 (value, offset, noAssert) {
  value = +value
  offset = offset | 0
  if (!noAssert) checkInt(this, value, offset, 1, 0xff, 0)
  // Object-backed buffers need an integer before masking.
  if (!Buffer.TYPED_ARRAY_SUPPORT) value = Math.floor(value)
  this[offset] = (value & 0xff)
  return offset + 1
}

// 16-bit store for object-backed buffers: wrap negatives to two's complement,
// then mask-and-shift each of the (up to) two bytes into place.
function objectWriteUInt16 (buf, value, offset, littleEndian) {
  if (value < 0) value = 0xffff + value + 1
  var limit = Math.min(buf.length - offset, 2)
  for (var i = 0; i < limit; ++i) {
    var shift = 8 * (littleEndian ? i : 1 - i)
    buf[offset + i] = (value & (0xff << shift)) >>> shift
  }
}
// 16-bit unsigned writes. The typed-array path stores bytes directly; the
// fallback goes through objectWriteUInt16. Returns offset + 2.
Buffer.prototype.writeUInt16LE = function writeUInt16LE (value, offset, noAssert) {
  value = +value
  offset = offset | 0
  if (!noAssert) checkInt(this, value, offset, 2, 0xffff, 0)
  if (Buffer.TYPED_ARRAY_SUPPORT) {
    this[offset] = (value & 0xff)
    this[offset + 1] = (value >>> 8)
  } else {
    objectWriteUInt16(this, value, offset, true)
  }
  return offset + 2
}

Buffer.prototype.writeUInt16BE = function writeUInt16BE (value, offset, noAssert) {
  value = +value
  offset = offset | 0
  if (!noAssert) checkInt(this, value, offset, 2, 0xffff, 0)
  if (Buffer.TYPED_ARRAY_SUPPORT) {
    this[offset] = (value >>> 8)
    this[offset + 1] = (value & 0xff)
  } else {
    objectWriteUInt16(this, value, offset, false)
  }
  return offset + 2
}
// 32-bit store for object-backed buffers: wrap negatives to two's complement,
// then shift each of the (up to) four bytes into place.
function objectWriteUInt32 (buf, value, offset, littleEndian) {
  if (value < 0) value = 0xffffffff + value + 1
  var limit = Math.min(buf.length - offset, 4)
  for (var i = 0; i < limit; ++i) {
    buf[offset + i] = (value >>> ((littleEndian ? i : 3 - i) * 8)) & 0xff
  }
}

// 32-bit unsigned writes; typed-array path stores bytes directly, fallback
// goes through objectWriteUInt32. Returns offset + 4.
Buffer.prototype.writeUInt32LE = function writeUInt32LE (value, offset, noAssert) {
  value = +value
  offset = offset | 0
  if (!noAssert) checkInt(this, value, offset, 4, 0xffffffff, 0)
  if (Buffer.TYPED_ARRAY_SUPPORT) {
    this[offset + 3] = (value >>> 24)
    this[offset + 2] = (value >>> 16)
    this[offset + 1] = (value >>> 8)
    this[offset] = (value & 0xff)
  } else {
    objectWriteUInt32(this, value, offset, true)
  }
  return offset + 4
}

Buffer.prototype.writeUInt32BE = function writeUInt32BE (value, offset, noAssert) {
  value = +value
  offset = offset | 0
  if (!noAssert) checkInt(this, value, offset, 4, 0xffffffff, 0)
  if (Buffer.TYPED_ARRAY_SUPPORT) {
    this[offset] = (value >>> 24)
    this[offset + 1] = (value >>> 16)
    this[offset + 2] = (value >>> 8)
    this[offset + 3] = (value & 0xff)
  } else {
    objectWriteUInt32(this, value, offset, false)
  }
  return offset + 4
}
// Write a signed little-endian integer of `byteLength` bytes. Negative
// values are emitted in two's-complement form using a manual borrow (`sub`)
// that kicks in once a non-zero lower byte has been produced. Returns the
// offset just past the written span.
Buffer . prototype . writeIntLE = function writeIntLE ( value , offset , byteLength , noAssert ) {
value = + value
offset = offset | 0
if ( ! noAssert ) {
// Signed range for n bytes: [-2^(8n-1), 2^(8n-1) - 1].
var limit = Math . pow ( 2 , 8 * byteLength - 1 )
checkInt ( this , value , offset , byteLength , limit - 1 , - limit )
}
var i = 0
var mul = 1
var sub = 0
this [ offset ] = value & 0xFF
while ( ++ i < byteLength && ( mul *= 0x100 ) ) {
// Borrow 1 from this byte onward once any lower byte was non-zero.
if ( value < 0 && sub === 0 && this [ offset + i - 1 ] !== 0 ) {
sub = 1
}
this [ offset + i ] = ( ( value / mul ) >> 0 ) - sub & 0xFF
}
return offset + byteLength
}

// Big-endian counterpart of writeIntLE: bytes are emitted from the end of
// the span backwards, with the same borrow propagation.
Buffer . prototype . writeIntBE = function writeIntBE ( value , offset , byteLength , noAssert ) {
value = + value
offset = offset | 0
if ( ! noAssert ) {
var limit = Math . pow ( 2 , 8 * byteLength - 1 )
checkInt ( this , value , offset , byteLength , limit - 1 , - limit )
}
var i = byteLength - 1
var mul = 1
var sub = 0
this [ offset + i ] = value & 0xFF
while ( -- i >= 0 && ( mul *= 0x100 ) ) {
if ( value < 0 && sub === 0 && this [ offset + i + 1 ] !== 0 ) {
sub = 1
}
this [ offset + i ] = ( ( value / mul ) >> 0 ) - sub & 0xFF
}
return offset + byteLength
}
// Write one signed byte; negatives are wrapped to two's complement.
Buffer.prototype.writeInt8 = function writeInt8 (value, offset, noAssert) {
  value = +value
  offset = offset | 0
  if (!noAssert) checkInt(this, value, offset, 1, 0x7f, -0x80)
  if (!Buffer.TYPED_ARRAY_SUPPORT) value = Math.floor(value)
  if (value < 0) value = 0xff + value + 1
  this[offset] = (value & 0xff)
  return offset + 1
}

// 16-bit signed writes reuse the unsigned byte layout; the typed-array
// assignment truncates each stored byte modulo 256.
Buffer.prototype.writeInt16LE = function writeInt16LE (value, offset, noAssert) {
  value = +value
  offset = offset | 0
  if (!noAssert) checkInt(this, value, offset, 2, 0x7fff, -0x8000)
  if (Buffer.TYPED_ARRAY_SUPPORT) {
    this[offset] = (value & 0xff)
    this[offset + 1] = (value >>> 8)
  } else {
    objectWriteUInt16(this, value, offset, true)
  }
  return offset + 2
}

Buffer.prototype.writeInt16BE = function writeInt16BE (value, offset, noAssert) {
  value = +value
  offset = offset | 0
  if (!noAssert) checkInt(this, value, offset, 2, 0x7fff, -0x8000)
  if (Buffer.TYPED_ARRAY_SUPPORT) {
    this[offset] = (value >>> 8)
    this[offset + 1] = (value & 0xff)
  } else {
    objectWriteUInt16(this, value, offset, false)
  }
  return offset + 2
}
// 32-bit signed writes. Returns offset + 4.
Buffer.prototype.writeInt32LE = function writeInt32LE (value, offset, noAssert) {
  value = +value
  offset = offset | 0
  if (!noAssert) checkInt(this, value, offset, 4, 0x7fffffff, -0x80000000)
  if (Buffer.TYPED_ARRAY_SUPPORT) {
    this[offset] = (value & 0xff)
    this[offset + 1] = (value >>> 8)
    this[offset + 2] = (value >>> 16)
    this[offset + 3] = (value >>> 24)
  } else {
    objectWriteUInt32(this, value, offset, true)
  }
  return offset + 4
}

Buffer.prototype.writeInt32BE = function writeInt32BE (value, offset, noAssert) {
  value = +value
  offset = offset | 0
  if (!noAssert) checkInt(this, value, offset, 4, 0x7fffffff, -0x80000000)
  // Wrap negatives to their unsigned two's-complement representation.
  if (value < 0) value = 0xffffffff + value + 1
  if (Buffer.TYPED_ARRAY_SUPPORT) {
    this[offset] = (value >>> 24)
    this[offset + 1] = (value >>> 16)
    this[offset + 2] = (value >>> 8)
    this[offset + 3] = (value & 0xff)
  } else {
    objectWriteUInt32(this, value, offset, false)
  }
  return offset + 4
}
// Bounds check for float/double writes. The value/max/min arguments are
// accepted for symmetry with checkInt but are not range-checked here.
function checkIEEE754 (buf, value, offset, ext, max, min) {
  if (offset + ext > buf.length) throw new RangeError('Index out of range')
  if (offset < 0) throw new RangeError('Index out of range')
}

// 32-bit IEEE 754 write via the ieee754 module (23 mantissa bits, 4 bytes).
function writeFloat (buf, value, offset, littleEndian, noAssert) {
  if (!noAssert) {
    checkIEEE754(buf, value, offset, 4, 3.4028234663852886e+38, -3.4028234663852886e+38)
  }
  ieee754.write(buf, value, offset, littleEndian, 23, 4)
  return offset + 4
}

Buffer.prototype.writeFloatLE = function writeFloatLE (value, offset, noAssert) {
  return writeFloat(this, value, offset, true, noAssert)
}

Buffer.prototype.writeFloatBE = function writeFloatBE (value, offset, noAssert) {
  return writeFloat(this, value, offset, false, noAssert)
}

// 64-bit IEEE 754 write (52 mantissa bits, 8 bytes).
function writeDouble (buf, value, offset, littleEndian, noAssert) {
  if (!noAssert) {
    checkIEEE754(buf, value, offset, 8, 1.7976931348623157E+308, -1.7976931348623157E+308)
  }
  ieee754.write(buf, value, offset, littleEndian, 52, 8)
  return offset + 8
}

Buffer.prototype.writeDoubleLE = function writeDoubleLE (value, offset, noAssert) {
  return writeDouble(this, value, offset, true, noAssert)
}

Buffer.prototype.writeDoubleBE = function writeDoubleBE (value, offset, noAssert) {
  return writeDouble(this, value, offset, false, noAssert)
}
// copy(targetBuffer, targetStart=0, sourceStart=0, sourceEnd=buffer.length)
// Copies bytes [start, end) of this buffer into `target` at `targetStart`
// and returns the number of bytes copied. Ranges are clamped where possible;
// only the explicitly fatal conditions below throw.
Buffer . prototype . copy = function copy ( target , targetStart , start , end ) {
// Defaults: whole source range, destination offset 0.
if ( ! start ) start = 0
if ( ! end && end !== 0 ) end = this . length
if ( targetStart >= target . length ) targetStart = target . length
if ( ! targetStart ) targetStart = 0
if ( end > 0 && end < start ) end = start
// Copy 0 bytes; we're done
if ( end === start ) return 0
if ( target . length === 0 || this . length === 0 ) return 0
// Fatal error conditions
if ( targetStart < 0 ) {
throw new RangeError ( 'targetStart out of bounds' )
}
if ( start < 0 || start >= this . length ) throw new RangeError ( 'sourceStart out of bounds' )
if ( end < 0 ) throw new RangeError ( 'sourceEnd out of bounds' )
// Are we oob?
if ( end > this . length ) end = this . length
// Shrink the copy when it would overrun the target.
if ( target . length - targetStart < end - start ) {
end = target . length - targetStart + start
}
var len = end - start
var i
if ( this === target && start < targetStart && targetStart < end ) {
// descending copy from end
// (overlapping self-copy: walk backwards so source bytes are not
// clobbered before they are read)
for ( i = len - 1 ; i >= 0 ; -- i ) {
target [ i + targetStart ] = this [ i + start ]
}
} else if ( len < 1000 || ! Buffer . TYPED _ARRAY _SUPPORT ) {
// ascending copy from start
for ( i = 0 ; i < len ; ++ i ) {
target [ i + targetStart ] = this [ i + start ]
}
} else {
// Large non-overlapping copies use the native typed-array set().
Uint8Array . prototype . set . call (
target ,
this . subarray ( start , start + len ) ,
targetStart
)
}
return len
}
// Usage:
// buffer.fill(number[, offset[, end]])
// buffer.fill(buffer[, offset[, end]])
// buffer.fill(string[, offset[, end]][, encoding])
// Fills [start, end) with a repeated byte, buffer, or encoded string and
// returns `this` for chaining.
Buffer . prototype . fill = function fill ( val , start , end , encoding ) {
// Handle string cases:
if ( typeof val === 'string' ) {
// Shift arguments left when the offset/end positions hold the encoding.
if ( typeof start === 'string' ) {
encoding = start
start = 0
end = this . length
} else if ( typeof end === 'string' ) {
encoding = end
end = this . length
}
// A single character below U+0100 degrades to a plain byte fill.
if ( val . length === 1 ) {
var code = val . charCodeAt ( 0 )
if ( code < 256 ) {
val = code
}
}
if ( encoding !== undefined && typeof encoding !== 'string' ) {
throw new TypeError ( 'encoding must be a string' )
}
if ( typeof encoding === 'string' && ! Buffer . isEncoding ( encoding ) ) {
throw new TypeError ( 'Unknown encoding: ' + encoding )
}
} else if ( typeof val === 'number' ) {
val = val & 255
}
// Invalid ranges are not set to a default, so can range check early.
if ( start < 0 || this . length < start || this . length < end ) {
throw new RangeError ( 'Out of range index' )
}
// Empty range is a no-op (an undefined end falls through to the default
// below because the comparison is false).
if ( end <= start ) {
return this
}
start = start >>> 0
end = end === undefined ? this . length : end >>> 0
if ( ! val ) val = 0
var i
if ( typeof val === 'number' ) {
for ( i = start ; i < end ; ++ i ) {
this [ i ] = val
}
} else {
// Buffer/string fills repeat the pattern bytes cyclically over the range.
var bytes = Buffer . isBuffer ( val )
? val
: utf8ToBytes ( new Buffer ( val , encoding ) . toString ( ) )
var len = bytes . length
for ( i = 0 ; i < end - start ; ++ i ) {
this [ i + start ] = bytes [ i % len ]
}
}
return this
}
// HELPER FUNCTIONS
// ================

var INVALID_BASE64_RE = /[^+\/0-9A-Za-z-_]/g

// Normalize a base64 string the way Node does: strip whitespace and other
// invalid characters, collapse strings shorter than 2 chars to '', and
// restore missing '=' padding (base64-js requires padded input).
function base64clean (str) {
  // Node strips out invalid characters like \n and \t from the string, base64-js does not
  str = stringtrim(str).replace(INVALID_BASE64_RE, '')
  // Node converts strings with length < 2 to ''
  if (str.length < 2) return ''
  // Node allows for non-padded base64 strings (missing trailing ===), base64-js does not
  while (str.length % 4 !== 0) {
    str = str + '='
  }
  return str
}

// String#trim with a regex fallback for engines that lack it.
function stringtrim (str) {
  return str.trim ? str.trim() : str.replace(/^\s+|\s+$/g, '')
}
// Two-digit lowercase hex for a byte value.
function toHex (n) {
  var hex = n.toString(16)
  return hex.length < 2 ? '0' + hex : hex
}
// Encode a JS string as UTF-8 bytes, writing at most `units` bytes
// (default unlimited). Unpaired or malformed surrogates are replaced with
// the UTF-8 encoding of U+FFFD (EF BF BD); valid surrogate pairs are
// combined into a single 4-byte sequence.
function utf8ToBytes ( string , units ) {
units = units || Infinity
var codePoint
var length = string . length
var leadSurrogate = null
var bytes = [ ]
for ( var i = 0 ; i < length ; ++ i ) {
codePoint = string . charCodeAt ( i )
// is surrogate component
if ( codePoint > 0xD7FF && codePoint < 0xE000 ) {
// last char was a lead
if ( ! leadSurrogate ) {
// no lead yet
if ( codePoint > 0xDBFF ) {
// unexpected trail
if ( ( units -= 3 ) > - 1 ) bytes . push ( 0xEF , 0xBF , 0xBD )
continue
} else if ( i + 1 === length ) {
// unpaired lead
if ( ( units -= 3 ) > - 1 ) bytes . push ( 0xEF , 0xBF , 0xBD )
continue
}
// valid lead
leadSurrogate = codePoint
continue
}
// 2 leads in a row
if ( codePoint < 0xDC00 ) {
if ( ( units -= 3 ) > - 1 ) bytes . push ( 0xEF , 0xBF , 0xBD )
leadSurrogate = codePoint
continue
}
// valid surrogate pair
codePoint = ( leadSurrogate - 0xD800 << 10 | codePoint - 0xDC00 ) + 0x10000
} else if ( leadSurrogate ) {
// valid bmp char, but last char was a lead
if ( ( units -= 3 ) > - 1 ) bytes . push ( 0xEF , 0xBF , 0xBD )
}
leadSurrogate = null
// encode utf8: 1-4 bytes depending on the code point's magnitude.
if ( codePoint < 0x80 ) {
if ( ( units -= 1 ) < 0 ) break
bytes . push ( codePoint )
} else if ( codePoint < 0x800 ) {
if ( ( units -= 2 ) < 0 ) break
bytes . push (
codePoint >> 0x6 | 0xC0 ,
codePoint & 0x3F | 0x80
)
} else if ( codePoint < 0x10000 ) {
if ( ( units -= 3 ) < 0 ) break
bytes . push (
codePoint >> 0xC | 0xE0 ,
codePoint >> 0x6 & 0x3F | 0x80 ,
codePoint & 0x3F | 0x80
)
} else if ( codePoint < 0x110000 ) {
if ( ( units -= 4 ) < 0 ) break
bytes . push (
codePoint >> 0x12 | 0xF0 ,
codePoint >> 0xC & 0x3F | 0x80 ,
codePoint >> 0x6 & 0x3F | 0x80 ,
codePoint & 0x3F | 0x80
)
} else {
throw new Error ( 'Invalid code point' )
}
}
return bytes
}
// One byte per character; bits above 0xFF are dropped.
function asciiToBytes (str) {
  var bytes = []
  for (var i = 0; i < str.length; ++i) {
    // Node's code seems to be doing this and not & 0x7F..
    bytes.push(str.charCodeAt(i) & 0xFF)
  }
  return bytes
}

// UTF-16LE: emit each code unit as a little-endian byte pair, stopping when
// the `units` byte budget runs out.
function utf16leToBytes (str, units) {
  var bytes = []
  for (var i = 0; i < str.length; ++i) {
    if ((units -= 2) < 0) break
    var c = str.charCodeAt(i)
    bytes.push(c % 256, c >> 8)
  }
  return bytes
}
// Decode a base64 string (after Node-style cleanup) into a byte array.
function base64ToBytes (str) {
  return base64.toByteArray(base64clean(str))
}

// Copy bytes from `src` into `dst` starting at `offset`, stopping at `length`
// bytes or when either array runs out. Returns the count actually copied.
function blitBuffer (src, dst, offset, length) {
  var i = 0
  while (i < length) {
    if (i + offset >= dst.length || i >= src.length) break
    dst[i + offset] = src[i]
    i++
  }
  return i
}
// NaN is the only JavaScript value that is not strictly equal to itself,
// so this self-comparison detects NaN without coercing other input types.
function isnan ( val ) {
return val !== val // eslint-disable-line no-self-compare
}
} ) . call ( this , typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : { } )
} , { "base64-js" : 5 , "ieee754" : 8 , "isarray" : 10 } ] , 7 : [ function ( require , module , exports ) {
/*global window:false, self:false, define:false, module:false */
/ * *
* @ license IDBWrapper - A cross - browser wrapper for IndexedDB
* Version 1.7 . 1
* Copyright ( c ) 2011 - 2016 Jens Arps
* http : //jensarps.de/
*
* Licensed under the MIT ( X11 ) license
* /
( function ( name , definition , global ) {
'use strict' ;
if ( typeof define === 'function' ) {
define ( definition ) ;
} else if ( typeof module !== 'undefined' && module . exports ) {
module . exports = definition ( ) ;
} else {
global [ name ] = definition ( ) ;
}
} ) ( 'IDBStore' , function ( ) {
'use strict' ;
// Fallback handlers used whenever a caller omits a callback.
var defaultErrorHandler = function (error) {
  throw error;
};
var defaultSuccessHandler = function () {
  // intentionally a no-op
};

// Options applied by the IDBStore constructor when no override is given.
var defaults = {
  storeName: 'Store',
  storePrefix: 'IDBWrapper-',
  dbVersion: 1,
  keyPath: 'id',
  autoIncrement: true,
  onStoreReady: function () {
    // intentionally a no-op
  },
  onError: defaultErrorHandler,
  indexes: [],
  implementationPreference: [
    'indexedDB',
    'webkitIndexedDB',
    'mozIndexedDB',
    'shimIndexedDB'
  ]
};
/ * *
*
* The IDBStore constructor
*
* @ constructor
* @ name IDBStore
* @ version 1.7 . 1
*
* @ param { Object } [ kwArgs ] An options object used to configure the store and
* set callbacks
* @ param { String } [ kwArgs . storeName = 'Store' ] The name of the store
* @ param { String } [ kwArgs . storePrefix = 'IDBWrapper-' ] A prefix that is
* internally used to construct the name of the database , which will be
* kwArgs . storePrefix + kwArgs . storeName
* @ param { Number } [ kwArgs . dbVersion = 1 ] The version of the store
* @ param { String } [ kwArgs . keyPath = 'id' ] The key path to use . If you want to
* setup IDBWrapper to work with out - of - line keys , you need to set this to
* ` null `
* @ param { Boolean } [ kwArgs . autoIncrement = true ] If set to true , IDBStore will
* automatically make sure a unique keyPath value is present on each object
* that is stored .
* @ param { Function } [ kwArgs . onStoreReady ] A callback to be called when the
* store is ready to be used .
* @ param { Function } [ kwArgs . onError = throw ] A callback to be called when an
* error occurred during instantiation of the store .
* @ param { Array } [ kwArgs . indexes = [ ] ] An array of indexData objects
* defining the indexes to use with the store . For every index to be used
* one indexData object needs to be passed in the array .
* An indexData object is defined as follows :
* @ param { Object } [ kwArgs . indexes . indexData ] An object defining the index to
* use
* @ param { String } kwArgs . indexes . indexData . name The name of the index
* @ param { String } [ kwArgs . indexes . indexData . keyPath ] The key path of the index
* @ param { Boolean } [ kwArgs . indexes . indexData . unique ] Whether the index is unique
* @ param { Boolean } [ kwArgs . indexes . indexData . multiEntry ] Whether the index is multi entry
* @ param { Array } [ kwArgs . implementationPreference = [ 'indexedDB' , 'webkitIndexedDB' , 'mozIndexedDB' , 'shimIndexedDB' ] ] An array of strings naming implementations to be used , in order or preference
* @ param { Function } [ onStoreReady ] A callback to be called when the store
* is ready to be used .
* @ example
// create a store for customers with an additional index over the
// `lastname` property.
var myCustomerStore = new IDBStore ( {
dbVersion : 1 ,
storeName : 'customer-index' ,
keyPath : 'customerid' ,
autoIncrement : true ,
onStoreReady : populateTable ,
indexes : [
{ name : 'lastname' , keyPath : 'lastname' , unique : false , multiEntry : false }
]
} ) ;
* @ example
// create a generic store
var myCustomerStore = new IDBStore ( {
storeName : 'my-data-store' ,
onStoreReady : function ( ) {
// start working with the store.
}
} ) ;
* /
var IDBStore = function (kwArgs, onStoreReady) {
  // Support the single-argument form: new IDBStore(onStoreReady)
  if (typeof onStoreReady == 'undefined' && typeof kwArgs == 'function') {
    onStoreReady = kwArgs;
  }
  if (Object.prototype.toString.call(kwArgs) != '[object Object]') {
    kwArgs = {};
  }
  // Merge supplied options over the module defaults onto the instance.
  for (var option in defaults) {
    this[option] = typeof kwArgs[option] != 'undefined' ? kwArgs[option] : defaults[option];
  }
  this.dbName = this.storePrefix + this.storeName;
  this.dbVersion = parseInt(this.dbVersion, 10) || 1;
  if (onStoreReady) {
    this.onStoreReady = onStoreReady;
  }
  // Resolve the IndexedDB implementation from the global object
  // (window in pages, self in workers), honouring the preference order.
  var globalObject = typeof window == 'object' ? window : self;
  var available = this.implementationPreference.filter(function (implName) {
    return implName in globalObject;
  });
  this.implementation = available[0];
  this.idb = globalObject[this.implementation];
  this.keyRange = globalObject.IDBKeyRange || globalObject.webkitIDBKeyRange || globalObject.mozIDBKeyRange;
  this.consts = {
    'READ_ONLY': 'readonly',
    'READ_WRITE': 'readwrite',
    'VERSION_CHANGE': 'versionchange',
    'NEXT': 'next',
    'NEXT_NO_DUPLICATE': 'nextunique',
    'PREV': 'prev',
    'PREV_NO_DUPLICATE': 'prevunique'
  };
  this.openDB();
};
/** @lends IDBStore.prototype */
var proto = {
/ * *
* A pointer to the IDBStore ctor
*
* @ private
* @ type { Function }
* @ constructs
* /
constructor : IDBStore ,
/ * *
* The version of IDBStore
*
* @ type { String }
* /
version : '1.7.1' ,
/ * *
* A reference to the IndexedDB object
*
* @ type { IDBDatabase }
* /
db : null ,
/ * *
* The full name of the IndexedDB used by IDBStore , composed of
* this . storePrefix + this . storeName
*
* @ type { String }
* /
dbName : null ,
/ * *
* The version of the IndexedDB used by IDBStore
*
* @ type { Number }
* /
dbVersion : null ,
/ * *
* A reference to the objectStore used by IDBStore
*
* @ type { IDBObjectStore }
* /
store : null ,
/ * *
* The store name
*
* @ type { String }
* /
storeName : null ,
/ * *
* The prefix to prepend to the store name
*
* @ type { String }
* /
storePrefix : null ,
/ * *
* The key path
*
* @ type { String }
* /
keyPath : null ,
/ * *
* Whether IDBStore uses autoIncrement
*
* @ type { Boolean }
* /
autoIncrement : null ,
/ * *
* The indexes used by IDBStore
*
* @ type { Array }
* /
indexes : null ,
/ * *
* The implementations to try to use, in order of preference
*
* @ type { Array }
* /
implementationPreference : null ,
/ * *
* The actual implementation being used
*
* @ type { String }
* /
implementation : '' ,
/ * *
* The callback to be called when the store is ready to be used
*
* @ type { Function }
* /
onStoreReady : null ,
/ * *
* The callback to be called if an error occurred during instantiation
* of the store
*
* @ type { Function }
* /
onError : null ,
/ * *
* The internal insertID counter
*
* @ type { Number }
* @ private
* /
_insertIdCount : 0 ,
/ * *
* Opens an IndexedDB ; called by the constructor .
*
* Will check if versions match and compare provided index configuration
* with existing ones , and update indexes if necessary .
*
* Will call this . onStoreReady ( ) if everything went well and the store
* is ready to use, and this.onError() if something went wrong.
*
* @ private
*
* /
openDB : function ( ) {
var openRequest = this . idb . open ( this . dbName , this . dbVersion ) ;
var preventSuccessCallback = false ;
openRequest . onerror = function ( errorEvent ) {
if ( hasVersionError ( errorEvent ) ) {
this . onError ( new Error ( 'The version number provided is lower than the existing one.' ) ) ;
} else {
var error ;
if ( errorEvent . target . error ) {
error = errorEvent . target . error ;
} else {
var errorMessage = 'IndexedDB unknown error occurred when opening DB ' + this . dbName + ' version ' + this . dbVersion ;
if ( 'errorCode' in errorEvent . target ) {
errorMessage += ' with error code ' + errorEvent . target . errorCode ;
}
error = new Error ( errorMessage ) ;
}
this . onError ( error ) ;
}
} . bind ( this ) ;
openRequest . onsuccess = function ( event ) {
if ( preventSuccessCallback ) {
return ;
}
if ( this . db ) {
this . onStoreReady ( ) ;
return ;
}
this . db = event . target . result ;
if ( typeof this . db . version == 'string' ) {
this . onError ( new Error ( 'The IndexedDB implementation in this browser is outdated. Please upgrade your browser.' ) ) ;
return ;
}
if ( ! this . db . objectStoreNames . contains ( this . storeName ) ) {
// We should never ever get here.
// Lets notify the user anyway.
this . onError ( new Error ( 'Object store couldn\'t be created.' ) ) ;
return ;
}
var emptyTransaction = this . db . transaction ( [ this . storeName ] , this . consts . READ _ONLY ) ;
this . store = emptyTransaction . objectStore ( this . storeName ) ;
// check indexes
var existingIndexes = Array . prototype . slice . call ( this . getIndexList ( ) ) ;
this . indexes . forEach ( function ( indexData ) {
var indexName = indexData . name ;
if ( ! indexName ) {
preventSuccessCallback = true ;
this . onError ( new Error ( 'Cannot create index: No index name given.' ) ) ;
return ;
}
this . normalizeIndexData ( indexData ) ;
if ( this . hasIndex ( indexName ) ) {
// check if it complies
var actualIndex = this . store . index ( indexName ) ;
var complies = this . indexComplies ( actualIndex , indexData ) ;
if ( ! complies ) {
preventSuccessCallback = true ;
this . onError ( new Error ( 'Cannot modify index "' + indexName + '" for current version. Please bump version number to ' + ( this . dbVersion + 1 ) + '.' ) ) ;
}
existingIndexes . splice ( existingIndexes . indexOf ( indexName ) , 1 ) ;
} else {
preventSuccessCallback = true ;
this . onError ( new Error ( 'Cannot create new index "' + indexName + '" for current version. Please bump version number to ' + ( this . dbVersion + 1 ) + '.' ) ) ;
}
} , this ) ;
if ( existingIndexes . length ) {
preventSuccessCallback = true ;
this . onError ( new Error ( 'Cannot delete index(es) "' + existingIndexes . toString ( ) + '" for current version. Please bump version number to ' + ( this . dbVersion + 1 ) + '.' ) ) ;
}
preventSuccessCallback || this . onStoreReady ( ) ;
} . bind ( this ) ;
openRequest . onupgradeneeded = function ( /* IDBVersionChangeEvent */ event ) {
this . db = event . target . result ;
if ( this . db . objectStoreNames . contains ( this . storeName ) ) {
this . store = event . target . transaction . objectStore ( this . storeName ) ;
} else {
var optionalParameters = { autoIncrement : this . autoIncrement } ;
if ( this . keyPath !== null ) {
optionalParameters . keyPath = this . keyPath ;
}
this . store = this . db . createObjectStore ( this . storeName , optionalParameters ) ;
}
var existingIndexes = Array . prototype . slice . call ( this . getIndexList ( ) ) ;
this . indexes . forEach ( function ( indexData ) {
var indexName = indexData . name ;
if ( ! indexName ) {
preventSuccessCallback = true ;
this . onError ( new Error ( 'Cannot create index: No index name given.' ) ) ;
}
this . normalizeIndexData ( indexData ) ;
if ( this . hasIndex ( indexName ) ) {
// check if it complies
var actualIndex = this . store . index ( indexName ) ;
var complies = this . indexComplies ( actualIndex , indexData ) ;
if ( ! complies ) {
// index differs, need to delete and re-create
this . store . deleteIndex ( indexName ) ;
this . store . createIndex ( indexName , indexData . keyPath , {
unique : indexData . unique ,
multiEntry : indexData . multiEntry
} ) ;
}
existingIndexes . splice ( existingIndexes . indexOf ( indexName ) , 1 ) ;
} else {
this . store . createIndex ( indexName , indexData . keyPath , {
unique : indexData . unique ,
multiEntry : indexData . multiEntry
} ) ;
}
} , this ) ;
if ( existingIndexes . length ) {
existingIndexes . forEach ( function ( _indexName ) {
this . store . deleteIndex ( _indexName ) ;
} , this ) ;
}
} . bind ( this ) ;
} ,
/ * *
* Deletes the database used for this store if the IDB implementations
* provides that functionality .
*
* @ param { Function } [ onSuccess ] A callback that is called if deletion
* was successful .
* @ param { Function } [ onError ] A callback that is called if deletion
* failed .
* /
deleteDatabase : function ( onSuccess , onError ) {
if ( this . idb . deleteDatabase ) {
this . db . close ( ) ;
var deleteRequest = this . idb . deleteDatabase ( this . dbName ) ;
deleteRequest . onsuccess = onSuccess ;
deleteRequest . onerror = onError ;
} else {
onError ( new Error ( 'Browser does not support IndexedDB deleteDatabase!' ) ) ;
}
} ,
/ * * * * * * * * * * * * * * * * * * * * *
* data manipulation *
* * * * * * * * * * * * * * * * * * * * * /
/ * *
* Puts an object into the store . If an entry with the given id exists ,
* it will be overwritten . This method has a different signature for inline
* keys and out - of - line keys ; please see the examples below .
*
* @ param { * } [ key ] The key to store . This is only needed if IDBWrapper
* is set to use out - of - line keys . For inline keys - the default scenario -
* this can be omitted .
* @ param { Object } value The data object to store .
* @ param { Function } [ onSuccess ] A callback that is called if insertion
* was successful .
* @ param { Function } [ onError ] A callback that is called if insertion
* failed .
* @ returns { IDBTransaction } The transaction used for this operation .
* @ example
// Storing an object, using inline keys (the default scenario):
var myCustomer = {
customerid : 2346223 ,
lastname : 'Doe' ,
firstname : 'John'
} ;
myCustomerStore . put ( myCustomer , mySuccessHandler , myErrorHandler ) ;
// Note that passing success- and error-handlers is optional.
* @ example
// Storing an object, using out-of-line keys:
var myCustomer = {
lastname : 'Doe' ,
firstname : 'John'
} ;
myCustomerStore . put ( 2346223 , myCustomer , mySuccessHandler , myErrorHandler ) ;
// Note that passing success- and error-handlers is optional.
* /
put : function ( key , value , onSuccess , onError ) {
if ( this . keyPath !== null ) {
onError = onSuccess ;
onSuccess = value ;
value = key ;
}
onError || ( onError = defaultErrorHandler ) ;
onSuccess || ( onSuccess = defaultSuccessHandler ) ;
var hasSuccess = false ,
result = null ,
putRequest ;
var putTransaction = this . db . transaction ( [ this . storeName ] , this . consts . READ _WRITE ) ;
putTransaction . oncomplete = function ( ) {
var callback = hasSuccess ? onSuccess : onError ;
callback ( result ) ;
} ;
putTransaction . onabort = onError ;
putTransaction . onerror = onError ;
if ( this . keyPath !== null ) { // in-line keys
this . _addIdPropertyIfNeeded ( value ) ;
putRequest = putTransaction . objectStore ( this . storeName ) . put ( value ) ;
} else { // out-of-line keys
putRequest = putTransaction . objectStore ( this . storeName ) . put ( value , key ) ;
}
putRequest . onsuccess = function ( event ) {
hasSuccess = true ;
result = event . target . result ;
} ;
putRequest . onerror = onError ;
return putTransaction ;
} ,
/ * *
* Retrieves an object from the store . If no entry exists with the given id ,
* the success handler will be called with null as first and only argument .
*
* @ param { * } key The id of the object to fetch .
* @ param { Function } [ onSuccess ] A callback that is called if fetching
* was successful . Will receive the object as only argument .
* @ param { Function } [ onError ] A callback that will be called if an error
* occurred during the operation .
* @ returns { IDBTransaction } The transaction used for this operation .
* /
get : function ( key , onSuccess , onError ) {
onError || ( onError = defaultErrorHandler ) ;
onSuccess || ( onSuccess = defaultSuccessHandler ) ;
var hasSuccess = false ,
result = null ;
var getTransaction = this . db . transaction ( [ this . storeName ] , this . consts . READ _ONLY ) ;
getTransaction . oncomplete = function ( ) {
var callback = hasSuccess ? onSuccess : onError ;
callback ( result ) ;
} ;
getTransaction . onabort = onError ;
getTransaction . onerror = onError ;
var getRequest = getTransaction . objectStore ( this . storeName ) . get ( key ) ;
getRequest . onsuccess = function ( event ) {
hasSuccess = true ;
result = event . target . result ;
} ;
getRequest . onerror = onError ;
return getTransaction ;
} ,
/ * *
* Removes an object from the store .
*
* @ param { * } key The id of the object to remove .
* @ param { Function } [ onSuccess ] A callback that is called if the removal
* was successful .
* @ param { Function } [ onError ] A callback that will be called if an error
* occurred during the operation .
* @ returns { IDBTransaction } The transaction used for this operation .
* /
remove : function ( key , onSuccess , onError ) {
onError || ( onError = defaultErrorHandler ) ;
onSuccess || ( onSuccess = defaultSuccessHandler ) ;
var hasSuccess = false ,
result = null ;
var removeTransaction = this . db . transaction ( [ this . storeName ] , this . consts . READ _WRITE ) ;
removeTransaction . oncomplete = function ( ) {
var callback = hasSuccess ? onSuccess : onError ;
callback ( result ) ;
} ;
removeTransaction . onabort = onError ;
removeTransaction . onerror = onError ;
var deleteRequest = removeTransaction . objectStore ( this . storeName ) [ 'delete' ] ( key ) ;
deleteRequest . onsuccess = function ( event ) {
hasSuccess = true ;
result = event . target . result ;
} ;
deleteRequest . onerror = onError ;
return removeTransaction ;
} ,
/ * *
* Runs a batch of put and / or remove operations on the store .
*
* @ param { Array } dataArray An array of objects containing the operation to run
* and the data object ( for put operations ) .
* @ param { Function } [ onSuccess ] A callback that is called if all operations
* were successful .
* @ param { Function } [ onError ] A callback that is called if an error
* occurred during one of the operations .
* @ returns { IDBTransaction } The transaction used for this operation .
* /
batch : function ( dataArray , onSuccess , onError ) {
onError || ( onError = defaultErrorHandler ) ;
onSuccess || ( onSuccess = defaultSuccessHandler ) ;
if ( Object . prototype . toString . call ( dataArray ) != '[object Array]' ) {
onError ( new Error ( 'dataArray argument must be of type Array.' ) ) ;
} else if ( dataArray . length === 0 ) {
return onSuccess ( true ) ;
}
var count = dataArray . length ;
var called = false ;
var hasSuccess = false ;
var batchTransaction = this . db . transaction ( [ this . storeName ] , this . consts . READ _WRITE ) ;
batchTransaction . oncomplete = function ( ) {
var callback = hasSuccess ? onSuccess : onError ;
callback ( hasSuccess ) ;
} ;
batchTransaction . onabort = onError ;
batchTransaction . onerror = onError ;
var onItemSuccess = function ( ) {
count -- ;
if ( count === 0 && ! called ) {
called = true ;
hasSuccess = true ;
}
} ;
dataArray . forEach ( function ( operation ) {
var type = operation . type ;
var key = operation . key ;
var value = operation . value ;
var onItemError = function ( err ) {
batchTransaction . abort ( ) ;
if ( ! called ) {
called = true ;
onError ( err , type , key ) ;
}
} ;
if ( type == 'remove' ) {
var deleteRequest = batchTransaction . objectStore ( this . storeName ) [ 'delete' ] ( key ) ;
deleteRequest . onsuccess = onItemSuccess ;
deleteRequest . onerror = onItemError ;
} else if ( type == 'put' ) {
var putRequest ;
if ( this . keyPath !== null ) { // in-line keys
this . _addIdPropertyIfNeeded ( value ) ;
putRequest = batchTransaction . objectStore ( this . storeName ) . put ( value ) ;
} else { // out-of-line keys
putRequest = batchTransaction . objectStore ( this . storeName ) . put ( value , key ) ;
}
putRequest . onsuccess = onItemSuccess ;
putRequest . onerror = onItemError ;
}
} , this ) ;
return batchTransaction ;
} ,
/ * *
* Takes an array of objects and stores them in a single transaction .
*
* @ param { Array } dataArray An array of objects to store
* @ param { Function } [ onSuccess ] A callback that is called if all operations
* were successful .
* @ param { Function } [ onError ] A callback that is called if an error
* occurred during one of the operations .
* @ returns { IDBTransaction } The transaction used for this operation .
* /
putBatch : function ( dataArray , onSuccess , onError ) {
var batchData = dataArray . map ( function ( item ) {
return { type : 'put' , value : item } ;
} ) ;
return this . batch ( batchData , onSuccess , onError ) ;
} ,
/ * *
* Like putBatch , takes an array of objects and stores them in a single
* transaction , but allows processing of the result values . Returns the
* processed records containing the key for newly created records to the
* onSuccess callback instead of only returning true or false for success.
* In addition , added the option for the caller to specify a key field that
* should be set to the newly created key .
*
* @ param { Array } dataArray An array of objects to store
* @ param { Object } [ options ] An object containing optional options
* @ param { String } [ options . keyField = this . keyPath ] Specifies a field in the record to update
* with the auto - incrementing key . Defaults to the store ' s keyPath .
* @ param { Function } [ onSuccess ] A callback that is called if all operations
* were successful .
* @ param { Function } [ onError ] A callback that is called if an error
* occurred during one of the operations .
* @ returns { IDBTransaction } The transaction used for this operation .
*
* /
upsertBatch : function ( dataArray , options , onSuccess , onError ) {
// handle `dataArray, onSuccess, onError` signature
if ( typeof options == 'function' ) {
onSuccess = options ;
onError = onSuccess ;
options = { } ;
}
onError || ( onError = defaultErrorHandler ) ;
onSuccess || ( onSuccess = defaultSuccessHandler ) ;
options || ( options = { } ) ;
if ( Object . prototype . toString . call ( dataArray ) != '[object Array]' ) {
onError ( new Error ( 'dataArray argument must be of type Array.' ) ) ;
}
var keyField = options . keyField || this . keyPath ;
var count = dataArray . length ;
var called = false ;
var hasSuccess = false ;
var index = 0 ; // assume success callbacks are executed in order
var batchTransaction = this . db . transaction ( [ this . storeName ] , this . consts . READ _WRITE ) ;
batchTransaction . oncomplete = function ( ) {
if ( hasSuccess ) {
onSuccess ( dataArray ) ;
} else {
onError ( false ) ;
}
} ;
batchTransaction . onabort = onError ;
batchTransaction . onerror = onError ;
var onItemSuccess = function ( event ) {
var record = dataArray [ index ++ ] ;
record [ keyField ] = event . target . result ;
count -- ;
if ( count === 0 && ! called ) {
called = true ;
hasSuccess = true ;
}
} ;
dataArray . forEach ( function ( record ) {
var key = record . key ;
var onItemError = function ( err ) {
batchTransaction . abort ( ) ;
if ( ! called ) {
called = true ;
onError ( err ) ;
}
} ;
var putRequest ;
if ( this . keyPath !== null ) { // in-line keys
this . _addIdPropertyIfNeeded ( record ) ;
putRequest = batchTransaction . objectStore ( this . storeName ) . put ( record ) ;
} else { // out-of-line keys
putRequest = batchTransaction . objectStore ( this . storeName ) . put ( record , key ) ;
}
putRequest . onsuccess = onItemSuccess ;
putRequest . onerror = onItemError ;
} , this ) ;
return batchTransaction ;
} ,
/ * *
* Takes an array of keys and removes matching objects in a single
* transaction .
*
* @ param { Array } keyArray An array of keys to remove
* @ param { Function } [ onSuccess ] A callback that is called if all operations
* were successful .
* @ param { Function } [ onError ] A callback that is called if an error
* occurred during one of the operations .
* @ returns { IDBTransaction } The transaction used for this operation .
* /
removeBatch : function ( keyArray , onSuccess , onError ) {
var batchData = keyArray . map ( function ( key ) {
return { type : 'remove' , key : key } ;
} ) ;
return this . batch ( batchData , onSuccess , onError ) ;
} ,
/ * *
* Takes an array of keys and fetches matching objects
*
* @ param { Array } keyArray An array of keys identifying the objects to fetch
* @ param { Function } [ onSuccess ] A callback that is called if all operations
* were successful .
* @ param { Function } [ onError ] A callback that is called if an error
* occurred during one of the operations .
* @ param { String } [ arrayType = 'sparse' ] The type of array to pass to the
* success handler . May be one of 'sparse' , 'dense' or 'skip' . Defaults to
* 'sparse' . This parameter specifies how to handle the situation if a get
* operation did not throw an error , but there was no matching object in
* the database . In most cases , 'sparse' provides the most desired
* behavior . See the examples for details .
* @ returns { IDBTransaction } The transaction used for this operation .
* @ example
// given that there are two objects in the database with the keypath
// values 1 and 2, and the call looks like this:
myStore . getBatch ( [ 1 , 5 , 2 ] , onError , function ( data ) { … } , arrayType ) ;
// this is what the `data` array will be like:
// arrayType == 'sparse':
// data is a sparse array containing two entries and having a length of 3:
[ Object , 2 : Object ]
0 : Object
2 : Object
length : 3
// calling forEach on data will result in the callback being called two
// times, with the index parameter matching the index of the key in the
// keyArray.
// arrayType == 'dense':
// data is a dense array containing three entries and having a length of 3,
// where data[1] is of type undefined:
[ Object , undefined , Object ]
0 : Object
1 : undefined
2 : Object
length : 3
// calling forEach on data will result in the callback being called three
// times, with the index parameter matching the index of the key in the
// keyArray, but the second call will have undefined as first argument.
// arrayType == 'skip':
// data is a dense array containing two entries and having a length of 2:
[ Object , Object ]
0 : Object
1 : Object
length : 2
// calling forEach on data will result in the callback being called two
// times, with the index parameter not matching the index of the key in the
// keyArray.
* /
getBatch : function ( keyArray , onSuccess , onError , arrayType ) {
onError || ( onError = defaultErrorHandler ) ;
onSuccess || ( onSuccess = defaultSuccessHandler ) ;
arrayType || ( arrayType = 'sparse' ) ;
if ( Object . prototype . toString . call ( keyArray ) != '[object Array]' ) {
onError ( new Error ( 'keyArray argument must be of type Array.' ) ) ;
} else if ( keyArray . length === 0 ) {
return onSuccess ( [ ] ) ;
}
var data = [ ] ;
var count = keyArray . length ;
var called = false ;
var hasSuccess = false ;
var result = null ;
var batchTransaction = this . db . transaction ( [ this . storeName ] , this . consts . READ _ONLY ) ;
batchTransaction . oncomplete = function ( ) {
var callback = hasSuccess ? onSuccess : onError ;
callback ( result ) ;
} ;
batchTransaction . onabort = onError ;
batchTransaction . onerror = onError ;
var onItemSuccess = function ( event ) {
if ( event . target . result || arrayType == 'dense' ) {
data . push ( event . target . result ) ;
} else if ( arrayType == 'sparse' ) {
data . length ++ ;
}
count -- ;
if ( count === 0 ) {
called = true ;
hasSuccess = true ;
result = data ;
}
} ;
keyArray . forEach ( function ( key ) {
var onItemError = function ( err ) {
called = true ;
result = err ;
onError ( err ) ;
batchTransaction . abort ( ) ;
} ;
var getRequest = batchTransaction . objectStore ( this . storeName ) . get ( key ) ;
getRequest . onsuccess = onItemSuccess ;
getRequest . onerror = onItemError ;
} , this ) ;
return batchTransaction ;
} ,
/ * *
* Fetches all entries in the store .
*
* @ param { Function } [ onSuccess ] A callback that is called if the operation
* was successful . Will receive an array of objects .
* @ param { Function } [ onError ] A callback that will be called if an error
* occurred during the operation .
* @ returns { IDBTransaction } The transaction used for this operation .
* /
getAll : function ( onSuccess , onError ) {
onError || ( onError = defaultErrorHandler ) ;
onSuccess || ( onSuccess = defaultSuccessHandler ) ;
var getAllTransaction = this . db . transaction ( [ this . storeName ] , this . consts . READ _ONLY ) ;
var store = getAllTransaction . objectStore ( this . storeName ) ;
if ( store . getAll ) {
this . _getAllNative ( getAllTransaction , store , onSuccess , onError ) ;
} else {
this . _getAllCursor ( getAllTransaction , store , onSuccess , onError ) ;
}
return getAllTransaction ;
} ,
/ * *
* Implements getAll for IDB implementations that have a non - standard
* getAll ( ) method .
*
* @ param { IDBTransaction } getAllTransaction An open READ transaction .
* @ param { IDBObjectStore } store A reference to the store .
* @ param { Function } onSuccess A callback that will be called if the
* operation was successful .
* @ param { Function } onError A callback that will be called if an
* error occurred during the operation .
* @ private
* /
_getAllNative : function ( getAllTransaction , store , onSuccess , onError ) {
var hasSuccess = false ,
result = null ;
getAllTransaction . oncomplete = function ( ) {
var callback = hasSuccess ? onSuccess : onError ;
callback ( result ) ;
} ;
getAllTransaction . onabort = onError ;
getAllTransaction . onerror = onError ;
var getAllRequest = store . getAll ( ) ;
getAllRequest . onsuccess = function ( event ) {
hasSuccess = true ;
result = event . target . result ;
} ;
getAllRequest . onerror = onError ;
} ,
/ * *
* Implements getAll for IDB implementations that do not have a getAll ( )
* method .
*
* @ param { IDBTransaction } getAllTransaction An open READ transaction .
* @ param { IDBObjectStore } store A reference to the store .
* @ param { Function } onSuccess A callback that will be called if the
* operation was successful .
* @ param { Function } onError A callback that will be called if an
* error occurred during the operation .
* @ private
* /
_getAllCursor : function ( getAllTransaction , store , onSuccess , onError ) {
var all = [ ] ,
hasSuccess = false ,
result = null ;
getAllTransaction . oncomplete = function ( ) {
var callback = hasSuccess ? onSuccess : onError ;
callback ( result ) ;
} ;
getAllTransaction . onabort = onError ;
getAllTransaction . onerror = onError ;
var cursorRequest = store . openCursor ( ) ;
cursorRequest . onsuccess = function ( event ) {
var cursor = event . target . result ;
if ( cursor ) {
all . push ( cursor . value ) ;
cursor [ 'continue' ] ( ) ;
}
else {
hasSuccess = true ;
result = all ;
}
} ;
cursorRequest . onError = onError ;
} ,
/ * *
* Clears the store , i . e . deletes all entries in the store .
*
* @ param { Function } [ onSuccess ] A callback that will be called if the
* operation was successful .
* @ param { Function } [ onError ] A callback that will be called if an
* error occurred during the operation .
* @ returns { IDBTransaction } The transaction used for this operation .
* /
clear : function ( onSuccess , onError ) {
onError || ( onError = defaultErrorHandler ) ;
onSuccess || ( onSuccess = defaultSuccessHandler ) ;
var hasSuccess = false ,
result = null ;
var clearTransaction = this . db . transaction ( [ this . storeName ] , this . consts . READ _WRITE ) ;
clearTransaction . oncomplete = function ( ) {
var callback = hasSuccess ? onSuccess : onError ;
callback ( result ) ;
} ;
clearTransaction . onabort = onError ;
clearTransaction . onerror = onError ;
var clearRequest = clearTransaction . objectStore ( this . storeName ) . clear ( ) ;
clearRequest . onsuccess = function ( event ) {
hasSuccess = true ;
result = event . target . result ;
} ;
clearRequest . onerror = onError ;
return clearTransaction ;
} ,
/ * *
* Checks if an id property needs to be present on an object and adds one if
* necessary .
*
* @ param { Object } dataObj The data object that is about to be stored
* @ private
* /
_addIdPropertyIfNeeded : function ( dataObj ) {
if ( typeof dataObj [ this . keyPath ] == 'undefined' ) {
dataObj [ this . keyPath ] = this . _insertIdCount ++ + Date . now ( ) ;
}
} ,
/ * * * * * * * * * * * *
* indexing *
* * * * * * * * * * * * /
/ * *
* Returns a DOMStringList of index names of the store .
*
* @ return { DOMStringList } The list of index names
* /
getIndexList : function ( ) {
return this . store . indexNames ;
} ,
/ * *
* Checks if an index with the given name exists in the store .
*
* @ param { String } indexName The name of the index to look for
* @ return { Boolean } Whether the store contains an index with the given name
* /
hasIndex : function ( indexName ) {
return this . store . indexNames . contains ( indexName ) ;
} ,
/ * *
* Normalizes an object containing index data and assures that all
* properties are set .
*
* @ param { Object } indexData The index data object to normalize
* @ param { String } indexData . name The name of the index
* @ param { String } [ indexData . keyPath ] The key path of the index
* @ param { Boolean } [ indexData . unique ] Whether the index is unique
* @ param { Boolean } [ indexData . multiEntry ] Whether the index is multi entry
* /
normalizeIndexData : function ( indexData ) {
indexData . keyPath = indexData . keyPath || indexData . name ;
indexData . unique = ! ! indexData . unique ;
indexData . multiEntry = ! ! indexData . multiEntry ;
} ,
/ * *
* Checks if an actual index complies with an expected index .
*
* @ param { IDBIndex } actual The actual index found in the store
* @ param { Object } expected An Object describing an expected index
* @ return { Boolean } Whether both index definitions are identical
* /
indexComplies : function ( actual , expected ) {
var complies = [ 'keyPath' , 'unique' , 'multiEntry' ] . every ( function ( key ) {
// IE10 returns undefined for no multiEntry
if ( key == 'multiEntry' && actual [ key ] === undefined && expected [ key ] === false ) {
return true ;
}
// Compound keys
if ( key == 'keyPath' && Object . prototype . toString . call ( expected [ key ] ) == '[object Array]' ) {
var exp = expected . keyPath ;
var act = actual . keyPath ;
// IE10 can't handle keyPath sequences and stores them as a string.
// The index will be unusable there, but let's still return true if
// the keyPath sequence matches.
if ( typeof act == 'string' ) {
return exp . toString ( ) == act ;
}
// Chrome/Opera stores keyPath squences as DOMStringList, Firefox
// as Array
if ( ! ( typeof act . contains == 'function' || typeof act . indexOf == 'function' ) ) {
return false ;
}
if ( act . length !== exp . length ) {
return false ;
}
for ( var i = 0 , m = exp . length ; i < m ; i ++ ) {
if ( ! ( ( act . contains && act . contains ( exp [ i ] ) ) || act . indexOf ( exp [ i ] !== - 1 ) ) ) {
return false ;
}
}
return true ;
}
return expected [ key ] == actual [ key ] ;
} ) ;
return complies ;
} ,
/ * * * * * * * * * *
* cursor *
* * * * * * * * * * /
/ * *
* Iterates over the store using the given options and calling onItem
* for each entry matching the options .
*
* @ param { Function } onItem A callback to be called for each match
* @ param { Object } [ options ] An object defining specific options
* @ param { String } [ options . index = null ] A name of an IDBIndex to operate on
* @ param { String } [ options . order = ASC ] The order in which to provide the
* results , can be 'DESC' or 'ASC'
* @ param { Boolean } [ options . autoContinue = true ] Whether to automatically
* iterate the cursor to the next result
* @ param { Boolean } [ options . filterDuplicates = false ] Whether to exclude
* duplicate matches
* @ param { IDBKeyRange } [ options . keyRange = null ] An IDBKeyRange to use
* @ param { Boolean } [ options . writeAccess = false ] Whether grant write access
* to the store in the onItem callback
* @ param { Function } [ options . onEnd = null ] A callback to be called after
* iteration has ended
* @ param { Function } [ options . onError = throw ] A callback to be called
* if an error occurred during the operation .
* @ param { Number } [ options . limit = Infinity ] Limit the number of returned
* results to this number
* @ param { Number } [ options . offset = 0 ] Skip the provided number of results
* in the resultset
* @ param { Boolean } [ options . allowItemRejection = false ] Allows the onItem
* function to return a Boolean to accept or reject the current item
* @ returns { IDBTransaction } The transaction used for this operation .
* /
iterate: function (onItem, options) {
  // Merge caller options over the defaults documented in the JSDoc above.
  options = mixin({
    index: null,
    order: 'ASC',
    autoContinue: true,
    filterDuplicates: false,
    keyRange: null,
    writeAccess: false,
    onEnd: null,
    onError: defaultErrorHandler,
    limit: Infinity,
    offset: 0,
    allowItemRejection: false
  }, options || {});
  // Map the order / duplicate-filter options onto the name of the cursor
  // direction constant ('NEXT'/'PREV', optionally '_NO_DUPLICATE').
  var directionType = options.order.toLowerCase() == 'desc' ? 'PREV' : 'NEXT';
  if (options.filterDuplicates) {
    directionType += '_NO_DUPLICATE';
  }
  var hasSuccess = false;
  var cursorTransaction = this.db.transaction([this.storeName], this.consts[options.writeAccess ? 'READ_WRITE' : 'READ_ONLY']);
  var cursorTarget = cursorTransaction.objectStore(this.storeName);
  if (options.index) {
    cursorTarget = cursorTarget.index(options.index);
  }
  var recordCount = 0;
  cursorTransaction.oncomplete = function () {
    // Completing without the success flag set means iteration terminated
    // abnormally; report it through onError.
    if (!hasSuccess) {
      options.onError(null);
      return;
    }
    if (options.onEnd) {
      options.onEnd();
    } else {
      // Legacy end-of-iteration signal: a null item.
      onItem(null);
    }
  };
  cursorTransaction.onabort = options.onError;
  cursorTransaction.onerror = options.onError;
  var cursorRequest = cursorTarget.openCursor(options.keyRange, this.consts[directionType]);
  cursorRequest.onerror = options.onError;
  cursorRequest.onsuccess = function (event) {
    var cursor = event.target.result;
    if (cursor) {
      if (options.offset) {
        // Skip `offset` records in a single jump, then zero it so the next
        // onsuccess call processes records normally.
        cursor.advance(options.offset);
        options.offset = 0;
      } else {
        var onItemReturn = onItem(cursor.value, cursor, cursorTransaction);
        // A strict `false` return only rejects the item when the caller
        // opted in via allowItemRejection.
        if (!options.allowItemRejection || onItemReturn !== false) {
          recordCount++;
        }
        if (options.autoContinue) {
          if (recordCount + options.offset < options.limit) {
            cursor['continue']();
          } else {
            hasSuccess = true;
          }
        }
      }
    } else {
      // Cursor exhausted: a normal, successful end of iteration.
      hasSuccess = true;
    }
  };
  return cursorTransaction;
},
/ * *
* Runs a query against the store and passes an array containing matched
* objects to the success handler .
*
* @ param { Function } onSuccess A callback to be called when the operation
* was successful .
* @ param { Object } [ options ] An object defining specific options
* @ param { String } [ options . index = null ] A name of an IDBIndex to operate on
* @ param { String } [ options . order = ASC ] The order in which to provide the
* results , can be 'DESC' or 'ASC'
* @ param { Boolean } [ options . filterDuplicates = false ] Whether to exclude
* duplicate matches
* @ param { IDBKeyRange } [ options . keyRange = null ] An IDBKeyRange to use
* @ param { Function } [ options . onError = throw ] A callback to be called
* if an error occurred during the operation .
* @ param { Number } [ options . limit = Infinity ] Limit the number of returned
* results to this number
* @ param { Number } [ options . offset = 0 ] Skip the provided number of results
* in the resultset
* @ param { Function } [ options . filter = null ] A custom filter function to
* apply to query resuts before returning . Must return ` false ` to reject
* an item . Can be combined with keyRanges .
* @ returns { IDBTransaction } The transaction used for this operation .
* /
query : function ( onSuccess , options ) {
var result = [ ] ,
processedItems = 0 ;
options = options || { } ;
options . autoContinue = true ;
options . writeAccess = false ;
options . allowItemRejection = ! ! options . filter ;
options . onEnd = function ( ) {
onSuccess ( result , processedItems ) ;
} ;
return this . iterate ( function ( item ) {
processedItems ++ ;
var accept = options . filter ? options . filter ( item ) : true ;
if ( accept !== false ) {
result . push ( item ) ;
}
return accept ;
} , options ) ;
} ,
/ * *
*
* Runs a query against the store , but only returns the number of matches
* instead of the matches itself .
*
* @ param { Function } onSuccess A callback to be called if the opration
* was successful .
* @ param { Object } [ options ] An object defining specific options
* @ param { String } [ options . index = null ] A name of an IDBIndex to operate on
* @ param { IDBKeyRange } [ options . keyRange = null ] An IDBKeyRange to use
* @ param { Function } [ options . onError = throw ] A callback to be called if an error
* occurred during the operation .
* @ returns { IDBTransaction } The transaction used for this operation .
* /
count : function ( onSuccess , options ) {
options = mixin ( {
index : null ,
keyRange : null
} , options || { } ) ;
var onError = options . onError || defaultErrorHandler ;
var hasSuccess = false ,
result = null ;
var cursorTransaction = this . db . transaction ( [ this . storeName ] , this . consts . READ _ONLY ) ;
cursorTransaction . oncomplete = function ( ) {
var callback = hasSuccess ? onSuccess : onError ;
callback ( result ) ;
} ;
cursorTransaction . onabort = onError ;
cursorTransaction . onerror = onError ;
var cursorTarget = cursorTransaction . objectStore ( this . storeName ) ;
if ( options . index ) {
cursorTarget = cursorTarget . index ( options . index ) ;
}
var countRequest = cursorTarget . count ( options . keyRange ) ;
countRequest . onsuccess = function ( evt ) {
hasSuccess = true ;
result = evt . target . result ;
} ;
countRequest . onError = onError ;
return cursorTransaction ;
} ,
/**************/
/* key ranges */
/**************/
/ * *
* Creates a key range using specified options . This key range can be
* handed over to the count ( ) and iterate ( ) methods .
*
* Note : You must provide at least one or both of "lower" or "upper" value .
*
* @ param { Object } options The options for the key range to create
* @ param { * } [ options . lower ] The lower bound
* @ param { Boolean } [ options . excludeLower ] Whether to exclude the lower
* bound passed in options . lower from the key range
* @ param { * } [ options . upper ] The upper bound
* @ param { Boolean } [ options . excludeUpper ] Whether to exclude the upper
* bound passed in options . upper from the key range
* @ param { * } [ options . only ] A single key value . Use this if you need a key
* range that only includes one value for a key . Providing this
* property invalidates all other properties .
* @ return { IDBKeyRange } The IDBKeyRange representing the specified options
* /
makeKeyRange : function ( options ) {
/*jshint onecase:true */
var keyRange ,
hasLower = typeof options . lower != 'undefined' ,
hasUpper = typeof options . upper != 'undefined' ,
isOnly = typeof options . only != 'undefined' ;
switch ( true ) {
case isOnly :
keyRange = this . keyRange . only ( options . only ) ;
break ;
case hasLower && hasUpper :
keyRange = this . keyRange . bound ( options . lower , options . upper , options . excludeLower , options . excludeUpper ) ;
break ;
case hasLower :
keyRange = this . keyRange . lowerBound ( options . lower , options . excludeLower ) ;
break ;
case hasUpper :
keyRange = this . keyRange . upperBound ( options . upper , options . excludeUpper ) ;
break ;
default :
throw new Error ( 'Cannot create KeyRange. Provide one or both of "lower" or "upper" value, or an "only" value.' ) ;
}
return keyRange ;
}
} ;
/** helpers **/
var empty = { } ;
// Copy every enumerable property of `source` onto `target`, skipping values
// that are already identical on the target or inherited from Object (checked
// via the shared `empty` sentinel object).
function mixin (target, source) {
  for (var name in source) {
    var value = source[name];
    if (value !== empty[name] && value !== target[name]) {
      target[name] = value;
    }
  }
  return target;
}
// Detect a version-mismatch failure from an IndexedDB error event, covering
// both the modern DOMException shape (`error.name`) and the legacy numeric
// `errorCode` (12) exposed by older implementations.
function hasVersionError (errorEvent) {
  var target = errorEvent.target;
  if ('error' in target) {
    return target.error.name == 'VersionError';
  }
  if ('errorCode' in target) {
    return target.errorCode == 12;
  }
  return false;
}
IDBStore . prototype = proto ;
IDBStore . version = proto . version ;
return IDBStore ;
} , this ) ;
} , { } ] , 8 : [ function ( require , module , exports ) {
exports . read = function ( buffer , offset , isLE , mLen , nBytes ) {
var e , m
var eLen = nBytes * 8 - mLen - 1
var eMax = ( 1 << eLen ) - 1
var eBias = eMax >> 1
var nBits = - 7
var i = isLE ? ( nBytes - 1 ) : 0
var d = isLE ? - 1 : 1
var s = buffer [ offset + i ]
i += d
e = s & ( ( 1 << ( - nBits ) ) - 1 )
s >>= ( - nBits )
nBits += eLen
for ( ; nBits > 0 ; e = e * 256 + buffer [ offset + i ] , i += d , nBits -= 8 ) { }
m = e & ( ( 1 << ( - nBits ) ) - 1 )
e >>= ( - nBits )
nBits += mLen
for ( ; nBits > 0 ; m = m * 256 + buffer [ offset + i ] , i += d , nBits -= 8 ) { }
if ( e === 0 ) {
e = 1 - eBias
} else if ( e === eMax ) {
return m ? NaN : ( ( s ? - 1 : 1 ) * Infinity )
} else {
m = m + Math . pow ( 2 , mLen )
e = e - eBias
}
return ( s ? - 1 : 1 ) * m * Math . pow ( 2 , e - mLen )
}
exports . write = function ( buffer , value , offset , isLE , mLen , nBytes ) {
var e , m , c
var eLen = nBytes * 8 - mLen - 1
var eMax = ( 1 << eLen ) - 1
var eBias = eMax >> 1
var rt = ( mLen === 23 ? Math . pow ( 2 , - 24 ) - Math . pow ( 2 , - 77 ) : 0 )
var i = isLE ? 0 : ( nBytes - 1 )
var d = isLE ? 1 : - 1
var s = value < 0 || ( value === 0 && 1 / value < 0 ) ? 1 : 0
value = Math . abs ( value )
if ( isNaN ( value ) || value === Infinity ) {
m = isNaN ( value ) ? 1 : 0
e = eMax
} else {
e = Math . floor ( Math . log ( value ) / Math . LN2 )
if ( value * ( c = Math . pow ( 2 , - e ) ) < 1 ) {
e --
c *= 2
}
if ( e + eBias >= 1 ) {
value += rt / c
} else {
value += rt * Math . pow ( 2 , 1 - eBias )
}
if ( value * c >= 2 ) {
e ++
c /= 2
}
if ( e + eBias >= eMax ) {
m = 0
e = eMax
} else if ( e + eBias >= 1 ) {
m = ( value * c - 1 ) * Math . pow ( 2 , mLen )
e = e + eBias
} else {
m = value * Math . pow ( 2 , eBias - 1 ) * Math . pow ( 2 , mLen )
e = 0
}
}
for ( ; mLen >= 8 ; buffer [ offset + i ] = m & 0xff , i += d , m /= 256 , mLen -= 8 ) { }
e = ( e << mLen ) | m
eLen += mLen
for ( ; eLen > 0 ; buffer [ offset + i ] = e & 0xff , i += d , e /= 256 , eLen -= 8 ) { }
buffer [ offset + i - d ] |= s * 128
}
} , { } ] , 9 : [ function ( require , module , exports ) {
/ * !
* Determine if an object is a Buffer
*
* @ author Feross Aboukhadijeh < feross @ feross . org > < http : //feross.org>
* @ license MIT
* /
// The _isBuffer check is for Safari 5-7 support, because it's missing
// Object.prototype.constructor. Remove this eventually
module . exports = function ( obj ) {
return obj != null && ( isBuffer ( obj ) || isSlowBuffer ( obj ) || ! ! obj . _isBuffer )
}
function isBuffer ( obj ) {
return ! ! obj . constructor && typeof obj . constructor . isBuffer === 'function' && obj . constructor . isBuffer ( obj )
}
// For Node v0.10 support. Remove this eventually.
function isSlowBuffer ( obj ) {
return typeof obj . readFloatLE === 'function' && typeof obj . slice === 'function' && isBuffer ( obj . slice ( 0 , 0 ) )
}
} , { } ] , 10 : [ function ( require , module , exports ) {
var toString = { } . toString ;
module . exports = Array . isArray || function ( arr ) {
return toString . call ( arr ) == '[object Array]' ;
} ;
} , { } ] , 11 : [ function ( require , module , exports ) {
var Buffer = require ( 'buffer' ) . Buffer ;
module . exports = isBuffer ;
// True for node Buffers and for any typed-array-like object whose toString
// tag contains "Array" with extra characters (Uint8Array, Float32Array, ...),
// while plain Arrays ('[object Array]') deliberately do not match.
function isBuffer (o) {
  if (Buffer.isBuffer(o)) {
    return true;
  }
  var tag = Object.prototype.toString.call(o);
  return /\[object (.+Array|Array.+)\]/.test(tag);
}
} , { "buffer" : 6 } ] , 12 : [ function ( require , module , exports ) {
( function ( Buffer ) {
module . exports = Level
var IDB = require ( 'idb-wrapper' )
var AbstractLevelDOWN = require ( 'abstract-leveldown' ) . AbstractLevelDOWN
var util = require ( 'util' )
var Iterator = require ( './iterator' )
var isBuffer = require ( 'isbuffer' )
var xtend = require ( 'xtend' )
var toBuffer = require ( 'typedarray-to-buffer' )
// LevelDOWN-compatible constructor; `location` doubles as the IndexedDB
// store name. Supports calling without `new`.
function Level (location) {
  if (!(this instanceof Level)) {
    return new Level(location)
  }
  if (!location) {
    throw new Error("constructor requires at least a location argument")
  }
  this.IDBOptions = {}
  this.location = location
}
util . inherits ( Level , AbstractLevelDOWN )
// Open (or create) the backing IndexedDB store and report readiness through
// the node-style `callback`.
Level.prototype._open = function (options, callback) {
  var self = this
  // Default idb-wrapper configuration; onStoreReady/onError bridge IDB's
  // callback style onto the LevelDOWN open callback.
  var idbOpts = {
    storeName: this.location,
    autoIncrement: false,
    keyPath: null,
    onStoreReady: function () {
      callback && callback(null, self.idb)
    },
    onError: function (err) {
      callback && callback(err)
    }
  }
  // BUG FIX: the bundled xtend() is immutable — it merges its arguments into
  // a NEW object and returns it. The original call discarded that return
  // value, so caller-supplied open options were silently ignored.
  idbOpts = xtend(idbOpts, options)
  this.IDBOptions = idbOpts
  this.idb = new IDB(idbOpts)
}
Level . prototype . _get = function ( key , options , callback ) {
this . idb . get ( key , function ( value ) {
if ( value === undefined ) {
// 'NotFound' error, consistent with LevelDOWN API
return callback ( new Error ( 'NotFound' ) )
}
// by default return buffers, unless explicitly told not to
var asBuffer = true
if ( options . asBuffer === false ) asBuffer = false
if ( options . raw ) asBuffer = false
if ( asBuffer ) {
if ( value instanceof Uint8Array ) value = toBuffer ( value )
else value = new Buffer ( String ( value ) )
}
return callback ( null , value , key )
} , callback )
}
// Delete a record. idb-wrapper's remove(key, onSuccess, onError) gets the
// same node-style callback for both outcomes.
Level.prototype._del = function (id, options, callback) {
  var idb = this.idb
  idb.remove(id, callback, callback)
}
Level . prototype . _put = function ( key , value , options , callback ) {
if ( value instanceof ArrayBuffer ) {
value = toBuffer ( new Uint8Array ( value ) )
}
var obj = this . convertEncoding ( key , value , options )
if ( Buffer . isBuffer ( obj . value ) ) {
if ( typeof value . toArrayBuffer === 'function' ) {
obj . value = new Uint8Array ( value . toArrayBuffer ( ) )
} else {
obj . value = new Uint8Array ( value )
}
}
this . idb . put ( obj . key , obj . value , function ( ) { callback ( ) } , callback )
}
Level . prototype . convertEncoding = function ( key , value , options ) {
if ( options . raw ) return { key : key , value : value }
if ( value ) {
var stringed = value . toString ( )
if ( stringed === 'NaN' ) value = 'NaN'
}
var valEnc = options . valueEncoding
var obj = { key : key , value : value }
if ( value && ( ! valEnc || valEnc !== 'binary' ) ) {
if ( typeof obj . value !== 'object' ) {
obj . value = stringed
}
}
return obj
}
// Create a LevelDOWN-style iterator over the store.
Level.prototype.iterator = function (options) {
  var opts = typeof options === 'object' ? options : {}
  return new Iterator(this.idb, opts)
}
Level . prototype . _batch = function ( array , options , callback ) {
var op
var i
var k
var copiedOp
var currentOp
var modified = [ ]
if ( array . length === 0 ) return setTimeout ( callback , 0 )
for ( i = 0 ; i < array . length ; i ++ ) {
copiedOp = { }
currentOp = array [ i ]
modified [ i ] = copiedOp
var converted = this . convertEncoding ( currentOp . key , currentOp . value , options )
currentOp . key = converted . key
currentOp . value = converted . value
for ( k in currentOp ) {
if ( k === 'type' && currentOp [ k ] == 'del' ) {
copiedOp [ k ] = 'remove'
} else {
copiedOp [ k ] = currentOp [ k ]
}
}
}
return this . idb . batch ( modified , function ( ) { callback ( ) } , callback )
}
// Close the underlying IndexedDB handle, then report success synchronously.
Level.prototype._close = function (callback) {
  var handle = this.idb.db
  handle.close()
  callback()
}
// Size estimation is not supported on IndexedDB; surface the error through
// the callback when one is given, otherwise throw.
Level.prototype._approximateSize = function (start, end, callback) {
  var err = new Error('Not implemented')
  if (!callback) {
    throw err
  }
  return callback(err)
}
Level . prototype . _isBuffer = function ( obj ) {
return Buffer . isBuffer ( obj )
}
// Delete the IndexedDB database behind `db`, which may be a Level instance
// or a bare location string. Completion is reported through `callback`.
Level.destroy = function (db, callback) {
  // FIX: `prefix`/`dbname` were `var`-declared separately in both branches,
  // relying on hoisting to share one binding; declare them once up front.
  var prefix
  var dbname
  if (typeof db === 'object') {
    prefix = db.IDBOptions.storePrefix || 'IDBWrapper-'
    dbname = db.location
  } else {
    prefix = 'IDBWrapper-'
    dbname = db
  }
  var request = indexedDB.deleteDatabase(prefix + dbname)
  request.onsuccess = function () {
    callback()
  }
  request.onerror = function (err) {
    callback(err)
  }
}
// Validate a key or value per the LevelDOWN contract; returns an Error for
// invalid input and undefined when it is acceptable.
var checkKeyValue = Level.prototype._checkKeyValue = function (obj, type) {
  // FIX: the null/undefined guard was duplicated verbatim in the original;
  // a single check is sufficient.
  if (obj === null || obj === undefined)
    return new Error(type + ' cannot be `null` or `undefined`')
  if (isBuffer(obj) && obj.byteLength === 0)
    return new Error(type + ' cannot be an empty ArrayBuffer')
  if (String(obj) === '')
    return new Error(type + ' cannot be an empty String')
  if (obj.length === 0)
    return new Error(type + ' cannot be an empty Array')
}
} ) . call ( this , require ( "buffer" ) . Buffer )
} , { "./iterator" : 13 , "abstract-leveldown" : 3 , "buffer" : 6 , "idb-wrapper" : 7 , "isbuffer" : 11 , "typedarray-to-buffer" : 38 , "util" : 41 , "xtend" : 15 } ] , 13 : [ function ( require , module , exports ) {
var util = require ( 'util' )
var AbstractIterator = require ( 'abstract-leveldown' ) . AbstractIterator
var ltgt = require ( 'ltgt' )
module . exports = Iterator
function Iterator ( db , options ) {
if ( ! options ) options = { }
this . options = options
AbstractIterator . call ( this , db )
this . _order = options . reverse ? 'DESC' : 'ASC'
this . _limit = options . limit
this . _count = 0
this . _done = false
var lower = ltgt . lowerBound ( options )
var upper = ltgt . upperBound ( options )
try {
this . _keyRange = lower || upper ? this . db . makeKeyRange ( {
lower : lower ,
upper : upper ,
excludeLower : ltgt . lowerBoundExclusive ( options ) ,
excludeUpper : ltgt . upperBoundExclusive ( options )
} ) : null
} catch ( e ) {
// The lower key is greater than the upper key.
// IndexedDB throws an error, but we'll just return 0 results.
this . _keyRangeError = true
}
this . callback = null
}
util . inherits ( Iterator , AbstractIterator )
// Start the underlying idb-wrapper cursor; each match is forwarded to
// onItem with the iterator as its receiver.
Iterator.prototype.createIterator = function () {
  var self = this
  var iterateOptions = {
    keyRange: self._keyRange,
    autoContinue: false,
    order: self._order,
    onError: function (err) { console.log('horrible error', err) }
  }
  self.iterator = self.db.iterate(function () {
    self.onItem.apply(self, arguments)
  }, iterateOptions)
}
// TODO the limit implementation here just ignores all reads after limit has been reached
// it should cancel the iterator instead but I don't know how
Iterator . prototype . onItem = function ( value , cursor , cursorTransaction ) {
if ( ! cursor && this . callback ) {
this . callback ( )
this . callback = false
return
}
var shouldCall = true
if ( ! ! this . _limit && this . _limit > 0 && this . _count ++ >= this . _limit )
shouldCall = false
if ( shouldCall ) this . callback ( false , cursor . key , cursor . value )
if ( cursor ) cursor [ 'continue' ] ( )
}
// LevelDOWN hook: fetch the next entry, lazily starting the cursor on the
// first call.
Iterator.prototype._next = function (callback) {
  if (!callback) return new Error('next() requires a callback argument')
  // A degenerate key range (lower > upper) yields zero results.
  if (this._keyRangeError) return callback()
  if (!this._started) {
    this._started = true
    this.createIterator()
  }
  this.callback = callback
}
} , { "abstract-leveldown" : 3 , "ltgt" : 16 , "util" : 41 } ] , 14 : [ function ( require , module , exports ) {
module . exports = hasKeys
// True when `source` can carry own properties: any non-null object or
// function. Primitives (numbers, strings, booleans, null, undefined) do not.
function hasKeys (source) {
  if (source === null) {
    return false
  }
  var t = typeof source
  return t === "object" || t === "function"
}
} , { } ] , 15 : [ function ( require , module , exports ) {
var Keys = require ( "object-keys" )
var hasKeys = require ( "./has-keys" )
module . exports = extend
// Immutable extend: merges all argument objects (left to right, later keys
// win) into a brand-new object; the arguments themselves are not touched.
function extend () {
  var target = {}
  for (var i = 0; i < arguments.length; i++) {
    var source = arguments[i]
    if (!hasKeys(source)) {
      continue
    }
    var keys = Keys(source)
    for (var j = 0; j < keys.length; j++) {
      var key = keys[j]
      target[key] = source[key]
    }
  }
  return target
}
} , { "./has-keys" : 14 , "object-keys" : 18 } ] , 16 : [ function ( require , module , exports ) {
( function ( Buffer ) {
exports . compare = function ( a , b ) {
if ( Buffer . isBuffer ( a ) ) {
var l = Math . min ( a . length , b . length )
for ( var i = 0 ; i < l ; i ++ ) {
var cmp = a [ i ] - b [ i ]
if ( cmp ) return cmp
}
return a . length - b . length
}
return a < b ? - 1 : a > b ? 1 : 0
}
// Own-property membership test. FIX: this function was declared twice with
// identical bodies (the second declaration silently shadowed the first); the
// redundant duplicate has been removed.
function has (obj, key) {
  return Object.hasOwnProperty.call(obj, key)
}
// to be compatible with the current abstract-leveldown tests
// nullish or empty strings.
// I could use !!val but I want to permit numbers and booleans,
// if possible.
function isDef (val) {
  return val !== undefined && val !== ''
}
// Like has(), but yields the key name itself (or false) so callers can chain
// lookups with ||.
function hasKey (range, name) {
  return Object.hasOwnProperty.call(range, name) && name
}
var lowerBoundKey = exports . lowerBoundKey = function ( range ) {
return (
hasKey ( range , 'gt' )
|| hasKey ( range , 'gte' )
|| hasKey ( range , 'min' )
|| ( range . reverse ? hasKey ( range , 'end' ) : hasKey ( range , 'start' ) )
|| undefined
)
}
var lowerBound = exports . lowerBound = function ( range ) {
var k = lowerBoundKey ( range )
return k && range [ k ]
}
exports . lowerBoundInclusive = function ( range ) {
return has ( range , 'gt' ) ? false : true
}
exports . upperBoundInclusive =
function ( range ) {
return has ( range , 'lt' ) || ! range . minEx ? false : true
}
var lowerBoundExclusive = exports . lowerBoundExclusive =
function ( range ) {
return has ( range , 'gt' ) || range . minEx ? true : false
}
var upperBoundExclusive = exports . upperBoundExclusive =
function ( range ) {
return has ( range , 'lt' ) ? true : false
}
var upperBoundKey = exports . upperBoundKey = function ( range ) {
return (
hasKey ( range , 'lt' )
|| hasKey ( range , 'lte' )
|| hasKey ( range , 'max' )
|| ( range . reverse ? hasKey ( range , 'start' ) : hasKey ( range , 'end' ) )
|| undefined
)
}
var upperBound = exports . upperBound = function ( range ) {
var k = upperBoundKey ( range )
return k && range [ k ]
}
function id ( e ) { return e }
exports . toLtgt = function ( range , _range , map , lower , upper ) {
_range = _range || { }
map = map || id
var defaults = arguments . length > 3
var lb = exports . lowerBoundKey ( range )
var ub = exports . upperBoundKey ( range )
if ( lb ) {
if ( lb === 'gt' ) _range . gt = map ( range . gt , false )
else _range . gte = map ( range [ lb ] , false )
}
else if ( defaults )
_range . gte = map ( lower , false )
if ( ub ) {
if ( ub === 'lt' ) _range . lt = map ( range . lt , true )
else _range . lte = map ( range [ ub ] , true )
}
else if ( defaults )
_range . lte = map ( upper , true )
if ( range . reverse != null )
_range . reverse = ! ! range . reverse
//if range was used mutably
//(in level-sublevel it's part of an options object
//that has more properties on it.)
if ( has ( _range , 'max' ) ) delete _range . max
if ( has ( _range , 'min' ) ) delete _range . min
if ( has ( _range , 'start' ) ) delete _range . start
if ( has ( _range , 'end' ) ) delete _range . end
return _range
}
exports . contains = function ( range , key , compare ) {
compare = compare || exports . compare
var lb = lowerBound ( range )
if ( isDef ( lb ) ) {
var cmp = compare ( key , lb )
if ( cmp < 0 || ( cmp === 0 && lowerBoundExclusive ( range ) ) )
return false
}
var ub = upperBound ( range )
if ( isDef ( ub ) ) {
var cmp = compare ( key , ub )
if ( cmp > 0 || ( cmp === 0 ) && upperBoundExclusive ( range ) )
return false
}
return true
}
exports . filter = function ( range , compare ) {
return function ( key ) {
return exports . contains ( range , key , compare )
}
}
} ) . call ( this , { "isBuffer" : require ( "../is-buffer/index.js" ) } )
} , { "../is-buffer/index.js" : 9 } ] , 17 : [ function ( require , module , exports ) {
var hasOwn = Object . prototype . hasOwnProperty ;
var toString = Object . prototype . toString ;
// Callable check that survives old engines: typeof plus a toString-tag
// fallback, and a special case for host functions (setTimeout, alert, ...)
// that typeof misreports in legacy IE.
var isFunction = function (fn) {
  var tag = toString.call(fn);
  var isFunc = (typeof fn === 'function' && !(fn instanceof RegExp)) || tag === '[object Function]';
  if (!isFunc && typeof window !== 'undefined') {
    isFunc = fn === window.setTimeout || fn === window.alert || fn === window.confirm || fn === window.prompt;
  }
  return isFunc;
};
module . exports = function forEach ( obj , fn ) {
if ( ! isFunction ( fn ) ) {
throw new TypeError ( 'iterator must be a function' ) ;
}
var i , k ,
isString = typeof obj === 'string' ,
l = obj . length ,
context = arguments . length > 2 ? arguments [ 2 ] : null ;
if ( l === + l ) {
for ( i = 0 ; i < l ; i ++ ) {
if ( context === null ) {
fn ( isString ? obj . charAt ( i ) : obj [ i ] , i , obj ) ;
} else {
fn . call ( context , isString ? obj . charAt ( i ) : obj [ i ] , i , obj ) ;
}
}
} else {
for ( k in obj ) {
if ( hasOwn . call ( obj , k ) ) {
if ( context === null ) {
fn ( obj [ k ] , k , obj ) ;
} else {
fn . call ( context , obj [ k ] , k , obj ) ;
}
}
}
}
} ;
} , { } ] , 18 : [ function ( require , module , exports ) {
module . exports = Object . keys || require ( './shim' ) ;
} , { "./shim" : 20 } ] , 19 : [ function ( require , module , exports ) {
var toString = Object . prototype . toString ;
module . exports = function isArguments ( value ) {
var str = toString . call ( value ) ;
var isArguments = str === '[object Arguments]' ;
if ( ! isArguments ) {
isArguments = str !== '[object Array]'
&& value !== null
&& typeof value === 'object'
&& typeof value . length === 'number'
&& value . length >= 0
&& toString . call ( value . callee ) === '[object Function]' ;
}
return isArguments ;
} ;
} , { } ] , 20 : [ function ( require , module , exports ) {
( function ( ) {
"use strict" ;
// modified from https://github.com/kriskowal/es5-shim
var has = Object . prototype . hasOwnProperty ,
toString = Object . prototype . toString ,
forEach = require ( './foreach' ) ,
isArgs = require ( './isArguments' ) ,
hasDontEnumBug = ! ( { 'toString' : null } ) . propertyIsEnumerable ( 'toString' ) ,
hasProtoEnumBug = ( function ( ) { } ) . propertyIsEnumerable ( 'prototype' ) ,
dontEnums = [
"toString" ,
"toLocaleString" ,
"valueOf" ,
"hasOwnProperty" ,
"isPrototypeOf" ,
"propertyIsEnumerable" ,
"constructor"
] ,
keysShim ;
keysShim = function keys ( object ) {
var isObject = object !== null && typeof object === 'object' ,
isFunction = toString . call ( object ) === '[object Function]' ,
isArguments = isArgs ( object ) ,
theKeys = [ ] ;
if ( ! isObject && ! isFunction && ! isArguments ) {
throw new TypeError ( "Object.keys called on a non-object" ) ;
}
if ( isArguments ) {
forEach ( object , function ( value ) {
theKeys . push ( value ) ;
} ) ;
} else {
var name ,
skipProto = hasProtoEnumBug && isFunction ;
for ( name in object ) {
if ( ! ( skipProto && name === 'prototype' ) && has . call ( object , name ) ) {
theKeys . push ( name ) ;
}
}
}
if ( hasDontEnumBug ) {
var ctor = object . constructor ,
skipConstructor = ctor && ctor . prototype === object ;
forEach ( dontEnums , function ( dontEnum ) {
if ( ! ( skipConstructor && dontEnum === 'constructor' ) && has . call ( object , dontEnum ) ) {
theKeys . push ( dontEnum ) ;
}
} ) ;
}
return theKeys ;
} ;
module . exports = keysShim ;
} ( ) ) ;
} , { "./foreach" : 17 , "./isArguments" : 19 } ] , 21 : [ function ( require , module , exports ) {
// Top level file is just a mixin of submodules & constants
'use strict' ;
var assign = require ( './lib/utils/common' ) . assign ;
var deflate = require ( './lib/deflate' ) ;
var inflate = require ( './lib/inflate' ) ;
var constants = require ( './lib/zlib/constants' ) ;
var pako = { } ;
assign ( pako , deflate , inflate , constants ) ;
module . exports = pako ;
} , { "./lib/deflate" : 22 , "./lib/inflate" : 23 , "./lib/utils/common" : 24 , "./lib/zlib/constants" : 27 } ] , 22 : [ function ( require , module , exports ) {
'use strict' ;
var zlib _deflate = require ( './zlib/deflate' ) ;
var utils = require ( './utils/common' ) ;
var strings = require ( './utils/strings' ) ;
var msg = require ( './zlib/messages' ) ;
var ZStream = require ( './zlib/zstream' ) ;
var toString = Object . prototype . toString ;
/* Public constants ==========================================================*/
/* ===========================================================================*/
// zlib flush modes
var Z_NO_FLUSH = 0;
var Z_FINISH = 4;
// zlib return codes
var Z_OK = 0;
var Z_STREAM_END = 1;
var Z_SYNC_FLUSH = 2;
// compression level / strategy defaults
var Z_DEFAULT_COMPRESSION = -1;
var Z_DEFAULT_STRATEGY = 0;
// the deflate compression method (the only one supported)
var Z_DEFLATED = 8;
/* ===========================================================================*/
/ * *
* class Deflate
*
* Generic JS - style wrapper for zlib calls . If you don ' t need
* streaming behaviour - use more simple functions : [ [ deflate ] ] ,
* [ [ deflateRaw ] ] and [ [ gzip ] ] .
* * /
/ * i n t e r n a l
* Deflate . chunks - > Array
*
* Chunks of output data , if [ [ Deflate # onData ] ] not overriden .
* * /
/ * *
* Deflate . result - > Uint8Array | Array
*
* Compressed result , generated by default [ [ Deflate # onData ] ]
* and [ [ Deflate # onEnd ] ] handlers . Filled after you push last chunk
* ( call [ [ Deflate # push ] ] with ` Z_FINISH ` / ` true ` param ) or if you
* push a chunk with explicit flush ( call [ [ Deflate # push ] ] with
* ` Z_SYNC_FLUSH ` param ) .
* * /
/ * *
* Deflate . err - > Number
*
* Error code after deflate finished . 0 ( Z _OK ) on success .
* You will not need it in real life , because deflate errors
* are possible only on wrong options or bad ` onData ` / ` onEnd `
* custom handlers .
* * /
/ * *
* Deflate . msg - > String
*
* Error message , if [ [ Deflate . err ] ] != 0
* * /
/ * *
* new Deflate ( options )
* - options ( Object ) : zlib deflate options .
*
* Creates new deflator instance with specified params . Throws exception
* on bad params . Supported options :
*
* - ` level `
* - ` windowBits `
* - ` memLevel `
* - ` strategy `
* - ` dictionary `
*
* [ http : //zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)
* for more information on these .
*
* Additional options , for internal needs :
*
* - ` chunkSize ` - size of generated data chunks ( 16 K by default )
* - ` raw ` ( Boolean ) - do raw deflate
* - ` gzip ` ( Boolean ) - create gzip wrapper
* - ` to ` ( String ) - if equal to 'string' , then result will be "binary string"
* ( each char code [ 0. . 255 ] )
* - ` header ` ( Object ) - custom header for gzip
* - ` text ` ( Boolean ) - true if compressed data believed to be text
* - ` time ` ( Number ) - modification time , unix timestamp
* - ` os ` ( Number ) - operation system code
* - ` extra ` ( Array ) - array of bytes with extra data ( max 65536 )
* - ` name ` ( String ) - file name ( binary string )
* - ` comment ` ( String ) - comment ( binary string )
* - ` hcrc ` ( Boolean ) - true if header crc should be added
*
* # # # # # Example :
*
* ` ` ` javascript
* var pako = require ( 'pako' )
* , chunk1 = Uint8Array ( [ 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 ] )
* , chunk2 = Uint8Array ( [ 10 , 11 , 12 , 13 , 14 , 15 , 16 , 17 , 18 , 19 ] ) ;
*
* var deflate = new pako . Deflate ( { level : 3 } ) ;
*
* deflate . push ( chunk1 , false ) ;
* deflate . push ( chunk2 , true ) ; // true -> last chunk
*
* if ( deflate . err ) { throw new Error ( deflate . err ) ; }
*
* console . log ( deflate . result ) ;
* ` ` `
* * /
/**
 * new Deflate(options)
 *
 * Merges user options over library defaults, normalizes windowBits for
 * the raw / gzip wrappers, initializes the underlying zlib deflate
 * stream, and optionally installs a custom gzip header and a preset
 * dictionary. Throws Error (message from the zlib message table) on
 * bad parameters.
 */
function Deflate(options) {
  // Allow calling without `new`.
  if (!(this instanceof Deflate)) return new Deflate(options);

  this.options = utils.assign({
    level: Z_DEFAULT_COMPRESSION,
    method: Z_DEFLATED,
    chunkSize: 16384,
    windowBits: 15,
    memLevel: 8,
    strategy: Z_DEFAULT_STRATEGY,
    to: ''
  }, options || {});

  var opt = this.options;

  // Negative windowBits selects raw deflate (no zlib wrapper)...
  if (opt.raw && (opt.windowBits > 0)) {
    opt.windowBits = -opt.windowBits;
  }
  // ...while windowBits + 16 selects the gzip wrapper.
  else if (opt.gzip && (opt.windowBits > 0) && (opt.windowBits < 16)) {
    opt.windowBits += 16;
  }

  this.err = 0;        // error code, if happens (0 = Z_OK)
  this.msg = '';       // error message
  this.ended = false;  // used to avoid multiple onEnd() calls
  this.chunks = [];    // chunks of compressed data

  this.strm = new ZStream();
  // Forces allocation of a fresh output buffer on the first push().
  this.strm.avail_out = 0;

  var status = zlib_deflate.deflateInit2(
    this.strm,
    opt.level,
    opt.method,
    opt.windowBits,
    opt.memLevel,
    opt.strategy
  );

  if (status !== Z_OK) {
    throw new Error(msg[status]);
  }

  // A custom gzip header must be installed after init, before any data.
  if (opt.header) {
    zlib_deflate.deflateSetHeader(this.strm, opt.header);
  }

  if (opt.dictionary) {
    var dict;
    // Convert data if needed
    if (typeof opt.dictionary === 'string') {
      // If we need to compress text, change encoding to utf8.
      dict = strings.string2buf(opt.dictionary);
    } else if (toString.call(opt.dictionary) === '[object ArrayBuffer]') {
      dict = new Uint8Array(opt.dictionary);
    } else {
      dict = opt.dictionary;
    }

    status = zlib_deflate.deflateSetDictionary(this.strm, dict);

    if (status !== Z_OK) {
      throw new Error(msg[status]);
    }

    this._dict_set = true;
  }
}
/ * *
* Deflate # push ( data [ , mode ] ) - > Boolean
* - data ( Uint8Array | Array | ArrayBuffer | String ) : input data . Strings will be
* converted to utf8 byte sequence .
* - mode ( Number | Boolean ) : 0. . 6 for corresponding Z _NO _FLUSH . . Z _TREE modes .
* See constants. Skipped or `false` means Z_NO_FLUSH, `true` means Z_FINISH.
*
* Sends input data to deflate pipe , generating [ [ Deflate # onData ] ] calls with
* new compressed chunks . Returns ` true ` on success . The last data block must have
* mode Z _FINISH ( or ` true ` ) . That will flush internal pending buffers and call
* [ [ Deflate # onEnd ] ] . For interim explicit flushes ( without ending the stream ) you
* can use mode Z _SYNC _FLUSH , keeping the compression context .
*
* On fail call [ [ Deflate # onEnd ] ] with error code and return false .
*
* We strongly recommend to use ` Uint8Array ` on input for best speed ( output
* array format is detected automatically ) . Also , don ' t skip last param and always
* use the same type in your code ( boolean or number ) . That will improve JS speed .
*
* For regular ` Array ` - s make sure all elements are [ 0. . 255 ] .
*
* # # # # # Example
*
* ` ` ` javascript
* push ( chunk , false ) ; // push one of data chunks
* ...
* push ( chunk , true ) ; // push last chunk
* ` ` `
* * /
/**
 * Deflate#push(data[, mode]) -> Boolean
 *
 * Feeds one chunk into the deflate pipe, looping zlib's deflate()
 * until all input is consumed and emitting onData() for each filled
 * output buffer. Z_FINISH ends the stream (calls onEnd() and frees
 * zlib state); Z_SYNC_FLUSH emits pending output but keeps the
 * stream usable. Returns false after an error or once ended.
 */
Deflate.prototype.push = function (data, mode) {
  var strm = this.strm;
  var chunkSize = this.options.chunkSize;
  var status, _mode;

  if (this.ended) { return false; }

  // Integers pass through as-is; `true` -> Z_FINISH, else Z_NO_FLUSH.
  _mode = (mode === ~~mode) ? mode : ((mode === true) ? Z_FINISH : Z_NO_FLUSH);

  // Convert data if needed
  if (typeof data === 'string') {
    // If we need to compress text, change encoding to utf8.
    strm.input = strings.string2buf(data);
  } else if (toString.call(data) === '[object ArrayBuffer]') {
    strm.input = new Uint8Array(data);
  } else {
    strm.input = data;
  }

  strm.next_in = 0;
  strm.avail_in = strm.input.length;

  do {
    // Allocate a fresh output buffer once the previous one is full
    // (or on the very first iteration, since avail_out starts at 0).
    if (strm.avail_out === 0) {
      strm.output = new utils.Buf8(chunkSize);
      strm.next_out = 0;
      strm.avail_out = chunkSize;
    }
    status = zlib_deflate.deflate(strm, _mode);    /* no bad return value */

    if (status !== Z_STREAM_END && status !== Z_OK) {
      this.onEnd(status);
      this.ended = true;
      return false;
    }
    // Emit when the output buffer is full, or when finishing/flushing
    // with all input consumed (so partial buffers get flushed too).
    if (strm.avail_out === 0 || (strm.avail_in === 0 && (_mode === Z_FINISH || _mode === Z_SYNC_FLUSH))) {
      if (this.options.to === 'string') {
        this.onData(strings.buf2binstring(utils.shrinkBuf(strm.output, strm.next_out)));
      } else {
        this.onData(utils.shrinkBuf(strm.output, strm.next_out));
      }
    }
  } while ((strm.avail_in > 0 || strm.avail_out === 0) && status !== Z_STREAM_END);

  // Finalize on the last chunk.
  if (_mode === Z_FINISH) {
    status = zlib_deflate.deflateEnd(this.strm);
    this.onEnd(status);
    this.ended = true;
    return status === Z_OK;
  }

  // callback interim results if Z_SYNC_FLUSH.
  if (_mode === Z_SYNC_FLUSH) {
    this.onEnd(Z_OK);
    // Force a fresh output buffer on the next push().
    strm.avail_out = 0;
    return true;
  }

  return true;
};
/ * *
* Deflate # onData ( chunk ) - > Void
* - chunk (Uint8Array|Array|String): output data. Type of array depends
* on js engine support . When string output requested , each chunk
* will be string .
*
* By default , stores data blocks in ` chunks[] ` property and glue
* those in ` onEnd ` . Override this handler , if you need another behaviour .
* * /
Deflate.prototype.onData = function (chunk) {
  // Default handler: buffer every compressed chunk; onEnd() joins them.
  var collected = this.chunks;
  collected[collected.length] = chunk;
};
/ * *
* Deflate # onEnd ( status ) - > Void
* - status ( Number ) : deflate status . 0 ( Z _OK ) on success ,
* other if not .
*
* Called once after you tell deflate that the input stream is
* complete ( Z _FINISH ) or should be flushed ( Z _SYNC _FLUSH )
* or if an error happened . By default - join collected chunks ,
* free memory and fill ` results ` / ` err ` properties .
* * /
Deflate.prototype.onEnd = function (status) {
  // Default end handler: on success, glue the collected chunks into
  // `result` (binary string or flat array depending on options.to),
  // then release the chunk list and record status/message.
  if (status === Z_OK) {
    this.result = (this.options.to === 'string')
      ? this.chunks.join('')
      : utils.flattenChunks(this.chunks);
  }
  this.chunks = [];
  this.err = status;
  this.msg = this.strm.msg;
};
/ * *
* deflate ( data [ , options ] ) - > Uint8Array | Array | String
* - data ( Uint8Array | Array | String ) : input data to compress .
* - options ( Object ) : zlib deflate options .
*
* Compress ` data ` with deflate algorithm and ` options ` .
*
* Supported options are :
*
* - level
* - windowBits
* - memLevel
* - strategy
* - dictionary
*
* [ http : //zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)
* for more information on these .
*
* Sugar ( options ) :
*
* - ` raw ` ( Boolean ) - say that we work with raw stream , if you don ' t wish to specify
* negative windowBits implicitly .
* - ` to ` ( String ) - if equal to 'string' , then result will be "binary string"
* ( each char code [ 0. . 255 ] )
*
* # # # # # Example :
*
* ` ` ` javascript
* var pako = require ( 'pako' )
* , data = Uint8Array ( [ 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 ] ) ;
*
* console . log ( pako . deflate ( data ) ) ;
* ` ` `
* * /
// One-shot compression: create a deflator, feed the whole input as the
// final chunk, and return the accumulated result.
function deflate(input, options) {
  var compressor = new Deflate(options);

  compressor.push(input, true);

  if (compressor.err) {
    // Only reachable with broken options; the string throw is kept
    // for backward compatibility with existing callers.
    throw compressor.msg;
  }

  return compressor.result;
}
/ * *
* deflateRaw ( data [ , options ] ) - > Uint8Array | Array | String
* - data ( Uint8Array | Array | String ) : input data to compress .
* - options ( Object ) : zlib deflate options .
*
* The same as [ [ deflate ] ] , but creates raw data , without wrapper
* ( header and adler32 crc ) .
* * /
/**
 * deflateRaw(data[, options]) -> Uint8Array|Array|String
 *
 * The same as deflate(), but produces raw data without the zlib
 * wrapper (header and adler32 crc).
 *
 * Fix: no longer mutates the caller-supplied `options` object; the
 * `raw` flag is set on a shallow copy instead, so a shared options
 * object can be safely reused with other calls.
 */
function deflateRaw(input, options) {
  var opts = utils.assign({}, options || {}, { raw: true });
  return deflate(input, opts);
}
/ * *
* gzip ( data [ , options ] ) - > Uint8Array | Array | String
* - data ( Uint8Array | Array | String ) : input data to compress .
* - options ( Object ) : zlib deflate options .
*
* The same as [ [ deflate ] ] , but create gzip wrapper instead of
* deflate one .
* * /
/**
 * gzip(data[, options]) -> Uint8Array|Array|String
 *
 * The same as deflate(), but creates a gzip wrapper instead of the
 * zlib one.
 *
 * Fix: no longer mutates the caller-supplied `options` object; the
 * `gzip` flag is set on a shallow copy instead, so a shared options
 * object can be safely reused with other calls.
 */
function gzip(input, options) {
  var opts = utils.assign({}, options || {}, { gzip: true });
  return deflate(input, opts);
}
exports . Deflate = Deflate ;
exports . deflate = deflate ;
exports . deflateRaw = deflateRaw ;
exports . gzip = gzip ;
} , { "./utils/common" : 24 , "./utils/strings" : 25 , "./zlib/deflate" : 29 , "./zlib/messages" : 34 , "./zlib/zstream" : 36 } ] , 23 : [ function ( require , module , exports ) {
'use strict' ;
var zlib _inflate = require ( './zlib/inflate' ) ;
var utils = require ( './utils/common' ) ;
var strings = require ( './utils/strings' ) ;
var c = require ( './zlib/constants' ) ;
var msg = require ( './zlib/messages' ) ;
var ZStream = require ( './zlib/zstream' ) ;
var GZheader = require ( './zlib/gzheader' ) ;
var toString = Object . prototype . toString ;
/ * *
* class Inflate
*
* Generic JS - style wrapper for zlib calls . If you don ' t need
* streaming behaviour - use more simple functions : [ [ inflate ] ]
* and [ [ inflateRaw ] ] .
* * /
/ * i n t e r n a l
* inflate . chunks - > Array
*
* Chunks of output data, if [[Inflate#onData]] not overridden.
* * /
/ * *
* Inflate . result - > Uint8Array | Array | String
*
* Uncompressed result , generated by default [ [ Inflate # onData ] ]
* and [ [ Inflate # onEnd ] ] handlers . Filled after you push last chunk
* ( call [ [ Inflate # push ] ] with ` Z_FINISH ` / ` true ` param ) or if you
* push a chunk with explicit flush ( call [ [ Inflate # push ] ] with
* ` Z_SYNC_FLUSH ` param ) .
* * /
/ * *
* Inflate . err - > Number
*
* Error code after inflate finished . 0 ( Z _OK ) on success .
* Should be checked if broken data possible .
* * /
/ * *
* Inflate . msg - > String
*
* Error message , if [ [ Inflate . err ] ] != 0
* * /
/ * *
* new Inflate ( options )
* - options ( Object ) : zlib inflate options .
*
* Creates new inflator instance with specified params . Throws exception
* on bad params . Supported options :
*
* - ` windowBits `
* - ` dictionary `
*
* [ http : //zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)
* for more information on these .
*
* Additional options , for internal needs :
*
* - ` chunkSize ` - size of generated data chunks ( 16 K by default )
* - ` raw ` ( Boolean ) - do raw inflate
* - ` to ` ( String ) - if equal to 'string' , then result will be converted
* from utf8 to utf16 ( javascript ) string . When string output requested ,
* chunk length can differ from ` chunkSize ` , depending on content .
*
* By default , when no options set , autodetect deflate / gzip data format via
* wrapper header .
*
* # # # # # Example :
*
* ` ` ` javascript
* var pako = require ( 'pako' )
* , chunk1 = Uint8Array ( [ 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 ] )
* , chunk2 = Uint8Array ( [ 10 , 11 , 12 , 13 , 14 , 15 , 16 , 17 , 18 , 19 ] ) ;
*
* var inflate = new pako . Inflate ( { level : 3 } ) ;
*
* inflate . push ( chunk1 , false ) ;
* inflate . push ( chunk2 , true ) ; // true -> last chunk
*
* if ( inflate . err ) { throw new Error ( inflate . err ) ; }
*
* console . log ( inflate . result ) ;
* ` ` `
* * /
/**
 * new Inflate(options)
 *
 * Merges user options over defaults and normalizes windowBits:
 * negated for raw inflate, +32 to autodetect the zlib/gzip wrapper,
 * |15 to force the maximum window when a gzip wrapper is possible.
 * Then initializes the zlib inflate stream and attaches a GZheader
 * struct to be filled during decompression. Throws Error (message
 * from the zlib message table) on bad parameters.
 */
function Inflate(options) {
  // Allow calling without `new`.
  if (!(this instanceof Inflate)) return new Inflate(options);

  this.options = utils.assign({
    chunkSize: 16384,
    windowBits: 0,
    to: ''
  }, options || {});

  var opt = this.options;

  // Force window size for `raw` data, if not set directly,
  // because we have no header for autodetect.
  if (opt.raw && (opt.windowBits >= 0) && (opt.windowBits < 16)) {
    opt.windowBits = -opt.windowBits;
    if (opt.windowBits === 0) { opt.windowBits = -15; }
  }

  // If `windowBits` not defined (and mode not raw) - set autodetect flag for gzip/deflate
  if ((opt.windowBits >= 0) && (opt.windowBits < 16) &&
      !(options && options.windowBits)) {
    opt.windowBits += 32;
  }

  // Gzip header has no info about windows size, we can do autodetect only
  // for deflate. So, if window size not set, force it to max when gzip possible
  if ((opt.windowBits > 15) && (opt.windowBits < 48)) {
    // bit 3 (16) -> gzipped data
    // bit 4 (32) -> autodetect gzip/deflate
    if ((opt.windowBits & 15) === 0) {
      opt.windowBits |= 15;
    }
  }

  this.err = 0;        // error code, if happens (0 = Z_OK)
  this.msg = '';       // error message
  this.ended = false;  // used to avoid multiple onEnd() calls
  this.chunks = [];    // chunks of compressed data

  this.strm = new ZStream();
  // Forces allocation of a fresh output buffer on the first push().
  this.strm.avail_out = 0;

  var status = zlib_inflate.inflateInit2(
    this.strm,
    opt.windowBits
  );

  if (status !== c.Z_OK) {
    throw new Error(msg[status]);
  }

  // Header fields are populated by inflate() when a gzip wrapper is
  // present in the stream.
  this.header = new GZheader();
  zlib_inflate.inflateGetHeader(this.strm, this.header);
}
/ * *
* Inflate # push ( data [ , mode ] ) - > Boolean
* - data ( Uint8Array | Array | ArrayBuffer | String ) : input data
* - mode ( Number | Boolean ) : 0. . 6 for corresponding Z _NO _FLUSH . . Z _TREE modes .
* See constants. Skipped or `false` means Z_NO_FLUSH, `true` means Z_FINISH.
*
* Sends input data to inflate pipe , generating [ [ Inflate # onData ] ] calls with
* new output chunks . Returns ` true ` on success . The last data block must have
* mode Z _FINISH ( or ` true ` ) . That will flush internal pending buffers and call
* [ [ Inflate # onEnd ] ] . For interim explicit flushes ( without ending the stream ) you
* can use mode Z _SYNC _FLUSH , keeping the decompression context .
*
* On fail call [ [ Inflate # onEnd ] ] with error code and return false .
*
* We strongly recommend to use ` Uint8Array ` on input for best speed ( output
* format is detected automatically ) . Also , don ' t skip last param and always
* use the same type in your code ( boolean or number ) . That will improve JS speed .
*
* For regular ` Array ` - s make sure all elements are [ 0. . 255 ] .
*
* # # # # # Example
*
* ` ` ` javascript
* push ( chunk , false ) ; // push one of data chunks
* ...
* push ( chunk , true ) ; // push last chunk
* ` ` `
* * /
/**
 * Inflate#push(data[, mode]) -> Boolean
 *
 * Feeds one chunk into the inflate pipe, looping zlib's inflate()
 * until all input is consumed and emitting onData() for each produced
 * output span. In string mode, output is cut at a UTF-8 sequence
 * boundary and the incomplete tail carried over to the next chunk.
 * Z_FINISH (or a detected end-of-stream) finalizes the stream;
 * Z_SYNC_FLUSH emits interim results and keeps the stream usable.
 * Returns false after an error or once ended.
 */
Inflate.prototype.push = function (data, mode) {
  var strm = this.strm;
  var chunkSize = this.options.chunkSize;
  var dictionary = this.options.dictionary;
  var status, _mode;
  var next_out_utf8, tail, utf8str;
  var dict;

  // Flag to properly process Z_BUF_ERROR on testing inflate call
  // when we check that all output data was flushed.
  var allowBufError = false;

  if (this.ended) { return false; }
  // Integers pass through as-is; `true` -> Z_FINISH, else Z_NO_FLUSH.
  _mode = (mode === ~~mode) ? mode : ((mode === true) ? c.Z_FINISH : c.Z_NO_FLUSH);

  // Convert data if needed
  if (typeof data === 'string') {
    // Only binary strings can be decompressed on practice
    strm.input = strings.binstring2buf(data);
  } else if (toString.call(data) === '[object ArrayBuffer]') {
    strm.input = new Uint8Array(data);
  } else {
    strm.input = data;
  }

  strm.next_in = 0;
  strm.avail_in = strm.input.length;

  do {
    if (strm.avail_out === 0) {
      strm.output = new utils.Buf8(chunkSize);
      strm.next_out = 0;
      strm.avail_out = chunkSize;
    }

    status = zlib_inflate.inflate(strm, c.Z_NO_FLUSH);    /* no bad return value */

    // Install the preset dictionary on demand, then keep inflating.
    if (status === c.Z_NEED_DICT && dictionary) {
      // Convert data if needed
      if (typeof dictionary === 'string') {
        dict = strings.string2buf(dictionary);
      } else if (toString.call(dictionary) === '[object ArrayBuffer]') {
        dict = new Uint8Array(dictionary);
      } else {
        dict = dictionary;
      }

      status = zlib_inflate.inflateSetDictionary(this.strm, dict);
    }

    if (status === c.Z_BUF_ERROR && allowBufError === true) {
      // Expected on the extra flush-check pass (see below) - not fatal.
      status = c.Z_OK;
      allowBufError = false;
    }

    if (status !== c.Z_STREAM_END && status !== c.Z_OK) {
      this.onEnd(status);
      this.ended = true;
      return false;
    }

    if (strm.next_out) {
      if (strm.avail_out === 0 || status === c.Z_STREAM_END || (strm.avail_in === 0 && (_mode === c.Z_FINISH || _mode === c.Z_SYNC_FLUSH))) {
        if (this.options.to === 'string') {
          // Emit only up to the last complete UTF-8 sequence; the
          // partial tail is moved to the front of the output buffer
          // to be completed by the next pass.
          next_out_utf8 = strings.utf8border(strm.output, strm.next_out);

          tail = strm.next_out - next_out_utf8;
          utf8str = strings.buf2string(strm.output, next_out_utf8);

          // move tail
          strm.next_out = tail;
          strm.avail_out = chunkSize - tail;
          if (tail) { utils.arraySet(strm.output, strm.output, next_out_utf8, tail, 0); }

          this.onData(utf8str);
        } else {
          this.onData(utils.shrinkBuf(strm.output, strm.next_out));
        }
      }
    }

    // When no more input data, we should check that internal inflate buffers
    // are flushed. The only way to do it when avail_out = 0 - run one more
    // inflate pass. But if output data not exists, inflate return Z_BUF_ERROR.
    // Here we set flag to process this error properly.
    //
    // NOTE. Deflate does not return error in this case and does not needs such
    // logic.
    if (strm.avail_in === 0 && strm.avail_out === 0) {
      allowBufError = true;
    }

  } while ((strm.avail_in > 0 || strm.avail_out === 0) && status !== c.Z_STREAM_END);

  // End of stream reached in the data itself - finalize regardless of
  // the requested mode.
  if (status === c.Z_STREAM_END) {
    _mode = c.Z_FINISH;
  }

  // Finalize on the last chunk.
  if (_mode === c.Z_FINISH) {
    status = zlib_inflate.inflateEnd(this.strm);
    this.onEnd(status);
    this.ended = true;
    return status === c.Z_OK;
  }

  // callback interim results if Z_SYNC_FLUSH.
  if (_mode === c.Z_SYNC_FLUSH) {
    this.onEnd(c.Z_OK);
    // Force a fresh output buffer on the next push().
    strm.avail_out = 0;
    return true;
  }

  return true;
};
/ * *
* Inflate # onData ( chunk ) - > Void
* - chunk (Uint8Array|Array|String): output data. Type of array depends
* on js engine support . When string output requested , each chunk
* will be string .
*
* By default , stores data blocks in ` chunks[] ` property and glue
* those in ` onEnd ` . Override this handler , if you need another behaviour .
* * /
Inflate.prototype.onData = function (chunk) {
  // Default handler: buffer every output chunk; onEnd() joins them.
  var collected = this.chunks;
  collected[collected.length] = chunk;
};
/ * *
* Inflate # onEnd ( status ) - > Void
* - status ( Number ) : inflate status . 0 ( Z _OK ) on success ,
* other if not .
*
* Called either after you tell inflate that the input stream is
* complete ( Z _FINISH ) or should be flushed ( Z _SYNC _FLUSH )
* or if an error happened . By default - join collected chunks ,
* free memory and fill ` results ` / ` err ` properties .
* * /
Inflate.prototype.onEnd = function (status) {
  // Default end handler: on success, glue the collected chunks into
  // `result` (utf16 string or flat array depending on options.to),
  // then release the chunk list and record status/message.
  if (status === c.Z_OK) {
    this.result = (this.options.to === 'string')
      // Glue & convert here, until we teach pako to send
      // utf8 aligned strings to onData
      ? this.chunks.join('')
      : utils.flattenChunks(this.chunks);
  }
  this.chunks = [];
  this.err = status;
  this.msg = this.strm.msg;
};
/ * *
* inflate ( data [ , options ] ) - > Uint8Array | Array | String
* - data ( Uint8Array | Array | String ) : input data to decompress .
* - options ( Object ) : zlib inflate options .
*
* Decompress ` data ` with inflate / ungzip and ` options ` . Autodetect
* format via wrapper header by default . That 's why we don' t provide
* separate ` ungzip ` method .
*
* Supported options are :
*
* - windowBits
*
* [ http : //zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)
* for more information .
*
* Sugar ( options ) :
*
* - ` raw ` ( Boolean ) - say that we work with raw stream , if you don ' t wish to specify
* negative windowBits implicitly .
* - ` to ` ( String ) - if equal to 'string' , then result will be converted
* from utf8 to utf16 ( javascript ) string . When string output requested ,
* chunk length can differ from ` chunkSize ` , depending on content .
*
*
* # # # # # Example :
*
* ` ` ` javascript
* var pako = require ( 'pako' )
* , input = pako . deflate ( [ 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 ] )
* , output ;
*
* try {
* output = pako . inflate ( input ) ;
* } catch (err) {
* console . log ( err ) ;
* }
* ` ` `
* * /
// One-shot decompression: create an inflator, feed the whole input as
// the final chunk, and return the accumulated result.
function inflate(input, options) {
  var decompressor = new Inflate(options);

  decompressor.push(input, true);

  if (decompressor.err) {
    // Only reachable with corrupted input or broken options; the
    // string throw is kept for backward compatibility with callers.
    throw decompressor.msg;
  }

  return decompressor.result;
}
/ * *
* inflateRaw ( data [ , options ] ) - > Uint8Array | Array | String
* - data ( Uint8Array | Array | String ) : input data to decompress .
* - options ( Object ) : zlib inflate options .
*
* The same as [ [ inflate ] ] , but creates raw data , without wrapper
* ( header and adler32 crc ) .
* * /
/**
 * inflateRaw(data[, options]) -> Uint8Array|Array|String
 *
 * The same as inflate(), but for raw data without a wrapper
 * (header and adler32 crc).
 *
 * Fix: no longer mutates the caller-supplied `options` object; the
 * `raw` flag is set on a shallow copy instead, so a shared options
 * object can be safely reused with other calls.
 */
function inflateRaw(input, options) {
  var opts = utils.assign({}, options || {}, { raw: true });
  return inflate(input, opts);
}
/ * *
* ungzip ( data [ , options ] ) - > Uint8Array | Array | String
* - data ( Uint8Array | Array | String ) : input data to decompress .
* - options ( Object ) : zlib inflate options .
*
* Just shortcut to [ [ inflate ] ] , because it autodetects format
* by the header content. Done for convenience.
* * /
exports . Inflate = Inflate ;
exports . inflate = inflate ;
exports . inflateRaw = inflateRaw ;
exports . ungzip = inflate ;
} , { "./utils/common" : 24 , "./utils/strings" : 25 , "./zlib/constants" : 27 , "./zlib/gzheader" : 30 , "./zlib/inflate" : 32 , "./zlib/messages" : 34 , "./zlib/zstream" : 36 } ] , 24 : [ function ( require , module , exports ) {
'use strict' ;
// True only when the environment provides every typed-array type the
// helpers below rely on; feeds the initial setTyped() call at the
// bottom of this module.
var TYPED_OK = (typeof Uint8Array !== 'undefined') &&
               (typeof Uint16Array !== 'undefined') &&
               (typeof Int32Array !== 'undefined');
exports . assign = function ( obj /*from1, from2, from3, ...*/ ) {
var sources = Array . prototype . slice . call ( arguments , 1 ) ;
while ( sources . length ) {
var source = sources . shift ( ) ;
if ( ! source ) { continue ; }
if ( typeof source !== 'object' ) {
throw new TypeError ( source + 'must be non-object' ) ;
}
for ( var p in source ) {
if ( source . hasOwnProperty ( p ) ) {
obj [ p ] = source [ p ] ;
}
}
}
return obj ;
} ;
// reduce buffer size, avoiding mem copy
exports . shrinkBuf = function ( buf , size ) {
if ( buf . length === size ) { return buf ; }
if ( buf . subarray ) { return buf . subarray ( 0 , size ) ; }
buf . length = size ;
return buf ;
} ;
// Typed-array implementations of the copy/join helpers; installed on
// the exports object by setTyped(true).
var fnTyped = {
  arraySet: function (dest, src, src_offs, len, dest_offs) {
    // Fast path: typed-to-typed copy via set()/subarray().
    if (src.subarray && dest.subarray) {
      dest.set(src.subarray(src_offs, src_offs + len), dest_offs);
      return;
    }
    // Mixed typed/untyped arguments: element-wise fallback.
    for (var i = 0; i < len; i++) {
      dest[dest_offs + i] = src[src_offs + i];
    }
  },
  // Join array of chunks to single array.
  flattenChunks: function (chunks) {
    var i, l;

    // First pass: total output length.
    var total = 0;
    for (i = 0, l = chunks.length; i < l; i++) {
      total += chunks[i].length;
    }

    // Second pass: copy each chunk into place.
    var result = new Uint8Array(total);
    var pos = 0;
    for (i = 0, l = chunks.length; i < l; i++) {
      var chunk = chunks[i];
      result.set(chunk, pos);
      pos += chunk.length;
    }

    return result;
  }
};
// Plain-array implementations of the copy/join helpers; installed on
// the exports object by setTyped(false).
var fnUntyped = {
  arraySet: function (dest, src, src_offs, len, dest_offs) {
    // Element-wise copy for environments without typed arrays.
    var i = 0;
    while (i < len) {
      dest[dest_offs + i] = src[src_offs + i];
      i++;
    }
  },
  // Join array of chunks to single array.
  flattenChunks: function (chunks) {
    return [].concat.apply([], chunks);
  }
};
// Enable/Disable typed arrays use, for testing
//
exports . setTyped = function ( on ) {
if ( on ) {
exports . Buf8 = Uint8Array ;
exports . Buf16 = Uint16Array ;
exports . Buf32 = Int32Array ;
exports . assign ( exports , fnTyped ) ;
} else {
exports . Buf8 = Array ;
exports . Buf16 = Array ;
exports . Buf32 = Array ;
exports . assign ( exports , fnUntyped ) ;
}
} ;
exports . setTyped ( TYPED _OK ) ;
} , { } ] , 25 : [ function ( require , module , exports ) {
// String encode/decode helpers
'use strict' ;
var utils = require ( './common' ) ;
// Quick check if we can use fast array to bin string conversion
//
// - apply(Array) can fail on Android 2.2
// - apply(Uint8Array) can fail on iOS 5.1 Safari
//
// Probed once at load time: whether a single fromCharCode.apply() call
// works for plain arrays / typed arrays on this engine. When false,
// buf2binstring() falls back to per-character concatenation.
var STR_APPLY_OK = true;
var STR_APPLY_UIA_OK = true;

try { String.fromCharCode.apply(null, [ 0 ]); } catch (__) { STR_APPLY_OK = false; }
try { String.fromCharCode.apply(null, new Uint8Array(1)); } catch (__) { STR_APPLY_UIA_OK = false; }

// Table with utf8 lengths (calculated by first byte of sequence)
// Note, that 5 & 6-byte values and some 4-byte values can not be represented in JS,
// because max possible codepoint is 0x10ffff
var _utf8len = new utils.Buf8(256);
for (var q = 0; q < 256; q++) {
  _utf8len[q] = (q >= 252 ? 6 : q >= 248 ? 5 : q >= 240 ? 4 : q >= 224 ? 3 : q >= 192 ? 2 : 1);
}
// NOTE(review): index 254 is assigned twice here, so byte 0xFF keeps
// length 6 from the loop above. `_utf8len[255]` may have been intended
// as well - verify against upstream before changing, since it alters
// buf2string() handling of 0xFF bytes.
_utf8len[254] = _utf8len[254] = 1; // Invalid sequence start
// convert string to array (typed, when possible)
// Encodes a JS (UTF-16) string into a UTF-8 byte buffer. Surrogate
// pairs are combined into single code points; the buffer is sized
// exactly by a first counting pass over the string.
exports.string2buf = function (str) {
  var buf, c, c2, m_pos, i, str_len = str.length, buf_len = 0;

  // count binary size
  for (m_pos = 0; m_pos < str_len; m_pos++) {
    c = str.charCodeAt(m_pos);
    // High surrogate followed by a low surrogate -> one 4-byte code point.
    if ((c & 0xfc00) === 0xd800 && (m_pos + 1 < str_len)) {
      c2 = str.charCodeAt(m_pos + 1);
      if ((c2 & 0xfc00) === 0xdc00) {
        c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00);
        m_pos++;
      }
    }
    buf_len += c < 0x80 ? 1 : c < 0x800 ? 2 : c < 0x10000 ? 3 : 4;
  }

  // allocate buffer
  buf = new utils.Buf8(buf_len);

  // convert (same surrogate-pair logic as the counting pass)
  for (i = 0, m_pos = 0; i < buf_len; m_pos++) {
    c = str.charCodeAt(m_pos);
    if ((c & 0xfc00) === 0xd800 && (m_pos + 1 < str_len)) {
      c2 = str.charCodeAt(m_pos + 1);
      if ((c2 & 0xfc00) === 0xdc00) {
        c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00);
        m_pos++;
      }
    }
    if (c < 0x80) {
      /* one byte */
      buf[i++] = c;
    } else if (c < 0x800) {
      /* two bytes */
      buf[i++] = 0xC0 | (c >>> 6);
      buf[i++] = 0x80 | (c & 0x3f);
    } else if (c < 0x10000) {
      /* three bytes */
      buf[i++] = 0xE0 | (c >>> 12);
      buf[i++] = 0x80 | (c >>> 6 & 0x3f);
      buf[i++] = 0x80 | (c & 0x3f);
    } else {
      /* four bytes */
      buf[i++] = 0xf0 | (c >>> 18);
      buf[i++] = 0x80 | (c >>> 12 & 0x3f);
      buf[i++] = 0x80 | (c >>> 6 & 0x3f);
      buf[i++] = 0x80 | (c & 0x3f);
    }
  }

  return buf;
};
// Helper (used in 2 places)
// Converts the first `len` elements of `buf` into a string of
// same-valued char codes. Uses one fromCharCode.apply() call when the
// engine allows it (probed above), otherwise builds the string
// character by character.
function buf2binstring(buf, len) {
  // use fallback for big arrays to avoid stack overflow
  if (len < 65537) {
    if ((buf.subarray && STR_APPLY_UIA_OK) || (!buf.subarray && STR_APPLY_OK)) {
      return String.fromCharCode.apply(null, utils.shrinkBuf(buf, len));
    }
  }

  var result = '';
  for (var i = 0; i < len; i++) {
    result += String.fromCharCode(buf[i]);
  }
  return result;
}
// Convert byte array to binary string
exports . buf2binstring = function ( buf ) {
return buf2binstring ( buf , buf . length ) ;
} ;
// Convert binary string (typed, when possible)
exports . binstring2buf = function ( str ) {
var buf = new utils . Buf8 ( str . length ) ;
for ( var i = 0 , len = buf . length ; i < len ; i ++ ) {
buf [ i ] = str . charCodeAt ( i ) ;
}
return buf ;
} ;
// convert array to string
// Decodes UTF-8 bytes (up to `max`, or the whole buffer) into a JS
// string. Invalid or truncated sequences become U+FFFD; code points
// above 0xFFFF are re-split into surrogate pairs.
exports.buf2string = function (buf, max) {
  var i, out, c, c_len;
  var len = max || buf.length;

  // Reserve max possible length (2 words per char)
  // NB: by unknown reasons, Array is significantly faster for
  // String.fromCharCode.apply than Uint16Array.
  var utf16buf = new Array(len * 2);

  for (out = 0, i = 0; i < len;) {
    c = buf[i++];
    // quick process ascii
    if (c < 0x80) { utf16buf[out++] = c; continue; }

    c_len = _utf8len[c];
    // skip 5 & 6 byte codes
    if (c_len > 4) { utf16buf[out++] = 0xfffd; i += c_len - 1; continue; }

    // apply mask on first byte
    c &= c_len === 2 ? 0x1f : c_len === 3 ? 0x0f : 0x07;
    // join the rest (continuation bytes contribute 6 bits each)
    while (c_len > 1 && i < len) {
      c = (c << 6) | (buf[i++] & 0x3f);
      c_len--;
    }

    // terminated by end of string?
    if (c_len > 1) { utf16buf[out++] = 0xfffd; continue; }

    if (c < 0x10000) {
      utf16buf[out++] = c;
    } else {
      // Supplementary code point: re-encode as a surrogate pair.
      c -= 0x10000;
      utf16buf[out++] = 0xd800 | ((c >> 10) & 0x3ff);
      utf16buf[out++] = 0xdc00 | (c & 0x3ff);
    }
  }

  return buf2binstring(utf16buf, out);
};
// Calculate max possible position in utf8 buffer,
// that will not break sequence. If that's not possible
// - (very small limits) return max size as is.
//
// buf[] - utf8 bytes array
// max - length limit (mandatory);
exports . utf8border = function ( buf , max ) {
var pos ;
max = max || buf . length ;
if ( max > buf . length ) { max = buf . length ; }
// go back from last position, until start of sequence found
pos = max - 1 ;
while ( pos >= 0 && ( buf [ pos ] & 0xC0 ) === 0x80 ) { pos -- ; }
// Fuckup - very small and broken sequence,
// return max, because we should return something anyway.
if ( pos < 0 ) { return max ; }
// If we came to start of buffer - that means vuffer is too small,
// return max too.
if ( pos === 0 ) { return max ; }
return ( pos + _utf8len [ buf [ pos ] ] > max ) ? pos : max ;
} ;
} , { "./common" : 24 } ] , 26 : [ function ( require , module , exports ) {
'use strict' ;
// Note: adler32 takes 12% for level 0 and 2% for level 6.
// It isn't worth making additional optimizations as in the original.
// Small size is preferable.
function adler32 ( adler , buf , len , pos ) {
var s1 = ( adler & 0xffff ) | 0 ,
s2 = ( ( adler >>> 16 ) & 0xffff ) | 0 ,
n = 0 ;
while ( len !== 0 ) {
// Set limit ~ twice less than 5552, to keep
// s2 in 31-bits, because we force signed ints.
// in other case %= will fail.
n = len > 2000 ? 2000 : len ;
len -= n ;
do {
s1 = ( s1 + buf [ pos ++ ] ) | 0 ;
s2 = ( s2 + s1 ) | 0 ;
} while ( -- n ) ;
s1 %= 65521 ;
s2 %= 65521 ;
}
return ( s1 | ( s2 << 16 ) ) | 0 ;
}
module . exports = adler32 ;
} , { } ] , 27 : [ function ( require , module , exports ) {
'use strict';

// Shared zlib constants (the subset used by pako modules).
module.exports = {

  /* Allowed flush values; see deflate() and inflate() below for details */
  Z_NO_FLUSH: 0,
  Z_PARTIAL_FLUSH: 1,
  Z_SYNC_FLUSH: 2,
  Z_FULL_FLUSH: 3,
  Z_FINISH: 4,
  Z_BLOCK: 5,
  Z_TREES: 6,

  /* Return codes for the compression/decompression functions. Negative values
   * are errors, positive values are used for special but normal events.
   */
  Z_OK: 0,
  Z_STREAM_END: 1,
  Z_NEED_DICT: 2,
  Z_ERRNO: -1,
  Z_STREAM_ERROR: -2,
  Z_DATA_ERROR: -3,
  //Z_MEM_ERROR: -4,
  Z_BUF_ERROR: -5,
  //Z_VERSION_ERROR: -6,

  /* compression levels */
  Z_NO_COMPRESSION: 0,
  Z_BEST_SPEED: 1,
  Z_BEST_COMPRESSION: 9,
  Z_DEFAULT_COMPRESSION: -1,

  /* compression strategies */
  Z_FILTERED: 1,
  Z_HUFFMAN_ONLY: 2,
  Z_RLE: 3,
  Z_FIXED: 4,
  Z_DEFAULT_STRATEGY: 0,

  /* Possible values of the data_type field (though see inflate()) */
  Z_BINARY: 0,
  Z_TEXT: 1,
  //Z_ASCII: 1, // = Z_TEXT (deprecated)
  Z_UNKNOWN: 2,

  /* The deflate compression method */
  Z_DEFLATED: 8
  //Z_NULL: null // Use -1 or null inline, depending on var type
};
} , { } ] , 28 : [ function ( require , module , exports ) {
'use strict' ;
// Note: we can't get significant speed boost here.
// So write code to minimize size - no pregenerated tables
// and array tools dependencies.
// Use ordinary array, since untyped makes no boost here
// Build the 256-entry CRC-32 lookup table for the reflected polynomial
// 0xEDB88320 (entries are signed 32-bit integers).
function makeTable() {
  var table = [];

  for (var n = 0; n < 256; n++) {
    var c = n;
    for (var k = 0; k < 8; k++) {
      c = (c & 1) ? (0xEDB88320 ^ (c >>> 1)) : (c >>> 1);
    }
    table[n] = c;
  }

  return table;
}
// Create table on load. Just 255 signed longs. Not a problem.
var crcTable = makeTable();

// Standard table-driven CRC-32 over buf[pos .. pos+len-1], continuing
// from the running value `crc` (use 0 to start a fresh checksum).
// Returns a signed 32-bit integer.
function crc32(crc, buf, len, pos) {
  var table = crcTable;
  var end = pos + len;

  crc = crc ^ (-1);

  for (var i = pos; i < end; i++) {
    crc = (crc >>> 8) ^ table[(crc ^ buf[i]) & 0xFF];
  }

  return (crc ^ (-1)); // >>> 0;
}

module.exports = crc32;
} , { } ] , 29 : [ function ( require , module , exports ) {
'use strict' ;
var utils = require ( '../utils/common' ) ;
var trees = require ( './trees' ) ;
var adler32 = require ( './adler32' ) ;
var crc32 = require ( './crc32' ) ;
var msg = require ( './messages' ) ;
/* Public constants ==========================================================*/
/* ===========================================================================*/

/* Allowed flush values; see deflate() and inflate() below for details */
var Z_NO_FLUSH = 0;
var Z_PARTIAL_FLUSH = 1;
//var Z_SYNC_FLUSH = 2;
var Z_FULL_FLUSH = 3;
var Z_FINISH = 4;
var Z_BLOCK = 5;
//var Z_TREES = 6;

/* Return codes for the compression/decompression functions. Negative values
 * are errors, positive values are used for special but normal events.
 */
var Z_OK = 0;
var Z_STREAM_END = 1;
//var Z_NEED_DICT = 2;
//var Z_ERRNO = -1;
var Z_STREAM_ERROR = -2;
var Z_DATA_ERROR = -3;
//var Z_MEM_ERROR = -4;
var Z_BUF_ERROR = -5;
//var Z_VERSION_ERROR = -6;

/* compression levels */
//var Z_NO_COMPRESSION = 0;
//var Z_BEST_SPEED = 1;
//var Z_BEST_COMPRESSION = 9;
var Z_DEFAULT_COMPRESSION = -1;

/* compression strategies */
var Z_FILTERED = 1;
var Z_HUFFMAN_ONLY = 2;
var Z_RLE = 3;
var Z_FIXED = 4;
var Z_DEFAULT_STRATEGY = 0;

/* Possible values of the data_type field (though see inflate()) */
//var Z_BINARY = 0;
//var Z_TEXT = 1;
//var Z_ASCII = 1; // = Z_TEXT (deprecated)
var Z_UNKNOWN = 2;

/* The deflate compression method */
var Z_DEFLATED = 8;

/*============================================================================*/

var MAX_MEM_LEVEL = 9;
/* Maximum value for memLevel in deflateInit2 */
var MAX_WBITS = 15;
/* 32K LZ77 window */
var DEF_MEM_LEVEL = 8;

var LENGTH_CODES = 29;
/* number of length codes, not counting the special END_BLOCK code */
var LITERALS = 256;
/* number of literal bytes 0..255 */
var L_CODES = LITERALS + 1 + LENGTH_CODES;
/* number of Literal or Length codes, including the END_BLOCK code */
var D_CODES = 30;
/* number of distance codes */
var BL_CODES = 19;
/* number of codes used to transfer the bit lengths */
var HEAP_SIZE = 2 * L_CODES + 1;
/* maximum heap size */
var MAX_BITS = 15;
/* All codes must not exceed MAX_BITS bits */

var MIN_MATCH = 3;
/* shortest match the deflate format can encode */
var MAX_MATCH = 258;
/* longest match the deflate format can encode */
var MIN_LOOKAHEAD = (MAX_MATCH + MIN_MATCH + 1);
/* minimum lookahead required before searching for a match */

var PRESET_DICT = 0x20;
/* zlib header flag bit: a preset dictionary follows the header */

/* Values of the `status` field of the deflate state machine */
var INIT_STATE = 42;
var EXTRA_STATE = 69;
var NAME_STATE = 73;
var COMMENT_STATE = 91;
var HCRC_STATE = 103;
var BUSY_STATE = 113;
var FINISH_STATE = 666;

/* Block-state return values of the deflate_* routines below */
var BS_NEED_MORE = 1; /* block not completed, need more input or more output */
var BS_BLOCK_DONE = 2; /* block flush performed */
var BS_FINISH_STARTED = 3; /* finish started, need only more output at next deflate */
var BS_FINISH_DONE = 4; /* finish done, accept no more input or output */

var OS_CODE = 0x03; // Unix :). Don't detect, use this default.
/*
 * Attach the table message for `code` to the stream and hand the code
 * back, so call sites can write `return err(strm, Z_STREAM_ERROR);`.
 */
function err(strm, code) {
  strm.msg = msg[code];
  return code;
}
// Map a flush value onto a sortable rank: flush levels are doubled,
// and values above 4 are pulled back down the scale by 9.
function rank(f) {
  var doubled = f << 1;
  return f > 4 ? doubled - 9 : doubled;
}
// Overwrite every element of buf with 0 (memset-style; works on plain
// and typed arrays alike).
function zero(buf) {
  for (var i = buf.length - 1; i >= 0; i--) {
    buf[i] = 0;
  }
}
/* =========================================================================
 * Flush as much pending output as possible. All deflate() output goes
 * through this function, so some applications may wish to modify it
 * to avoid allocating a large strm->output buffer and copying into it.
 * (See also read_buf()).
 */
function flush_pending(strm) {
  var state = strm.state;

  //_tr_flush_bits(s);
  // Drain no more than the room left in the caller's output buffer.
  var count = state.pending > strm.avail_out ? strm.avail_out : state.pending;
  if (count === 0) { return; }

  utils.arraySet(strm.output, state.pending_buf, state.pending_out, count, strm.next_out);
  strm.next_out += count;
  strm.avail_out -= count;
  strm.total_out += count;
  state.pending_out += count;
  state.pending -= count;
  // Once fully drained, rewind the read cursor for the next fill.
  if (state.pending === 0) {
    state.pending_out = 0;
  }
}
/*
 * Emit the current block through the tree module, advance block_start to
 * the current position, then try to drain pending output to the stream.
 */
function flush_block_only(s, last) {
  var start = s.block_start >= 0 ? s.block_start : -1;
  trees._tr_flush_block(s, start, s.strstart - s.block_start, last);
  s.block_start = s.strstart;
  flush_pending(s.strm);
}
// Append a single byte to the pending output buffer.
function put_byte(s, b) {
  s.pending_buf[s.pending] = b;
  s.pending += 1;
}
/* =========================================================================
 * Put a short in the pending buffer. The 16-bit value is put in MSB order.
 * IN assertion: the stream state is correct and there is enough room in
 * pending_buf.
 */
function putShortMSB(s, b) {
  // High byte first, then low byte; bump the cursor once at the end.
  s.pending_buf[s.pending] = (b >>> 8) & 0xff;
  s.pending_buf[s.pending + 1] = b & 0xff;
  s.pending += 2;
}
/* ===========================================================================
 * Read a new buffer from the current input stream, update the adler32
 * and total number of bytes read. All deflate() input goes through
 * this function so some applications may wish to modify it to avoid
 * allocating a large strm->input buffer and copying from it.
 * (See also flush_pending()).
 */
function read_buf(strm, buf, start, size) {
  // Never consume more than the caller asked for or the input holds.
  var len = strm.avail_in > size ? size : strm.avail_in;
  if (len === 0) { return 0; }

  strm.avail_in -= len;

  // zmemcpy(buf, strm->next_in, len);
  utils.arraySet(buf, strm.input, strm.next_in, len, start);

  // wrap === 1 → zlib stream (adler32); wrap === 2 → gzip stream (crc32).
  var wrap = strm.state.wrap;
  if (wrap === 1) {
    strm.adler = adler32(strm.adler, buf, len, start);
  } else if (wrap === 2) {
    strm.adler = crc32(strm.adler, buf, len, start);
  }

  strm.next_in += len;
  strm.total_in += len;
  return len;
}
/* ===========================================================================
 * Set match_start to the longest match starting at the given string and
 * return its length. Matches shorter or equal to prev_length are discarded,
 * in which case the result is equal to prev_length and match_start is
 * garbage.
 * IN assertions: cur_match is the head of the hash chain for the current
 *   string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
 * OUT assertion: the match length is not greater than s->lookahead.
 */
function longest_match(s, cur_match) {
  var chain_length = s.max_chain_length;      /* max hash chain length */
  var scan = s.strstart;                      /* current string */
  var match;                                  /* matched string */
  var len;                                    /* length of current match */
  var best_len = s.prev_length;               /* best match length so far */
  var nice_match = s.nice_match;              /* stop if match long enough */
  var limit = (s.strstart > (s.w_size - MIN_LOOKAHEAD)) ?
      s.strstart - (s.w_size - MIN_LOOKAHEAD) : 0/*NIL*/;

  var _win = s.window; // shortcut

  var wmask = s.w_mask;
  var prev = s.prev;

  /* Stop when cur_match becomes <= limit. To simplify the code,
   * we prevent matches with the string of window index 0.
   */
  var strend = s.strstart + MAX_MATCH;
  var scan_end1 = _win[scan + best_len - 1];
  var scan_end = _win[scan + best_len];

  /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
   * It is easy to get rid of this optimization if necessary.
   */
  // Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");

  /* Do not waste too much time if we already have a good match: */
  if (s.prev_length >= s.good_match) {
    chain_length >>= 2;
  }
  /* Do not look for matches beyond the end of the input. This is necessary
   * to make deflate deterministic.
   */
  if (nice_match > s.lookahead) { nice_match = s.lookahead; }

  // Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");

  do {
    // Assert(cur_match < s->strstart, "no future");
    match = cur_match;

    /* Skip to next match if the match length cannot increase
     * or if the match length is less than 2.  Note that the checks below
     * for insufficient lookahead only occur occasionally for performance
     * reasons.  Therefore uninitialized memory will be accessed, and
     * conditional jumps will be made that depend on those values.
     * However the length of the match is limited to the lookahead, so
     * the output of deflate is not affected by the uninitialized values.
     */
    /* NOTE: this cheap 4-byte pre-check rejects chain entries that cannot
     * beat best_len; `continue` re-evaluates the while condition, which
     * advances cur_match down the hash chain.
     */
    if (_win[match + best_len] !== scan_end ||
        _win[match + best_len - 1] !== scan_end1 ||
        _win[match] !== _win[scan] ||
        _win[++match] !== _win[scan + 1]) {
      continue;
    }

    /* The check at best_len-1 can be removed because it will be made
     * again later. (This heuristic is not always a win.)
     * It is not necessary to compare scan[2] and match[2] since they
     * are always equal when the other bytes match, given that
     * the hash keys are equal and that HASH_BITS >= 8.
     */
    scan += 2;
    match++;
    // Assert(*scan == *match, "match[2]?");

    /* We check for insufficient lookahead only every 8th comparison;
     * the 256th check will be made at strstart+258.
     */
    do {
      /*jshint noempty:false*/
    } while (_win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&
             _win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&
             _win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&
             _win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&
             scan < strend);

    // Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");

    len = MAX_MATCH - (strend - scan);
    scan = strend - MAX_MATCH;

    if (len > best_len) {
      s.match_start = cur_match;
      best_len = len;
      if (len >= nice_match) {
        break;
      }
      scan_end1 = _win[scan + best_len - 1];
      scan_end = _win[scan + best_len];
    }
  } while ((cur_match = prev[cur_match & wmask]) > limit && --chain_length !== 0);

  if (best_len <= s.lookahead) {
    return best_len;
  }
  return s.lookahead;
}
/* ===========================================================================
 * Fill the window when the lookahead becomes insufficient.
 * Updates strstart and lookahead.
 *
 * IN assertion: lookahead < MIN_LOOKAHEAD
 * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
 *    At least one byte has been read, or avail_in == 0; reads are
 *    performed for at least two bytes (required for the zip translate_eol
 *    option -- not supported here).
 */
function fill_window(s) {
  var _w_size = s.w_size;
  var p, n, m, more, str;

  //Assert(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead");

  do {
    more = s.window_size - s.lookahead - s.strstart;

    // JS ints have 32 bit, block below not needed
    /* Deal with !@#$% 64K limit: */
    //if (sizeof(int) <= 2) {
    //  if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
    //    more = wsize;
    //
    //  } else if (more == (unsigned)(-1)) {
    //    /* Very unlikely, but possible on 16 bit machine if
    //     * strstart == 0 && lookahead == 1 (input done a byte at time)
    //     */
    //    more--;
    //  }
    //}

    /* If the window is almost full and there is insufficient lookahead,
     * move the upper half to the lower one to make room in the upper half.
     */
    if (s.strstart >= _w_size + (_w_size - MIN_LOOKAHEAD)) {
      utils.arraySet(s.window, s.window, _w_size, _w_size, 0);
      s.match_start -= _w_size;
      s.strstart -= _w_size;
      /* we now have strstart >= MAX_DIST */
      s.block_start -= _w_size;

      /* Slide the hash table (could be avoided with 32 bit values
         at the expense of memory usage). We slide even when level == 0
         to keep the hash table consistent if we switch back to level > 0
         later. (Using level 0 permanently is not an optimal usage of
         zlib, so we don't care about this pathological case.)
       */
      n = s.hash_size;
      p = n;
      do {
        m = s.head[--p];
        /* Entries that pointed into the discarded lower half become NIL (0). */
        s.head[p] = (m >= _w_size ? m - _w_size : 0);
      } while (--n);

      n = _w_size;
      p = n;
      do {
        m = s.prev[--p];
        s.prev[p] = (m >= _w_size ? m - _w_size : 0);
        /* If n is not on any hash chain, prev[n] is garbage but
         * its value will never be used.
         */
      } while (--n);
      more += _w_size;
    }
    if (s.strm.avail_in === 0) {
      break;
    }

    /* If there was no sliding:
     *    strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
     *    more == window_size - lookahead - strstart
     * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
     * => more >= window_size - 2*WSIZE + 2
     * In the BIG_MEM or MMAP case (not yet supported),
     *   window_size == input_size + MIN_LOOKAHEAD &&
     *   strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
     * Otherwise, window_size == 2*WSIZE so more >= 2.
     * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
     */
    //Assert(more >= 2, "more < 2");
    n = read_buf(s.strm, s.window, s.strstart + s.lookahead, more);
    s.lookahead += n;

    /* Initialize the hash value now that we have some input: */
    if (s.lookahead + s.insert >= MIN_MATCH) {
      str = s.strstart - s.insert;
      s.ins_h = s.window[str];

      /* UPDATE_HASH(s, s->ins_h, s->window[str + 1]); */
      s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[str + 1]) & s.hash_mask;
      //#if MIN_MATCH != 3
      //        Call update_hash() MIN_MATCH-3 more times
      //#endif
      /* Insert every previously-unhashed position that now has enough
       * following bytes to form a MIN_MATCH-length string.
       */
      while (s.insert) {
        /* UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); */
        s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[str + MIN_MATCH - 1]) & s.hash_mask;

        s.prev[str & s.w_mask] = s.head[s.ins_h];
        s.head[s.ins_h] = str;
        str++;
        s.insert--;
        if (s.lookahead + s.insert < MIN_MATCH) {
          break;
        }
      }
    }
    /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
     * but this is not important since only literal bytes will be emitted.
     */

  } while (s.lookahead < MIN_LOOKAHEAD && s.strm.avail_in !== 0);

  /* If the WIN_INIT bytes after the end of the current data have never been
   * written, then zero those bytes in order to avoid memory check reports of
   * the use of uninitialized (or uninitialised as Julian writes) bytes by
   * the longest match routines.  Update the high water mark for the next
   * time through here.  WIN_INIT is set to MAX_MATCH since the longest match
   * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead.
   */
//  if (s.high_water < s.window_size) {
//    var curr = s.strstart + s.lookahead;
//    var init = 0;
//
//    if (s.high_water < curr) {
//      /* Previous high water mark below current data -- zero WIN_INIT
//       * bytes or up to end of window, whichever is less.
//       */
//      init = s.window_size - curr;
//      if (init > WIN_INIT)
//        init = WIN_INIT;
//      zmemzero(s->window + curr, (unsigned)init);
//      s->high_water = curr + init;
//    }
//    else if (s->high_water < (ulg)curr + WIN_INIT) {
//      /* High water mark at or above current data, but below current data
//       * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up
//       * to end of window, whichever is less.
//       */
//      init = (ulg)curr + WIN_INIT - s->high_water;
//      if (init > s->window_size - s->high_water)
//        init = s->window_size - s->high_water;
//      zmemzero(s->window + s->high_water, (unsigned)init);
//      s->high_water += init;
//    }
//  }
//
//  Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,
//    "not enough room for search");
}
/* ===========================================================================
 * Copy without compression as much as possible from the input stream, return
 * the current block state.
 * This function does not insert new strings in the dictionary since
 * uncompressible data is probably not useful. This function is used
 * only for the level=0 compression option.
 * NOTE: this function should be optimized to avoid extra copying from
 * window to pending_buf.
 */
function deflate_stored(s, flush) {
  /* Stored blocks are limited to 0xffff bytes, pending_buf is limited
   * to pending_buf_size, and each stored block has a 5 byte header:
   */
  var max_block_size = 0xffff;

  if (max_block_size > s.pending_buf_size - 5) {
    max_block_size = s.pending_buf_size - 5;
  }

  /* Copy as much as possible from input to output: */
  for (;;) {
    /* Fill the window as much as possible: */
    if (s.lookahead <= 1) {

      //Assert(s->strstart < s->w_size+MAX_DIST(s) ||
      //  s->block_start >= (long)s->w_size, "slide too late");
      //      if (!(s.strstart < s.w_size + (s.w_size - MIN_LOOKAHEAD) ||
      //        s.block_start >= s.w_size)) {
      //        throw  new Error("slide too late");
      //      }

      fill_window(s);
      if (s.lookahead === 0 && flush === Z_NO_FLUSH) {
        return BS_NEED_MORE;
      }

      if (s.lookahead === 0) {
        break;
      }
      /* flush the current block */
    }
    //Assert(s->block_start >= 0L, "block gone");
    //    if (s.block_start < 0) throw new Error("block gone");

    /* Consume all buffered input at once; it is emitted verbatim. */
    s.strstart += s.lookahead;
    s.lookahead = 0;

    /* Emit a stored block if pending_buf will be full: */
    var max_start = s.block_start + max_block_size;

    if (s.strstart === 0 || s.strstart >= max_start) {
      /* strstart == 0 is possible when wraparound on 16-bit machine */
      s.lookahead = s.strstart - max_start;
      s.strstart = max_start;
      /*** FLUSH_BLOCK(s, 0); ***/
      flush_block_only(s, false);
      if (s.strm.avail_out === 0) {
        return BS_NEED_MORE;
      }
      /***/
    }
    /* Flush if we may have to slide, otherwise block_start may become
     * negative and the data will be gone:
     */
    if (s.strstart - s.block_start >= (s.w_size - MIN_LOOKAHEAD)) {
      /*** FLUSH_BLOCK(s, 0); ***/
      flush_block_only(s, false);
      if (s.strm.avail_out === 0) {
        return BS_NEED_MORE;
      }
      /***/
    }
  }

  s.insert = 0;

  if (flush === Z_FINISH) {
    /*** FLUSH_BLOCK(s, 1); ***/
    flush_block_only(s, true);
    if (s.strm.avail_out === 0) {
      return BS_FINISH_STARTED;
    }
    /***/
    return BS_FINISH_DONE;
  }

  if (s.strstart > s.block_start) {
    /*** FLUSH_BLOCK(s, 0); ***/
    flush_block_only(s, false);
    if (s.strm.avail_out === 0) {
      return BS_NEED_MORE;
    }
    /***/
  }

  return BS_NEED_MORE;
}
/* ===========================================================================
 * Compress as much as possible from the input stream, return the current
 * block state.
 * This function does not perform lazy evaluation of matches and inserts
 * new strings in the dictionary only for unmatched strings or for short
 * matches. It is used only for the fast compression options.
 */
function deflate_fast(s, flush) {
  var hash_head;        /* head of the hash chain */
  var bflush;           /* set if current block must be flushed */

  for (;;) {
    /* Make sure that we always have enough lookahead, except
     * at the end of the input file. We need MAX_MATCH bytes
     * for the next match, plus MIN_MATCH bytes to insert the
     * string following the next match.
     */
    if (s.lookahead < MIN_LOOKAHEAD) {
      fill_window(s);
      if (s.lookahead < MIN_LOOKAHEAD && flush === Z_NO_FLUSH) {
        return BS_NEED_MORE;
      }
      if (s.lookahead === 0) {
        break; /* flush the current block */
      }
    }

    /* Insert the string window[strstart .. strstart+2] in the
     * dictionary, and set hash_head to the head of the hash chain:
     */
    hash_head = 0/*NIL*/;
    if (s.lookahead >= MIN_MATCH) {
      /*** INSERT_STRING(s, s.strstart, hash_head); ***/
      s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask;
      hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];
      s.head[s.ins_h] = s.strstart;
      /***/
    }

    /* Find the longest match, discarding those <= prev_length.
     * At this point we have always match_length < MIN_MATCH
     */
    if (hash_head !== 0/*NIL*/ && ((s.strstart - hash_head) <= (s.w_size - MIN_LOOKAHEAD))) {
      /* To simplify the code, we prevent matches with the string
       * of window index 0 (in particular we have to avoid a match
       * of the string with itself at the start of the input file).
       */
      s.match_length = longest_match(s, hash_head);
      /* longest_match() sets match_start */
    }
    if (s.match_length >= MIN_MATCH) {
      // check_match(s, s.strstart, s.match_start, s.match_length); // for debug only

      /*** _tr_tally_dist(s, s.strstart - s.match_start,
                     s.match_length - MIN_MATCH, bflush); ***/
      bflush = trees._tr_tally(s, s.strstart - s.match_start, s.match_length - MIN_MATCH);

      s.lookahead -= s.match_length;

      /* Insert new strings in the hash table only if the match length
       * is not too large. This saves time but degrades compression.
       */
      if (s.match_length <= s.max_lazy_match/*max_insert_length*/ && s.lookahead >= MIN_MATCH) {
        s.match_length--; /* string at strstart already in table */
        do {
          s.strstart++;
          /*** INSERT_STRING(s, s.strstart, hash_head); ***/
          s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask;
          hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];
          s.head[s.ins_h] = s.strstart;
          /***/
          /* strstart never exceeds WSIZE-MAX_MATCH, so there are
           * always MIN_MATCH bytes ahead.
           */
        } while (--s.match_length !== 0);
        s.strstart++;
      } else
      {
        /* Match too long to be worth hashing: skip over it and reseed
         * the rolling hash at the new position.
         */
        s.strstart += s.match_length;
        s.match_length = 0;
        s.ins_h = s.window[s.strstart];
        /* UPDATE_HASH(s, s.ins_h, s.window[s.strstart+1]); */
        s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + 1]) & s.hash_mask;

        //#if MIN_MATCH != 3
        //                Call UPDATE_HASH() MIN_MATCH-3 more times
        //#endif
        /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
         * matter since it will be recomputed at next deflate call.
         */
      }
    } else {
      /* No match, output a literal byte */
      //Tracevv((stderr,"%c", s.window[s.strstart]));
      /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/
      bflush = trees._tr_tally(s, 0, s.window[s.strstart]);

      s.lookahead--;
      s.strstart++;
    }
    if (bflush) {
      /*** FLUSH_BLOCK(s, 0); ***/
      flush_block_only(s, false);
      if (s.strm.avail_out === 0) {
        return BS_NEED_MORE;
      }
      /***/
    }
  }
  s.insert = ((s.strstart < (MIN_MATCH - 1)) ? s.strstart : MIN_MATCH - 1);
  if (flush === Z_FINISH) {
    /*** FLUSH_BLOCK(s, 1); ***/
    flush_block_only(s, true);
    if (s.strm.avail_out === 0) {
      return BS_FINISH_STARTED;
    }
    /***/
    return BS_FINISH_DONE;
  }
  if (s.last_lit) {
    /*** FLUSH_BLOCK(s, 0); ***/
    flush_block_only(s, false);
    if (s.strm.avail_out === 0) {
      return BS_NEED_MORE;
    }
    /***/
  }
  return BS_BLOCK_DONE;
}
/* ===========================================================================
 * Same as above, but achieves better compression. We use a lazy
 * evaluation for matches: a match is finally adopted only if there is
 * no better match at the next window position.
 */
function deflate_slow(s, flush) {
  var hash_head;          /* head of hash chain */
  var bflush;             /* set if current block must be flushed */

  var max_insert;

  /* Process the input block. */
  for (;;) {
    /* Make sure that we always have enough lookahead, except
     * at the end of the input file. We need MAX_MATCH bytes
     * for the next match, plus MIN_MATCH bytes to insert the
     * string following the next match.
     */
    if (s.lookahead < MIN_LOOKAHEAD) {
      fill_window(s);
      if (s.lookahead < MIN_LOOKAHEAD && flush === Z_NO_FLUSH) {
        return BS_NEED_MORE;
      }
      if (s.lookahead === 0) { break; } /* flush the current block */
    }

    /* Insert the string window[strstart .. strstart+2] in the
     * dictionary, and set hash_head to the head of the hash chain:
     */
    hash_head = 0/*NIL*/;
    if (s.lookahead >= MIN_MATCH) {
      /*** INSERT_STRING(s, s.strstart, hash_head); ***/
      s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask;
      hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];
      s.head[s.ins_h] = s.strstart;
      /***/
    }

    /* Find the longest match, discarding those <= prev_length.
     */
    s.prev_length = s.match_length;
    s.prev_match = s.match_start;
    s.match_length = MIN_MATCH - 1;

    if (hash_head !== 0/*NIL*/ && s.prev_length < s.max_lazy_match &&
        s.strstart - hash_head <= (s.w_size - MIN_LOOKAHEAD)/*MAX_DIST(s)*/) {
      /* To simplify the code, we prevent matches with the string
       * of window index 0 (in particular we have to avoid a match
       * of the string with itself at the start of the input file).
       */
      s.match_length = longest_match(s, hash_head);
      /* longest_match() sets match_start */

      if (s.match_length <= 5 &&
         (s.strategy === Z_FILTERED || (s.match_length === MIN_MATCH && s.strstart - s.match_start > 4096/*TOO_FAR*/))) {

        /* If prev_match is also MIN_MATCH, match_start is garbage
         * but we will ignore the current match anyway.
         */
        s.match_length = MIN_MATCH - 1;
      }
    }
    /* If there was a match at the previous step and the current
     * match is not better, output the previous match:
     */
    if (s.prev_length >= MIN_MATCH && s.match_length <= s.prev_length) {
      max_insert = s.strstart + s.lookahead - MIN_MATCH;
      /* Do not insert strings in hash table beyond this. */

      //check_match(s, s.strstart-1, s.prev_match, s.prev_length);

      /***_tr_tally_dist(s, s.strstart - 1 - s.prev_match,
                     s.prev_length - MIN_MATCH, bflush);***/
      bflush = trees._tr_tally(s, s.strstart - 1 - s.prev_match, s.prev_length - MIN_MATCH);
      /* Insert in hash table all strings up to the end of the match.
       * strstart-1 and strstart are already inserted. If there is not
       * enough lookahead, the last two strings are not inserted in
       * the hash table.
       */
      s.lookahead -= s.prev_length - 1;
      s.prev_length -= 2;
      do {
        if (++s.strstart <= max_insert) {
          /*** INSERT_STRING(s, s.strstart, hash_head); ***/
          s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask;
          hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];
          s.head[s.ins_h] = s.strstart;
          /***/
        }
      } while (--s.prev_length !== 0);
      s.match_available = 0;
      s.match_length = MIN_MATCH - 1;
      s.strstart++;

      if (bflush) {
        /*** FLUSH_BLOCK(s, 0); ***/
        flush_block_only(s, false);
        if (s.strm.avail_out === 0) {
          return BS_NEED_MORE;
        }
        /***/
      }

    } else if (s.match_available) {
      /* If there was no match at the previous position, output a
       * single literal. If there was a match but the current match
       * is longer, truncate the previous match to a single literal.
       */
      //Tracevv((stderr,"%c", s->window[s->strstart-1]));
      /*** _tr_tally_lit(s, s.window[s.strstart-1], bflush); ***/
      bflush = trees._tr_tally(s, 0, s.window[s.strstart - 1]);

      if (bflush) {
        /*** FLUSH_BLOCK_ONLY(s, 0) ***/
        flush_block_only(s, false);
        /***/
      }
      s.strstart++;
      s.lookahead--;
      if (s.strm.avail_out === 0) {
        return BS_NEED_MORE;
      }
    } else {
      /* There is no previous match to compare with, wait for
       * the next step to decide.
       */
      s.match_available = 1;
      s.strstart++;
      s.lookahead--;
    }
  }
  //Assert (flush != Z_NO_FLUSH, "no flush?");
  /* Flush the last deferred literal, if any. */
  if (s.match_available) {
    //Tracevv((stderr,"%c", s->window[s->strstart-1]));
    /*** _tr_tally_lit(s, s.window[s.strstart-1], bflush); ***/
    bflush = trees._tr_tally(s, 0, s.window[s.strstart - 1]);

    s.match_available = 0;
  }
  s.insert = s.strstart < MIN_MATCH - 1 ? s.strstart : MIN_MATCH - 1;
  if (flush === Z_FINISH) {
    /*** FLUSH_BLOCK(s, 1); ***/
    flush_block_only(s, true);
    if (s.strm.avail_out === 0) {
      return BS_FINISH_STARTED;
    }
    /***/
    return BS_FINISH_DONE;
  }
  if (s.last_lit) {
    /*** FLUSH_BLOCK(s, 0); ***/
    flush_block_only(s, false);
    if (s.strm.avail_out === 0) {
      return BS_NEED_MORE;
    }
    /***/
  }
  return BS_BLOCK_DONE;
}
/* ===========================================================================
 * For Z_RLE, simply look for runs of bytes, generate matches only of distance
 * one.  Do not maintain a hash table.  (It will be regenerated if this run of
 * deflate switches away from Z_RLE.)
 */
function deflate_rle(s, flush) {
  var bflush;            /* set if current block must be flushed */
  var prev;              /* byte at distance one to match */
  var scan, strend;      /* scan goes up to strend for length of run */

  var _win = s.window;

  for (;;) {
    /* Make sure that we always have enough lookahead, except
     * at the end of the input file. We need MAX_MATCH bytes
     * for the longest run, plus one for the unrolled loop.
     */
    if (s.lookahead <= MAX_MATCH) {
      fill_window(s);
      if (s.lookahead <= MAX_MATCH && flush === Z_NO_FLUSH) {
        return BS_NEED_MORE;
      }
      if (s.lookahead === 0) { break; } /* flush the current block */
    }

    /* See how many times the previous byte repeats */
    s.match_length = 0;
    if (s.lookahead >= MIN_MATCH && s.strstart > 0) {
      scan = s.strstart - 1;
      prev = _win[scan];
      /* Only scan further if the run is at least MIN_MATCH long. */
      if (prev === _win[++scan] && prev === _win[++scan] && prev === _win[++scan]) {
        strend = s.strstart + MAX_MATCH;
        do {
          /*jshint noempty:false*/
        } while (prev === _win[++scan] && prev === _win[++scan] &&
                 prev === _win[++scan] && prev === _win[++scan] &&
                 prev === _win[++scan] && prev === _win[++scan] &&
                 prev === _win[++scan] && prev === _win[++scan] &&
                 scan < strend);
        s.match_length = MAX_MATCH - (strend - scan);
        if (s.match_length > s.lookahead) {
          s.match_length = s.lookahead;
        }
      }
      //Assert(scan <= s->window+(uInt)(s->window_size-1), "wild scan");
    }

    /* Emit match if have run of MIN_MATCH or longer, else emit literal */
    if (s.match_length >= MIN_MATCH) {
      //check_match(s, s.strstart, s.strstart - 1, s.match_length);

      /*** _tr_tally_dist(s, 1, s.match_length - MIN_MATCH, bflush); ***/
      bflush = trees._tr_tally(s, 1, s.match_length - MIN_MATCH);

      s.lookahead -= s.match_length;
      s.strstart += s.match_length;
      s.match_length = 0;
    } else {
      /* No match, output a literal byte */
      //Tracevv((stderr,"%c", s->window[s->strstart]));
      /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/
      bflush = trees._tr_tally(s, 0, s.window[s.strstart]);

      s.lookahead--;
      s.strstart++;
    }
    if (bflush) {
      /*** FLUSH_BLOCK(s, 0); ***/
      flush_block_only(s, false);
      if (s.strm.avail_out === 0) {
        return BS_NEED_MORE;
      }
      /***/
    }
  }
  s.insert = 0;
  if (flush === Z_FINISH) {
    /*** FLUSH_BLOCK(s, 1); ***/
    flush_block_only(s, true);
    if (s.strm.avail_out === 0) {
      return BS_FINISH_STARTED;
    }
    /***/
    return BS_FINISH_DONE;
  }
  if (s.last_lit) {
    /*** FLUSH_BLOCK(s, 0); ***/
    flush_block_only(s, false);
    if (s.strm.avail_out === 0) {
      return BS_NEED_MORE;
    }
    /***/
  }
  return BS_BLOCK_DONE;
}
/ * = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
* For Z _HUFFMAN _ONLY , do not look for matches . Do not maintain a hash table .
* ( It will be regenerated if this run of deflate switches away from Huffman . )
* /
/**
 * Compress with no matching at all (Z_HUFFMAN_ONLY): every input byte is
 * emitted as a literal, Huffman-coded. No hash table is maintained.
 *
 * @param {Object} s     - DeflateState
 * @param {number} flush - flush mode (Z_NO_FLUSH .. Z_FINISH)
 * @returns {number} one of the BS_* block-state codes
 */
function deflate_huff(s, flush) {
  var bflush;         /* set if current block must be flushed */

  for (;;) {
    /* Make sure that we have a literal to write. */
    if (s.lookahead === 0) {
      fill_window(s);
      if (s.lookahead === 0) {
        if (flush === Z_NO_FLUSH) {
          return BS_NEED_MORE;
        }
        break;      /* flush the current block */
      }
    }

    /* Output a literal byte */
    s.match_length = 0;
    //Tracevv((stderr,"%c", s->window[s->strstart]));
    /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/
    bflush = trees._tr_tally(s, 0, s.window[s.strstart]);
    s.lookahead--;
    s.strstart++;
    if (bflush) {
      /*** FLUSH_BLOCK(s, 0); ***/
      flush_block_only(s, false);
      if (s.strm.avail_out === 0) {
        return BS_NEED_MORE;
      }
      /***/
    }
  }
  s.insert = 0;
  if (flush === Z_FINISH) {
    /*** FLUSH_BLOCK(s, 1); ***/
    flush_block_only(s, true);
    if (s.strm.avail_out === 0) {
      return BS_FINISH_STARTED;
    }
    /***/
    return BS_FINISH_DONE;
  }
  if (s.last_lit) {
    /*** FLUSH_BLOCK(s, 0); ***/
    flush_block_only(s, false);
    if (s.strm.avail_out === 0) {
      return BS_NEED_MORE;
    }
    /***/
  }
  return BS_BLOCK_DONE;
}
/* Values for max_lazy_match, good_match and max_chain_length, depending on
 * the desired pack level (0..9). The values given below have been tuned to
 * exclude worst case performance for pathological files. Better values may be
 * found for specific files.
 */
/**
 * Tuning parameters for one compression level (see configuration_table).
 *
 * @param {number}   good_length - reduce lazy search above this match length
 * @param {number}   max_lazy    - do not perform lazy search above this length
 * @param {number}   nice_length - quit search when match is at least this long
 * @param {number}   max_chain   - maximum hash-chain length to follow
 * @param {Function} func        - deflate routine to use for this level
 */
function Config(good_length, max_lazy, nice_length, max_chain, func) {
  this.good_length = good_length;
  this.max_lazy = max_lazy;
  this.nice_length = nice_length;
  this.max_chain = max_chain;
  this.func = func;
}
// Per-level tuning table, indexed by compression level 0..9.
var configuration_table;

configuration_table = [
  /*      good lazy nice chain */
  new Config(0, 0, 0, 0, deflate_stored),          /* 0 store only */
  new Config(4, 4, 8, 4, deflate_fast),            /* 1 max speed, no lazy matches */
  new Config(4, 5, 16, 8, deflate_fast),           /* 2 */
  new Config(4, 6, 32, 32, deflate_fast),          /* 3 */

  new Config(4, 4, 16, 16, deflate_slow),          /* 4 lazy matches */
  new Config(8, 16, 32, 32, deflate_slow),         /* 5 */
  new Config(8, 16, 128, 128, deflate_slow),       /* 6 */
  new Config(8, 32, 128, 256, deflate_slow),       /* 7 */
  new Config(32, 128, 258, 1024, deflate_slow),    /* 8 */
  new Config(32, 258, 258, 4096, deflate_slow)     /* 9 max compression */
];
/* ===========================================================================
 * Initialize the "longest match" routines for a new zlib stream
 */
/** Reset the match-finder fields of DeflateState `s` to the defaults for its level. */
function lm_init(s) {
  s.window_size = 2 * s.w_size;

  /*** CLEAR_HASH(s); ***/
  zero(s.head); // Fill with NIL (= 0);

  /* Set the default configuration parameters:
   */
  s.max_lazy_match = configuration_table[s.level].max_lazy;
  s.good_match = configuration_table[s.level].good_length;
  s.nice_match = configuration_table[s.level].nice_length;
  s.max_chain_length = configuration_table[s.level].max_chain;

  s.strstart = 0;
  s.block_start = 0;
  s.lookahead = 0;
  s.insert = 0;
  s.match_length = s.prev_length = MIN_MATCH - 1;
  s.match_available = 0;
  s.ins_h = 0;
}
/**
 * Internal compression state for one deflate stream.
 * Plain data holder; fields are sized/filled by deflateInit2() and lm_init().
 */
function DeflateState() {
  this.strm = null;            /* pointer back to this zlib stream */
  this.status = 0;             /* as the name implies */
  this.pending_buf = null;     /* output still pending */
  this.pending_buf_size = 0;   /* size of pending_buf */
  this.pending_out = 0;        /* next pending byte to output to the stream */
  this.pending = 0;            /* nb of bytes in the pending buffer */
  this.wrap = 0;               /* bit 0 true for zlib, bit 1 true for gzip */
  this.gzhead = null;          /* gzip header information to write */
  this.gzindex = 0;            /* where in extra, name, or comment */
  this.method = Z_DEFLATED;    /* can only be DEFLATED */
  this.last_flush = -1;        /* value of flush param for previous deflate call */

  this.w_size = 0;             /* LZ77 window size (32K by default) */
  this.w_bits = 0;             /* log2(w_size)  (8..16) */
  this.w_mask = 0;             /* w_size - 1 */

  this.window = null;
  /* Sliding window. Input bytes are read into the second half of the window,
   * and move to the first half later to keep a dictionary of at least wSize
   * bytes. With this organization, matches are limited to a distance of
   * wSize-MAX_MATCH bytes, but this ensures that IO is always
   * performed with a length multiple of the block size.
   */

  this.window_size = 0;
  /* Actual size of window: 2*wSize, except when the user input buffer
   * is directly used as sliding window.
   */

  this.prev = null;
  /* Link to older string with same hash index. To limit the size of this
   * array to 64K, this link is maintained only for the last 32K strings.
   * An index in this array is thus a window index modulo 32K.
   */

  this.head = null;   /* Heads of the hash chains or NIL. */

  this.ins_h = 0;       /* hash index of string to be inserted */
  this.hash_size = 0;   /* number of elements in hash table */
  this.hash_bits = 0;   /* log2(hash_size) */
  this.hash_mask = 0;   /* hash_size-1 */

  this.hash_shift = 0;
  /* Number of bits by which ins_h must be shifted at each input
   * step. It must be such that after MIN_MATCH steps, the oldest
   * byte no longer takes part in the hash key, that is:
   *   hash_shift * MIN_MATCH >= hash_bits
   */

  this.block_start = 0;
  /* Window position at the beginning of the current output block. Gets
   * negative when the window is moved backwards.
   */

  this.match_length = 0;      /* length of best match */
  this.prev_match = 0;        /* previous match */
  this.match_available = 0;   /* set if previous match exists */
  this.strstart = 0;          /* start of string to insert */
  this.match_start = 0;       /* start of matching string */
  this.lookahead = 0;         /* number of valid bytes ahead in window */

  this.prev_length = 0;
  /* Length of the best match at previous step. Matches not greater than this
   * are discarded. This is used in the lazy match evaluation.
   */

  this.max_chain_length = 0;
  /* To speed up deflation, hash chains are never searched beyond this
   * length.  A higher limit improves compression ratio but degrades the
   * speed.
   */

  this.max_lazy_match = 0;
  /* Attempt to find a better match only when the current match is strictly
   * smaller than this value. This mechanism is used only for compression
   * levels >= 4.
   */
  // That's alias to max_lazy_match, don't use directly
  //this.max_insert_length = 0;
  /* Insert new strings in the hash table only if the match length is not
   * greater than this length. This saves time but degrades compression.
   * max_insert_length is used only for compression levels <= 3.
   */

  this.level = 0;     /* compression level (1..9) */
  this.strategy = 0;  /* favor or force Huffman coding*/

  this.good_match = 0;
  /* Use a faster search when the previous match is longer than this */

  this.nice_match = 0; /* Stop searching when current match exceeds this */

  /* used by trees.c: */

  /* Didn't use ct_data typedef below to suppress compiler warning */

  // struct ct_data_s dyn_ltree[HEAP_SIZE];   /* literal and length tree */
  // struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */
  // struct ct_data_s bl_tree[2*BL_CODES+1];  /* Huffman tree for bit lengths */

  // Use flat array of DOUBLE size, with interleaved data,
  // because JS does not support struct arrays efficiently.
  this.dyn_ltree = new utils.Buf16(HEAP_SIZE * 2);
  this.dyn_dtree = new utils.Buf16((2 * D_CODES + 1) * 2);
  this.bl_tree = new utils.Buf16((2 * BL_CODES + 1) * 2);
  zero(this.dyn_ltree);
  zero(this.dyn_dtree);
  zero(this.bl_tree);

  this.l_desc = null;          /* desc. for literal tree */
  this.d_desc = null;          /* desc. for distance tree */
  this.bl_desc = null;         /* desc. for bit length tree */

  //ush bl_count[MAX_BITS+1];
  this.bl_count = new utils.Buf16(MAX_BITS + 1);
  /* number of codes at each bit length for an optimal tree */

  //int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */
  this.heap = new utils.Buf16(2 * L_CODES + 1);  /* heap used to build the Huffman trees */
  zero(this.heap);

  this.heap_len = 0;               /* number of elements in the heap */
  this.heap_max = 0;               /* element of largest frequency */
  /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.
   * The same heap array is used to build all trees.
   */

  this.depth = new utils.Buf16(2 * L_CODES + 1); //uch depth[2*L_CODES+1];
  zero(this.depth);
  /* Depth of each subtree used as tie breaker for trees of equal frequency
   */

  this.l_buf = 0;          /* buffer index for literals or lengths */

  this.lit_bufsize = 0;
  /* Size of match buffer for literals/lengths.  There are 4 reasons for
   * limiting lit_bufsize to 64K:
   *   - frequencies can be kept in 16 bit counters
   *   - if compression is not successful for the first block, all input
   *     data is still in the window so we can still emit a stored block even
   *     when input comes from standard input.  (This can also be done for
   *     all blocks if lit_bufsize is not greater than 32K.)
   *   - if compression is not successful for a file smaller than 64K, we can
   *     even emit a stored file instead of a stored block (saving 5 bytes).
   *     This is applicable only for zip (not gzip or zlib).
   *   - creating new Huffman trees less frequently may not provide fast
   *     adaptation to changes in the input data statistics. (Take for
   *     example a binary file with poorly compressible code followed by
   *     a highly compressible string table.) Smaller buffer sizes give
   *     fast adaptation but have of course the overhead of transmitting
   *     trees more frequently.
   *   - I can't count above 4
   */

  this.last_lit = 0;      /* running index in l_buf */

  this.d_buf = 0;
  /* Buffer index for distances. To simplify the code, d_buf and l_buf have
   * the same number of elements. To use different lengths, an extra flag
   * array would be necessary.
   */

  this.opt_len = 0;       /* bit length of current block with optimal trees */
  this.static_len = 0;    /* bit length of current block with static trees */
  this.matches = 0;       /* number of string matches in current block */
  this.insert = 0;        /* bytes at end of window left to insert */

  this.bi_buf = 0;
  /* Output buffer. bits are inserted starting at the bottom (least
   * significant bits).
   */
  this.bi_valid = 0;
  /* Number of valid bits in bi_buf.  All bits above the last valid bit
   * are always zero.
   */

  // Used for window memory init. We safely ignore it for JS. That makes
  // sense only for pointers and memory check tools.
  //this.high_water = 0;
  /* High water mark offset in window for initialized bytes -- bytes above
   * this are set to zero in order to avoid memory check warnings when
   * longest match routines access bytes past the input.  This is then
   * updated to the new high water mark.
   */
}
/**
 * Reset the stream's counters, pending output and trees without touching
 * the allocated buffers or the match-finder state (see deflateReset()).
 *
 * @param {Object} strm - zlib stream
 * @returns {number} Z_OK or Z_STREAM_ERROR
 */
function deflateResetKeep(strm) {
  var s;

  if (!strm || !strm.state) {
    return err(strm, Z_STREAM_ERROR);
  }

  strm.total_in = strm.total_out = 0;
  strm.data_type = Z_UNKNOWN;

  s = strm.state;
  s.pending = 0;
  s.pending_out = 0;

  if (s.wrap < 0) {
    s.wrap = -s.wrap;
    /* was made negative by deflate(..., Z_FINISH); */
  }
  s.status = (s.wrap ? INIT_STATE : BUSY_STATE);
  strm.adler = (s.wrap === 2) ?
    0  // crc32(0, Z_NULL, 0)
    :
    1; // adler32(0, Z_NULL, 0)
  s.last_flush = Z_NO_FLUSH;
  trees._tr_init(s);
  return Z_OK;
}
/**
 * Full reset: deflateResetKeep() plus re-initialization of the
 * longest-match state (window, hash table, level parameters).
 *
 * @param {Object} strm - zlib stream
 * @returns {number} Z_OK or Z_STREAM_ERROR
 */
function deflateReset(strm) {
  var ret = deflateResetKeep(strm);
  if (ret === Z_OK) {
    lm_init(strm.state);
  }
  return ret;
}
/**
 * Attach a GZheader to be written out by deflate(). Only valid when the
 * stream was opened with a gzip wrapper (wrap === 2).
 *
 * @param {Object} strm - zlib stream
 * @param {Object} head - GZheader instance
 * @returns {number} Z_OK or Z_STREAM_ERROR
 */
function deflateSetHeader(strm, head) {
  if (!strm || !strm.state) { return Z_STREAM_ERROR; }
  if (strm.state.wrap !== 2) { return Z_STREAM_ERROR; }
  strm.state.gzhead = head;
  return Z_OK;
}
/**
 * Full-control initializer: validates the parameters, allocates the
 * DeflateState with its window/hash/pending buffers, then deflateReset()s.
 *
 * @param {Object} strm       - zlib stream
 * @param {number} level      - 0..9 or Z_DEFAULT_COMPRESSION
 * @param {number} method     - must be Z_DEFLATED
 * @param {number} windowBits - 8..15; +16 → gzip wrapper, negative → raw
 * @param {number} memLevel   - 1..MAX_MEM_LEVEL
 * @param {number} strategy   - 0..Z_FIXED
 * @returns {number} Z_OK or Z_STREAM_ERROR
 */
function deflateInit2(strm, level, method, windowBits, memLevel, strategy) {
  if (!strm) { // === Z_NULL
    return Z_STREAM_ERROR;
  }
  var wrap = 1;

  if (level === Z_DEFAULT_COMPRESSION) {
    level = 6;
  }

  if (windowBits < 0) { /* suppress zlib wrapper */
    wrap = 0;
    windowBits = -windowBits;
  }
  else if (windowBits > 15) {
    wrap = 2;           /* write gzip wrapper instead */
    windowBits -= 16;
  }

  if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method !== Z_DEFLATED ||
    windowBits < 8 || windowBits > 15 || level < 0 || level > 9 ||
    strategy < 0 || strategy > Z_FIXED) {
    return err(strm, Z_STREAM_ERROR);
  }

  if (windowBits === 8) {
    windowBits = 9;
  }
  /* until 256-byte window bug fixed */

  var s = new DeflateState();

  strm.state = s;
  s.strm = strm;

  s.wrap = wrap;
  s.gzhead = null;
  s.w_bits = windowBits;
  s.w_size = 1 << s.w_bits;
  s.w_mask = s.w_size - 1;

  s.hash_bits = memLevel + 7;
  s.hash_size = 1 << s.hash_bits;
  s.hash_mask = s.hash_size - 1;
  s.hash_shift = ~~((s.hash_bits + MIN_MATCH - 1) / MIN_MATCH);

  s.window = new utils.Buf8(s.w_size * 2);
  s.head = new utils.Buf16(s.hash_size);
  s.prev = new utils.Buf16(s.w_size);

  // Don't need mem init magic for JS.
  //s.high_water = 0;  /* nothing written to s->window yet */

  s.lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */

  s.pending_buf_size = s.lit_bufsize * 4;

  //overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2);
  //s->pending_buf = (uchf *) overlay;
  s.pending_buf = new utils.Buf8(s.pending_buf_size);

  // It is offset from `s.pending_buf` (size is `s.lit_bufsize * 2`)
  //s->d_buf = overlay + s->lit_bufsize/sizeof(ush);
  s.d_buf = 1 * s.lit_bufsize;

  //s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;
  s.l_buf = (1 + 2) * s.lit_bufsize;

  s.level = level;
  s.strategy = strategy;
  s.method = method;

  return deflateReset(strm);
}
/** Convenience initializer with zlib's default method/windowBits/memLevel/strategy. */
function deflateInit(strm, level) {
  return deflateInit2(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY);
}
/**
 * Main entry point: consume as much of strm.input as possible and produce
 * compressed output into strm.output. Writes the zlib/gzip header on the
 * first call and the trailer after Z_FINISH completes.
 *
 * @param {Object} strm  - zlib stream with input/output buffers attached
 * @param {number} flush - Z_NO_FLUSH .. Z_FINISH / Z_BLOCK
 * @returns {number} Z_OK, Z_STREAM_END, or a negative error code
 */
function deflate(strm, flush) {
  var old_flush, s;
  var beg, val; // for gzip header write only

  if (!strm || !strm.state ||
    flush > Z_BLOCK || flush < 0) {
    return strm ? err(strm, Z_STREAM_ERROR) : Z_STREAM_ERROR;
  }

  s = strm.state;

  if (!strm.output ||
      (!strm.input && strm.avail_in !== 0) ||
      (s.status === FINISH_STATE && flush !== Z_FINISH)) {
    return err(strm, (strm.avail_out === 0) ? Z_BUF_ERROR : Z_STREAM_ERROR);
  }

  s.strm = strm; /* just in case */
  old_flush = s.last_flush;
  s.last_flush = flush;

  /* Write the header */
  if (s.status === INIT_STATE) {
    if (s.wrap === 2) { // GZIP header
      strm.adler = 0;  //crc32(0L, Z_NULL, 0);
      put_byte(s, 31);
      put_byte(s, 139);
      put_byte(s, 8);
      if (!s.gzhead) { // s->gzhead == Z_NULL
        put_byte(s, 0);
        put_byte(s, 0);
        put_byte(s, 0);
        put_byte(s, 0);
        put_byte(s, 0);
        put_byte(s, s.level === 9 ? 2 :
                    (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2 ?
                     4 : 0));
        put_byte(s, OS_CODE);
        s.status = BUSY_STATE;
      }
      else {
        put_byte(s, (s.gzhead.text ? 1 : 0) +
                    (s.gzhead.hcrc ? 2 : 0) +
                    (!s.gzhead.extra ? 0 : 4) +
                    (!s.gzhead.name ? 0 : 8) +
                    (!s.gzhead.comment ? 0 : 16)
        );
        put_byte(s, s.gzhead.time & 0xff);
        put_byte(s, (s.gzhead.time >> 8) & 0xff);
        put_byte(s, (s.gzhead.time >> 16) & 0xff);
        put_byte(s, (s.gzhead.time >> 24) & 0xff);
        put_byte(s, s.level === 9 ? 2 :
                    (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2 ?
                     4 : 0));
        put_byte(s, s.gzhead.os & 0xff);
        if (s.gzhead.extra && s.gzhead.extra.length) {
          put_byte(s, s.gzhead.extra.length & 0xff);
          put_byte(s, (s.gzhead.extra.length >> 8) & 0xff);
        }
        if (s.gzhead.hcrc) {
          strm.adler = crc32(strm.adler, s.pending_buf, s.pending, 0);
        }
        s.gzindex = 0;
        s.status = EXTRA_STATE;
      }
    }
    else // DEFLATE header
    {
      var header = (Z_DEFLATED + ((s.w_bits - 8) << 4)) << 8;
      var level_flags = -1;

      if (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2) {
        level_flags = 0;
      } else if (s.level < 6) {
        level_flags = 1;
      } else if (s.level === 6) {
        level_flags = 2;
      } else {
        level_flags = 3;
      }
      header |= (level_flags << 6);
      if (s.strstart !== 0) { header |= PRESET_DICT; }
      header += 31 - (header % 31);

      s.status = BUSY_STATE;
      putShortMSB(s, header);

      /* Save the adler32 of the preset dictionary: */
      if (s.strstart !== 0) {
        putShortMSB(s, strm.adler >>> 16);
        putShortMSB(s, strm.adler & 0xffff);
      }
      strm.adler = 1; // adler32(0L, Z_NULL, 0);
    }
  }

//#ifdef GZIP
  if (s.status === EXTRA_STATE) {
    if (s.gzhead.extra/* != Z_NULL*/) {
      beg = s.pending;  /* start of bytes to update crc */

      while (s.gzindex < (s.gzhead.extra.length & 0xffff)) {
        if (s.pending === s.pending_buf_size) {
          if (s.gzhead.hcrc && s.pending > beg) {
            strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);
          }
          flush_pending(strm);
          beg = s.pending;
          if (s.pending === s.pending_buf_size) {
            break;
          }
        }
        put_byte(s, s.gzhead.extra[s.gzindex] & 0xff);
        s.gzindex++;
      }
      if (s.gzhead.hcrc && s.pending > beg) {
        strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);
      }
      if (s.gzindex === s.gzhead.extra.length) {
        s.gzindex = 0;
        s.status = NAME_STATE;
      }
    }
    else {
      s.status = NAME_STATE;
    }
  }
  if (s.status === NAME_STATE) {
    if (s.gzhead.name/* != Z_NULL*/) {
      beg = s.pending;  /* start of bytes to update crc */
      //int val;

      do {
        if (s.pending === s.pending_buf_size) {
          if (s.gzhead.hcrc && s.pending > beg) {
            strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);
          }
          flush_pending(strm);
          beg = s.pending;
          if (s.pending === s.pending_buf_size) {
            val = 1;
            break;
          }
        }
        // JS specific: little magic to add zero terminator to end of string
        if (s.gzindex < s.gzhead.name.length) {
          val = s.gzhead.name.charCodeAt(s.gzindex++) & 0xff;
        } else {
          val = 0;
        }
        put_byte(s, val);
      } while (val !== 0);

      if (s.gzhead.hcrc && s.pending > beg) {
        strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);
      }
      if (val === 0) {
        s.gzindex = 0;
        s.status = COMMENT_STATE;
      }
    }
    else {
      s.status = COMMENT_STATE;
    }
  }
  if (s.status === COMMENT_STATE) {
    if (s.gzhead.comment/* != Z_NULL*/) {
      beg = s.pending;  /* start of bytes to update crc */
      //int val;

      do {
        if (s.pending === s.pending_buf_size) {
          if (s.gzhead.hcrc && s.pending > beg) {
            strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);
          }
          flush_pending(strm);
          beg = s.pending;
          if (s.pending === s.pending_buf_size) {
            val = 1;
            break;
          }
        }
        // JS specific: little magic to add zero terminator to end of string
        if (s.gzindex < s.gzhead.comment.length) {
          val = s.gzhead.comment.charCodeAt(s.gzindex++) & 0xff;
        } else {
          val = 0;
        }
        put_byte(s, val);
      } while (val !== 0);

      if (s.gzhead.hcrc && s.pending > beg) {
        strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);
      }
      if (val === 0) {
        s.status = HCRC_STATE;
      }
    }
    else {
      s.status = HCRC_STATE;
    }
  }
  if (s.status === HCRC_STATE) {
    if (s.gzhead.hcrc) {
      if (s.pending + 2 > s.pending_buf_size) {
        flush_pending(strm);
      }
      if (s.pending + 2 <= s.pending_buf_size) {
        put_byte(s, strm.adler & 0xff);
        put_byte(s, (strm.adler >> 8) & 0xff);
        strm.adler = 0; //crc32(0L, Z_NULL, 0);
        s.status = BUSY_STATE;
      }
    }
    else {
      s.status = BUSY_STATE;
    }
  }
//#endif

  /* Flush as much pending output as possible */
  if (s.pending !== 0) {
    flush_pending(strm);
    if (strm.avail_out === 0) {
      /* Since avail_out is 0, deflate will be called again with
       * more output space, but possibly with both pending and
       * avail_in equal to zero. There won't be anything to do,
       * but this is not an error situation so make sure we
       * return OK instead of BUF_ERROR at next call of deflate:
       */
      s.last_flush = -1;
      return Z_OK;
    }

    /* Make sure there is something to do and avoid duplicate consecutive
     * flushes. For repeated and useless calls with Z_FINISH, we keep
     * returning Z_STREAM_END instead of Z_BUF_ERROR.
     */
  } else if (strm.avail_in === 0 && rank(flush) <= rank(old_flush) &&
    flush !== Z_FINISH) {
    return err(strm, Z_BUF_ERROR);
  }

  /* User must not provide more input after the first FINISH: */
  if (s.status === FINISH_STATE && strm.avail_in !== 0) {
    return err(strm, Z_BUF_ERROR);
  }

  /* Start a new block or continue the current one.
   */
  if (strm.avail_in !== 0 || s.lookahead !== 0 ||
    (flush !== Z_NO_FLUSH && s.status !== FINISH_STATE)) {
    var bstate = (s.strategy === Z_HUFFMAN_ONLY) ? deflate_huff(s, flush) :
      (s.strategy === Z_RLE ? deflate_rle(s, flush) :
        configuration_table[s.level].func(s, flush));

    if (bstate === BS_FINISH_STARTED || bstate === BS_FINISH_DONE) {
      s.status = FINISH_STATE;
    }
    if (bstate === BS_NEED_MORE || bstate === BS_FINISH_STARTED) {
      if (strm.avail_out === 0) {
        s.last_flush = -1;
        /* avoid BUF_ERROR next call, see above */
      }
      return Z_OK;
      /* If flush != Z_NO_FLUSH && avail_out == 0, the next call
       * of deflate should use the same flush parameter to make sure
       * that the flush is complete. So we don't have to output an
       * empty block here, this will be done at next call. This also
       * ensures that for a very small output buffer, we emit at most
       * one empty block.
       */
    }
    if (bstate === BS_BLOCK_DONE) {
      if (flush === Z_PARTIAL_FLUSH) {
        trees._tr_align(s);
      }
      else if (flush !== Z_BLOCK) { /* FULL_FLUSH or SYNC_FLUSH */

        trees._tr_stored_block(s, 0, 0, false);
        /* For a full flush, this empty block will be recognized
         * as a special marker by inflate_sync().
         */
        if (flush === Z_FULL_FLUSH) {
          /*** CLEAR_HASH(s); ***/             /* forget history */
          zero(s.head); // Fill with NIL (= 0);

          if (s.lookahead === 0) {
            s.strstart = 0;
            s.block_start = 0;
            s.insert = 0;
          }
        }
      }
      flush_pending(strm);
      if (strm.avail_out === 0) {
        s.last_flush = -1; /* avoid BUF_ERROR at next call, see above */
        return Z_OK;
      }
    }
  }
  //Assert(strm->avail_out > 0, "bug2");
  //if (strm.avail_out <= 0) { throw new Error("bug2");}

  if (flush !== Z_FINISH) { return Z_OK; }
  if (s.wrap <= 0) { return Z_STREAM_END; }

  /* Write the trailer */
  if (s.wrap === 2) {
    put_byte(s, strm.adler & 0xff);
    put_byte(s, (strm.adler >> 8) & 0xff);
    put_byte(s, (strm.adler >> 16) & 0xff);
    put_byte(s, (strm.adler >> 24) & 0xff);
    put_byte(s, strm.total_in & 0xff);
    put_byte(s, (strm.total_in >> 8) & 0xff);
    put_byte(s, (strm.total_in >> 16) & 0xff);
    put_byte(s, (strm.total_in >> 24) & 0xff);
  }
  else
  {
    putShortMSB(s, strm.adler >>> 16);
    putShortMSB(s, strm.adler & 0xffff);
  }

  flush_pending(strm);
  /* If avail_out is zero, the application will call deflate again
   * to flush the rest.
   */
  if (s.wrap > 0) { s.wrap = -s.wrap; }
  /* write the trailer only once! */
  return s.pending !== 0 ? Z_OK : Z_STREAM_END;
}
/**
 * Discard the compression state. Returns Z_DATA_ERROR if the stream was
 * freed mid-compression (status BUSY), Z_STREAM_ERROR for an invalid
 * stream/status, Z_OK otherwise.
 *
 * @param {Object} strm - zlib stream
 * @returns {number} Z_OK, Z_DATA_ERROR or Z_STREAM_ERROR
 */
function deflateEnd(strm) {
  var status;

  if (!strm/*== Z_NULL*/ || !strm.state/*== Z_NULL*/) {
    return Z_STREAM_ERROR;
  }

  status = strm.state.status;
  if (status !== INIT_STATE &&
    status !== EXTRA_STATE &&
    status !== NAME_STATE &&
    status !== COMMENT_STATE &&
    status !== HCRC_STATE &&
    status !== BUSY_STATE &&
    status !== FINISH_STATE
  ) {
    return err(strm, Z_STREAM_ERROR);
  }

  strm.state = null;

  return status === BUSY_STATE ? err(strm, Z_DATA_ERROR) : Z_OK;
}
/* =========================================================================
 * Initializes the compression dictionary from the given byte
 * sequence without producing any compressed output.
 */
/**
 * Prime the LZ77 window and hash chains with `dictionary` so subsequent
 * matches can reference it; no compressed output is produced.
 *
 * @param {Object} strm        - zlib stream (must be freshly initialized for wrap===1)
 * @param {Uint8Array} dictionary - dictionary bytes; only the last w_size bytes are used
 * @returns {number} Z_OK or Z_STREAM_ERROR
 */
function deflateSetDictionary(strm, dictionary) {
  var dictLength = dictionary.length;

  var s;
  var str, n;
  var wrap;
  var avail;
  var next;
  var input;
  var tmpDict;

  if (!strm/*== Z_NULL*/ || !strm.state/*== Z_NULL*/) {
    return Z_STREAM_ERROR;
  }

  s = strm.state;
  wrap = s.wrap;

  if (wrap === 2 || (wrap === 1 && s.status !== INIT_STATE) || s.lookahead) {
    return Z_STREAM_ERROR;
  }

  /* when using zlib wrappers, compute Adler-32 for provided dictionary */
  if (wrap === 1) {
    /* adler32(strm->adler, dictionary, dictLength); */
    strm.adler = adler32(strm.adler, dictionary, dictLength, 0);
  }

  s.wrap = 0;   /* avoid computing Adler-32 in read_buf */

  /* if dictionary would fill window, just replace the history */
  if (dictLength >= s.w_size) {
    if (wrap === 0) {            /* already empty otherwise */
      /*** CLEAR_HASH(s); ***/
      zero(s.head); // Fill with NIL (= 0);
      s.strstart = 0;
      s.block_start = 0;
      s.insert = 0;
    }
    /* use the tail */
    // dictionary = dictionary.slice(dictLength - s.w_size);
    tmpDict = new utils.Buf8(s.w_size);
    utils.arraySet(tmpDict, dictionary, dictLength - s.w_size, s.w_size, 0);
    dictionary = tmpDict;
    dictLength = s.w_size;
  }
  /* insert dictionary into window and hash */
  avail = strm.avail_in;
  next = strm.next_in;
  input = strm.input;
  strm.avail_in = dictLength;
  strm.next_in = 0;
  strm.input = dictionary;
  fill_window(s);
  while (s.lookahead >= MIN_MATCH) {
    str = s.strstart;
    n = s.lookahead - (MIN_MATCH - 1);
    do {
      /* UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); */
      s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[str + MIN_MATCH - 1]) & s.hash_mask;

      s.prev[str & s.w_mask] = s.head[s.ins_h];

      s.head[s.ins_h] = str;
      str++;
    } while (--n);
    s.strstart = str;
    s.lookahead = MIN_MATCH - 1;
    fill_window(s);
  }
  s.strstart += s.lookahead;
  s.block_start = s.strstart;
  s.insert = s.lookahead;
  s.lookahead = 0;
  s.match_length = s.prev_length = MIN_MATCH - 1;
  s.match_available = 0;
  strm.next_in = next;
  strm.input = input;
  strm.avail_in = avail;
  s.wrap = wrap;
  return Z_OK;
}
// Public module surface.
exports.deflateInit = deflateInit;
exports.deflateInit2 = deflateInit2;
exports.deflateReset = deflateReset;
exports.deflateResetKeep = deflateResetKeep;
exports.deflateSetHeader = deflateSetHeader;
exports.deflate = deflate;
exports.deflateEnd = deflateEnd;
exports.deflateSetDictionary = deflateSetDictionary;
exports.deflateInfo = 'pako deflate (from Nodeca project)';

/* Not implemented
exports.deflateBound = deflateBound;
exports.deflateCopy = deflateCopy;
exports.deflateParams = deflateParams;
exports.deflatePending = deflatePending;
exports.deflatePrime = deflatePrime;
exports.deflateTune = deflateTune;
*/
} , { "../utils/common" : 24 , "./adler32" : 26 , "./crc32" : 28 , "./messages" : 34 , "./trees" : 35 } ] , 30 : [ function ( require , module , exports ) {
'use strict' ;
/**
 * gzip header fields container, mirroring zlib's gz_header struct.
 * All fields start at their "empty" defaults; users fill them before
 * deflateSetHeader(), or inflate fills them while reading a header.
 */
function GZheader() {
  /* true if compressed data believed to be text */
  this.text = 0;
  /* modification time */
  this.time = 0;
  /* extra flags (not used when writing a gzip file) */
  this.xflags = 0;
  /* operating system */
  this.os = 0;
  /* pointer to extra field or Z_NULL if none */
  this.extra = null;
  /* extra field length (valid if extra != Z_NULL) */
  this.extra_len = 0; // Actually, we don't need it in JS,
                      // but leave for few code modifications

  //
  // Setup limits is not necessary because in js we should not preallocate memory
  // for inflate use constant limit in 65536 bytes
  //

  /* space at extra (only when reading header) */
  // this.extra_max  = 0;
  /* pointer to zero-terminated file name or Z_NULL */
  this.name = '';
  /* space at name (only when reading header) */
  // this.name_max   = 0;
  /* pointer to zero-terminated comment or Z_NULL */
  this.comment = '';
  /* space at comment (only when reading header) */
  // this.comm_max   = 0;
  /* true if there was or will be a header crc */
  this.hcrc = 0;
  /* true when done reading gzip header (not used when writing a gzip file) */
  this.done = false;
}
module . exports = GZheader ;
} , { } ] , 31 : [ function ( require , module , exports ) {
'use strict' ;
// See state defs from inflate.js
var BAD = 30;       /* got a data error -- remain here until reset */
var TYPE = 12;      /* i: waiting for type bits, including last-flag bit */
/*
   Decode literal, length, and distance codes and write out the resulting
   literal and match bytes until either not enough input or output is
   available, an end-of-block is encountered, or a data error is encountered.
   When large enough input and output buffers are supplied to inflate(), for
   example, a 16K input buffer and a 64K output buffer, more than 95% of the
   inflate execution time is spent in this routine.

   Entry assumptions:

        state.mode === LEN
        strm.avail_in >= 6
        strm.avail_out >= 258
        start >= strm.avail_out
        state.bits < 8

   On return, state.mode is one of:

        LEN -- ran out of enough output space or enough available input
        TYPE -- reached end of block code, inflate() to interpret next block
        BAD -- error in block data

   Notes:

    - The maximum input bits used by a length/distance pair is 15 bits for the
      length code, 5 bits for the length extra, 15 bits for the distance code,
      and 13 bits for the distance extra.  This totals 48 bits, or six bytes.
      Therefore if strm.avail_in >= 6, then there is enough input to avoid
      checking for available input while decoding.

    - The maximum bytes that a single length/distance pair can output is 258
      bytes, which is the maximum length that can be coded.  inflate_fast()
      requires strm.avail_out >= 258 for each loop to avoid checking for
      output space.
 */
module.exports = function inflate_fast(strm, start) {
  var state;
  var _in;                    /* local strm.input (read index) */
  var last;                   /* have enough input while in < last */
  var _out;                   /* local strm.output (write index) */
  var beg;                    /* inflate()'s initial strm.output */
  var end;                    /* while out < end, enough space available */
//#ifdef INFLATE_STRICT
  var dmax;                   /* maximum distance from zlib header */
//#endif
  var wsize;                  /* window size or zero if not using window */
  var whave;                  /* valid bytes in the window */
  var wnext;                  /* window write index */
  // Use `s_window` instead `window`, avoid conflict with instrumentation tools
  var s_window;               /* allocated sliding window, if wsize != 0 */
  var hold;                   /* local strm.hold (bit accumulator) */
  var bits;                   /* local strm.bits (bits in accumulator) */
  var lcode;                  /* local strm.lencode */
  var dcode;                  /* local strm.distcode */
  var lmask;                  /* mask for first level of length codes */
  var dmask;                  /* mask for first level of distance codes */
  var here;                   /* retrieved table entry (packed bits/op/val) */
  var op;                     /* code bits, operation, extra bits, or */
                              /*  window position, window bytes to copy */
  var len;                    /* match length, unused bytes */
  var dist;                   /* match distance */
  var from;                   /* where to copy match from */
  var from_source;            /* buffer to copy match from (window or output) */

  var input, output; // JS specific, because we have no pointers

  /* copy state to local variables */
  state = strm.state;
  //here = state.here;
  _in = strm.next_in;
  input = strm.input;
  /* `last` and `end` are sentinels: while _in < last there are >= 6 input
     bytes left, and while _out < end there are >= 258 output bytes free,
     so the hot loop needs no per-byte bounds checks (see header notes). */
  last = _in + (strm.avail_in - 5);
  _out = strm.next_out;
  output = strm.output;
  beg = _out - (start - strm.avail_out);
  end = _out + (strm.avail_out - 257);
//#ifdef INFLATE_STRICT
  dmax = state.dmax;
//#endif
  wsize = state.wsize;
  whave = state.whave;
  wnext = state.wnext;
  s_window = state.window;
  hold = state.hold;
  bits = state.bits;
  lcode = state.lencode;
  dcode = state.distcode;
  lmask = (1 << state.lenbits) - 1;
  dmask = (1 << state.distbits) - 1;

  /* decode literals and length/distances until end-of-block or not enough
     input data or output space */

  top:
  do {
    /* ensure at least 15 bits in the accumulator: enough for any
       length/literal code in one table lookup */
    if (bits < 15) {
      hold += input[_in++] << bits;
      bits += 8;
      hold += input[_in++] << bits;
      bits += 8;
    }

    here = lcode[hold & lmask];

    dolen:
    for (;;) { // Goto emulation
      op = here >>> 24/*here.bits*/;
      hold >>>= op;
      bits -= op;
      op = (here >>> 16) & 0xff/*here.op*/;
      if (op === 0) {                          /* literal */
        //Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ?
        //        "inflate:         literal '%c'\n" :
        //        "inflate:         literal 0x%02x\n", here.val));
        output[_out++] = here & 0xffff/*here.val*/;
      }
      else if (op & 16) {                      /* length base */
        len = here & 0xffff/*here.val*/;
        op &= 15;                              /* number of extra bits */
        if (op) {
          if (bits < op) {
            hold += input[_in++] << bits;
            bits += 8;
          }
          len += hold & ((1 << op) - 1);
          hold >>>= op;
          bits -= op;
        }
        //Tracevv((stderr, "inflate:         length %u\n", len));
        /* refill for the distance code lookup (up to 15 bits) */
        if (bits < 15) {
          hold += input[_in++] << bits;
          bits += 8;
          hold += input[_in++] << bits;
          bits += 8;
        }
        here = dcode[hold & dmask];

        dodist:
        for (;;) { // goto emulation
          op = here >>> 24/*here.bits*/;
          hold >>>= op;
          bits -= op;
          op = (here >>> 16) & 0xff/*here.op*/;

          if (op & 16) {                       /* distance base */
            dist = here & 0xffff/*here.val*/;
            op &= 15;                          /* number of extra bits */
            if (bits < op) {
              hold += input[_in++] << bits;
              bits += 8;
              if (bits < op) {
                hold += input[_in++] << bits;
                bits += 8;
              }
            }
            dist += hold & ((1 << op) - 1);
//#ifdef INFLATE_STRICT
            if (dist > dmax) {
              strm.msg = 'invalid distance too far back';
              state.mode = BAD;
              break top;
            }
//#endif
            hold >>>= op;
            bits -= op;
            //Tracevv((stderr, "inflate:         distance %u\n", dist));
            op = _out - beg;                   /* max distance in output */
            if (dist > op) {                   /* see if copy from window */
              op = dist - op;                  /* distance back in window */
              if (op > whave) {
                if (state.sane) {
                  strm.msg = 'invalid distance too far back';
                  state.mode = BAD;
                  break top;
                }

// (!) This block is disabled in zlib defaults,
// don't enable it for binary compatibility
//#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR
//                if (len <= op - whave) {
//                  do {
//                    output[_out++] = 0;
//                  } while (--len);
//                  continue top;
//                }
//                len -= op - whave;
//                do {
//                  output[_out++] = 0;
//                } while (--op > whave);
//                if (op === 0) {
//                  from = _out - dist;
//                  do {
//                    output[_out++] = output[from++];
//                  } while (--len);
//                  continue top;
//                }
//#endif
              }
              from = 0; // window index
              from_source = s_window;
              if (wnext === 0) {               /* very common case */
                from += wsize - op;
                if (op < len) {                /* some from window */
                  len -= op;
                  do {
                    output[_out++] = s_window[from++];
                  } while (--op);
                  from = _out - dist;          /* rest from output */
                  from_source = output;
                }
              }
              else if (wnext < op) {           /* wrap around window */
                from += wsize + wnext - op;
                op -= wnext;
                if (op < len) {                /* some from end of window */
                  len -= op;
                  do {
                    output[_out++] = s_window[from++];
                  } while (--op);
                  from = 0;
                  if (wnext < len) {           /* some from start of window */
                    op = wnext;
                    len -= op;
                    do {
                      output[_out++] = s_window[from++];
                    } while (--op);
                    from = _out - dist;        /* rest from output */
                    from_source = output;
                  }
                }
              }
              else {                           /* contiguous in window */
                from += wnext - op;
                if (op < len) {                /* some from window */
                  len -= op;
                  do {
                    output[_out++] = s_window[from++];
                  } while (--op);
                  from = _out - dist;          /* rest from output */
                  from_source = output;
                }
              }
              /* copy remaining match bytes three at a time */
              while (len > 2) {
                output[_out++] = from_source[from++];
                output[_out++] = from_source[from++];
                output[_out++] = from_source[from++];
                len -= 3;
              }
              if (len) {
                output[_out++] = from_source[from++];
                if (len > 1) {
                  output[_out++] = from_source[from++];
                }
              }
            }
            else {
              from = _out - dist;              /* copy direct from output */
              do {                             /* minimum length is three */
                output[_out++] = output[from++];
                output[_out++] = output[from++];
                output[_out++] = output[from++];
                len -= 3;
              } while (len > 2);
              if (len) {
                output[_out++] = output[from++];
                if (len > 1) {
                  output[_out++] = output[from++];
                }
              }
            }
          }
          else if ((op & 64) === 0) {          /* 2nd level distance code */
            here = dcode[(here & 0xffff)/*here.val*/ + (hold & ((1 << op) - 1))];
            continue dodist;
          }
          else {
            strm.msg = 'invalid distance code';
            state.mode = BAD;
            break top;
          }

          break; // need to emulate goto via "continue"
        }
      }
      else if ((op & 64) === 0) {              /* 2nd level length code */
        here = lcode[(here & 0xffff)/*here.val*/ + (hold & ((1 << op) - 1))];
        continue dolen;
      }
      else if (op & 32) {                      /* end-of-block */
        //Tracevv((stderr, "inflate:         end of block\n"));
        state.mode = TYPE;
        break top;
      }
      else {
        strm.msg = 'invalid literal/length code';
        state.mode = BAD;
        break top;
      }

      break; // need to emulate goto via "continue"
    }
  } while (_in < last && _out < end);

  /* return unused bytes (on entry, bits < 8, so in won't go too far back) */
  len = bits >> 3;
  _in -= len;
  bits -= len << 3;
  hold &= (1 << bits) - 1;

  /* update state and return */
  strm.next_in = _in;
  strm.next_out = _out;
  strm.avail_in = (_in < last ? 5 + (last - _in) : 5 - (_in - last));
  strm.avail_out = (_out < end ? 257 + (end - _out) : 257 - (_out - end));
  state.hold = hold;
  state.bits = bits;
  return;
};
} , { } ] , 32 : [ function ( require , module , exports ) {
'use strict' ;
var utils = require ( '../utils/common' ) ;
var adler32 = require ( './adler32' ) ;
var crc32 = require ( './crc32' ) ;
var inflate _fast = require ( './inffast' ) ;
var inflate _table = require ( './inftrees' ) ;
/* code-type arguments for inflate_table() */
var CODES = 0;
var LENS = 1;
var DISTS = 2;

/* Public constants ==========================================================*/
/* ===========================================================================*/


/* Allowed flush values; see deflate() and inflate() below for details */
//var Z_NO_FLUSH      = 0;
//var Z_PARTIAL_FLUSH = 1;
//var Z_SYNC_FLUSH    = 2;
//var Z_FULL_FLUSH    = 3;
var Z_FINISH        = 4;
var Z_BLOCK         = 5;
var Z_TREES         = 6;


/* Return codes for the compression/decompression functions. Negative values
 * are errors, positive values are used for special but normal events.
 */
var Z_OK            = 0;
var Z_STREAM_END    = 1;
var Z_NEED_DICT     = 2;
//var Z_ERRNO         = -1;
var Z_STREAM_ERROR  = -2;
var Z_DATA_ERROR    = -3;
var Z_MEM_ERROR     = -4;
var Z_BUF_ERROR     = -5;
//var Z_VERSION_ERROR = -6;

/* The deflate compression method */
var Z_DEFLATED  = 8;


/* STATES ====================================================================*/
/* ===========================================================================*/

/* inflate() state-machine modes; "i:" = consumes input, "o:" = produces output */
var    HEAD = 1;       /* i: waiting for magic header */
var    FLAGS = 2;      /* i: waiting for method and flags (gzip) */
var    TIME = 3;       /* i: waiting for modification time (gzip) */
var    OS = 4;         /* i: waiting for extra flags and operating system (gzip) */
var    EXLEN = 5;      /* i: waiting for extra length (gzip) */
var    EXTRA = 6;      /* i: waiting for extra bytes (gzip) */
var    NAME = 7;       /* i: waiting for end of file name (gzip) */
var    COMMENT = 8;    /* i: waiting for end of comment (gzip) */
var    HCRC = 9;       /* i: waiting for header crc (gzip) */
var    DICTID = 10;    /* i: waiting for dictionary check value */
var    DICT = 11;      /* waiting for inflateSetDictionary() call */
var        TYPE = 12;      /* i: waiting for type bits, including last-flag bit */
var        TYPEDO = 13;    /* i: same, but skip check to exit inflate on new block */
var        STORED = 14;    /* i: waiting for stored size (length and complement) */
var        COPY_ = 15;     /* i/o: same as COPY below, but only first time in */
var        COPY = 16;      /* i/o: waiting for input or output to copy stored block */
var        TABLE = 17;     /* i: waiting for dynamic block table lengths */
var        LENLENS = 18;   /* i: waiting for code length code lengths */
var        CODELENS = 19;  /* i: waiting for length/lit and distance code lengths */
var            LEN_ = 20;      /* i: same as LEN below, but only first time in */
var            LEN = 21;       /* i: waiting for length/lit/eob code */
var            LENEXT = 22;    /* i: waiting for length extra bits */
var            DIST = 23;      /* i: waiting for distance code */
var            DISTEXT = 24;   /* i: waiting for distance extra bits */
var            MATCH = 25;     /* o: waiting for output space to copy string */
var            LIT = 26;       /* o: waiting for output space to write literal */
var    CHECK = 27;     /* i: waiting for 32-bit check value */
var    LENGTH = 28;    /* i: waiting for 32-bit length (gzip) */
var    DONE = 29;      /* finished check, done -- remain here until reset */
var    BAD = 30;       /* got a data error -- remain here until reset */
var    MEM = 31;       /* got an inflate() memory error -- remain here until reset */
var    SYNC = 32;      /* looking for synchronization bytes to restart inflate() */

/* ===========================================================================*/

/* upper bounds on the size of the dynamic decoding tables (see inftrees) */
var ENOUGH_LENS = 852;
var ENOUGH_DISTS = 592;
//var ENOUGH = (ENOUGH_LENS+ENOUGH_DISTS);

var MAX_WBITS = 15;
/* 32K LZ77 window */
var DEF_WBITS = MAX_WBITS;
/**
 * Reverse the byte order of a 32-bit integer (endianness swap).
 * Note: the result is assembled with `+`, so it keeps the sign produced
 * by `(q & 0xff) << 24` — i.e. it can be a negative 32-bit value.
 *
 * @param {number} q - 32-bit integer to swap
 * @returns {number} byte-swapped value
 */
function zswap32(q) {
  var b3 = (q >>> 24) & 0xff;         // old high byte -> low byte
  var b2 = (q >>> 8) & 0xff00;        // old byte 2 -> byte 1
  var b1 = (q & 0xff00) << 8;         // old byte 1 -> byte 2
  var b0 = (q & 0xff) << 24;          // old low byte -> high byte (may be negative)
  return b3 + b2 + b1 + b0;
}
/* Internal inflate decompression state; one instance lives on strm.state.
   All fields are reset by inflateResetKeep()/inflateReset(). */
function InflateState() {
  this.mode = 0;             /* current inflate mode */
  this.last = false;          /* true if processing last block */
  this.wrap = 0;              /* bit 0 true for zlib, bit 1 true for gzip */
  this.havedict = false;      /* true if dictionary provided */
  this.flags = 0;             /* gzip header method and flags (0 if zlib) */
  this.dmax = 0;              /* zlib header max distance (INFLATE_STRICT) */
  this.check = 0;             /* protected copy of check value */
  this.total = 0;             /* protected copy of output count */
  // TODO: may be {}
  this.head = null;           /* where to save gzip header information */

  /* sliding window */
  this.wbits = 0;             /* log base 2 of requested window size */
  this.wsize = 0;             /* window size or zero if not using window */
  this.whave = 0;             /* valid bytes in the window */
  this.wnext = 0;             /* window write index */
  this.window = null;         /* allocated sliding window, if needed */

  /* bit accumulator */
  this.hold = 0;              /* input bit accumulator */
  this.bits = 0;              /* number of bits in "in" */

  /* for string and stored block copying */
  this.length = 0;            /* literal or length of data to copy */
  this.offset = 0;            /* distance back to copy string from */

  /* for table and code decoding */
  this.extra = 0;             /* extra bits needed */

  /* fixed and dynamic code tables */
  this.lencode = null;          /* starting table for length/literal codes */
  this.distcode = null;         /* starting table for distance codes */
  this.lenbits = 0;           /* index bits for lencode */
  this.distbits = 0;          /* index bits for distcode */

  /* dynamic table building */
  this.ncode = 0;             /* number of code length code lengths */
  this.nlen = 0;              /* number of length code lengths */
  this.ndist = 0;             /* number of distance code lengths */
  this.have = 0;              /* number of code lengths in lens[] */
  this.next = null;              /* next available space in codes[] */

  this.lens = new utils.Buf16(320); /* temporary storage for code lengths */
  this.work = new utils.Buf16(288); /* work area for code table building */

  /*
   because we don't have pointers in js, we use lencode and distcode directly
   as buffers so we don't need codes
  */
  //this.codes = new utils.Buf32(ENOUGH);       /* space for code tables */
  this.lendyn = null;              /* dynamic table for length/literal codes (JS specific) */
  this.distdyn = null;             /* dynamic table for distance codes (JS specific) */
  this.sane = 0;                   /* if false, allow invalid distance too far */
  this.back = 0;                   /* bits back of last unprocessed length/lit */
  this.was = 0;                    /* initial length of match */
}
/**
 * Reset the inflate state while keeping the window settings (wrap/wbits
 * are untouched; wsize/whave/wnext are reset by inflateReset(), not here).
 *
 * @param {Object} strm - zlib stream with an attached InflateState
 * @returns {number} Z_OK on success, Z_STREAM_ERROR if strm/state missing
 */
function inflateResetKeep(strm) {
  if (!strm || !strm.state) { return Z_STREAM_ERROR; }

  var st = strm.state;
  strm.total_in = 0;
  strm.total_out = 0;
  st.total = 0;
  strm.msg = ''; /*Z_NULL*/
  if (st.wrap) {       /* to support ill-conceived Java test suite */
    strm.adler = st.wrap & 1;
  }
  st.mode = HEAD;
  st.last = 0;
  st.havedict = 0;
  st.dmax = 32768;
  st.head = null/*Z_NULL*/;
  st.hold = 0;
  st.bits = 0;
  //state.lencode = state.distcode = state.next = state.codes;
  st.lendyn = new utils.Buf32(ENOUGH_LENS);
  st.lencode = st.lendyn;
  st.distdyn = new utils.Buf32(ENOUGH_DISTS);
  st.distcode = st.distdyn;
  st.sane = 1;
  st.back = -1;
  //Tracev((stderr, "inflate: reset\n"));
  return Z_OK;
}
/**
 * Fully reset an inflate stream: discard the sliding-window contents,
 * then reset the remaining state via inflateResetKeep().
 *
 * @param {Object} strm - zlib stream with an attached InflateState
 * @returns {number} result of inflateResetKeep(), or Z_STREAM_ERROR
 */
function inflateReset(strm) {
  if (!strm || !strm.state) { return Z_STREAM_ERROR; }

  var st = strm.state;
  st.wsize = 0;
  st.whave = 0;
  st.wnext = 0;
  return inflateResetKeep(strm);
}
/**
 * Reset the stream and reconfigure the window/wrap mode from windowBits:
 *   negative      -> raw deflate (no wrap), absolute value is the log2 window
 *   8..15         -> zlib wrap
 *   +16 (24..31)  -> gzip wrap
 *   +32 (40..47)  -> automatic zlib/gzip detection
 *
 * @param {Object} strm - zlib stream with an attached InflateState
 * @param {number} windowBits - encoded window size / wrapper request
 * @returns {number} result of inflateReset(), or Z_STREAM_ERROR
 */
function inflateReset2(strm, windowBits) {
  /* get the state */
  if (!strm || !strm.state) { return Z_STREAM_ERROR; }
  var st = strm.state;

  /* extract wrap request from windowBits parameter */
  var wrap;
  if (windowBits < 0) {
    wrap = 0;
    windowBits = -windowBits;
  } else {
    wrap = (windowBits >> 4) + 1;
    if (windowBits < 48) {
      windowBits &= 15;
    }
  }

  /* set number of window bits, free window if different */
  if (windowBits && (windowBits < 8 || windowBits > 15)) {
    return Z_STREAM_ERROR;
  }
  if (st.window !== null && st.wbits !== windowBits) {
    st.window = null;
  }

  /* update state and reset the rest of it */
  st.wrap = wrap;
  st.wbits = windowBits;
  return inflateReset(strm);
}
/**
 * Allocate a fresh InflateState on the stream and initialize it for the
 * requested window configuration. On failure the state is detached again.
 *
 * @param {Object} strm - zlib stream object
 * @param {number} windowBits - see inflateReset2()
 * @returns {number} Z_OK on success, error code otherwise
 */
function inflateInit2(strm, windowBits) {
  if (!strm) { return Z_STREAM_ERROR; }
  //strm.msg = Z_NULL;                 /* in case we return an error */

  var st = new InflateState();
  //if (state === Z_NULL) return Z_MEM_ERROR;
  //Tracev((stderr, "inflate: allocated\n"));
  strm.state = st;
  st.window = null/*Z_NULL*/;
  var ret = inflateReset2(strm, windowBits);
  if (ret !== Z_OK) {
    strm.state = null/*Z_NULL*/;
  }
  return ret;
}
/* Convenience wrapper: initialize inflate with the default window size
   (DEF_WBITS = 15, i.e. a 32K LZ77 window, zlib wrapper). */
function inflateInit(strm) {
  return inflateInit2(strm, DEF_WBITS);
}
/*
 Install the fixed-code decoding tables on `state` (lencode/distcode and
 their index bit counts). The tables themselves are built lazily on the
 first call (the BUILDFIXED approach from zlib): this trades a little
 one-time execution cost for not shipping precomputed tables, and is not
 thread-safe during that first build — irrelevant in single-threaded JS.
 */
var virgin = true;

var lenfix, distfix; // We have no pointers in JS, so keep tables separate

function fixedtables(state) {
  /* build fixed huffman tables if first call (may not be thread safe) */
  if (virgin) {
    lenfix = new utils.Buf32(512);
    distfix = new utils.Buf32(32);

    /* literal/length table: fixed code lengths per RFC 1951, 3.2.6 */
    var sym;
    for (sym = 0; sym < 144; sym++) { state.lens[sym] = 8; }
    for (; sym < 256; sym++) { state.lens[sym] = 9; }
    for (; sym < 280; sym++) { state.lens[sym] = 7; }
    for (; sym < 288; sym++) { state.lens[sym] = 8; }

    inflate_table(LENS, state.lens, 0, 288, lenfix, 0, state.work, { bits: 9 });

    /* distance table: all 32 distance codes use 5 bits */
    for (sym = 0; sym < 32; sym++) { state.lens[sym] = 5; }

    inflate_table(DISTS, state.lens, 0, 32, distfix, 0, state.work, { bits: 5 });

    /* do this just once */
    virgin = false;
  }

  state.lencode = lenfix;
  state.lenbits = 9;
  state.distcode = distfix;
  state.distbits = 5;
}
/*
 Copy the last `copy` output bytes (ending at output index `end` in `src`)
 into the circular sliding window, allocating the window on first use.
 Only the most recent wsize (normally 32K) bytes are retained; wnext is the
 next write position and whave the number of valid bytes. Called when output
 has been produced but the end of the deflate stream has not been reached,
 and when loading a preset dictionary.

 Supplying inflate() with output buffers larger than 32K is faster, since
 only the last 32K needs to be copied back here and later matches tend to
 fall inside the output buffer itself.
 */
function updatewindow(strm, src, end, copy) {
  var state = strm.state;

  /* if it hasn't been done already, allocate space for the window */
  if (state.window === null) {
    state.wsize = 1 << state.wbits;
    state.wnext = 0;
    state.whave = 0;

    state.window = new utils.Buf8(state.wsize);
  }

  /* the tail of src alone fills the whole window — overwrite it wholesale */
  if (copy >= state.wsize) {
    utils.arraySet(state.window, src, end - state.wsize, state.wsize, 0);
    state.wnext = 0;
    state.whave = state.wsize;
    return 0;
  }

  /* otherwise append circularly, starting at wnext */
  var dist = state.wsize - state.wnext;
  if (dist > copy) {
    dist = copy;
  }
  //zmemcpy(state->window + state->wnext, end - copy, dist);
  utils.arraySet(state.window, src, end - copy, dist, state.wnext);
  copy -= dist;
  if (copy) {
    /* wrapped: the remainder goes to the start of the window */
    //zmemcpy(state->window, end - copy, copy);
    utils.arraySet(state.window, src, end - copy, copy, 0);
    state.wnext = copy;
    state.whave = state.wsize;
  }
  else {
    state.wnext += dist;
    if (state.wnext === state.wsize) { state.wnext = 0; }
    if (state.whave < state.wsize) { state.whave += dist; }
  }
  return 0;
}
function inflate ( strm , flush ) {
var state ;
var input , output ; // input/output buffers
var next ; /* next input INDEX */
var put ; /* next output INDEX */
var have , left ; /* available input and output */
var hold ; /* bit buffer */
var bits ; /* bits in bit buffer */
var _in , _out ; /* save starting available input and output */
var copy ; /* number of stored or match bytes to copy */
var from ; /* where to copy match bytes from */
var from _source ;
var here = 0 ; /* current decoding table entry */
var here _bits , here _op , here _val ; // paked "here" denormalized (JS specific)
//var last; /* parent table entry */
var last _bits , last _op , last _val ; // paked "last" denormalized (JS specific)
var len ; /* length to copy for repeats, bits to drop */
var ret ; /* return code */
var hbuf = new utils . Buf8 ( 4 ) ; /* buffer for gzip header crc calculation */
var opts ;
var n ; // temporary var for NEED_BITS
var order = /* permutation of code lengths */
[ 16 , 17 , 18 , 0 , 8 , 7 , 9 , 6 , 10 , 5 , 11 , 4 , 12 , 3 , 13 , 2 , 14 , 1 , 15 ] ;
if ( ! strm || ! strm . state || ! strm . output ||
( ! strm . input && strm . avail _in !== 0 ) ) {
return Z _STREAM _ERROR ;
}
state = strm . state ;
if ( state . mode === TYPE ) { state . mode = TYPEDO ; } /* skip check */
//--- LOAD() ---
put = strm . next _out ;
output = strm . output ;
left = strm . avail _out ;
next = strm . next _in ;
input = strm . input ;
have = strm . avail _in ;
hold = state . hold ;
bits = state . bits ;
//---
_in = have ;
_out = left ;
ret = Z _OK ;
inf _leave : // goto emulation
for ( ; ; ) {
switch ( state . mode ) {
case HEAD :
if ( state . wrap === 0 ) {
state . mode = TYPEDO ;
break ;
}
//=== NEEDBITS(16);
while ( bits < 16 ) {
if ( have === 0 ) { break inf _leave ; }
have -- ;
hold += input [ next ++ ] << bits ;
bits += 8 ;
}
//===//
if ( ( state . wrap & 2 ) && hold === 0x8b1f ) { /* gzip header */
state . check = 0 /*crc32(0L, Z_NULL, 0)*/ ;
//=== CRC2(state.check, hold);
hbuf [ 0 ] = hold & 0xff ;
hbuf [ 1 ] = ( hold >>> 8 ) & 0xff ;
state . check = crc32 ( state . check , hbuf , 2 , 0 ) ;
//===//
//=== INITBITS();
hold = 0 ;
bits = 0 ;
//===//
state . mode = FLAGS ;
break ;
}
state . flags = 0 ; /* expect zlib header */
if ( state . head ) {
state . head . done = false ;
}
if ( ! ( state . wrap & 1 ) || /* check if zlib header allowed */
( ( ( hold & 0xff ) /*BITS(8)*/ << 8 ) + ( hold >> 8 ) ) % 31 ) {
strm . msg = 'incorrect header check' ;
state . mode = BAD ;
break ;
}
if ( ( hold & 0x0f ) /*BITS(4)*/ !== Z _DEFLATED ) {
strm . msg = 'unknown compression method' ;
state . mode = BAD ;
break ;
}
//--- DROPBITS(4) ---//
hold >>>= 4 ;
bits -= 4 ;
//---//
len = ( hold & 0x0f ) /*BITS(4)*/ + 8 ;
if ( state . wbits === 0 ) {
state . wbits = len ;
}
else if ( len > state . wbits ) {
strm . msg = 'invalid window size' ;
state . mode = BAD ;
break ;
}
state . dmax = 1 << len ;
//Tracev((stderr, "inflate: zlib header ok\n"));
strm . adler = state . check = 1 /*adler32(0L, Z_NULL, 0)*/ ;
state . mode = hold & 0x200 ? DICTID : TYPE ;
//=== INITBITS();
hold = 0 ;
bits = 0 ;
//===//
break ;
case FLAGS :
//=== NEEDBITS(16); */
while ( bits < 16 ) {
if ( have === 0 ) { break inf _leave ; }
have -- ;
hold += input [ next ++ ] << bits ;
bits += 8 ;
}
//===//
state . flags = hold ;
if ( ( state . flags & 0xff ) !== Z _DEFLATED ) {
strm . msg = 'unknown compression method' ;
state . mode = BAD ;
break ;
}
if ( state . flags & 0xe000 ) {
strm . msg = 'unknown header flags set' ;
state . mode = BAD ;
break ;
}
if ( state . head ) {
state . head . text = ( ( hold >> 8 ) & 1 ) ;
}
if ( state . flags & 0x0200 ) {
//=== CRC2(state.check, hold);
hbuf [ 0 ] = hold & 0xff ;
hbuf [ 1 ] = ( hold >>> 8 ) & 0xff ;
state . check = crc32 ( state . check , hbuf , 2 , 0 ) ;
//===//
}
//=== INITBITS();
hold = 0 ;
bits = 0 ;
//===//
state . mode = TIME ;
/* falls through */
case TIME :
//=== NEEDBITS(32); */
while ( bits < 32 ) {
if ( have === 0 ) { break inf _leave ; }
have -- ;
hold += input [ next ++ ] << bits ;
bits += 8 ;
}
//===//
if ( state . head ) {
state . head . time = hold ;
}
if ( state . flags & 0x0200 ) {
//=== CRC4(state.check, hold)
hbuf [ 0 ] = hold & 0xff ;
hbuf [ 1 ] = ( hold >>> 8 ) & 0xff ;
hbuf [ 2 ] = ( hold >>> 16 ) & 0xff ;
hbuf [ 3 ] = ( hold >>> 24 ) & 0xff ;
state . check = crc32 ( state . check , hbuf , 4 , 0 ) ;
//===
}
//=== INITBITS();
hold = 0 ;
bits = 0 ;
//===//
state . mode = OS ;
/* falls through */
case OS :
//=== NEEDBITS(16); */
while ( bits < 16 ) {
if ( have === 0 ) { break inf _leave ; }
have -- ;
hold += input [ next ++ ] << bits ;
bits += 8 ;
}
//===//
if ( state . head ) {
state . head . xflags = ( hold & 0xff ) ;
state . head . os = ( hold >> 8 ) ;
}
if ( state . flags & 0x0200 ) {
//=== CRC2(state.check, hold);
hbuf [ 0 ] = hold & 0xff ;
hbuf [ 1 ] = ( hold >>> 8 ) & 0xff ;
state . check = crc32 ( state . check , hbuf , 2 , 0 ) ;
//===//
}
//=== INITBITS();
hold = 0 ;
bits = 0 ;
//===//
state . mode = EXLEN ;
/* falls through */
case EXLEN :
if ( state . flags & 0x0400 ) {
//=== NEEDBITS(16); */
while ( bits < 16 ) {
if ( have === 0 ) { break inf _leave ; }
have -- ;
hold += input [ next ++ ] << bits ;
bits += 8 ;
}
//===//
state . length = hold ;
if ( state . head ) {
state . head . extra _len = hold ;
}
if ( state . flags & 0x0200 ) {
//=== CRC2(state.check, hold);
hbuf [ 0 ] = hold & 0xff ;
hbuf [ 1 ] = ( hold >>> 8 ) & 0xff ;
state . check = crc32 ( state . check , hbuf , 2 , 0 ) ;
//===//
}
//=== INITBITS();
hold = 0 ;
bits = 0 ;
//===//
}
else if ( state . head ) {
state . head . extra = null /*Z_NULL*/ ;
}
state . mode = EXTRA ;
/* falls through */
case EXTRA :
if ( state . flags & 0x0400 ) {
copy = state . length ;
if ( copy > have ) { copy = have ; }
if ( copy ) {
if ( state . head ) {
len = state . head . extra _len - state . length ;
if ( ! state . head . extra ) {
// Use untyped array for more conveniend processing later
state . head . extra = new Array ( state . head . extra _len ) ;
}
utils . arraySet (
state . head . extra ,
input ,
next ,
// extra field is limited to 65536 bytes
// - no need for additional size check
copy ,
/*len + copy > state.head.extra_max - len ? state.head.extra_max : copy,*/
len
) ;
//zmemcpy(state.head.extra + len, next,
// len + copy > state.head.extra_max ?
// state.head.extra_max - len : copy);
}
if ( state . flags & 0x0200 ) {
state . check = crc32 ( state . check , input , copy , next ) ;
}
have -= copy ;
next += copy ;
state . length -= copy ;
}
if ( state . length ) { break inf _leave ; }
}
state . length = 0 ;
state . mode = NAME ;
/* falls through */
case NAME :
if ( state . flags & 0x0800 ) {
if ( have === 0 ) { break inf _leave ; }
copy = 0 ;
do {
// TODO: 2 or 1 bytes?
len = input [ next + copy ++ ] ;
/* use constant limit because in js we should not preallocate memory */
if ( state . head && len &&
( state . length < 65536 /*state.head.name_max*/ ) ) {
state . head . name += String . fromCharCode ( len ) ;
}
} while ( len && copy < have ) ;
if ( state . flags & 0x0200 ) {
state . check = crc32 ( state . check , input , copy , next ) ;
}
have -= copy ;
next += copy ;
if ( len ) { break inf _leave ; }
}
else if ( state . head ) {
state . head . name = null ;
}
state . length = 0 ;
state . mode = COMMENT ;
/* falls through */
case COMMENT :
if ( state . flags & 0x1000 ) {
if ( have === 0 ) { break inf _leave ; }
copy = 0 ;
do {
len = input [ next + copy ++ ] ;
/* use constant limit because in js we should not preallocate memory */
if ( state . head && len &&
( state . length < 65536 /*state.head.comm_max*/ ) ) {
state . head . comment += String . fromCharCode ( len ) ;
}
} while ( len && copy < have ) ;
if ( state . flags & 0x0200 ) {
state . check = crc32 ( state . check , input , copy , next ) ;
}
have -= copy ;
next += copy ;
if ( len ) { break inf _leave ; }
}
else if ( state . head ) {
state . head . comment = null ;
}
state . mode = HCRC ;
/* falls through */
case HCRC :
if ( state . flags & 0x0200 ) {
//=== NEEDBITS(16); */
while ( bits < 16 ) {
if ( have === 0 ) { break inf _leave ; }
have -- ;
hold += input [ next ++ ] << bits ;
bits += 8 ;
}
//===//
if ( hold !== ( state . check & 0xffff ) ) {
strm . msg = 'header crc mismatch' ;
state . mode = BAD ;
break ;
}
//=== INITBITS();
hold = 0 ;
bits = 0 ;
//===//
}
if ( state . head ) {
state . head . hcrc = ( ( state . flags >> 9 ) & 1 ) ;
state . head . done = true ;
}
strm . adler = state . check = 0 ;
state . mode = TYPE ;
break ;
case DICTID :
//=== NEEDBITS(32); */
while ( bits < 32 ) {
if ( have === 0 ) { break inf _leave ; }
have -- ;
hold += input [ next ++ ] << bits ;
bits += 8 ;
}
//===//
strm . adler = state . check = zswap32 ( hold ) ;
//=== INITBITS();
hold = 0 ;
bits = 0 ;
//===//
state . mode = DICT ;
/* falls through */
case DICT :
if ( state . havedict === 0 ) {
//--- RESTORE() ---
strm . next _out = put ;
strm . avail _out = left ;
strm . next _in = next ;
strm . avail _in = have ;
state . hold = hold ;
state . bits = bits ;
//---
return Z _NEED _DICT ;
}
strm . adler = state . check = 1 /*adler32(0L, Z_NULL, 0)*/ ;
state . mode = TYPE ;
/* falls through */
case TYPE :
if ( flush === Z _BLOCK || flush === Z _TREES ) { break inf _leave ; }
/* falls through */
case TYPEDO :
if ( state . last ) {
//--- BYTEBITS() ---//
hold >>>= bits & 7 ;
bits -= bits & 7 ;
//---//
state . mode = CHECK ;
break ;
}
//=== NEEDBITS(3); */
while ( bits < 3 ) {
if ( have === 0 ) { break inf _leave ; }
have -- ;
hold += input [ next ++ ] << bits ;
bits += 8 ;
}
//===//
state . last = ( hold & 0x01 ) /*BITS(1)*/ ;
//--- DROPBITS(1) ---//
hold >>>= 1 ;
bits -= 1 ;
//---//
switch ( ( hold & 0x03 ) /*BITS(2)*/ ) {
case 0 : /* stored block */
//Tracev((stderr, "inflate: stored block%s\n",
// state.last ? " (last)" : ""));
state . mode = STORED ;
break ;
case 1 : /* fixed block */
fixedtables ( state ) ;
//Tracev((stderr, "inflate: fixed codes block%s\n",
// state.last ? " (last)" : ""));
state . mode = LEN _ ; /* decode codes */
if ( flush === Z _TREES ) {
//--- DROPBITS(2) ---//
hold >>>= 2 ;
bits -= 2 ;
//---//
break inf _leave ;
}
break ;
case 2 : /* dynamic block */
//Tracev((stderr, "inflate: dynamic codes block%s\n",
// state.last ? " (last)" : ""));
state . mode = TABLE ;
break ;
case 3 :
strm . msg = 'invalid block type' ;
state . mode = BAD ;
}
//--- DROPBITS(2) ---//
hold >>>= 2 ;
bits -= 2 ;
//---//
break ;
case STORED :
//--- BYTEBITS() ---// /* go to byte boundary */
hold >>>= bits & 7 ;
bits -= bits & 7 ;
//---//
//=== NEEDBITS(32); */
while ( bits < 32 ) {
if ( have === 0 ) { break inf _leave ; }
have -- ;
hold += input [ next ++ ] << bits ;
bits += 8 ;
}
//===//
if ( ( hold & 0xffff ) !== ( ( hold >>> 16 ) ^ 0xffff ) ) {
strm . msg = 'invalid stored block lengths' ;
state . mode = BAD ;
break ;
}
state . length = hold & 0xffff ;
//Tracev((stderr, "inflate: stored length %u\n",
// state.length));
//=== INITBITS();
hold = 0 ;
bits = 0 ;
//===//
state . mode = COPY _ ;
if ( flush === Z _TREES ) { break inf _leave ; }
/* falls through */
case COPY _ :
state . mode = COPY ;
/* falls through */
case COPY :
copy = state . length ;
if ( copy ) {
if ( copy > have ) { copy = have ; }
if ( copy > left ) { copy = left ; }
if ( copy === 0 ) { break inf _leave ; }
//--- zmemcpy(put, next, copy); ---
utils . arraySet ( output , input , next , copy , put ) ;
//---//
have -= copy ;
next += copy ;
left -= copy ;
put += copy ;
state . length -= copy ;
break ;
}
//Tracev((stderr, "inflate: stored end\n"));
state . mode = TYPE ;
break ;
case TABLE :
//=== NEEDBITS(14); */
while ( bits < 14 ) {
if ( have === 0 ) { break inf _leave ; }
have -- ;
hold += input [ next ++ ] << bits ;
bits += 8 ;
}
//===//
state . nlen = ( hold & 0x1f ) /*BITS(5)*/ + 257 ;
//--- DROPBITS(5) ---//
hold >>>= 5 ;
bits -= 5 ;
//---//
state . ndist = ( hold & 0x1f ) /*BITS(5)*/ + 1 ;
//--- DROPBITS(5) ---//
hold >>>= 5 ;
bits -= 5 ;
//---//
state . ncode = ( hold & 0x0f ) /*BITS(4)*/ + 4 ;
//--- DROPBITS(4) ---//
hold >>>= 4 ;
bits -= 4 ;
//---//
//#ifndef PKZIP_BUG_WORKAROUND
if ( state . nlen > 286 || state . ndist > 30 ) {
strm . msg = 'too many length or distance symbols' ;
state . mode = BAD ;
break ;
}
//#endif
//Tracev((stderr, "inflate: table sizes ok\n"));
state . have = 0 ;
state . mode = LENLENS ;
/* falls through */
case LENLENS :
while ( state . have < state . ncode ) {
//=== NEEDBITS(3);
while ( bits < 3 ) {
if ( have === 0 ) { break inf _leave ; }
have -- ;
hold += input [ next ++ ] << bits ;
bits += 8 ;
}
//===//
state . lens [ order [ state . have ++ ] ] = ( hold & 0x07 ) ; //BITS(3);
//--- DROPBITS(3) ---//
hold >>>= 3 ;
bits -= 3 ;
//---//
}
while ( state . have < 19 ) {
state . lens [ order [ state . have ++ ] ] = 0 ;
}
// We have separate tables & no pointers. 2 commented lines below not needed.
//state.next = state.codes;
//state.lencode = state.next;
// Switch to use dynamic table
state . lencode = state . lendyn ;
state . lenbits = 7 ;
opts = { bits : state . lenbits } ;
ret = inflate _table ( CODES , state . lens , 0 , 19 , state . lencode , 0 , state . work , opts ) ;
state . lenbits = opts . bits ;
if ( ret ) {
strm . msg = 'invalid code lengths set' ;
state . mode = BAD ;
break ;
}
//Tracev((stderr, "inflate: code lengths ok\n"));
state . have = 0 ;
state . mode = CODELENS ;
/* falls through */
case CODELENS :
while ( state . have < state . nlen + state . ndist ) {
for ( ; ; ) {
here = state . lencode [ hold & ( ( 1 << state . lenbits ) - 1 ) ] ; /*BITS(state.lenbits)*/
here _bits = here >>> 24 ;
here _op = ( here >>> 16 ) & 0xff ;
here _val = here & 0xffff ;
if ( ( here _bits ) <= bits ) { break ; }
//--- PULLBYTE() ---//
if ( have === 0 ) { break inf _leave ; }
have -- ;
hold += input [ next ++ ] << bits ;
bits += 8 ;
//---//
}
if ( here _val < 16 ) {
//--- DROPBITS(here.bits) ---//
hold >>>= here _bits ;
bits -= here _bits ;
//---//
state . lens [ state . have ++ ] = here _val ;
}
else {
if ( here _val === 16 ) {
//=== NEEDBITS(here.bits + 2);
n = here _bits + 2 ;
while ( bits < n ) {
if ( have === 0 ) { break inf _leave ; }
have -- ;
hold += input [ next ++ ] << bits ;
bits += 8 ;
}
//===//
//--- DROPBITS(here.bits) ---//
hold >>>= here _bits ;
bits -= here _bits ;
//---//
if ( state . have === 0 ) {
strm . msg = 'invalid bit length repeat' ;
state . mode = BAD ;
break ;
}
len = state . lens [ state . have - 1 ] ;
copy = 3 + ( hold & 0x03 ) ; //BITS(2);
//--- DROPBITS(2) ---//
hold >>>= 2 ;
bits -= 2 ;
//---//
}
else if ( here _val === 17 ) {
//=== NEEDBITS(here.bits + 3);
n = here _bits + 3 ;
while ( bits < n ) {
if ( have === 0 ) { break inf _leave ; }
have -- ;
hold += input [ next ++ ] << bits ;
bits += 8 ;
}
//===//
//--- DROPBITS(here.bits) ---//
hold >>>= here _bits ;
bits -= here _bits ;
//---//
len = 0 ;
copy = 3 + ( hold & 0x07 ) ; //BITS(3);
//--- DROPBITS(3) ---//
hold >>>= 3 ;
bits -= 3 ;
//---//
}
else {
//=== NEEDBITS(here.bits + 7);
n = here _bits + 7 ;
while ( bits < n ) {
if ( have === 0 ) { break inf _leave ; }
have -- ;
hold += input [ next ++ ] << bits ;
bits += 8 ;
}
//===//
//--- DROPBITS(here.bits) ---//
hold >>>= here _bits ;
bits -= here _bits ;
//---//
len = 0 ;
copy = 11 + ( hold & 0x7f ) ; //BITS(7);
//--- DROPBITS(7) ---//
hold >>>= 7 ;
bits -= 7 ;
//---//
}
if ( state . have + copy > state . nlen + state . ndist ) {
strm . msg = 'invalid bit length repeat' ;
state . mode = BAD ;
break ;
}
while ( copy -- ) {
state . lens [ state . have ++ ] = len ;
}
}
}
/* handle error breaks in while */
if ( state . mode === BAD ) { break ; }
/* check for end-of-block code (better have one) */
if ( state . lens [ 256 ] === 0 ) {
strm . msg = 'invalid code -- missing end-of-block' ;
state . mode = BAD ;
break ;
}
/* build code tables -- note: do not change the lenbits or distbits
   values here (9 and 6) without reading the comments in inftrees.h
   concerning the ENOUGH constants, which depend on those values */
state . lenbits = 9 ;
opts = { bits : state . lenbits } ;
ret = inflate _table ( LENS , state . lens , 0 , state . nlen , state . lencode , 0 , state . work , opts ) ;
// We have separate tables & no pointers. 2 commented lines below not needed.
// state.next_index = opts.table_index;
state . lenbits = opts . bits ;
// state.lencode = state.next;
if ( ret ) {
strm . msg = 'invalid literal/lengths set' ;
state . mode = BAD ;
break ;
}
state . distbits = 6 ;
//state.distcode.copy(state.codes);
// Switch to use dynamic table
state . distcode = state . distdyn ;
opts = { bits : state . distbits } ;
ret = inflate _table ( DISTS , state . lens , state . nlen , state . ndist , state . distcode , 0 , state . work , opts ) ;
// We have separate tables & no pointers. 2 commented lines below not needed.
// state.next_index = opts.table_index;
state . distbits = opts . bits ;
// state.distcode = state.next;
if ( ret ) {
strm . msg = 'invalid distances set' ;
state . mode = BAD ;
break ;
}
//Tracev((stderr, 'inflate: codes ok\n'));
state . mode = LEN _ ;
if ( flush === Z _TREES ) { break inf _leave ; }
/* falls through */
case LEN _ :
state . mode = LEN ;
/* falls through */
case LEN :
if ( have >= 6 && left >= 258 ) {
//--- RESTORE() ---
strm . next _out = put ;
strm . avail _out = left ;
strm . next _in = next ;
strm . avail _in = have ;
state . hold = hold ;
state . bits = bits ;
//---
inflate _fast ( strm , _out ) ;
//--- LOAD() ---
put = strm . next _out ;
output = strm . output ;
left = strm . avail _out ;
next = strm . next _in ;
input = strm . input ;
have = strm . avail _in ;
hold = state . hold ;
bits = state . bits ;
//---
if ( state . mode === TYPE ) {
state . back = - 1 ;
}
break ;
}
state . back = 0 ;
for ( ; ; ) {
here = state . lencode [ hold & ( ( 1 << state . lenbits ) - 1 ) ] ; /*BITS(state.lenbits)*/
here _bits = here >>> 24 ;
here _op = ( here >>> 16 ) & 0xff ;
here _val = here & 0xffff ;
if ( here _bits <= bits ) { break ; }
//--- PULLBYTE() ---//
if ( have === 0 ) { break inf _leave ; }
have -- ;
hold += input [ next ++ ] << bits ;
bits += 8 ;
//---//
}
if ( here _op && ( here _op & 0xf0 ) === 0 ) {
last _bits = here _bits ;
last _op = here _op ;
last _val = here _val ;
for ( ; ; ) {
here = state . lencode [ last _val +
( ( hold & ( ( 1 << ( last _bits + last _op ) ) - 1 ) ) /*BITS(last.bits + last.op)*/ >> last _bits ) ] ;
here _bits = here >>> 24 ;
here _op = ( here >>> 16 ) & 0xff ;
here _val = here & 0xffff ;
if ( ( last _bits + here _bits ) <= bits ) { break ; }
//--- PULLBYTE() ---//
if ( have === 0 ) { break inf _leave ; }
have -- ;
hold += input [ next ++ ] << bits ;
bits += 8 ;
//---//
}
//--- DROPBITS(last.bits) ---//
hold >>>= last _bits ;
bits -= last _bits ;
//---//
state . back += last _bits ;
}
//--- DROPBITS(here.bits) ---//
hold >>>= here _bits ;
bits -= here _bits ;
//---//
state . back += here _bits ;
state . length = here _val ;
if ( here _op === 0 ) {
//Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ?
// "inflate: literal '%c'\n" :
// "inflate: literal 0x%02x\n", here.val));
state . mode = LIT ;
break ;
}
if ( here _op & 32 ) {
//Tracevv((stderr, "inflate: end of block\n"));
state . back = - 1 ;
state . mode = TYPE ;
break ;
}
if ( here _op & 64 ) {
strm . msg = 'invalid literal/length code' ;
state . mode = BAD ;
break ;
}
state . extra = here _op & 15 ;
state . mode = LENEXT ;
/* falls through */
case LENEXT :
if ( state . extra ) {
//=== NEEDBITS(state.extra);
n = state . extra ;
while ( bits < n ) {
if ( have === 0 ) { break inf _leave ; }
have -- ;
hold += input [ next ++ ] << bits ;
bits += 8 ;
}
//===//
state . length += hold & ( ( 1 << state . extra ) - 1 ) /*BITS(state.extra)*/ ;
//--- DROPBITS(state.extra) ---//
hold >>>= state . extra ;
bits -= state . extra ;
//---//
state . back += state . extra ;
}
//Tracevv((stderr, "inflate: length %u\n", state.length));
state . was = state . length ;
state . mode = DIST ;
/* falls through */
case DIST :
for ( ; ; ) {
here = state . distcode [ hold & ( ( 1 << state . distbits ) - 1 ) ] ; /*BITS(state.distbits)*/
here _bits = here >>> 24 ;
here _op = ( here >>> 16 ) & 0xff ;
here _val = here & 0xffff ;
if ( ( here _bits ) <= bits ) { break ; }
//--- PULLBYTE() ---//
if ( have === 0 ) { break inf _leave ; }
have -- ;
hold += input [ next ++ ] << bits ;
bits += 8 ;
//---//
}
if ( ( here _op & 0xf0 ) === 0 ) {
last _bits = here _bits ;
last _op = here _op ;
last _val = here _val ;
for ( ; ; ) {
here = state . distcode [ last _val +
( ( hold & ( ( 1 << ( last _bits + last _op ) ) - 1 ) ) /*BITS(last.bits + last.op)*/ >> last _bits ) ] ;
here _bits = here >>> 24 ;
here _op = ( here >>> 16 ) & 0xff ;
here _val = here & 0xffff ;
if ( ( last _bits + here _bits ) <= bits ) { break ; }
//--- PULLBYTE() ---//
if ( have === 0 ) { break inf _leave ; }
have -- ;
hold += input [ next ++ ] << bits ;
bits += 8 ;
//---//
}
//--- DROPBITS(last.bits) ---//
hold >>>= last _bits ;
bits -= last _bits ;
//---//
state . back += last _bits ;
}
//--- DROPBITS(here.bits) ---//
hold >>>= here _bits ;
bits -= here _bits ;
//---//
state . back += here _bits ;
if ( here _op & 64 ) {
strm . msg = 'invalid distance code' ;
state . mode = BAD ;
break ;
}
state . offset = here _val ;
state . extra = ( here _op ) & 15 ;
state . mode = DISTEXT ;
/* falls through */
case DISTEXT :
if ( state . extra ) {
//=== NEEDBITS(state.extra);
n = state . extra ;
while ( bits < n ) {
if ( have === 0 ) { break inf _leave ; }
have -- ;
hold += input [ next ++ ] << bits ;
bits += 8 ;
}
//===//
state . offset += hold & ( ( 1 << state . extra ) - 1 ) /*BITS(state.extra)*/ ;
//--- DROPBITS(state.extra) ---//
hold >>>= state . extra ;
bits -= state . extra ;
//---//
state . back += state . extra ;
}
//#ifdef INFLATE_STRICT
if ( state . offset > state . dmax ) {
strm . msg = 'invalid distance too far back' ;
state . mode = BAD ;
break ;
}
//#endif
//Tracevv((stderr, "inflate: distance %u\n", state.offset));
state . mode = MATCH ;
/* falls through */
case MATCH :
if ( left === 0 ) { break inf _leave ; }
copy = _out - left ;
if ( state . offset > copy ) { /* copy from window */
copy = state . offset - copy ;
if ( copy > state . whave ) {
if ( state . sane ) {
strm . msg = 'invalid distance too far back' ;
state . mode = BAD ;
break ;
}
// (!) This block is disabled in zlib defaults,
// don't enable it for binary compatibility
//#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR
// Trace((stderr, "inflate.c too far\n"));
// copy -= state.whave;
// if (copy > state.length) { copy = state.length; }
// if (copy > left) { copy = left; }
// left -= copy;
// state.length -= copy;
// do {
// output[put++] = 0;
// } while (--copy);
// if (state.length === 0) { state.mode = LEN; }
// break;
//#endif
}
if ( copy > state . wnext ) {
copy -= state . wnext ;
from = state . wsize - copy ;
}
else {
from = state . wnext - copy ;
}
if ( copy > state . length ) { copy = state . length ; }
from _source = state . window ;
}
else { /* copy from output */
from _source = output ;
from = put - state . offset ;
copy = state . length ;
}
if ( copy > left ) { copy = left ; }
left -= copy ;
state . length -= copy ;
do {
output [ put ++ ] = from _source [ from ++ ] ;
} while ( -- copy ) ;
if ( state . length === 0 ) { state . mode = LEN ; }
break ;
case LIT :
if ( left === 0 ) { break inf _leave ; }
output [ put ++ ] = state . length ;
left -- ;
state . mode = LEN ;
break ;
case CHECK :
if ( state . wrap ) {
//=== NEEDBITS(32);
while ( bits < 32 ) {
if ( have === 0 ) { break inf _leave ; }
have -- ;
// Use '|' instead of '+' to make sure that result is signed
hold |= input [ next ++ ] << bits ;
bits += 8 ;
}
//===//
_out -= left ;
strm . total _out += _out ;
state . total += _out ;
if ( _out ) {
strm . adler = state . check =
/*UPDATE(state.check, put - _out, _out);*/
( state . flags ? crc32 ( state . check , output , _out , put - _out ) : adler32 ( state . check , output , _out , put - _out ) ) ;
}
_out = left ;
// NB: crc32 stored as signed 32-bit int, zswap32 returns signed too
if ( ( state . flags ? hold : zswap32 ( hold ) ) !== state . check ) {
strm . msg = 'incorrect data check' ;
state . mode = BAD ;
break ;
}
//=== INITBITS();
hold = 0 ;
bits = 0 ;
//===//
//Tracev((stderr, "inflate: check matches trailer\n"));
}
state . mode = LENGTH ;
/* falls through */
case LENGTH :
if ( state . wrap && state . flags ) {
//=== NEEDBITS(32);
while ( bits < 32 ) {
if ( have === 0 ) { break inf _leave ; }
have -- ;
hold += input [ next ++ ] << bits ;
bits += 8 ;
}
//===//
if ( hold !== ( state . total & 0xffffffff ) ) {
strm . msg = 'incorrect length check' ;
state . mode = BAD ;
break ;
}
//=== INITBITS();
hold = 0 ;
bits = 0 ;
//===//
//Tracev((stderr, "inflate: length matches trailer\n"));
}
state . mode = DONE ;
/* falls through */
case DONE :
ret = Z _STREAM _END ;
break inf _leave ;
case BAD :
ret = Z _DATA _ERROR ;
break inf _leave ;
case MEM :
return Z _MEM _ERROR ;
case SYNC :
/* falls through */
default :
return Z _STREAM _ERROR ;
}
}
// inf_leave <- here is real place for "goto inf_leave", emulated via "break inf_leave"
/*
   Return from inflate(), updating the total counts and the check value.
   If there was no progress during the inflate() call, return a buffer
   error.  Call updatewindow() to create and/or update the window state.
   Note: a memory error from inflate() is non-recoverable.
 */
//--- RESTORE() ---
strm . next _out = put ;
strm . avail _out = left ;
strm . next _in = next ;
strm . avail _in = have ;
state . hold = hold ;
state . bits = bits ;
//---
if ( state . wsize || ( _out !== strm . avail _out && state . mode < BAD &&
( state . mode < CHECK || flush !== Z _FINISH ) ) ) {
if ( updatewindow ( strm , strm . output , strm . next _out , _out - strm . avail _out ) ) {
state . mode = MEM ;
return Z _MEM _ERROR ;
}
}
_in -= strm . avail _in ;
_out -= strm . avail _out ;
strm . total _in += _in ;
strm . total _out += _out ;
state . total += _out ;
if ( state . wrap && _out ) {
strm . adler = state . check = /*UPDATE(state.check, strm.next_out - _out, _out);*/
( state . flags ? crc32 ( state . check , output , _out , strm . next _out - _out ) : adler32 ( state . check , output , _out , strm . next _out - _out ) ) ;
}
strm . data _type = state . bits + ( state . last ? 64 : 0 ) +
( state . mode === TYPE ? 128 : 0 ) +
( state . mode === LEN _ || state . mode === COPY _ ? 256 : 0 ) ;
if ( ( ( _in === 0 && _out === 0 ) || flush === Z _FINISH ) && ret === Z _OK ) {
ret = Z _BUF _ERROR ;
}
return ret ;
}
function inflateEnd ( strm ) {
if ( ! strm || ! strm . state /*|| strm->zfree == (free_func)0*/ ) {
return Z _STREAM _ERROR ;
}
var state = strm . state ;
if ( state . window ) {
state . window = null ;
}
strm . state = null ;
return Z _OK ;
}
function inflateGetHeader ( strm , head ) {
var state ;
/* check state */
if ( ! strm || ! strm . state ) { return Z _STREAM _ERROR ; }
state = strm . state ;
if ( ( state . wrap & 2 ) === 0 ) { return Z _STREAM _ERROR ; }
/* save header structure */
state . head = head ;
head . done = false ;
return Z _OK ;
}
function inflateSetDictionary ( strm , dictionary ) {
var dictLength = dictionary . length ;
var state ;
var dictid ;
var ret ;
/* check state */
if ( ! strm /* == Z_NULL */ || ! strm . state /* == Z_NULL */ ) { return Z _STREAM _ERROR ; }
state = strm . state ;
if ( state . wrap !== 0 && state . mode !== DICT ) {
return Z _STREAM _ERROR ;
}
/* check for correct dictionary identifier */
if ( state . mode === DICT ) {
dictid = 1 ; /* adler32(0, null, 0)*/
/* dictid = adler32(dictid, dictionary, dictLength); */
dictid = adler32 ( dictid , dictionary , dictLength , 0 ) ;
if ( dictid !== state . check ) {
return Z _DATA _ERROR ;
}
}
/ * c o p y d i c t i o n a r y t o w i n d o w u s i n g u p d a t e w i n d o w ( ) , w h i c h w i l l a m e n d t h e
existing dictionary if appropriate * /
ret = updatewindow ( strm , dictionary , dictLength , dictLength ) ;
if ( ret ) {
state . mode = MEM ;
return Z _MEM _ERROR ;
}
state . havedict = 1 ;
// Tracev((stderr, "inflate: dictionary set\n"));
return Z _OK ;
}
exports . inflateReset = inflateReset ;
exports . inflateReset2 = inflateReset2 ;
exports . inflateResetKeep = inflateResetKeep ;
exports . inflateInit = inflateInit ;
exports . inflateInit2 = inflateInit2 ;
exports . inflate = inflate ;
exports . inflateEnd = inflateEnd ;
exports . inflateGetHeader = inflateGetHeader ;
exports . inflateSetDictionary = inflateSetDictionary ;
exports . inflateInfo = 'pako inflate (from Nodeca project)' ;
/ * N o t i m p l e m e n t e d
exports . inflateCopy = inflateCopy ;
exports . inflateGetDictionary = inflateGetDictionary ;
exports . inflateMark = inflateMark ;
exports . inflatePrime = inflatePrime ;
exports . inflateSync = inflateSync ;
exports . inflateSyncPoint = inflateSyncPoint ;
exports . inflateUndermine = inflateUndermine ;
* /
} , { "../utils/common" : 24 , "./adler32" : 26 , "./crc32" : 28 , "./inffast" : 31 , "./inftrees" : 33 } ] , 33 : [ function ( require , module , exports ) {
'use strict' ;
var utils = require ( '../utils/common' ) ;
var MAXBITS = 15 ;
var ENOUGH _LENS = 852 ;
var ENOUGH _DISTS = 592 ;
//var ENOUGH = (ENOUGH_LENS+ENOUGH_DISTS);
var CODES = 0 ;
var LENS = 1 ;
var DISTS = 2 ;
var lbase = [ /* Length codes 257..285 base */
3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 13 , 15 , 17 , 19 , 23 , 27 , 31 ,
35 , 43 , 51 , 59 , 67 , 83 , 99 , 115 , 131 , 163 , 195 , 227 , 258 , 0 , 0
] ;
var lext = [ /* Length codes 257..285 extra */
16 , 16 , 16 , 16 , 16 , 16 , 16 , 16 , 17 , 17 , 17 , 17 , 18 , 18 , 18 , 18 ,
19 , 19 , 19 , 19 , 20 , 20 , 20 , 20 , 21 , 21 , 21 , 21 , 16 , 72 , 78
] ;
var dbase = [ /* Distance codes 0..29 base */
1 , 2 , 3 , 4 , 5 , 7 , 9 , 13 , 17 , 25 , 33 , 49 , 65 , 97 , 129 , 193 ,
257 , 385 , 513 , 769 , 1025 , 1537 , 2049 , 3073 , 4097 , 6145 ,
8193 , 12289 , 16385 , 24577 , 0 , 0
] ;
var dext = [ /* Distance codes 0..29 extra */
16 , 16 , 16 , 16 , 17 , 17 , 18 , 18 , 19 , 19 , 20 , 20 , 21 , 21 , 22 , 22 ,
23 , 23 , 24 , 24 , 25 , 25 , 26 , 26 , 27 , 27 ,
28 , 28 , 29 , 29 , 64 , 64
] ;
module . exports = function inflate _table ( type , lens , lens _index , codes , table , table _index , work , opts )
{
var bits = opts . bits ;
//here = opts.here; /* table entry for duplication */
var len = 0 ; /* a code's length in bits */
var sym = 0 ; /* index of code symbols */
var min = 0 , max = 0 ; /* minimum and maximum code lengths */
var root = 0 ; /* number of index bits for root table */
var curr = 0 ; /* number of index bits for current table */
var drop = 0 ; /* code bits to drop for sub-table */
var left = 0 ; /* number of prefix codes available */
var used = 0 ; /* code entries in table used */
var huff = 0 ; /* Huffman code */
var incr ; /* for incrementing code, index */
var fill ; /* index for replicating entries */
var low ; /* low bits for current root entry */
var mask ; /* mask for low root bits */
var next ; /* next available space in table */
var base = null ; /* base value table to use */
var base _index = 0 ;
// var shoextra; /* extra bits table to use */
var end ; /* use base and extra for symbol > end */
var count = new utils . Buf16 ( MAXBITS + 1 ) ; //[MAXBITS+1]; /* number of codes of each length */
var offs = new utils . Buf16 ( MAXBITS + 1 ) ; //[MAXBITS+1]; /* offsets in table for each length */
var extra = null ;
var extra _index = 0 ;
var here _bits , here _op , here _val ;
/ *
Process a set of code lengths to create a canonical Huffman code . The
code lengths are lens [ 0. . codes - 1 ] . Each length corresponds to the
symbols 0. . codes - 1. The Huffman code is generated by first sorting the
symbols by length from short to long , and retaining the symbol order
for codes with equal lengths . Then the code starts with all zero bits
for the first code of the shortest length , and the codes are integer
increments for the same length , and zeros are appended as the length
increases . For the deflate format , these bits are stored backwards
from their more natural integer increment ordering , and so when the
decoding tables are built in the large loop below , the integer codes
are incremented backwards .
This routine assumes , but does not check , that all of the entries in
lens [ ] are in the range 0. . MAXBITS . The caller must assure this .
1. . MAXBITS is interpreted as that code length . zero means that that
symbol does not occur in this code .
The codes are sorted by computing a count of codes for each length ,
creating from that a table of starting indices for each length in the
sorted table , and then entering the symbols in order in the sorted
table . The sorted table is work [ ] , with that space being provided by
the caller .
The length counts are used for other purposes as well , i . e . finding
the minimum and maximum length codes , determining if there are any
codes at all , checking for a valid set of lengths , and looking ahead
at length counts to determine sub - table sizes when building the
decoding tables .
* /
/* accumulate lengths for codes (assumes lens[] all in 0..MAXBITS) */
for ( len = 0 ; len <= MAXBITS ; len ++ ) {
count [ len ] = 0 ;
}
for ( sym = 0 ; sym < codes ; sym ++ ) {
count [ lens [ lens _index + sym ] ] ++ ;
}
/* bound code lengths, force root to be within code lengths */
root = bits ;
for ( max = MAXBITS ; max >= 1 ; max -- ) {
if ( count [ max ] !== 0 ) { break ; }
}
if ( root > max ) {
root = max ;
}
if ( max === 0 ) { /* no symbols to code at all */
//table.op[opts.table_index] = 64; //here.op = (var char)64; /* invalid code marker */
//table.bits[opts.table_index] = 1; //here.bits = (var char)1;
//table.val[opts.table_index++] = 0; //here.val = (var short)0;
table [ table _index ++ ] = ( 1 << 24 ) | ( 64 << 16 ) | 0 ;
//table.op[opts.table_index] = 64;
//table.bits[opts.table_index] = 1;
//table.val[opts.table_index++] = 0;
table [ table _index ++ ] = ( 1 << 24 ) | ( 64 << 16 ) | 0 ;
opts . bits = 1 ;
return 0 ; /* no symbols, but wait for decoding to report error */
}
for ( min = 1 ; min < max ; min ++ ) {
if ( count [ min ] !== 0 ) { break ; }
}
if ( root < min ) {
root = min ;
}
/* check for an over-subscribed or incomplete set of lengths */
left = 1 ;
for ( len = 1 ; len <= MAXBITS ; len ++ ) {
left <<= 1 ;
left -= count [ len ] ;
if ( left < 0 ) {
return - 1 ;
} /* over-subscribed */
}
if ( left > 0 && ( type === CODES || max !== 1 ) ) {
return - 1 ; /* incomplete set */
}
/* generate offsets into symbol table for each length for sorting */
offs [ 1 ] = 0 ;
for ( len = 1 ; len < MAXBITS ; len ++ ) {
offs [ len + 1 ] = offs [ len ] + count [ len ] ;
}
/* sort symbols by length, by symbol order within each length */
for ( sym = 0 ; sym < codes ; sym ++ ) {
if ( lens [ lens _index + sym ] !== 0 ) {
work [ offs [ lens [ lens _index + sym ] ] ++ ] = sym ;
}
}
/ *
Create and fill in decoding tables . In this loop , the table being
filled is at next and has curr index bits . The code being used is huff
with length len . That code is converted to an index by dropping drop
bits off of the bottom . For codes where len is less than drop + curr ,
those top drop + curr - len bits are incremented through all values to
fill the table with replicated entries .
root is the number of index bits for the root table . When len exceeds
root , sub - tables are created pointed to by the root entry with an index
of the low root bits of huff . This is saved in low to check for when a
new sub - table should be started . drop is zero when the root table is
being filled , and drop is root when sub - tables are being filled .
When a new sub - table is needed , it is necessary to look ahead in the
code lengths to determine what size sub - table is needed . The length
counts are used for this , and so count [ ] is decremented as codes are
entered in the tables .
used keeps track of how many table entries have been allocated from the
provided * table space . It is checked for LENS and DIST tables against
the constants ENOUGH _LENS and ENOUGH _DISTS to guard against changes in
the initial root table size constants . See the comments in inftrees . h
for more information .
sym increments through all symbols , and the loop terminates when
all codes of length max , i . e . all codes , have been processed . This
routine permits incomplete codes , so another loop after this one fills
in the rest of the decoding tables with invalid code markers .
* /
/* set up for code type */
// poor man optimization - use if-else instead of switch,
// to avoid deopts in old v8
if ( type === CODES ) {
base = extra = work ; /* dummy value--not used */
end = 19 ;
} else if ( type === LENS ) {
base = lbase ;
base _index -= 257 ;
extra = lext ;
extra _index -= 257 ;
end = 256 ;
} else { /* DISTS */
base = dbase ;
extra = dext ;
end = - 1 ;
}
/* initialize opts for loop */
huff = 0 ; /* starting code */
sym = 0 ; /* starting code symbol */
len = min ; /* starting code length */
next = table _index ; /* current table to fill in */
curr = root ; /* current table index bits */
drop = 0 ; /* current bits to drop from code for index */
low = - 1 ; /* trigger new sub-table when len > root */
used = 1 << root ; /* use root table entries */
mask = used - 1 ; /* mask for comparing low */
/* check available table space */
if ( ( type === LENS && used > ENOUGH _LENS ) ||
( type === DISTS && used > ENOUGH _DISTS ) ) {
return 1 ;
}
var i = 0 ;
/* process all codes and make table entries */
for ( ; ; ) {
i ++ ;
/* create table entry */
here _bits = len - drop ;
if ( work [ sym ] < end ) {
here _op = 0 ;
here _val = work [ sym ] ;
}
else if ( work [ sym ] > end ) {
here _op = extra [ extra _index + work [ sym ] ] ;
here _val = base [ base _index + work [ sym ] ] ;
}
else {
here _op = 32 + 64 ; /* end of block */
here _val = 0 ;
}
/* replicate for those indices with low len bits equal to huff */
incr = 1 << ( len - drop ) ;
fill = 1 << curr ;
min = fill ; /* save offset to next table */
do {
fill -= incr ;
table [ next + ( huff >> drop ) + fill ] = ( here _bits << 24 ) | ( here _op << 16 ) | here _val | 0 ;
} while ( fill !== 0 ) ;
/* backwards increment the len-bit code huff */
incr = 1 << ( len - 1 ) ;
while ( huff & incr ) {
incr >>= 1 ;
}
if ( incr !== 0 ) {
huff &= incr - 1 ;
huff += incr ;
} else {
huff = 0 ;
}
/* go to next symbol, update count, len */
sym ++ ;
if ( -- count [ len ] === 0 ) {
if ( len === max ) { break ; }
len = lens [ lens _index + work [ sym ] ] ;
}
/* create new sub-table if needed */
if ( len > root && ( huff & mask ) !== low ) {
/* if first time, transition to sub-tables */
if ( drop === 0 ) {
drop = root ;
}
/* increment past last table */
next += min ; /* here min is 1 << curr */
/* determine length of next table */
curr = len - drop ;
left = 1 << curr ;
while ( curr + drop < max ) {
left -= count [ curr + drop ] ;
if ( left <= 0 ) { break ; }
curr ++ ;
left <<= 1 ;
}
/* check for enough space */
used += 1 << curr ;
if ( ( type === LENS && used > ENOUGH _LENS ) ||
( type === DISTS && used > ENOUGH _DISTS ) ) {
return 1 ;
}
/* point entry in root table to sub-table */
low = huff & mask ;
/ * t a b l e . o p [ l o w ] = c u r r ;
table . bits [ low ] = root ;
table . val [ low ] = next - opts . table _index ; * /
table [ low ] = ( root << 24 ) | ( curr << 16 ) | ( next - table _index ) | 0 ;
}
}
/ * f i l l i n r e m a i n i n g t a b l e e n t r y i f c o d e i s i n c o m p l e t e ( g u a r a n t e e d t o h a v e
at most one remaining entry , since if the code is incomplete , the
maximum code length that was allowed to get this far is one bit ) * /
if ( huff !== 0 ) {
//table.op[next + huff] = 64; /* invalid code marker */
//table.bits[next + huff] = len - drop;
//table.val[next + huff] = 0;
table [ next + huff ] = ( ( len - drop ) << 24 ) | ( 64 << 16 ) | 0 ;
}
/* set return parameters */
//opts.table_index += used;
opts . bits = root ;
return 0 ;
} ;
} , { "../utils/common" : 24 } ] , 34 : [ function ( require , module , exports ) {
'use strict' ;
module . exports = {
2 : 'need dictionary' , /* Z_NEED_DICT 2 */
1 : 'stream end' , /* Z_STREAM_END 1 */
0 : '' , /* Z_OK 0 */
'-1' : 'file error' , /* Z_ERRNO (-1) */
'-2' : 'stream error' , /* Z_STREAM_ERROR (-2) */
'-3' : 'data error' , /* Z_DATA_ERROR (-3) */
'-4' : 'insufficient memory' , /* Z_MEM_ERROR (-4) */
'-5' : 'buffer error' , /* Z_BUF_ERROR (-5) */
'-6' : 'incompatible version' /* Z_VERSION_ERROR (-6) */
} ;
} , { } ] , 35 : [ function ( require , module , exports ) {
'use strict' ;
var utils = require ( '../utils/common' ) ;
/* Public constants ==========================================================*/
/* ===========================================================================*/
//var Z_FILTERED = 1;
//var Z_HUFFMAN_ONLY = 2;
//var Z_RLE = 3;
var Z _FIXED = 4 ;
//var Z_DEFAULT_STRATEGY = 0;
/* Possible values of the data_type field (though see inflate()) */
var Z _BINARY = 0 ;
var Z _TEXT = 1 ;
//var Z_ASCII = 1; // = Z_TEXT
var Z _UNKNOWN = 2 ;
/*============================================================================*/
function zero ( buf ) { var len = buf . length ; while ( -- len >= 0 ) { buf [ len ] = 0 ; } }
// From zutil.h
var STORED _BLOCK = 0 ;
var STATIC _TREES = 1 ;
var DYN _TREES = 2 ;
/* The three kinds of block type */
var MIN _MATCH = 3 ;
var MAX _MATCH = 258 ;
/* The minimum and maximum match lengths */
// From deflate.h
/ * = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
* Internal compression state .
* /
var LENGTH _CODES = 29 ;
/* number of length codes, not counting the special END_BLOCK code */
var LITERALS = 256 ;
/* number of literal bytes 0..255 */
var L _CODES = LITERALS + 1 + LENGTH _CODES ;
/* number of Literal or Length codes, including the END_BLOCK code */
var D _CODES = 30 ;
/* number of distance codes */
var BL _CODES = 19 ;
/* number of codes used to transfer the bit lengths */
var HEAP _SIZE = 2 * L _CODES + 1 ;
/* maximum heap size */
var MAX _BITS = 15 ;
/* All codes must not exceed MAX_BITS bits */
var Buf _size = 16 ;
/* size of bit buffer in bi_buf */
/ * = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
* Constants
* /
var MAX _BL _BITS = 7 ;
/* Bit length codes must not exceed MAX_BL_BITS bits */
var END _BLOCK = 256 ;
/* end of block literal code */
var REP _3 _6 = 16 ;
/* repeat previous bit length 3-6 times (2 bits of repeat count) */
var REPZ _3 _10 = 17 ;
/* repeat a zero length 3-10 times (3 bits of repeat count) */
var REPZ _11 _138 = 18 ;
/* repeat a zero length 11-138 times (7 bits of repeat count) */
/* eslint-disable comma-spacing,array-bracket-spacing */
var extra _lbits = /* extra bits for each length code */
[ 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 , 1 , 1 , 1 , 2 , 2 , 2 , 2 , 3 , 3 , 3 , 3 , 4 , 4 , 4 , 4 , 5 , 5 , 5 , 5 , 0 ] ;
var extra _dbits = /* extra bits for each distance code */
[ 0 , 0 , 0 , 0 , 1 , 1 , 2 , 2 , 3 , 3 , 4 , 4 , 5 , 5 , 6 , 6 , 7 , 7 , 8 , 8 , 9 , 9 , 10 , 10 , 11 , 11 , 12 , 12 , 13 , 13 ] ;
var extra _blbits = /* extra bits for each bit length code */
[ 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 2 , 3 , 7 ] ;
var bl _order =
[ 16 , 17 , 18 , 0 , 8 , 7 , 9 , 6 , 10 , 5 , 11 , 4 , 12 , 3 , 13 , 2 , 14 , 1 , 15 ] ;
/* eslint-enable comma-spacing,array-bracket-spacing */
/ * T h e l e n g t h s o f t h e b i t l e n g t h c o d e s a r e s e n t i n o r d e r o f d e c r e a s i n g
* probability , to avoid transmitting the lengths for unused bit length codes .
* /
/ * = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
* Local data . These are initialized only once .
* /
// We pre-fill arrays with 0 to avoid uninitialized gaps
var DIST _CODE _LEN = 512 ; /* see definition of array dist_code below */
// !!!! Use flat array insdead of structure, Freq = i*2, Len = i*2+1
var static _ltree = new Array ( ( L _CODES + 2 ) * 2 ) ;
zero ( static _ltree ) ;
/ * T h e s t a t i c l i t e r a l t r e e . S i n c e t h e b i t l e n g t h s a r e i m p o s e d , t h e r e i s n o
* need for the L _CODES extra codes used during heap construction . However
* The codes 286 and 287 are needed to build a canonical tree ( see _tr _init
* below ) .
* /
var static _dtree = new Array ( D _CODES * 2 ) ;
zero ( static _dtree ) ;
/ * T h e s t a t i c d i s t a n c e t r e e . ( A c t u a l l y a t r i v i a l t r e e s i n c e a l l c o d e s u s e
* 5 bits . )
* /
var _dist _code = new Array ( DIST _CODE _LEN ) ;
zero ( _dist _code ) ;
/ * D i s t a n c e c o d e s . T h e f i r s t 2 5 6 v a l u e s c o r r e s p o n d t o t h e d i s t a n c e s
* 3 . . 258 , the last 256 values correspond to the top 8 bits of
* the 15 bit distances .
* /
var _length _code = new Array ( MAX _MATCH - MIN _MATCH + 1 ) ;
zero ( _length _code ) ;
/* length code for each normalized match length (0 == MIN_MATCH) */
var base _length = new Array ( LENGTH _CODES ) ;
zero ( base _length ) ;
/* First normalized length for each code (0 = MIN_MATCH) */
var base _dist = new Array ( D _CODES ) ;
zero ( base _dist ) ;
/* First normalized distance for each code (0 = distance of 1) */
/* Descriptor for one of the pre-computed static Huffman trees. */
function StaticTreeDesc(stree, ebits, ebase, count, maxLen) {
  this.static_tree = stree;  /* static tree or NULL */
  this.extra_bits = ebits;   /* extra bits for each code or NULL */
  this.extra_base = ebase;   /* base index for extra_bits */
  this.elems = count;        /* max number of elements in the tree */
  this.max_length = maxLen;  /* max bit length for the codes */

  /* truthy (the length) when `stree` carries real data; always assigned so
   * the object shape stays monomorphic whether or not a static tree exists */
  this.has_stree = stree && stree.length;
}
/* Descriptors for the three static trees; filled in lazily by
 * tr_static_init() the first time _tr_init() runs. */
var static_l_desc;
var static_d_desc;
var static_bl_desc;
/* Descriptor for a per-stream dynamic Huffman tree, paired with the
 * matching static-tree descriptor. */
function TreeDesc(tree, sdesc) {
  this.dyn_tree = tree;   /* the dynamic tree */
  this.max_code = 0;      /* largest code with non zero frequency */
  this.stat_desc = sdesc; /* the corresponding static tree */
}
/* Map a match distance (minus one) to its distance code, via the
 * two-part _dist_code lookup table. */
function d_code(dist) {
  if (dist < 256) {
    return _dist_code[dist];
  }
  /* distances >= 256 are indexed by their top 8 bits */
  return _dist_code[256 + (dist >>> 7)];
}
/* ===========================================================================
 * Output a short LSB first on the stream.
 * IN assertion: there is enough room in pendingBuf.
 */
function put_short(s, w) {
  s.pending_buf[s.pending] = w & 0xff;            /* low byte first */
  s.pending_buf[s.pending + 1] = (w >>> 8) & 0xff; /* then high byte */
  s.pending += 2;
}
/* ===========================================================================
 * Send a value on a given number of bits.
 * IN assertion: length <= 16 and value fits in length bits.
 */
function send_bits(s, value, length) {
  var room = Buf_size - s.bi_valid; /* free bits left in the 16-bit buffer */

  if (length > room) {
    /* value straddles the buffer: fill it, flush, keep the remainder */
    s.bi_buf |= (value << s.bi_valid) & 0xffff;
    put_short(s, s.bi_buf);
    s.bi_buf = value >> room;
    s.bi_valid = length - room;
  } else {
    s.bi_buf |= (value << s.bi_valid) & 0xffff;
    s.bi_valid += length;
  }
}
/* Emit the Huffman code for symbol `c` from `tree` (flat layout:
 * Code at c*2, Len at c*2+1). */
function send_code(s, c, tree) {
  var i = c * 2;
  send_bits(s, tree[i]/*.Code*/, tree[i + 1]/*.Len*/);
}
/* ===========================================================================
 * Reverse the first len bits of a code, using straightforward code (a faster
 * method would use a table)
 * IN assertion: 1 <= len <= 15
 */
function bi_reverse(code, len) {
  var reversed = 0;
  while (len-- > 0) {
    /* shift the accumulator, then append the next low bit of code */
    reversed = (reversed << 1) | (code & 1);
    code >>>= 1;
  }
  return reversed;
}
/* ===========================================================================
 * Flush the bit buffer, keeping at most 7 bits in it.
 */
function bi_flush(s) {
  if (s.bi_valid === 16) {
    /* buffer completely full: emit both bytes */
    put_short(s, s.bi_buf);
    s.bi_buf = 0;
    s.bi_valid = 0;
    return;
  }
  if (s.bi_valid >= 8) {
    /* at least one whole byte ready: emit the low byte only */
    s.pending_buf[s.pending] = s.bi_buf & 0xff;
    s.pending += 1;
    s.bi_buf >>= 8;
    s.bi_valid -= 8;
  }
}
/* ===========================================================================
 * Compute the optimal bit lengths for a tree and update the total bit length
 * for the current block.
 * IN assertion: the fields freq and dad are set, heap[heap_max] and
 * above are the tree nodes sorted by increasing frequency.
 * OUT assertions: the field len is set to the optimal bit length, the
 * array bl_count contains the frequencies for each bit length.
 * The length opt_len is updated; static_len is also updated if stree is
 * not null.
 */
function gen_bitlen(s, desc)
// deflate_state *s;
// tree_desc *desc; /* the tree descriptor */
{
  var tree = desc.dyn_tree;
  var max_code = desc.max_code;
  var stree = desc.stat_desc.static_tree;
  var has_stree = desc.stat_desc.has_stree;
  var extra = desc.stat_desc.extra_bits;
  var base = desc.stat_desc.extra_base;
  var max_length = desc.stat_desc.max_length;
  var h;              /* heap index */
  var n, m;           /* iterate over the tree elements */
  var bits;           /* bit length */
  var xbits;          /* extra bits */
  var f;              /* frequency */
  var overflow = 0;   /* number of elements with bit length too large */

  for (bits = 0; bits <= MAX_BITS; bits++) {
    s.bl_count[bits] = 0;
  }

  /* In a first pass, compute the optimal bit lengths (which may
   * overflow in the case of the bit length tree).
   */
  tree[s.heap[s.heap_max] * 2 + 1]/*.Len*/ = 0; /* root of the heap */

  for (h = s.heap_max + 1; h < HEAP_SIZE; h++) {
    n = s.heap[h];
    /* depth of n = depth of its father + 1, clamped to max_length */
    bits = tree[tree[n * 2 + 1]/*.Dad*/ * 2 + 1]/*.Len*/ + 1;
    if (bits > max_length) {
      bits = max_length;
      overflow++;
    }
    tree[n * 2 + 1]/*.Len*/ = bits;
    /* We overwrite tree[n].Dad which is no longer needed */

    if (n > max_code) { continue; } /* not a leaf node */

    s.bl_count[bits]++;
    xbits = 0;
    if (n >= base) {
      xbits = extra[n - base];
    }
    f = tree[n * 2]/*.Freq*/;
    s.opt_len += f * (bits + xbits);
    if (has_stree) {
      s.static_len += f * (stree[n * 2 + 1]/*.Len*/ + xbits);
    }
  }
  if (overflow === 0) { return; }

  /* This happens for example on obj2 and pic of the Calgary corpus */

  /* Find the first bit length which could increase: */
  do {
    bits = max_length - 1;
    while (s.bl_count[bits] === 0) { bits--; }
    s.bl_count[bits]--;        /* move one leaf down the tree */
    s.bl_count[bits + 1] += 2; /* move one overflow item as its brother */
    s.bl_count[max_length]--;
    /* The brother of the overflow item also moves one step up,
     * but this does not affect bl_count[max_length]
     */
    overflow -= 2;
  } while (overflow > 0);

  /* Now recompute all bit lengths, scanning in increasing frequency.
   * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all
   * lengths instead of fixing only the wrong ones. This idea is taken
   * from 'ar' written by Haruhiko Okumura.)
   */
  for (bits = max_length; bits !== 0; bits--) {
    n = s.bl_count[bits];
    while (n !== 0) {
      m = s.heap[--h];
      if (m > max_code) { continue; } /* internal node: skip */
      if (tree[m * 2 + 1]/*.Len*/ !== bits) {
        // Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits));
        s.opt_len += (bits - tree[m * 2 + 1]/*.Len*/) * tree[m * 2]/*.Freq*/;
        tree[m * 2 + 1]/*.Len*/ = bits;
      }
      n--;
    }
  }
}
/* ===========================================================================
 * Generate the codes for a given tree and bit counts (which need not be
 * optimal).
 * IN assertion: the array bl_count contains the bit length statistics for
 * the given tree and the field len is set for all tree elements.
 * OUT assertion: the field code is set for all tree elements of non
 * zero code length.
 */
function gen_codes(tree, max_code, bl_count)
// ct_data *tree;   /* the tree to decorate */
// int max_code;    /* largest code with non zero frequency */
// ushf *bl_count;  /* number of codes at each bit length */
{
  var next_code = new Array(MAX_BITS + 1); /* next code value for each bit length */
  var code = 0;                            /* running code value */
  var bits;                                /* bit index */
  var n;                                   /* code index */

  /* The distribution counts are first used to generate the code values
   * without bit reversal.
   */
  for (bits = 1; bits <= MAX_BITS; bits++) {
    next_code[bits] = code = (code + bl_count[bits - 1]) << 1;
  }
  /* Check that the bit counts in bl_count are consistent. The last code
   * must be all ones.
   */
  //Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1,
  //        "inconsistent bit counts");

  for (n = 0; n <= max_code; n++) {
    var len = tree[n * 2 + 1]/*.Len*/;
    if (len === 0) { continue; }
    /* Now reverse the bits (DEFLATE stores codes LSB-first) */
    tree[n * 2]/*.Code*/ = bi_reverse(next_code[len]++, len);
  }
}
/* ===========================================================================
 * Initialize the various 'constant' tables.
 */
function tr_static_init() {
  var n;        /* iterates over tree elements */
  var bits;     /* bit counter */
  var length;   /* length value */
  var code;     /* code value */
  var dist;     /* distance index */
  var bl_count = new Array(MAX_BITS + 1);
  /* number of codes at each bit length for an optimal tree */

  // do check in _tr_init()
  //if (static_init_done) return;

  /* For some embedded targets, global variables are not initialized: */
  /*#ifdef NO_INIT_GLOBAL_POINTERS
  static_l_desc.static_tree = static_ltree;
  static_l_desc.extra_bits = extra_lbits;
  static_d_desc.static_tree = static_dtree;
  static_d_desc.extra_bits = extra_dbits;
  static_bl_desc.extra_bits = extra_blbits;
  #endif*/

  /* Initialize the mapping length (0..255) -> length code (0..28) */
  length = 0;
  for (code = 0; code < LENGTH_CODES - 1; code++) {
    base_length[code] = length;
    for (n = 0; n < (1 << extra_lbits[code]); n++) {
      _length_code[length++] = code;
    }
  }
  //Assert (length == 256, "tr_static_init: length != 256");
  /* Note that the length 255 (match length 258) can be represented
   * in two different ways: code 284 + 5 bits or code 285, so we
   * overwrite length_code[255] to use the best encoding:
   */
  _length_code[length - 1] = code;

  /* Initialize the mapping dist (0..32K) -> dist code (0..29) */
  dist = 0;
  for (code = 0; code < 16; code++) {
    base_dist[code] = dist;
    for (n = 0; n < (1 << extra_dbits[code]); n++) {
      _dist_code[dist++] = code;
    }
  }
  //Assert (dist == 256, "tr_static_init: dist != 256");
  dist >>= 7; /* from now on, all distances are divided by 128 */
  for (; code < D_CODES; code++) {
    base_dist[code] = dist << 7;
    for (n = 0; n < (1 << (extra_dbits[code] - 7)); n++) {
      _dist_code[256 + dist++] = code;
    }
  }
  //Assert (dist == 256, "tr_static_init: 256+dist != 512");

  /* Construct the codes of the static literal tree */
  for (bits = 0; bits <= MAX_BITS; bits++) {
    bl_count[bits] = 0;
  }

  /* Fixed literal/length code lengths from the DEFLATE spec:
   * 0..143 -> 8 bits, 144..255 -> 9, 256..279 -> 7, 280..287 -> 8 */
  n = 0;
  while (n <= 143) {
    static_ltree[n * 2 + 1]/*.Len*/ = 8;
    n++;
    bl_count[8]++;
  }
  while (n <= 255) {
    static_ltree[n * 2 + 1]/*.Len*/ = 9;
    n++;
    bl_count[9]++;
  }
  while (n <= 279) {
    static_ltree[n * 2 + 1]/*.Len*/ = 7;
    n++;
    bl_count[7]++;
  }
  while (n <= 287) {
    static_ltree[n * 2 + 1]/*.Len*/ = 8;
    n++;
    bl_count[8]++;
  }
  /* Codes 286 and 287 do not exist, but we must include them in the
   * tree construction to get a canonical Huffman tree (longest code
   * all ones)
   */
  gen_codes(static_ltree, L_CODES + 1, bl_count);

  /* The static distance tree is trivial: */
  for (n = 0; n < D_CODES; n++) {
    static_dtree[n * 2 + 1]/*.Len*/ = 5;
    static_dtree[n * 2]/*.Code*/ = bi_reverse(n, 5);
  }

  // Now data ready and we can init static trees
  static_l_desc = new StaticTreeDesc(static_ltree, extra_lbits, LITERALS + 1, L_CODES, MAX_BITS);
  static_d_desc = new StaticTreeDesc(static_dtree, extra_dbits, 0, D_CODES, MAX_BITS);
  static_bl_desc = new StaticTreeDesc(new Array(0), extra_blbits, 0, BL_CODES, MAX_BL_BITS);

  //static_init_done = true;
}
/* ===========================================================================
 * Initialize a new block.
 */
function init_block(s) {
  var i;

  /* Reset all code frequencies of the three dynamic trees. */
  for (i = 0; i < L_CODES; i++) { s.dyn_ltree[i * 2]/*.Freq*/ = 0; }
  for (i = 0; i < D_CODES; i++) { s.dyn_dtree[i * 2]/*.Freq*/ = 0; }
  for (i = 0; i < BL_CODES; i++) { s.bl_tree[i * 2]/*.Freq*/ = 0; }

  /* Every block ends with an END_BLOCK symbol, so count it up front. */
  s.dyn_ltree[END_BLOCK * 2]/*.Freq*/ = 1;

  s.opt_len = 0;
  s.static_len = 0;
  s.last_lit = 0;
  s.matches = 0;
}
/* ===========================================================================
 * Flush the bit buffer and align the output on a byte boundary
 */
function bi_windup(s) {
  var bits = s.bi_valid;
  if (bits > 8) {
    put_short(s, s.bi_buf);        /* two bytes pending */
  } else if (bits > 0) {
    s.pending_buf[s.pending] = s.bi_buf; /* a single partial byte */
    s.pending += 1;
  }
  /* buffer is now empty either way */
  s.bi_buf = 0;
  s.bi_valid = 0;
}
/* ===========================================================================
 * Copy a stored block, storing first the length and its
 * one's complement if requested.
 */
function copy_block(s, buf, len, header)
//DeflateState *s;
//charf *buf;    /* the input data */
//unsigned len;  /* its length */
//int header;    /* true if block header must be written */
{
  bi_windup(s); /* align on byte boundary */

  if (header) {
    put_short(s, len);   /* LEN */
    put_short(s, ~len);  /* NLEN: one's complement of LEN */
  }
  // while (len--) {
  //   put_byte(s, *buf++);
  // }
  /* bulk copy of `len` window bytes starting at offset `buf` */
  utils.arraySet(s.pending_buf, s.window, buf, len, s.pending);
  s.pending += len;
}
/* ===========================================================================
 * Compares two subtrees, using the tree depth as tie breaker when
 * the subtrees have equal frequency. This minimizes the worst case length.
 */
function smaller(tree, n, m, depth) {
  var freqN = tree[n * 2]/*.Freq*/;
  var freqM = tree[m * 2]/*.Freq*/;
  return freqN < freqM || (freqN === freqM && depth[n] <= depth[m]);
}
/* ===========================================================================
 * Restore the heap property by moving down the tree starting at node k,
 * exchanging a node with the smallest of its two sons if necessary, stopping
 * when the heap property is re-established (each father smaller than its
 * two sons).
 */
function pqdownheap(s, tree, k)
// deflate_state *s;
// ct_data *tree; /* the tree to restore */
// int k;         /* node to move down */
{
  var v = s.heap[k]; /* element being sifted down */

  for (var j = k << 1; j <= s.heap_len; j <<= 1) {
    /* Set j to the smallest of the two sons: */
    if (j < s.heap_len &&
        smaller(tree, s.heap[j + 1], s.heap[j], s.depth)) {
      j++;
    }
    /* Exit if v is smaller than both sons */
    if (smaller(tree, v, s.heap[j], s.depth)) { break; }

    /* Exchange v with the smallest son and continue down the tree */
    s.heap[k] = s.heap[j];
    k = j;
  }
  s.heap[k] = v;
}
// inlined manually
// var SMALLEST = 1;

/* ===========================================================================
 * Send the block data compressed using the given Huffman trees
 */
function compress_block(s, ltree, dtree)
// deflate_state *s;
// const ct_data *ltree; /* literal tree */
// const ct_data *dtree; /* distance tree */
{
  var dist;      /* distance of matched string */
  var lc;        /* match length or unmatched char (if dist == 0) */
  var lx = 0;    /* running index in l_buf */
  var code;      /* the code to send */
  var extra;     /* number of extra bits to send */

  if (s.last_lit !== 0) {
    do {
      /* distance is stored big-endian as two bytes in the d_buf overlay */
      dist = (s.pending_buf[s.d_buf + lx * 2] << 8) | (s.pending_buf[s.d_buf + lx * 2 + 1]);
      lc = s.pending_buf[s.l_buf + lx];
      lx++;

      if (dist === 0) {
        send_code(s, lc, ltree); /* send a literal byte */
      } else {
        /* Here, lc is the match length - MIN_MATCH */
        code = _length_code[lc];
        send_code(s, code + LITERALS + 1, ltree); /* send the length code */
        extra = extra_lbits[code];
        if (extra !== 0) {
          lc -= base_length[code];
          send_bits(s, lc, extra); /* send the extra length bits */
        }
        dist--; /* dist is now the match distance - 1 */
        code = d_code(dist);
        //Assert (code < D_CODES, "bad d_code");

        send_code(s, code, dtree); /* send the distance code */
        extra = extra_dbits[code];
        if (extra !== 0) {
          dist -= base_dist[code];
          send_bits(s, dist, extra); /* send the extra distance bits */
        }
      } /* literal or match pair ? */

      /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */
      //Assert((uInt)(s->pending) < s->lit_bufsize + 2*lx,
      //       "pendingBuf overflow");

    } while (lx < s.last_lit);
  }

  send_code(s, END_BLOCK, ltree);
}
/* ===========================================================================
 * Construct one Huffman tree and assigns the code bit strings and lengths.
 * Update the total bit length for the current block.
 * IN assertion: the field freq is set for all tree elements.
 * OUT assertions: the fields len and code are set to the optimal bit length
 * and corresponding code. The length opt_len is updated; static_len is
 * also updated if stree is not null. The field max_code is set.
 */
function build_tree(s, desc)
// deflate_state *s;
// tree_desc *desc; /* the tree descriptor */
{
  var tree = desc.dyn_tree;
  var stree = desc.stat_desc.static_tree;
  var has_stree = desc.stat_desc.has_stree;
  var elems = desc.stat_desc.elems;
  var n, m;          /* iterate over heap elements */
  var max_code = -1; /* largest code with non zero frequency */
  var node;          /* new node being created */

  /* Construct the initial heap, with least frequent element in
   * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1].
   * heap[0] is not used.
   */
  s.heap_len = 0;
  s.heap_max = HEAP_SIZE;

  for (n = 0; n < elems; n++) {
    if (tree[n * 2]/*.Freq*/ !== 0) {
      s.heap[++s.heap_len] = max_code = n;
      s.depth[n] = 0;
    } else {
      tree[n * 2 + 1]/*.Len*/ = 0;
    }
  }

  /* The pkzip format requires that at least one distance code exists,
   * and that at least one bit should be sent even if there is only one
   * possible code. So to avoid special checks later on we force at least
   * two codes of non zero frequency.
   */
  while (s.heap_len < 2) {
    node = s.heap[++s.heap_len] = (max_code < 2 ? ++max_code : 0);
    tree[node * 2]/*.Freq*/ = 1;
    s.depth[node] = 0;
    s.opt_len--;
    if (has_stree) {
      s.static_len -= stree[node * 2 + 1]/*.Len*/;
    }
    /* node is 0 or 1 so it does not have extra bits */
  }
  desc.max_code = max_code;

  /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree,
   * establish sub-heaps of increasing lengths:
   */
  for (n = (s.heap_len >> 1/*int /2*/); n >= 1; n--) { pqdownheap(s, tree, n); }

  /* Construct the Huffman tree by repeatedly combining the least two
   * frequent nodes.
   */
  node = elems; /* next internal node of the tree */
  do {
    //pqremove(s, tree, n); /* n = node of least frequency */
    /*** pqremove ***/
    n = s.heap[1/*SMALLEST*/];
    s.heap[1/*SMALLEST*/] = s.heap[s.heap_len--];
    pqdownheap(s, tree, 1/*SMALLEST*/);
    /***/

    m = s.heap[1/*SMALLEST*/]; /* m = node of next least frequency */

    s.heap[--s.heap_max] = n; /* keep the nodes sorted by frequency */
    s.heap[--s.heap_max] = m;

    /* Create a new node father of n and m */
    tree[node * 2]/*.Freq*/ = tree[n * 2]/*.Freq*/ + tree[m * 2]/*.Freq*/;
    s.depth[node] = (s.depth[n] >= s.depth[m] ? s.depth[n] : s.depth[m]) + 1;
    tree[n * 2 + 1]/*.Dad*/ = tree[m * 2 + 1]/*.Dad*/ = node;

    /* and insert the new node in the heap */
    s.heap[1/*SMALLEST*/] = node++;
    pqdownheap(s, tree, 1/*SMALLEST*/);

  } while (s.heap_len >= 2);

  s.heap[--s.heap_max] = s.heap[1/*SMALLEST*/];

  /* At this point, the fields freq and dad are set. We can now
   * generate the bit lengths.
   */
  gen_bitlen(s, desc);

  /* The field len is now set, we can generate the bit codes */
  gen_codes(tree, max_code, s.bl_count);
}
/* ===========================================================================
 * Scan a literal or distance tree to determine the frequencies of the codes
 * in the bit length tree.
 */
function scan_tree(s, tree, max_code)
// deflate_state *s;
// ct_data *tree;   /* the tree to be scanned */
// int max_code;    /* and its largest code of non zero frequency */
{
  var n;                     /* iterates over all tree elements */
  var prevlen = -1;          /* last emitted length */
  var curlen;                /* length of current code */

  var nextlen = tree[0 * 2 + 1]/*.Len*/; /* length of next code */

  var count = 0;             /* repeat count of the current code */
  var max_count = 7;         /* max repeat count */
  var min_count = 4;         /* min repeat count */

  if (nextlen === 0) {
    max_count = 138;
    min_count = 3;
  }
  tree[(max_code + 1) * 2 + 1]/*.Len*/ = 0xffff; /* guard */

  for (n = 0; n <= max_code; n++) {
    curlen = nextlen;
    nextlen = tree[(n + 1) * 2 + 1]/*.Len*/;

    if (++count < max_count && curlen === nextlen) {
      continue; /* still inside a run of equal lengths */

    } else if (count < min_count) {
      /* run too short for a repeat code: count each length literally */
      s.bl_tree[curlen * 2]/*.Freq*/ += count;

    } else if (curlen !== 0) {
      if (curlen !== prevlen) { s.bl_tree[curlen * 2]/*.Freq*/++; }
      s.bl_tree[REP_3_6 * 2]/*.Freq*/++;   /* repeat previous length 3-6 times */

    } else if (count <= 10) {
      s.bl_tree[REPZ_3_10 * 2]/*.Freq*/++; /* repeat zero 3-10 times */

    } else {
      s.bl_tree[REPZ_11_138 * 2]/*.Freq*/++; /* repeat zero 11-138 times */
    }

    count = 0;
    prevlen = curlen;

    if (nextlen === 0) {
      max_count = 138;
      min_count = 3;
    } else if (curlen === nextlen) {
      max_count = 6;
      min_count = 3;
    } else {
      max_count = 7;
      min_count = 4;
    }
  }
}
/* ===========================================================================
 * Send a literal or distance tree in compressed form, using the codes in
 * bl_tree.
 */
function send_tree(s, tree, max_code)
// deflate_state *s;
// ct_data *tree;   /* the tree to be scanned */
// int max_code;    /* and its largest code of non zero frequency */
{
  var n;                     /* iterates over all tree elements */
  var prevlen = -1;          /* last emitted length */
  var curlen;                /* length of current code */

  var nextlen = tree[0 * 2 + 1]/*.Len*/; /* length of next code */

  var count = 0;             /* repeat count of the current code */
  var max_count = 7;         /* max repeat count */
  var min_count = 4;         /* min repeat count */

  /* tree[max_code+1].Len = -1; */ /* guard already set */
  if (nextlen === 0) {
    max_count = 138;
    min_count = 3;
  }

  for (n = 0; n <= max_code; n++) {
    curlen = nextlen;
    nextlen = tree[(n + 1) * 2 + 1]/*.Len*/;

    if (++count < max_count && curlen === nextlen) {
      continue; /* run continues */

    } else if (count < min_count) {
      /* run too short for a repeat code: emit each length literally */
      do { send_code(s, curlen, s.bl_tree); } while (--count !== 0);

    } else if (curlen !== 0) {
      if (curlen !== prevlen) {
        send_code(s, curlen, s.bl_tree);
        count--;
      }
      //Assert(count >= 3 && count <= 6, " 3_6?");
      send_code(s, REP_3_6, s.bl_tree);
      send_bits(s, count - 3, 2);

    } else if (count <= 10) {
      send_code(s, REPZ_3_10, s.bl_tree);
      send_bits(s, count - 3, 3);

    } else {
      send_code(s, REPZ_11_138, s.bl_tree);
      send_bits(s, count - 11, 7);
    }

    count = 0;
    prevlen = curlen;
    if (nextlen === 0) {
      max_count = 138;
      min_count = 3;
    } else if (curlen === nextlen) {
      max_count = 6;
      min_count = 3;
    } else {
      max_count = 7;
      min_count = 4;
    }
  }
}
/* ===========================================================================
 * Construct the Huffman tree for the bit lengths and return the index in
 * bl_order of the last bit length code to send.
 */
function build_bl_tree(s) {
  var max_blindex; /* index of last bit length code of non zero freq */

  /* Determine the bit length frequencies for literal and distance trees */
  scan_tree(s, s.dyn_ltree, s.l_desc.max_code);
  scan_tree(s, s.dyn_dtree, s.d_desc.max_code);

  /* Build the bit length tree: */
  build_tree(s, s.bl_desc);
  /* opt_len now includes the length of the tree representations, except
   * the lengths of the bit lengths codes and the 5+5+4 bits for the counts.
   */

  /* Determine the number of bit length codes to send. The pkzip format
   * requires that at least 4 bit length codes be sent. (appnote.txt says
   * 3 but the actual value used is 4.)
   */
  max_blindex = BL_CODES - 1;
  while (max_blindex >= 3 &&
         s.bl_tree[bl_order[max_blindex] * 2 + 1]/*.Len*/ === 0) {
    max_blindex--;
  }

  /* Update opt_len to include the bit length tree and counts */
  s.opt_len += 3 * (max_blindex + 1) + 5 + 5 + 4;

  return max_blindex;
}
/* ===========================================================================
 * Send the header for a block using dynamic Huffman trees: the counts, the
 * lengths of the bit length codes, the literal tree and the distance tree.
 * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
 */
function send_all_trees(s, lcodes, dcodes, blcodes)
// deflate_state *s;
// int lcodes, dcodes, blcodes; /* number of codes for each tree */
{
  /* Emit the three biased code counts (HLIT, HDIST, HCLEN). */
  send_bits(s, lcodes - 257, 5); /* not +255 as stated in appnote.txt */
  send_bits(s, dcodes - 1, 5);
  send_bits(s, blcodes - 4, 4);  /* not -3 as stated in appnote.txt */

  /* Bit-length code lengths, in the fixed bl_order permutation. */
  var rank = 0;
  while (rank < blcodes) {
    send_bits(s, s.bl_tree[bl_order[rank] * 2 + 1]/*.Len*/, 3);
    rank++;
  }

  /* The two main trees, run-length coded with the bit-length codes. */
  send_tree(s, s.dyn_ltree, lcodes - 1); /* literal tree */
  send_tree(s, s.dyn_dtree, dcodes - 1); /* distance tree */
}
/* ===========================================================================
 * Check if the data type is TEXT or BINARY, using the following algorithm:
 * - TEXT if the two conditions below are satisfied:
 *   a) There are no non-portable control characters belonging to the
 *      "black list" (0..6, 14..25, 28..31).
 *   b) There is at least one printable character belonging to the
 *      "white list" (9 {TAB}, 10 {LF}, 13 {CR}, 32..255).
 * - BINARY otherwise.
 * - The following partially-portable control characters form a
 *   "gray list" that is ignored in this detection algorithm:
 *   (7 {BEL}, 8 {BS}, 11 {VT}, 12 {FF}, 26 {SUB}, 27 {ESC}).
 * IN assertion: the fields Freq of dyn_ltree are set.
 */
function detect_data_type(s) {
  /* mask of the black-listed bytes: bits 0..6, 14..25, 28..31 set
   * 0xf3ffc07f = binary 11110011111111111100000001111111 */
  var mask = 0xf3ffc07f;
  var n;

  /* Any black-listed control character present => BINARY. */
  for (n = 0; n <= 31; n++) {
    if ((mask & 1) !== 0 && s.dyn_ltree[n * 2]/*.Freq*/ !== 0) {
      return Z_BINARY;
    }
    mask >>>= 1;
  }

  /* TAB, LF or CR present => TEXT. */
  if (s.dyn_ltree[9 * 2]/*.Freq*/ !== 0 || s.dyn_ltree[10 * 2]/*.Freq*/ !== 0 ||
      s.dyn_ltree[13 * 2]/*.Freq*/ !== 0) {
    return Z_TEXT;
  }
  /* Any printable byte (32..255) present => TEXT. */
  for (n = 32; n < LITERALS; n++) {
    if (s.dyn_ltree[n * 2]/*.Freq*/ !== 0) {
      return Z_TEXT;
    }
  }

  /* Stream is empty or contains only gray-listed bytes. */
  return Z_BINARY;
}
/* set once tr_static_init() has built the shared static tables */
var static_init_done = false;
/* ===========================================================================
 * Initialize the tree data structures for a new zlib stream.
 */
function _tr_init(s)
{
  /* Build the shared static tables exactly once per process. */
  if (!static_init_done) {
    tr_static_init();
    static_init_done = true;
  }

  /* Start with an empty bit buffer. */
  s.bi_buf = 0;
  s.bi_valid = 0;

  /* Fresh dynamic-tree descriptors bound to this stream's arrays. */
  s.l_desc = new TreeDesc(s.dyn_ltree, static_l_desc);
  s.d_desc = new TreeDesc(s.dyn_dtree, static_d_desc);
  s.bl_desc = new TreeDesc(s.bl_tree, static_bl_desc);

  /* Initialize the first block of the first file: */
  init_block(s);
}
/* ===========================================================================
 * Send a stored block
 */
function _tr_stored_block(s, buf, stored_len, last)
//DeflateState *s;
//charf *buf;       /* input block */
//ulg stored_len;   /* length of input block */
//int last;         /* one if this is the last block for a file */
{
  /* 3-bit block header: type STORED plus the "last block" flag. */
  var header = (STORED_BLOCK << 1) + (last ? 1 : 0);
  send_bits(s, header, 3);
  copy_block(s, buf, stored_len, true); /* with LEN/NLEN header */
}
/* ===========================================================================
 * Send one empty static block to give enough lookahead for inflate.
 * This takes 10 bits, of which 7 may remain in the bit buffer.
 */
function _tr_align(s) {
  send_bits(s, STATIC_TREES << 1, 3);     /* static block, not last */
  send_code(s, END_BLOCK, static_ltree);  /* immediately terminate it */
  bi_flush(s);
}
/* ===========================================================================
 * Determine the best encoding for the current block: dynamic trees, static
 * trees or store, and output the encoded block to the zip file.
 */
function _tr_flush_block(s, buf, stored_len, last)
//DeflateState *s;
//charf *buf;       /* input block, or NULL if too old */
//ulg stored_len;   /* length of input block */
//int last;         /* one if this is the last block for a file */
{
  var opt_lenb, static_lenb; /* opt_len and static_len in bytes */
  var max_blindex = 0;       /* index of last bit length code of non zero freq */

  /* Build the Huffman trees unless a stored block is forced */
  if (s.level > 0) {

    /* Check if the file is binary or text */
    if (s.strm.data_type === Z_UNKNOWN) {
      s.strm.data_type = detect_data_type(s);
    }

    /* Construct the literal and distance trees */
    build_tree(s, s.l_desc);
    build_tree(s, s.d_desc);
    /* At this point, opt_len and static_len are the total bit lengths of
     * the compressed block data, excluding the tree representations.
     */

    /* Build the bit length tree for the above two trees, and get the index
     * in bl_order of the last bit length code to send.
     */
    max_blindex = build_bl_tree(s);

    /* Determine the best encoding. Compute the block lengths in bytes
     * (+3 for the block header bits, rounded up). */
    opt_lenb = (s.opt_len + 3 + 7) >>> 3;
    static_lenb = (s.static_len + 3 + 7) >>> 3;

    if (static_lenb <= opt_lenb) { opt_lenb = static_lenb; }

  } else {
    // Assert(buf != (char*)0, "lost buf");
    opt_lenb = static_lenb = stored_len + 5; /* force a stored block */
  }

  if ((stored_len + 4 <= opt_lenb) && (buf !== -1)) {
    /* 4: two words for the lengths */

    /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.
     * Otherwise we can't have processed more than WSIZE input bytes since
     * the last block flush, because compression would have been
     * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
     * transform a block into a stored block.
     */
    _tr_stored_block(s, buf, stored_len, last);

  } else if (s.strategy === Z_FIXED || static_lenb === opt_lenb) {
    /* static trees win (or are forced by Z_FIXED) */
    send_bits(s, (STATIC_TREES << 1) + (last ? 1 : 0), 3);
    compress_block(s, static_ltree, static_dtree);

  } else {
    send_bits(s, (DYN_TREES << 1) + (last ? 1 : 0), 3);
    send_all_trees(s, s.l_desc.max_code + 1, s.d_desc.max_code + 1, max_blindex + 1);
    compress_block(s, s.dyn_ltree, s.dyn_dtree);
  }
  // Assert (s->compressed_len == s->bits_sent, "bad compressed size");
  /* The above check is made mod 2^32, for files larger than 512 MB
   * and uLong implemented on 32 bits.
   */
  init_block(s);

  if (last) {
    bi_windup(s);
  }
}
/* ===========================================================================
 * Save the match info and tally the frequency counts. Return true if
 * the current block must be flushed.
 */
function _tr_tally(s, dist, lc)
// deflate_state *s;
// unsigned dist;   /* distance of matched string */
// unsigned lc;     /* match length-MIN_MATCH or unmatched char (if dist==0) */
{
  //var out_length, in_length, dcode;

  /* Record the pair in the pending buffer: two distance bytes (hi, lo)
   * at d_buf, one length/literal byte at l_buf. */
  s.pending_buf[s.d_buf + s.last_lit * 2] = (dist >>> 8) & 0xff;
  s.pending_buf[s.d_buf + s.last_lit * 2 + 1] = dist & 0xff;
  s.pending_buf[s.l_buf + s.last_lit] = lc & 0xff;
  s.last_lit++;

  if (dist === 0) {
    /* lc is the unmatched char */
    s.dyn_ltree[lc * 2]/*.Freq*/++;
  } else {
    s.matches++;
    /* Here, lc is the match length - MIN_MATCH */
    dist--; /* dist = match distance - 1 */
    //Assert((ush)dist < (ush)MAX_DIST(s) &&
    //       (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) &&
    //       (ush)d_code(dist) < (ush)D_CODES,  "_tr_tally: bad match");

    s.dyn_ltree[(_length_code[lc] + LITERALS + 1) * 2]/*.Freq*/++;
    s.dyn_dtree[d_code(dist) * 2]/*.Freq*/++;
  }

  // (!) This block is disabled in zlib defaults,
  // don't enable it for binary compatibility
  //#ifdef TRUNCATE_BLOCK
  //  /* Try to guess if it is profitable to stop the current block here */
  //  if ((s.last_lit & 0x1fff) === 0 && s.level > 2) {
  //    /* Compute an upper bound for the compressed length */
  //    out_length = s.last_lit*8;
  //    in_length = s.strstart - s.block_start;
  //
  //    for (dcode = 0; dcode < D_CODES; dcode++) {
  //      out_length += s.dyn_dtree[dcode*2]/*.Freq*/ * (5 + extra_dbits[dcode]);
  //    }
  //    out_length >>>= 3;
  //    //Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ",
  //    //       s->last_lit, in_length, out_length,
  //    //       100L - out_length*100L/in_length));
  //    if (s.matches < (s.last_lit>>1)/*int /2*/ && out_length < (in_length>>1)/*int /2*/) {
  //      return true;
  //    }
  //  }
  //#endif

  return (s.last_lit === s.lit_bufsize - 1);
  /* We avoid equality with lit_bufsize because of wraparound at 64K
   * on 16 bit machines and because stored blocks are restricted to
   * 64K-1 bytes.
   */
}

/* Public trees API consumed by the deflate module. */
exports._tr_init = _tr_init;
exports._tr_stored_block = _tr_stored_block;
exports._tr_flush_block = _tr_flush_block;
exports._tr_tally = _tr_tally;
exports._tr_align = _tr_align;
} , { "../utils/common" : 24 } ] , 36 : [ function ( require , module , exports ) {
'use strict' ;
/**
 * Plain data holder mirroring zlib's z_stream struct. JS specific: flat
 * buffers plus explicit integer cursors stand in for C pointers.
 * Property creation order is part of the observable shape and is preserved.
 */
function ZStream() {
  // --- input side ---
  this.input = null;     // input byte buffer (next input byte lives at next_in)
  this.next_in = 0;      // cursor of the next input byte
  this.avail_in = 0;     // number of bytes available at input
  this.total_in = 0;     // total number of input bytes read so far
  // --- output side ---
  this.output = null;    // output byte buffer (next byte goes to next_out)
  this.next_out = 0;     // cursor where the next output byte should be put
  this.avail_out = 0;    // remaining free space at output
  this.total_out = 0;    // total number of bytes output so far
  // --- bookkeeping ---
  this.msg = '';         // last error message; '' plays the role of Z_NULL
  this.state = null;     // internal state, not visible by applications
  this.data_type = 2;    // best guess about the data type: 2 === Z_UNKNOWN
  this.adler = 0;        // adler32 value of the uncompressed data
}
module . exports = ZStream ;
} , { } ] , 37 : [ function ( require , module , exports ) {
// shim for using process in browser
var process = module . exports = { } ;
// cached from whatever global is present so that test runners that stub it
// don't break things. But we need to wrap it in a try catch in case it is
// wrapped in strict mode code which doesn't define any globals. It's inside a
// function because try/catches deoptimize in certain engines.
var cachedSetTimeout ;
var cachedClearTimeout ;
function defaultSetTimout ( ) {
throw new Error ( 'setTimeout has not been defined' ) ;
}
function defaultClearTimeout ( ) {
throw new Error ( 'clearTimeout has not been defined' ) ;
}
( function ( ) {
try {
if ( typeof setTimeout === 'function' ) {
cachedSetTimeout = setTimeout ;
} else {
cachedSetTimeout = defaultSetTimout ;
}
} catch ( e ) {
cachedSetTimeout = defaultSetTimout ;
}
try {
if ( typeof clearTimeout === 'function' ) {
cachedClearTimeout = clearTimeout ;
} else {
cachedClearTimeout = defaultClearTimeout ;
}
} catch ( e ) {
cachedClearTimeout = defaultClearTimeout ;
}
} ( ) )
function runTimeout ( fun ) {
if ( cachedSetTimeout === setTimeout ) {
//normal enviroments in sane situations
return setTimeout ( fun , 0 ) ;
}
// if setTimeout wasn't available but was latter defined
if ( ( cachedSetTimeout === defaultSetTimout || ! cachedSetTimeout ) && setTimeout ) {
cachedSetTimeout = setTimeout ;
return setTimeout ( fun , 0 ) ;
}
try {
// when when somebody has screwed with setTimeout but no I.E. maddness
return cachedSetTimeout ( fun , 0 ) ;
} catch ( e ) {
try {
// When we are in I.E. but the script has been evaled so I.E. doesn't trust the global object when called normally
return cachedSetTimeout . call ( null , fun , 0 ) ;
} catch ( e ) {
// same as above but when it's a version of I.E. that must have the global object for 'this', hopfully our context correct otherwise it will throw a global error
return cachedSetTimeout . call ( this , fun , 0 ) ;
}
}
}
function runClearTimeout ( marker ) {
if ( cachedClearTimeout === clearTimeout ) {
//normal enviroments in sane situations
return clearTimeout ( marker ) ;
}
// if clearTimeout wasn't available but was latter defined
if ( ( cachedClearTimeout === defaultClearTimeout || ! cachedClearTimeout ) && clearTimeout ) {
cachedClearTimeout = clearTimeout ;
return clearTimeout ( marker ) ;
}
try {
// when when somebody has screwed with setTimeout but no I.E. maddness
return cachedClearTimeout ( marker ) ;
} catch ( e ) {
try {
// When we are in I.E. but the script has been evaled so I.E. doesn't trust the global object when called normally
return cachedClearTimeout . call ( null , marker ) ;
} catch ( e ) {
// same as above but when it's a version of I.E. that must have the global object for 'this', hopfully our context correct otherwise it will throw a global error.
// Some versions of I.E. have different rules for clearTimeout vs setTimeout
return cachedClearTimeout . call ( this , marker ) ;
}
}
}
var queue = [ ] ;
var draining = false ;
var currentQueue ;
var queueIndex = - 1 ;
function cleanUpNextTick ( ) {
if ( ! draining || ! currentQueue ) {
return ;
}
draining = false ;
if ( currentQueue . length ) {
queue = currentQueue . concat ( queue ) ;
} else {
queueIndex = - 1 ;
}
if ( queue . length ) {
drainQueue ( ) ;
}
}
function drainQueue ( ) {
if ( draining ) {
return ;
}
var timeout = runTimeout ( cleanUpNextTick ) ;
draining = true ;
var len = queue . length ;
while ( len ) {
currentQueue = queue ;
queue = [ ] ;
while ( ++ queueIndex < len ) {
if ( currentQueue ) {
currentQueue [ queueIndex ] . run ( ) ;
}
}
queueIndex = - 1 ;
len = queue . length ;
}
currentQueue = null ;
draining = false ;
runClearTimeout ( timeout ) ;
}
process . nextTick = function ( fun ) {
var args = new Array ( arguments . length - 1 ) ;
if ( arguments . length > 1 ) {
for ( var i = 1 ; i < arguments . length ; i ++ ) {
args [ i - 1 ] = arguments [ i ] ;
}
}
queue . push ( new Item ( fun , args ) ) ;
if ( queue . length === 1 && ! draining ) {
runTimeout ( drainQueue ) ;
}
} ;
// v8 likes predictable objects, so each queued task is a tiny fixed-shape
// record of the callback plus its pre-captured argument list.
function Item(fn, argList) {
  this.fun = fn;
  this.array = argList;
}
// Execute the stored callback with its captured arguments (no `this`).
Item.prototype.run = function () {
  var callback = this.fun;
  callback.apply(null, this.array);
};
process . title = 'browser' ;
process . browser = true ;
process . env = { } ;
process . argv = [ ] ;
process . version = '' ; // empty string to avoid regexp issues
process . versions = { } ;
function noop ( ) { }
process . on = noop ;
process . addListener = noop ;
process . once = noop ;
process . off = noop ;
process . removeListener = noop ;
process . removeAllListeners = noop ;
process . emit = noop ;
process . binding = function ( name ) {
throw new Error ( 'process.binding is not supported' ) ;
} ;
process . cwd = function ( ) { return '/' } ;
process . chdir = function ( dir ) {
throw new Error ( 'process.chdir is not supported' ) ;
} ;
process . umask = function ( ) { return 0 ; } ;
} , { } ] , 38 : [ function ( require , module , exports ) {
(function (Buffer) {
/**
 * Convert a typed array to a Buffer without a copy
 *
 * Author: Feross Aboukhadijeh <feross@feross.org> <http://feross.org>
 * License: MIT
 *
 * `npm install typedarray-to-buffer`
 */
module.exports = function (arr) {
  if (typeof Buffer._augment === 'function' && Buffer.TYPED_ARRAY_SUPPORT) {
    // If `Buffer` is from the `buffer` module and this browser supports typed arrays,
    // then augment it with all the `Buffer` methods.
    return Buffer._augment(arr)
  } else {
    // Otherwise, fallback to creating a `Buffer` with a copy.
    return new Buffer(arr)
  }
}

}).call(this, require("buffer").Buffer)
} , { "buffer" : 6 } ] , 39 : [ function ( require , module , exports ) {
// `inherits` shim: make ctor's prototype chain descend from superCtor and
// expose the parent as ctor.super_.
if (typeof Object.create === 'function') {
  // implementation from standard node.js 'util' module
  module.exports = function inherits(ctor, superCtor) {
    ctor.super_ = superCtor
    ctor.prototype = Object.create(superCtor.prototype, {
      constructor: {
        value: ctor,
        enumerable: false,
        writable: true,
        configurable: true
      }
    });
  };
} else {
  // old school shim for old browsers: a throwaway constructor gives a
  // fresh object whose [[Prototype]] is superCtor.prototype.
  module.exports = function inherits(ctor, superCtor) {
    ctor.super_ = superCtor
    var TempCtor = function () {}
    TempCtor.prototype = superCtor.prototype
    ctor.prototype = new TempCtor()
    ctor.prototype.constructor = ctor
  }
}
} , { } ] , 40 : [ function ( require , module , exports ) {
module . exports = function isBuffer ( arg ) {
return arg && typeof arg === 'object'
&& typeof arg . copy === 'function'
&& typeof arg . fill === 'function'
&& typeof arg . readUInt8 === 'function' ;
}
} , { } ] , 41 : [ function ( require , module , exports ) {
( function ( process , global ) {
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
var formatRegExp = /%[sdj%]/g ;
exports . format = function ( f ) {
if ( ! isString ( f ) ) {
var objects = [ ] ;
for ( var i = 0 ; i < arguments . length ; i ++ ) {
objects . push ( inspect ( arguments [ i ] ) ) ;
}
return objects . join ( ' ' ) ;
}
var i = 1 ;
var args = arguments ;
var len = args . length ;
var str = String ( f ) . replace ( formatRegExp , function ( x ) {
if ( x === '%%' ) return '%' ;
if ( i >= len ) return x ;
switch ( x ) {
case '%s' : return String ( args [ i ++ ] ) ;
case '%d' : return Number ( args [ i ++ ] ) ;
case '%j' :
try {
return JSON . stringify ( args [ i ++ ] ) ;
} catch ( _ ) {
return '[Circular]' ;
}
default :
return x ;
}
} ) ;
for ( var x = args [ i ] ; i < len ; x = args [ ++ i ] ) {
if ( isNull ( x ) || ! isObject ( x ) ) {
str += ' ' + x ;
} else {
str += ' ' + inspect ( x ) ;
}
}
return str ;
} ;
// Mark that a method should not be used.
// Returns a modified function which warns once by default.
// If --no-deprecation is set, then it is a no-op.
exports . deprecate = function ( fn , msg ) {
// Allow for deprecating things in the process of starting up.
if ( isUndefined ( global . process ) ) {
return function ( ) {
return exports . deprecate ( fn , msg ) . apply ( this , arguments ) ;
} ;
}
if ( process . noDeprecation === true ) {
return fn ;
}
var warned = false ;
function deprecated ( ) {
if ( ! warned ) {
if ( process . throwDeprecation ) {
throw new Error ( msg ) ;
} else if ( process . traceDeprecation ) {
console . trace ( msg ) ;
} else {
console . error ( msg ) ;
}
warned = true ;
}
return fn . apply ( this , arguments ) ;
}
return deprecated ;
} ;
var debugs = {};      // memoized loggers, keyed by upper-cased set name
var debugEnviron;     // NODE_DEBUG, read lazily on first call
// Return a logger that prints to stderr only when NODE_DEBUG contains
// `set` (matched case-insensitively as a whole word); otherwise a no-op.
exports.debuglog = function(set) {
  if (isUndefined(debugEnviron))
    debugEnviron = process.env.NODE_DEBUG || '';
  set = set.toUpperCase();
  if (!debugs[set]) {
    if (new RegExp('\\b' + set + '\\b', 'i').test(debugEnviron)) {
      var pid = process.pid;
      debugs[set] = function() {
        var msg = exports.format.apply(exports, arguments);
        console.error('%s %d: %s', set, pid, msg);
      };
    } else {
      debugs[set] = function() {};
    }
  }
  return debugs[set];
};
/**
 * Echos the value of a value. Tries to print the value out
 * in the best way possible given the different types.
 *
 * @param {Object} obj The object to print out.
 * @param {Object} opts Optional options object that alters the output.
 */
/* legacy: obj, showHidden, depth, colors */
function inspect(obj, opts) {
  // default options
  var ctx = {
    seen: [],                 // circular-reference stack used by formatValue
    stylize: stylizeNoColor
  };
  // legacy positional form: inspect(obj, showHidden, depth, colors)
  if (arguments.length >= 3) ctx.depth = arguments[2];
  if (arguments.length >= 4) ctx.colors = arguments[3];
  if (isBoolean(opts)) {
    // legacy...
    ctx.showHidden = opts;
  } else if (opts) {
    // got an "options" object
    exports._extend(ctx, opts);
  }
  // set default options
  if (isUndefined(ctx.showHidden)) ctx.showHidden = false;
  if (isUndefined(ctx.depth)) ctx.depth = 2;
  if (isUndefined(ctx.colors)) ctx.colors = false;
  if (isUndefined(ctx.customInspect)) ctx.customInspect = true;
  if (ctx.colors) ctx.stylize = stylizeWithColor;
  return formatValue(ctx, obj, ctx.depth);
}
exports . inspect = inspect ;
// http://en.wikipedia.org/wiki/ANSI_escape_code#graphics
inspect . colors = {
'bold' : [ 1 , 22 ] ,
'italic' : [ 3 , 23 ] ,
'underline' : [ 4 , 24 ] ,
'inverse' : [ 7 , 27 ] ,
'white' : [ 37 , 39 ] ,
'grey' : [ 90 , 39 ] ,
'black' : [ 30 , 39 ] ,
'blue' : [ 34 , 39 ] ,
'cyan' : [ 36 , 39 ] ,
'green' : [ 32 , 39 ] ,
'magenta' : [ 35 , 39 ] ,
'red' : [ 31 , 39 ] ,
'yellow' : [ 33 , 39 ]
} ;
// Don't use 'blue' not visible on cmd.exe
inspect . styles = {
'special' : 'cyan' ,
'number' : 'yellow' ,
'boolean' : 'yellow' ,
'undefined' : 'grey' ,
'null' : 'bold' ,
'string' : 'green' ,
'date' : 'magenta' ,
// "name": intentionally not styling
'regexp' : 'red'
} ;
// Wrap `str` in the ANSI escape pair mapped to `styleType`; unknown style
// types pass through unchanged.
function stylizeWithColor(str, styleType) {
  var style = inspect.styles[styleType];
  if (!style) {
    return str;
  }
  var codes = inspect.colors[style];
  return '\u001b[' + codes[0] + 'm' + str + '\u001b[' + codes[1] + 'm';
}

// Identity stylizer used when colors are disabled.
function stylizeNoColor(str, styleType) {
  return str;
}
// Build a `{ value: true }` lookup table from the array's elements.
// (reduce, like forEach, skips holes in sparse arrays.)
function arrayToHash(array) {
  return array.reduce(function (hash, val) {
    hash[val] = true;
    return hash;
  }, {});
}
function formatValue ( ctx , value , recurseTimes ) {
// Provide a hook for user-specified inspect functions.
// Check that value is an object with an inspect function on it
if ( ctx . customInspect &&
value &&
isFunction ( value . inspect ) &&
// Filter out the util module, it's inspect function is special
value . inspect !== exports . inspect &&
// Also filter out any prototype objects using the circular check.
! ( value . constructor && value . constructor . prototype === value ) ) {
var ret = value . inspect ( recurseTimes , ctx ) ;
if ( ! isString ( ret ) ) {
ret = formatValue ( ctx , ret , recurseTimes ) ;
}
return ret ;
}
// Primitive types cannot have properties
var primitive = formatPrimitive ( ctx , value ) ;
if ( primitive ) {
return primitive ;
}
// Look up the keys of the object.
var keys = Object . keys ( value ) ;
var visibleKeys = arrayToHash ( keys ) ;
if ( ctx . showHidden ) {
keys = Object . getOwnPropertyNames ( value ) ;
}
// IE doesn't make error fields non-enumerable
// http://msdn.microsoft.com/en-us/library/ie/dww52sbt(v=vs.94).aspx
if ( isError ( value )
&& ( keys . indexOf ( 'message' ) >= 0 || keys . indexOf ( 'description' ) >= 0 ) ) {
return formatError ( value ) ;
}
// Some type of object without properties can be shortcutted.
if ( keys . length === 0 ) {
if ( isFunction ( value ) ) {
var name = value . name ? ': ' + value . name : '' ;
return ctx . stylize ( '[Function' + name + ']' , 'special' ) ;
}
if ( isRegExp ( value ) ) {
return ctx . stylize ( RegExp . prototype . toString . call ( value ) , 'regexp' ) ;
}
if ( isDate ( value ) ) {
return ctx . stylize ( Date . prototype . toString . call ( value ) , 'date' ) ;
}
if ( isError ( value ) ) {
return formatError ( value ) ;
}
}
var base = '' , array = false , braces = [ '{' , '}' ] ;
// Make Array say that they are Array
if ( isArray ( value ) ) {
array = true ;
braces = [ '[' , ']' ] ;
}
// Make functions say that they are functions
if ( isFunction ( value ) ) {
var n = value . name ? ': ' + value . name : '' ;
base = ' [Function' + n + ']' ;
}
// Make RegExps say that they are RegExps
if ( isRegExp ( value ) ) {
base = ' ' + RegExp . prototype . toString . call ( value ) ;
}
// Make dates with properties first say the date
if ( isDate ( value ) ) {
base = ' ' + Date . prototype . toUTCString . call ( value ) ;
}
// Make error with message first say the error
if ( isError ( value ) ) {
base = ' ' + formatError ( value ) ;
}
if ( keys . length === 0 && ( ! array || value . length == 0 ) ) {
return braces [ 0 ] + base + braces [ 1 ] ;
}
if ( recurseTimes < 0 ) {
if ( isRegExp ( value ) ) {
return ctx . stylize ( RegExp . prototype . toString . call ( value ) , 'regexp' ) ;
} else {
return ctx . stylize ( '[Object]' , 'special' ) ;
}
}
ctx . seen . push ( value ) ;
var output ;
if ( array ) {
output = formatArray ( ctx , value , recurseTimes , visibleKeys , keys ) ;
} else {
output = keys . map ( function ( key ) {
return formatProperty ( ctx , value , recurseTimes , visibleKeys , key , array ) ;
} ) ;
}
ctx . seen . pop ( ) ;
return reduceToSingleString ( output , base , braces ) ;
}
function formatPrimitive ( ctx , value ) {
if ( isUndefined ( value ) )
return ctx . stylize ( 'undefined' , 'undefined' ) ;
if ( isString ( value ) ) {
var simple = '\'' + JSON . stringify ( value ) . replace ( /^"|"$/g , '' )
. replace ( /'/g , "\\'" )
. replace ( /\\"/g , '"' ) + '\'' ;
return ctx . stylize ( simple , 'string' ) ;
}
if ( isNumber ( value ) )
return ctx . stylize ( '' + value , 'number' ) ;
if ( isBoolean ( value ) )
return ctx . stylize ( '' + value , 'boolean' ) ;
// For some reason typeof null is "object", so special case here.
if ( isNull ( value ) )
return ctx . stylize ( 'null' , 'null' ) ;
}
// Render an error as "[ErrorName: message]" using the genuine Error
// toString, immune to overridden toString on the value itself.
function formatError(value) {
  var rendered = Error.prototype.toString.call(value);
  return '[' + rendered + ']';
}
function formatArray ( ctx , value , recurseTimes , visibleKeys , keys ) {
var output = [ ] ;
for ( var i = 0 , l = value . length ; i < l ; ++ i ) {
if ( hasOwnProperty ( value , String ( i ) ) ) {
output . push ( formatProperty ( ctx , value , recurseTimes , visibleKeys ,
String ( i ) , true ) ) ;
} else {
output . push ( '' ) ;
}
}
keys . forEach ( function ( key ) {
if ( ! key . match ( /^\d+$/ ) ) {
output . push ( formatProperty ( ctx , value , recurseTimes , visibleKeys ,
key , true ) ) ;
}
} ) ;
return output ;
}
function formatProperty ( ctx , value , recurseTimes , visibleKeys , key , array ) {
var name , str , desc ;
desc = Object . getOwnPropertyDescriptor ( value , key ) || { value : value [ key ] } ;
if ( desc . get ) {
if ( desc . set ) {
str = ctx . stylize ( '[Getter/Setter]' , 'special' ) ;
} else {
str = ctx . stylize ( '[Getter]' , 'special' ) ;
}
} else {
if ( desc . set ) {
str = ctx . stylize ( '[Setter]' , 'special' ) ;
}
}
if ( ! hasOwnProperty ( visibleKeys , key ) ) {
name = '[' + key + ']' ;
}
if ( ! str ) {
if ( ctx . seen . indexOf ( desc . value ) < 0 ) {
if ( isNull ( recurseTimes ) ) {
str = formatValue ( ctx , desc . value , null ) ;
} else {
str = formatValue ( ctx , desc . value , recurseTimes - 1 ) ;
}
if ( str . indexOf ( '\n' ) > - 1 ) {
if ( array ) {
str = str . split ( '\n' ) . map ( function ( line ) {
return ' ' + line ;
} ) . join ( '\n' ) . substr ( 2 ) ;
} else {
str = '\n' + str . split ( '\n' ) . map ( function ( line ) {
return ' ' + line ;
} ) . join ( '\n' ) ;
}
}
} else {
str = ctx . stylize ( '[Circular]' , 'special' ) ;
}
}
if ( isUndefined ( name ) ) {
if ( array && key . match ( /^\d+$/ ) ) {
return str ;
}
name = JSON . stringify ( '' + key ) ;
if ( name . match ( /^"([a-zA-Z_][a-zA-Z_0-9]*)"$/ ) ) {
name = name . substr ( 1 , name . length - 2 ) ;
name = ctx . stylize ( name , 'name' ) ;
} else {
name = name . replace ( /'/g , "\\'" )
. replace ( /\\"/g , '"' )
. replace ( /(^"|"$)/g , "'" ) ;
name = ctx . stylize ( name , 'string' ) ;
}
}
return name + ': ' + str ;
}
// Join formatted properties into one line when the stripped length fits
// within 60 characters, otherwise into a multi-line block.
function reduceToSingleString(output, base, braces) {
  var numLinesEst = 0;
  var totalLength = 0;
  for (var i = 0; i < output.length; i++) {
    numLinesEst++;
    if (output[i].indexOf('\n') >= 0) numLinesEst++;
    // Measure without ANSI color escapes; +1 accounts for the separator.
    totalLength += output[i].replace(/\u001b\[\d\d?m/g, '').length + 1;
  }

  if (totalLength > 60) {
    return braces[0] +
           (base === '' ? '' : base + '\n ') +
           ' ' +
           output.join(',\n  ') +
           ' ' +
           braces[1];
  }

  return braces[0] + base + ' ' + output.join(', ') + ' ' + braces[1];
}
// NOTE: These type checking functions intentionally don't use `instanceof`
// because it is fragile and can be easily faked with `Object.create()`.
function isArray ( ar ) {
return Array . isArray ( ar ) ;
}
exports . isArray = isArray ;
function isBoolean ( arg ) {
return typeof arg === 'boolean' ;
}
exports . isBoolean = isBoolean ;
function isNull ( arg ) {
return arg === null ;
}
exports . isNull = isNull ;
function isNullOrUndefined ( arg ) {
return arg == null ;
}
exports . isNullOrUndefined = isNullOrUndefined ;
function isNumber ( arg ) {
return typeof arg === 'number' ;
}
exports . isNumber = isNumber ;
function isString ( arg ) {
return typeof arg === 'string' ;
}
exports . isString = isString ;
function isSymbol ( arg ) {
return typeof arg === 'symbol' ;
}
exports . isSymbol = isSymbol ;
function isUndefined ( arg ) {
return arg === void 0 ;
}
exports . isUndefined = isUndefined ;
function isRegExp ( re ) {
return isObject ( re ) && objectToString ( re ) === '[object RegExp]' ;
}
exports . isRegExp = isRegExp ;
function isObject ( arg ) {
return typeof arg === 'object' && arg !== null ;
}
exports . isObject = isObject ;
function isDate ( d ) {
return isObject ( d ) && objectToString ( d ) === '[object Date]' ;
}
exports . isDate = isDate ;
function isError ( e ) {
return isObject ( e ) &&
( objectToString ( e ) === '[object Error]' || e instanceof Error ) ;
}
exports . isError = isError ;
function isFunction ( arg ) {
return typeof arg === 'function' ;
}
exports . isFunction = isFunction ;
function isPrimitive ( arg ) {
return arg === null ||
typeof arg === 'boolean' ||
typeof arg === 'number' ||
typeof arg === 'string' ||
typeof arg === 'symbol' || // ES6 symbol
typeof arg === 'undefined' ;
}
exports . isPrimitive = isPrimitive ;
exports . isBuffer = require ( './support/isBuffer' ) ;
function objectToString ( o ) {
return Object . prototype . toString . call ( o ) ;
}
// Zero-pad a number below 10 for timestamp fields ("7" -> "07").
function pad(n) {
  var digits = n.toString(10);
  return n < 10 ? '0' + digits : digits;
}
var months = [ 'Jan' , 'Feb' , 'Mar' , 'Apr' , 'May' , 'Jun' , 'Jul' , 'Aug' , 'Sep' ,
'Oct' , 'Nov' , 'Dec' ] ;
// 26 Feb 16:19:34
function timestamp ( ) {
var d = new Date ( ) ;
var time = [ pad ( d . getHours ( ) ) ,
pad ( d . getMinutes ( ) ) ,
pad ( d . getSeconds ( ) ) ] . join ( ':' ) ;
return [ d . getDate ( ) , months [ d . getMonth ( ) ] , time ] . join ( ' ' ) ;
}
// log is just a thin wrapper to console.log that prepends a timestamp
exports . log = function ( ) {
console . log ( '%s - %s' , timestamp ( ) , exports . format . apply ( exports , arguments ) ) ;
} ;
/**
 * Inherit the prototype methods from one constructor into another.
 *
 * The Function.prototype.inherits from lang.js rewritten as a standalone
 * function (not on Function.prototype). NOTE: If this file is to be loaded
 * during bootstrapping this function needs to be rewritten using some native
 * functions as prototype setup using normal JavaScript does not work as
 * expected during bootstrapping (see mirror.js in r114903).
 *
 * @param {function} ctor Constructor function which needs to inherit the
 *     prototype.
 * @param {function} superCtor Constructor function to inherit prototype from.
 */
exports.inherits = require('inherits');

// Shallow-copy `add`'s own enumerable keys onto `origin` (mutates and
// returns origin).
exports._extend = function(origin, add) {
  // Don't do anything if add isn't an object
  if (!add || !isObject(add)) return origin;

  var keys = Object.keys(add);
  var i = keys.length;
  while (i--) {
    origin[keys[i]] = add[keys[i]];
  }
  return origin;
};
// Own-property check that works even when `obj` shadows hasOwnProperty or
// has no prototype at all (Object.create(null)).
function hasOwnProperty(obj, prop) {
  return {}.hasOwnProperty.call(obj, prop);
}
} ) . call ( this , require ( '_process' ) , typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : { } )
} , { "./support/isBuffer" : 40 , "_process" : 37 , "inherits" : 39 } ] , 42 : [ function ( require , module , exports ) {
'use strict' ;
var leveljs = require ( 'level-js' ) ;
var db = typeof indexedDB === 'undefined' ? { open : function open ( _ , cb ) {
return cb ( true ) ;
} } : leveljs ( './tessdata2' ) ;
var langdata = require ( '../common/langdata.json' ) ;
module . exports = function getLanguageData ( req , res , cb ) {
var lang = req . options . lang ;
function saveDataFile ( data ) {
db . put ( lang , data , function ( err ) {
return console . log ( 'cached' , lang , err ) ;
} ) ;
cb ( data ) ;
}
db . open ( { compression : false } , function ( err ) {
if ( err ) return fetchLanguageData ( req , res , cb ) ;
db . get ( lang , function ( err , data ) {
if ( err ) return fetchLanguageData ( req , res , saveDataFile ) ;
res . progress ( { status : 'found in cache ' + lang + '.traineddata' } ) ;
cb ( data ) ;
} ) ;
} ) ;
} ;
var ungzip = require ( 'pako' ) . ungzip ;
function fetchLanguageData ( req , res , cb ) {
var lang = req . options . lang ;
var langfile = lang + '.traineddata.gz' ;
var url = req . workerOptions . langPath + langfile ;
var xhr = new XMLHttpRequest ( ) ;
xhr . open ( 'GET' , url , true ) ;
xhr . responseType = 'arraybuffer' ;
xhr . onerror = function ( e ) {
xhr . onprogress = xhr . onload = null ;
cb ( xhr , null ) ;
} ;
xhr . onprogress = function ( e ) {
return res . progress ( {
status : 'downloading ' + langfile ,
loaded : e . loaded ,
progress : Math . min ( 1 , e . loaded / langdata [ lang ] )
} ) ;
} ;
xhr . onload = function ( e ) {
if ( ! ( xhr . status == 200 || xhr . status == 0 && xhr . response ) ) return res . reject ( 'Error downloading language ' + url ) ;
res . progress ( { status : 'unzipping ' + langfile } ) ;
// in case the gzips are already ungzipped or extra gzipped
var response = new Uint8Array ( xhr . response ) ;
try {
while ( response [ 0 ] == 0x1f && response [ 1 ] == 0x8b ) {
response = ungzip ( response ) ;
}
} catch ( err ) {
return res . reject ( 'Error unzipping language file ' + langfile + '\n' + err . message ) ;
}
cb ( response ) ;
} ;
xhr . send ( ) ;
}
} , { "../common/langdata.json" : 46 , "level-js" : 12 , "pako" : 21 } ] , 43 : [ function ( require , module , exports ) {
(function (process, global) {
'use strict';

var workerUtils = require('../common/worker.js');

if (process.env.NODE_ENV === "development") {
  console.debug('Using Development Worker');
}

// Web-worker entry point: each message packet from the main thread is
// dispatched through the shared worker utilities; replies are posted back.
global.addEventListener('message', function (e) {
  var packet = e.data;
  workerUtils.dispatchHandlers(packet, function (obj) {
    return postMessage(obj);
  });
});

// Load the Emscripten tesseract core once per worker. importScripts is
// synchronous, so progress is reported immediately before and after.
exports.getCore = function (req, res) {
  if (!global.TesseractCore) {
    res.progress({ status: 'loading tesseract core', progress: 0 });
    importScripts(req.workerOptions.corePath);
    res.progress({ status: 'loading tesseract core', progress: 1 });
  }
  return TesseractCore;
};

exports.getLanguageData = require('./lang.js');

workerUtils.setAdapter(module.exports);

}).call(this, require('_process'), typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
} , { "../common/worker.js" : 47 , "./lang.js" : 42 , "_process" : 37 } ] , 44 : [ function ( require , module , exports ) {
'use strict';

// This converts an image to grayscale.
// Input: an ImageData-like object { data: RGBA bytes, width, height }.
// Output: a Uint8Array with one gray byte per pixel.
// The luma weights 77/151/28 sum to 256 (a fixed-point R/G/B weighting);
// each pixel is alpha-composited over 50% gray via the
// `(255 - alpha) << 15` term, then `+ 32768 >> 16` rounds and renormalizes.
module.exports = function desaturate(image) {
  var width, height;
  if (image.data) {
    var src = image.data;
    width = image.width, height = image.height;
    var dst = new Uint8Array(width * height);
    var srcLength = src.length | 0,
        srcLength_16 = srcLength - 16 | 0;

    for (var i = 0, j = 0; i <= srcLength_16; i += 16, j += 4) {
      // convert to grayscale 4 pixels (16 RGBA bytes) at a time; everything
      // with alpha gets put in front of 50% gray
      dst[j] = (src[i] * 77 + src[i + 1] * 151 + src[i + 2] * 28) * src[i + 3] + (255 - src[i + 3] << 15) + 32768 >> 16;
      dst[j + 1] = (src[i + 4] * 77 + src[i + 5] * 151 + src[i + 6] * 28) * src[i + 7] + (255 - src[i + 7] << 15) + 32768 >> 16;
      dst[j + 2] = (src[i + 8] * 77 + src[i + 9] * 151 + src[i + 10] * 28) * src[i + 11] + (255 - src[i + 11] << 15) + 32768 >> 16;
      dst[j + 3] = (src[i + 12] * 77 + src[i + 13] * 151 + src[i + 14] * 28) * src[i + 15] + (255 - src[i + 15] << 15) + 32768 >> 16;
    }
    for (; i < srcLength; i += 4, ++j) {
      // finish up the trailing pixels when length is not a multiple of 16
      dst[j] = (src[i] * 77 + src[i + 1] * 151 + src[i + 2] * 28) * src[i + 3] + (255 - src[i + 3] << 15) + 32768 >> 16;
    } image = dst;
  } else {
    // NOTE(review): throws a string, not an Error — no stack trace; kept
    // as-is for compatibility with existing catch sites.
    throw 'Invalid ImageData';
  }
  return image;
};
} , { } ] , 45 : [ function ( require , module , exports ) {
'use strict' ;
// Walks tesseract's ResultIterator across the whole recognized page and
// serializes every level of the layout hierarchy (block -> paragraph ->
// textline -> word -> symbol) into plain JS objects, plus page-level
// text, HOCR and configuration.
//
// Module is the emscripten module (enum constants + memory helpers);
// base is a TessBaseAPI that has already run recognition.
module.exports = function DumpLiterallyEverything(Module, base) {
  var ri = base.GetIterator();
  var blocks = [];
  // Current container at each hierarchy level; children get pushed into
  // the most recently started parent as the iterator advances.
  var block, para, textline, word, symbol;
  // Maps an enum value back to its name by scanning Module's exported
  // constants with the given prefix, e.g. enumToString(v, 'PSM') finds
  // the matching 'PSM_XXX' key and returns 'XXX'.
  function enumToString(value, prefix) {
    return Object.keys(Module).filter(function (e) {
      return e.substr(0, prefix.length + 1) == prefix + '_';
    }).filter(function (e) {
      return Module[e] === value;
    }).map(function (e) {
      return e.slice(prefix.length + 1);
    })[0];
  }
  ri.Begin();
  do {
    if (ri.IsAtBeginningOf(Module.RIL_BLOCK)) {
      var poly = ri.BlockPolygon();
      var polygon = null;
      // BlockPolygon() returns null when automatic page segmentation is off
      if (Module.getPointer(poly) > 0) {
        var n = poly.get_n(),
          px = poly.get_x(),
          py = poly.get_y(),
          polygon = [];
        for (var i = 0; i < n; i++) {
          polygon.push([px.getValue(i), py.getValue(i)]);
        }
        // We own the returned Pta; free it after copying the points out.
        Module._ptaDestroy(Module.getPointer(poly));
      }
      block = {
        paragraphs: [],
        text: ri.GetUTF8Text(Module.RIL_BLOCK),
        confidence: ri.Confidence(Module.RIL_BLOCK),
        baseline: ri.getBaseline(Module.RIL_BLOCK),
        bbox: ri.getBoundingBox(Module.RIL_BLOCK),
        blocktype: enumToString(ri.BlockType(), 'PT'),
        polygon: polygon
      };
      blocks.push(block);
    }
    if (ri.IsAtBeginningOf(Module.RIL_PARA)) {
      para = {
        lines: [],
        text: ri.GetUTF8Text(Module.RIL_PARA),
        confidence: ri.Confidence(Module.RIL_PARA),
        baseline: ri.getBaseline(Module.RIL_PARA),
        bbox: ri.getBoundingBox(Module.RIL_PARA),
        is_ltr: !!ri.ParagraphIsLtr()
      };
      block.paragraphs.push(para);
    }
    if (ri.IsAtBeginningOf(Module.RIL_TEXTLINE)) {
      textline = {
        words: [],
        text: ri.GetUTF8Text(Module.RIL_TEXTLINE),
        confidence: ri.Confidence(Module.RIL_TEXTLINE),
        baseline: ri.getBaseline(Module.RIL_TEXTLINE),
        bbox: ri.getBoundingBox(Module.RIL_TEXTLINE)
      };
      para.lines.push(textline);
    }
    if (ri.IsAtBeginningOf(Module.RIL_WORD)) {
      var fontInfo = ri.getWordFontAttributes(),
        wordDir = ri.WordDirection();
      word = {
        symbols: [],
        choices: [],
        text: ri.GetUTF8Text(Module.RIL_WORD),
        confidence: ri.Confidence(Module.RIL_WORD),
        baseline: ri.getBaseline(Module.RIL_WORD),
        bbox: ri.getBoundingBox(Module.RIL_WORD),
        is_numeric: !!ri.WordIsNumeric(),
        in_dictionary: !!ri.WordIsFromDictionary(),
        direction: enumToString(wordDir, 'DIR'),
        language: ri.WordRecognitionLanguage(),
        is_bold: fontInfo.is_bold,
        is_italic: fontInfo.is_italic,
        is_underlined: fontInfo.is_underlined,
        is_monospace: fontInfo.is_monospace,
        is_serif: fontInfo.is_serif,
        is_smallcaps: fontInfo.is_smallcaps,
        font_size: fontInfo.pointsize,
        font_id: fontInfo.font_id,
        font_name: fontInfo.font_name
      };
      // Collect the alternative recognitions for the whole word.
      var wc = new Module.WordChoiceIterator(ri);
      do {
        word.choices.push({
          text: wc.GetUTF8Text(),
          confidence: wc.Confidence()
        });
      } while (wc.Next());
      Module.destroy(wc);
      textline.words.push(word);
    }
    var image = null;
    // var pix = ri.GetBinaryImage(Module.RIL_SYMBOL)
    // var image = pix2array(pix);
    // // for some reason it seems that things stop working if you destroy pics
    // Module._pixDestroy(Module.getPointer(pix));
    if (ri.IsAtBeginningOf(Module.RIL_SYMBOL)) {
      symbol = {
        choices: [],
        image: image,
        text: ri.GetUTF8Text(Module.RIL_SYMBOL),
        confidence: ri.Confidence(Module.RIL_SYMBOL),
        baseline: ri.getBaseline(Module.RIL_SYMBOL),
        bbox: ri.getBoundingBox(Module.RIL_SYMBOL),
        is_superscript: !!ri.SymbolIsSuperscript(),
        is_subscript: !!ri.SymbolIsSubscript(),
        is_dropcap: !!ri.SymbolIsDropcap()
      };
      word.symbols.push(symbol);
      // Per-symbol alternative characters with confidences.
      var ci = new Module.ChoiceIterator(ri);
      do {
        symbol.choices.push({
          text: ci.GetUTF8Text(),
          confidence: ci.Confidence()
        });
      } while (ci.Next());
      Module.destroy(ci);
    }
  } while (ri.Next(Module.RIL_SYMBOL)); // advance one symbol at a time
  Module.destroy(ri);
  return {
    text: base.GetUTF8Text(),
    html: deindent(base.GetHOCRText()),
    confidence: base.MeanTextConf(),
    blocks: blocks,
    psm: enumToString(base.GetPageSegMode(), 'PSM'),
    oem: enumToString(base.oem(), 'OEM'),
    version: base.Version()
  };
};
// The generated HOCR is uniformly indented by two spaces; strip that
// common indentation (detected from the first line) so the markup is
// easier to read.
//
// @param {string} html - multi-line HOCR markup.
// @returns {string} the markup with the leading two-space indent removed
//   from every line that carries it; unchanged if the first line is not indented.
function deindent(html) {
  var lines = html.split('\n');
  // FIX: the comparison string must be two spaces — a 2-character
  // substring can never equal a single-space string, which made this
  // function a silent no-op.
  if (lines[0].substring(0, 2) === '  ') {
    for (var i = 0; i < lines.length; i++) {
      if (lines[i].substring(0, 2) === '  ') {
        lines[i] = lines[i].slice(2);
      }
    }
  }
  return lines.join('\n');
}
} , { } ] , 46 : [ function ( require , module , exports ) {
module . exports = { "afr" : 1079573 , "ara" : 1701536 , "aze" : 1420865 , "bel" : 1276820 , "ben" : 6772012 , "bul" : 1605615 , "cat" : 1652368 , "ces" : 1035441 , "chi_sim" : 17710414 , "chi_tra" : 24717749 , "chr" : 320649 , "dan-frak" : 677656 , "dan" : 1972936 , "deu-frak" : 822644 , "deu" : 991656 , "ell" : 859719 , "eng" : 9453554 , "enm" : 619254 , "epo" : 1241212 , "equ" : 821130 , "est" : 1905040 , "eus" : 1641190 , "fin" : 979418 , "fra" : 1376221 , "frk" : 5912963 , "frm" : 5147082 , "glg" : 1674938 , "grc" : 3012615 , "heb" : 1051501 , "hin" : 6590065 , "hrv" : 1926995 , "hun" : 3074473 , "ind" : 1874776 , "isl" : 1634041 , "ita" : 948593 , "ita_old" : 3436571 , "jpn" : 13507168 , "kan" : 4390317 , "kor" : 5353098 , "lav" : 1843944 , "lit" : 1779240 , "mal" : 5966263 , "meme" : 88453 , "mkd" : 1163087 , "mlt" : 1463001 , "msa" : 1665427 , "nld" : 1134708 , "nor" : 2191610 , "osd" : 4274649 , "pol" : 7024662 , "por" : 909359 , "ron" : 915680 , "rus" : 5969957 , "slk-frak" : 289885 , "slk" : 2217342 , "slv" : 1611338 , "spa" : 883170 , "spa_old" : 5647453 , "sqi" : 1667041 , "srp" : 1770244 , "swa" : 757916 , "swe" : 2451917 , "tam" : 3498763 , "tel" : 5795246 , "tgl" : 1496256 , "tha" : 3811136 , "tur" : 3563264 , "ukr" : 937566 , "vie" : 2195922 }
} , { } ] , 47 : [ function ( require , module , exports ) {
'use strict' ;
var latestJob ;
var Module ;
var base ;
var adapter = { } ;
// Routes an incoming job packet to its handler and wires up the reply
// channel. Every reply echoes the packet's jobId and action so the main
// thread can correlate responses; synchronous handler errors are turned
// into 'reject' replies.
function dispatchHandlers(packet, send) {
  var respond = function respond(status, data) {
    send({
      jobId: packet.jobId,
      status: status,
      action: packet.action,
      data: data
    });
  };
  respond.resolve = respond.bind(this, 'resolve');
  respond.reject = respond.bind(this, 'reject');
  respond.progress = respond.bind(this, 'progress');
  latestJob = respond;
  try {
    switch (packet.action) {
      case 'recognize':
        handleRecognize(packet.payload, respond);
        break;
      case 'detect':
        handleDetect(packet.payload, respond);
        break;
    }
  } catch (err) {
    respond.reject(err);
  }
}
exports . dispatchHandlers = dispatchHandlers ;
exports . setAdapter = function setAdapter ( impl ) {
adapter = impl ;
} ;
// Lazily creates the emscripten Module and the shared TessBaseAPI, or
// recreates them when the current heap is too small for the requested
// language (CJK traineddata needs a larger TOTAL_MEMORY).
function handleInit(req, res) {
  var needed = ['chi_sim', 'chi_tra', 'jpn'].indexOf(req.options.lang) !== -1
    ? 167772160   // 160 MB for dense scripts
    : 100663296;  // 96 MB default
  if (Module && Module.TOTAL_MEMORY >= needed) return;
  var Core = adapter.getCore(req, res);
  res.progress({ status: 'initializing tesseract', progress: 0 });
  Module = Core({
    TOTAL_MEMORY: needed,
    // Core reports 0-100; the first ~30% is setup, so rescale to 0..1.
    TesseractProgress: function TesseractProgress(percent) {
      latestJob.progress({ status: 'recognizing text', progress: Math.max(0, (percent - 30) / 70) });
    },
    onRuntimeInitialized: function onRuntimeInitialized() {}
  });
  Module.FS_createPath('/', 'tessdata', true, true);
  base = new Module.TessBaseAPI();
  res.progress({ status: 'initializing tesseract', progress: 1 });
}
var dump = require ( './dump.js' ) ;
var desaturate = require ( './desaturate.js' ) ;
// Converts the RGBA image to 8-bit grayscale, copies it into the
// emscripten heap, and points the API at that buffer (1 byte/pixel,
// stride = width). Returns the heap pointer so the caller can free it.
function setImage(Module, base, image) {
  var w = image.width;
  var h = image.height;
  var gray = desaturate(image);
  var ptr = Module.allocate(gray, 'i8', Module.ALLOC_NORMAL);
  base.SetImage(Module.wrapPointer(ptr), w, h, 1, w);
  base.SetRectangle(0, 0, w, h);
  return ptr;
}
// Fetches <lang>.traineddata through the adapter and mounts it into the
// emscripten FS exactly once per Module instance, then invokes cb.
function loadLanguage(req, res, cb) {
  var lang = req.options.lang;
  // Per-Module cache so re-initialized Modules reload their data.
  var loaded = Module._loadedLanguages || (Module._loadedLanguages = {});
  if (lang in loaded) return cb();
  adapter.getLanguageData(req, res, function (data) {
    Module.FS_createDataFile('tessdata', lang + '.traineddata', data, true, false);
    res.progress({ status: 'loading ' + lang + '.traineddata', progress: 1 });
    loaded[lang] = true;
    cb();
  });
}
// Runs full OCR on req.image: initializes the API for the requested
// language, applies every req.options entry as a tesseract variable,
// recognizes, and resolves with the dumped result tree.
function handleRecognize(req, res) {
  handleInit(req, res);
  loadLanguage(req, res, function () {
    res.progress({ status: 'initializing api', progress: 0 });
    base.Init(null, req.options.lang);
    res.progress({ status: 'initializing api', progress: 0.3 });
    var opts = req.options;
    for (var key in opts) {
      if (opts.hasOwnProperty(key)) {
        base.SetVariable(key, opts[key]);
      }
    }
    res.progress({ status: 'initializing api', progress: 0.6 });
    var ptr = setImage(Module, base, req.image);
    res.progress({ status: 'initializing api', progress: 1 });
    base.Recognize(null);
    var result = dump(Module, base);
    base.End();
    Module._free(ptr);
    res.resolve(result);
  });
}
// Detects page orientation and script (OSD) in req.image using the
// 'osd' traineddata, resolving with script/orientation plus confidences.
function handleDetect(req, res) {
  handleInit(req, res);
  req.options.lang = 'osd';
  loadLanguage(req, res, function () {
    base.Init(null, 'osd');
    base.SetPageSegMode(Module.PSM_OSD_ONLY);
    var ptr = setImage(Module, base, req.image);
    var results = new Module.OSResults();
    if (!base.DetectOS(results)) {
      base.End();
      Module._free(ptr);
      res.reject('failed to detect os');
      return;
    }
    var charset = results.get_unicharset();
    var best = results.get_best_result();
    var oid = best.get_orientation_id();
    var sid = best.get_script_id();
    // Orientation ids map to degrees counter-clockwise: 0, 270, 180, 90.
    var result = {
      tesseract_script_id: sid,
      script: charset.get_script_from_script_id(sid),
      script_confidence: best.get_sconfidence(),
      orientation_degrees: [0, 270, 180, 90][oid],
      orientation_confidence: best.get_oconfidence()
    };
    base.End();
    Module._free(ptr);
    res.resolve(result);
  });
}
} , { "./desaturate.js" : 44 , "./dump.js" : 45 } ] } , { } , [ 43 ] ) ;