author     LinuxWizard42 <computerwizard@linuxmail.org>  2022-10-12 23:08:57 +0300
committer  LinuxWizard42 <computerwizard@linuxmail.org>  2022-10-12 23:08:57 +0300
commit     726b81b19251674e149ccfbb1abacbd837fc6db0 (patch)
tree       fbdbb227dc01357eb76e8222d76185bc124c5ca6 /node_modules/yauzl
parent     34f0890e175698940d49238097579f44e4d78c89 (diff)
download   FlashRunner-726b81b19251674e149ccfbb1abacbd837fc6db0.tar.gz
           FlashRunner-726b81b19251674e149ccfbb1abacbd837fc6db0.tar.zst
Removed files that should not have been included in git
Diffstat (limited to 'node_modules/yauzl')
-rw-r--r--  node_modules/yauzl/LICENSE        21
-rw-r--r--  node_modules/yauzl/README.md     658
-rw-r--r--  node_modules/yauzl/index.js      796
-rw-r--r--  node_modules/yauzl/package.json   40
4 files changed, 0 insertions, 1515 deletions
diff --git a/node_modules/yauzl/LICENSE b/node_modules/yauzl/LICENSE
deleted file mode 100644
index 37538d4..0000000
--- a/node_modules/yauzl/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2014 Josh Wolfe
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/node_modules/yauzl/README.md b/node_modules/yauzl/README.md
deleted file mode 100644
index d4e53f4..0000000
--- a/node_modules/yauzl/README.md
+++ /dev/null
@@ -1,658 +0,0 @@
-# yauzl
-
-[![Build Status](https://travis-ci.org/thejoshwolfe/yauzl.svg?branch=master)](https://travis-ci.org/thejoshwolfe/yauzl)
-[![Coverage Status](https://img.shields.io/coveralls/thejoshwolfe/yauzl.svg)](https://coveralls.io/r/thejoshwolfe/yauzl)
-
-yet another unzip library for node. For zipping, see
-[yazl](https://github.com/thejoshwolfe/yazl).
-
-Design principles:
-
- * Follow the spec.
- Don't scan for local file headers.
- Read the central directory for file metadata.
- (see [No Streaming Unzip API](#no-streaming-unzip-api)).
- * Don't block the JavaScript thread.
- Use and provide async APIs.
- * Keep memory usage under control.
- Don't attempt to buffer entire files in RAM at once.
- * Never crash (if used properly).
-   Don't let malformed zip files bring down client applications that are trying to catch errors.
- * Catch unsafe file names.
- See `validateFileName()`.
-
-## Usage
-
-```js
-var yauzl = require("yauzl");
-
-yauzl.open("path/to/file.zip", {lazyEntries: true}, function(err, zipfile) {
- if (err) throw err;
- zipfile.readEntry();
- zipfile.on("entry", function(entry) {
- if (/\/$/.test(entry.fileName)) {
- // Directory file names end with '/'.
-      // Note that entries for directories themselves are optional.
- // An entry's fileName implicitly requires its parent directories to exist.
- zipfile.readEntry();
- } else {
- // file entry
- zipfile.openReadStream(entry, function(err, readStream) {
- if (err) throw err;
- readStream.on("end", function() {
- zipfile.readEntry();
- });
- readStream.pipe(somewhere);
- });
- }
- });
-});
-```
-
-See also `examples/` for more usage examples.
-
-## API
-
-The default for every optional `callback` parameter is:
-
-```js
-function defaultCallback(err) {
- if (err) throw err;
-}
-```
-
-### open(path, [options], [callback])
-
-Calls `fs.open(path, "r")` and reads the `fd` effectively the same as `fromFd()` would.
-
-`options` may be omitted or `null`. The defaults are `{autoClose: true, lazyEntries: false, decodeStrings: true, validateEntrySizes: true, strictFileNames: false}`.
-
-`autoClose` is effectively equivalent to:
-
-```js
-zipfile.once("end", function() {
- zipfile.close();
-});
-```
-
-`lazyEntries` indicates that entries should be read only when `readEntry()` is called.
-If `lazyEntries` is `false`, `entry` events will be emitted as fast as possible to allow `pipe()`ing
-file data from all entries in parallel.
-This is not recommended, as it can lead to out of control memory usage for zip files with many entries.
-See [issue #22](https://github.com/thejoshwolfe/yauzl/issues/22).
-If `lazyEntries` is `true`, an `entry` or `end` event will be emitted in response to each call to `readEntry()`.
-This allows processing of one entry at a time, and will keep memory usage under control for zip files with many entries.
-
-`decodeStrings: true` is the default and causes yauzl to decode strings with `CP437` or `UTF-8` as required by the spec.
-The exact effects of turning this option off are (see the sketch after this list):
-
-* `zipfile.comment`, `entry.fileName`, and `entry.fileComment` will be `Buffer` objects instead of `String`s.
-* Any Info-ZIP Unicode Path Extra Field will be ignored. See `extraFields`.
-* Automatic file name validation will not be performed. See `validateFileName()`.
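-
-For example, a minimal sketch (assuming a hypothetical `some.zip` and UTF-8 file names) of handling the raw `Buffer` names yourself:
-
-```js
-var yauzl = require("yauzl");
-
-yauzl.open("some.zip", {lazyEntries: true, decodeStrings: false}, function(err, zipfile) {
-  if (err) throw err;
-  zipfile.readEntry();
-  zipfile.on("entry", function(entry) {
-    // With decodeStrings: false, entry.fileName is a raw Buffer;
-    // decoding and validation are up to the client (UTF-8 assumed here).
-    var name = entry.fileName.toString("utf8");
-    var errorMessage = yauzl.validateFileName(name);
-    if (errorMessage != null) console.error("unsafe file name:", errorMessage);
-    else console.log(name);
-    zipfile.readEntry();
-  });
-});
-```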
-
-`validateEntrySizes: true` is the default and ensures that an entry's reported uncompressed size matches its actual uncompressed size.
-This check happens as early as possible, which is either before emitting each `"entry"` event (for entries with no compression),
-or during the `readStream` piping after calling `openReadStream()`.
-See `openReadStream()` for more information on defending against zip bomb attacks.
-
-When `strictFileNames` is `false` (the default) and `decodeStrings` is `true`,
-all backslash (`\`) characters in each `entry.fileName` are replaced with forward slashes (`/`).
-The spec forbids file names with backslashes,
-but Microsoft's `System.IO.Compression.ZipFile` class in .NET versions 4.5.0 until 4.6.1
-creates non-conformant zipfiles with backslashes in file names.
-`strictFileNames` is `false` by default so that clients can read these
-non-conformant zipfiles without knowing about this Microsoft-specific bug.
-When `strictFileNames` is `true` and `decodeStrings` is `true`,
-entries with backslashes in their file names will result in an error. See `validateFileName()`.
-When `decodeStrings` is `false`, `strictFileNames` has no effect.
-
-The `callback` is given the arguments `(err, zipfile)`.
-An `err` is provided if the End of Central Directory Record cannot be found, or if its metadata appears malformed.
-This kind of error usually indicates that this is not a zip file.
-Otherwise, `zipfile` is an instance of `ZipFile`.
-
-### fromFd(fd, [options], [callback])
-
-Reads from the fd, which is presumed to be an open .zip file.
-Note that random access is required by the zip file specification,
-so the fd cannot be an open socket or any other fd that does not support random access.
-
-`options` may be omitted or `null`. The defaults are `{autoClose: false, lazyEntries: false, decodeStrings: true, validateEntrySizes: true, strictFileNames: false}`.
-
-See `open()` for the meaning of the options and callback.
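-
-A minimal sketch, assuming a hypothetical `my.zip` that has already been opened with `fs.open()`:
-
-```js
-var fs = require("fs");
-var yauzl = require("yauzl");
-
-fs.open("my.zip", "r", function(err, fd) {
-  if (err) throw err;
-  yauzl.fromFd(fd, {lazyEntries: true}, function(err, zipfile) {
-    if (err) throw err;
-    zipfile.readEntry();
-    zipfile.on("entry", function(entry) {
-      console.log(entry.fileName);
-      zipfile.readEntry();
-    });
-    // autoClose defaults to false here, so the fd stays open for reuse;
-    // close it yourself once you are done with it.
-    zipfile.on("end", function() {
-      fs.close(fd, function() {});
-    });
-  });
-});
-```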
-
-### fromBuffer(buffer, [options], [callback])
-
-Like `fromFd()`, but reads from a RAM buffer instead of an open file.
-`buffer` is a `Buffer`.
-
-If a `ZipFile` is acquired from this method,
-it will never emit the `close` event,
-and calling `close()` is not necessary.
-
-`options` may be omitted or `null`. The defaults are `{lazyEntries: false, decodeStrings: true, validateEntrySizes: true, strictFileNames: false}`.
-
-See `open()` for the meaning of the options and callback.
-The `autoClose` option is ignored for this method.
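-
-A minimal sketch, assuming the whole archive (a hypothetical `archive.zip`) comfortably fits in memory:
-
-```js
-var fs = require("fs");
-var yauzl = require("yauzl");
-
-fs.readFile("archive.zip", function(err, buffer) {
-  if (err) throw err;
-  yauzl.fromBuffer(buffer, {lazyEntries: true}, function(err, zipfile) {
-    if (err) throw err;
-    zipfile.readEntry();
-    zipfile.on("entry", function(entry) {
-      console.log(entry.fileName, entry.uncompressedSize);
-      zipfile.readEntry();
-    });
-    // No close() is needed; fromBuffer() zipfiles hold no fd.
-  });
-});
-```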
-
-### fromRandomAccessReader(reader, totalSize, [options], [callback])
-
-This method of reading a zip file allows clients to implement their own back-end file system.
-For example, a client might translate read calls into network requests.
-
-The `reader` parameter must be of a type that is a subclass of
-[RandomAccessReader](#class-randomaccessreader) that implements the required methods.
-The `totalSize` is a Number and indicates the total file size of the zip file.
-
-`options` may be omitted or `null`. The defaults are `{autoClose: true, lazyEntries: false, decodeStrings: true, validateEntrySizes: true, strictFileNames: false}`.
-
-See `open()` for the meaning of the options and callback.
-
-### dosDateTimeToDate(date, time)
-
-Converts MS-DOS `date` and `time` data into a JavaScript `Date` object.
-Each parameter is a `Number` treated as an unsigned 16-bit integer.
-Note that this format does not support timezones,
-so the returned object will use the local timezone.
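-
-A minimal sketch, assuming `yauzl` is required and `zipfile` is a lazy `ZipFile` whose entries are being read:
-
-```js
-zipfile.on("entry", function(entry) {
-  // Equivalent to entry.getLastModDate(); the resulting Date is in the local timezone.
-  var lastMod = yauzl.dosDateTimeToDate(entry.lastModFileDate, entry.lastModFileTime);
-  console.log(entry.fileName, lastMod.toISOString());
-  zipfile.readEntry();
-});
-```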
-
-### validateFileName(fileName)
-
-Returns `null` or a `String` error message depending on the validity of `fileName`.
-If `fileName` starts with `"/"` or `/[A-Za-z]:\//` or if it contains `".."` path segments or `"\\"`,
-this function returns an error message appropriate for use like this:
-
-```js
-var errorMessage = yauzl.validateFileName(fileName);
-if (errorMessage != null) throw new Error(errorMessage);
-```
-
-This function is automatically run for each entry, as long as `decodeStrings` is `true`.
-See `open()`, `strictFileNames`, and `Event: "entry"` for more information.
-
-### Class: ZipFile
-
-The constructor for the class is not part of the public API.
-Use `open()`, `fromFd()`, `fromBuffer()`, or `fromRandomAccessReader()` instead.
-
-#### Event: "entry"
-
-Callback gets `(entry)`, which is an `Entry`.
-See `open()` and `readEntry()` for when this event is emitted.
-
-If `decodeStrings` is `true`, entries emitted via this event have already passed file name validation.
-See `validateFileName()` and `open()` for more information.
-
-If `validateEntrySizes` is `true` and this entry's `compressionMethod` is `0` (stored without compression),
-this entry has already passed entry size validation.
-See `open()` for more information.
-
-#### Event: "end"
-
-Emitted after the last `entry` event has been emitted.
-See `open()` and `readEntry()` for more info on when this event is emitted.
-
-#### Event: "close"
-
-Emitted after the fd is actually closed.
-This is after calling `close()` (or after the `end` event when `autoClose` is `true`),
-and after all stream pipelines created from `openReadStream()` have finished reading data from the fd.
-
-If this `ZipFile` was acquired from `fromRandomAccessReader()`,
-the "fd" in the previous paragraph refers to the `RandomAccessReader` implemented by the client.
-
-If this `ZipFile` was acquired from `fromBuffer()`, this event is never emitted.
-
-#### Event: "error"
-
-Emitted in the case of errors with reading the zip file.
-(Note that other errors can be emitted from the streams created from `openReadStream()` as well.)
-After this event has been emitted, no further `entry`, `end`, or `error` events will be emitted,
-but the `close` event may still be emitted.
-
-#### readEntry()
-
-Causes this `ZipFile` to emit an `entry` or `end` event (or an `error` event).
-This method must only be called when this `ZipFile` was created with the `lazyEntries` option set to `true` (see `open()`).
-When this `ZipFile` was created with the `lazyEntries` option set to `true`,
-`entry` and `end` events are only ever emitted in response to this method call.
-
-The event that is emitted in response to this method will not be emitted until after this method has returned,
-so it is safe to call this method before attaching event listeners.
-
-After calling this method, calling this method again before the response event has been emitted will cause undefined behavior.
-Calling this method after the `end` event has been emitted will cause undefined behavior.
-Calling this method after calling `close()` will cause undefined behavior.
-
-#### openReadStream(entry, [options], callback)
-
-`entry` must be an `Entry` object from this `ZipFile`.
-`callback` gets `(err, readStream)`, where `readStream` is a `Readable Stream` that provides the file data for this entry.
-If this zipfile is already closed (see `close()`), the `callback` will receive an `err`.
-
-`options` may be omitted or `null`, and has the following defaults:
-
-```js
-{
- decompress: entry.isCompressed() ? true : null,
- decrypt: null,
- start: 0, // actually the default is null, see below
- end: entry.compressedSize, // actually the default is null, see below
-}
-```
-
-If the entry is compressed (with a supported compression method),
-and the `decompress` option is `true` (or omitted),
-the read stream provides the decompressed data.
-Omitting the `decompress` option is what most clients should do.
-
-The `decompress` option must be `null` (or omitted) when the entry is not compressed (see `isCompressed()`),
-and either `true` (or omitted) or `false` when the entry is compressed.
-Specifying `decompress: false` for a compressed entry causes the read stream
-to provide the raw compressed file data without going through a zlib inflate transform.
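-
-For example, a sketch that dumps the raw deflate bytes of a compressed, unencrypted entry to a hypothetical output file (`entry` and `zipfile` are assumed from an `entry` event):
-
-```js
-var fs = require("fs");
-
-// Assumes `entry` is compressed (deflate) and not encrypted.
-zipfile.openReadStream(entry, {decompress: false}, function(err, readStream) {
-  if (err) throw err;
-  // readStream yields the raw compressed data; no zlib inflate is applied.
-  readStream.pipe(fs.createWriteStream("entry.deflate"));
-  readStream.on("end", function() {
-    zipfile.readEntry();
-  });
-});
-```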
-
-If the entry is encrypted (see `isEncrypted()`), clients may want to avoid calling `openReadStream()` on the entry entirely.
-Alternatively, clients may call `openReadStream()` for encrypted entries and specify `decrypt: false`.
-If the entry is also compressed, clients must *also* specify `decompress: false`.
-Specifying `decrypt: false` for an encrypted entry causes the read stream to provide the raw, still-encrypted file data.
-(This data includes the 12-byte header described in the spec.)
-
-The `decrypt` option must be `null` (or omitted) for non-encrypted entries, and `false` for encrypted entries.
-Omitting the `decrypt` option (or specifying it as `null`) for an encrypted entry
-will result in the `callback` receiving an `err`.
-This default behavior is so that clients not accounting for encrypted files aren't surprised by bogus file data.
-
-The `start` (inclusive) and `end` (exclusive) options are byte offsets into this entry's file data,
-and can be used to obtain part of an entry's file data rather than the whole thing.
-If either of these options is specified and non-`null`,
-then the above options must be used to obtain the file's raw data.
-Specifying `{start: 0, end: entry.compressedSize}` will result in the complete file,
-which is effectively the default for these options,
-but note that unlike omitting the options entirely, specifying `start` or `end` as any non-`null` value
-still triggers the above requirement to pass the appropriate options to get the file's raw data.
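-
-A sketch reading only the first 4 bytes of a stored entry, e.g. to sniff a magic number (`entry` and `zipfile` are assumed from an `entry` event):
-
-```js
-// Assumes `entry` is stored (compressionMethod 0), not encrypted,
-// and at least 4 bytes long, so no decompress/decrypt overrides are needed.
-zipfile.openReadStream(entry, {start: 0, end: 4}, function(err, readStream) {
-  if (err) throw err;
-  var chunks = [];
-  readStream.on("data", function(chunk) { chunks.push(chunk); });
-  readStream.on("end", function() {
-    console.log("first bytes:", Buffer.concat(chunks));
-    zipfile.readEntry();
-  });
-});
-```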
-
-It's possible for the `readStream` provided to the `callback` to emit errors for several reasons.
-For example, if zlib cannot decompress the data, the zlib error will be emitted from the `readStream`.
-Two more error cases (when `validateEntrySizes` is `true`) are if the decompressed data has too many
-or too few actual bytes compared to the reported byte count from the entry's `uncompressedSize` field.
-yauzl notices this false information and emits an error from the `readStream`
-after some number of bytes have already been piped through the stream.
-
-This check allows clients to trust the `uncompressedSize` field in `Entry` objects.
-Guarding against [zip bomb](http://en.wikipedia.org/wiki/Zip_bomb) attacks can be accomplished by
-doing some heuristic checks on the size metadata and then watching out for the above errors.
-Such heuristics are outside the scope of this library,
-but enforcing the `uncompressedSize` is implemented here as a security feature.
-
-It is possible to destroy the `readStream` before it has piped all of its data.
-To do this, call `readStream.destroy()`.
-You must `unpipe()` the `readStream` from any destination before calling `readStream.destroy()`.
-If this zipfile was created using `fromRandomAccessReader()`, the `RandomAccessReader` implementation
-must provide readable streams that implement a `.destroy()` method (see `randomAccessReader._readStreamForRange()`)
-in order for calls to `readStream.destroy()` to work in this context.
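-
-A sketch of aborting a read part-way through (the destination path and timeout are purely illustrative):
-
-```js
-var fs = require("fs");
-
-zipfile.openReadStream(entry, function(err, readStream) {
-  if (err) throw err;
-  var destination = fs.createWriteStream("partial.out");
-  readStream.pipe(destination);
-  setTimeout(function() {
-    // Per the rules above: unpipe from the destination first, then destroy.
-    readStream.unpipe(destination);
-    readStream.destroy();
-  }, 1000);
-});
-```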
-
-#### close()
-
-Causes all future calls to `openReadStream()` to fail,
-and closes the fd, if any, after all streams created by `openReadStream()` have emitted their `end` events.
-
-If the `autoClose` option is set to `true` (see `open()`),
-this function will effectively be called automatically in response to this object's `end` event.
-
-If the `lazyEntries` option is set to `false` (see `open()`) and this object's `end` event has not been emitted yet,
-this function causes undefined behavior.
-If the `lazyEntries` option is set to `true`,
-you can call this function instead of calling `readEntry()` to abort reading the entries of a zipfile.
-
-It is safe to call this function multiple times; after the first call, successive calls have no effect.
-This includes situations where the `autoClose` option effectively calls this function for you.
-
-If `close()` is never called, then the zipfile is "kept open".
-For zipfiles created with `fromFd()`, this will leave the `fd` open, which may be desirable.
-For zipfiles created with `open()`, this will leave the underlying `fd` open, thereby "leaking" it, which is probably undesirable.
-For zipfiles created with `fromRandomAccessReader()`, the reader's `close()` method will never be called.
-For zipfiles created with `fromBuffer()`, the `close()` function has no effect whether called or not.
-
-Regardless of how this `ZipFile` was created, there are no resources other than those listed above that require cleanup from this function.
-This means it may be desirable to never call `close()` in some use cases.
-
-#### isOpen
-
-`Boolean`. `true` until `close()` is called; then it's `false`.
-
-#### entryCount
-
-`Number`. Total number of central directory records.
-
-#### comment
-
-`String`. Always decoded with `CP437` per the spec.
-
-If `decodeStrings` is `false` (see `open()`), this field is the undecoded `Buffer` instead of a decoded `String`.
-
-### Class: Entry
-
-Objects of this class represent Central Directory Records.
-Refer to the zipfile specification for more details about these fields.
-
-These fields are of type `Number`:
-
- * `versionMadeBy`
- * `versionNeededToExtract`
- * `generalPurposeBitFlag`
- * `compressionMethod`
-  * `lastModFileTime` (MS-DOS format, see `getLastModDate()`)
-  * `lastModFileDate` (MS-DOS format, see `getLastModDate()`)
- * `crc32`
- * `compressedSize`
- * `uncompressedSize`
- * `fileNameLength` (bytes)
- * `extraFieldLength` (bytes)
- * `fileCommentLength` (bytes)
- * `internalFileAttributes`
- * `externalFileAttributes`
- * `relativeOffsetOfLocalHeader`
-
-#### fileName
-
-`String`.
-Following the spec, the bytes for the file name are decoded with
-`UTF-8` if `generalPurposeBitFlag & 0x800`, otherwise with `CP437`.
-Alternatively, this field may be populated from the Info-ZIP Unicode Path Extra Field
-(see `extraFields`).
-
-This field is automatically validated by `validateFileName()` before yauzl emits an "entry" event.
-If this field would contain unsafe characters, yauzl emits an error instead of an entry.
-
-If `decodeStrings` is `false` (see `open()`), this field is the undecoded `Buffer` instead of a decoded `String`.
-Therefore, `generalPurposeBitFlag` and any Info-ZIP Unicode Path Extra Field are ignored.
-Furthermore, no automatic file name validation is performed for this file name.
-
-#### extraFields
-
-`Array` with each entry in the form `{id: id, data: data}`,
-where `id` is a `Number` and `data` is a `Buffer`.
-
-This library looks for and reads the ZIP64 Extended Information Extra Field (0x0001)
-in order to support ZIP64 format zip files.
-
-This library also looks for and reads the Info-ZIP Unicode Path Extra Field (0x7075)
-in order to support some zipfiles that use it instead of General Purpose Bit 11
-to convey `UTF-8` file names.
-When the field is identified and verified to be reliable (see the zipfile spec),
-the file name in this field is stored in the `fileName` property,
-and the file name in the central directory record for this entry is ignored.
-Note that when `decodeStrings` is false, all Info-ZIP Unicode Path Extra Fields are ignored.
-
-None of the other fields are considered significant by this library.
-Fields that this library reads are left unaltered in the `extraFields` array.
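-
-Clients that care about some other extra field can scan the array themselves; a small sketch (the header id shown is purely illustrative):
-
-```js
-// Return the data Buffer of the first extra field with the given header id, or null.
-function findExtraField(entry, id) {
-  for (var i = 0; i < entry.extraFields.length; i++) {
-    if (entry.extraFields[i].id === id) return entry.extraFields[i].data;
-  }
-  return null;
-}
-
-// e.g. findExtraField(entry, 0x000a); consult the zipfile spec for real id assignments.
-```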
-
-#### fileComment
-
-`String` decoded with the charset indicated by `generalPurposeBitFlag & 0x800` as with the `fileName`.
-(The Info-ZIP Unicode Path Extra Field has no effect on the charset used for this field.)
-
-If `decodeStrings` is `false` (see `open()`), this field is the undecoded `Buffer` instead of a decoded `String`.
-
-Prior to yauzl version 2.7.0, this field was erroneously documented as `comment` instead of `fileComment`.
-For compatibility with any code that uses the field name `comment`,
-yauzl creates an alias field named `comment` which is identical to `fileComment`.
-
-#### getLastModDate()
-
-Effectively implemented as:
-
-```js
-return dosDateTimeToDate(this.lastModFileDate, this.lastModFileTime);
-```
-
-#### isEncrypted()
-
-Returns whether this entry is encrypted with "Traditional Encryption".
-Effectively implemented as:
-
-```js
-return (this.generalPurposeBitFlag & 0x1) !== 0;
-```
-
-See `openReadStream()` for the implications of this value.
-
-Note that "Strong Encryption" is not supported, and will result in an `"error"` event emitted from the `ZipFile`.
-
-#### isCompressed()
-
-Effectively implemented as:
-
-```js
-return this.compressionMethod === 8;
-```
-
-See `openReadStream()` for the implications of this value.
-
-### Class: RandomAccessReader
-
-This class is meant to be subclassed by clients and instantiated for the `fromRandomAccessReader()` function.
-
-An example implementation can be found in `test/test.js`.
-
-#### randomAccessReader._readStreamForRange(start, end)
-
-Subclasses *must* implement this method.
-
-`start` and `end` are Numbers and indicate byte offsets from the start of the file.
-`end` is exclusive, so `_readStreamForRange(0x1000, 0x2000)` would indicate to read `0x1000` bytes.
-`end - start` will always be at least `1`.
-
-This method should return a readable stream which will be `pipe()`ed into another stream.
-It is expected that the readable stream will provide data in several chunks if necessary.
-If the readable stream provides too many or too few bytes, an error will be emitted.
-(Note that `validateEntrySizes` has no effect on this check,
-because this is a low-level API that should behave correctly regardless of the contents of the file.)
-Any errors emitted on the readable stream will be handled and re-emitted on the client-visible stream
-(returned from `zipfile.openReadStream()`) or provided as the `err` argument to the appropriate callback
-(for example, for `fromRandomAccessReader()`).
-
-The returned stream *must* implement a method `.destroy()`
-if you call `readStream.destroy()` on streams you get from `openReadStream()`.
-If you never call `readStream.destroy()`, then streams returned from this method do not need to implement a method `.destroy()`.
-`.destroy()` should abort any streaming that is in progress and clean up any associated resources.
-`.destroy()` will only be called after the stream has been `unpipe()`d from its destination.
-
-Note that the stream returned from this method might not be the same object that is provided by `openReadStream()`.
-The stream returned from this method might be `pipe()`d through one or more filter streams (for example, a zlib inflate stream).
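-
-A minimal sketch of a subclass backed by a local file via `fs.createReadStream()` (the class name and zip path are illustrative; a real implementation might issue HTTP range requests instead):
-
-```js
-var fs = require("fs");
-var util = require("util");
-var yauzl = require("yauzl");
-
-function FileReader(path) {
-  yauzl.RandomAccessReader.call(this);
-  this.path = path;
-}
-util.inherits(FileReader, yauzl.RandomAccessReader);
-
-FileReader.prototype._readStreamForRange = function(start, end) {
-  // fs.createReadStream's `end` is inclusive; yauzl's `end` is exclusive.
-  return fs.createReadStream(this.path, {start: start, end: end - 1});
-};
-
-var path = "some.zip";
-yauzl.fromRandomAccessReader(new FileReader(path), fs.statSync(path).size, {lazyEntries: true}, function(err, zipfile) {
-  if (err) throw err;
-  zipfile.readEntry();
-  zipfile.on("entry", function(entry) {
-    console.log(entry.fileName);
-    zipfile.readEntry();
-  });
-});
-```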
-
-#### randomAccessReader.read(buffer, offset, length, position, callback)
-
-Subclasses may implement this method.
-The default implementation uses `createReadStream()` to fill the `buffer`.
-
-This method should behave like `fs.read()`.
-
-#### randomAccessReader.close(callback)
-
-Subclasses may implement this method.
-The default implementation is effectively `setImmediate(callback);`.
-
-`callback` takes parameters `(err)`.
-
-This method is called once all streams returned from `_readStreamForRange()` have ended,
-and no more `_readStreamForRange()` or `read()` requests will be issued to this object.
-
-## How to Avoid Crashing
-
-When a malformed zipfile is encountered, the default behavior is to crash (throw an exception).
-If you want to handle errors more gracefully than this,
-be sure to do the following:
-
- * Provide `callback` parameters where they are allowed, and check the `err` parameter.
- * Attach a listener for the `error` event on any `ZipFile` object you get from `open()`, `fromFd()`, `fromBuffer()`, or `fromRandomAccessReader()`.
- * Attach a listener for the `error` event on any stream you get from `openReadStream()`.
-
-Minor version updates to yauzl will not add any additional requirements to this list.
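-
-A sketch that follows all three rules at once (the zip path is hypothetical):
-
-```js
-var yauzl = require("yauzl");
-
-yauzl.open("maybe-corrupt.zip", {lazyEntries: true}, function(err, zipfile) {
-  if (err) return console.error("open failed:", err);        // 1. check callback err
-  zipfile.on("error", function(err) {                          // 2. ZipFile "error" listener
-    console.error("zipfile error:", err);
-  });
-  zipfile.readEntry();
-  zipfile.on("entry", function(entry) {
-    if (/\/$/.test(entry.fileName)) return zipfile.readEntry(); // skip directory entries
-    zipfile.openReadStream(entry, function(err, readStream) {
-      if (err) return console.error("openReadStream failed:", err);
-      readStream.on("error", function(err) {                   // 3. stream "error" listener
-        console.error("read stream error:", err);
-      });
-      readStream.on("end", function() { zipfile.readEntry(); });
-      readStream.resume(); // discard the data in this sketch
-    });
-  });
-});
-```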
-
-## Limitations
-
-### No Streaming Unzip API
-
-Due to the design of the .zip file format, it's impossible to interpret a .zip file from start to finish
-(such as from a readable stream) without sacrificing correctness.
-The Central Directory, which is the authority on the contents of the .zip file, is at the end of a .zip file, not the beginning.
-A streaming API would need to either buffer the entire .zip file to get to the Central Directory before interpreting anything
-(defeating the purpose of a streaming interface), or rely on the Local File Headers which are interspersed through the .zip file.
-However, the Local File Headers are explicitly denounced in the spec as being unreliable copies of the Central Directory,
-so trusting them would be a violation of the spec.
-
-Any library that offers a streaming unzip API must make one of the above two compromises,
-which makes the library either dishonest or nonconformant (usually the latter).
-This library insists on correctness and adherence to the spec, and so does not offer a streaming API.
-
-Here is a way, using the `zip` command line program (Info-ZIP) available in most unix-like environments,
-to create a spec-conformant .zip file that is (nearly) impossible to parse correctly with a streaming parser:
-
-```
-$ echo -ne '\x50\x4b\x07\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' > file.txt
-$ zip -q0 - file.txt | cat > out.zip
-```
-
-This .zip file contains a single file entry that uses General Purpose Bit 3,
-which means the Local File Header doesn't know the size of the file.
-Any streaming parser that encounters this situation will either immediately fail,
-or attempt to search for the Data Descriptor after the file's contents.
-The file's contents are a sequence of 16 bytes crafted to exactly mimic a valid Data Descriptor for an empty file,
-which will fool any parser that gets this far into thinking that the file is empty rather than containing 16 bytes.
-What follows the file's real contents is the file's real Data Descriptor,
-which will likely cause some kind of signature mismatch error for a streaming parser (if one hasn't occurred already).
-
-By using General Purpose Bit 3 (and compression method 0),
-it's possible to create arbitrarily ambiguous .zip files that
-distract parsers with file contents that contain apparently valid .zip file metadata.
-
-### Limited ZIP64 Support
-
-For ZIP64, only zip files smaller than `8PiB` are supported,
-not the full `16EiB` range that a 64-bit integer should be able to index.
-This is due to the JavaScript Number type being an IEEE 754 double precision float.
-
-The Node.js `fs` module probably has this same limitation.
-
-### ZIP64 Extensible Data Sector Is Ignored
-
-The spec does not allow zip file creators to put arbitrary data here,
-but rather reserves its use for PKWARE and mentions something about Z390.
-This doesn't seem useful to expose in this library, so it is ignored.
-
-### No Multi-Disk Archive Support
-
-This library does not support multi-disk zip files.
-The multi-disk fields in the zipfile spec were intended for a zip file to span multiple floppy disks,
-which probably never happens now.
-If the "number of this disk" field in the End of Central Directory Record is not `0`,
-the `open()`, `fromFd()`, `fromBuffer()`, or `fromRandomAccessReader()` `callback` will receive an `err`.
-By extension the following zip file fields are ignored by this library and not provided to clients:
-
- * Disk where central directory starts
- * Number of central directory records on this disk
- * Disk number where file starts
-
-### Limited Encryption Handling
-
-You can detect when a file entry is encrypted with "Traditional Encryption" via `isEncrypted()`,
-but yauzl will not help you decrypt it.
-See `openReadStream()`.
-
-If a zip file contains file entries encrypted with "Strong Encryption", yauzl emits an error.
-
-If the central directory is encrypted or compressed, yauzl emits an error.
-
-### Local File Headers Are Ignored
-
-Many unzip libraries mistakenly read the Local File Header data in zip files.
-This data is officially defined to be redundant with the Central Directory information,
-and is not to be trusted.
-Aside from checking the signature, yauzl ignores the content of the Local File Header.
-
-### No CRC-32 Checking
-
-This library provides the `crc32` field of `Entry` objects read from the Central Directory.
-However, this field is not used for anything in this library.
-
-### versionNeededToExtract Is Ignored
-
-The field `versionNeededToExtract` is ignored,
-because this library doesn't support the complete zip file spec at any version.
-
-### No Support For Obscure Compression Methods
-
-Regarding the `compressionMethod` field of `Entry` objects,
-only method `0` (stored with no compression)
-and method `8` (deflated) are supported.
-Any of the other 15 official methods will cause the `openReadStream()` `callback` to receive an `err`.
-
-### Data Descriptors Are Ignored
-
-There may or may not be Data Descriptor sections in a zip file.
-This library provides no support for finding or interpreting them.
-
-### Archive Extra Data Record Is Ignored
-
-There may or may not be an Archive Extra Data Record section in a zip file.
-This library provides no support for finding or interpreting it.
-
-### No Language Encoding Flag Support
-
-Zip files officially support charset encodings other than CP437 and UTF-8,
-but the zip file spec does not specify how it works.
-This library makes no attempt to interpret the Language Encoding Flag.
-
-## Change History
-
- * 2.10.0
- * Added support for non-conformant zipfiles created by Microsoft, and added option `strictFileNames` to disable the workaround. [issue #66](https://github.com/thejoshwolfe/yauzl/issues/66), [issue #88](https://github.com/thejoshwolfe/yauzl/issues/88)
- * 2.9.2
- * Removed `tools/hexdump-zip.js` and `tools/hex2bin.js`. Those tools are now located here: [thejoshwolfe/hexdump-zip](https://github.com/thejoshwolfe/hexdump-zip) and [thejoshwolfe/hex2bin](https://github.com/thejoshwolfe/hex2bin)
- * Worked around performance problem with zlib when using `fromBuffer()` and `readStream.destroy()` for large compressed files. [issue #87](https://github.com/thejoshwolfe/yauzl/issues/87)
- * 2.9.1
- * Removed `console.log()` accidentally introduced in 2.9.0. [issue #64](https://github.com/thejoshwolfe/yauzl/issues/64)
- * 2.9.0
- * Throw an exception if `readEntry()` is called without `lazyEntries:true`. Previously this caused undefined behavior. [issue #63](https://github.com/thejoshwolfe/yauzl/issues/63)
- * 2.8.0
- * Added option `validateEntrySizes`. [issue #53](https://github.com/thejoshwolfe/yauzl/issues/53)
- * Added `examples/promises.js`
- * Added ability to read raw file data via `decompress` and `decrypt` options. [issue #11](https://github.com/thejoshwolfe/yauzl/issues/11), [issue #38](https://github.com/thejoshwolfe/yauzl/issues/38), [pull #39](https://github.com/thejoshwolfe/yauzl/pull/39)
- * Added `start` and `end` options to `openReadStream()`. [issue #38](https://github.com/thejoshwolfe/yauzl/issues/38)
- * 2.7.0
- * Added option `decodeStrings`. [issue #42](https://github.com/thejoshwolfe/yauzl/issues/42)
- * Fixed documentation for `entry.fileComment` and added compatibility alias. [issue #47](https://github.com/thejoshwolfe/yauzl/issues/47)
- * 2.6.0
- * Support Info-ZIP Unicode Path Extra Field, used by WinRAR for Chinese file names. [issue #33](https://github.com/thejoshwolfe/yauzl/issues/33)
- * 2.5.0
- * Ignore malformed Extra Field that is common in Android .apk files. [issue #31](https://github.com/thejoshwolfe/yauzl/issues/31)
- * 2.4.3
- * Fix crash when parsing malformed Extra Field buffers. [issue #31](https://github.com/thejoshwolfe/yauzl/issues/31)
- * 2.4.2
- * Remove .npmignore and .travis.yml from npm package.
- * 2.4.1
- * Fix error handling.
- * 2.4.0
- * Add ZIP64 support. [issue #6](https://github.com/thejoshwolfe/yauzl/issues/6)
- * Add `lazyEntries` option. [issue #22](https://github.com/thejoshwolfe/yauzl/issues/22)
- * Add `readStream.destroy()` method. [issue #26](https://github.com/thejoshwolfe/yauzl/issues/26)
- * Add `fromRandomAccessReader()`. [issue #14](https://github.com/thejoshwolfe/yauzl/issues/14)
- * Add `examples/unzip.js`.
- * 2.3.1
- * Documentation updates.
- * 2.3.0
- * Check that `uncompressedSize` is correct, or else emit an error. [issue #13](https://github.com/thejoshwolfe/yauzl/issues/13)
- * 2.2.1
- * Update dependencies.
- * 2.2.0
- * Update dependencies.
- * 2.1.0
- * Remove dependency on `iconv`.
- * 2.0.3
- * Fix crash when trying to read a 0-byte file.
- * 2.0.2
- * Fix event behavior after errors.
- * 2.0.1
- * Fix bug with using `iconv`.
- * 2.0.0
- * Initial release.
diff --git a/node_modules/yauzl/index.js b/node_modules/yauzl/index.js
deleted file mode 100644
index cf5d70d..0000000
--- a/node_modules/yauzl/index.js
+++ /dev/null
@@ -1,796 +0,0 @@
-var fs = require("fs");
-var zlib = require("zlib");
-var fd_slicer = require("fd-slicer");
-var crc32 = require("buffer-crc32");
-var util = require("util");
-var EventEmitter = require("events").EventEmitter;
-var Transform = require("stream").Transform;
-var PassThrough = require("stream").PassThrough;
-var Writable = require("stream").Writable;
-
-exports.open = open;
-exports.fromFd = fromFd;
-exports.fromBuffer = fromBuffer;
-exports.fromRandomAccessReader = fromRandomAccessReader;
-exports.dosDateTimeToDate = dosDateTimeToDate;
-exports.validateFileName = validateFileName;
-exports.ZipFile = ZipFile;
-exports.Entry = Entry;
-exports.RandomAccessReader = RandomAccessReader;
-
-function open(path, options, callback) {
- if (typeof options === "function") {
- callback = options;
- options = null;
- }
- if (options == null) options = {};
- if (options.autoClose == null) options.autoClose = true;
- if (options.lazyEntries == null) options.lazyEntries = false;
- if (options.decodeStrings == null) options.decodeStrings = true;
- if (options.validateEntrySizes == null) options.validateEntrySizes = true;
- if (options.strictFileNames == null) options.strictFileNames = false;
- if (callback == null) callback = defaultCallback;
- fs.open(path, "r", function(err, fd) {
- if (err) return callback(err);
- fromFd(fd, options, function(err, zipfile) {
- if (err) fs.close(fd, defaultCallback);
- callback(err, zipfile);
- });
- });
-}
-
-function fromFd(fd, options, callback) {
- if (typeof options === "function") {
- callback = options;
- options = null;
- }
- if (options == null) options = {};
- if (options.autoClose == null) options.autoClose = false;
- if (options.lazyEntries == null) options.lazyEntries = false;
- if (options.decodeStrings == null) options.decodeStrings = true;
- if (options.validateEntrySizes == null) options.validateEntrySizes = true;
- if (options.strictFileNames == null) options.strictFileNames = false;
- if (callback == null) callback = defaultCallback;
- fs.fstat(fd, function(err, stats) {
- if (err) return callback(err);
- var reader = fd_slicer.createFromFd(fd, {autoClose: true});
- fromRandomAccessReader(reader, stats.size, options, callback);
- });
-}
-
-function fromBuffer(buffer, options, callback) {
- if (typeof options === "function") {
- callback = options;
- options = null;
- }
- if (options == null) options = {};
- options.autoClose = false;
- if (options.lazyEntries == null) options.lazyEntries = false;
- if (options.decodeStrings == null) options.decodeStrings = true;
- if (options.validateEntrySizes == null) options.validateEntrySizes = true;
- if (options.strictFileNames == null) options.strictFileNames = false;
- // limit the max chunk size. see https://github.com/thejoshwolfe/yauzl/issues/87
- var reader = fd_slicer.createFromBuffer(buffer, {maxChunkSize: 0x10000});
- fromRandomAccessReader(reader, buffer.length, options, callback);
-}
-
-function fromRandomAccessReader(reader, totalSize, options, callback) {
- if (typeof options === "function") {
- callback = options;
- options = null;
- }
- if (options == null) options = {};
- if (options.autoClose == null) options.autoClose = true;
- if (options.lazyEntries == null) options.lazyEntries = false;
- if (options.decodeStrings == null) options.decodeStrings = true;
- var decodeStrings = !!options.decodeStrings;
- if (options.validateEntrySizes == null) options.validateEntrySizes = true;
- if (options.strictFileNames == null) options.strictFileNames = false;
- if (callback == null) callback = defaultCallback;
- if (typeof totalSize !== "number") throw new Error("expected totalSize parameter to be a number");
- if (totalSize > Number.MAX_SAFE_INTEGER) {
- throw new Error("zip file too large. only file sizes up to 2^52 are supported due to JavaScript's Number type being an IEEE 754 double.");
- }
-
- // the matching unref() call is in zipfile.close()
- reader.ref();
-
- // eocdr means End of Central Directory Record.
- // search backwards for the eocdr signature.
- // the last field of the eocdr is a variable-length comment.
- // the comment size is encoded in a 2-byte field in the eocdr, which we can't find without trudging backwards through the comment to find it.
- // as a consequence of this design decision, it's possible to have ambiguous zip file metadata if a coherent eocdr was in the comment.
-  // we search backwards for an eocdr signature, and hope that whoever made the zip file was smart enough to forbid the eocdr signature in the comment.
- var eocdrWithoutCommentSize = 22;
- var maxCommentSize = 0xffff; // 2-byte size
- var bufferSize = Math.min(eocdrWithoutCommentSize + maxCommentSize, totalSize);
- var buffer = newBuffer(bufferSize);
- var bufferReadStart = totalSize - buffer.length;
- readAndAssertNoEof(reader, buffer, 0, bufferSize, bufferReadStart, function(err) {
- if (err) return callback(err);
- for (var i = bufferSize - eocdrWithoutCommentSize; i >= 0; i -= 1) {
- if (buffer.readUInt32LE(i) !== 0x06054b50) continue;
- // found eocdr
- var eocdrBuffer = buffer.slice(i);
-
- // 0 - End of central directory signature = 0x06054b50
- // 4 - Number of this disk
- var diskNumber = eocdrBuffer.readUInt16LE(4);
- if (diskNumber !== 0) {
- return callback(new Error("multi-disk zip files are not supported: found disk number: " + diskNumber));
- }
- // 6 - Disk where central directory starts
- // 8 - Number of central directory records on this disk
- // 10 - Total number of central directory records
- var entryCount = eocdrBuffer.readUInt16LE(10);
- // 12 - Size of central directory (bytes)
- // 16 - Offset of start of central directory, relative to start of archive
- var centralDirectoryOffset = eocdrBuffer.readUInt32LE(16);
- // 20 - Comment length
- var commentLength = eocdrBuffer.readUInt16LE(20);
- var expectedCommentLength = eocdrBuffer.length - eocdrWithoutCommentSize;
- if (commentLength !== expectedCommentLength) {
- return callback(new Error("invalid comment length. expected: " + expectedCommentLength + ". found: " + commentLength));
- }
- // 22 - Comment
- // the encoding is always cp437.
- var comment = decodeStrings ? decodeBuffer(eocdrBuffer, 22, eocdrBuffer.length, false)
- : eocdrBuffer.slice(22);
-
- if (!(entryCount === 0xffff || centralDirectoryOffset === 0xffffffff)) {
- return callback(null, new ZipFile(reader, centralDirectoryOffset, totalSize, entryCount, comment, options.autoClose, options.lazyEntries, decodeStrings, options.validateEntrySizes, options.strictFileNames));
- }
-
- // ZIP64 format
-
-    // Zip64 end of central directory locator
- var zip64EocdlBuffer = newBuffer(20);
- var zip64EocdlOffset = bufferReadStart + i - zip64EocdlBuffer.length;
- readAndAssertNoEof(reader, zip64EocdlBuffer, 0, zip64EocdlBuffer.length, zip64EocdlOffset, function(err) {
- if (err) return callback(err);
-
- // 0 - zip64 end of central dir locator signature = 0x07064b50
- if (zip64EocdlBuffer.readUInt32LE(0) !== 0x07064b50) {
- return callback(new Error("invalid zip64 end of central directory locator signature"));
- }
- // 4 - number of the disk with the start of the zip64 end of central directory
- // 8 - relative offset of the zip64 end of central directory record
- var zip64EocdrOffset = readUInt64LE(zip64EocdlBuffer, 8);
- // 16 - total number of disks
-
- // ZIP64 end of central directory record
- var zip64EocdrBuffer = newBuffer(56);
- readAndAssertNoEof(reader, zip64EocdrBuffer, 0, zip64EocdrBuffer.length, zip64EocdrOffset, function(err) {
- if (err) return callback(err);
-
- // 0 - zip64 end of central dir signature 4 bytes (0x06064b50)
- if (zip64EocdrBuffer.readUInt32LE(0) !== 0x06064b50) {
- return callback(new Error("invalid zip64 end of central directory record signature"));
- }
- // 4 - size of zip64 end of central directory record 8 bytes
- // 12 - version made by 2 bytes
- // 14 - version needed to extract 2 bytes
- // 16 - number of this disk 4 bytes
- // 20 - number of the disk with the start of the central directory 4 bytes
- // 24 - total number of entries in the central directory on this disk 8 bytes
- // 32 - total number of entries in the central directory 8 bytes
- entryCount = readUInt64LE(zip64EocdrBuffer, 32);
- // 40 - size of the central directory 8 bytes
- // 48 - offset of start of central directory with respect to the starting disk number 8 bytes
- centralDirectoryOffset = readUInt64LE(zip64EocdrBuffer, 48);
- // 56 - zip64 extensible data sector (variable size)
- return callback(null, new ZipFile(reader, centralDirectoryOffset, totalSize, entryCount, comment, options.autoClose, options.lazyEntries, decodeStrings, options.validateEntrySizes, options.strictFileNames));
- });
- });
- return;
- }
- callback(new Error("end of central directory record signature not found"));
- });
-}
-
-util.inherits(ZipFile, EventEmitter);
-function ZipFile(reader, centralDirectoryOffset, fileSize, entryCount, comment, autoClose, lazyEntries, decodeStrings, validateEntrySizes, strictFileNames) {
- var self = this;
- EventEmitter.call(self);
- self.reader = reader;
- // forward close events
- self.reader.on("error", function(err) {
- // error closing the fd
- emitError(self, err);
- });
- self.reader.once("close", function() {
- self.emit("close");
- });
- self.readEntryCursor = centralDirectoryOffset;
- self.fileSize = fileSize;
- self.entryCount = entryCount;
- self.comment = comment;
- self.entriesRead = 0;
- self.autoClose = !!autoClose;
- self.lazyEntries = !!lazyEntries;
- self.decodeStrings = !!decodeStrings;
- self.validateEntrySizes = !!validateEntrySizes;
- self.strictFileNames = !!strictFileNames;
- self.isOpen = true;
- self.emittedError = false;
-
- if (!self.lazyEntries) self._readEntry();
-}
-ZipFile.prototype.close = function() {
- if (!this.isOpen) return;
- this.isOpen = false;
- this.reader.unref();
-};
-
-function emitErrorAndAutoClose(self, err) {
- if (self.autoClose) self.close();
- emitError(self, err);
-}
-function emitError(self, err) {
- if (self.emittedError) return;
- self.emittedError = true;
- self.emit("error", err);
-}
-
-ZipFile.prototype.readEntry = function() {
- if (!this.lazyEntries) throw new Error("readEntry() called without lazyEntries:true");
- this._readEntry();
-};
-ZipFile.prototype._readEntry = function() {
- var self = this;
- if (self.entryCount === self.entriesRead) {
- // done with metadata
- setImmediate(function() {
- if (self.autoClose) self.close();
- if (self.emittedError) return;
- self.emit("end");
- });
- return;
- }
- if (self.emittedError) return;
- var buffer = newBuffer(46);
- readAndAssertNoEof(self.reader, buffer, 0, buffer.length, self.readEntryCursor, function(err) {
- if (err) return emitErrorAndAutoClose(self, err);
- if (self.emittedError) return;
- var entry = new Entry();
- // 0 - Central directory file header signature
- var signature = buffer.readUInt32LE(0);
- if (signature !== 0x02014b50) return emitErrorAndAutoClose(self, new Error("invalid central directory file header signature: 0x" + signature.toString(16)));
- // 4 - Version made by
- entry.versionMadeBy = buffer.readUInt16LE(4);
- // 6 - Version needed to extract (minimum)
- entry.versionNeededToExtract = buffer.readUInt16LE(6);
- // 8 - General purpose bit flag
- entry.generalPurposeBitFlag = buffer.readUInt16LE(8);
- // 10 - Compression method
- entry.compressionMethod = buffer.readUInt16LE(10);
- // 12 - File last modification time
- entry.lastModFileTime = buffer.readUInt16LE(12);
- // 14 - File last modification date
- entry.lastModFileDate = buffer.readUInt16LE(14);
- // 16 - CRC-32
- entry.crc32 = buffer.readUInt32LE(16);
- // 20 - Compressed size
- entry.compressedSize = buffer.readUInt32LE(20);
- // 24 - Uncompressed size
- entry.uncompressedSize = buffer.readUInt32LE(24);
- // 28 - File name length (n)
- entry.fileNameLength = buffer.readUInt16LE(28);
- // 30 - Extra field length (m)
- entry.extraFieldLength = buffer.readUInt16LE(30);
- // 32 - File comment length (k)
- entry.fileCommentLength = buffer.readUInt16LE(32);
- // 34 - Disk number where file starts
- // 36 - Internal file attributes
- entry.internalFileAttributes = buffer.readUInt16LE(36);
- // 38 - External file attributes
- entry.externalFileAttributes = buffer.readUInt32LE(38);
- // 42 - Relative offset of local file header
- entry.relativeOffsetOfLocalHeader = buffer.readUInt32LE(42);
-
- if (entry.generalPurposeBitFlag & 0x40) return emitErrorAndAutoClose(self, new Error("strong encryption is not supported"));
-
- self.readEntryCursor += 46;
-
- buffer = newBuffer(entry.fileNameLength + entry.extraFieldLength + entry.fileCommentLength);
- readAndAssertNoEof(self.reader, buffer, 0, buffer.length, self.readEntryCursor, function(err) {
- if (err) return emitErrorAndAutoClose(self, err);
- if (self.emittedError) return;
- // 46 - File name
- var isUtf8 = (entry.generalPurposeBitFlag & 0x800) !== 0;
- entry.fileName = self.decodeStrings ? decodeBuffer(buffer, 0, entry.fileNameLength, isUtf8)
- : buffer.slice(0, entry.fileNameLength);
-
- // 46+n - Extra field
- var fileCommentStart = entry.fileNameLength + entry.extraFieldLength;
- var extraFieldBuffer = buffer.slice(entry.fileNameLength, fileCommentStart);
- entry.extraFields = [];
- var i = 0;
- while (i < extraFieldBuffer.length - 3) {
- var headerId = extraFieldBuffer.readUInt16LE(i + 0);
- var dataSize = extraFieldBuffer.readUInt16LE(i + 2);
- var dataStart = i + 4;
- var dataEnd = dataStart + dataSize;
- if (dataEnd > extraFieldBuffer.length) return emitErrorAndAutoClose(self, new Error("extra field length exceeds extra field buffer size"));
- var dataBuffer = newBuffer(dataSize);
- extraFieldBuffer.copy(dataBuffer, 0, dataStart, dataEnd);
- entry.extraFields.push({
- id: headerId,
- data: dataBuffer,
- });
- i = dataEnd;
- }
-
- // 46+n+m - File comment
- entry.fileComment = self.decodeStrings ? decodeBuffer(buffer, fileCommentStart, fileCommentStart + entry.fileCommentLength, isUtf8)
- : buffer.slice(fileCommentStart, fileCommentStart + entry.fileCommentLength);
- // compatibility hack for https://github.com/thejoshwolfe/yauzl/issues/47
- entry.comment = entry.fileComment;
-
- self.readEntryCursor += buffer.length;
- self.entriesRead += 1;
-
- if (entry.uncompressedSize === 0xffffffff ||
- entry.compressedSize === 0xffffffff ||
- entry.relativeOffsetOfLocalHeader === 0xffffffff) {
- // ZIP64 format
- // find the Zip64 Extended Information Extra Field
- var zip64EiefBuffer = null;
- for (var i = 0; i < entry.extraFields.length; i++) {
- var extraField = entry.extraFields[i];
- if (extraField.id === 0x0001) {
- zip64EiefBuffer = extraField.data;
- break;
- }
- }
- if (zip64EiefBuffer == null) {
- return emitErrorAndAutoClose(self, new Error("expected zip64 extended information extra field"));
- }
- var index = 0;
- // 0 - Original Size 8 bytes
- if (entry.uncompressedSize === 0xffffffff) {
- if (index + 8 > zip64EiefBuffer.length) {
- return emitErrorAndAutoClose(self, new Error("zip64 extended information extra field does not include uncompressed size"));
- }
- entry.uncompressedSize = readUInt64LE(zip64EiefBuffer, index);
- index += 8;
- }
- // 8 - Compressed Size 8 bytes
- if (entry.compressedSize === 0xffffffff) {
- if (index + 8 > zip64EiefBuffer.length) {
- return emitErrorAndAutoClose(self, new Error("zip64 extended information extra field does not include compressed size"));
- }
- entry.compressedSize = readUInt64LE(zip64EiefBuffer, index);
- index += 8;
- }
- // 16 - Relative Header Offset 8 bytes
- if (entry.relativeOffsetOfLocalHeader === 0xffffffff) {
- if (index + 8 > zip64EiefBuffer.length) {
- return emitErrorAndAutoClose(self, new Error("zip64 extended information extra field does not include relative header offset"));
- }
- entry.relativeOffsetOfLocalHeader = readUInt64LE(zip64EiefBuffer, index);
- index += 8;
- }
- // 24 - Disk Start Number 4 bytes
- }
-
- // check for Info-ZIP Unicode Path Extra Field (0x7075)
- // see https://github.com/thejoshwolfe/yauzl/issues/33
- if (self.decodeStrings) {
- for (var i = 0; i < entry.extraFields.length; i++) {
- var extraField = entry.extraFields[i];
- if (extraField.id === 0x7075) {
- if (extraField.data.length < 6) {
- // too short to be meaningful
- continue;
- }
- // Version 1 byte version of this extra field, currently 1
- if (extraField.data.readUInt8(0) !== 1) {
- // > Changes may not be backward compatible so this extra
- // > field should not be used if the version is not recognized.
- continue;
- }
- // NameCRC32 4 bytes File Name Field CRC32 Checksum
- var oldNameCrc32 = extraField.data.readUInt32LE(1);
- if (crc32.unsigned(buffer.slice(0, entry.fileNameLength)) !== oldNameCrc32) {
- // > If the CRC check fails, this UTF-8 Path Extra Field should be
- // > ignored and the File Name field in the header should be used instead.
- continue;
- }
- // UnicodeName Variable UTF-8 version of the entry File Name
- entry.fileName = decodeBuffer(extraField.data, 5, extraField.data.length, true);
- break;
- }
- }
- }
-
- // validate file size
- if (self.validateEntrySizes && entry.compressionMethod === 0) {
- var expectedCompressedSize = entry.uncompressedSize;
- if (entry.isEncrypted()) {
- // traditional encryption prefixes the file data with a header
- expectedCompressedSize += 12;
- }
- if (entry.compressedSize !== expectedCompressedSize) {
- var msg = "compressed/uncompressed size mismatch for stored file: " + entry.compressedSize + " != " + entry.uncompressedSize;
- return emitErrorAndAutoClose(self, new Error(msg));
- }
- }
-
- if (self.decodeStrings) {
- if (!self.strictFileNames) {
- // allow backslash
- entry.fileName = entry.fileName.replace(/\\/g, "/");
- }
- var errorMessage = validateFileName(entry.fileName, self.validateFileNameOptions);
- if (errorMessage != null) return emitErrorAndAutoClose(self, new Error(errorMessage));
- }
- self.emit("entry", entry);
-
- if (!self.lazyEntries) self._readEntry();
- });
- });
-};
-
-ZipFile.prototype.openReadStream = function(entry, options, callback) {
- var self = this;
- // parameter validation
- var relativeStart = 0;
- var relativeEnd = entry.compressedSize;
- if (callback == null) {
- callback = options;
- options = {};
- } else {
- // validate options that the caller has no excuse to get wrong
- if (options.decrypt != null) {
- if (!entry.isEncrypted()) {
- throw new Error("options.decrypt can only be specified for encrypted entries");
- }
- if (options.decrypt !== false) throw new Error("invalid options.decrypt value: " + options.decrypt);
- if (entry.isCompressed()) {
- if (options.decompress !== false) throw new Error("entry is encrypted and compressed, and options.decompress !== false");
- }
- }
- if (options.decompress != null) {
- if (!entry.isCompressed()) {
- throw new Error("options.decompress can only be specified for compressed entries");
- }
- if (!(options.decompress === false || options.decompress === true)) {
- throw new Error("invalid options.decompress value: " + options.decompress);
- }
- }
- if (options.start != null || options.end != null) {
- if (entry.isCompressed() && options.decompress !== false) {
- throw new Error("start/end range not allowed for compressed entry without options.decompress === false");
- }
- if (entry.isEncrypted() && options.decrypt !== false) {
- throw new Error("start/end range not allowed for encrypted entry without options.decrypt === false");
- }
- }
- if (options.start != null) {
- relativeStart = options.start;
- if (relativeStart < 0) throw new Error("options.start < 0");
- if (relativeStart > entry.compressedSize) throw new Error("options.start > entry.compressedSize");
- }
- if (options.end != null) {
- relativeEnd = options.end;
- if (relativeEnd < 0) throw new Error("options.end < 0");
- if (relativeEnd > entry.compressedSize) throw new Error("options.end > entry.compressedSize");
- if (relativeEnd < relativeStart) throw new Error("options.end < options.start");
- }
- }
- // any further errors can either be caused by the zipfile,
- // or were introduced in a minor version of yauzl,
- // so should be passed to the client rather than thrown.
- if (!self.isOpen) return callback(new Error("closed"));
- if (entry.isEncrypted()) {
- if (options.decrypt !== false) return callback(new Error("entry is encrypted, and options.decrypt !== false"));
- }
- // make sure we don't lose the fd before we open the actual read stream
- self.reader.ref();
- var buffer = newBuffer(30);
- readAndAssertNoEof(self.reader, buffer, 0, buffer.length, entry.relativeOffsetOfLocalHeader, function(err) {
- try {
- if (err) return callback(err);
- // 0 - Local file header signature = 0x04034b50
- var signature = buffer.readUInt32LE(0);
- if (signature !== 0x04034b50) {
- return callback(new Error("invalid local file header signature: 0x" + signature.toString(16)));
- }
- // all this should be redundant
- // 4 - Version needed to extract (minimum)
- // 6 - General purpose bit flag
- // 8 - Compression method
- // 10 - File last modification time
- // 12 - File last modification date
- // 14 - CRC-32
- // 18 - Compressed size
- // 22 - Uncompressed size
- // 26 - File name length (n)
- var fileNameLength = buffer.readUInt16LE(26);
- // 28 - Extra field length (m)
- var extraFieldLength = buffer.readUInt16LE(28);
- // 30 - File name
- // 30+n - Extra field
- var localFileHeaderEnd = entry.relativeOffsetOfLocalHeader + buffer.length + fileNameLength + extraFieldLength;
- var decompress;
- if (entry.compressionMethod === 0) {
- // 0 - The file is stored (no compression)
- decompress = false;
- } else if (entry.compressionMethod === 8) {
- // 8 - The file is Deflated
- decompress = options.decompress != null ? options.decompress : true;
- } else {
- return callback(new Error("unsupported compression method: " + entry.compressionMethod));
- }
- var fileDataStart = localFileHeaderEnd;
- var fileDataEnd = fileDataStart + entry.compressedSize;
- if (entry.compressedSize !== 0) {
- // bounds check now, because the read streams will probably not complain loud enough.
- // since we're dealing with an unsigned offset plus an unsigned size,
- // we only have 1 thing to check for.
- if (fileDataEnd > self.fileSize) {
- return callback(new Error("file data overflows file bounds: " +
- fileDataStart + " + " + entry.compressedSize + " > " + self.fileSize));
- }
- }
- var readStream = self.reader.createReadStream({
- start: fileDataStart + relativeStart,
- end: fileDataStart + relativeEnd,
- });
- var endpointStream = readStream;
- if (decompress) {
- var destroyed = false;
- var inflateFilter = zlib.createInflateRaw();
- readStream.on("error", function(err) {
- // setImmediate here because errors can be emitted during the first call to pipe()
- setImmediate(function() {
- if (!destroyed) inflateFilter.emit("error", err);
- });
- });
- readStream.pipe(inflateFilter);
-
- if (self.validateEntrySizes) {
- endpointStream = new AssertByteCountStream(entry.uncompressedSize);
- inflateFilter.on("error", function(err) {
- // forward zlib errors to the client-visible stream
- setImmediate(function() {
- if (!destroyed) endpointStream.emit("error", err);
- });
- });
- inflateFilter.pipe(endpointStream);
- } else {
- // the zlib filter is the client-visible stream
- endpointStream = inflateFilter;
- }
- // this is part of yauzl's API, so implement this function on the client-visible stream
- endpointStream.destroy = function() {
- destroyed = true;
- if (inflateFilter !== endpointStream) inflateFilter.unpipe(endpointStream);
- readStream.unpipe(inflateFilter);
- // TODO: the inflateFilter may cause a memory leak. see Issue #27.
- readStream.destroy();
- };
- }
- callback(null, endpointStream);
- } finally {
- self.reader.unref();
- }
- });
-};
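-
-// A minimal usage sketch, assuming the public yauzl.open()/openReadStream() API
-// from the README ("archive.zip" is a placeholder): a start/end range over the
-// raw compressed bytes is only allowed together with decompress: false, matching
-// the validation above.
-//
-//   yauzl.open("archive.zip", {lazyEntries: true}, function(err, zipfile) {
-//     if (err) throw err;
-//     zipfile.readEntry();
-//     zipfile.on("entry", function(entry) {
-//       if (entry.isEncrypted() || !entry.isCompressed()) return zipfile.readEntry();
-//       zipfile.openReadStream(entry, {decompress: false, start: 0, end: entry.compressedSize},
-//           function(err, readStream) {
-//         if (err) throw err;
-//         readStream.pipe(process.stdout); // the raw deflate stream, not inflated
-//       });
-//     });
-//   });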
-
-function Entry() {
-}
-Entry.prototype.getLastModDate = function() {
- return dosDateTimeToDate(this.lastModFileDate, this.lastModFileTime);
-};
-Entry.prototype.isEncrypted = function() {
- return (this.generalPurposeBitFlag & 0x1) !== 0;
-};
-Entry.prototype.isCompressed = function() {
- return this.compressionMethod === 8;
-};
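-
-// A small sketch of how an "entry" handler might use these helpers (zipfile here
-// is the usual lazyEntries ZipFile from the README example):
-//
-//   zipfile.on("entry", function(entry) {
-//     if (entry.isEncrypted()) return zipfile.readEntry(); // yauzl does not decrypt
-//     console.log(entry.fileName, entry.isCompressed() ? "deflated" : "stored",
-//         entry.getLastModDate().toISOString());
-//     zipfile.readEntry();
-//   });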
-
-function dosDateTimeToDate(date, time) {
- var day = date & 0x1f; // 1-31
- var month = (date >> 5 & 0xf) - 1; // 1-12, 0-11
- var year = (date >> 9 & 0x7f) + 1980; // 0-127, 1980-2107
-
- var millisecond = 0;
- var second = (time & 0x1f) * 2; // 0-29, 0-58 (even numbers)
- var minute = time >> 5 & 0x3f; // 0-59
- var hour = time >> 11 & 0x1f; // 0-23
-
- return new Date(year, month, day, hour, minute, second, millisecond);
-}
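-
-// Worked example (local time): a DOS date of 0x4512 encodes year offset 34,
-// month 8, day 18, and a DOS time of 0x8bcc encodes hour 17, minute 30, and a
-// seconds field of 12 (stored in 2-second units), so dosDateTimeToDate(0x4512,
-// 0x8bcc) yields 2014-08-18 17:30:24 in the local timezone.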
-
-function validateFileName(fileName) {
- if (fileName.indexOf("\\") !== -1) {
- return "invalid characters in fileName: " + fileName;
- }
- if (/^[a-zA-Z]:/.test(fileName) || /^\//.test(fileName)) {
- return "absolute path: " + fileName;
- }
- if (fileName.split("/").indexOf("..") !== -1) {
- return "invalid relative path: " + fileName;
- }
- // all good
- return null;
-}
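-
-// For example: validateFileName("docs/readme.txt") returns null, while
-// "..\\evil", "/etc/passwd", "C:/evil", and "a/../b" each return a non-null
-// message (backslash, absolute path, drive letter, and ".." path component,
-// respectively).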
-
-function readAndAssertNoEof(reader, buffer, offset, length, position, callback) {
- if (length === 0) {
- // fs.read will throw an out-of-bounds error if you try to read 0 bytes from a 0 byte file
- return setImmediate(function() { callback(null, newBuffer(0)); });
- }
- reader.read(buffer, offset, length, position, function(err, bytesRead) {
- if (err) return callback(err);
- if (bytesRead < length) {
- return callback(new Error("unexpected EOF"));
- }
- callback();
- });
-}
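-
-// Calling convention sketch (reader and callback stand for whatever the caller
-// has in scope): the caller supplies a pre-allocated buffer and an absolute
-// position, and short reads surface as an "unexpected EOF" error rather than a
-// silently partial buffer.
-//
-//   var signatureBuffer = newBuffer(4);
-//   readAndAssertNoEof(reader, signatureBuffer, 0, 4, 0, function(err) {
-//     if (err) return callback(err);
-//     console.log("0x" + signatureBuffer.readUInt32LE(0).toString(16));
-//   });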
-
-util.inherits(AssertByteCountStream, Transform);
-function AssertByteCountStream(byteCount) {
- Transform.call(this);
- this.actualByteCount = 0;
- this.expectedByteCount = byteCount;
-}
-AssertByteCountStream.prototype._transform = function(chunk, encoding, cb) {
- this.actualByteCount += chunk.length;
- if (this.actualByteCount > this.expectedByteCount) {
- var msg = "too many bytes in the stream. expected " + this.expectedByteCount + ". got at least " + this.actualByteCount;
- return cb(new Error(msg));
- }
- cb(null, chunk);
-};
-AssertByteCountStream.prototype._flush = function(cb) {
- if (this.actualByteCount < this.expectedByteCount) {
- var msg = "not enough bytes in the stream. expected " + this.expectedByteCount + ". got only " + this.actualByteCount;
- return cb(new Error(msg));
- }
- cb();
-};
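-
-// A minimal sketch of the guarantee this stream provides: exactly
-// expectedByteCount bytes pass through unchanged, and anything shorter or longer
-// becomes an "error" event instead of silently wrong output.
-//
-//   var counter = new AssertByteCountStream(5);
-//   counter.on("error", function(err) { console.error(err.message); });
-//   counter.on("data", function(chunk) { /* chunks pass through untouched */ });
-//   counter.end(Buffer.from("hello")); // exactly 5 bytes, so no error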
-
-util.inherits(RandomAccessReader, EventEmitter);
-function RandomAccessReader() {
- EventEmitter.call(this);
- this.refCount = 0;
-}
-RandomAccessReader.prototype.ref = function() {
- this.refCount += 1;
-};
-RandomAccessReader.prototype.unref = function() {
- var self = this;
- self.refCount -= 1;
-
- if (self.refCount > 0) return;
- if (self.refCount < 0) throw new Error("invalid unref");
-
- self.close(onCloseDone);
-
- function onCloseDone(err) {
- if (err) return self.emit('error', err);
- self.emit('close');
- }
-};
-RandomAccessReader.prototype.createReadStream = function(options) {
- var start = options.start;
- var end = options.end;
- if (start === end) {
- var emptyStream = new PassThrough();
- setImmediate(function() {
- emptyStream.end();
- });
- return emptyStream;
- }
- var stream = this._readStreamForRange(start, end);
-
- var destroyed = false;
- var refUnrefFilter = new RefUnrefFilter(this);
- stream.on("error", function(err) {
- setImmediate(function() {
- if (!destroyed) refUnrefFilter.emit("error", err);
- });
- });
- refUnrefFilter.destroy = function() {
- stream.unpipe(refUnrefFilter);
- refUnrefFilter.unref();
- stream.destroy();
- };
-
- var byteCounter = new AssertByteCountStream(end - start);
- refUnrefFilter.on("error", function(err) {
- setImmediate(function() {
- if (!destroyed) byteCounter.emit("error", err);
- });
- });
- byteCounter.destroy = function() {
- destroyed = true;
- refUnrefFilter.unpipe(byteCounter);
- refUnrefFilter.destroy();
- };
-
- return stream.pipe(refUnrefFilter).pipe(byteCounter);
-};
-RandomAccessReader.prototype._readStreamForRange = function(start, end) {
- throw new Error("not implemented");
-};
-RandomAccessReader.prototype.read = function(buffer, offset, length, position, callback) {
- var readStream = this.createReadStream({start: position, end: position + length});
- var writeStream = new Writable();
- var written = 0;
- writeStream._write = function(chunk, encoding, cb) {
- chunk.copy(buffer, offset + written, 0, chunk.length);
- written += chunk.length;
- cb();
- };
- writeStream.on("finish", callback);
- readStream.on("error", function(error) {
- callback(error);
- });
- readStream.pipe(writeStream);
-};
-RandomAccessReader.prototype.close = function(callback) {
- setImmediate(callback);
-};
-
-util.inherits(RefUnrefFilter, PassThrough);
-function RefUnrefFilter(context) {
- PassThrough.call(this);
- this.context = context;
- this.context.ref();
- this.unreffedYet = false;
-}
-RefUnrefFilter.prototype._flush = function(cb) {
- this.unref();
- cb();
-};
-RefUnrefFilter.prototype.unref = function(cb) {
- if (this.unreffedYet) return;
- this.unreffedYet = true;
- this.context.unref();
-};
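-
-// A minimal sketch (assuming the exported yauzl.RandomAccessReader and
-// yauzl.fromRandomAccessReader() documented in the README) of reading a zip that
-// is already in memory; zipBuffer is a placeholder, and PassThrough and util are
-// the same stream/util imports used above. Subclasses only need to implement
-// _readStreamForRange(start, end), where end is exclusive.
-//
-//   function BufferReader(buffer) {
-//     yauzl.RandomAccessReader.call(this);
-//     this.buffer = buffer;
-//   }
-//   util.inherits(BufferReader, yauzl.RandomAccessReader);
-//   BufferReader.prototype._readStreamForRange = function(start, end) {
-//     var stream = new PassThrough();
-//     stream.end(this.buffer.slice(start, end));
-//     return stream;
-//   };
-//
-//   yauzl.fromRandomAccessReader(new BufferReader(zipBuffer), zipBuffer.length,
-//       {lazyEntries: true}, function(err, zipfile) { /* same as yauzl.open() */ });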
-
-var cp437 = '\u0000☺☻♥♦♣♠•◘○◙♂♀♪♫☼►◄↕‼¶§▬↨↑↓→←∟↔▲▼ !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~⌂ÇüéâäàåçêëèïîìÄÅÉæÆôöòûùÿÖÜ¢£¥₧ƒáíóúñѪº¿⌐¬½¼¡«»░▒▓│┤╡╢╖╕╣║╗╝╜╛┐└┴┬├─┼╞╟╚╔╩╦╠═╬╧╨╤╥╙╘╒╓╫╪┘┌█▄▌▐▀αßΓπΣσµτΦΘΩδ∞φε∩≡±≥≤⌠⌡÷≈°∙·√ⁿ²■ ';
-function decodeBuffer(buffer, start, end, isUtf8) {
- if (isUtf8) {
- return buffer.toString("utf8", start, end);
- } else {
- var result = "";
- for (var i = start; i < end; i++) {
- result += cp437[buffer[i]];
- }
- return result;
- }
-}
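-
-// For example, byte 0x8e maps through the cp437 table above to "Ä", so
-// decodeBuffer(Buffer.from([0x41, 0x8e]), 0, 2, false) yields "AÄ", while
-// isUtf8 === true defers to Buffer#toString("utf8") for the same range.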
-
-function readUInt64LE(buffer, offset) {
- // there is no native function for this, because we can't actually store 64-bit integers precisely.
- // beyond 53 bits, JavaScript's Number type (an IEEE 754 double) can no longer represent every integer exactly.
- // but since 53 bits is a whole lot more than 32 bits, we do our best anyway.
- var lower32 = buffer.readUInt32LE(offset);
- var upper32 = buffer.readUInt32LE(offset + 4);
- // we can't use bitshifting here, because JavaScript bitshifting only works on 32-bit integers.
- return upper32 * 0x100000000 + lower32;
- // as long as we're bounds checking the result of this function against the total file size,
- // we'll catch any overflow errors, because we already made sure the total file size was within reason.
-}
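-
-// Worked example: for the little-endian bytes 01 00 00 00 02 00 00 00, lower32
-// is 1 and upper32 is 2, so the result is 2 * 0x100000000 + 1 = 8589934593.
-// Anything above Number.MAX_SAFE_INTEGER (2^53 - 1) would lose precision, hence
-// the bounds checking mentioned above.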
-
-// Node 10 deprecated new Buffer().
-var newBuffer;
-if (typeof Buffer.allocUnsafe === "function") {
- newBuffer = function(len) {
- return Buffer.allocUnsafe(len);
- };
-} else {
- newBuffer = function(len) {
- return new Buffer(len);
- };
-}
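-
-// Note: Buffer.allocUnsafe() returns uninitialized memory; that is fine as long
-// as call sites fill the whole buffer before reading it, as the
-// readAndAssertNoEof() calls in this file do.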
-
-function defaultCallback(err) {
- if (err) throw err;
-}
diff --git a/node_modules/yauzl/package.json b/node_modules/yauzl/package.json
deleted file mode 100644
index 4f1144a..0000000
--- a/node_modules/yauzl/package.json
+++ /dev/null
@@ -1,40 +0,0 @@
-{
- "name": "yauzl",
- "version": "2.10.0",
- "description": "yet another unzip library for node",
- "main": "index.js",
- "scripts": {
- "test": "node test/test.js",
- "test-cov": "istanbul cover test/test.js",
- "test-travis": "istanbul cover --report lcovonly test/test.js"
- },
- "repository": {
- "type": "git",
- "url": "https://github.com/thejoshwolfe/yauzl.git"
- },
- "keywords": [
- "unzip",
- "zip",
- "stream",
- "archive",
- "file"
- ],
- "author": "Josh Wolfe <thejoshwolfe@gmail.com>",
- "license": "MIT",
- "bugs": {
- "url": "https://github.com/thejoshwolfe/yauzl/issues"
- },
- "homepage": "https://github.com/thejoshwolfe/yauzl",
- "dependencies": {
- "fd-slicer": "~1.1.0",
- "buffer-crc32": "~0.2.3"
- },
- "devDependencies": {
- "bl": "~1.0.0",
- "istanbul": "~0.3.4",
- "pend": "~1.2.0"
- },
- "files": [
- "index.js"
- ]
-}