Compare commits


12 Commits
v4.2.0 ... main

27 changed files with 12203 additions and 2359 deletions


@@ -1,68 +0,0 @@
# language of the project (csharp, python, rust, java, typescript, go, cpp, or ruby)
# * For C, use cpp
# * For JavaScript, use typescript
# Special requirements:
# * csharp: Requires the presence of a .sln file in the project folder.
language: typescript
# whether to use the project's gitignore file to ignore files
# Added on 2025-04-07
ignore_all_files_in_gitignore: true
# list of additional paths to ignore
# same syntax as gitignore, so you can use * and **
# Was previously called `ignored_dirs`, please update your config if you are using that.
# Added (renamed) on 2025-04-07
ignored_paths: []
# whether the project is in read-only mode
# If set to true, all editing tools will be disabled and attempts to use them will result in an error
# Added on 2025-04-18
read_only: false
# list of tool names to exclude. We recommend not excluding any tools, see the readme for more details.
# Below is the complete list of tools for convenience.
# To make sure you have the latest list of tools, and to view their descriptions,
# execute `uv run scripts/print_tool_overview.py`.
#
# * `activate_project`: Activates a project by name.
# * `check_onboarding_performed`: Checks whether project onboarding was already performed.
# * `create_text_file`: Creates/overwrites a file in the project directory.
# * `delete_lines`: Deletes a range of lines within a file.
# * `delete_memory`: Deletes a memory from Serena's project-specific memory store.
# * `execute_shell_command`: Executes a shell command.
# * `find_referencing_code_snippets`: Finds code snippets in which the symbol at the given location is referenced.
# * `find_referencing_symbols`: Finds symbols that reference the symbol at the given location (optionally filtered by type).
# * `find_symbol`: Performs a global (or local) search for symbols with/containing a given name/substring (optionally filtered by type).
# * `get_current_config`: Prints the current configuration of the agent, including the active and available projects, tools, contexts, and modes.
# * `get_symbols_overview`: Gets an overview of the top-level symbols defined in a given file.
# * `initial_instructions`: Gets the initial instructions for the current project.
# Should only be used in settings where the system prompt cannot be set,
# e.g. in clients you have no control over, like Claude Desktop.
# * `insert_after_symbol`: Inserts content after the end of the definition of a given symbol.
# * `insert_at_line`: Inserts content at a given line in a file.
# * `insert_before_symbol`: Inserts content before the beginning of the definition of a given symbol.
# * `list_dir`: Lists files and directories in the given directory (optionally with recursion).
# * `list_memories`: Lists memories in Serena's project-specific memory store.
# * `onboarding`: Performs onboarding (identifying the project structure and essential tasks, e.g. for testing or building).
# * `prepare_for_new_conversation`: Provides instructions for preparing for a new conversation (in order to continue with the necessary context).
# * `read_file`: Reads a file within the project directory.
# * `read_memory`: Reads the memory with the given name from Serena's project-specific memory store.
# * `remove_project`: Removes a project from the Serena configuration.
# * `replace_lines`: Replaces a range of lines within a file with new content.
# * `replace_symbol_body`: Replaces the full definition of a symbol.
# * `restart_language_server`: Restarts the language server, may be necessary when edits not through Serena happen.
# * `search_for_pattern`: Performs a search for a pattern in the project.
# * `summarize_changes`: Provides instructions for summarizing the changes made to the codebase.
# * `switch_modes`: Activates modes by providing a list of their names
# * `think_about_collected_information`: Thinking tool for pondering the completeness of collected information.
# * `think_about_task_adherence`: Thinking tool for determining whether the agent is still on track with the current task.
# * `think_about_whether_you_are_done`: Thinking tool for determining whether the task is truly completed.
# * `write_memory`: Writes a named memory (for future reference) to Serena's project-specific memory store.
excluded_tools: []
# initial prompt for the project. It will always be given to the LLM upon activating the project
# (contrary to the memories, which are loaded on demand).
initial_prompt: ""
project_name: "smartarchive"


@@ -1,5 +1,64 @@
# Changelog
## 2025-11-25 - 5.0.1 - fix(ziptools,gziptools)
Use fflate synchronous APIs for ZIP and GZIP operations for Deno compatibility; add TEntryFilter type and small docs/tests cleanup
- Replace fflate async APIs (zip, unzip, gzip, gunzip with callbacks) with synchronous counterparts (zipSync, unzipSync, gzipSync, gunzipSync) to avoid Web Worker issues in Deno
- ZipCompressionStream.finalize now uses fflate.zipSync and emits compressed Buffer synchronously
- GzipTools.compress / decompress now delegate to compressSync / decompressSync for cross-runtime compatibility
- ZipTools.createZip and ZipTools.extractZip now use zipSync/unzipSync and return Buffers
- Add TEntryFilter type to ts/interfaces.ts for fluent API entry filtering
- Minor readme.hints.md updates and small whitespace tidy in tests
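A minimal sketch of the synchronous pattern this release adopts, using fflate's documented sync API (illustrative only, not the library's actual internals):
```typescript
import { zipSync, unzipSync, gzipSync, gunzipSync } from 'fflate';

// Synchronous ZIP round-trip: no callbacks and no Web Workers,
// which is what makes this code path safe under Deno.
const zipped = zipSync({
  'hello.txt': new TextEncoder().encode('Hello, World!'),
});
const entries = unzipSync(zipped); // { 'hello.txt': Uint8Array }

// Same idea for GZIP.
const gz = gzipSync(new TextEncoder().encode('payload'), { level: 6 });
const plain = gunzipSync(gz);
console.log(new TextDecoder().decode(plain)); // 'payload'
```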
## 2025-11-25 - 5.0.0 - BREAKING CHANGE(SmartArchive)
Refactor public API: rename factory/extraction methods, introduce typed interfaces and improved compression tools
- Renamed SmartArchive factory methods: fromArchiveUrl -> fromUrl, fromArchiveFile -> fromFile, fromArchiveStream -> fromStream; added fromBuffer helper.
- Renamed extraction APIs: exportToFs -> extractToDirectory and exportToStreamOfStreamFiles -> extractToStream; stream-based helpers updated accordingly.
- Export surface reorganized (ts/index.ts): core interfaces and errors are exported and new modules (bzip2tools, archiveanalyzer) are publicly available.
- Introduced strong TypeScript types (ts/interfaces.ts) and centralized error types (ts/errors.ts) including Bzip2Error and BZIP2_ERROR_CODES.
- Refactored format implementations and stream transforms: GzipTools/GzipCompressionTransform/GzipDecompressionTransform, ZipTools (ZipCompressionStream, ZipDecompressionTransform), TarTools improvements.
- BZIP2 implementation improvements: new bit iterator (IBitReader), clearer error handling and streaming unbzip2 transform.
- Updated tests to use the new APIs and method names.
- Breaking change: public API method names and some class/transform names have changed — this requires code updates for consumers.
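An illustrative consumer-side migration for the renames above (assuming the renamed methods keep their v4 call shapes; later commits in this compare move further to the fluent `SmartArchive.create()` API shown in the readme):
```typescript
import { SmartArchive } from '@push.rocks/smartarchive';

// v4.x (old names)
const oldStyle = await SmartArchive.fromArchiveUrl('https://example.com/pkg.tgz');
await oldStyle.exportToFs('./out');

// v5.0.0 (renamed per this entry)
const newStyle = await SmartArchive.fromUrl('https://example.com/pkg.tgz');
await newStyle.extractToDirectory('./out');
```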
## 2025-11-25 - 4.2.4 - fix(plugins)
Migrate filesystem usage to Node fs/fsPromises and upgrade smartfile to v13; add listFileTree helper and update tests
- Bumped dependency @push.rocks/smartfile to ^13.0.0 and removed unused dependency `through`
- Replaced usages of smartfile.fs and smartfile.fsStream with Node native fs and fs/promises (createReadStream/createWriteStream, mkdir({recursive:true}), stat, readFile)
- Added plugins.listFileTree helper (recursive directory lister) and used it in TarTools.packDirectory and tests
- Updated SmartArchive.exportToFs to use plugins.fs and plugins.fsPromises for directory creation and file writes
- Updated TarTools to use plugins.fs.createReadStream and plugins.fsPromises.stat when packing directories
- Converted/updated tests to a Node/Deno-friendly test file (test.node+deno.ts) and switched test helpers to use fsPromises
- Added readme.hints.md with migration notes for Smartfile v13 and architecture/dependency notes
## 2025-11-25 - 4.2.3 - fix(build)
Upgrade dev tooling: bump @git.zone/tsbuild, @git.zone/tsrun and @git.zone/tstest versions
- Bump @git.zone/tsbuild from ^2.6.6 to ^3.1.0
- Bump @git.zone/tsrun from ^1.3.3 to ^2.0.0
- Bump @git.zone/tstest from ^2.3.4 to ^3.1.3
## 2025-08-18 - 4.2.2 - fix(smartarchive)
Improve tar entry streaming handling and add in-memory gzip/tgz tests
- Fix tar entry handling: properly consume directory entries (resume stream) and wait for entry end before continuing to next header
- Wrap tar file entries with a PassThrough so extracted StreamFile instances can be consumed while the tar extractor continues
- Handle nested archives correctly by piping resultStream -> decompressionStream -> analyzer -> unpacker, avoiding premature end signals
- Add and expand tests in test/test.gzip.ts: verify package.json and TS/license files after extraction, add in-memory gzip extraction test, and add real tgz-in-memory download+extraction test
- Minor logging improvements for tar extraction flow
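A simplified schematic of the entry-handling fix described above, built on tar-stream's documented `extract` events (the `onFile` callback is a hypothetical consumer hook, not part of the library):
```typescript
import * as tarStream from 'tar-stream';
import { PassThrough, type Readable } from 'node:stream';

function makeExtractor(onFile: (name: string, data: Readable) => void) {
  const extract = tarStream.extract();
  extract.on('entry', (header, entryStream, next) => {
    if (header.type === 'directory') {
      // Directory entries carry no payload; consume them so the
      // extractor advances to the next header.
      entryStream.resume();
      entryStream.on('end', () => next());
      return;
    }
    // Wrap file entries in a PassThrough so consumers can read at
    // their own pace while the tar extractor keeps going.
    const passThrough = new PassThrough();
    entryStream.pipe(passThrough);
    onFile(header.name, passThrough);
    // Only continue once this entry has been fully consumed.
    entryStream.on('end', () => next());
  });
  return extract;
}
```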
## 2025-08-18 - 4.2.1 - fix(gzip)
Improve gzip streaming decompression, archive analysis and unpacking; add gzip tests
- Add a streaming DecompressGunzipTransform using fflate.Gunzip with proper _flush handling to support chunked gzip input and avoid buffering issues.
- Refactor ArchiveAnalyzer: introduce IAnalyzedResult, getAnalyzedStream(), and getDecompressionStream() to better detect mime types and wire appropriate decompression streams (gzip, zip, bzip2, tar).
- Use SmartRequest response streams converted via stream.Readable.fromWeb for URL sources in SmartArchive.getArchiveStream() to improve remote archive handling.
- Improve nested archive unpacking and SmartArchive export pipeline: more robust tar/zip handling, consistent SmartDuplex usage and backpressure handling.
- Enhance exportToFs: ensure directories, improved logging for relative paths, and safer write-stream wiring.
- Add comprehensive gzip-focused tests (test/test.gzip.ts) covering file extraction, stream extraction, header filename handling, large files, and a real-world tgz-from-URL extraction scenario.
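The chunk-safe decompression transform described above, sketched with fflate's streaming `Gunzip` class (simplified; the actual class in `ts/` may differ):
```typescript
import { Transform, type TransformCallback } from 'node:stream';
import { Gunzip } from 'fflate';

class DecompressGunzipTransform extends Transform {
  // fflate's Gunzip keeps inflate state across push() calls, so gzip
  // input split at arbitrary chunk boundaries still decompresses cleanly.
  private gunzip = new Gunzip((chunk) => this.push(Buffer.from(chunk)));

  _transform(chunk: Buffer, _enc: string, cb: TransformCallback) {
    this.gunzip.push(new Uint8Array(chunk));
    cb();
  }

  _flush(cb: TransformCallback) {
    // Mark the stream as finished so trailing bytes are emitted.
    this.gunzip.push(new Uint8Array(0), true);
    cb();
  }
}
```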
## 2025-08-18 - 4.2.0 - feat(classes.smartarchive)
Support URL streams, recursive archive unpacking and filesystem export; improve ZIP/GZIP/BZIP2 robustness; CI and package metadata updates

deno.lock (generated, new file, 6945 additions)

File diff suppressed because it is too large.

dist_ts/index.d.ts (vendored, 4 additions)

@@ -1,4 +1,8 @@
export * from './interfaces.js';
export * from './errors.js';
export * from './classes.smartarchive.js';
export * from './classes.tartools.js';
export * from './classes.ziptools.js';
export * from './classes.gziptools.js';
export * from './classes.bzip2tools.js';
export * from './classes.archiveanalyzer.js';


@@ -1,5 +1,13 @@
// Core types and errors
export * from './interfaces.js';
export * from './errors.js';
// Main archive class
export * from './classes.smartarchive.js';
// Format-specific tools
export * from './classes.tartools.js';
export * from './classes.ziptools.js';
export * from './classes.gziptools.js';
export * from './classes.bzip2tools.js';
// Archive analysis
export * from './classes.archiveanalyzer.js';
//# sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoiaW5kZXguanMiLCJzb3VyY2VSb290IjoiIiwic291cmNlcyI6WyIuLi90cy9pbmRleC50cyJdLCJuYW1lcyI6W10sIm1hcHBpbmdzIjoiQUFBQSx3QkFBd0I7QUFDeEIsY0FBYyxpQkFBaUIsQ0FBQztBQUNoQyxjQUFjLGFBQWEsQ0FBQztBQUU1QixxQkFBcUI7QUFDckIsY0FBYywyQkFBMkIsQ0FBQztBQUUxQyx3QkFBd0I7QUFDeEIsY0FBYyx1QkFBdUIsQ0FBQztBQUN0QyxjQUFjLHVCQUF1QixDQUFDO0FBQ3RDLGNBQWMsd0JBQXdCLENBQUM7QUFDdkMsY0FBYyx5QkFBeUIsQ0FBQztBQUV4QyxtQkFBbUI7QUFDbkIsY0FBYyw4QkFBOEIsQ0FBQyJ9


@@ -1,6 +1,6 @@
{
  "name": "@push.rocks/smartarchive",
-  "version": "4.2.0",
+  "version": "5.0.1",
  "description": "A library for working with archive files, providing utilities for compressing and decompressing data.",
  "main": "dist_ts/index.js",
  "typings": "dist_ts/index.d.ts",
@@ -22,7 +22,7 @@
"homepage": "https://code.foss.global/push.rocks/smartarchive#readme", "homepage": "https://code.foss.global/push.rocks/smartarchive#readme",
"dependencies": { "dependencies": {
"@push.rocks/smartdelay": "^3.0.5", "@push.rocks/smartdelay": "^3.0.5",
"@push.rocks/smartfile": "^11.2.7", "@push.rocks/smartfile": "^13.0.0",
"@push.rocks/smartpath": "^6.0.0", "@push.rocks/smartpath": "^6.0.0",
"@push.rocks/smartpromise": "^4.2.3", "@push.rocks/smartpromise": "^4.2.3",
"@push.rocks/smartrequest": "^4.2.2", "@push.rocks/smartrequest": "^4.2.2",
@@ -33,13 +33,12 @@
"@types/tar-stream": "^3.1.4", "@types/tar-stream": "^3.1.4",
"fflate": "^0.8.2", "fflate": "^0.8.2",
"file-type": "^21.0.0", "file-type": "^21.0.0",
"tar-stream": "^3.1.7", "tar-stream": "^3.1.7"
"through": "^2.3.8"
}, },
"devDependencies": { "devDependencies": {
"@git.zone/tsbuild": "^2.6.6", "@git.zone/tsbuild": "^3.1.0",
"@git.zone/tsrun": "^1.3.3", "@git.zone/tsrun": "^2.0.0",
"@git.zone/tstest": "^2.3.4" "@git.zone/tstest": "^3.1.3"
}, },
"private": false, "private": false,
"files": [ "files": [

pnpm-lock.yaml (generated, 3586 additions)

File diff suppressed because it is too large.

@@ -1 +1,84 @@
# Smartarchive Development Hints
## Architecture Overview
`@push.rocks/smartarchive` uses a **fluent builder pattern** for all archive operations. The main entry point is `SmartArchive.create()` which returns a builder instance.
### Two Operating Modes
1. **Extraction Mode** - Triggered by `.url()`, `.file()`, `.stream()`, or `.buffer()`
2. **Creation Mode** - Triggered by `.format()` or `.entry()`
Modes are mutually exclusive - you cannot mix extraction and creation methods in the same chain.
## Key Classes
- **SmartArchive** - Main class with fluent API for all operations
- **TarTools** - TAR-specific operations (pack/extract)
- **ZipTools** - ZIP-specific operations using fflate
- **GzipTools** - GZIP compression/decompression using fflate
- **Bzip2Tools** - BZIP2 decompression (extract only, no creation)
- **ArchiveAnalyzer** - Format detection via magic bytes
## Dependencies
- **fflate** - Pure JS compression for ZIP/GZIP (works in browser)
- **tar-stream** - TAR archive handling
- **file-type** - MIME type detection via magic bytes
- **@push.rocks/smartfile** - SmartFile and StreamFile classes
## API Changes (v5.0.0)
The v5.0.0 release introduced a complete API refactor:
### Old API (deprecated)
```typescript
// Old static factory methods - NO LONGER EXIST
await SmartArchive.fromUrl(url);
await SmartArchive.fromFile(path);
await SmartArchive.fromDirectory(path, options);
```
### New Fluent API
```typescript
// Current fluent builder pattern
await SmartArchive.create()
.url(url)
.extract(targetDir);
await SmartArchive.create()
.format('tar.gz')
.directory(path)
.toFile(outputPath);
```
## Migration Notes (from v4.x)
### Smartfile v13 Changes
Smartfile v13 removed filesystem operations. Replacements (see the sketch after this list):
- `smartfile.fs.ensureDir(path)` → `fsPromises.mkdir(path, { recursive: true })`
- `smartfile.fs.stat(path)` → `fsPromises.stat(path)`
- `smartfile.fs.toReadStream(path)` → `fs.createReadStream(path)`
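A before/after sketch of the replacements listed above (the old call names are taken from that list; paths are placeholders):
```typescript
import * as fs from 'node:fs';
import * as fsPromises from 'node:fs/promises';

// Before (smartfile v11):
//   await smartfile.fs.ensureDir('./out');
//   const stats = await smartfile.fs.stat('./out/data.txt');
//   const stream = smartfile.fs.toReadStream('./out/data.txt');

// After (Node built-ins):
await fsPromises.mkdir('./out', { recursive: true });
const stats = await fsPromises.stat('./out/data.txt');
const stream = fs.createReadStream('./out/data.txt');
```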
### Still using from smartfile
- `SmartFile` class (in-memory file representation)
- `StreamFile` class (streaming file handling)
## Testing
```bash
pnpm test # Run all tests
tstest test/test.node+deno.ts --verbose # Run specific test
```
Tests use a Verdaccio registry URL (`verdaccio.lossless.digital`) for test archives.
## Key Files
- `ts/classes.smartarchive.ts` - Main SmartArchive class with fluent API
- `ts/classes.tartools.ts` - TAR operations
- `ts/classes.ziptools.ts` - ZIP operations
- `ts/classes.gziptools.ts` - GZIP operations
- `ts/classes.bzip2tools.ts` - BZIP2 decompression
- `ts/classes.archiveanalyzer.ts` - Format detection
- `ts/interfaces.ts` - Type definitions

readme.md (625 changed lines)

@@ -1,29 +1,32 @@
# @push.rocks/smartarchive 📦
-**Powerful archive manipulation for modern Node.js applications**
-`@push.rocks/smartarchive` is a versatile library for handling archive files with a focus on developer experience. Work with **zip**, **tar**, **gzip**, and **bzip2** formats through a unified, streaming-optimized API.
+A powerful, streaming-first archive manipulation library with a fluent builder API. Works seamlessly in Node.js and Deno.
+## Issue Reporting and Security
+For reporting bugs, issues, or security vulnerabilities, please visit [community.foss.global/](https://community.foss.global/). This is the central community hub for all issue reporting. Developers who sign and comply with our contribution agreement and go through identification can also get a [code.foss.global/](https://code.foss.global/) account to submit Pull Requests directly.
## Features 🚀
-- 📁 **Multi-format support** - Handle `.zip`, `.tar`, `.tar.gz`, `.tgz`, and `.bz2` archives
-- 🌊 **Streaming-first architecture** - Process large archives without memory constraints
-- 🔄 **Unified API** - Consistent interface across different archive formats
-- 🎯 **Smart detection** - Automatically identifies archive types
-- ⚡ **High performance** - Optimized for speed with parallel processing where possible
-- 🔧 **Flexible I/O** - Work with files, URLs, and streams seamlessly
-- 📊 **Archive analysis** - Inspect contents without extraction
-- 🛠️ **Modern TypeScript** - Full type safety and excellent IDE support
+- 📁 **Multi-format support** - Handle `.zip`, `.tar`, `.tar.gz`, `.tgz`, `.gz`, and `.bz2` archives
+- 🌊 **Streaming-first architecture** - Process large archives without memory constraints
+- **Fluent builder API** - Chain methods for readable, expressive code
+- 🎯 **Smart detection** - Automatically identifies archive types via magic bytes
+- ⚡ **High performance** - Built on `tar-stream` and `fflate` for speed
+- 🔧 **Flexible I/O** - Work with files, URLs, streams, and buffers seamlessly
+- 🛠️ **Modern TypeScript** - Full type safety and excellent IDE support
+- 🔄 **Dual-mode operation** - Extract existing archives OR create new ones
+- 🦕 **Cross-runtime** - Works in both Node.js and Deno environments
## Installation 📥
```bash
-# Using npm
-npm install @push.rocks/smartarchive
# Using pnpm (recommended)
pnpm add @push.rocks/smartarchive
+# Using npm
+npm install @push.rocks/smartarchive
# Using yarn
yarn add @push.rocks/smartarchive
```
@@ -36,110 +39,245 @@ yarn add @push.rocks/smartarchive
import { SmartArchive } from '@push.rocks/smartarchive';
// Extract a .tar.gz archive from a URL directly to the filesystem
-const archive = await SmartArchive.fromArchiveUrl(
-  'https://github.com/some/repo/archive/main.tar.gz'
-);
-await archive.exportToFs('./extracted');
+await SmartArchive.create()
+  .url('https://registry.npmjs.org/some-package/-/some-package-1.0.0.tgz')
+  .extract('./extracted');
```
-### Process archive as a stream
+### Create an archive from entries
```typescript
import { SmartArchive } from '@push.rocks/smartarchive';
-// Stream-based processing for memory efficiency
-const archive = await SmartArchive.fromArchiveFile('./large-archive.zip');
-const streamOfFiles = await archive.exportToStreamOfStreamFiles();
-// Process each file in the archive
-streamOfFiles.on('data', (fileStream) => {
-  console.log(`Processing ${fileStream.path}`);
-  // Handle individual file stream
-});
+// Create a tar.gz archive with files
+await SmartArchive.create()
+  .format('tar.gz')
+  .compression(6)
+  .entry('config.json', JSON.stringify({ name: 'myapp' }))
+  .entry('readme.txt', 'Hello World!')
+  .toFile('./backup.tar.gz');
+```
+### Extract with filtering and path manipulation
+```typescript
+import { SmartArchive } from '@push.rocks/smartarchive';
+// Extract only JSON files, stripping the first path component
+await SmartArchive.create()
+  .url('https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz')
+  .stripComponents(1) // Remove 'package/' prefix
+  .include(/\.json$/) // Only extract JSON files
+  .extract('./node_modules/lodash');
```
## Core Concepts 💡
-### Archive Sources
-`SmartArchive` accepts archives from three sources:
-1. **URL** - Download and process archives from the web
-2. **File** - Load archives from the local filesystem
-3. **Stream** - Process archives from any Node.js stream
+### Fluent Builder Pattern
+`SmartArchive` uses a fluent builder pattern where you chain methods to configure the operation:
+```typescript
+SmartArchive.create() // Start a new builder
+  .source(...) // Configure source (extraction mode)
+  .options(...) // Set options
+  .terminal() // Execute the operation
+```
-### Export Destinations
-Extract archives to multiple destinations:
-1. **Filesystem** - Extract directly to a directory
-2. **Stream of files** - Process files individually as streams
-3. **Archive stream** - Re-stream as different format
+### Two Operating Modes
+**Extraction Mode** - Load an existing archive and extract/analyze it:
+```typescript
+SmartArchive.create()
+  .url('...') // or .file(), .stream(), .buffer()
+  .extract('./out') // or .toSmartFiles(), .list(), etc.
+```
+**Creation Mode** - Build a new archive from entries:
+```typescript
+SmartArchive.create()
+  .format('tar.gz') // Set output format
+  .entry(...) // Add files
+  .toFile('./out.tar.gz') // or .toBuffer(), .toStream()
+```
+> ⚠️ **Note:** You cannot mix extraction and creation methods in the same chain.
+## API Reference 📚
+### Source Methods (Extraction Mode)
+| Method | Description |
+|--------|-------------|
+| `.url(url)` | Load archive from a URL |
+| `.file(path)` | Load archive from local filesystem |
+| `.stream(readable)` | Load archive from any Node.js readable stream |
+| `.buffer(buffer)` | Load archive from an in-memory Buffer |
+### Creation Methods (Creation Mode)
+| Method | Description |
+|--------|-------------|
+| `.format(fmt)` | Set output format: `'tar'`, `'tar.gz'`, `'tgz'`, `'zip'`, `'gz'` |
+| `.compression(level)` | Set compression level (0-9, default: 6) |
+| `.entry(path, content)` | Add a file entry (string or Buffer content) |
+| `.entries(array)` | Add multiple entries at once |
+| `.directory(path, archiveBase?)` | Add entire directory contents |
+| `.addSmartFile(file, path?)` | Add a SmartFile instance |
+| `.addStreamFile(file, path?)` | Add a StreamFile instance |
+### Filter Methods (Both Modes)
+| Method | Description |
+|--------|-------------|
+| `.filter(predicate)` | Filter entries with custom function |
+| `.include(pattern)` | Only include entries matching regex/string pattern |
+| `.exclude(pattern)` | Exclude entries matching regex/string pattern |
+### Extraction Options
+| Method | Description |
+|--------|-------------|
+| `.stripComponents(n)` | Strip N leading path components |
+| `.overwrite(bool)` | Overwrite existing files (default: false) |
+| `.fileName(name)` | Set output filename for single-file archives (gz, bz2) |
+### Terminal Methods (Extraction)
+| Method | Returns | Description |
+|--------|---------|-------------|
+| `.extract(targetDir)` | `Promise<void>` | Extract to filesystem directory |
+| `.toStreamFiles()` | `Promise<StreamIntake<StreamFile>>` | Get stream of StreamFile objects |
+| `.toSmartFiles()` | `Promise<SmartFile[]>` | Get in-memory SmartFile array |
+| `.extractFile(path)` | `Promise<SmartFile \| null>` | Extract single file by path |
+| `.list()` | `Promise<IArchiveEntryInfo[]>` | List all entries |
+| `.analyze()` | `Promise<IArchiveInfo>` | Get archive metadata |
+| `.hasFile(path)` | `Promise<boolean>` | Check if file exists |
+### Terminal Methods (Creation)
+| Method | Returns | Description |
+|--------|---------|-------------|
+| `.build()` | `Promise<SmartArchive>` | Build the archive (implicit in other terminals) |
+| `.toBuffer()` | `Promise<Buffer>` | Get archive as Buffer |
+| `.toFile(path)` | `Promise<void>` | Write archive to disk |
+| `.toStream()` | `Promise<Readable>` | Get raw archive stream |
## Usage Examples 🔨
-### Working with ZIP files
+### Download and extract npm packages
```typescript
import { SmartArchive } from '@push.rocks/smartarchive';
-// Extract a ZIP file
-const zipArchive = await SmartArchive.fromArchiveFile('./archive.zip');
-await zipArchive.exportToFs('./output');
-// Stream ZIP contents for processing
-const fileStream = await zipArchive.exportToStreamOfStreamFiles();
-fileStream.on('data', (file) => {
-  if (file.path.endsWith('.json')) {
-    // Process JSON files from the archive
-    file.pipe(jsonProcessor);
-  }
-});
+const pkg = await SmartArchive.create()
+  .url('https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz');
+// Quick inspection of package.json
+const pkgJson = await pkg.extractFile('package/package.json');
+if (pkgJson) {
+  const metadata = JSON.parse(pkgJson.contents.toString());
+  console.log(`Package: ${metadata.name}@${metadata.version}`);
+}
+// Full extraction with path normalization
+await SmartArchive.create()
+  .url('https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz')
+  .stripComponents(1)
+  .extract('./node_modules/lodash');
```
+### Create ZIP archive
+```typescript
+import { SmartArchive } from '@push.rocks/smartarchive';
+await SmartArchive.create()
+  .format('zip')
+  .compression(9)
+  .entry('report.txt', 'Monthly sales report...')
+  .entry('data/figures.json', JSON.stringify({ revenue: 10000 }))
+  .entry('images/logo.png', pngBuffer)
+  .toFile('./report-bundle.zip');
+```
+### Create TAR.GZ from directory
+```typescript
+import { SmartArchive } from '@push.rocks/smartarchive';
+await SmartArchive.create()
+  .format('tar.gz')
+  .compression(9)
+  .directory('./src', 'source') // Archive ./src as 'source/' in archive
+  .toFile('./project-backup.tar.gz');
+```
+### Stream-based extraction
+```typescript
+import { SmartArchive } from '@push.rocks/smartarchive';
+const fileStream = await SmartArchive.create()
+  .file('./large-archive.tar.gz')
+  .toStreamFiles();
+fileStream.on('data', async (streamFile) => {
+  console.log(`Processing: ${streamFile.relativeFilePath}`);
+  if (streamFile.relativeFilePath.endsWith('.json')) {
+    const content = await streamFile.getContentAsBuffer();
+    const data = JSON.parse(content.toString());
+    // Process JSON data...
+  }
+});
+fileStream.on('end', () => {
+  console.log('Extraction complete');
+});
```
-### Working with TAR archives
-```typescript
-import { SmartArchive, TarTools } from '@push.rocks/smartarchive';
-// Extract a .tar.gz file
-const tarGzArchive = await SmartArchive.fromArchiveFile('./archive.tar.gz');
-await tarGzArchive.exportToFs('./extracted');
-// Create a TAR archive (using TarTools directly)
-const tarTools = new TarTools();
-const packStream = await tarTools.packDirectory('./source-directory');
-packStream.pipe(createWriteStream('./output.tar'));
-```
-### Extracting from URLs
+### Filter specific file types
```typescript
import { SmartArchive } from '@push.rocks/smartarchive';
-// Download and extract in one operation
-const remoteArchive = await SmartArchive.fromArchiveUrl(
-  'https://example.com/data.tar.gz'
-);
-// Extract to filesystem
-await remoteArchive.exportToFs('./local-dir');
-// Or process as stream
-const stream = await remoteArchive.exportToStreamOfStreamFiles();
+// Extract only TypeScript files
+const tsFiles = await SmartArchive.create()
+  .url('https://example.com/project.tar.gz')
+  .include(/\.ts$/)
+  .exclude(/node_modules/)
+  .toSmartFiles();
+for (const file of tsFiles) {
+  console.log(`${file.relative}: ${file.contents.length} bytes`);
+}
```
-### Analyzing archive contents
+### Analyze archive without extraction
```typescript
import { SmartArchive } from '@push.rocks/smartarchive';
-// Analyze without extracting
-const archive = await SmartArchive.fromArchiveFile('./archive.zip');
-const analyzer = archive.archiveAnalyzer;
-// Use the analyzer to inspect contents
-// (exact implementation depends on analyzer methods)
+const archive = SmartArchive.create()
+  .file('./unknown-archive.tar.gz');
+// Get format info
+const info = await archive.analyze();
+console.log(`Format: ${info.format}`);
+console.log(`Compressed: ${info.isCompressed}`);
+// List contents
+const entries = await archive.list();
+for (const entry of entries) {
+  console.log(`${entry.path} (${entry.isDirectory ? 'dir' : 'file'})`);
+}
+// Check for specific file
+if (await archive.hasFile('package.json')) {
+  const pkgFile = await archive.extractFile('package.json');
+  console.log(pkgFile?.contents.toString());
+}
```
### Working with GZIP files
@@ -148,139 +286,219 @@ const analyzer = archive.archiveAnalyzer;
import { SmartArchive, GzipTools } from '@push.rocks/smartarchive';
// Decompress a .gz file
-const gzipArchive = await SmartArchive.fromArchiveFile('./data.json.gz');
-await gzipArchive.exportToFs('./decompressed', 'data.json');
+await SmartArchive.create()
+  .file('./data.json.gz')
+  .fileName('data.json') // Specify output name (gzip doesn't store filename)
+  .extract('./decompressed');
-// Use GzipTools directly for streaming
+// Use GzipTools directly for compression/decompression
const gzipTools = new GzipTools();
+// Compress a buffer
+const compressed = await gzipTools.compress(Buffer.from('Hello World'), 9);
+const decompressed = await gzipTools.decompress(compressed);
+// Synchronous operations
+const compressedSync = gzipTools.compressSync(inputBuffer, 6);
+const decompressedSync = gzipTools.decompressSync(compressedSync);
+// Streaming
+const compressStream = gzipTools.getCompressionStream(6);
const decompressStream = gzipTools.getDecompressionStream();
-createReadStream('./compressed.gz')
-  .pipe(decompressStream)
-  .pipe(createWriteStream('./decompressed'));
+createReadStream('./input.txt')
+  .pipe(compressStream)
+  .pipe(createWriteStream('./output.gz'));
```
-### Working with BZIP2 files
+### Working with TAR archives directly
```typescript
-import { SmartArchive } from '@push.rocks/smartarchive';
-// Handle .bz2 files
-const bzipArchive = await SmartArchive.fromArchiveUrl(
-  'https://example.com/data.bz2'
-);
-await bzipArchive.exportToFs('./extracted', 'data.txt');
-```
-### Advanced streaming operations
-```typescript
-import { SmartArchive } from '@push.rocks/smartarchive';
-import { pipeline } from 'stream/promises';
-// Chain operations with streams
-const archive = await SmartArchive.fromArchiveFile('./archive.tar.gz');
-const exportStream = await archive.exportToStreamOfStreamFiles();
-// Process each file in the archive
-await pipeline(
-  exportStream,
-  async function* (source) {
-    for await (const file of source) {
-      if (file.path.endsWith('.log')) {
-        // Process log files
-        yield processLogFile(file);
-      }
-    }
-  },
-  createWriteStream('./processed-logs.txt')
-);
-```
-### Creating archives (advanced)
-```typescript
-import { SmartArchive } from '@push.rocks/smartarchive';
import { TarTools } from '@push.rocks/smartarchive';
-// Using SmartArchive to create an archive
-const archive = new SmartArchive();
-// Add content to the archive
-archive.addedDirectories.push('./src');
-archive.addedFiles.push('./readme.md');
-archive.addedFiles.push('./package.json');
-// Export as TAR.GZ
-const tarGzStream = await archive.exportToTarGzStream();
-tarGzStream.pipe(createWriteStream('./output.tar.gz'));
+const tarTools = new TarTools();
+// Create a TAR archive manually
+const pack = await tarTools.getPackStream();
+await tarTools.addFileToPack(pack, {
+  fileName: 'hello.txt',
+  content: 'Hello, World!'
+});
+await tarTools.addFileToPack(pack, {
+  fileName: 'data.json',
+  content: Buffer.from(JSON.stringify({ foo: 'bar' }))
+});
+pack.finalize();
+pack.pipe(createWriteStream('./output.tar'));
+// Pack a directory to TAR.GZ buffer
+const tgzBuffer = await tarTools.packDirectoryToTarGz('./src', 6);
+// Pack a directory to TAR.GZ stream
+const tgzStream = await tarTools.packDirectoryToTarGzStream('./src');
```
-### Extract and transform
+### Working with ZIP archives directly
+```typescript
+import { ZipTools } from '@push.rocks/smartarchive';
+const zipTools = new ZipTools();
+// Create a ZIP archive from entries
+const zipBuffer = await zipTools.createZip([
+  { archivePath: 'readme.txt', content: 'Hello!' },
+  { archivePath: 'data.bin', content: Buffer.from([0x00, 0x01, 0x02]) }
+], 6);
+// Extract a ZIP buffer
+const entries = await zipTools.extractZip(zipBuffer);
+for (const entry of entries) {
+  console.log(`${entry.path}: ${entry.content.length} bytes`);
+}
+```
+### In-memory round-trip
```typescript
import { SmartArchive } from '@push.rocks/smartarchive';
-import { Transform } from 'stream';
-// Extract and transform files in one pipeline
-const archive = await SmartArchive.fromArchiveUrl(
-  'https://example.com/source-code.tar.gz'
-);
-const extractStream = await archive.exportToStreamOfStreamFiles();
-// Transform TypeScript to JavaScript during extraction
-extractStream.on('data', (fileStream) => {
-  if (fileStream.path.endsWith('.ts')) {
-    fileStream
-      .pipe(typescriptTranspiler())
-      .pipe(createWriteStream(fileStream.path.replace('.ts', '.js')));
-  } else {
-    fileStream.pipe(createWriteStream(fileStream.path));
-  }
-});
+// Create archive in memory
+const archive = await SmartArchive.create()
+  .format('tar.gz')
+  .entry('config.json', JSON.stringify({ version: '1.0.0' }))
+  .build();
+const buffer = await archive.toBuffer();
+// Extract from buffer
+const files = await SmartArchive.create()
+  .buffer(buffer)
+  .toSmartFiles();
+for (const file of files) {
+  console.log(`${file.relative}: ${file.contents.toString()}`);
+}
+```
+## Real-World Use Cases 🌍
+### CI/CD: Download & Extract Build Artifacts
+```typescript
+const artifacts = await SmartArchive.create()
+  .url(`${CI_SERVER}/artifacts/build-${BUILD_ID}.zip`)
+  .stripComponents(1)
+  .extract('./dist');
+```
+### Backup System
+```typescript
+// Create backup
+await SmartArchive.create()
+  .format('tar.gz')
+  .compression(9)
+  .directory('./data')
+  .toFile(`./backups/backup-${Date.now()}.tar.gz`);
+// Restore backup
+await SmartArchive.create()
+  .file('./backups/backup-latest.tar.gz')
+  .extract('/restore/location');
+```
+### Bundle files for HTTP download
+```typescript
+import { SmartArchive } from '@push.rocks/smartarchive';
+// Express/Fastify handler
+app.get('/download-bundle', async (req, res) => {
+  const buffer = await SmartArchive.create()
+    .format('zip')
+    .entry('report.pdf', pdfBuffer)
+    .entry('data.xlsx', excelBuffer)
+    .entry('images/chart.png', chartBuffer)
+    .toBuffer();
+  res.setHeader('Content-Type', 'application/zip');
+  res.setHeader('Content-Disposition', 'attachment; filename=report-bundle.zip');
+  res.send(buffer);
+});
+```
+### Data Pipeline: Process Compressed Datasets
+```typescript
+const fileStream = await SmartArchive.create()
+  .url('https://data.source/dataset.tar.gz')
+  .toStreamFiles();
+fileStream.on('data', async (file) => {
+  if (file.relativeFilePath.endsWith('.csv')) {
+    const content = await file.getContentAsBuffer();
+    // Stream CSV processing...
+  }
+});
```
-## API Reference 📚
-### SmartArchive Class
-#### Static Methods
-- `SmartArchive.fromArchiveUrl(url: string)` - Create from URL
-- `SmartArchive.fromArchiveFile(path: string)` - Create from file
-- `SmartArchive.fromArchiveStream(stream: NodeJS.ReadableStream)` - Create from stream
-#### Instance Methods
-- `exportToFs(targetDir: string, fileName?: string)` - Extract to filesystem
-- `exportToStreamOfStreamFiles()` - Get a stream of file streams
-- `exportToTarGzStream()` - Export as TAR.GZ stream
-- `getArchiveStream()` - Get the raw archive stream
-#### Properties
-- `archiveAnalyzer` - Analyze archive contents
-- `tarTools` - TAR-specific operations
-- `zipTools` - ZIP-specific operations
-- `gzipTools` - GZIP-specific operations
-- `bzip2Tools` - BZIP2-specific operations
-### Specialized Tools
-Each tool class provides format-specific operations:
-- **TarTools** - Pack/unpack TAR archives
-- **ZipTools** - Handle ZIP compression
-- **GzipTools** - GZIP compression/decompression
-- **Bzip2Tools** - BZIP2 operations
+## Supported Formats 📋
+| Format | Extension(s) | Extract | Create |
+|--------|--------------|---------|--------|
+| TAR | `.tar` | ✅ | ✅ |
+| TAR.GZ / TGZ | `.tar.gz`, `.tgz` | ✅ | ✅ |
+| ZIP | `.zip` | ✅ | ✅ |
+| GZIP | `.gz` | ✅ | ✅ |
+| BZIP2 | `.bz2` | ✅ | ❌ |
+## Type Definitions
+```typescript
+// Supported archive formats
+type TArchiveFormat = 'tar' | 'tar.gz' | 'tgz' | 'zip' | 'gz' | 'bz2';
+// Compression level (0 = none, 9 = maximum)
+type TCompressionLevel = 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9;
+// Entry for creating archives
+interface IArchiveEntry {
+  archivePath: string;
+  content: string | Buffer | Readable | SmartFile | StreamFile;
+  size?: number;
+  mode?: number;
+  mtime?: Date;
+}
+// Information about an archive entry
+interface IArchiveEntryInfo {
+  path: string;
+  size: number;
+  isDirectory: boolean;
+  isFile: boolean;
+  mtime?: Date;
+  mode?: number;
+}
+// Archive analysis result
+interface IArchiveInfo {
+  format: TArchiveFormat | null;
+  isCompressed: boolean;
+  isArchive: boolean;
+  entries?: IArchiveEntryInfo[];
+}
+```
## Performance Tips 🏎️
-1. **Use streaming for large files** - Avoid loading entire archives into memory
-2. **Process files in parallel** - Utilize stream operations for concurrent processing
-3. **Choose the right format** - TAR.GZ for Unix systems, ZIP for cross-platform compatibility
-4. **Enable compression wisely** - Balance between file size and CPU usage
+1. **Use streaming for large files** - `.toStreamFiles()` processes entries one at a time without loading the entire archive
+2. **Provide byte lengths when known** - When using TarTools directly, provide `byteLength` for better performance
+3. **Choose appropriate compression** - Use 1-3 for speed, 6 (default) for balance, 9 for maximum compression
+4. **Filter early** - Use `.include()`/`.exclude()` to skip unwanted entries before processing
## Error Handling 🛡️
@@ -288,50 +506,25 @@ Each tool class provides format-specific operations:
import { SmartArchive } from '@push.rocks/smartarchive';
try {
-  const archive = await SmartArchive.fromArchiveUrl('https://example.com/file.zip');
-  await archive.exportToFs('./output');
+  await SmartArchive.create()
+    .url('https://example.com/file.zip')
+    .extract('./output');
} catch (error) {
-  if (error.code === 'ENOENT') {
-    console.error('Archive file not found');
-  } else if (error.code === 'EACCES') {
-    console.error('Permission denied');
+  if (error.message.includes('No source configured')) {
+    console.error('Forgot to specify source');
+  } else if (error.message.includes('No format specified')) {
+    console.error('Forgot to set format for creation');
+  } else if (error.message.includes('extraction mode')) {
+    console.error('Cannot mix extraction and creation methods');
  } else {
-    console.error('Archive extraction failed:', error.message);
+    console.error('Archive operation failed:', error.message);
  }
}
```
-## Real-World Use Cases 🌍
-### Backup System
-```typescript
-// Automated backup extraction
-const backup = await SmartArchive.fromArchiveFile('./backup.tar.gz');
-await backup.exportToFs('/restore/location');
-```
-### CI/CD Pipeline
-```typescript
-// Download and extract build artifacts
-const artifacts = await SmartArchive.fromArchiveUrl(
-  `${CI_SERVER}/artifacts/build-${BUILD_ID}.zip`
-);
-await artifacts.exportToFs('./dist');
-```
-### Data Processing
-```typescript
-// Process compressed datasets
-const dataset = await SmartArchive.fromArchiveUrl(
-  'https://data.source/dataset.tar.bz2'
-);
-const files = await dataset.exportToStreamOfStreamFiles();
-// Process each file in the dataset
-```
## License and Legal Information
This repository contains open-source code that is licensed under the MIT License. A copy of the MIT License can be found in the [license](license) file within this repository.
**Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.
@@ -339,11 +532,15 @@ This repository contains open-source code that is licensed under the MIT License
This project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH and are not included within the scope of the MIT license granted herein. Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines, and any usage must be approved in writing by Task Venture Capital GmbH.
-### Issue Reporting and Security
-For reporting bugs, issues, or security vulnerabilities, please visit [community.foss.global/](https://community.foss.global/). This is the central community hub for all issue reporting. Developers who sign and comply with our contribution agreement and go through identification can also get a [code.foss.global/](https://code.foss.global/) account to submit Pull Requests directly.
### Company Information
Task Venture Capital GmbH
Registered at District court Bremen HRB 35230 HB, Germany
For any legal inquiries or if you require further information, please contact us via email at hello@task.vc.
By using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.


@@ -1,7 +1,33 @@
-import * as path from 'path';
+import * as path from 'node:path';
import * as fs from 'node:fs';
import * as fsPromises from 'node:fs/promises';
import * as smartpath from '@push.rocks/smartpath';
import * as smartfile from '@push.rocks/smartfile';
import * as smartrequest from '@push.rocks/smartrequest';
import * as smartstream from '@push.rocks/smartstream';
-export { path, smartpath, smartfile, smartrequest, smartstream };
+export { path, fs, fsPromises, smartpath, smartfile, smartrequest, smartstream };
/**
* List files in a directory recursively, returning relative paths
*/
export async function listFileTree(dirPath: string, _pattern: string = '**/*'): Promise<string[]> {
const results: string[] = [];
async function walkDir(currentPath: string, relativePath: string = '') {
const entries = await fsPromises.readdir(currentPath, { withFileTypes: true });
for (const entry of entries) {
const entryRelPath = relativePath ? path.join(relativePath, entry.name) : entry.name;
const entryFullPath = path.join(currentPath, entry.name);
if (entry.isDirectory()) {
await walkDir(entryFullPath, entryRelPath);
} else if (entry.isFile()) {
results.push(entryRelPath);
}
}
}
await walkDir(dirPath);
return results;
}
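A quick usage sketch for the helper above (note that the `_pattern` argument is currently accepted but unused, so all files are returned):
```typescript
import * as plugins from './plugins.js';

// Relative file paths under ./src, e.g. ['index.ts', 'classes.tartools.ts']
const files = await plugins.listFileTree('./src');
for (const file of files) {
  console.log(file);
}
```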

test/test.gzip.node+deno.ts (new file, 382 lines)

@@ -0,0 +1,382 @@
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as plugins from './plugins.js';
import * as smartarchive from '../ts/index.js';
const testPaths = {
nogitDir: plugins.path.join(
plugins.smartpath.get.dirnameFromImportMetaUrl(import.meta.url),
'../.nogit/',
),
gzipTestDir: plugins.path.join(
plugins.smartpath.get.dirnameFromImportMetaUrl(import.meta.url),
'../.nogit/gzip-test',
),
};
tap.preTask('should prepare test directories', async () => {
await plugins.fsPromises.mkdir(testPaths.gzipTestDir, { recursive: true });
});
tap.test('should create and extract a gzip file', async () => {
// Create test data
const testContent = 'This is a test file for gzip compression and decompression.\n'.repeat(100);
const testFileName = 'test-file.txt';
const gzipFileName = 'test-file.txt.gz';
// Write the original file
await plugins.fsPromises.writeFile(
plugins.path.join(testPaths.gzipTestDir, testFileName),
testContent
);
// Create gzip compressed version using fflate directly
const fflate = await import('fflate');
const compressed = fflate.gzipSync(Buffer.from(testContent));
await plugins.fsPromises.writeFile(
plugins.path.join(testPaths.gzipTestDir, gzipFileName),
Buffer.from(compressed)
);
// Now test extraction using SmartArchive fluent API
const extractPath = plugins.path.join(testPaths.gzipTestDir, 'extracted');
await plugins.fsPromises.mkdir(extractPath, { recursive: true });
await smartarchive.SmartArchive.create()
.file(plugins.path.join(testPaths.gzipTestDir, gzipFileName))
.fileName('test-file.txt')
.extract(extractPath);
// Read the extracted file
const extractedContent = await plugins.fsPromises.readFile(
plugins.path.join(extractPath, 'test-file.txt'),
'utf8'
);
// Verify the content matches
expect(extractedContent).toEqual(testContent);
});
tap.test('should handle gzip stream extraction', async () => {
// Create test data
const testContent = 'Stream test data for gzip\n'.repeat(50);
const gzipFileName = 'stream-test.txt.gz';
// Create gzip compressed version
const fflate = await import('fflate');
const compressed = fflate.gzipSync(Buffer.from(testContent));
await plugins.fsPromises.writeFile(
plugins.path.join(testPaths.gzipTestDir, gzipFileName),
Buffer.from(compressed)
);
// Create a read stream for the gzip file
const gzipStream = plugins.fs.createReadStream(
plugins.path.join(testPaths.gzipTestDir, gzipFileName)
);
// Test extraction using SmartArchive from stream with fluent API
const streamFiles: any[] = [];
const resultStream = await smartarchive.SmartArchive.create()
.stream(gzipStream)
.toStreamFiles();
await new Promise<void>((resolve, reject) => {
resultStream.on('data', (streamFile) => {
streamFiles.push(streamFile);
});
resultStream.on('end', resolve);
resultStream.on('error', reject);
});
// Verify we got the expected file
expect(streamFiles.length).toBeGreaterThan(0);
// Read content from the stream file
if (streamFiles[0]) {
const chunks: Buffer[] = [];
const readStream = await streamFiles[0].createReadStream();
await new Promise<void>((resolve, reject) => {
readStream.on('data', (chunk: Buffer) => chunks.push(chunk));
readStream.on('end', resolve);
readStream.on('error', reject);
});
const extractedContent = Buffer.concat(chunks).toString();
expect(extractedContent).toEqual(testContent);
}
});
tap.test('should handle gzip files with original filename in header', async () => {
// Test with a real-world gzip file that includes filename in header
const testContent = 'File with name in gzip header\n'.repeat(30);
const gzipFileName = 'compressed.gz';
// Create a proper gzip with filename header using Node's zlib
const zlib = await import('node:zlib');
const gzipBuffer = await new Promise<Buffer>((resolve, reject) => {
zlib.gzip(Buffer.from(testContent), {
level: 9,
}, (err, result) => {
if (err) reject(err);
else resolve(result);
});
});
await plugins.fsPromises.writeFile(
plugins.path.join(testPaths.gzipTestDir, gzipFileName),
gzipBuffer
);
// Test extraction with fluent API
const extractPath = plugins.path.join(testPaths.gzipTestDir, 'header-test');
await plugins.fsPromises.mkdir(extractPath, { recursive: true });
await smartarchive.SmartArchive.create()
.file(plugins.path.join(testPaths.gzipTestDir, gzipFileName))
.fileName('compressed.txt')
.extract(extractPath);
// Check if file was extracted (name might be derived from archive name)
const files = await plugins.listFileTree(extractPath, '**/*');
expect(files.length).toBeGreaterThan(0);
// Read and verify content
const extractedFile = files[0];
const extractedContent = await plugins.fsPromises.readFile(
plugins.path.join(extractPath, extractedFile || 'compressed.txt'),
'utf8'
);
expect(extractedContent).toEqual(testContent);
});
tap.test('should handle large gzip files', async () => {
// Create a larger test file
const largeContent = 'x'.repeat(1024 * 1024); // 1MB of 'x' characters
const gzipFileName = 'large-file.txt.gz';
// Compress the large file
const fflate = await import('fflate');
const compressed = fflate.gzipSync(Buffer.from(largeContent));
await plugins.fsPromises.writeFile(
plugins.path.join(testPaths.gzipTestDir, gzipFileName),
Buffer.from(compressed)
);
// Test extraction with fluent API
const extractPath = plugins.path.join(testPaths.gzipTestDir, 'large-extracted');
await plugins.fsPromises.mkdir(extractPath, { recursive: true });
await smartarchive.SmartArchive.create()
.file(plugins.path.join(testPaths.gzipTestDir, gzipFileName))
.fileName('large-file.txt')
.extract(extractPath);
// Verify the extracted content
const files = await plugins.listFileTree(extractPath, '**/*');
expect(files.length).toBeGreaterThan(0);
const extractedContent = await plugins.fsPromises.readFile(
plugins.path.join(extractPath, files[0] || 'large-file.txt'),
'utf8'
);
expect(extractedContent.length).toEqual(largeContent.length);
expect(extractedContent).toEqual(largeContent);
});
tap.test('should handle real-world multi-chunk gzip from URL', async () => {
// Test with a real tgz file that will be processed in multiple chunks
const testUrl = 'https://registry.npmjs.org/@push.rocks/smartfile/-/smartfile-11.2.7.tgz';
// Download and extract the archive with fluent API
const extractPath = plugins.path.join(testPaths.gzipTestDir, 'real-world-test');
await plugins.fsPromises.mkdir(extractPath, { recursive: true });
await smartarchive.SmartArchive.create()
.url(testUrl)
.extract(extractPath);
// Verify extraction worked
const files = await plugins.listFileTree(extractPath, '**/*');
expect(files.length).toBeGreaterThan(0);
// Check for expected package structure
const hasPackageJson = files.some(f => f.includes('package.json'));
expect(hasPackageJson).toBeTrue();
// Read and verify package.json content
const packageJsonPath = files.find(f => f.includes('package.json'));
if (packageJsonPath) {
const packageJsonContent = await plugins.fsPromises.readFile(
plugins.path.join(extractPath, packageJsonPath),
'utf8'
);
const packageJson = JSON.parse(packageJsonContent);
expect(packageJson.name).toEqual('@push.rocks/smartfile');
expect(packageJson.version).toEqual('11.2.7');
}
// Read and verify a TypeScript file
const tsFilePath = files.find(f => f.endsWith('.ts'));
if (tsFilePath) {
const tsFileContent = await plugins.fsPromises.readFile(
plugins.path.join(extractPath, tsFilePath),
'utf8'
);
// TypeScript files should have content
expect(tsFileContent.length).toBeGreaterThan(10);
console.log(` ✓ TypeScript file ${tsFilePath} has ${tsFileContent.length} bytes`);
}
// Read and verify license file
const licensePath = files.find(f => f.includes('license'));
if (licensePath) {
const licenseContent = await plugins.fsPromises.readFile(
plugins.path.join(extractPath, licensePath),
'utf8'
);
expect(licenseContent).toContain('MIT');
}
// Verify we can read multiple files without corruption
const readableFiles = files.filter(f =>
f.endsWith('.json') || f.endsWith('.md') || f.endsWith('.ts') || f.endsWith('.js')
).slice(0, 5); // Test first 5 readable files
for (const file of readableFiles) {
const content = await plugins.fsPromises.readFile(
plugins.path.join(extractPath, file),
'utf8'
);
expect(content).toBeDefined();
expect(content.length).toBeGreaterThan(0);
console.log(` ✓ Successfully read ${file} (${content.length} bytes)`);
}
});
tap.test('should handle gzip extraction fully in memory', async () => {
// Create test data in memory
const testContent = 'This is test data for in-memory gzip processing\n'.repeat(100);
// Compress using fflate in memory
const fflate = await import('fflate');
const compressed = fflate.gzipSync(Buffer.from(testContent));
// Process through SmartArchive without touching filesystem using fluent API
const streamFiles: plugins.smartfile.StreamFile[] = [];
const resultStream = await smartarchive.SmartArchive.create()
.buffer(Buffer.from(compressed))
.toStreamFiles();
await new Promise<void>((resolve, reject) => {
resultStream.on('data', (streamFile: plugins.smartfile.StreamFile) => {
streamFiles.push(streamFile);
});
resultStream.on('end', resolve);
resultStream.on('error', reject);
});
// Verify we got a file
expect(streamFiles.length).toBeGreaterThan(0);
// Read the content from memory without filesystem
const firstFile = streamFiles[0];
const chunks: Buffer[] = [];
const readStream = await firstFile.createReadStream();
await new Promise<void>((resolve, reject) => {
readStream.on('data', (chunk: Buffer) => chunks.push(chunk));
readStream.on('end', resolve);
readStream.on('error', reject);
});
const extractedContent = Buffer.concat(chunks).toString();
expect(extractedContent).toEqual(testContent);
console.log(` ✓ In-memory extraction successful (${extractedContent.length} bytes)`);
});
tap.test('should handle real tgz file fully in memory', async (tools) => {
await tools.timeout(10000); // Set 10 second timeout
// Download tgz file into memory
const response = await plugins.smartrequest.SmartRequest.create()
.url('https://registry.npmjs.org/@push.rocks/smartfile/-/smartfile-11.2.7.tgz')
.get();
const tgzBuffer = Buffer.from(await response.arrayBuffer());
console.log(` Downloaded ${tgzBuffer.length} bytes into memory`);
// Process through SmartArchive in memory with fluent API
const streamFiles: plugins.smartfile.StreamFile[] = [];
const resultStream = await smartarchive.SmartArchive.create()
.buffer(tgzBuffer)
.toStreamFiles();
await new Promise<void>((resolve, reject) => {
let timeout: NodeJS.Timeout;
const cleanup = () => {
clearTimeout(timeout);
};
timeout = setTimeout(() => {
cleanup();
resolve(); // Resolve after timeout if stream doesn't end
}, 5000);
resultStream.on('data', (streamFile: plugins.smartfile.StreamFile) => {
streamFiles.push(streamFile);
});
resultStream.on('end', () => {
cleanup();
resolve();
});
resultStream.on('error', (err) => {
cleanup();
reject(err);
});
});
console.log(` Extracted ${streamFiles.length} files in memory`);
// At minimum we should have extracted something
expect(streamFiles.length).toBeGreaterThan(0);
// Find and read package.json from memory
const packageJsonFile = streamFiles.find(f => f.relativeFilePath?.includes('package.json'));
if (packageJsonFile) {
const chunks: Buffer[] = [];
const readStream = await packageJsonFile.createReadStream();
await new Promise<void>((resolve, reject) => {
readStream.on('data', (chunk: Buffer) => chunks.push(chunk));
readStream.on('end', resolve);
readStream.on('error', reject);
});
const packageJsonContent = Buffer.concat(chunks).toString();
const packageJson = JSON.parse(packageJsonContent);
expect(packageJson.name).toEqual('@push.rocks/smartfile');
expect(packageJson.version).toEqual('11.2.7');
console.log(` ✓ Read package.json from memory: ${packageJson.name}@${packageJson.version}`);
}
// Read a few more files to verify integrity
const filesToCheck = streamFiles.slice(0, 3);
for (const file of filesToCheck) {
const chunks: Buffer[] = [];
const readStream = await file.createReadStream();
await new Promise<void>((resolve, reject) => {
readStream.on('data', (chunk: Buffer) => chunks.push(chunk));
readStream.on('end', resolve);
readStream.on('error', reject);
});
const content = Buffer.concat(chunks);
expect(content.length).toBeGreaterThan(0);
console.log(` ✓ Read ${file.relativeFilePath} from memory (${content.length} bytes)`);
}
});
export default tap.start();

test/test.node+deno.ts Normal file
View File

@@ -0,0 +1,240 @@
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as plugins from './plugins.js';
const testPaths = {
nogitDir: plugins.path.join(
plugins.smartpath.get.dirnameFromImportMetaUrl(import.meta.url),
'../.nogit/',
),
remoteDir: plugins.path.join(
plugins.smartpath.get.dirnameFromImportMetaUrl(import.meta.url),
'../.nogit/remote',
),
};
import * as smartarchive from '../ts/index.js';
tap.preTask('should prepare .nogit dir', async () => {
await plugins.fsPromises.mkdir(testPaths.remoteDir, { recursive: true });
});
tap.preTask('should prepare downloads', async (tools) => {
const response = await plugins.smartrequest.SmartRequest.create()
.url(
'https://verdaccio.lossless.digital/@pushrocks%2fwebsetup/-/websetup-2.0.14.tgz',
)
.get();
const downloadedFile: Buffer = Buffer.from(await response.arrayBuffer());
await plugins.fsPromises.writeFile(
plugins.path.join(testPaths.nogitDir, 'test.tgz'),
downloadedFile,
);
});
tap.test('should extract existing files on disk using fluent API', async () => {
await smartarchive.SmartArchive.create()
.url('https://verdaccio.lossless.digital/@pushrocks%2fwebsetup/-/websetup-2.0.14.tgz')
.extract(testPaths.nogitDir);
});
tap.test('should extract from file using fluent API', async () => {
const extractPath = plugins.path.join(testPaths.nogitDir, 'from-file-test');
await plugins.fsPromises.mkdir(extractPath, { recursive: true });
await smartarchive.SmartArchive.create()
.file(plugins.path.join(testPaths.nogitDir, 'test.tgz'))
.extract(extractPath);
const files = await plugins.listFileTree(extractPath, '**/*');
expect(files.length).toBeGreaterThan(0);
});
tap.test('should extract with stripComponents using fluent API', async () => {
const extractPath = plugins.path.join(testPaths.nogitDir, 'strip-test');
await plugins.fsPromises.mkdir(extractPath, { recursive: true });
await smartarchive.SmartArchive.create()
.url('https://verdaccio.lossless.digital/@pushrocks%2fwebsetup/-/websetup-2.0.14.tgz')
.stripComponents(1)
.extract(extractPath);
const files = await plugins.listFileTree(extractPath, '**/*');
expect(files.length).toBeGreaterThan(0);
// Files should not have 'package/' prefix
const hasPackagePrefix = files.some(f => f.startsWith('package/'));
expect(hasPackagePrefix).toBeFalse();
});
tap.test('should extract with filter using fluent API', async () => {
const extractPath = plugins.path.join(testPaths.nogitDir, 'filter-test');
await plugins.fsPromises.mkdir(extractPath, { recursive: true });
await smartarchive.SmartArchive.create()
.url('https://verdaccio.lossless.digital/@pushrocks%2fwebsetup/-/websetup-2.0.14.tgz')
.filter(entry => entry.path.endsWith('.json'))
.extract(extractPath);
const files = await plugins.listFileTree(extractPath, '**/*');
// All extracted files should be JSON
for (const file of files) {
expect(file.endsWith('.json')).toBeTrue();
}
});
tap.test('should list archive entries using fluent API', async () => {
const entries = await smartarchive.SmartArchive.create()
.url('https://verdaccio.lossless.digital/@pushrocks%2fwebsetup/-/websetup-2.0.14.tgz')
.list();
expect(entries.length).toBeGreaterThan(0);
const hasPackageJson = entries.some(e => e.path.includes('package.json'));
expect(hasPackageJson).toBeTrue();
});
tap.test('should create archive using fluent API', async () => {
const archive = await smartarchive.SmartArchive.create()
.format('tar.gz')
.compression(9)
.entry('hello.txt', 'Hello World!')
.entry('config.json', JSON.stringify({ name: 'test', version: '1.0.0' }));
expect(archive).toBeInstanceOf(smartarchive.SmartArchive);
const buffer = await archive.toBuffer();
expect(buffer.length).toBeGreaterThan(0);
});
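// A round-trip sketch (not an original test), assuming the creation and
// extraction sides of the fluent API compose as the surrounding tests suggest:
tap.test('sketch: round-trip an in-memory tar.gz', async () => {
  const packed = await smartarchive.SmartArchive.create()
    .format('tar.gz')
    .entry('note.txt', 'round trip')
    .toBuffer();
  const unpacked = await smartarchive.SmartArchive.create()
    .buffer(packed)
    .toSmartFiles();
  expect(unpacked.some((f) => f.relative.includes('note.txt'))).toBeTrue();
});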
tap.test('should create and write archive to file using fluent API', async () => {
const outputPath = plugins.path.join(testPaths.nogitDir, 'created-archive.tar.gz');
await smartarchive.SmartArchive.create()
.format('tar.gz')
.entry('readme.txt', 'This is a test archive')
.entry('data/info.json', JSON.stringify({ created: new Date().toISOString() }))
.toFile(outputPath);
// Verify file was created
const stats = await plugins.fsPromises.stat(outputPath);
expect(stats.size).toBeGreaterThan(0);
// Verify we can extract it
const extractPath = plugins.path.join(testPaths.nogitDir, 'verify-created');
await smartarchive.SmartArchive.create()
.file(outputPath)
.extract(extractPath);
const files = await plugins.listFileTree(extractPath, '**/*');
expect(files).toContain('readme.txt');
});
tap.test('should create ZIP archive using fluent API', async () => {
const outputPath = plugins.path.join(testPaths.nogitDir, 'created-archive.zip');
await smartarchive.SmartArchive.create()
.format('zip')
.entry('file1.txt', 'Content 1')
.entry('file2.txt', 'Content 2')
.toFile(outputPath);
// Verify file was created
const stats = await plugins.fsPromises.stat(outputPath);
expect(stats.size).toBeGreaterThan(0);
});
tap.test('should extract to SmartFiles using fluent API', async () => {
const smartFiles = await smartarchive.SmartArchive.create()
.url('https://verdaccio.lossless.digital/@pushrocks%2fwebsetup/-/websetup-2.0.14.tgz')
.toSmartFiles();
expect(smartFiles.length).toBeGreaterThan(0);
const packageJson = smartFiles.find(f => f.relative.includes('package.json'));
expect(packageJson).toBeDefined();
});
tap.test('should analyze archive using fluent API', async () => {
const info = await smartarchive.SmartArchive.create()
.file(plugins.path.join(testPaths.nogitDir, 'test.tgz'))
.analyze();
expect(info.isArchive).toBeTrue();
expect(info.isCompressed).toBeTrue();
expect(info.format).toEqual('gz');
});
tap.test('should check if file exists in archive using fluent API', async () => {
const hasPackageJson = await smartarchive.SmartArchive.create()
.url('https://verdaccio.lossless.digital/@pushrocks%2fwebsetup/-/websetup-2.0.14.tgz')
.hasFile('package.json');
expect(hasPackageJson).toBeTrue();
});
tap.test('should extract single file using fluent API', async () => {
const packageJson = await smartarchive.SmartArchive.create()
.url('https://verdaccio.lossless.digital/@pushrocks%2fwebsetup/-/websetup-2.0.14.tgz')
.extractFile('package.json');
expect(packageJson).toBeDefined();
expect(packageJson!.contents.toString()).toContain('websetup');
});
tap.test('should handle include/exclude patterns', async () => {
const smartFiles = await smartarchive.SmartArchive.create()
.url('https://verdaccio.lossless.digital/@pushrocks%2fwebsetup/-/websetup-2.0.14.tgz')
.include(/\.json$/)
.toSmartFiles();
expect(smartFiles.length).toBeGreaterThan(0);
for (const file of smartFiles) {
expect(file.relative.endsWith('.json')).toBeTrue();
}
});
tap.test('should throw error when mixing modes', async () => {
let threw = false;
try {
smartarchive.SmartArchive.create()
.url('https://example.com/archive.tgz')
.entry('file.txt', 'content'); // This should throw
} catch (e) {
threw = true;
expect((e as Error).message).toContain('extraction mode');
}
expect(threw).toBeTrue();
});
tap.test('should throw error when no source configured', async () => {
let threw = false;
try {
await smartarchive.SmartArchive.create().extract('./output');
} catch (e) {
threw = true;
expect((e as Error).message).toContain('No source configured');
}
expect(threw).toBeTrue();
});
tap.test('should throw error when no format configured', async () => {
let threw = false;
try {
await smartarchive.SmartArchive.create()
.entry('file.txt', 'content')
.toBuffer();
} catch (e) {
threw = true;
expect((e as Error).message).toContain('No format specified');
}
expect(threw).toBeTrue();
});
tap.skip.test('should extract a bz2', async () => {
const dataUrl =
'https://daten.offeneregister.de/de_companies_ocdata.jsonl.bz2';
await smartarchive.SmartArchive.create()
.url(dataUrl)
.extract(plugins.path.join(testPaths.nogitDir, 'de_companies_ocdata.jsonl'));
});
export default tap.start();

View File

@@ -1,52 +0,0 @@
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as plugins from './plugins.js';
const testPaths = {
nogitDir: plugins.path.join(
plugins.smartpath.get.dirnameFromImportMetaUrl(import.meta.url),
'../.nogit/',
),
remoteDir: plugins.path.join(
plugins.smartpath.get.dirnameFromImportMetaUrl(import.meta.url),
'../.nogit/remote',
),
};
import * as smartarchive from '../ts/index.js';
tap.preTask('should prepare .nogit dir', async () => {
await plugins.smartfile.fs.ensureDir(testPaths.remoteDir);
});
tap.preTask('should prepare downloads', async (tools) => {
const response = await plugins.smartrequest.SmartRequest.create()
.url(
'https://verdaccio.lossless.digital/@pushrocks%2fwebsetup/-/websetup-2.0.14.tgz',
)
.get();
const downloadedFile: Buffer = Buffer.from(await response.arrayBuffer());
await plugins.smartfile.memory.toFs(
downloadedFile,
plugins.path.join(testPaths.nogitDir, 'test.tgz'),
);
});
tap.test('should extract existing files on disk', async () => {
const testSmartarchive = await smartarchive.SmartArchive.fromArchiveUrl(
'https://verdaccio.lossless.digital/@pushrocks%2fwebsetup/-/websetup-2.0.14.tgz',
);
await testSmartarchive.exportToFs(testPaths.nogitDir);
});
tap.skip.test('should extract a bz2', async () => {
const dataUrl =
'https://daten.offeneregister.de/de_companies_ocdata.jsonl.bz2';
const testArchive = await smartarchive.SmartArchive.fromArchiveUrl(dataUrl);
await testArchive.exportToFs(
plugins.path.join(testPaths.nogitDir, 'de_companies_ocdata.jsonl'),
'data.jsonl',
);
});
await tap.start();

View File

@@ -3,6 +3,6 @@
 */
export const commitinfo = {
  name: '@push.rocks/smartarchive',
-  version: '4.2.0',
+  version: '5.0.1',
  description: 'A library for working with archive files, providing utilities for compressing and decompressing data.'
}

View File

@@ -1,44 +1,60 @@
import type { IBitReader } from '../interfaces.js';

const BITMASK = [0, 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7f, 0xff] as const;

/**
 * Creates a bit reader function for BZIP2 decompression.
 * Takes a buffer iterator as input and returns a function that reads bits.
 */
export function bitIterator(nextBuffer: () => Buffer): IBitReader {
  let bit = 0;
  let byte = 0;
  let bytes = nextBuffer();
  let _bytesRead = 0;

  const reader = function (n: number | null): number | void {
    if (n === null && bit !== 0) {
      // align to byte boundary
      bit = 0;
      byte++;
      return;
    }

    let result = 0;
    let remaining = n as number;
    while (remaining > 0) {
      if (byte >= bytes.length) {
        byte = 0;
        bytes = nextBuffer();
      }

      const left = 8 - bit;

      if (bit === 0 && remaining > 0) {
        _bytesRead++;
      }

      if (remaining >= left) {
        result <<= left;
        result |= BITMASK[left] & bytes[byte++];
        bit = 0;
        remaining -= left;
      } else {
        result <<= remaining;
        result |= (bytes[byte] & (BITMASK[remaining] << (8 - remaining - bit))) >> (8 - remaining - bit);
        bit += remaining;
        remaining = 0;
      }
    }
    return result;
  } as IBitReader;

  Object.defineProperty(reader, 'bytesRead', {
    get: () => _bytesRead,
    enumerable: true,
  });

  return reader;
}
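
// A usage sketch (assumed, not from the source): reader(n) returns the next n
// bits as a number, reader(null) realigns to a byte boundary, and
// reader.bytesRead reports whole bytes consumed so far.
// const reader = bitIterator(() => nextChunk()); // nextChunk() is hypothetical
// const magic = reader(8 * 3) as number;         // 4348520 === 'BZh'
// const level = (reader(8) as number) - 48;      // block size digit, 1-9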

View File

@@ -1,23 +1,22 @@
import { Bzip2Error, BZIP2_ERROR_CODES } from '../errors.js';
import type { IBitReader, IHuffmanGroup } from '../interfaces.js';

// Re-export Bzip2Error for backward compatibility
export { Bzip2Error };

/**
 * Throw a BZIP2 error with proper error code
 */
function throwError(message: string, code: string = BZIP2_ERROR_CODES.INVALID_BLOCK_DATA): never {
  throw new Bzip2Error(message, code);
}

/**
 * BZIP2 decompression implementation
 */
export class Bzip2 {
  // CRC32 lookup table for BZIP2
  public readonly crcTable: readonly number[] = [
    0x00000000, 0x04c11db7, 0x09823b6e, 0x0d4326d9, 0x130476dc, 0x17c56b6b,
    0x1a864db2, 0x1e475005, 0x2608edb8, 0x22c9f00f, 0x2f8ad6d6, 0x2b4bcb61,
    0x350c9b64, 0x31cd86d3, 0x3c8ea00a, 0x384fbdbd, 0x4c11db70, 0x48d0c6c7,
@@ -63,14 +62,24 @@ export class Bzip2 {
    0xbcb4666d, 0xb8757bda, 0xb5365d03, 0xb1f740b4,
  ];

  // State arrays initialized in header()
  private byteCount!: Int32Array;
  private symToByte!: Uint8Array;
  private mtfSymbol!: Int32Array;
  private selectors!: Uint8Array;

  /**
   * Create a bit reader from a byte array
   */
  array(bytes: Uint8Array | Buffer): (n: number) => number {
    let bit = 0;
    let byte = 0;
    const BITMASK = [0, 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7f, 0xff];
    return function (n: number): number {
      let result = 0;
      while (n > 0) {
        const left = 8 - bit;
        if (n >= left) {
          result <<= left;
          result |= BITMASK[left] & bytes[byte++];
@@ -78,234 +87,341 @@ export class Bzip2 {
          n -= left;
        } else {
          result <<= n;
          result |= (bytes[byte] & (BITMASK[n] << (8 - n - bit))) >> (8 - n - bit);
          bit += n;
          n = 0;
        }
      }
      return result;
    };
  }

  /**
   * Simple decompression from a buffer
   */
  simple(srcbuffer: Uint8Array | Buffer, stream: (byte: number) => void): void {
    const bits = this.array(srcbuffer);
    const size = this.header(bits as IBitReader);
    let ret: number | null = 0;
    const bufsize = 100000 * size;
    const buf = new Int32Array(bufsize);
    do {
      ret = this.decompress(bits as IBitReader, stream, buf, bufsize, ret);
    } while (ret !== null);
  }

  /**
   * Parse BZIP2 header and return block size
   */
  header(bits: IBitReader): number {
    this.byteCount = new Int32Array(256);
    this.symToByte = new Uint8Array(256);
    this.mtfSymbol = new Int32Array(256);
    this.selectors = new Uint8Array(0x8000);
    if (bits(8 * 3) !== 4348520) {
      throwError('No BZIP2 magic number found at start of stream', BZIP2_ERROR_CODES.NO_MAGIC_NUMBER);
    }
    const blockSize = (bits(8) as number) - 48;
    if (blockSize < 1 || blockSize > 9) {
      throwError('Invalid BZIP2 archive: block size must be 1-9', BZIP2_ERROR_CODES.INVALID_ARCHIVE);
    }
    return blockSize;
  }

  /**
   * Decompress a BZIP2 block
   */
  decompress(
    bits: IBitReader,
    stream: (byte: number) => void,
    buf: Int32Array,
    bufsize: number,
    streamCRC?: number | null
  ): number | null {
    const MAX_HUFCODE_BITS = 20;
    const MAX_SYMBOLS = 258;
    const SYMBOL_RUNA = 0;
    const SYMBOL_RUNB = 1;
    const GROUP_SIZE = 50;
    let crc = 0 ^ -1;

    // Read block header
    let headerHex = '';
    for (let i = 0; i < 6; i++) {
      headerHex += (bits(8) as number).toString(16);
    }

    // Check for end-of-stream marker
    if (headerHex === '177245385090') {
      const finalCRC = (bits(32) as number) | 0;
      if (finalCRC !== streamCRC) {
        throwError('CRC32 mismatch: stream checksum verification failed', BZIP2_ERROR_CODES.CRC_MISMATCH);
      }
      // Align stream to byte boundary
      bits(null);
      return null;
    }

    // Verify block signature (pi digits)
    if (headerHex !== '314159265359') {
      throwError('Invalid block header: expected pi signature (0x314159265359)', BZIP2_ERROR_CODES.INVALID_BLOCK_DATA);
    }

    const crcblock = (bits(32) as number) | 0;
    if (bits(1)) {
      throwError('Unsupported obsolete BZIP2 format version', BZIP2_ERROR_CODES.INVALID_ARCHIVE);
    }
    const origPtr = bits(24) as number;
    if (origPtr > bufsize) {
      throwError('Initial position larger than buffer size', BZIP2_ERROR_CODES.BUFFER_OVERFLOW);
    }

    // Read symbol map
    let symbolMapBits = bits(16) as number;
    let symTotal = 0;
    for (let i = 0; i < 16; i++) {
      if (symbolMapBits & (1 << (15 - i))) {
        const subMap = bits(16) as number;
        for (let j = 0; j < 16; j++) {
          if (subMap & (1 << (15 - j))) {
            this.symToByte[symTotal++] = 16 * i + j;
          }
        }
      }
    }

    // Read Huffman groups
    const groupCount = bits(3) as number;
    if (groupCount < 2 || groupCount > 6) {
      throwError('Invalid group count: must be between 2 and 6', BZIP2_ERROR_CODES.INVALID_HUFFMAN);
    }
    const nSelectors = bits(15) as number;
    if (nSelectors === 0) {
      throwError('Invalid selector count: cannot be zero', BZIP2_ERROR_CODES.INVALID_SELECTOR);
    }

    // Initialize MTF symbol array
    for (let i = 0; i < groupCount; i++) {
      this.mtfSymbol[i] = i;
    }

    // Read selectors using MTF decoding
    for (let i = 0; i < nSelectors; i++) {
      let j = 0;
      while (bits(1)) {
        j++;
        if (j >= groupCount) {
          throwError('Invalid MTF index: exceeds group count', BZIP2_ERROR_CODES.INVALID_HUFFMAN);
        }
      }
      const uc = this.mtfSymbol[j];
      for (let k = j - 1; k >= 0; k--) {
        this.mtfSymbol[k + 1] = this.mtfSymbol[k];
      }
      this.mtfSymbol[0] = uc;
      this.selectors[i] = uc;
    }

    // Build Huffman tables
    const symCount = symTotal + 2;
    const groups: IHuffmanGroup[] = [];
    const length = new Uint8Array(MAX_SYMBOLS);
    const temp = new Uint16Array(MAX_HUFCODE_BITS + 1);

    for (let j = 0; j < groupCount; j++) {
      let t = bits(5) as number;
      for (let i = 0; i < symCount; i++) {
        while (true) {
          if (t < 1 || t > MAX_HUFCODE_BITS) {
            throwError('Invalid Huffman code length: must be between 1 and 20', BZIP2_ERROR_CODES.INVALID_HUFFMAN);
          }
          if (!bits(1)) break;
          if (!bits(1)) t++;
          else t--;
        }
        length[i] = t;
      }

      let minLen = length[0];
      let maxLen = length[0];
      for (let i = 1; i < symCount; i++) {
        if (length[i] > maxLen) maxLen = length[i];
        else if (length[i] < minLen) minLen = length[i];
      }

      const hufGroup: IHuffmanGroup = {
        permute: new Int32Array(MAX_SYMBOLS),
        limit: new Int32Array(MAX_HUFCODE_BITS + 1),
        base: new Int32Array(MAX_HUFCODE_BITS + 1),
        minLen,
        maxLen,
      };
      groups[j] = hufGroup;

      const base = hufGroup.base;
      const limit = hufGroup.limit;

      let pp = 0;
      for (let i = minLen; i <= maxLen; i++) {
        for (let t = 0; t < symCount; t++) {
          if (length[t] === i) hufGroup.permute[pp++] = t;
        }
      }
      for (let i = minLen; i <= maxLen; i++) {
        temp[i] = 0;
        limit[i] = 0;
      }
      for (let i = 0; i < symCount; i++) {
        temp[length[i]]++;
      }
      pp = 0;
      let tt = 0;
      for (let i = minLen; i < maxLen; i++) {
        pp += temp[i];
        limit[i] = pp - 1;
        pp <<= 1;
        base[i + 1] = pp - (tt += temp[i]);
      }
      limit[maxLen] = pp + temp[maxLen] - 1;
      base[minLen] = 0;
    }

    // Initialize for decoding
    for (let i = 0; i < 256; i++) {
      this.mtfSymbol[i] = i;
      this.byteCount[i] = 0;
    }

    let runPos = 0;
    let count = 0;
    let symCountRemaining = 0;
    let selector = 0;
    let hufGroup = groups[0];
    let base = hufGroup.base;
    let limit = hufGroup.limit;

    // Main decoding loop
    while (true) {
      if (!symCountRemaining--) {
        symCountRemaining = GROUP_SIZE - 1;
        if (selector >= nSelectors) {
          throwError('Invalid selector index: exceeds available groups', BZIP2_ERROR_CODES.INVALID_SELECTOR);
        }
        hufGroup = groups[this.selectors[selector++]];
        base = hufGroup.base;
        limit = hufGroup.limit;
      }

      let i = hufGroup.minLen;
      let j = bits(i) as number;
      while (true) {
        if (i > hufGroup.maxLen) {
          throwError('Huffman decoding error: bit length exceeds maximum allowed', BZIP2_ERROR_CODES.INVALID_HUFFMAN);
        }
        if (j <= limit[i]) break;
        i++;
        j = (j << 1) | (bits(1) as number);
      }
      j -= base[i];
      if (j < 0 || j >= MAX_SYMBOLS) {
        throwError('Symbol index out of bounds during Huffman decoding', BZIP2_ERROR_CODES.INVALID_HUFFMAN);
      }

      const nextSym = hufGroup.permute[j];
      if (nextSym === SYMBOL_RUNA || nextSym === SYMBOL_RUNB) {
        if (!runPos) {
          runPos = 1;
          j = 0;
        }
        if (nextSym === SYMBOL_RUNA) j += runPos;
        else j += 2 * runPos;
        runPos <<= 1;
        continue;
      }
      if (runPos) {
        runPos = 0;
        const runLength = j;
        if (count + runLength > bufsize) {
          throwError('Run-length overflow: decoded run exceeds buffer capacity', BZIP2_ERROR_CODES.BUFFER_OVERFLOW);
        }
        const uc = this.symToByte[this.mtfSymbol[0]];
        this.byteCount[uc] += runLength;
        for (let t = 0; t < runLength; t++) {
          buf[count++] = uc;
        }
      }
      if (nextSym > symTotal) break;

      if (count >= bufsize) {
        throwError('Buffer overflow: decoded data exceeds buffer capacity', BZIP2_ERROR_CODES.BUFFER_OVERFLOW);
      }

      const mtfIndex = nextSym - 1;
      const uc = this.mtfSymbol[mtfIndex];
      for (let k = mtfIndex - 1; k >= 0; k--) {
        this.mtfSymbol[k + 1] = this.mtfSymbol[k];
      }
      this.mtfSymbol[0] = uc;
      const decodedByte = this.symToByte[uc];
      this.byteCount[decodedByte]++;
      buf[count++] = decodedByte;
    }

    if (origPtr < 0 || origPtr >= count) {
      throwError('Invalid original pointer: position outside decoded block', BZIP2_ERROR_CODES.INVALID_POSITION);
    }

    // Inverse BWT transform
    let j = 0;
    for (let i = 0; i < 256; i++) {
      const k = j + this.byteCount[i];
      this.byteCount[i] = j;
      j = k;
    }

    for (let i = 0; i < count; i++) {
      const uc = buf[i] & 0xff;
      buf[this.byteCount[uc]] |= i << 8;
      this.byteCount[uc]++;
    }

    // Output decoded data
    let pos = 0;
    let current = 0;
    let run = 0;
    if (count) {
      pos = buf[origPtr];
      current = pos & 0xff;
      pos >>= 8;
      run = -1;
    }

    let remaining = count;
    while (remaining) {
      remaining--;
      const previous = current;
      pos = buf[pos];
      current = pos & 0xff;
      pos >>= 8;

      let copies: number;
      let outbyte: number;
      if (run++ === 3) {
        copies = current;
        outbyte = previous;
        current = -1;
@@ -313,19 +429,21 @@ export class Bzip2 {
        copies = 1;
        outbyte = current;
      }
      while (copies--) {
        crc = ((crc << 8) ^ this.crcTable[((crc >> 24) ^ outbyte) & 0xff]) & 0xffffffff;
        stream(outbyte);
      }
      if (current !== previous) run = 0;
    }

    crc = (crc ^ -1) >>> 0;
    if ((crc | 0) !== (crcblock | 0)) {
      throwError('CRC32 mismatch: block checksum verification failed', BZIP2_ERROR_CODES.CRC_MISMATCH);
    }

    const newStreamCRC = (crc ^ (((streamCRC || 0) << 1) | ((streamCRC || 0) >>> 31))) & 0xffffffff;
    return newStreamCRC;
  }
}
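
// A usage sketch (assumed): simple() drives the whole decode loop, emitting
// one decompressed byte per callback invocation.
// const bzip2 = new Bzip2();
// const out: number[] = [];
// bzip2.simple(bz2Bytes, (b) => out.push(b)); // bz2Bytes: a complete .bz2 buffer (hypothetical)
// const decompressed = Buffer.from(out);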

View File

@@ -1,51 +1,53 @@
import * as plugins from '../plugins.js';
import { Bzip2Error, BZIP2_ERROR_CODES } from '../errors.js';
import type { IBitReader } from '../interfaces.js';
import { Bzip2 } from './bzip2.js';
import { bitIterator } from './bititerator.js';

/**
 * Creates a streaming BZIP2 decompression transform
 */
export function unbzip2Stream(): plugins.smartstream.SmartDuplex<Buffer, Buffer> {
  const bzip2Instance = new Bzip2();
  const bufferQueue: Buffer[] = [];
  let hasBytes = 0;
  let blockSize = 0;
  let broken = false;
  let bitReader: IBitReader | null = null;
  let streamCRC: number | null = null;

  function decompressBlock(): Buffer | undefined {
    if (!blockSize) {
      blockSize = bzip2Instance.header(bitReader!);
      streamCRC = 0;
      return undefined;
    }

    const bufsize = 100000 * blockSize;
    const buf = new Int32Array(bufsize);
    const chunk: number[] = [];
    const outputFunc = (b: number): void => {
      chunk.push(b);
    };

    streamCRC = bzip2Instance.decompress(bitReader!, outputFunc, buf, bufsize, streamCRC);
    if (streamCRC === null) {
      // Reset for next bzip2 header
      blockSize = 0;
      return undefined;
    }
    return Buffer.from(chunk);
  }

  let outlength = 0;

  const decompressAndPush = async (): Promise<Buffer | undefined> => {
    if (broken) return undefined;
    try {
      const resultChunk = decompressBlock();
      if (resultChunk) {
@@ -53,40 +55,39 @@ export function unbzip2Stream() {
      }
      return resultChunk;
    } catch (e) {
      broken = true;
      if (e instanceof Error) {
        throw new Bzip2Error(`Decompression failed: ${e.message}`, BZIP2_ERROR_CODES.INVALID_BLOCK_DATA);
      }
      throw e;
    }
  };

  return new plugins.smartstream.SmartDuplex<Buffer, Buffer>({
    objectMode: true,
    name: 'bzip2',
    highWaterMark: 1,
    writeFunction: async function (data, streamTools) {
      bufferQueue.push(data);
      hasBytes += data.length;
      if (bitReader === null) {
        bitReader = bitIterator(function () {
          return bufferQueue.shift()!;
        });
      }

      const threshold = 25000 + 100000 * blockSize || 4;
      while (!broken && hasBytes - bitReader.bytesRead + 1 >= threshold) {
        const result = await decompressAndPush();
        if (!result) {
          continue;
        }
        await streamTools.push(result);
      }
      return null;
    },
    finalFunction: async function (streamTools) {
      while (!broken && bitReader && hasBytes > bitReader.bytesRead) {
        const result = await decompressAndPush();
        if (!result) {
@@ -94,10 +95,11 @@ export function unbzip2Stream() {
        }
        await streamTools.push(result);
      }

      if (!broken && streamCRC !== null) {
        this.emit('error', new Bzip2Error('Input stream ended prematurely', BZIP2_ERROR_CODES.PREMATURE_END));
      }
      return null;
    },
  });
}
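
// A usage sketch (assumed): the returned SmartDuplex behaves like any Node
// transform, so a .bz2 source can be piped straight through it.
// import * as fs from 'fs';
// fs.createReadStream('data.jsonl.bz2') // hypothetical paths
//   .pipe(unbzip2Stream())
//   .pipe(fs.createWriteStream('data.jsonl'));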

View File

@@ -1,24 +1,41 @@
import type { SmartArchive } from './classes.smartarchive.js';
import type { TSupportedMime } from './interfaces.js';
import * as plugins from './plugins.js';

/**
 * Type for decompression streams
 */
export type TDecompressionStream =
  | plugins.stream.Transform
  | plugins.stream.Duplex
  | plugins.tarStream.Extract;

/**
 * Result of archive analysis
 */
export interface IAnalyzedResult {
  fileType: plugins.fileType.FileTypeResult | undefined;
  isArchive: boolean;
  resultStream: plugins.smartstream.SmartDuplex<Buffer, Buffer>;
  decompressionStream: TDecompressionStream;
}

/**
 * Analyzes archive streams to detect format and provide decompression
 */
export class ArchiveAnalyzer {
  private smartArchiveRef: SmartArchive;

  constructor(smartArchiveRefArg: SmartArchive) {
    this.smartArchiveRef = smartArchiveRefArg;
  }

  /**
   * Check if a MIME type represents an archive format
   */
  private async mimeTypeIsArchive(mimeType: string | undefined): Promise<boolean> {
    if (!mimeType) return false;
    const archiveMimeTypes: Set<string> = new Set([
      'application/zip',
      'application/x-rar-compressed',
@@ -26,50 +43,46 @@ export class ArchiveAnalyzer {
      'application/gzip',
      'application/x-7z-compressed',
      'application/x-bzip2',
    ]);
    return archiveMimeTypes.has(mimeType);
  }

  /**
   * Get the appropriate decompression stream for a MIME type
   */
  private async getDecompressionStream(mimeTypeArg: TSupportedMime): Promise<TDecompressionStream> {
    switch (mimeTypeArg) {
      case 'application/gzip':
        return this.smartArchiveRef.gzipTools.getDecompressionStream();
      case 'application/zip':
        return this.smartArchiveRef.zipTools.getDecompressionStream();
      case 'application/x-bzip2':
        return this.smartArchiveRef.bzip2Tools.getDecompressionStream();
      case 'application/x-tar':
        return this.smartArchiveRef.tarTools.getDecompressionStream();
      default:
        // Handle unsupported formats or no decompression needed
        return plugins.smartstream.createPassThrough();
    }
  }

  /**
   * Create an analyzed stream that detects archive type and provides decompression
   * Emits a single IAnalyzedResult object
   */
  public getAnalyzedStream(): plugins.smartstream.SmartDuplex<Buffer, IAnalyzedResult> {
    let firstRun = true;
    const resultStream = plugins.smartstream.createPassThrough();

    const analyzerstream = new plugins.smartstream.SmartDuplex<Buffer, IAnalyzedResult>({
      readableObjectMode: true,
      writeFunction: async (chunkArg: Buffer, streamtools) => {
        if (firstRun) {
          firstRun = false;
          const fileType = await plugins.fileType.fileTypeFromBuffer(chunkArg);
          const decompressionStream = await this.getDecompressionStream(fileType?.mime as TSupportedMime);
          const result: IAnalyzedResult = {
            fileType,
            isArchive: await this.mimeTypeIsArchive(fileType?.mime),
@@ -81,11 +94,12 @@ export class ArchiveAnalyzer {
        await resultStream.backpressuredPush(chunkArg);
        return null;
      },
      finalFunction: async () => {
        resultStream.push(null);
        return null;
      },
    });
    return analyzerstream;
  }
}
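
// A consumption sketch (assumed): the analyzer emits exactly one
// IAnalyzedResult, after which the raw bytes flow through result.resultStream
// and can be piped into the matching decompression stream.
// sourceStream // hypothetical readable of archive bytes
//   .pipe(analyzer.getAnalyzedStream())
//   .on('data', (result: IAnalyzedResult) => {
//     if (result.isArchive) {
//       result.resultStream.pipe(result.decompressionStream);
//     }
//   });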

View File

@@ -1,62 +1,138 @@
import * as plugins from './plugins.js';
import type { TCompressionLevel } from './interfaces.js';

/**
 * Transform stream for GZIP compression using fflate
 */
export class GzipCompressionTransform extends plugins.stream.Transform {
  private gzip: plugins.fflate.Gzip;

  constructor(level: TCompressionLevel = 6) {
    super();
    // Create a streaming Gzip compressor
    this.gzip = new plugins.fflate.Gzip({ level }, (chunk, final) => {
      this.push(Buffer.from(chunk));
      if (final) {
        this.push(null);
      }
    });
  }

  _transform(
    chunk: Buffer,
    encoding: BufferEncoding,
    callback: plugins.stream.TransformCallback
  ): void {
    try {
      this.gzip.push(chunk, false);
      callback();
    } catch (err) {
      callback(err as Error);
    }
  }

  _flush(callback: plugins.stream.TransformCallback): void {
    try {
      this.gzip.push(new Uint8Array(0), true);
      callback();
    } catch (err) {
      callback(err as Error);
    }
  }
}

/**
 * Transform stream for GZIP decompression using fflate
 */
export class GzipDecompressionTransform extends plugins.stream.Transform {
  private gunzip: plugins.fflate.Gunzip;

  constructor() {
    super();
    // Create a streaming Gunzip decompressor
    this.gunzip = new plugins.fflate.Gunzip((chunk, final) => {
      this.push(Buffer.from(chunk));
      if (final) {
        this.push(null);
      }
    });
  }

  _transform(
    chunk: Buffer,
    encoding: BufferEncoding,
    callback: plugins.stream.TransformCallback
  ): void {
    try {
      this.gunzip.push(chunk, false);
      callback();
    } catch (err) {
      callback(err as Error);
    }
  }

  _flush(callback: plugins.stream.TransformCallback): void {
    try {
      this.gunzip.push(new Uint8Array(0), true);
      callback();
    } catch (err) {
      callback(err as Error);
    }
  }
}

/**
 * GZIP compression and decompression utilities
 */
export class GzipTools {
  /**
   * Get a streaming compression transform
   */
  public getCompressionStream(level?: TCompressionLevel): plugins.stream.Transform {
    return new GzipCompressionTransform(level);
  }

  /**
   * Get a streaming decompression transform
   */
  public getDecompressionStream(): plugins.stream.Transform {
    return new GzipDecompressionTransform();
  }

  /**
   * Compress data synchronously
   */
  public compressSync(data: Buffer, level?: TCompressionLevel): Buffer {
    const options = level !== undefined ? { level } : undefined;
    return Buffer.from(plugins.fflate.gzipSync(data, options));
  }

  /**
   * Decompress data synchronously
   */
  public decompressSync(data: Buffer): Buffer {
    return Buffer.from(plugins.fflate.gunzipSync(data));
  }

  /**
   * Compress data asynchronously
   * Note: Uses sync version for Deno compatibility (fflate async uses Web Workers
   * which have issues in Deno)
   */
  public async compress(data: Buffer, level?: TCompressionLevel): Promise<Buffer> {
    // Use sync version wrapped in Promise for cross-runtime compatibility
    return this.compressSync(data, level);
  }

  /**
   * Decompress data asynchronously
   * Note: Uses sync version for Deno compatibility (fflate async uses Web Workers
   * which have issues in Deno)
   */
  public async decompress(data: Buffer): Promise<Buffer> {
    // Use sync version wrapped in Promise for cross-runtime compatibility
    return this.decompressSync(data);
  }
}
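
// A round-trip sketch (assumed): compress() and decompress() wrap the sync
// fflate calls, so the sync and async variants are interchangeable.
// const gzipTools = new GzipTools();
// const packed = await gzipTools.compress(Buffer.from('hello'), 9);
// const restored = await gzipTools.decompress(packed); // Buffer containing 'hello'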

File diff suppressed because it is too large

View File

@@ -1,11 +1,14 @@
import * as plugins from './plugins.js';
import type { IArchiveEntry, TCompressionLevel } from './interfaces.js';
import { GzipTools } from './classes.gziptools.js';

/**
 * TAR archive creation and extraction utilities
 */
export class TarTools {
  /**
   * Add a file to a TAR pack stream
   */
  public async addFileToPack(
    pack: plugins.tarStream.Pack,
    optionsArg: {
@@ -13,12 +16,12 @@ export class TarTools {
      content?:
        | string
        | Buffer
        | plugins.stream.Readable
        | plugins.smartfile.SmartFile
        | plugins.smartfile.StreamFile;
      byteLength?: number;
      filePath?: string;
    }
  ): Promise<void> {
    return new Promise<void>(async (resolve, reject) => {
      let fileName: string | null = null;
@@ -26,18 +29,20 @@ export class TarTools {
      if (optionsArg.fileName) {
        fileName = optionsArg.fileName;
      } else if (optionsArg.content instanceof plugins.smartfile.SmartFile) {
        fileName = optionsArg.content.relative;
      } else if (optionsArg.content instanceof plugins.smartfile.StreamFile) {
        fileName = optionsArg.content.relativeFilePath;
      } else if (optionsArg.filePath) {
        fileName = optionsArg.filePath;
      }

      if (!fileName) {
        reject(new Error('No filename specified for TAR entry'));
        return;
      }

      // Determine content byte length
      let contentByteLength: number | undefined;
      if (optionsArg.byteLength) {
        contentByteLength = optionsArg.byteLength;
      } else if (typeof optionsArg.content === 'string') {
@@ -45,95 +50,159 @@ export class TarTools {
      } else if (Buffer.isBuffer(optionsArg.content)) {
        contentByteLength = optionsArg.content.length;
      } else if (optionsArg.content instanceof plugins.smartfile.SmartFile) {
        contentByteLength = await optionsArg.content.getSize();
      } else if (optionsArg.content instanceof plugins.smartfile.StreamFile) {
        contentByteLength = await optionsArg.content.getSize();
      } else if (optionsArg.filePath) {
        const fileStat = await plugins.fsPromises.stat(optionsArg.filePath);
        contentByteLength = fileStat.size;
      }

      // Convert all content types to Readable stream
      let content: plugins.stream.Readable;
      if (Buffer.isBuffer(optionsArg.content)) {
        content = plugins.stream.Readable.from(optionsArg.content);
      } else if (typeof optionsArg.content === 'string') {
        content = plugins.stream.Readable.from(Buffer.from(optionsArg.content));
      } else if (optionsArg.content instanceof plugins.smartfile.SmartFile) {
        content = plugins.stream.Readable.from(optionsArg.content.contents);
      } else if (optionsArg.content instanceof plugins.smartfile.StreamFile) {
        content = await optionsArg.content.createReadStream();
      } else if (optionsArg.content instanceof plugins.stream.Readable) {
        content = optionsArg.content;
      } else if (optionsArg.filePath) {
        content = plugins.fs.createReadStream(optionsArg.filePath);
      } else {
        reject(new Error('No content or filePath specified for TAR entry'));
        return;
      }

      const entry = pack.entry(
        {
          name: fileName,
          ...(contentByteLength !== undefined ? { size: contentByteLength } : {}),
        },
        (err: Error | null) => {
          if (err) {
            reject(err);
          } else {
            resolve();
          }
        }
      );

      content.pipe(entry);
      // Note: resolve() is called in the callback above when pipe completes
    });
  }

  /**
   * Pack a directory into a TAR stream
   */
  public async packDirectory(directoryPath: string): Promise<plugins.tarStream.Pack> {
    const fileTree = await plugins.listFileTree(directoryPath, '**/*');
    const pack = await this.getPackStream();
    for (const filePath of fileTree) {
      const absolutePath = plugins.path.join(directoryPath, filePath);
      const fileStat = await plugins.fsPromises.stat(absolutePath);
      await this.addFileToPack(pack, {
        byteLength: fileStat.size,
        filePath: absolutePath,
        fileName: filePath,
        content: plugins.fs.createReadStream(absolutePath),
      });
    }
    return pack;
  }

  /**
   * Get a new TAR pack stream
   */
  public async getPackStream(): Promise<plugins.tarStream.Pack> {
    return plugins.tarStream.pack();
  }

  /**
   * Get a TAR extraction stream
   */
  public getDecompressionStream(): plugins.tarStream.Extract {
    return plugins.tarStream.extract();
  }

  /**
   * Pack files into a TAR buffer
   */
  public async packFiles(files: IArchiveEntry[]): Promise<Buffer> {
    const pack = await this.getPackStream();
    for (const file of files) {
      await this.addFileToPack(pack, {
        fileName: file.archivePath,
        content: file.content as string | Buffer | plugins.stream.Readable | plugins.smartfile.SmartFile | plugins.smartfile.StreamFile,
        byteLength: file.size,
      });
    }
    pack.finalize();

    const chunks: Buffer[] = [];
    return new Promise((resolve, reject) => {
      pack.on('data', (chunk: Buffer) => chunks.push(chunk));
      pack.on('end', () => resolve(Buffer.concat(chunks)));
      pack.on('error', reject);
    });
  }

  /**
   * Pack a directory into a TAR.GZ buffer
   */
  public async packDirectoryToTarGz(
    directoryPath: string,
    compressionLevel?: TCompressionLevel
  ): Promise<Buffer> {
    const pack = await this.packDirectory(directoryPath);
    pack.finalize();

    const gzipTools = new GzipTools();
    const gzipStream = gzipTools.getCompressionStream(compressionLevel);

    const chunks: Buffer[] = [];
    return new Promise((resolve, reject) => {
      pack
        .pipe(gzipStream)
        .on('data', (chunk: Buffer) => chunks.push(chunk))
        .on('end', () => resolve(Buffer.concat(chunks)))
        .on('error', reject);
    });
  }

  /**
   * Pack a directory into a TAR.GZ stream
   */
  public async packDirectoryToTarGzStream(
    directoryPath: string,
    compressionLevel?: TCompressionLevel
  ): Promise<plugins.stream.Readable> {
    const pack = await this.packDirectory(directoryPath);
    pack.finalize();
    const gzipTools = new GzipTools();
    const gzipStream = gzipTools.getCompressionStream(compressionLevel);
    return pack.pipe(gzipStream);
  }

  /**
   * Pack files into a TAR.GZ buffer
   */
  public async packFilesToTarGz(
    files: IArchiveEntry[],
    compressionLevel?: TCompressionLevel
  ): Promise<Buffer> {
    const tarBuffer = await this.packFiles(files);
    const gzipTools = new GzipTools();
    return gzipTools.compress(tarBuffer, compressionLevel);
  }
}
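
// A usage sketch (assumed): pack in-memory entries straight into a tar.gz
// buffer; archivePath and content come from IArchiveEntry as used above.
// const tarTools = new TarTools();
// const tgzBuffer = await tarTools.packFilesToTarGz(
//   [{ archivePath: 'hello.txt', content: 'Hello World!' }],
//   9
// );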

View File

@@ -1,83 +1,196 @@
import * as plugins from './plugins.js';
import type { IArchiveEntry, TCompressionLevel } from './interfaces.js';

/**
 * Transform stream for ZIP decompression using fflate
 * Emits StreamFile objects for each file in the archive
 */
export class ZipDecompressionTransform extends plugins.smartstream.SmartDuplex<Buffer, plugins.smartfile.StreamFile> {
  private streamtools!: plugins.smartstream.IStreamTools;

  private unzipper = new plugins.fflate.Unzip(async (fileArg) => {
    let resultBuffer: Buffer;
    fileArg.ondata = async (_flateError, dat, final) => {
      resultBuffer
        ? (resultBuffer = Buffer.concat([resultBuffer, Buffer.from(dat)]))
        : (resultBuffer = Buffer.from(dat));
      if (final) {
        const streamFile = plugins.smartfile.StreamFile.fromBuffer(resultBuffer);
        streamFile.relativeFilePath = fileArg.name;
        this.streamtools.push(streamFile);
      }
    };
    fileArg.start();
  });

  constructor() {
    super({
      objectMode: true,
      writeFunction: async (chunkArg, streamtoolsArg) => {
        this.streamtools ? null : (this.streamtools = streamtoolsArg);
        this.unzipper.push(
          Buffer.isBuffer(chunkArg) ? chunkArg : Buffer.from(chunkArg as unknown as ArrayBuffer),
          false
        );
        return null;
      },
      finalFunction: async () => {
        this.unzipper.push(Buffer.from(''), true);
        await plugins.smartdelay.delayFor(0);
        await this.streamtools.push(null);
        return null;
      },
    });
    this.unzipper.register(plugins.fflate.UnzipInflate);
  }
}

/**
 * Streaming ZIP compression using fflate
 * Allows adding multiple entries before finalizing
 */
export class ZipCompressionStream extends plugins.stream.Duplex {
  private files: Map<string, { data: Uint8Array; options?: plugins.fflate.ZipOptions }> = new Map();
  private finalized = false;

  constructor() {
    super();
  }

  /**
   * Add a file entry to the ZIP archive
   */
  public async addEntry(
    fileName: string,
    content: Buffer | plugins.stream.Readable,
    options?: { compressionLevel?: TCompressionLevel }
  ): Promise<void> {
    if (this.finalized) {
      throw new Error('Cannot add entries to a finalized ZIP archive');
    }
    let data: Buffer;
    if (Buffer.isBuffer(content)) {
      data = content;
    } else {
      // Collect stream to buffer
      const chunks: Buffer[] = [];
      for await (const chunk of content) {
        chunks.push(Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk));
      }
      data = Buffer.concat(chunks);
    }
    this.files.set(fileName, {
      data: new Uint8Array(data),
      options: options?.compressionLevel !== undefined ? { level: options.compressionLevel } : undefined,
    });
  }

  /**
   * Finalize the ZIP archive and emit the compressed data
   */
  public async finalize(): Promise<void> {
    if (this.finalized) {
      return;
    }
    this.finalized = true;
    const filesObj: plugins.fflate.Zippable = {};
    for (const [name, { data, options }] of this.files) {
      filesObj[name] = options ? [data, options] : data;
    }
    // Use sync version for Deno compatibility (fflate async uses Web Workers)
    const result = plugins.fflate.zipSync(filesObj);
    this.push(Buffer.from(result));
    this.push(null);
  }

  _read(): void {
    // No-op: data is pushed when finalize() is called
  }

  _write(
    _chunk: Buffer,
    _encoding: BufferEncoding,
    callback: (error?: Error | null) => void
  ): void {
    // Not used for ZIP creation - use addEntry() instead
    callback(new Error('Use addEntry() to add files to the ZIP archive'));
  }
}

/**
 * ZIP compression and decompression utilities
 */
export class ZipTools {
  /**
   * Get a streaming compression object for creating ZIP archives
   */
  public getCompressionStream(): ZipCompressionStream {
    return new ZipCompressionStream();
  }

  /**
   * Get a streaming decompression transform for extracting ZIP archives
   */
  public getDecompressionStream(): ZipDecompressionTransform {
    return new ZipDecompressionTransform();
  }

  /**
   * Create a ZIP archive from an array of entries
   */
  public async createZip(entries: IArchiveEntry[], compressionLevel?: TCompressionLevel): Promise<Buffer> {
    const filesObj: plugins.fflate.Zippable = {};
    for (const entry of entries) {
      let data: Uint8Array;
      if (typeof entry.content === 'string') {
        data = new TextEncoder().encode(entry.content);
      } else if (Buffer.isBuffer(entry.content)) {
        data = new Uint8Array(entry.content);
      } else if (entry.content instanceof plugins.smartfile.SmartFile) {
        data = new Uint8Array(entry.content.contents);
      } else if (entry.content instanceof plugins.smartfile.StreamFile) {
        const buffer = await entry.content.getContentAsBuffer();
        data = new Uint8Array(buffer);
      } else {
        // Readable stream
        const chunks: Buffer[] = [];
        for await (const chunk of entry.content as plugins.stream.Readable) {
          chunks.push(Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk));
        }
        data = new Uint8Array(Buffer.concat(chunks));
      }
      if (compressionLevel !== undefined) {
        filesObj[entry.archivePath] = [data, { level: compressionLevel }];
      } else {
        filesObj[entry.archivePath] = data;
      }
    }
    // Use sync version for Deno compatibility (fflate async uses Web Workers)
    const result = plugins.fflate.zipSync(filesObj);
    return Buffer.from(result);
  }

  /**
   * Extract a ZIP buffer to an array of entries
   */
  public async extractZip(data: Buffer): Promise<Array<{ path: string; content: Buffer }>> {
    // Use sync version for Deno compatibility (fflate async uses Web Workers)
    const result = plugins.fflate.unzipSync(data);
    const entries: Array<{ path: string; content: Buffer }> = [];
    for (const [path, content] of Object.entries(result)) {
      entries.push({ path, content: Buffer.from(content) });
    }
    return entries;
  }
}
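
Usage sketch (not part of the diff; paths and contents are illustrative) for the reworked ZipTools API:

import { ZipTools } from './classes.ziptools.js';

const zipTools = new ZipTools();

// Create an in-memory ZIP from typed entries; level 9 = maximum compression.
const zipBuffer = await zipTools.createZip(
  [
    { archivePath: 'hello.txt', content: 'hello world' },
    { archivePath: 'data/blob.bin', content: Buffer.from([0x01, 0x02, 0x03]) },
  ],
  9,
);

// Round-trip: extract back to { path, content } pairs.
for (const entry of await zipTools.extractZip(zipBuffer)) {
  console.log(entry.path, entry.content.length);
}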

ts/errors.ts Normal file
View File

@@ -0,0 +1,70 @@
/**
* Base error class for smartarchive
*/
export class SmartArchiveError extends Error {
public readonly code: string;
constructor(message: string, code: string) {
super(message);
this.name = 'SmartArchiveError';
this.code = code;
// Maintains proper stack trace for where error was thrown (V8)
if (Error.captureStackTrace) {
Error.captureStackTrace(this, this.constructor);
}
}
}
/**
* BZIP2-specific decompression errors
*/
export class Bzip2Error extends SmartArchiveError {
constructor(message: string, code: string = 'BZIP2_ERROR') {
super(message, code);
this.name = 'Bzip2Error';
}
}
/**
* Archive format detection errors
*/
export class ArchiveFormatError extends SmartArchiveError {
constructor(message: string) {
super(message, 'ARCHIVE_FORMAT_ERROR');
this.name = 'ArchiveFormatError';
}
}
/**
* Stream processing errors
*/
export class StreamError extends SmartArchiveError {
constructor(message: string) {
super(message, 'STREAM_ERROR');
this.name = 'StreamError';
}
}
/**
* BZIP2 error codes for programmatic error handling
*/
export const BZIP2_ERROR_CODES = {
NO_MAGIC_NUMBER: 'BZIP2_NO_MAGIC',
INVALID_ARCHIVE: 'BZIP2_INVALID_ARCHIVE',
CRC_MISMATCH: 'BZIP2_CRC_MISMATCH',
INVALID_BLOCK_DATA: 'BZIP2_INVALID_BLOCK',
BUFFER_OVERFLOW: 'BZIP2_BUFFER_OVERFLOW',
INVALID_HUFFMAN: 'BZIP2_INVALID_HUFFMAN',
INVALID_SELECTOR: 'BZIP2_INVALID_SELECTOR',
INVALID_POSITION: 'BZIP2_INVALID_POSITION',
PREMATURE_END: 'BZIP2_PREMATURE_END',
} as const;
export type TBzip2ErrorCode = typeof BZIP2_ERROR_CODES[keyof typeof BZIP2_ERROR_CODES];
/**
* Throw a BZIP2 error with a specific code
*/
export function throwBzip2Error(message: string, code: TBzip2ErrorCode): never {
throw new Bzip2Error(message, code);
}
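
How these error classes might be consumed downstream, sketched with a hypothetical decoder (decodeBzip2Block is not part of this diff):

import { Bzip2Error, SmartArchiveError, BZIP2_ERROR_CODES } from './errors.js';

// Hypothetical stand-in for any code path that calls throwBzip2Error().
declare function decodeBzip2Block(raw: Buffer): Buffer;

try {
  decodeBzip2Block(Buffer.alloc(0));
} catch (err) {
  if (err instanceof Bzip2Error && err.code === BZIP2_ERROR_CODES.CRC_MISMATCH) {
    // The block decoded but failed its checksum.
    console.warn('bzip2 CRC mismatch:', err.message);
  } else if (err instanceof SmartArchiveError) {
    // Any other smartarchive error, dispatched by its stable code.
    console.error(`[${err.code}] ${err.message}`);
  } else {
    throw err;
  }
}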

ts/index.ts
View File

@@ -1,4 +1,15 @@
// Core types and errors
export * from './interfaces.js';
export * from './errors.js';

// Main archive class
export * from './classes.smartarchive.js';

// Format-specific tools
export * from './classes.tartools.js';
export * from './classes.ziptools.js';
export * from './classes.gziptools.js';
export * from './classes.bzip2tools.js';

// Archive analysis
export * from './classes.archiveanalyzer.js';
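
Consumers can then import everything from the package root; a one-line sketch (package name assumed from the @push.rocks scope):

import { SmartArchive, ZipTools, SmartArchiveError } from '@push.rocks/smartarchive';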

ts/interfaces.ts Normal file
View File

@@ -0,0 +1,136 @@
import type * as stream from 'node:stream';
import type { SmartFile, StreamFile } from '@push.rocks/smartfile';
/**
* Supported archive formats
*/
export type TArchiveFormat = 'tar' | 'tar.gz' | 'tgz' | 'zip' | 'gz' | 'bz2';
/**
* Compression level (0 = no compression, 9 = maximum compression)
*/
export type TCompressionLevel = 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9;
/**
* Supported MIME types for archive detection
*/
export type TSupportedMime =
| 'application/gzip'
| 'application/zip'
| 'application/x-bzip2'
| 'application/x-tar'
| undefined;
/**
* Entry to add to an archive during creation
*/
export interface IArchiveEntry {
/** Path within the archive */
archivePath: string;
/** Content: string, Buffer, Readable stream, SmartFile, or StreamFile */
content: string | Buffer | stream.Readable | SmartFile | StreamFile;
/** Optional size hint for streams (improves performance) */
size?: number;
/** Optional file mode/permissions */
mode?: number;
/** Optional modification time */
mtime?: Date;
}
/**
* Options for creating archives
*/
export interface IArchiveCreationOptions {
/** Target archive format */
format: TArchiveFormat;
/** Compression level (0-9, default depends on format) */
compressionLevel?: TCompressionLevel;
/** Base path to strip from file paths in archive */
basePath?: string;
}
/**
* Options for extracting archives
*/
export interface IArchiveExtractionOptions {
/** Target directory for extraction */
targetDir: string;
/** Optional filename for single-file archives (gz, bz2) */
fileName?: string;
/** Number of leading path components to strip */
stripComponents?: number;
/** Filter function to select which entries to extract */
filter?: (entry: IArchiveEntryInfo) => boolean;
/** Whether to overwrite existing files */
overwrite?: boolean;
}
/**
* Information about an archive entry
*/
export interface IArchiveEntryInfo {
/** Path of the entry within the archive */
path: string;
/** Size in bytes */
size: number;
/** Whether this entry is a directory */
isDirectory: boolean;
/** Whether this entry is a file */
isFile: boolean;
/** Modification time */
mtime?: Date;
/** File mode/permissions */
mode?: number;
}
/**
* Result of archive analysis
*/
export interface IArchiveInfo {
/** Detected archive format */
format: TArchiveFormat | null;
/** Whether the archive is compressed */
isCompressed: boolean;
/** Whether this is a recognized archive format */
isArchive: boolean;
/** List of entries (if available without full extraction) */
entries?: IArchiveEntryInfo[];
}
/**
* Options for adding a file to a TAR pack stream
*/
export interface IAddFileOptions {
/** Filename within the archive */
fileName?: string;
/** File content */
content?: string | Buffer | stream.Readable | SmartFile | StreamFile;
/** Size in bytes (required for streams) */
byteLength?: number;
/** Path to file on disk (alternative to content) */
filePath?: string;
}
/**
* Bit reader interface for BZIP2 decompression
*/
export interface IBitReader {
(n: number | null): number | void;
bytesRead: number;
}
/**
* Huffman group for BZIP2 decompression
*/
export interface IHuffmanGroup {
permute: Int32Array;
limit: Int32Array;
base: Int32Array;
minLen: number;
maxLen: number;
}
/**
* Entry filter predicate for fluent API
*/
export type TEntryFilter = (entry: IArchiveEntryInfo) => boolean;
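
A short sketch of how these option shapes compose (all values illustrative):

import type { IArchiveEntry, IArchiveCreationOptions, IArchiveExtractionOptions } from './interfaces.js';

const entries: IArchiveEntry[] = [
  { archivePath: 'readme.md', content: '# hello', mtime: new Date() },
  { archivePath: 'bin/tool', content: Buffer.from('#!/bin/sh\n'), mode: 0o755 },
];

const creationOptions: IArchiveCreationOptions = {
  format: 'tar.gz',
  compressionLevel: 9,
};

const extractionOptions: IArchiveExtractionOptions = {
  targetDir: './unpacked',
  stripComponents: 1,
  filter: (entry) => entry.isFile && !entry.path.endsWith('.log'),
  overwrite: true,
};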

ts/plugins.ts
View File

@@ -1,8 +1,34 @@
// node native scope
import * as path from 'node:path';
import * as stream from 'node:stream';
import * as fs from 'node:fs';
import * as fsPromises from 'node:fs/promises';

export { path, stream, fs, fsPromises };

/**
 * List files in a directory recursively, returning relative paths
 */
export async function listFileTree(dirPath: string, _pattern: string = '**/*'): Promise<string[]> {
  const results: string[] = [];

  async function walkDir(currentPath: string, relativePath: string = '') {
    const entries = await fsPromises.readdir(currentPath, { withFileTypes: true });
    for (const entry of entries) {
      const entryRelPath = relativePath ? path.join(relativePath, entry.name) : entry.name;
      const entryFullPath = path.join(currentPath, entry.name);
      if (entry.isDirectory()) {
        await walkDir(entryFullPath, entryRelPath);
      } else if (entry.isFile()) {
        results.push(entryRelPath);
      }
    }
  }

  await walkDir(dirPath);
  return results;
}

// @pushrocks scope
import * as smartfile from '@push.rocks/smartfile';