feat(docs): Expand README with detailed usage/examples, update test runner and test script, and pin/bump dependencies
changelog.md
@@ -1,5 +1,13 @@
 # Changelog
 
+## 2025-08-28 - 3.2.0 - feat(docs)
+Expand README with detailed usage/examples, update test runner and test script, and pin/bump dependencies
+
+- Completely overhauled README: added highlights, Quick Start, advanced configuration, core operations, examples, storage tier explanations, performance tips, and API reference.
+- Updated tests to use @git.zone/tstest/tapbundle (test import changed) and adjusted package.json test script to run with --verbose --logfile --timeout 60.
+- Bumped/pinned dependencies: @push.rocks/smartcache -> ^1.0.18 and several packages now have explicit version ranges (e.g. @push.rocks/lik -> ^6.2.2).
+- Removed devDependency on @push.rocks/tapbundle.
+
 ## 2025-08-28 - 3.1.2 - fix(core)
 Update CI workflows and dependencies; apply small bugfixes and formatting improvements
 
package.json
@@ -9,7 +9,7 @@
   "author": "Lossless GmbH",
   "license": "MIT",
   "scripts": {
-    "test": "(tstest test/ --web)",
+    "test": "(tstest test/ --verbose --logfile --timeout 60)",
     "build": "(tsbuild --web --allowimplicitany)",
     "buildDocs": "tsdoc",
     "localPublish": "gitzone commit && pnpm run build && pnpm publish && pnpm publish --access public --registry=\"https://registry.npmjs.org\""
@@ -18,13 +18,12 @@
     "@git.zone/tsbuild": "^2.6.7",
     "@git.zone/tsrun": "^1.2.44",
     "@git.zone/tstest": "^2.3.5",
-    "@push.rocks/tapbundle": "^6.0.3",
     "@types/node": "^24.3.0"
   },
   "dependencies": {
     "@push.rocks/lik": "^6.2.2",
     "@push.rocks/smartbucket": "^3.3.10",
-    "@push.rocks/smartcache": "^1.0.17",
+    "@push.rocks/smartcache": "^1.0.18",
     "@push.rocks/smartenv": "^5.0.13",
     "@push.rocks/smartexit": "^1.0.23",
     "@push.rocks/smartfile": "^11.2.7",

pnpm-lock.yaml (generated, 1142 lines changed)
File diff suppressed because it is too large.

readme.md (436 lines changed)
@@ -1,161 +1,373 @@
-# @push.rocks/levelcache
+# @push.rocks/levelcache 🚀
 
-A cache that utilizes memory, disk, and S3 for data storage and backup.
+**Supercharged Multi-Level Caching for Modern Applications**
+
+A high-performance, tiered caching solution that intelligently leverages memory, disk, and S3 storage to deliver blazing-fast data access with reliable persistence and backup capabilities.
+
+## Highlights
+
+✨ **Intelligent Tiered Storage** - Automatically routes data between memory, disk, and S3 based on size and access patterns
+⚡ **Lightning Fast** - Memory-first architecture ensures microsecond access times for hot data
+💾 **Persistent & Durable** - Optional disk and S3 layers provide data durability across restarts
+🎯 **TTL Support** - Built-in time-to-live for automatic cache expiration
+🔧 **TypeScript First** - Full type safety and excellent IDE support
+☁️ **S3 Ready** - Seamless integration with Amazon S3 for massive scale caching
 
 ## Install
 
-To install `@push.rocks/levelcache`, you can use npm or yarn:
-
 ```bash
+# Using npm
 npm install @push.rocks/levelcache --save
-```
 
-or
-
-```bash
+# Using yarn
 yarn add @push.rocks/levelcache
-```
 
-This installs `@push.rocks/levelcache` and adds it to your project's dependencies.
+# Using pnpm (recommended)
+pnpm add @push.rocks/levelcache
+```
 
 ## Usage
 
-`@push.rocks/levelcache` provides a comprehensive solution for multi-level caching that takes advantage of memory, disk, and Amazon S3 storage, making it a versatile tool for data caching and backup. The package is built with TypeScript, enabling strict type checks and better development experience. Below, we'll explore how to effectively employ `@push.rocks/levelcache` in your projects, discussing its features and demonstrating its usage with code examples.
-
-### 1. Overview
-
-The `LevelCache` class handles all cache operations. It decides where to store data based on pre-configured thresholds corresponding to the data size and the total storage capacity allocated for each storage type (memory/disk/S3). This mechanism optimizes both speed and persistence, allowing for efficient data storage and retrieval.
-
-### 2. Getting Started: Initialization
-
-To use `@push.rocks/levelcache`, you'll need to import the main classes: `LevelCache` and `CacheEntry`. `LevelCache` is the primary class, while `CacheEntry` represents individual pieces of cached data.
+### Quick Start
+
+Get up and running with just a few lines:
 
 ```typescript
 import { LevelCache, CacheEntry } from '@push.rocks/levelcache';
+
+// Initialize cache with minimal config
+const cache = new LevelCache({
+  cacheId: 'myAppCache'
+});
+
+// Wait for cache to be ready
+await cache.ready;
+
+// Store data
+const entry = new CacheEntry({
+  contents: Buffer.from('Hello Cache World! 🎉'),
+  ttl: 60000 // 1 minute TTL
+});
+
+await cache.storeCacheEntryByKey('greeting', entry);
+
+// Retrieve data
+const retrieved = await cache.retrieveCacheEntryByKey('greeting');
+console.log(retrieved.contents.toString()); // "Hello Cache World! 🎉"
 ```
 
-#### Initialization with Optional Configurations
+### Advanced Configuration
 
-To create a cache, instantiate the `LevelCache` class with desired configurations. You can specify the limits for memory and disk storage, setup S3 configurations if needed, and more.
+`LevelCache` offers granular control over storage tiers and behavior:
 
 ```typescript
-const myCache = new LevelCache({
-  cacheId: 'myUniqueCacheId', // Unique ID for cache delineation
-  maxMemoryStorageInMB: 10, // Maximum memory use in MB (default 0.5 MB)
-  maxDiskStorageInMB: 100, // Maximum disk space in MB (default 10 MB)
-  diskStoragePath: './myCache', // Path for storing disk cache; default is '.nogit'
+const cache = new LevelCache({
+  cacheId: 'productionCache',
+
+  // Storage Limits
+  maxMemoryStorageInMB: 128,      // 128MB RAM cache (default: 0.5)
+  maxDiskStorageInMB: 1024,       // 1GB disk cache (default: 10)
+  maxS3StorageInMB: 10240,        // 10GB S3 storage (optional)
+
+  // Disk Configuration
+  diskStoragePath: './cache-data', // Custom disk location (default: '.nogit')
+
+  // S3 Configuration (optional)
   s3Config: {
-    accessKeyId: 'yourAccessKeyId', // AWS S3 access key
-    secretAccessKey: 'yourSecretAccessKey', // Corresponding secret key
-    region: 'us-west-2', // AWS region, e.g., 'us-west-2'
+    accessKeyId: process.env.AWS_ACCESS_KEY_ID,
+    secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
+    region: 'us-east-1'
   },
-  s3BucketName: 'myBucketName', // Designated name for S3 bucket
-  immutableCache: false, // Whether stored cache entries should remain unaltered
-  persistentCache: true, // Should the cache persist upon restarts
+  s3BucketName: 'my-cache-bucket',
+
+  // Behavior Options
+  forceLevel: 'memory',            // Force specific tier (optional)
+  immutableCache: false,           // Prevent cache mutations
+  persistentCache: true            // Persist cache on restarts
 });
 ```
 
-### 3. Storing and Retrieving Data
-
-`LevelCache` methods enable seamless data storage and retrieval, handling complexity under the hood.
+### Core Operations
 
 #### Storing Data
 
-Create a `CacheEntry` specifying the data content and time-to-live (`ttl`). Use `storeCacheEntryByKey` to add this entry to the cache.
-
 ```typescript
-async function storeData() {
-  // Wait for cache to be ready before operations
-  await myCache.ready;
-
-  const entryContents = Buffer.from('Caching this data');
-  const myCacheEntry = new CacheEntry({
-    ttl: 7200000, // Time-to-live in milliseconds (2 hours)
-    contents: entryContents,
-  });
-
-  // Storing the cache entry associated with a specific key
-  await myCache.storeCacheEntryByKey('someDataKey', myCacheEntry);
-}
+// Store text data
+const textEntry = new CacheEntry({
+  contents: Buffer.from('Important text data'),
+  ttl: 3600000, // 1 hour
+  typeInfo: 'text/plain' // Optional metadata
+});
+await cache.storeCacheEntryByKey('document:123', textEntry);
+
+// Store JSON data
+const jsonData = { user: 'john', role: 'admin' };
+const jsonEntry = new CacheEntry({
+  contents: Buffer.from(JSON.stringify(jsonData)),
+  ttl: 7200000, // 2 hours
+  typeInfo: 'application/json'
+});
+await cache.storeCacheEntryByKey('user:john', jsonEntry);
+
+// Store binary data (images, files, etc.)
+const imageBuffer = await fs.readFile('./logo.png');
+const imageEntry = new CacheEntry({
+  contents: imageBuffer,
+  ttl: 86400000, // 24 hours
+  typeInfo: 'image/png'
+});
+await cache.storeCacheEntryByKey('assets:logo', imageEntry);
 ```
 
 #### Retrieving Data
 
-Retrieve stored data using `retrieveCacheEntryByKey`. The retrieved `CacheEntry` will give access to the original data.
-
 ```typescript
-async function retrieveData() {
-  const retrievedEntry = await myCache.retrieveCacheEntryByKey('someDataKey');
-  if (retrievedEntry) {
-    const data = retrievedEntry.contents.toString();
-    console.log(data); // Expected output: Caching this data
-  } else {
-    console.log('Data not found or expired.');
-  }
-}
-```
-
-### 4. Key Management: Updating and Deleting
-
-#### Deleting Cache Entries
-
-Remove entries with `deleteCacheEntryByKey`, enabling clean cache management.
-
-```typescript
-async function deleteData() {
-  // Removes an entry using its unique key identifier
-  await myCache.deleteCacheEntryByKey('someDataKey');
-}
-```
-
-### 5. Cache Cleaning
-
-Often, managing storage limits or removing outdated data becomes essential. The library supports these scenarios.
-
-#### Automated Cleaning
-
-While cache entries will naturally expire with `ttl` values, you can force-remove outdated entries.
-
-```typescript
-// Clean outdated or expired entries
-await myCache.cleanOutdated();
-```
-
-#### Full Cache Reset
-
-Clear all entries, efficiently resetting your cache storage.
-
-```typescript
-// Flush entire cache content
-await myCache.cleanAll();
-```
-
-### 6. Configuring and Managing Advanced Use Cases
-
-The flexible nature of `@push.rocks/levelcache` grants additional customization suited for more advanced requirements.
-
-#### Custom Route Management
-
-For certain demands, you might want to specify distinct data handling policies or routing logic.
-
-- Adjust S3 handling, size thresholds, or immutability options dynamically.
-- Utilize internal API expansions defined within the library for fine-grained operations.
-
-#### Handling Large Datasets
-
-Tailor the cache levels (memory, disk, S3) to accommodate higher loads:
-
-```typescript
-const largeDatasetCache = new LevelCache({
-  cacheId: 'largeDatasetCache',
-  // Customize limits and behavior for particular patterns
-  maxMemoryStorageInMB: 1024, // 1 GB memory allocation
-  maxDiskStorageInMB: 2048, // 2 GB disk space allowance
-  maxS3StorageInMB: 10240, // 10 GB S3 backup buffering
-});
-```
-
-With intelligent routing and management embedded, `LevelCache` ensures optimal trade-offs between speed and stability.
+// Basic retrieval
+const entry = await cache.retrieveCacheEntryByKey('user:john');
+if (entry) {
+  const userData = JSON.parse(entry.contents.toString());
+  console.log(userData); // { user: 'john', role: 'admin' }
+} else {
+  console.log('Cache miss or expired');
+}
+
+// Check if key exists
+const exists = await cache.checkKeyPresence('user:john');
+console.log(`Key exists: ${exists}`);
+
+// Handle cache misses gracefully
+async function getUser(userId: string) {
+  const cacheKey = `user:${userId}`;
+  let entry = await cache.retrieveCacheEntryByKey(cacheKey);
+
+  if (!entry) {
+    // Cache miss - fetch from database
+    const userData = await database.getUser(userId);
+
+    // Store in cache for next time
+    entry = new CacheEntry({
+      contents: Buffer.from(JSON.stringify(userData)),
+      ttl: 600000 // 10 minutes
+    });
+    await cache.storeCacheEntryByKey(cacheKey, entry);
+  }
+
+  return JSON.parse(entry.contents.toString());
+}
+```
+
+#### Managing Cache
+
+```typescript
+// Delete specific entry
+await cache.deleteCacheEntryByKey('user:john');
+
+// Clean expired entries
+await cache.cleanOutdated();
+
+// Clear entire cache
+await cache.cleanAll();
+```
 
+### Storage Tiers Explained
+
+`LevelCache` automatically determines the optimal storage tier based on data size and available capacity:
+
+#### 1. **Memory Cache** 🧠
+- **Speed**: Microsecond access
+- **Best for**: Frequently accessed, small data
+- **Default limit**: 0.5MB (configurable)
+- First tier checked for all operations
+
+#### 2. **Disk Cache** 💾
+- **Speed**: Millisecond access
+- **Best for**: Medium-sized data, persistent storage needed
+- **Default limit**: 10MB (configurable)
+- Data survives process restarts when `persistentCache: true`
+
+#### 3. **S3 Cache** ☁️
+- **Speed**: Network latency (typically 50-200ms)
+- **Best for**: Large data, long-term storage, distributed caching
+- **Default limit**: 50MB (configurable)
+- Requires S3 configuration
+- Ideal for shared cache across multiple instances
+
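To make the tier behaviour concrete, here is a minimal sketch of how the limits and the `forceLevel` option documented above could be combined; it is not part of the committed README, the option names come from it, and the cache IDs, sizes, and keys are illustrative assumptions.

```typescript
import { LevelCache, CacheEntry } from '@push.rocks/levelcache';

// Hot lookups pinned to the memory tier ('memory' is the forceLevel value shown in the config above).
const hotCache = new LevelCache({
  cacheId: 'hotLookups', // illustrative id
  maxMemoryStorageInMB: 64,
  forceLevel: 'memory',
});

// Larger objects allowed to spill to disk and survive restarts.
const blobCache = new LevelCache({
  cacheId: 'blobs', // illustrative id
  maxMemoryStorageInMB: 1,
  maxDiskStorageInMB: 512,
  persistentCache: true,
});

await Promise.all([hotCache.ready, blobCache.ready]);

// A tiny value stays in memory on the hot cache.
await hotCache.storeCacheEntryByKey(
  'feature-flags',
  new CacheEntry({ contents: Buffer.from('{"beta":true}'), ttl: 30000 }),
);

// A multi-megabyte value is expected to be routed to the disk tier on the blob cache.
await blobCache.storeCacheEntryByKey(
  'report:2025-08',
  new CacheEntry({ contents: Buffer.alloc(5 * 1024 * 1024, 1), ttl: 3600000 }),
);
```
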
+### Real-World Use Cases
+
+#### API Response Caching
+
+```typescript
+class ApiCache {
+  private cache: LevelCache;
+
+  constructor() {
+    this.cache = new LevelCache({
+      cacheId: 'apiResponses',
+      maxMemoryStorageInMB: 256,
+      maxDiskStorageInMB: 2048,
+      persistentCache: true
+    });
+  }
+
+  async getCachedResponse(endpoint: string, params: any) {
+    const cacheKey = `api:${endpoint}:${JSON.stringify(params)}`;
+
+    let cached = await this.cache.retrieveCacheEntryByKey(cacheKey);
+    if (cached) {
+      return JSON.parse(cached.contents.toString());
+    }
+
+    // Fetch fresh data
+    const response = await fetch(endpoint, { params });
+    const data = await response.json();
+
+    // Cache for 5 minutes
+    const entry = new CacheEntry({
+      contents: Buffer.from(JSON.stringify(data)),
+      ttl: 300000
+    });
+    await this.cache.storeCacheEntryByKey(cacheKey, entry);
+
+    return data;
+  }
+}
+```
+
+#### Session Storage
+
+```typescript
+class SessionManager {
+  private cache: LevelCache;
+
+  constructor() {
+    this.cache = new LevelCache({
+      cacheId: 'sessions',
+      maxMemoryStorageInMB: 64,
+      maxDiskStorageInMB: 512,
+      immutableCache: false,
+      persistentCache: true
+    });
+  }
+
+  async createSession(userId: string, sessionData: any) {
+    const sessionId = generateSessionId();
+    const entry = new CacheEntry({
+      contents: Buffer.from(JSON.stringify({
+        userId,
+        ...sessionData,
+        createdAt: Date.now()
+      })),
+      ttl: 86400000 // 24 hour sessions
+    });
+
+    await this.cache.storeCacheEntryByKey(`session:${sessionId}`, entry);
+    return sessionId;
+  }
+
+  async getSession(sessionId: string) {
+    const entry = await this.cache.retrieveCacheEntryByKey(`session:${sessionId}`);
+    return entry ? JSON.parse(entry.contents.toString()) : null;
+  }
+
+  async destroySession(sessionId: string) {
+    await this.cache.deleteCacheEntryByKey(`session:${sessionId}`);
+  }
+}
+```
+
+#### Distributed Processing Cache
+
+```typescript
+// Share computed results across multiple workers using S3
+const distributedCache = new LevelCache({
+  cacheId: 'mlModelResults',
+  maxMemoryStorageInMB: 512,
+  maxDiskStorageInMB: 5120,
+  maxS3StorageInMB: 102400, // 100GB for model outputs
+  s3Config: {
+    accessKeyId: process.env.AWS_ACCESS_KEY_ID,
+    secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
+    region: 'us-west-2'
+  },
+  s3BucketName: 'ml-computation-cache',
+  persistentCache: true
+});
+
+// Worker process can store results
+async function storeComputationResult(jobId: string, result: Buffer) {
+  const entry = new CacheEntry({
+    contents: result,
+    ttl: 604800000, // 7 days
+    typeInfo: 'application/octet-stream'
+  });
+  await distributedCache.storeCacheEntryByKey(`job:${jobId}`, entry);
+}
+
+// Other workers can retrieve results
+async function getComputationResult(jobId: string) {
+  const entry = await distributedCache.retrieveCacheEntryByKey(`job:${jobId}`);
+  return entry ? entry.contents : null;
+}
+```
+
+### Performance Tips 🎯
+
+1. **Size your tiers appropriately** - Set memory limits based on your hot data size
+2. **Use meaningful cache keys** - Include version/hash in keys for cache invalidation
+3. **Set realistic TTLs** - Balance freshness with performance
+4. **Monitor cache hit rates** - Track `checkKeyPresence()` to optimize tier sizes
+5. **Batch operations** - Group related cache operations when possible
+6. **Use compression** - Compress large values before caching to maximize tier utilization
+
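A minimal sketch of tips 2 and 6 in practice, assuming Node's built-in `zlib`; it is not from the committed README, and the versioned-key convention and helper names are illustrative.

```typescript
import { gzipSync, gunzipSync } from 'zlib';
import { LevelCache, CacheEntry } from '@push.rocks/levelcache';

const cache = new LevelCache({ cacheId: 'compressedCache' });
await cache.ready;

const SCHEMA_VERSION = 'v2'; // bump to invalidate previously cached entries (illustrative convention)

// Compress before caching so large values fit the smaller, faster tiers.
async function storeCompressed(key: string, value: unknown, ttl: number) {
  const compressed = gzipSync(Buffer.from(JSON.stringify(value)));
  const entry = new CacheEntry({ contents: compressed, ttl, typeInfo: 'application/gzip' });
  await cache.storeCacheEntryByKey(`${SCHEMA_VERSION}:${key}`, entry);
}

async function loadCompressed<T>(key: string): Promise<T | null> {
  const entry = await cache.retrieveCacheEntryByKey(`${SCHEMA_VERSION}:${key}`);
  if (!entry) return null;
  return JSON.parse(gunzipSync(entry.contents).toString()) as T;
}
```
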
+### Migration & Compatibility
+
+Coming from other caching solutions? Here's how LevelCache compares:
+
+- **Redis** → LevelCache provides similar speed with added persistence and S3 backup
+- **Memcached** → LevelCache adds persistence and automatic tier management
+- **Local storage** → LevelCache adds memory tier and S3 backup capabilities
+- **S3 only** → LevelCache adds memory and disk tiers for dramatic speed improvements
+
+## API Reference
+
+### LevelCache Class
+
+#### Constructor Options
+
+| Option | Type | Default | Description |
+|--------|------|---------|-------------|
+| `cacheId` | string | required | Unique identifier for the cache instance |
+| `maxMemoryStorageInMB` | number | 0.5 | Maximum memory storage in megabytes |
+| `maxDiskStorageInMB` | number | 10 | Maximum disk storage in megabytes |
+| `maxS3StorageInMB` | number | 50 | Maximum S3 storage in megabytes |
+| `diskStoragePath` | string | '.nogit' | Path for disk cache storage |
+| `s3Config` | object | undefined | AWS S3 configuration object |
+| `s3BucketName` | string | undefined | S3 bucket name for cache storage |
+| `forceLevel` | string | undefined | Force storage to specific tier |
+| `immutableCache` | boolean | false | Prevent cache entry modifications |
+| `persistentCache` | boolean | false | Persist cache across restarts |
+
+#### Methods
+
+| Method | Returns | Description |
+|--------|---------|-------------|
+| `ready` | Promise<void> | Resolves when cache is initialized |
+| `storeCacheEntryByKey(key, entry)` | Promise<void> | Store a cache entry |
+| `retrieveCacheEntryByKey(key)` | Promise<CacheEntry\|null> | Retrieve a cache entry |
+| `checkKeyPresence(key)` | Promise<boolean> | Check if key exists |
+| `deleteCacheEntryByKey(key)` | Promise<void> | Delete a cache entry |
+| `cleanOutdated()` | Promise<void> | Remove expired entries |
+| `cleanAll()` | Promise<void> | Clear entire cache |
+
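For reference, a small sketch of how the methods above could be wrapped to track hit rates (performance tip 4); the wrapper class is illustrative and not part of the package API.

```typescript
import { LevelCache, CacheEntry } from '@push.rocks/levelcache';

// Counts hits and misses around retrieveCacheEntryByKey to help size the tiers.
class InstrumentedCache {
  public hits = 0;
  public misses = 0;

  constructor(private cache: LevelCache) {}

  async get(key: string): Promise<CacheEntry | null> {
    const entry = await this.cache.retrieveCacheEntryByKey(key);
    entry ? this.hits++ : this.misses++;
    return entry;
  }

  get hitRate(): number {
    const total = this.hits + this.misses;
    return total === 0 ? 0 : this.hits / total;
  }
}
```
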
+### CacheEntry Class
+
+#### Constructor Options
+
+| Option | Type | Required | Description |
+|--------|------|----------|-------------|
+| `contents` | Buffer | yes | The data to cache |
+| `ttl` | number | yes | Time-to-live in milliseconds |
+| `typeInfo` | string | no | Optional metadata about content type |
+
 ## License and Legal Information
 
Test file
@@ -1,4 +1,4 @@
-import { expect, tap } from '@push.rocks/tapbundle';
+import { expect, tap } from '@git.zone/tstest/tapbundle';
 import * as levelcache from '../ts/index.js';
 import { CacheEntry } from '../ts/index.js';
 
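For orientation, a minimal spec using the updated import might look like the following; the `tap.test`/`tap.start` structure and the assertion style are assumptions based on typical tapbundle tests, not code from this commit.

```typescript
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as levelcache from '../ts/index.js';

let cache: levelcache.LevelCache;

tap.test('should create a cache instance', async () => {
  cache = new levelcache.LevelCache({ cacheId: 'testCache' });
  await cache.ready;
  expect(cache).toBeInstanceOf(levelcache.LevelCache);
});

tap.test('should store and retrieve an entry', async () => {
  const entry = new levelcache.CacheEntry({
    contents: Buffer.from('hello'),
    ttl: 10000,
  });
  await cache.storeCacheEntryByKey('greeting', entry);
  const retrieved = await cache.retrieveCacheEntryByKey('greeting');
  expect(retrieved.contents.toString()).toEqual('hello');
});

export default tap.start();
```
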
Commit info module (version bump)
@@ -3,6 +3,6 @@
  */
 export const commitinfo = {
   name: '@push.rocks/levelcache',
-  version: '3.1.2',
+  version: '3.2.0',
   description: 'A versatile caching solution offering multi-level storage utilizing memory, disk, and Amazon S3 for efficient data management and backup.'
 }