Skip to content

Commit

Permalink
Merge pull request #12 from DIG-Network/release/v0.0.1-alpha.12
Browse files Browse the repository at this point in the history
Release/v0.0.1 alpha.12
  • Loading branch information
MichaelTaylor3D authored Sep 10, 2024
2 parents 85459b7 + 13c1f82 commit 84be9c5
Show file tree
Hide file tree
Showing 4 changed files with 25 additions and 24 deletions.
8 changes: 8 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,14 @@

All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines.


### [0.0.1-alpha.12](https://github.com/DIG-Network/dig-chia-sdk/compare/v0.0.1-alpha.11...v0.0.1-alpha.12) (2024-09-10)


### Features

* add plimit to directory add ([468cff2](https://github.com/DIG-Network/dig-chia-sdk/commit/468cff28397c993b22af7388a8472b1d2068aebd))

### [0.0.1-alpha.11](https://github.com/DIG-Network/dig-chia-sdk/compare/v0.0.1-alpha.10...v0.0.1-alpha.11) (2024-09-10)


Expand Down
4 changes: 2 additions & 2 deletions package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion package.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"name": "@dignetwork/dig-sdk",
"version": "0.0.1-alpha.11",
"version": "0.0.1-alpha.12",
"description": "",
"type": "commonjs",
"main": "./dist/index.js",
Expand Down
35 changes: 14 additions & 21 deletions src/utils/directoryUtils.ts
Original file line number Diff line number Diff line change
Expand Up @@ -6,36 +6,29 @@ import pLimit from "p-limit";
import { DataIntegrityTree } from "../DataIntegrityTree";


const limit = pLimit(10); // Limit the concurrency to 10 (adjust based on your system's file descriptor limit)
// Function to dynamically load p-limit since it's an ES module
async function loadPLimit() {
const { default: pLimit } = await import('p-limit');
return pLimit;
}

// Promisify fs methods
const readdir = promisify(fs.readdir);
const stat = promisify(fs.stat);
const readFile = promisify(fs.readFile);

/**
* Recursively add all files in a directory to the Merkle tree, skipping the .dig, .git folders, and files in .gitignore.
* @param datalayer - The DataStoreManager instance.
* @param dirPath - The directory path.
* @param baseDir - The base directory for relative paths.
*/
export const addDirectory = async (
datalayer: DataIntegrityTree,
dirPath: string,
baseDir: string = dirPath
): Promise<void> => {
const limit = await loadPLimit(); // Dynamically load p-limit and get the default export
const ig = ignore();
const gitignorePath = path.join(baseDir, ".gitignore");

// Load .gitignore rules if the file exists
if (fs.existsSync(gitignorePath)) {
const gitignoreContent = await readFile(gitignorePath, "utf-8");
const gitignoreContent = fs.readFileSync(gitignorePath, "utf-8");
ig.add(gitignoreContent);
}

const files = await readdir(dirPath);
const files = fs.readdirSync(dirPath);

// Process each file or directory
await Promise.all(
files.map(async (file) => {
const filePath = path.join(dirPath, file);
Expand All @@ -46,14 +39,13 @@ export const addDirectory = async (
return;
}

const fileStat = await stat(filePath);
const stat = fs.statSync(filePath);

if (fileStat.isDirectory()) {
// Recursively process the directory
return addDirectory(datalayer, filePath, baseDir);
if (stat.isDirectory()) {
await addDirectory(datalayer, filePath, baseDir);
} else {
// Process the file with limited concurrency
return limit(() =>
// Use the dynamically loaded p-limit to limit concurrent file processing
return limit(10)(() =>
new Promise<void>((resolve, reject) => {
const stream = fs.createReadStream(filePath);
datalayer
Expand All @@ -68,6 +60,7 @@ export const addDirectory = async (
};



/**
* Calculate the total size of the DIG_FOLDER_PATH
* @param folderPath - The path of the folder to calculate size.
Expand Down

0 comments on commit 84be9c5

Please sign in to comment.