diff --git a/config.js b/config.js index 8c743efb8c..5c3b2cf0bf 100644 --- a/config.js +++ b/config.js @@ -798,6 +798,9 @@ config.NSFS_GLACIER_MIGRATE_INTERVAL = 15 * 60 * 1000; // of `manage_nsfs glacier restore` config.NSFS_GLACIER_RESTORE_INTERVAL = 15 * 60 * 1000; +// enable/disable unsorted listing application level +config.NSFS_LIST_OBJECTS_V2_UNSORTED_ENABLED = false; + // NSFS_GLACIER_EXPIRY_RUN_TIME must be of the format hh:mm which specifies // when NooBaa should allow running glacier expiry process // NOTE: This will also be in the same timezone as specified in diff --git a/src/api/object_api.js b/src/api/object_api.js index 366c8d4660..c8d51b6362 100644 --- a/src/api/object_api.js +++ b/src/api/object_api.js @@ -720,6 +720,9 @@ module.exports = { limit: { type: 'integer' }, + list_type: { + type: 'string', + }, } }, reply: { @@ -774,6 +777,9 @@ module.exports = { limit: { type: 'integer' }, + list_type: { + type: 'string', + }, } }, reply: { diff --git a/src/endpoint/s3/ops/s3_get_bucket.js b/src/endpoint/s3/ops/s3_get_bucket.js index e661b2863a..6ef12d76f9 100644 --- a/src/endpoint/s3/ops/s3_get_bucket.js +++ b/src/endpoint/s3/ops/s3_get_bucket.js @@ -41,6 +41,7 @@ async function get_bucket(req) { bucket: req.params.bucket, prefix: req.query.prefix, delimiter: req.query.delimiter, + list_type: list_type, limit: Math.min(max_keys_received, 1000), key_marker: list_type === '2' ? (cont_tok_to_key_marker(cont_tok) || start_after) : req.query.marker, diff --git a/src/native/fs/fs_napi.cpp b/src/native/fs/fs_napi.cpp index 803fc30225..9a14e01cb6 100644 --- a/src/native/fs/fs_napi.cpp +++ b/src/native/fs/fs_napi.cpp @@ -2042,7 +2042,7 @@ struct TellDir : public FSWrapWorker } virtual void OnOK() { - DBG0("FS::Telldir::OnOK: " << DVAL(_wrap->_path) << DVAL(_tell_res)); + DBG1("FS::Telldir::OnOK: " << DVAL(_wrap->_path) << DVAL(_tell_res)); Napi::Env env = Env(); auto res = Napi::BigInt::New(env, _tell_res); _deferred.Resolve(res); diff --git a/src/sdk/namespace_fs.js b/src/sdk/namespace_fs.js index 8637ed0d39..df61db0ad1 100644 --- a/src/sdk/namespace_fs.js +++ b/src/sdk/namespace_fs.js @@ -582,6 +582,7 @@ class NamespaceFS { * prefix?: string, * delimiter?: string, * key_marker?: string, + * list_type?: string, * limit?: number, * }} ListParams */ @@ -600,6 +601,7 @@ class NamespaceFS { * prefix?: string, * delimiter?: string, * key_marker?: string, + * list_type?: string, * version_id_marker?: string, * limit?: number, * }} ListVersionsParams @@ -634,6 +636,19 @@ class NamespaceFS { if (!limit) return { is_truncated: false, objects: [], common_prefixes: [] }; let is_truncated = false; + let skip_list = false; + let pre_dir = ""; + let pre_dir_position = ""; + let key_marker_obj = {}; + let key_marker_value; + if (typeof(key_marker) === "object") { + key_marker_obj = key_marker; + key_marker_value = key_marker_obj.marker; + } else { + key_marker_obj.pre_dir = []; + key_marker_obj.pre_dir_pos = []; + key_marker_value = key_marker; + } /** * @typedef {{ @@ -666,10 +681,10 @@ class NamespaceFS { // dbg.log0(`prefix dir does not match so no keys in this dir can apply: dir_key=${dir_key} prefix_dir=${prefix_dir}`); return; } - const marker_dir = key_marker.slice(0, dir_key.length); - const marker_ent = key_marker.slice(dir_key.length); + const marker_dir = key_marker_value.slice(0, dir_key.length); + const marker_ent = key_marker_value.slice(dir_key.length); // marker is after dir so no keys in this dir can apply - if (dir_key < marker_dir) { + if (delimiter && dir_key < marker_dir) { // 
dbg.log0(`marker is after dir so no keys in this dir can apply: dir_key=${dir_key} marker_dir=${marker_dir}`);
             return;
         }
@@ -700,51 +715,129 @@ class NamespaceFS {
                 return; // not added
             }
             if (!delimiter && r.common_prefix) {
+                if (r.pre_dir && !key_marker_obj.pre_dir.includes(r.pre_dir)) {
+                    key_marker_obj.pre_dir.push(r.pre_dir);
+                    key_marker_obj.pre_dir_pos.push(r.marker_pos.toString());
+                }
                 await process_dir(r.key);
             } else {
+                if (key_marker_value.includes(r.key)) {
+                    return;
+                }
                 if (pos < results.length) {
-                    results.splice(pos, 0, r);
+                    if (r.marker_pos) {
+                        results.push(r);
+                    } else {
+                        results.splice(pos, 0, r);
+                    }
                 } else {
                     results.push(r);
                 }
+                if (results.length === limit && pre_dir) {
+                    key_marker_obj.pre_dir.push(pre_dir);
+                    key_marker_obj.pre_dir_pos.push(pre_dir_position.toString());
+                }
                 if (results.length > limit) {
                     results.length = limit;
                     is_truncated = true;
                 }
+                pre_dir = "";
+                pre_dir_position = "";
             }
         };
+
+        const push_dir_entries = async (marker_index, sorted_entries) => {
+            if (marker_index) {
+                const prev_dir = sorted_entries[marker_index - 1];
+                const prev_dir_name = prev_dir.name;
+                if (marker_curr.startsWith(prev_dir_name) && dir_key !== prev_dir.name) {
+                    if (!delimiter) {
+                        const isDir = await is_directory_or_symlink_to_directory(
+                            prev_dir, fs_context, path.join(dir_path, prev_dir_name, '/'));
+                        if (isDir) {
+                            await process_dir(path.join(dir_key, prev_dir_name, '/'));
+                        }
+                    }
+                }
+            }
+        };

         /**
          * @param {fs.Dirent} ent
+         * @param {string} pos
          */
-        const process_entry = async ent => {
+        const process_entry = async (ent, pos = '') => {
             // dbg.log0('process_entry', dir_key, ent.name);
             if ((!ent.name.startsWith(prefix_ent) ||
-                ent.name < marker_curr ||
+                (pos === "" && ent.name < marker_curr.split('/')[0]) ||
                 ent.name === this.get_bucket_tmpdir_name() ||
                 ent.name === config.NSFS_FOLDER_OBJECT_NAME) ||
                 this._is_hidden_version_path(ent.name)) {
                 return;
             }
             const isDir = await is_directory_or_symlink_to_directory(ent, fs_context, path.join(dir_path, ent.name));
-
+            const full_dir_path = dir_path.endsWith("/") ? dir_path : dir_path + "/";
             let r;
             if (list_versions && _is_version_or_null_in_file_name(ent.name)) {
                 r = {
                     key: this._get_version_entry_key(dir_key, ent),
                     common_prefix: isDir,
-                    is_latest: false
+                    is_latest: false,
+                    marker_pos: pos,
+                    pre_dir: pos ? full_dir_path : "",
                 };
             } else {
                 r = {
                     key: this._get_entry_key(dir_key, ent, isDir),
                     common_prefix: isDir,
-                    is_latest: true
+                    is_latest: true,
+                    marker_pos: pos,
+                    pre_dir: pos ? full_dir_path : "",
                 };
             }
             await insert_entry_to_results_arr(r);
         };

+        /**
+         * Stream directory entries for unsorted listing (list_type 2), resuming from a saved
+         * position and backtracking to parent directories via key_marker_obj.pre_dir / pre_dir_pos.
+         */
+        const process_unsort_entry = async () => {
+            for (;;) {
+                if (is_truncated) break;
+                const dir_entry = await dir_handle.read(fs_context);
+                // After listing the last item of a sub dir, check for a saved parent dir and position
+                // and go back to that position.
+                if (!dir_entry) {
+                    // Skip listing items in the bucket root path when the flow returns from a sub dir
+                    // to the bucket root dir; otherwise the root items would be listed twice:
+                    // once in the normal flow and again when returning from the sub dir.
+                    if (dir_path === this.bucket_path) {
+                        skip_list = true;
+                    }
+                    // After iterating the last entry of a sub dir the flow goes back to the parent folder;
+                    // to avoid listing items again from the start, use the previous dir path and position
+                    // saved in the pre_dir and pre_dir_pos arrays respectively.
+                    if (key_marker_obj.pre_dir.length > 0) {
+                        pre_dir = key_marker_obj.pre_dir.pop();
+                        const previous_dir_key = pre_dir.replace(this.bucket_path + "/", "");
+                        pre_dir_position = key_marker_obj.pre_dir_pos.pop();
+                        // The next process_dir() call will use the previous dir path and position to iterate
+                        // from the previously left parent dir position.
+                        key_marker_obj.marker_pos = pre_dir_position;
+                        key_marker_obj.marker = pre_dir;
+                        key_marker_value = key_marker_obj.marker;
+                        await process_dir(previous_dir_key);
+                        break;
+                    }
+                    break;
+                }
+                if ((dir_entry.name === config.NSFS_FOLDER_OBJECT_NAME && dir_key === marker_dir) || skip_list) {
+                    continue;
+                }
+                await process_entry(dir_entry, params.key_marker && !key_marker_obj.marker ? "" : dir_entry.off);
+            }
+        };
+
         if (!(await this.check_access(fs_context, dir_path))) return;
         try {
             if (list_versions) {
@@ -763,12 +856,12 @@ class NamespaceFS {
             // insert dir object to objects list if its key is lexicographicly bigger than the key marker &&
             // no delimiter OR prefix is the current directory entry
             const is_dir_content = cached_dir.stat.xattr && cached_dir.stat.xattr[XATTR_DIR_CONTENT];
-            if (is_dir_content && dir_key > key_marker && (!delimiter || dir_key === prefix)) {
+            if (is_dir_content && dir_key > key_marker_value && (!delimiter || dir_key === prefix)) {
                 const r = { key: dir_key, common_prefix: false };
                 await insert_entry_to_results_arr(r);
             }

-            if (cached_dir.sorted_entries) {
+            if (!config.NSFS_LIST_OBJECTS_V2_UNSORTED_ENABLED && cached_dir.sorted_entries) {
                 const sorted_entries = cached_dir.sorted_entries;
                 let marker_index;
                 // Two ways followed here to find the index.
@@ -796,19 +889,7 @@ class NamespaceFS {
                 // handling a scenario in which key_marker points to an object inside a directory
                 // since there can be entries inside the directory that will need to be pushed
                 // to results array
-                if (marker_index) {
-                    const prev_dir = sorted_entries[marker_index - 1];
-                    const prev_dir_name = prev_dir.name;
-                    if (marker_curr.startsWith(prev_dir_name) && dir_key !== prev_dir.name) {
-                        if (!delimiter) {
-                            const isDir = await is_directory_or_symlink_to_directory(
-                                prev_dir, fs_context, path.join(dir_path, prev_dir_name, '/'));
-                            if (isDir) {
-                                await process_dir(path.join(dir_key, prev_dir_name, '/'));
-                            }
-                        }
-                    }
-                }
+                await push_dir_entries(marker_index, sorted_entries);
                 for (let i = marker_index; i < sorted_entries.length; ++i) {
                     const ent = sorted_entries[i];
                     // when entry is NSFS_FOLDER_OBJECT_NAME=.folder file,
@@ -826,14 +907,25 @@ class NamespaceFS {
             // for large dirs we cannot keep all entries in memory
             // so we have to stream the entries one by one while filtering only the needed ones.
             try {
-                dbg.warn('NamespaceFS: open dir streaming', dir_path, 'size', cached_dir.stat.size);
-                dir_handle = await nb_native().fs.opendir(fs_context, dir_path); //, { bufferSize: 128 });
-                for (;;) {
-                    const dir_entry = await dir_handle.read(fs_context);
-                    if (!dir_entry) break;
-                    await process_entry(dir_entry);
-                    // since we dir entries streaming order is not sorted,
-                    // we have to keep scanning all the keys before we can stop.
+                if (params.list_type === "2") {
+                    // For unsorted listing, the directory position is used when pagination splits the items.
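                    // Illustration (not part of the patch): the resume step below, as a minimal sketch.
                    // The sample marker values are hypothetical; the calls (opendir/seekdir/read/close,
                    // dir_entry.off) are the ones used by this patch and its tests.
                    //
                    //   // shape produced by prepare_result() for list_type "2" on the previous page:
                    //   const saved = { marker: 'folder1/file57', marker_pos: '1234', pre_dir: [], pre_dir_pos: [] };
                    //   const dir = await nb_native().fs.opendir(fs_context, dir_path);
                    //   // seekdir() expects a BigInt offset previously returned by telldir() or dir_entry.off
                    //   if (saved.marker_pos) await dir.seekdir(fs_context, BigInt(saved.marker_pos));
                    //   for (;;) {
                    //       const ent = await dir.read(fs_context);
                    //       if (!ent) break;
                    //       // ent.off is the position to persist if this entry becomes the next marker
                    //   }
                    //   await dir.close(fs_context);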
+                    dbg.warn('NamespaceFS: open dir streaming', dir_path, 'size', cached_dir.stat.size, 'key_marker', key_marker_obj);
+                    dir_handle = await nb_native().fs.opendir(fs_context, dir_path); //, { bufferSize: 128 });
+                    if (key_marker_obj.marker_pos) {
+                        await dir_handle.seekdir(fs_context, BigInt(key_marker_obj.marker_pos));
+                        key_marker_obj.marker_pos = undefined;
+                    }
+                    await process_unsort_entry();
+                } else {
+                    dbg.warn('NamespaceFS: open dir streaming', dir_path, 'size', cached_dir.stat.size);
+                    dir_handle = await nb_native().fs.opendir(fs_context, dir_path); //, { bufferSize: 128 });
+                    for (;;) {
+                        const dir_entry = await dir_handle.read(fs_context);
+                        if (!dir_entry) break;
+                        await process_entry(dir_entry);
+                        // since the dir entries streaming order is not sorted,
+                        // we have to keep scanning all the keys before we can stop.
+                    }
                 }
                 await dir_handle.close(fs_context);
                 dir_handle = null;
@@ -850,22 +942,11 @@
             }
         };

-            let previous_key;
-            /**
-             * delete markers are always in the .versions folder, so we need to have special case to determine
-             * if they are delete markers. since the result list is ordered by latest entries first, the first
-             * entry of every key is the latest
-             * TODO need different way to check for isLatest in case of unordered list object versions
-             * @param {Object} obj_info
-             */
-            const set_latest_delete_marker = obj_info => {
-                if (obj_info.delete_marker && previous_key !== obj_info.key) {
-                    obj_info.is_latest = true;
-                }
-            };
-
             const prefix_dir_key = prefix.slice(0, prefix.lastIndexOf('/') + 1);
-            await process_dir(prefix_dir_key);
+            const current_dir = config.NSFS_LIST_OBJECTS_V2_UNSORTED_ENABLED && key_marker_obj.marker && key_marker_obj.marker.includes("/") ?
+                key_marker_obj.marker.substring(0, key_marker_obj.marker.lastIndexOf("/") + 1) : "";
+            await process_dir(prefix_dir_key + current_dir);
+
             await Promise.all(results.map(async r => {
                 if (r.common_prefix) return;
                 const entry_path = path.join(this.bucket_path, r.key);
@@ -873,44 +954,81 @@
                 const use_lstat = !(await this._is_path_in_bucket_boundaries(fs_context, entry_path));
                 r.stat = await nb_native().fs.stat(fs_context, entry_path, { use_lstat });
             }));
-            const res = {
-                objects: [],
-                common_prefixes: [],
-                is_truncated,
-                next_marker: undefined,
-                next_version_id_marker: undefined,
+            const next_marker_backtrack = {
+                pre_dir: key_marker_obj.pre_dir,
+                pre_dir_pos: key_marker_obj.pre_dir_pos
             };
-            for (const r of results) {
-                let obj_info;
-                if (r.common_prefix) {
-                    res.common_prefixes.push(r.key);
-                } else {
-                    obj_info = this._get_object_info(bucket, r.key, r.stat, false, r.is_latest);
-                    if (!list_versions && obj_info.delete_marker) {
-                        continue;
-                    }
-                    if (this._is_hidden_version_path(obj_info.key)) {
-                        obj_info.key = path.normalize(obj_info.key.replace(HIDDEN_VERSIONS_PATH + '/', ''));
-                        obj_info.key = _get_filename(obj_info.key);
-                        set_latest_delete_marker(obj_info);
-                    }
-                    res.objects.push(obj_info);
-                    previous_key = obj_info.key;
+            return await this.prepare_result(bucket, is_truncated, results, list_versions, next_marker_backtrack, params.list_type);
+        } catch (err) {
+            throw native_fs_utils.translate_error_codes(err, native_fs_utils.entity_enum.OBJECT);
+        }
+    }
+
+
+    /* Prepare the result for list_type 1 and list_type 2.
+       For list_type 1: return a plain `next_marker` string; it cannot hold complex objects, so the next marker
+       does not carry a file system position.
+       For list_type 2: return an object that contains `marker`, `marker_pos`, and the parent dir structure with positions for back
tracking when the child dir list all files. + */ + async prepare_result(bucket, is_truncated, results, list_versions, next_marker_backtrack, list_type) { + const res = { + objects: [], + common_prefixes: [], + is_truncated, + next_marker: undefined, + next_version_id_marker: undefined, + }; + + let previous_key; + /** + * delete markers are always in the .versions folder, so we need to have special case to determine + * if they are delete markers. since the result list is ordered by latest entries first, the first + * entry of every key is the latest + * TODO need different way to check for isLatest in case of unordered list object versions + * @param {Object} obj_info + */ + const set_latest_delete_marker = obj_info => { + if (obj_info.delete_marker && previous_key !== obj_info.key) { + obj_info.is_latest = true; + } + }; + for (const r of results) { + let obj_info; + if (r.common_prefix) { + res.common_prefixes.push(r.key); + } else { + obj_info = this._get_object_info(bucket, r.key, r.stat, false, r.is_latest); + if (!list_versions && obj_info.delete_marker) { + continue; } - if (res.is_truncated) { - if (list_versions && _is_version_object(r.key)) { - const next_version_id_marker = r.key.substring(r.key.lastIndexOf('/') + 1); - res.next_version_id_marker = next_version_id_marker; - res.next_marker = _get_filename(next_version_id_marker); - } else { - res.next_marker = r.key; - } + if (this._is_hidden_version_path(obj_info.key)) { + obj_info.key = path.normalize(obj_info.key.replace(HIDDEN_VERSIONS_PATH + '/', '')); + obj_info.key = _get_filename(obj_info.key); + set_latest_delete_marker(obj_info); + } + res.objects.push(obj_info); + previous_key = obj_info.key; + } + if (res.is_truncated) { + if (list_versions && _is_version_object(r.key)) { + const next_version_id_marker = r.key.substring(r.key.lastIndexOf('/') + 1); + res.next_version_id_marker = next_version_id_marker; + res.next_marker = list_type === "2" ? { + marker: _get_filename(next_version_id_marker), + marker_pos: r.marker_pos ? r.marker_pos.toString() : '', + ...next_marker_backtrack, + } : _get_filename(next_version_id_marker); + } else { + res.next_marker = list_type === "2" ? { + marker: r.key, + marker_pos: r.marker_pos ? r.marker_pos.toString() : '', + ...next_marker_backtrack + } : r.key; } } - return res; - } catch (err) { - throw native_fs_utils.translate_error_codes(err, native_fs_utils.entity_enum.OBJECT); } + return res; } ///////////////// diff --git a/src/test/system_tests/test_utils.js b/src/test/system_tests/test_utils.js index 5fc9cf1d32..ea5c73af05 100644 --- a/src/test/system_tests/test_utils.js +++ b/src/test/system_tests/test_utils.js @@ -432,6 +432,22 @@ function get_new_buckets_path_by_test_env(new_buckets_full_path, new_buckets_dir return is_nc_coretest ? 
path.join(new_buckets_full_path, new_buckets_dir) : new_buckets_dir; } +function make_dummy_object_sdk() { + return { + requesting_account: { + force_md5_etag: false, + nsfs_account_config: { + uid: process.getuid(), + gid: process.getgid(), + } + }, + abort_controller: new AbortController(), + throw_if_aborted() { + if (this.abort_controller.signal.aborted) throw new Error('request aborted signal'); + } + }; +} + /** * write_manual_config_file writes config file directly to the file system without using config FS * used for creating backward compatibility tests, invalid config files etc @@ -525,6 +541,7 @@ exports.delete_fs_user_by_platform = delete_fs_user_by_platform; exports.set_path_permissions_and_owner = set_path_permissions_and_owner; exports.set_nc_config_dir_in_config = set_nc_config_dir_in_config; exports.generate_anon_s3_client = generate_anon_s3_client; +exports.make_dummy_object_sdk = make_dummy_object_sdk; exports.TMP_PATH = TMP_PATH; exports.is_nc_coretest = is_nc_coretest; exports.generate_nsfs_account = generate_nsfs_account; diff --git a/src/test/unit_tests/jest_tests/test_list_object.test.js b/src/test/unit_tests/jest_tests/test_list_object.test.js deleted file mode 100644 index ea1fabdee8..0000000000 --- a/src/test/unit_tests/jest_tests/test_list_object.test.js +++ /dev/null @@ -1,175 +0,0 @@ -/* Copyright (C) 2016 NooBaa */ -'use strict'; - - -const fs = require('fs'); -const path = require('path'); -const fs_utils = require('../../../util/fs_utils'); -const nb_native = require('../../../util/nb_native'); -const {TMP_PATH} = require('../../system_tests/test_utils'); -const { get_process_fs_context } = require('../../../util/native_fs_utils'); - -const tmp_fs_path = path.join(TMP_PATH, 'test_list_object'); -const DEFAULT_FS_CONFIG = get_process_fs_context(); - -// eslint-disable-next-line max-lines-per-function -describe('manage list objct flow', () => { - describe('Telldir and Seekdir implementation', () => { - const list_dir_root = path.join(tmp_fs_path, 'list_dir_root'); - const list_dir_1_1 = path.join(list_dir_root, 'list_dir_1_1'); - const total_files = 4; - - beforeAll(async () => { - await fs_utils.create_fresh_path(list_dir_root); - await fs_utils.create_fresh_path(list_dir_1_1); - for (let i = 0; i < total_files; i++) { - create_temp_file(list_dir_root, `test_${i}.json`, {test: test}); - } - }); - - afterAll(async () => { - await fs_utils.folder_delete(`${list_dir_root}`); - await fs_utils.folder_delete(`${list_dir_1_1}`); - }); - - it('telldir returns bigint', async () => { - const dir_handle = await nb_native().fs.opendir(DEFAULT_FS_CONFIG, list_dir_root); - const tell_dir = await dir_handle.telldir(DEFAULT_FS_CONFIG); - expect(typeof tell_dir).toStrictEqual('bigint'); - }); - - it('seekdir expects bigint', async () => { - const big_int = 2n ** 32n; - const dir_handle = await nb_native().fs.opendir(DEFAULT_FS_CONFIG, list_dir_root); - const tell_dir = await dir_handle.telldir(DEFAULT_FS_CONFIG); - expect(() => dir_handle.seekdir(DEFAULT_FS_CONFIG, Number(tell_dir))).toThrow(); - expect(() => dir_handle.seekdir(DEFAULT_FS_CONFIG, 2n ** 32n ** 32n)).toThrow(); - expect(() => dir_handle.seekdir(DEFAULT_FS_CONFIG, -(2n ** 32n ** 32n))).toThrow(); - // valid scenario - expect(await dir_handle.seekdir(DEFAULT_FS_CONFIG, big_int)).toBeUndefined(); - - }); - - it('list dir files - telldir and seekdir.', async () => { - let tell_dir; - let dir_marker; - let total_dir_entries = 0; - let dir_entry; - let dir_handle = await nb_native().fs.opendir(DEFAULT_FS_CONFIG, 
list_dir_root); - // reak first read after 3 entries. - for (let i = 0; i <= 2; i++) { - dir_entry = await dir_handle.read(DEFAULT_FS_CONFIG); - if (!dir_entry) break; - tell_dir = await dir_handle.telldir(DEFAULT_FS_CONFIG); - dir_marker = { - dir_path: list_dir_root, - pos: tell_dir, - }; - total_dir_entries += 1; - } - // Continue the read using dir location fetch from telldir - try { - dir_handle = await nb_native().fs.opendir(DEFAULT_FS_CONFIG, dir_marker.dir_path); - await dir_handle.seekdir(DEFAULT_FS_CONFIG, dir_marker.pos); - for (;;) { - dir_entry = await dir_handle.read(DEFAULT_FS_CONFIG); - if (!dir_entry) break; - total_dir_entries += 1; - } - await dir_handle.close(DEFAULT_FS_CONFIG); - dir_handle = null; - } catch (err) { - console.log("Error :", err); - } - //total number of dir and files inside list_dir_root is 5 - expect(total_dir_entries).toBe(total_files + 1); - }); - - it('list dir files - Dir.read() and seekdir()', async () => { - let dir_marker; - let total_dir_entries = 0; - let dir_entry; - let dir_handle = await nb_native().fs.opendir(DEFAULT_FS_CONFIG, list_dir_root); - // reak first read after 3 entries. - for (let i = 0; i <= 2; i++) { - dir_entry = await dir_handle.read(DEFAULT_FS_CONFIG); - if (!dir_entry) break; - const tell_dir = await dir_handle.telldir(DEFAULT_FS_CONFIG); - //verify tell_dir and dir_entry.off return same value - expect(tell_dir).toBe(dir_entry.off); - dir_marker = { - dir_path: list_dir_root, - pos: dir_entry.off, - }; - total_dir_entries += 1; - } - // Continue the read using dir location fetch from Dir.read() - try { - dir_handle = await nb_native().fs.opendir(DEFAULT_FS_CONFIG, dir_marker.dir_path); - await dir_handle.seekdir(DEFAULT_FS_CONFIG, dir_marker.pos); - for (;;) { - dir_entry = await dir_handle.read(DEFAULT_FS_CONFIG); - if (!dir_entry) break; - total_dir_entries += 1; - } - await dir_handle.close(DEFAULT_FS_CONFIG); - dir_handle = null; - } catch (err) { - console.log("Error :", err); - } - //total number of dir and files inside list_dir_root is 5 - expect(total_dir_entries).toBe(total_files + 1); - }); - - it('list 10000 dir files - telldir and seekdir', async () => { - for (let i = total_files; i < total_files + 9995; i++) { - create_temp_file(list_dir_root, `test_${i}.json`, {test: test}); - } - let tell_dir; - let dir_marker; - let total_dir_entries = 0; - let dir_entry; - let dir_handle = await nb_native().fs.opendir(DEFAULT_FS_CONFIG, list_dir_root); - // reak first read after 3 entries. 
- for (let i = 0; i <= 500; i++) { - dir_entry = await dir_handle.read(DEFAULT_FS_CONFIG); - if (!dir_entry) break; - tell_dir = await dir_handle.telldir(DEFAULT_FS_CONFIG); - dir_marker = { - dir_path: list_dir_root, - pos: tell_dir, - }; - total_dir_entries += 1; - } - // Continue the read using dir location fetch from telldir - try { - dir_handle = await nb_native().fs.opendir(DEFAULT_FS_CONFIG, dir_marker.dir_path); - await dir_handle.seekdir(DEFAULT_FS_CONFIG, dir_marker.pos); - for (;;) { - dir_entry = await dir_handle.read(DEFAULT_FS_CONFIG); - if (!dir_entry) break; - total_dir_entries += 1; - } - await dir_handle.close(DEFAULT_FS_CONFIG); - dir_handle = null; - } catch (err) { - console.log("Error :", err); - } - //total number of dir and files inside list_dir_root is 5 - expect(total_dir_entries).toBe(10000); - }, 10000); - }); -}); - -/** - * create_temp_file would create a file with the data - * @param {string} path_to_dir - * @param {string} file_name - * @param {object} data - */ -async function create_temp_file(path_to_dir, file_name, data) { - const path_to_temp_file_name = path.join(path_to_dir, file_name); - const content = JSON.stringify(data); - await fs.promises.writeFile(path_to_temp_file_name, content); - return path_to_temp_file_name; -} diff --git a/src/test/unit_tests/jest_tests/test_unsort_list_object.test.js b/src/test/unit_tests/jest_tests/test_unsort_list_object.test.js new file mode 100644 index 0000000000..53a2bd6c92 --- /dev/null +++ b/src/test/unit_tests/jest_tests/test_unsort_list_object.test.js @@ -0,0 +1,539 @@ +/* Copyright (C) 2016 NooBaa */ +/* eslint-disable no-undef */ +'use strict'; + + +const fs = require('fs'); +const path = require('path'); +const fs_utils = require('../../../util/fs_utils'); +const nb_native = require('../../../util/nb_native'); +const { TMP_PATH, make_dummy_object_sdk } = require('../../system_tests/test_utils'); +const { get_process_fs_context } = require('../../../util/native_fs_utils'); +const NamespaceFS = require('../../../sdk/namespace_fs'); +const buffer_utils = require('../../../util/buffer_utils'); +const crypto = require('crypto'); +const P = require('../../../util/promise'); +const config = require('../../../../config'); + +const DEFAULT_FS_CONFIG = get_process_fs_context(); +config.NSFS_LIST_OBJECTS_V2_UNSORTED_ENABLED = true; +const tmp_nsfs_path = path.join(TMP_PATH, 'test_unsort_list_objects'); +const upload_bkt = 'test_ns_uploads_object'; +const src_bkt = 'src'; +const timeout = 50000; +const ns_tmp_bucket_path = `${tmp_nsfs_path}/${src_bkt}`; + +const tmp_ns_nsfs_path = path.join(TMP_PATH, 'test_nsfs_unsort_list'); +const nsfs_src_bkt = 'nsfs_src'; +const ns_nsfs_tmp_bucket_path = `${tmp_ns_nsfs_path}/${nsfs_src_bkt}`; +const list_bkt = 'test_ns_list_object'; + +const files_without_folders_to_upload = make_keys(264, i => `file_without_folder${i}`); +const folders_to_upload = make_keys(264, i => `folder${i}/`); +const files_in_folders_to_upload = make_keys(264, i => `folder1/file${i}`); +const files_in_utf_diff_delimiter = make_keys(264, i => `תיקיה#קובץ${i}`); +const files_in_inner_folders_to_upload_post = make_keys(264, i => `folder1/inner_folder/file${i}`); +const files_in_inner_folders_to_upload_pre = make_keys(264, i => `folder1/ainner_folder/file${i}`); +const dummy_object_sdk = make_dummy_object_sdk(); +const ns_tmp = new NamespaceFS({ bucket_path: ns_tmp_bucket_path, bucket_id: '2', namespace_resource_id: undefined }); +const ns_nsfs_tmp = new NamespaceFS({ bucket_path: ns_nsfs_tmp_bucket_path, 
bucket_id: '3', namespace_resource_id: undefined }); + +// eslint-disable-next-line max-lines-per-function +describe('manage unsorted list objcts flow', () => { + const keys_objects = make_keys(999, i => `max_keys_test${i}`); + describe('Unsorted List objects ', () => { + const data = crypto.randomBytes(100); + + beforeAll(async () => { + config.NSFS_LIST_OBJECTS_V2_UNSORTED_ENABLED = true; + await fs_utils.create_fresh_path(ns_tmp_bucket_path); + }); + + afterAll(async () => { + await fs_utils.folder_delete(`${ns_tmp_bucket_path}`); + config.NSFS_LIST_OBJECTS_V2_UNSORTED_ENABLED = false; + }); + + it('List object unsorted with one object upload', async () => { + const upload_key_2 = 'my_data'; + await ns_tmp.upload_object({ + bucket: upload_bkt, + key: upload_key_2, + source_stream: buffer_utils.buffer_to_read_stream(data), + }, dummy_object_sdk); + const ls_obj_res = await ns_tmp.list_objects({ + bucket: upload_bkt, + delimiter: '/', + }, dummy_object_sdk); + expect(ls_obj_res.objects.map(obj => obj.key)).toStrictEqual([upload_key_2]); + }); + + it('List object unsorted with multiple object upload', async () => { + await create_keys(upload_bkt, ns_tmp, keys_objects); + const ls_obj_res = await ns_tmp.list_objects({ + bucket: upload_bkt, + delimiter: '/', + }, dummy_object_sdk); + expect(ls_obj_res.objects.map(it => it.key).length).toStrictEqual(keys_objects.length + 1); + }); + }); + + describe('Telldir and Seekdir implementation', () => { + const tmp_fs_path = path.join(TMP_PATH, 'test_list_object'); + const list_dir_root = path.join(tmp_fs_path, 'list_dir_root'); + const list_dir_1_1 = path.join(list_dir_root, 'list_dir_1_1'); + const total_files = 4; + beforeAll(async () => { + config.NSFS_LIST_OBJECTS_V2_UNSORTED_ENABLED = true; + await fs_utils.create_fresh_path(list_dir_root); + await fs_utils.create_fresh_path(list_dir_1_1); + for (let i = 0; i < total_files; i++) { + create_temp_file(list_dir_root, `test_${i}.json`, {test: test}); + } + }); + + afterAll(async () => { + await fs_utils.folder_delete(`${list_dir_root}`); + await fs_utils.folder_delete(`${list_dir_1_1}`); + config.NSFS_LIST_OBJECTS_V2_UNSORTED_ENABLED = false; + }); + + it('telldir returns bigint', async () => { + const dir_handle = await nb_native().fs.opendir(DEFAULT_FS_CONFIG, list_dir_root); + const tell_dir = await dir_handle.telldir(DEFAULT_FS_CONFIG); + expect(typeof tell_dir).toStrictEqual('bigint'); + }); + + it('seekdir expects bigint', async () => { + const big_int = 2n ** 32n; + const dir_handle = await nb_native().fs.opendir(DEFAULT_FS_CONFIG, list_dir_root); + const tell_dir = await dir_handle.telldir(DEFAULT_FS_CONFIG); + expect(() => dir_handle.seekdir(DEFAULT_FS_CONFIG, Number(tell_dir))).toThrow(); + expect(() => dir_handle.seekdir(DEFAULT_FS_CONFIG, 2n ** 32n ** 32n)).toThrow(); + expect(() => dir_handle.seekdir(DEFAULT_FS_CONFIG, -(2n ** 32n ** 32n))).toThrow(); + // valid scenario + expect(await dir_handle.seekdir(DEFAULT_FS_CONFIG, big_int)).toBeUndefined(); + + }); + + it('list dir files - telldir and seekdir.', async () => { + let tell_dir; + let dir_marker; + let total_dir_entries = 0; + let dir_entry; + let dir_handle = await nb_native().fs.opendir(DEFAULT_FS_CONFIG, list_dir_root); + // reak first read after 3 entries. 
+ for (let i = 0; i <= 2; i++) { + dir_entry = await dir_handle.read(DEFAULT_FS_CONFIG); + if (!dir_entry) break; + tell_dir = await dir_handle.telldir(DEFAULT_FS_CONFIG); + dir_marker = { + dir_path: list_dir_root, + pos: tell_dir, + }; + total_dir_entries += 1; + } + // Continue the read using dir location fetch from telldir + try { + dir_handle = await nb_native().fs.opendir(DEFAULT_FS_CONFIG, dir_marker.dir_path); + await dir_handle.seekdir(DEFAULT_FS_CONFIG, dir_marker.pos); + for (;;) { + dir_entry = await dir_handle.read(DEFAULT_FS_CONFIG); + if (!dir_entry) break; + total_dir_entries += 1; + } + await dir_handle.close(DEFAULT_FS_CONFIG); + dir_handle = null; + } catch (err) { + console.log("Error :", err); + } + //total number of dir and files inside list_dir_root is 5 + expect(total_dir_entries).toBe(total_files + 1); + }); + + it('list dir files - Dir.read() and seekdir()', async () => { + let dir_marker; + let total_dir_entries = 0; + let dir_entry; + let dir_handle = await nb_native().fs.opendir(DEFAULT_FS_CONFIG, list_dir_root); + // reak first read after 3 entries. + for (let i = 0; i <= 2; i++) { + dir_entry = await dir_handle.read(DEFAULT_FS_CONFIG); + if (!dir_entry) break; + const tell_dir = await dir_handle.telldir(DEFAULT_FS_CONFIG); + //verify tell_dir and dir_entry.off return same value + expect(tell_dir).toBe(dir_entry.off); + dir_marker = { + dir_path: list_dir_root, + pos: dir_entry.off, + }; + total_dir_entries += 1; + } + // Continue the read using dir location fetch from Dir.read() + try { + dir_handle = await nb_native().fs.opendir(DEFAULT_FS_CONFIG, dir_marker.dir_path); + await dir_handle.seekdir(DEFAULT_FS_CONFIG, dir_marker.pos); + for (;;) { + dir_entry = await dir_handle.read(DEFAULT_FS_CONFIG); + if (!dir_entry) break; + total_dir_entries += 1; + } + await dir_handle.close(DEFAULT_FS_CONFIG); + dir_handle = null; + } catch (err) { + console.log("Error :", err); + } + //total number of dir and files inside list_dir_root is 5 + expect(total_dir_entries).toBe(total_files + 1); + }); + + it('list 10000 dir files - telldir and seekdir', async () => { + for (let i = total_files; i < total_files + 9995; i++) { + create_temp_file(list_dir_root, `test_${i}.json`, {test: test}); + } + let tell_dir; + let dir_marker; + let total_dir_entries = 0; + let dir_entry; + let dir_handle = await nb_native().fs.opendir(DEFAULT_FS_CONFIG, list_dir_root); + // Seak first read after 3 entries. 
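            // Illustration (not part of the test): the persist-and-resume pattern exercised below,
            // condensed. `some_dir` is a placeholder for an existing directory path.
            //
            //   let dir = await nb_native().fs.opendir(DEFAULT_FS_CONFIG, some_dir);
            //   const first = await dir.read(DEFAULT_FS_CONFIG);
            //   const marker = { dir_path: some_dir, pos: first.off }; // same value telldir() returns after read()
            //   await dir.close(DEFAULT_FS_CONFIG);
            //   // later: reopen and continue right after the saved entry
            //   dir = await nb_native().fs.opendir(DEFAULT_FS_CONFIG, marker.dir_path);
            //   await dir.seekdir(DEFAULT_FS_CONFIG, marker.pos); // BigInt offset
            //   const next = await dir.read(DEFAULT_FS_CONFIG);
            //   await dir.close(DEFAULT_FS_CONFIG);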
+ for (let i = 0; i <= 500; i++) { + dir_entry = await dir_handle.read(DEFAULT_FS_CONFIG); + if (!dir_entry) break; + tell_dir = await dir_handle.telldir(DEFAULT_FS_CONFIG); + dir_marker = { + dir_path: list_dir_root, + pos: tell_dir, + }; + total_dir_entries += 1; + } + // Continue the read using dir location fetch from telldir + try { + dir_handle = await nb_native().fs.opendir(DEFAULT_FS_CONFIG, dir_marker.dir_path); + await dir_handle.seekdir(DEFAULT_FS_CONFIG, dir_marker.pos); + for (;;) { + dir_entry = await dir_handle.read(DEFAULT_FS_CONFIG); + if (!dir_entry) break; + total_dir_entries += 1; + } + await dir_handle.close(DEFAULT_FS_CONFIG); + dir_handle = null; + } catch (err) { + console.log("Error :", err); + } + //total number of dir and files inside list_dir_root is 5 + expect(total_dir_entries).toBe(10000); + }); + }); + + + describe('list objects - dirs', () => { + beforeAll(async () => { + config.NSFS_LIST_OBJECTS_V2_UNSORTED_ENABLED = true; + await fs_utils.create_fresh_path(ns_tmp_bucket_path); + await create_keys(upload_bkt, ns_tmp, [ + ...folders_to_upload, + ...files_in_folders_to_upload, + ...files_without_folders_to_upload, + ...files_in_utf_diff_delimiter, + ...files_in_inner_folders_to_upload_pre, + ...files_in_inner_folders_to_upload_post + ]); + }); + afterAll(async function() { + config.NSFS_LIST_OBJECTS_V2_UNSORTED_ENABLED = false; + await delete_keys(upload_bkt, ns_tmp, [ + ...folders_to_upload, + ...files_in_folders_to_upload, + ...files_without_folders_to_upload, + ...files_in_utf_diff_delimiter, + ...files_in_inner_folders_to_upload_pre, + ...files_in_inner_folders_to_upload_post + ]); + await fs_utils.folder_delete(`${ns_tmp_bucket_path}`); + await P.delay(5 * 1000); + }, timeout); + it('key_marker=folder229/', async () => { + const r = await ns_tmp.list_objects({ + bucket: upload_bkt, + list_type: "2", + key_marker: 'folder229/' + }, dummy_object_sdk); + expect(r.is_truncated).toStrictEqual(false); + expect(r.common_prefixes).toStrictEqual([]); + const fd = folders_to_upload.filter(folder => folder > 'folder229/'); + expect(r.objects.map(it => it.key).sort()).toStrictEqual([...fd, ...files_in_utf_diff_delimiter].sort()); + }); + + it('key_marker=folder229', async function() { + const r = await ns_tmp.list_objects({ + bucket: upload_bkt, + list_type: "2", + key_marker: 'folder229' + }, dummy_object_sdk); + expect(r.is_truncated).toStrictEqual(false); + expect(r.common_prefixes).toStrictEqual([]); + const fd = folders_to_upload.filter(folder => folder >= 'folder229/'); + expect(r.objects.map(it => it.key).sort()).toEqual([...fd, ...files_in_utf_diff_delimiter].sort()); + }); + + it('key_marker=folder1/', async function() { + const r = await ns_tmp.list_objects({ + bucket: upload_bkt, + list_type: "2", + key_marker: 'folder1/' + }, dummy_object_sdk); + expect(r.is_truncated).toStrictEqual(true); + expect(r.common_prefixes).toStrictEqual([]); + expect(r.objects.length).toEqual(1000); + expect(r.objects.map(it => it.key)).not.toContain("folder0/"); + }); + + it('key_marker=folder1/file57', async function() { + const r = await ns_tmp.list_objects({ + bucket: upload_bkt, + list_type: "2", + key_marker: 'folder1/file57' + }, dummy_object_sdk); + expect(r.is_truncated).toStrictEqual(false); + expect(r.common_prefixes).toStrictEqual([]); + const fd = folders_to_upload.filter(folder => folder > 'folder1/'); + const fd1 = files_in_folders_to_upload.filter(folder => folder > 'folder1/file57'); + expect(r.objects.map(it => it.key).sort()).toEqual([...fd1, 
...files_in_inner_folders_to_upload_post, + ...fd, ...files_in_utf_diff_delimiter].sort()); + }); + it('key_marker=folder1/inner_folder/file40', async function() { + const r = await ns_tmp.list_objects({ + bucket: upload_bkt, + list_type: "2", + key_marker: 'folder1/inner_folder/file40' + }, dummy_object_sdk); + expect(r.is_truncated).toStrictEqual(false); + expect(r.common_prefixes).toStrictEqual([]); + const fd1 = files_in_inner_folders_to_upload_post.filter(file => file > 'folder1/inner_folder/file40'); + const fd = folders_to_upload.filter(folder => folder > 'folder1/'); + expect(r.objects.map(it => it.key).sort()).toEqual([...fd1, ...fd, ...files_in_utf_diff_delimiter].sort()); + }); + + it('key_marker=folder1/inner_folder/', async function() { + const r = await ns_tmp.list_objects({ + bucket: upload_bkt, + list_type: "2", + key_marker: 'folder1/inner_folder/' + }, dummy_object_sdk); + expect(r.is_truncated).toStrictEqual(false); + expect(r.common_prefixes).toStrictEqual([]); + const fd1 = files_in_inner_folders_to_upload_post.filter(file => file > 'folder1/inner_folder/'); + const fd = folders_to_upload.filter(folder => folder > 'folder1/inner_folder/'); + expect(r.objects.map(it => it.key).sort()).toEqual([...fd1, ...fd, ...files_in_utf_diff_delimiter].sort()); + }); + + it('key_marker=folder1/ainner_folder/', async function() { + const r = await ns_tmp.list_objects({ + bucket: upload_bkt, + list_type: "2", + key_marker: 'folder1/ainner_folder/file50' + }, dummy_object_sdk); + expect(r.is_truncated).toStrictEqual(true); + expect(r.common_prefixes).toStrictEqual([]); + expect(r.objects.length).toEqual(1000); + expect(r.objects.map(it => it.key)).not.toContain("folder1/ainner_folder/file50"); + expect(r.objects.map(it => it.key)).not.toContain("folder1/ainner_folder/file49"); + }); + + it('key_marker=folder1/inner_folder/file40 delimiter=/', async function() { + const r = await ns_tmp.list_objects({ + bucket: upload_bkt, + list_type: "2", + key_marker: 'folder1/inner_folder/file40', + delimiter: '/' + }, dummy_object_sdk); + expect(r.is_truncated).toStrictEqual(false); + const fd = folders_to_upload.filter(folder => folder > 'folder1/'); + expect(r.common_prefixes.sort()).toStrictEqual(fd.sort()); + expect(r.objects.map(it => it.key).sort()).toEqual(files_in_utf_diff_delimiter); + }); + }); + + describe('list objects - pagination', () => { + beforeAll(async () => { + config.NSFS_LIST_OBJECTS_V2_UNSORTED_ENABLED = true; + await fs_utils.create_fresh_path(ns_nsfs_tmp_bucket_path); + await create_keys(list_bkt, ns_nsfs_tmp, [ + ...files_without_folders_to_upload, + ...folders_to_upload, + ...files_in_folders_to_upload, + ...files_in_inner_folders_to_upload_post, + ...files_in_inner_folders_to_upload_pre, + ...files_in_utf_diff_delimiter, + ]); + await P.delay(5 * 1000); + }, timeout); + afterAll(async function() { + config.NSFS_LIST_OBJECTS_V2_UNSORTED_ENABLED = false; + await delete_keys(list_bkt, ns_nsfs_tmp, [ + ...folders_to_upload, + ...files_in_folders_to_upload, + ...files_without_folders_to_upload, + ...files_in_utf_diff_delimiter, + ...files_in_inner_folders_to_upload_pre, + ...files_in_inner_folders_to_upload_post + ]); + await fs_utils.folder_delete(`${ns_nsfs_tmp_bucket_path}`); + }); + it('page=1000 and list_type 2', async () => { + await P.delay(5 * 1000); + let r; + let total_items = 0; + for (;;) { + r = await ns_nsfs_tmp.list_objects({ + bucket: list_bkt, + list_type: "2", + key_marker: r ? 
r.next_marker : "", + }, dummy_object_sdk); + total_items += r.objects.length; + await validat_pagination(r, total_items); + if (!r.next_marker) { + break; + } + } + }, timeout); + it('page=500 and list_type 2', async () => { + await P.delay(5 * 1000); + let r; + let total_items = 0; + for (;;) { + r = await ns_nsfs_tmp.list_objects({ + bucket: list_bkt, + list_type: "2", + limit: 500, + key_marker: r ? r.next_marker : "", + }, dummy_object_sdk); + total_items += r.objects.length; + await validat_pagination(r, total_items); + if (!r.next_marker) { + break; + } + } + }, timeout); + it('page=250 and list_type 2', async () => { + await P.delay(5 * 1000); + let r; + let total_items = 0; + for (;;) { + r = await ns_nsfs_tmp.list_objects({ + bucket: list_bkt, + list_type: "2", + limit: 250, + key_marker: r ? r.next_marker : "", + }, dummy_object_sdk); + total_items += r.objects.length; + await validat_pagination(r, total_items); + if (!r.next_marker) { + break; + } + } + }, timeout); + it('page=250 and list_type 1', async () => { + let r; + let total_items = 0; + for (;;) { + r = await ns_nsfs_tmp.list_objects({ + bucket: list_bkt, + limit: 250, + key_marker: r ? r.next_marker : "", + }, dummy_object_sdk); + total_items += r.objects.length; + await validat_pagination(r, total_items); + if (!r.next_marker) { + break; + } + } + }); + it('page=500 and list_type 1', async () => { + let r; + let total_items = 0; + for (;;) { + r = await ns_nsfs_tmp.list_objects({ + bucket: list_bkt, + limit: 500, + key_marker: r ? r.next_marker : "", + }, dummy_object_sdk); + total_items += r.objects.length; + await validat_pagination(r, total_items); + if (!r.next_marker) { + break; + } + } + }); + it('page=1000 and list_type 1', async () => { + let r; + let total_items = 0; + for (;;) { + r = await ns_nsfs_tmp.list_objects({ + bucket: list_bkt, + key_marker: r ? 
r.next_marker : "", + }, dummy_object_sdk); + total_items += r.objects.length; + await validat_pagination(r, total_items); + if (!r.next_marker) { + break; + } + } + }); + }); +}); + + +async function validat_pagination(r, total_items) { + if (r.next_marker) { + expect(r.is_truncated).toStrictEqual(true); + } else { + expect(total_items).toEqual(1584); + expect(r.is_truncated).toStrictEqual(false); + } +} +/** + * @param {number} count + * @param {(i:number)=>string} gen + * @returns {string[]} + */ +function make_keys(count, gen) { + const arr = new Array(count); + for (let i = 0; i < count; ++i) arr[i] = gen(i); + arr.sort(); + Object.freeze(arr); + return arr; +} + +async function delete_keys(bkt, nsfs_delete_tmp, keys) { + await nsfs_delete_tmp.delete_multiple_objects({ + bucket: bkt, + objects: keys.map(key => ({ key })), + }, dummy_object_sdk); +} + +async function create_keys(bkt, nsfs_create_tmp, keys) { + return Promise.all(keys.map(async key => { + await nsfs_create_tmp.upload_object({ + bucket: bkt, + key, + content_type: 'application/octet-stream', + source_stream: buffer_utils.buffer_to_read_stream(null), + size: 0 + }, dummy_object_sdk); + })); +} + +/** + * create_temp_file would create a file with the data + * @param {string} path_to_dir + * @param {string} file_name + * @param {object} data + */ +async function create_temp_file(path_to_dir, file_name, data) { + const path_to_temp_file_name = path.join(path_to_dir, file_name); + const content = JSON.stringify(data); + await fs.promises.writeFile(path_to_temp_file_name, content); + return path_to_temp_file_name; +} diff --git a/src/test/unit_tests/test_namespace_fs.js b/src/test/unit_tests/test_namespace_fs.js index 8c429937c6..577728dee7 100644 --- a/src/test/unit_tests/test_namespace_fs.js +++ b/src/test/unit_tests/test_namespace_fs.js @@ -20,10 +20,9 @@ const s3_utils = require('../../endpoint/s3/s3_utils'); const buffer_utils = require('../../util/buffer_utils'); const { S3Error } = require('../../endpoint/s3/s3_errors'); const test_ns_list_objects = require('./test_ns_list_objects'); -const { TMP_PATH } = require('../system_tests/test_utils'); +const { TMP_PATH, make_dummy_object_sdk } = require('../system_tests/test_utils'); const { get_process_fs_context } = require('../../util/native_fs_utils'); const endpoint_stats_collector = require('../../sdk/endpoint_stats_collector'); -const SensitiveString = require('../../util/sensitive_string'); const inspect = (x, max_arr = 5) => util.inspect(x, { colors: true, depth: null, maxArrayLength: max_arr }); @@ -41,31 +40,6 @@ const DEFAULT_FS_CONFIG = get_process_fs_context(); const empty_data = crypto.randomBytes(0); const empty_stream = () => buffer_utils.buffer_to_read_stream(empty_data); -function make_dummy_object_sdk(config_root) { - return { - requesting_account: { - force_md5_etag: false, - nsfs_account_config: { - uid: process.getuid(), - gid: process.getgid(), - } - }, - abort_controller: new AbortController(), - throw_if_aborted() { - if (this.abort_controller.signal.aborted) throw new Error('request aborted signal'); - }, - - read_bucket_sdk_config_info(name) { - return { - bucket_owner: new SensitiveString('dummy-owner'), - owner_account: { - id: 'dummy-id-123', - } - }; - } - }; -} - mocha.describe('namespace_fs', function() { const src_bkt = 'src'; @@ -98,6 +72,7 @@ mocha.describe('namespace_fs', function() { }); mocha.before(async () => { + config.NSFS_LIST_OBJECTS_V2_UNSORTED_ENABLED = false; await P.all(_.map([src_bkt, upload_bkt, mpu_bkt], async buck => 
fs_utils.create_fresh_path(`${tmp_fs_path}/${buck}`))); }); diff --git a/src/test/unit_tests/test_namespace_fs_mpu.js b/src/test/unit_tests/test_namespace_fs_mpu.js index 9f11327de9..712cdf6229 100644 --- a/src/test/unit_tests/test_namespace_fs_mpu.js +++ b/src/test/unit_tests/test_namespace_fs_mpu.js @@ -17,35 +17,18 @@ const time_utils = require('../../util/time_utils'); const NamespaceFS = require('../../sdk/namespace_fs'); const s3_utils = require('../../endpoint/s3/s3_utils'); const buffer_utils = require('../../util/buffer_utils'); -const { TMP_PATH } = require('../system_tests/test_utils'); +const { TMP_PATH, make_dummy_object_sdk } = require('../system_tests/test_utils'); const endpoint_stats_collector = require('../../sdk/endpoint_stats_collector'); const inspect = (x, max_arr = 5) => util.inspect(x, { colors: true, depth: null, maxArrayLength: max_arr }); const XATTR_MD5_KEY = 'content_md5'; -const DUMMY_OBJECT_SDK = make_DUMMY_OBJECT_SDK(); +const DUMMY_OBJECT_SDK = make_dummy_object_sdk(); const src_bkt = 'src'; const tmp_fs_path = path.join(TMP_PATH, 'test_namespace_fs_mpu'); const ns_tmp_bucket_path = `${tmp_fs_path}/${src_bkt}`; -function make_DUMMY_OBJECT_SDK() { - return { - requesting_account: { - force_md5_etag: false, - nsfs_account_config: { - uid: process.getuid(), - gid: process.getgid(), - } - }, - abort_controller: new AbortController(), - throw_if_aborted() { - if (this.abort_controller.signal.aborted) throw new Error('request aborted signal'); - } - }; -} - - mocha.describe('namespace_fs mpu optimization tests', function() { const upload_bkt = 'test_ns_uploads_object';
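For reference, a condensed sketch (not part of the patch) of how a caller pages through an unsorted listing with list_type "2", mirroring the new pagination tests: for list_type "2" the returned `next_marker` is an object ({ marker, marker_pos, pre_dir, pre_dir_pos }) rather than a plain string, and it is passed back as `key_marker` on the next call. The bucket name, bucket path, and require paths below are placeholders.

    'use strict';
    const config = require('../../config');
    const NamespaceFS = require('../../sdk/namespace_fs');
    const { make_dummy_object_sdk } = require('../system_tests/test_utils');

    async function list_all_unsorted() {
        config.NSFS_LIST_OBJECTS_V2_UNSORTED_ENABLED = true;
        const ns = new NamespaceFS({ bucket_path: '/tmp/example-bucket', bucket_id: '1', namespace_resource_id: undefined });
        const sdk = make_dummy_object_sdk();
        const keys = [];
        let res;
        for (;;) {
            res = await ns.list_objects({
                bucket: 'example-bucket',
                list_type: "2",
                limit: 500,
                // for list_type "2", next_marker is the backtracking object, not a plain string
                key_marker: res ? res.next_marker : "",
            }, sdk);
            keys.push(...res.objects.map(obj => obj.key));
            if (!res.next_marker) break; // is_truncated is false on the last page
        }
        return keys;
    }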