Skip to content

Commit

Permalink
Create new latency benchmark for writing of one-byte files (#1190)
Browse files Browse the repository at this point in the history
Creates a new latency benchmark for writing of one-byte files. Also
creates a folder for writing latency benchmarks and extends the
`fs_latency_bench.sh` script to handle multiple folders.

### Does this change impact existing behavior? No

### Does this change need a changelog entry? No

---

By submitting this pull request, I confirm that my contribution is made
under the terms of the Apache 2.0 license and I agree to the terms of
the [Developer Certificate of Origin
(DCO)](https://developercertificate.org/).

---------

Signed-off-by: Renan Magagnin <[email protected]>
  • Loading branch information
renanmagagnin authored Dec 11, 2024
1 parent 9d1196d commit eecf301
Show file tree
Hide file tree
Showing 2 changed files with 77 additions and 57 deletions.
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
# fio job: measures the end-to-end latency of writing a one-byte file.
[global]
name=fs_bench
# 1-byte block size so each write op is a single byte
bs=1B

[time_to_write_one_byte_file]
# total file size: one byte per file
size=1B
rw=write
# synchronous I/O engine so latency reflects the full write path
ioengine=sync
# no preallocation — file space is allocated by the write itself
fallocate=none
# create the file when it is opened (creation cost is part of the measurement)
create_on_open=1
# fsync before close so the data is durably flushed within the timed window
fsync_on_close=1
# delete the file after the run to leave the bucket clean
unlink=1
# delete between loops as well, so every iteration writes a fresh file
unlink_each_loop=1
# repeat 10 times to average out per-operation noise
loops=10
120 changes: 63 additions & 57 deletions mountpoint-s3/scripts/fs_latency_bench.sh
Original file line number Diff line number Diff line change
Expand Up @@ -126,64 +126,70 @@ do
dir_size=$(awk "BEGIN {print $dir_size*10}")
done

#######################################
# Run every fio latency job in a category folder against a fresh
# Mountpoint mount, and emit per-job mean latency in milliseconds.
# Globals (read): S3_BUCKET_NAME, S3_BUCKET_TEST_PREFIX, optional_args,
#                 results_dir
# Arguments:      $1 - benchmark category ("read" or "write"); selects
#                      mountpoint-s3/scripts/fio/<category>_latency
# Outputs:        ${results_dir}/<job>_parsed.json per job
# Returns:        exits 1 on mount or fio failure
#######################################
run_file_benchmarks() {
  local category=$1
  local jobs_dir="mountpoint-s3/scripts/fio/${category}_latency"
  local job_file mount_dir job_name log_dir mount_status job_status

  for job_file in "${jobs_dir}"/*.fio; do
    # Guard against an empty/missing folder: without nullglob the literal
    # pattern would be passed to fio as a nonexistent file.
    [ -e "${job_file}" ] || continue

    mount_dir=$(mktemp -d /tmp/fio-XXXXXXXXXXXX)
    # Strip directory and the .fio extension with parameter expansion
    # (no need to fork basename).
    job_name=${job_file##*/}
    job_name=${job_name%.*}

    log_dir="logs/${job_name}"
    mkdir -p "${log_dir}"

    echo "Running ${job_name}"

    # Mount the file system under test.
    # ${optional_args} is deliberately unquoted: it may carry several
    # whitespace-separated flags.
    cargo run --release "${S3_BUCKET_NAME}" "${mount_dir}" \
      --allow-delete \
      --allow-overwrite \
      --log-directory="${log_dir}" \
      --prefix="${S3_BUCKET_TEST_PREFIX}" \
      --log-metrics \
      ${optional_args}
    mount_status=$?
    if [ "${mount_status}" -ne 0 ]; then
      echo "Failed to mount file system"
      exit 1
    fi

    # Lay out files for the test (not timed):
    echo >&2 Laying out files for "${job_file}"
    fio --thread \
      --directory="${mount_dir}" \
      --create_only=1 \
      --eta=never \
      "${job_file}"

    # Run the benchmark with a hard 5-minute cap.
    echo >&2 Running "${job_file}"
    timeout 300s fio --thread \
      --output="${results_dir}/${job_name}.json" \
      --output-format=json \
      --directory="${mount_dir}" \
      "${job_file}"
    job_status=$?
    if [ "${job_status}" -ne 0 ]; then
      tail -1000 "${log_dir}"/mountpoint-s3-*
      echo "Job ${job_name} failed with exit code ${job_status}"
      exit 1
    fi

    # Pick read or write latency depending on the job's rw mode and
    # convert fio's nanoseconds to milliseconds.
    jq -n 'inputs.jobs[] | if (."job options".rw == "read")
      then {name: .jobname, value: (.read.lat_ns.mean / 1000000), unit: "milliseconds"}
      elif (."job options".rw == "randread") then {name: .jobname, value: (.read.lat_ns.mean / 1000000), unit: "milliseconds"}
      elif (."job options".rw == "randwrite") then {name: .jobname, value: (.write.lat_ns.mean / 1000000), unit: "milliseconds"}
      else {name: .jobname, value: (.write.lat_ns.mean / 1000000), unit: "milliseconds"} end' "${results_dir}/${job_name}.json" | tee "${results_dir}/${job_name}_parsed.json"

    # Delete the raw fio output so only parsed results remain.
    rm "${results_dir}/${job_name}.json"

    # NOTE(review): mount_dir is never unmounted or removed inside this
    # loop — confirm cleanup happens elsewhere in the script.
  done
}

run_file_benchmarks read
run_file_benchmarks write

# Combine all parsed bench results into one JSON array.
# ${results_dir} is quoted against word-splitting; the *.json glob stays
# outside the quotes so it still expands.
jq -n '[inputs]' "${results_dir}"/*.json | tee "${results_dir}/output.json"

2 comments on commit eecf301

@github-actions
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Performance Alert ⚠️

Possible performance regression was detected for benchmark 'Throughput Benchmark (S3 Standard)'.
Benchmark result of this commit is worse than the previous benchmark result exceeding threshold 2.

Benchmark suite Current: eecf301 Previous: 9d1196d Ratio
sequential_read 595.602734375 MiB/s 1351.33427734375 MiB/s 2.27

This comment was automatically generated by workflow using github-action-benchmark.

@github-actions
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Performance Alert ⚠️

Possible performance regression was detected for benchmark 'Throughput Benchmark (S3 Standard)'.
Benchmark result of this commit is worse than the previous benchmark result exceeding threshold 2.

Benchmark suite Current: eecf301 Previous: 9d1196d Ratio
sequential_read 620.44560546875 MiB/s 1351.33427734375 MiB/s 2.18

This comment was automatically generated by workflow using github-action-benchmark.

Please sign in to comment.