Add very simple benchmarking against reactor-c (#140)
* Remove any platform-specific code in code-generated CMakeLists

* Fix FlexPRET

* Dump working changes

* Minimum reaction and event queue size is 1

* Ironing out the final wrinkles

* Add support for running in fast mode: simply return immediately from any call to wait_until (see the sketch after this list)

* Typo

* Update examples

* Fix preamble handling

* Add two simple benchmarks

* Run benchmarks in CI

* Report benchmark result units

* Post benchmark results to the PR

* Try to output something from a step

* CI

* Fix colorized output

* Remove the need for a dynamically sized array on the stack

* Add a newline

* Another newline
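The fast-mode bullet above is terse, so here is a minimal C sketch of the idea: when fast mode is enabled, the scheduler's wait-until primitive returns immediately instead of sleeping, so logical time advances as fast as the host can process events. The `Environment` struct, its field names, and the signature below are illustrative assumptions, not the actual reactor-uc API.

```c
#include <stdbool.h>
#include <stdio.h>

typedef long long instant_t; // nanoseconds, as in LF runtimes

typedef struct {
  bool fast_mode; // hypothetical flag; the real runtime struct differs
} Environment;

// Sketch of the change: in fast mode, skip the sleep entirely and
// report that the wakeup time has been "reached".
int wait_until(Environment *env, instant_t wakeup_time) {
  if (env->fast_mode) {
    return 0; // return immediately; do not wait for physical time
  }
  // ... otherwise, sleep on the platform clock until wakeup_time ...
  (void)wakeup_time;
  return 0;
}

int main(void) {
  Environment env = {.fast_mode = true};
  printf("wait_until returned %d\n", wait_until(&env, 1000000000LL));
  return 0;
}
```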
erlingrj authored Nov 28, 2024
1 parent 86c9fad commit a7d9dc4
Showing 17 changed files with 744 additions and 13 deletions.
49 changes: 49 additions & 0 deletions .github/workflows/benchmark.yml
@@ -0,0 +1,49 @@
name: "Benchmarks"

permissions:
contents: write
pull-requests: write

on:
pull_request:

jobs:
ci:
name: Run benchmarks
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
with:
submodules: recursive

- name: Setup java and gradle for compiling lfc
uses: ./.github/actions/lingua-franca

- name: Install lfc
run: curl -Ls https://install.lf-lang.org | bash -s cli
- name: Run benchmarks
id: run_benchmarks
run: |
source env.bash
cd benchmarks
./runAll.sh
# This in conjunction with create-or-update-comment allows us to only
# comment once and update it after
- name: Find Comment
uses: peter-evans/find-comment@v3
id: fc
with:
issue-number: ${{ github.event.pull_request.number }}
comment-author: 'github-actions[bot]'
body-includes: Benchmark results

- name: Create or update comment
uses: peter-evans/create-or-update-comment@v4
with:
comment-id: ${{ steps.fc.outputs.comment-id }}
issue-number: ${{ github.event.pull_request.number }}
body-path: benchmarks/benchmark_results.md
edit-mode: replace

2 changes: 1 addition & 1 deletion .github/workflows/memory.yml
@@ -50,7 +50,7 @@ jobs:
        with:
          issue-number: ${{ github.event.pull_request.number }}
          comment-author: 'github-actions[bot]'
-         body-includes: Memory report
+         body-includes: Memory usage after merging this PR

      - name: Create or update comment
        uses: peter-evans/create-or-update-comment@v4
1 change: 1 addition & 0 deletions .gitignore
@@ -1,6 +1,7 @@
**/build/
**/src-gen/
**/bin/
+benchmarks/include
.vscode/
cmake-build-debug
cmake-build-release
2 changes: 1 addition & 1 deletion CMakeLists.txt
@@ -93,7 +93,7 @@ target_compile_options(reactor-uc PRIVATE -Wall -Wextra -Werror)

# Disable selected warnings
if (CMAKE_C_COMPILER_ID STREQUAL "GNU")
-  target_compile_options(reactor-uc PUBLIC -Wno-zero-length-bounds -Wno-stack-usage)
+  target_compile_options(reactor-uc PRIVATE -Wno-zero-length-bounds)
endif()

add_compile_options (-fdiagnostics-color=always)
50 changes: 50 additions & 0 deletions benchmarks/runAll.sh
@@ -0,0 +1,50 @@
#!/usr/bin/env bash
set -e

LFC=lfc
LFCG=${REACTOR_UC_PATH}/lfc/bin/lfc-dev

$LFC src/PingPongC.lf
$LFCG src/PingPongUc.lf

$LFC src/ReactionLatencyC.lf
$LFCG src/ReactionLatencyUc.lf

echo "Running benchmarks..."

ping_pong_c_result=$(bin/PingPongC | grep -E "Time: *.")
ping_pong_uc_result=$(bin/PingPongUc | grep -E "Time: *.")
latency_c_result=$(bin/ReactionLatencyC | grep -E "latency: *.")
latency_uc_result=$(bin/ReactionLatencyUc | grep -E "latency: *.")

# Create or clear the output file
output_file="benchmark_results.md"
: > "$output_file"

# Print and dump the results into the file
echo "Benchmark results after merging this PR:" >> "$output_file"
echo "<details><summary>Benchmark results</summary>" >> "$output_file"
echo "" >> "$output_file"
echo "## Performance:" >> "$output_file"
echo "" >> "$output_file"

benchmarks=("PingPongUc" "PingPongC" "ReactionLatencyUc" "ReactionLatencyC")
results=("$ping_pong_uc_result" "$ping_pong_c_result" "$latency_uc_result" "$latency_c_result")

for i in "${!benchmarks[@]}"; do
  echo "${benchmarks[$i]}:" >> "$output_file"
  echo "${results[$i]}" >> "$output_file"
  echo "" >> "$output_file"
done

echo "## Memory usage:" >> "$output_file"
for benchmark in PingPongUc PingPongC ReactionLatencyUc ReactionLatencyC; do
  echo "$benchmark:" >> "$output_file"
  size -d "bin/$benchmark" >> "$output_file"
  echo "" >> "$output_file"
done

echo "</details>" >> "$output_file"

cat "$output_file"
160 changes: 160 additions & 0 deletions benchmarks/src/BenchmarkRunnerC.lf
@@ -0,0 +1,160 @@
target C

/**
 * Reactor that starts the kernel of a benchmark, measures its runtime, and outputs the results for
 * a given number of iterations.
 *
 * This reactor is instantiated by the main reactor of a benchmark, and the startup reaction of
 * this reactor is the starting point for that benchmark. The reactor runs a given number of
 * iterations of the benchmark, measures the runtime of each iteration, and outputs them. The
 * benchmark itself is responsible for resetting its state between iterations. A benchmark can have
 * an optional initialization phase that is run once before the first iteration and is not
 * measured. A benchmark can also have an optional cleanup phase after each iteration, before the
 * next iteration starts, which is likewise excluded from the runtime measurement.
 *
 * How to use:
 * - Instantiate this reactor in the main reactor of the benchmark.
 * - Connect the start and finish ports to the appropriate reactors of the benchmark.
 * - Create a startup reaction in the main reactor that calls printBenchmarkInfo().
 *
 * Prototype startup reaction in the main reactor of a benchmark:
 *
 *     runner = new BenchmarkRunner(num_iterations=num_iterations);
 *     reaction(startup) {=
 *       printBenchmarkInfo("ThreadRingReactorLFCppBenchmark");
 *       printSystemInfo();
 *     =}
 *
 * @param num_iterations How many times to execute the kernel of the benchmark to measure.
 *
 * @author Hannes Klein
 * @author Shaokai Lin
 * @author Matt Chorlian
 * @author Arthur Deng
 */
preamble {=
  #include <stdio.h>
  #include <stdlib.h> // for calloc(), qsort(), and free() used below
=}

reactor BenchmarkRunner(num_iterations: size_t = 12) {
  /** Signal to start execution. Set this input from a startup reaction in the main reactor. */
  input inStart: bool

  /** Signals for starting and finishing the kernel and runtime measurement. */
  output start: bool
  input finish: bool

  /** Events to switch between the phases of running the iterations. */
  logical action nextIteration
  logical action done

  /** Number of iterations already executed. */
  state count: unsigned = 0

  /** Start time for runtime measurement. */
  state startTime: instant_t

  /** Runtime measurements. */
  state measuredTimes: interval_t*

  preamble {=
    static double toMS(interval_t t) {
      return t / 1000000.0;
    }

    // Comparator for qsort(); compares two doubles. (The original int
    // temporaries silently truncated the values being compared.)
    int comp(const void* elem1, const void* elem2) {
      double f = *((const double*)elem1);
      double s = *((const double*)elem2);
      if (f > s) return 1;
      if (f < s) return -1;
      return 0;
    }

    static double median(double* execTimes, int size) {
      if (size == 0) {
        return 0.0;
      }

      int middle = size / 2;
      if (size % 2 == 1) {
        return execTimes[middle];
      } else {
        return (execTimes[middle - 1] + execTimes[middle]) / 2;
      }
    }

    // Convert the raw interval_t measurements to milliseconds.
    static double* getMSMeasurements(interval_t* measured_times, int num_iterations) {
      double* msMeasurements = (double*)calloc(num_iterations, sizeof(double));
      for (int i = 0; i < num_iterations; i++) {
        msMeasurements[i] = toMS(measured_times[i]);
      }

      return msMeasurements;
    }
  =}

  preamble {=
    void printBenchmarkInfo(char* benchmarkId) {
      printf("Benchmark: %s\n", benchmarkId);
    }

    void printSystemInfo() {
      printf("System information\n");
      printf("O/S Name: ");

      #ifdef _WIN32
      printf("Windows 32-bit");
      #elif _WIN64
      printf("Windows 64-bit");
      #elif __APPLE__ || __MACH__
      printf("Mac OSX");
      #elif __linux__
      printf("Linux");
      #elif __FreeBSD__
      printf("FreeBSD");
      #elif __unix || __unix__
      printf("Unix");
      #else
      printf("Other");
      #endif

      printf("\n");
    }
  =}

  reaction(startup) -> nextIteration {=
    // Initialize an array of interval_t, one slot per iteration.
    self->measuredTimes = (interval_t*)calloc(self->num_iterations, sizeof(interval_t));
    lf_schedule(nextIteration, 0);
  =}

  reaction(nextIteration) -> start, done {=
    if (self->count < self->num_iterations) {
      self->startTime = lf_time_physical();
      lf_set(start, true);
    } else {
      lf_schedule(done, 0);
    }
  =}

  reaction(finish) -> nextIteration {=
    interval_t end_time = lf_time_physical();
    interval_t duration = end_time - self->startTime;
    self->measuredTimes[self->count] = duration;
    self->count += 1;

    printf("Iteration %u - %.3f ms\n", self->count, toMS(duration));

    lf_schedule(nextIteration, 0);
  =}

  reaction(done) {=
    double* measuredMSTimes = getMSMeasurements(self->measuredTimes, self->num_iterations);
    qsort(measuredMSTimes, self->num_iterations, sizeof(double), comp);

    printf("Execution - Summary:\n");
    printf("Best Time:\t %.3f msec\n", measuredMSTimes[0]);
    printf("Worst Time:\t %.3f msec\n", measuredMSTimes[self->num_iterations - 1]);
    printf("Median Time:\t %.3f msec\n", median(measuredMSTimes, self->num_iterations));

    // Release the measurement buffers before requesting shutdown.
    free(measuredMSTimes);
    free(self->measuredTimes);
    lf_request_stop();
  =}
}
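As a quick sanity check on the preamble helpers above, the stand-alone C program below sorts a few sample durations with the corrected `comp` comparator and reports best/worst/median the same way the `done` reaction does. Only `comp` and `median` are copied from the file; the `main` harness and its sample values are illustrative.

```c
#include <stdio.h>
#include <stdlib.h>

// Comparator and median as in the preamble above (with the comparator
// reading doubles, not ints).
static int comp(const void *elem1, const void *elem2) {
  double f = *((const double *)elem1);
  double s = *((const double *)elem2);
  if (f > s) return 1;
  if (f < s) return -1;
  return 0;
}

static double median(double *execTimes, int size) {
  if (size == 0) return 0.0;
  int middle = size / 2;
  if (size % 2 == 1) return execTimes[middle];
  return (execTimes[middle - 1] + execTimes[middle]) / 2;
}

int main(void) {
  double times_ms[] = {3.2, 1.1, 2.7, 4.9, 2.0}; // illustrative values
  int n = sizeof(times_ms) / sizeof(times_ms[0]);
  qsort(times_ms, n, sizeof(double), comp);
  printf("best %.3f, worst %.3f, median %.3f\n",
         times_ms[0], times_ms[n - 1], median(times_ms, n));
  return 0;
}
```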