Skip to content

Commit

Permalink
Benchmarking platform (#521)
Browse files Browse the repository at this point in the history
* Added benchmark app + deployment workflow
* Added data.json and improved app with logo
* Improved benchmark readme

---------

Co-authored-by: Yolan Romailler <[email protected]>
  • Loading branch information
matteosz and AnomalRoil authored Jul 1, 2024
1 parent 72a3fc0 commit 8e4fa30
Show file tree
Hide file tree
Showing 27 changed files with 35,029 additions and 0 deletions.
39 changes: 39 additions & 0 deletions .github/workflows/deploy.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
name: Deploy Static Site and React App to GitHub Pages

# Rebuild and publish the benchmark app + static docs whenever master changes.
on:
  push:
    branches:
      - master

# The deploy step pushes to the gh-pages branch, which needs write access
# when the default GITHUB_TOKEN permissions are read-only.
permissions:
  contents: write

jobs:
  build-and-deploy:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: 22

      # Build the React benchmark app and stage it under public/benchmark so
      # it is served from <site>/benchmark.
      - name: Install dependencies and build React app
        run: |
          cd ./docs/benchmark-app/
          npm install
          npm run build
          cd ../../
          mkdir -p public/benchmark
          mv ./docs/benchmark-app/build/* public/benchmark/

      # The landing page and markdown docs are served from the site root.
      - name: Copy static index.html
        run: |
          cp ./docs/index.html public/
          cp ./docs/*.md public/

      - name: Deploy to GitHub Pages
        uses: peaceiris/actions-gh-pages@v4
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: ./public
88 changes: 88 additions & 0 deletions benchmark/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,88 @@
# Benchmark

This folder contains the script to measure the performance of various components in Kyber.

## Running the Benchmarks on your local machine

To run the benchmarks, follow these steps:

1. Clone the repository:

```shell
git clone https://github.com/dedis/kyber.git
```

2. Navigate to the benchmark directory:

```shell
cd benchmark
```

3. Run the benchmarks:

```shell
go run benchmark.go
```

If you want a data visualization tool for the benchmarks, then simply fork the repository, execute the steps above and push your changes to master branch. Then, wait for the deploy workflow to finish and you'll have the platform on the endpoint ___your_username.github.io/kyber/benchmark___ (or a different domain if you set a custom one).
## Benchmarked Items
So far, the following items are benchmarked in this project:
- All the groups implemented in kyber (Ed25519, P256, Residue512, bn254, bn256)
- Anon and BLS signatures
For more up-to-date details on the benchmarked items, refer to the `benchmark.go` file.
## Adding benchmarks
### Adding a group
To add a new group to the benchmarks you just need to add to the `suites` list the suite you want to test.
Here's an example:
<pre>
var (
...
suites = []kyber.Group{
...
edwards25519.NewBlakeSHA256Ed25519(),
<b>suiteYouWannaAdd.suiteFactory()</b>
}
...
)
</pre>

### Adding a signature
For signatures there is no unified interface as there is for groups, thus you would need to:
1. Add the new signature name `newSignature` to the `signatures` list
2. Add custom code in `benchmarkSign` when `sigType` == `newSignature`
Future work can focus on creating a homogeneous benchmarking interface for all signatures, simplifying their inclusion in the benchmark collection script.
### Adding a different module
So far only groups and signatures are supported. If you want to add a new module:
1. Fill the `main` method by adding a new key to `results` with the name of the new module to benchmark.
2. Create a custom function `benchmarkNewModule` which returns a dictionary following the Json structure:
```json
{
"instance": { // instance of the given module
"benchmarks": { // fixed key word
"type": { // type of operations supported
"operation": { // name of the specific atomic operation
"N": // total number of iterations
"T": // total time in nanoseconds
"Bytes": // total number of bytes allocated
...
}, ...
}, ...
},
"description": // description displayed on front-end
"name": // name displayed on front-end
}, ...
}
```
Take a look at the actual Json [data](../docs/benchmark-app/src/data/data.json) as reference.
3. Update the [benchmark-app](../docs/benchmark-app/) to support the new module.
# Public benchmarks
If you don't want to run the benchmarks yourself, you can find them [here](https://dedis.github.io/kyber/benchmark)
204 changes: 204 additions & 0 deletions benchmark/benchmark.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,204 @@
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"strconv"
	"testing"

	"go.dedis.ch/kyber/v4"
	"go.dedis.ch/kyber/v4/group/edwards25519"
	nist "go.dedis.ch/kyber/v4/group/p256"
	"go.dedis.ch/kyber/v4/pairing/bn254"
	"go.dedis.ch/kyber/v4/pairing/bn256"
	"go.dedis.ch/kyber/v4/sign/anon"
	"go.dedis.ch/kyber/v4/sign/bls"
	"go.dedis.ch/kyber/v4/util/test"
)

var (
	// outputFile is where the benchmark results are written, relative to this
	// package; it is the data file consumed by the React benchmark app.
	outputFile = "../docs/benchmark-app/src/data/data.json"
	// suites lists the kyber groups whose scalar and point operations are
	// benchmarked.
	suites = []kyber.Group{
		nist.NewBlakeSHA256P256(), nist.NewBlakeSHA256QR512(),
		bn256.NewSuiteG1(),
		bn254.NewSuiteG1(),
		edwards25519.NewBlakeSHA256Ed25519()}
	// signatures lists the signature schemes handled by benchmarkSign.
	signatures = []string{"anon", "bls"}
)

// BenchmarkGroup runs benchmarks for the given group and writes the results to a JSON file.
// benchmarkGroup runs scalar and point micro-benchmarks for the given group
// and returns the results as a map ready for JSON encoding: the group name,
// its description, and a "benchmarks" map keyed by operation kind ("scalar",
// "point") then operation name.
func benchmarkGroup(name string, description string, gb *test.GroupBench) map[string]interface{} {
	fmt.Printf("Running benchmarks for group %s...\n", name)

	// op pairs an operation name with the GroupBench method that runs it n
	// times; using slices (not maps) keeps the execution order deterministic.
	type op struct {
		name string
		fn   func(n int)
	}
	scalarOps := []op{
		{"add", gb.ScalarAdd},
		{"sub", gb.ScalarSub},
		{"neg", gb.ScalarNeg},
		{"mul", gb.ScalarMul},
		{"div", gb.ScalarDiv},
		{"inv", gb.ScalarInv},
		{"pick", gb.ScalarPick},
		{"encode", gb.ScalarEncode},
		{"decode", gb.ScalarDecode},
	}
	pointOps := []op{
		{"add", gb.PointAdd},
		{"sub", gb.PointSub},
		{"neg", gb.PointNeg},
		{"mul", gb.PointMul},
		{"baseMul", gb.PointBaseMul},
		{"pick", gb.PointPick},
		{"encode", gb.PointEncode},
		{"decode", gb.PointDecode},
	}

	results := make(map[string]map[string]testing.BenchmarkResult)
	for _, kind := range []struct {
		label string
		ops   []op
	}{{"scalar", scalarOps}, {"point", pointOps}} {
		results[kind.label] = make(map[string]testing.BenchmarkResult)
		for _, o := range kind.ops {
			fn := o.fn // capture per-iteration; safe on pre-1.22 toolchains too
			results[kind.label][o.name] = testing.Benchmark(func(b *testing.B) {
				fn(b.N)
			})
		}
	}

	return map[string]interface{}{
		"group":       name,
		"description": description,
		"benchmarks":  results,
	}
}

// BenchmarkSign runs benchmarks for the some signature schemes.
func benchmarkSign(sigType string) map[string]interface{} {
fmt.Printf("Running benchmarks for %s signature scheme...\n", sigType)
results := make(map[string]map[string]testing.BenchmarkResult)
results["keygen"] = make(map[string]testing.BenchmarkResult)
results["sign"] = make(map[string]testing.BenchmarkResult)
results["verify"] = make(map[string]testing.BenchmarkResult)

benchMessage := []byte("Hello World!")
keys := []int{1, 10, 100}

if sigType == "anon" {
// Generate keys
for _, i := range keys {
results["keygen"][fmt.Sprintf("%d", i)] = testing.Benchmark(func(b *testing.B) {
anon.BenchGenKeys(edwards25519.NewBlakeSHA256Ed25519(), i)
})
}
benchPubEd25519, benchPriEd25519 := anon.BenchGenKeys(edwards25519.NewBlakeSHA256Ed25519(), keys[len(keys)-1])

// Signing
for _, i := range keys {
results["sign"][fmt.Sprintf("%d", i)] = testing.Benchmark(func(b *testing.B) {
anon.BenchSign(edwards25519.NewBlakeSHA256Ed25519(), benchPubEd25519[:i], benchPriEd25519, b.N, benchMessage)
})
}

// Verification
for _, i := range keys {
results["verify"][fmt.Sprintf("%d", i)] = testing.Benchmark(func(b *testing.B) {
anon.BenchVerify(edwards25519.NewBlakeSHA256Ed25519(), benchPubEd25519[:i],
anon.BenchGenSig(edwards25519.NewBlakeSHA256Ed25519(), i, benchMessage, benchPubEd25519, benchPriEd25519),
b.N, benchMessage)
})
}
} else if sigType == "bls" {
// Key generation
for _, i := range keys {
scheme := bls.NewSchemeOnG1(bn256.NewSuite())
results["keygen"][fmt.Sprintf("%d", i)] = testing.Benchmark(func(b *testing.B) {
test.BenchCreateKeys(b, scheme, i)
})
}

// Signing
for _, i := range keys {
results["sign"][fmt.Sprintf("%d", i)] = testing.Benchmark(func(b *testing.B) {
_, scheme, _, privates, _, _ := test.PrepareBLS(i)
test.BenchSign(b, scheme, benchMessage, privates)
})
}

// Verification
for _, i := range keys {
results["verify"][fmt.Sprintf("%d", i)] = testing.Benchmark(func(b *testing.B) {
suite, scheme, publics, _, msgs, sigs := test.PrepareBLS(i)
test.BLSBenchVerify(b, sigs, scheme, suite, publics, msgs)
})
}
}

result := map[string]interface{}{
"name": sigType,
"description": "",
"benchmarks": results,
}

return result
}

// main runs all group and signature benchmarks and writes the combined
// results to outputFile as indented JSON. Failures are reported on stderr
// and produce a non-zero exit status so CI notices them.
func main() {
	if err := run(); err != nil {
		fmt.Fprintln(os.Stderr, "benchmark:", err)
		os.Exit(1) // the original returned from main, exiting 0 on failure
	}
}

// run collects every benchmark result and encodes it to outputFile.
// It is separate from main so that deferred cleanup (file.Close) still runs
// before the process exits on error.
func run() error {
	results := make(map[string]map[string]map[string]interface{})

	file, err := os.Create(outputFile)
	if err != nil {
		return fmt.Errorf("creating output file: %w", err)
	}
	defer file.Close()

	encoder := json.NewEncoder(file)
	encoder.SetIndent("", " ")

	// Run benchmarks for each group.
	results["groups"] = make(map[string]map[string]interface{})
	for _, suite := range suites {
		groupBench := test.NewGroupBench(suite)
		results["groups"][suite.String()] = benchmarkGroup(suite.String(), "Description", groupBench)
	}

	// Run benchmarks for each signature scheme.
	results["sign"] = make(map[string]map[string]interface{})
	for _, sigType := range signatures {
		results["sign"][sigType] = benchmarkSign(sigType)
	}

	if err := encoder.Encode(results); err != nil {
		return fmt.Errorf("encoding JSON: %w", err)
	}
	fmt.Printf("Benchmark results written to %s\n", outputFile)
	return nil
}
23 changes: 23 additions & 0 deletions docs/benchmark-app/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.

# dependencies
/node_modules
/.pnp
.pnp.js

# testing
/coverage

# production
/build

# misc
.DS_Store
.env.local
.env.development.local
.env.test.local
.env.production.local

npm-debug.log*
yarn-debug.log*
yarn-error.log*
Loading

0 comments on commit 8e4fa30

Please sign in to comment.